Retire repo
This repo was created by accident, use deb-python-os-win instead. Needed-By: I1ac1a06931c8b6dd7c2e73620a0302c29e605f03 Change-Id: I81894aea69b9d09b0977039623c26781093a397a
This commit is contained in:
parent
579ec01225
commit
3b2f07a90d
@ -1,7 +0,0 @@
|
||||
[run]
|
||||
branch = True
|
||||
source = os_win
|
||||
omit = os_win/openstack/*
|
||||
|
||||
[report]
|
||||
ignore_errors = True
|
54
.gitignore
vendored
54
.gitignore
vendored
@ -1,54 +0,0 @@
|
||||
*.py[cod]
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Packages
|
||||
*.egg
|
||||
*.egg-info
|
||||
dist
|
||||
build
|
||||
.eggs
|
||||
eggs
|
||||
parts
|
||||
bin
|
||||
var
|
||||
sdist
|
||||
develop-eggs
|
||||
.installed.cfg
|
||||
lib
|
||||
lib64
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
.coverage
|
||||
.tox
|
||||
nosetests.xml
|
||||
.testrepository
|
||||
.venv
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
|
||||
# Mr Developer
|
||||
.mr.developer.cfg
|
||||
.project
|
||||
.pydevproject
|
||||
|
||||
# Complexity
|
||||
output/*.html
|
||||
output/*/index.html
|
||||
|
||||
# Sphinx
|
||||
doc/build
|
||||
|
||||
# pbr generates these
|
||||
AUTHORS
|
||||
ChangeLog
|
||||
|
||||
# Editors
|
||||
*~
|
||||
.*.swp
|
||||
.*sw?
|
@ -1,4 +0,0 @@
|
||||
[gerrit]
|
||||
host=review.openstack.org
|
||||
port=29418
|
||||
project=openstack/os-win.git
|
3
.mailmap
3
.mailmap
@ -1,3 +0,0 @@
|
||||
# Format is:
|
||||
# <preferred e-mail> <other e-mail 1>
|
||||
# <preferred e-mail> <other e-mail 2>
|
@ -1,7 +0,0 @@
|
||||
[DEFAULT]
|
||||
test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
|
||||
OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
|
||||
OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
|
||||
${PYTHON:-python} -m subunit.run discover -t ./ . $LISTOPT $IDOPTION
|
||||
test_id_option=--load-list $IDFILE
|
||||
test_list_option=--list
|
@ -1,17 +0,0 @@
|
||||
If you would like to contribute to the development of OpenStack, you must
|
||||
follow the steps in this page:
|
||||
|
||||
http://docs.openstack.org/infra/manual/developers.html
|
||||
|
||||
If you already have a good understanding of how the system works and your
|
||||
OpenStack accounts are set up, you can skip to the development workflow
|
||||
section of this documentation to learn how changes to OpenStack should be
|
||||
submitted for review via the Gerrit tool:
|
||||
|
||||
http://docs.openstack.org/infra/manual/developers.html#development-workflow
|
||||
|
||||
Pull requests submitted through GitHub will be ignored.
|
||||
|
||||
Bugs should be filed on Launchpad, not GitHub:
|
||||
|
||||
https://bugs.launchpad.net/os-win
|
@ -1,4 +0,0 @@
|
||||
os-win Style Commandments
|
||||
===============================================
|
||||
|
||||
Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/
|
175
LICENSE
175
LICENSE
@ -1,175 +0,0 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
@ -1,6 +0,0 @@
|
||||
include AUTHORS
|
||||
include ChangeLog
|
||||
exclude .gitignore
|
||||
exclude .gitreview
|
||||
|
||||
global-exclude *.pyc
|
19
README.rst
19
README.rst
@ -1,19 +0,0 @@
|
||||
===============================
|
||||
os-win
|
||||
===============================
|
||||
|
||||
Windows / Hyper-V library for OpenStack projects.
|
||||
|
||||
Library contains Windows / Hyper-V code commonly used in the OpenStack
|
||||
projects: nova, cinder, networking-hyperv. The library can be used in any
|
||||
other OpenStack projects where it is needed.
|
||||
|
||||
* Free software: Apache license
|
||||
* Documentation: http://docs.openstack.org/developer/os-win
|
||||
* Source: http://git.openstack.org/cgit/openstack/os-win
|
||||
* Bugs: http://bugs.launchpad.net/os-win
|
||||
|
||||
Features
|
||||
--------
|
||||
|
||||
* TODO
|
13
README.txt
Normal file
13
README.txt
Normal file
@ -0,0 +1,13 @@
|
||||
This project is no longer maintained.
|
||||
|
||||
The contents of this repository are still available in the Git
|
||||
source code management system. To see the contents of this
|
||||
repository before it reached its end of life, please check out the
|
||||
previous commit with "git checkout HEAD^1".
|
||||
|
||||
Use instead the project deb-python-os-win at
|
||||
http://git.openstack.org/cgit/openstack/deb-python-os-win .
|
||||
|
||||
For any further questions, please email
|
||||
openstack-dev@lists.openstack.org or join #openstack-dev on
|
||||
Freenode.
|
@ -1,75 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
sys.path.insert(0, os.path.abspath('../..'))
|
||||
# -- General configuration ----------------------------------------------------
|
||||
|
||||
# Add any Sphinx extension module names here, as strings. They can be
|
||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
|
||||
extensions = [
|
||||
'sphinx.ext.autodoc',
|
||||
#'sphinx.ext.intersphinx',
|
||||
'oslosphinx'
|
||||
]
|
||||
|
||||
# autodoc generation is a bit aggressive and a nuisance when doing heavy
|
||||
# text edit cycles.
|
||||
# execute "export SPHINX_DEBUG=1" in your terminal to disable
|
||||
|
||||
# The suffix of source filenames.
|
||||
source_suffix = '.rst'
|
||||
|
||||
# The master toctree document.
|
||||
master_doc = 'index'
|
||||
|
||||
# General information about the project.
|
||||
project = 'os-win'
|
||||
copyright = '2015, Cloudbase Solutions Srl'
|
||||
|
||||
# If true, '()' will be appended to :func: etc. cross-reference text.
|
||||
add_function_parentheses = True
|
||||
|
||||
# If true, the current module name will be prepended to all description
|
||||
# unit titles (such as .. function::).
|
||||
add_module_names = True
|
||||
|
||||
# The name of the Pygments (syntax highlighting) style to use.
|
||||
pygments_style = 'sphinx'
|
||||
|
||||
# -- Options for HTML output --------------------------------------------------
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. Major themes that come with
|
||||
# Sphinx are currently 'default' and 'sphinxdoc'.
|
||||
# html_theme_path = ["."]
|
||||
# html_theme = '_theme'
|
||||
# html_static_path = ['static']
|
||||
|
||||
# Output file base name for HTML help builder.
|
||||
htmlhelp_basename = '%sdoc' % project
|
||||
|
||||
# Grouping the document tree into LaTeX files. List of tuples
|
||||
# (source start file, target name, title, author, documentclass
|
||||
# [howto/manual]).
|
||||
latex_documents = [
|
||||
('index',
|
||||
'%s.tex' % project,
|
||||
'%s Documentation' % project,
|
||||
'OpenStack Foundation', 'manual'),
|
||||
]
|
||||
|
||||
# Example configuration for intersphinx: refer to the Python standard library.
|
||||
#intersphinx_mapping = {'http://docs.python.org/': None}
|
@ -1,4 +0,0 @@
|
||||
============
|
||||
Contributing
|
||||
============
|
||||
.. include:: ../../CONTRIBUTING.rst
|
@ -1,25 +0,0 @@
|
||||
.. os-win documentation master file, created by
|
||||
sphinx-quickstart on Tue Jul 9 22:26:36 2015.
|
||||
You can adapt this file completely to your liking, but it should at least
|
||||
contain the root `toctree` directive.
|
||||
|
||||
Welcome to os-win's documentation!
|
||||
========================================================
|
||||
|
||||
Contents:
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
readme
|
||||
installation
|
||||
usage
|
||||
contributing
|
||||
|
||||
Indices and tables
|
||||
==================
|
||||
|
||||
* :ref:`genindex`
|
||||
* :ref:`modindex`
|
||||
* :ref:`search`
|
||||
|
@ -1,12 +0,0 @@
|
||||
============
|
||||
Installation
|
||||
============
|
||||
|
||||
At the command line::
|
||||
|
||||
$ pip install os-win
|
||||
|
||||
Or, if you have virtualenvwrapper installed::
|
||||
|
||||
$ mkvirtualenv os-win
|
||||
$ pip install os-win
|
@ -1 +0,0 @@
|
||||
.. include:: ../../README.rst
|
@ -1,7 +0,0 @@
|
||||
========
|
||||
Usage
|
||||
========
|
||||
|
||||
To use os-win in a project::
|
||||
|
||||
import os_win
|
@ -1,6 +0,0 @@
|
||||
[DEFAULT]
|
||||
|
||||
# The list of modules to copy from oslo-incubator.git
|
||||
|
||||
# The base module to hold the copy of openstack.common
|
||||
base=os_win
|
@ -1,29 +0,0 @@
|
||||
# Copyright 2015 Cloudbase Solutions Srl
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import sys
|
||||
|
||||
from eventlet import patcher
|
||||
import pbr.version
|
||||
|
||||
|
||||
__version__ = pbr.version.VersionInfo(
|
||||
'os_win').version_string()
|
||||
|
||||
if sys.platform == 'win32':
|
||||
import wmi
|
||||
# We need to make sure that WMI uses the unpatched threading module.
|
||||
wmi.threading = patcher.original('threading')
|
@ -1,406 +0,0 @@
|
||||
# Copyright 2015 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import ast
|
||||
import re
|
||||
|
||||
import pep8
|
||||
|
||||
"""
|
||||
Guidelines for writing new hacking checks
|
||||
|
||||
- Use only for os_win specific tests. OpenStack general tests
|
||||
should be submitted to the common 'hacking' module.
|
||||
- Pick numbers in the range N3xx. Find the current test with
|
||||
the highest allocated number and then pick the next value.
|
||||
- Keep the test method code in the source file ordered based
|
||||
on the N3xx value.
|
||||
- List the new rule in the top level HACKING.rst file
|
||||
"""
|
||||
|
||||
UNDERSCORE_IMPORT_FILES = []
|
||||
|
||||
cfg_re = re.compile(r".*\scfg\.")
|
||||
asse_trueinst_re = re.compile(
|
||||
r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, "
|
||||
"(\w|\.|\'|\"|\[|\])+\)\)")
|
||||
asse_equal_type_re = re.compile(
|
||||
r"(.)*assertEqual\(type\((\w|\.|\'|\"|\[|\])+\), "
|
||||
"(\w|\.|\'|\"|\[|\])+\)")
|
||||
asse_equal_in_end_with_true_or_false_re = re.compile(r"assertEqual\("
|
||||
r"(\w|[][.'\"])+ in (\w|[][.'\", ])+, (True|False)\)")
|
||||
asse_equal_in_start_with_true_or_false_re = re.compile(r"assertEqual\("
|
||||
r"(True|False), (\w|[][.'\"])+ in (\w|[][.'\", ])+\)")
|
||||
asse_equal_end_with_none_re = re.compile(
|
||||
r"assertEqual\(.*?,\s+None\)$")
|
||||
asse_equal_start_with_none_re = re.compile(
|
||||
r"assertEqual\(None,")
|
||||
asse_true_false_with_in_or_not_in = re.compile(r"assert(True|False)\("
|
||||
r"(\w|[][.'\"])+( not)? in (\w|[][.'\",])+(, .*)?\)")
|
||||
asse_true_false_with_in_or_not_in_spaces = re.compile(r"assert(True|False)"
|
||||
r"\((\w|[][.'\"])+( not)? in [\[|'|\"](\w|[][.'\", ])+"
|
||||
r"[\[|'|\"](, .*)?\)")
|
||||
asse_raises_regexp = re.compile(r"assertRaisesRegexp\(")
|
||||
conf_attribute_set_re = re.compile(r"CONF\.[a-z0-9_.]+\s*=\s*\w")
|
||||
log_translation = re.compile(
|
||||
r"(.)*LOG\.(audit|error|critical)\(\s*('|\")")
|
||||
log_translation_info = re.compile(
|
||||
r"(.)*LOG\.(info)\(\s*(_\(|'|\")")
|
||||
log_translation_exception = re.compile(
|
||||
r"(.)*LOG\.(exception)\(\s*(_\(|'|\")")
|
||||
log_translation_LW = re.compile(
|
||||
r"(.)*LOG\.(warning|warn)\(\s*(_\(|'|\")")
|
||||
translated_log = re.compile(
|
||||
r"(.)*LOG\.(audit|error|info|critical|exception)"
|
||||
"\(\s*_\(\s*('|\")")
|
||||
mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
|
||||
string_translation = re.compile(r"[^_]*_\(\s*('|\")")
|
||||
underscore_import_check = re.compile(r"(.)*import _(.)*")
|
||||
import_translation_for_log_or_exception = re.compile(
|
||||
r"(.)*(from\sos_win._i18n\simport)\s_")
|
||||
# We need this for cases where they have created their own _ function.
|
||||
custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*")
|
||||
dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)")
|
||||
|
||||
|
||||
class BaseASTChecker(ast.NodeVisitor):
|
||||
"""Provides a simple framework for writing AST-based checks.
|
||||
|
||||
Subclasses should implement visit_* methods like any other AST visitor
|
||||
implementation. When they detect an error for a particular node the
|
||||
method should call ``self.add_error(offending_node)``. Details about
|
||||
where in the code the error occurred will be pulled from the node
|
||||
object.
|
||||
|
||||
Subclasses should also provide a class variable named CHECK_DESC to
|
||||
be used for the human readable error message.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, tree, filename):
|
||||
"""This object is created automatically by pep8.
|
||||
|
||||
:param tree: an AST tree
|
||||
:param filename: name of the file being analyzed
|
||||
(ignored by our checks)
|
||||
"""
|
||||
self._tree = tree
|
||||
self._errors = []
|
||||
|
||||
def run(self):
|
||||
"""Called automatically by pep8."""
|
||||
self.visit(self._tree)
|
||||
return self._errors
|
||||
|
||||
def add_error(self, node, message=None):
|
||||
"""Add an error caused by a node to the list of errors for pep8."""
|
||||
message = message or self.CHECK_DESC
|
||||
error = (node.lineno, node.col_offset, message, self.__class__)
|
||||
self._errors.append(error)
|
||||
|
||||
def _check_call_names(self, call_node, names):
|
||||
if isinstance(call_node, ast.Call):
|
||||
if isinstance(call_node.func, ast.Name):
|
||||
if call_node.func.id in names:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def use_timeutils_utcnow(logical_line, filename):
|
||||
# tools are OK to use the standard datetime module
|
||||
if "/tools/" in filename:
|
||||
return
|
||||
|
||||
msg = "N310: timeutils.utcnow() must be used instead of datetime.%s()"
|
||||
|
||||
datetime_funcs = ['now', 'utcnow']
|
||||
for f in datetime_funcs:
|
||||
pos = logical_line.find('datetime.%s' % f)
|
||||
if pos != -1:
|
||||
yield (pos, msg % f)
|
||||
|
||||
|
||||
def capital_cfg_help(logical_line, tokens):
|
||||
msg = "N313: capitalize help string"
|
||||
|
||||
if cfg_re.match(logical_line):
|
||||
for t in range(len(tokens)):
|
||||
if tokens[t][1] == "help":
|
||||
txt = tokens[t + 2][1]
|
||||
if len(txt) > 1 and txt[1].islower():
|
||||
yield(0, msg)
|
||||
|
||||
|
||||
def assert_true_instance(logical_line):
|
||||
"""Check for assertTrue(isinstance(a, b)) sentences
|
||||
|
||||
N316
|
||||
"""
|
||||
if asse_trueinst_re.match(logical_line):
|
||||
yield (0, "N316: assertTrue(isinstance(a, b)) sentences not allowed")
|
||||
|
||||
|
||||
def assert_equal_type(logical_line):
|
||||
"""Check for assertEqual(type(A), B) sentences
|
||||
|
||||
N317
|
||||
"""
|
||||
if asse_equal_type_re.match(logical_line):
|
||||
yield (0, "N317: assertEqual(type(A), B) sentences not allowed")
|
||||
|
||||
|
||||
def assert_equal_none(logical_line):
|
||||
"""Check for assertEqual(A, None) or assertEqual(None, A) sentences
|
||||
|
||||
N318
|
||||
"""
|
||||
res = (asse_equal_start_with_none_re.search(logical_line) or
|
||||
asse_equal_end_with_none_re.search(logical_line))
|
||||
if res:
|
||||
yield (0, "N318: assertEqual(A, None) or assertEqual(None, A) "
|
||||
"sentences not allowed")
|
||||
|
||||
|
||||
def no_translate_debug_logs(logical_line, filename):
|
||||
"""Check for 'LOG.debug(_('
|
||||
|
||||
As per our translation policy,
|
||||
https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
|
||||
we shouldn't translate debug level logs.
|
||||
|
||||
* This check assumes that 'LOG' is a logger.
|
||||
* Use filename so we can start enforcing this in specific folders instead
|
||||
of needing to do so all at once.
|
||||
|
||||
N319
|
||||
"""
|
||||
if logical_line.startswith("LOG.debug(_("):
|
||||
yield(0, "N319 Don't translate debug level logs")
|
||||
|
||||
|
||||
def no_import_translation_in_tests(logical_line, filename):
|
||||
"""Check for 'from os_win._i18n import _'
|
||||
N337
|
||||
"""
|
||||
if 'os_win/tests/' in filename:
|
||||
res = import_translation_for_log_or_exception.match(logical_line)
|
||||
if res:
|
||||
yield(0, "N337 Don't import translation in tests")
|
||||
|
||||
|
||||
def no_setting_conf_directly_in_tests(logical_line, filename):
|
||||
"""Check for setting CONF.* attributes directly in tests
|
||||
|
||||
The value can leak out of tests affecting how subsequent tests run.
|
||||
Using self.flags(option=value) is the preferred method to temporarily
|
||||
set config options in tests.
|
||||
|
||||
N320
|
||||
"""
|
||||
if 'os_win/tests/' in filename:
|
||||
res = conf_attribute_set_re.match(logical_line)
|
||||
if res:
|
||||
yield (0, "N320: Setting CONF.* attributes directly in tests is "
|
||||
"forbidden. Use self.flags(option=value) instead")
|
||||
|
||||
|
||||
def validate_log_translations(logical_line, physical_line, filename):
|
||||
# Translations are not required in the test directory
|
||||
if "os_win/tests" in filename:
|
||||
return
|
||||
if pep8.noqa(physical_line):
|
||||
return
|
||||
msg = "N328: LOG.info messages require translations `_LI()`!"
|
||||
if log_translation_info.match(logical_line):
|
||||
yield (0, msg)
|
||||
msg = "N329: LOG.exception messages require translations `_LE()`!"
|
||||
if log_translation_exception.match(logical_line):
|
||||
yield (0, msg)
|
||||
msg = "N330: LOG.warning, LOG.warn messages require translations `_LW()`!"
|
||||
if log_translation_LW.match(logical_line):
|
||||
yield (0, msg)
|
||||
msg = "N321: Log messages require translations!"
|
||||
if log_translation.match(logical_line):
|
||||
yield (0, msg)
|
||||
|
||||
|
||||
def no_mutable_default_args(logical_line):
|
||||
msg = "N322: Method's default argument shouldn't be mutable!"
|
||||
if mutable_default_args.match(logical_line):
|
||||
yield (0, msg)
|
||||
|
||||
|
||||
def check_explicit_underscore_import(logical_line, filename):
|
||||
"""Check for explicit import of the _ function
|
||||
|
||||
We need to ensure that any files that are using the _() function
|
||||
to translate logs are explicitly importing the _ function. We
|
||||
can't trust unit test to catch whether the import has been
|
||||
added so we need to check for it here.
|
||||
"""
|
||||
|
||||
# Build a list of the files that have _ imported. No further
|
||||
# checking needed once it is found.
|
||||
if filename in UNDERSCORE_IMPORT_FILES:
|
||||
pass
|
||||
elif (underscore_import_check.match(logical_line) or
|
||||
custom_underscore_check.match(logical_line)):
|
||||
UNDERSCORE_IMPORT_FILES.append(filename)
|
||||
elif (translated_log.match(logical_line) or
|
||||
string_translation.match(logical_line)):
|
||||
yield(0, "N323: Found use of _() without explicit import of _ !")
|
||||
|
||||
|
||||
def use_jsonutils(logical_line, filename):
|
||||
# tools are OK to use the standard json module
|
||||
if "/tools/" in filename:
|
||||
return
|
||||
|
||||
msg = "N324: jsonutils.%(fun)s must be used instead of json.%(fun)s"
|
||||
|
||||
if "json." in logical_line:
|
||||
json_funcs = ['dumps(', 'dump(', 'loads(', 'load(']
|
||||
for f in json_funcs:
|
||||
pos = logical_line.find('json.%s' % f)
|
||||
if pos != -1:
|
||||
yield (pos, msg % {'fun': f[:-1]})
|
||||
|
||||
|
||||
class CheckForStrUnicodeExc(BaseASTChecker):
|
||||
"""Checks for the use of str() or unicode() on an exception.
|
||||
|
||||
This currently only handles the case where str() or unicode()
|
||||
is used in the scope of an exception handler. If the exception
|
||||
is passed into a function, returned from an assertRaises, or
|
||||
used on an exception created in the same scope, this does not
|
||||
catch it.
|
||||
"""
|
||||
|
||||
CHECK_DESC = ('N325 str() and unicode() cannot be used on an '
|
||||
'exception. Remove or use six.text_type()')
|
||||
|
||||
def __init__(self, tree, filename):
|
||||
super(CheckForStrUnicodeExc, self).__init__(tree, filename)
|
||||
self.name = []
|
||||
self.already_checked = []
|
||||
|
||||
def visit_TryExcept(self, node):
|
||||
for handler in node.handlers:
|
||||
if handler.name:
|
||||
self.name.append(handler.name.id)
|
||||
super(CheckForStrUnicodeExc, self).generic_visit(node)
|
||||
self.name = self.name[:-1]
|
||||
else:
|
||||
super(CheckForStrUnicodeExc, self).generic_visit(node)
|
||||
|
||||
def visit_Call(self, node):
|
||||
if self._check_call_names(node, ['str', 'unicode']):
|
||||
if node not in self.already_checked:
|
||||
self.already_checked.append(node)
|
||||
if isinstance(node.args[0], ast.Name):
|
||||
if node.args[0].id in self.name:
|
||||
self.add_error(node.args[0])
|
||||
super(CheckForStrUnicodeExc, self).generic_visit(node)
|
||||
|
||||
|
||||
class CheckForTransAdd(BaseASTChecker):
|
||||
"""Checks for the use of concatenation on a translated string.
|
||||
|
||||
Translations should not be concatenated with other strings, but
|
||||
should instead include the string being added to the translated
|
||||
string to give the translators the most information.
|
||||
"""
|
||||
|
||||
CHECK_DESC = ('N326 Translated messages cannot be concatenated. '
|
||||
'String should be included in translated message.')
|
||||
|
||||
TRANS_FUNC = ['_', '_LI', '_LW', '_LE', '_LC']
|
||||
|
||||
def visit_BinOp(self, node):
|
||||
if isinstance(node.op, ast.Add):
|
||||
if self._check_call_names(node.left, self.TRANS_FUNC):
|
||||
self.add_error(node.left)
|
||||
elif self._check_call_names(node.right, self.TRANS_FUNC):
|
||||
self.add_error(node.right)
|
||||
super(CheckForTransAdd, self).generic_visit(node)
|
||||
|
||||
|
||||
def assert_true_or_false_with_in(logical_line):
|
||||
"""Check for assertTrue/False(A in B), assertTrue/False(A not in B),
|
||||
assertTrue/False(A in B, message) or assertTrue/False(A not in B, message)
|
||||
sentences.
|
||||
|
||||
N334
|
||||
"""
|
||||
res = (asse_true_false_with_in_or_not_in.search(logical_line) or
|
||||
asse_true_false_with_in_or_not_in_spaces.search(logical_line))
|
||||
if res:
|
||||
yield (0, "N334: Use assertIn/NotIn(A, B) rather than "
|
||||
"assertTrue/False(A in/not in B) when checking collection "
|
||||
"contents.")
|
||||
|
||||
|
||||
def assert_raises_regexp(logical_line):
|
||||
"""Check for usage of deprecated assertRaisesRegexp
|
||||
|
||||
N335
|
||||
"""
|
||||
res = asse_raises_regexp.search(logical_line)
|
||||
if res:
|
||||
yield (0, "N335: assertRaisesRegex must be used instead "
|
||||
"of assertRaisesRegexp")
|
||||
|
||||
|
||||
def dict_constructor_with_list_copy(logical_line):
|
||||
msg = ("N336: Must use a dict comprehension instead of a dict constructor"
|
||||
" with a sequence of key-value pairs."
|
||||
)
|
||||
if dict_constructor_with_list_copy_re.match(logical_line):
|
||||
yield (0, msg)
|
||||
|
||||
|
||||
def assert_equal_in(logical_line):
|
||||
"""Check for assertEqual(A in B, True), assertEqual(True, A in B),
|
||||
assertEqual(A in B, False) or assertEqual(False, A in B) sentences
|
||||
|
||||
N338
|
||||
"""
|
||||
res = (asse_equal_in_start_with_true_or_false_re.search(logical_line) or
|
||||
asse_equal_in_end_with_true_or_false_re.search(logical_line))
|
||||
if res:
|
||||
yield (0, "N338: Use assertIn/NotIn(A, B) rather than "
|
||||
"assertEqual(A in B, True/False) when checking collection "
|
||||
"contents.")
|
||||
|
||||
|
||||
def factory(register):
|
||||
register(use_timeutils_utcnow)
|
||||
register(capital_cfg_help)
|
||||
register(no_import_translation_in_tests)
|
||||
register(assert_true_instance)
|
||||
register(assert_equal_type)
|
||||
register(assert_equal_none)
|
||||
register(assert_raises_regexp)
|
||||
register(no_translate_debug_logs)
|
||||
register(no_setting_conf_directly_in_tests)
|
||||
register(validate_log_translations)
|
||||
register(no_mutable_default_args)
|
||||
register(check_explicit_underscore_import)
|
||||
register(use_jsonutils)
|
||||
register(CheckForStrUnicodeExc)
|
||||
register(CheckForTransAdd)
|
||||
register(assert_true_or_false_with_in)
|
||||
register(dict_constructor_with_list_copy)
|
||||
register(assert_equal_in)
|
@ -1,38 +0,0 @@
|
||||
# Copyright 2015 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""oslo.i18n integration module.
|
||||
|
||||
See http://docs.openstack.org/developer/oslo.i18n/usage.html .
|
||||
|
||||
"""
|
||||
|
||||
import oslo_i18n
|
||||
|
||||
|
||||
_translators = oslo_i18n.TranslatorFactory(domain='os_win')
|
||||
|
||||
# The primary translation function using the well-known name "_"
|
||||
_ = _translators.primary
|
||||
|
||||
# Translators for log levels.
|
||||
#
|
||||
# The abbreviated names are meant to reflect the usual use of a short
|
||||
# name like '_'. The "L" is for "log" and the other letter comes from
|
||||
# the level.
|
||||
_LI = _translators.log_info
|
||||
_LW = _translators.log_warning
|
||||
_LE = _translators.log_error
|
||||
_LC = _translators.log_critical
|
174
os_win/_utils.py
174
os_win/_utils.py
@ -1,174 +0,0 @@
|
||||
# Copyright 2015 Cloudbase Solutions Srl
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import netaddr
|
||||
import six
|
||||
import socket
|
||||
import time
|
||||
import types
|
||||
|
||||
from oslo_concurrency import lockutils
|
||||
from oslo_concurrency import processutils
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils import excutils
|
||||
from oslo_utils import reflection
|
||||
|
||||
from os_win._i18n import _LE
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
synchronized = lockutils.synchronized_with_prefix('oswin-')
|
||||
|
||||
|
||||
def execute(*cmd, **kwargs):
|
||||
"""Convenience wrapper around oslo's execute() method."""
|
||||
return processutils.execute(*cmd, **kwargs)
|
||||
|
||||
|
||||
def parse_server_string(server_str):
|
||||
"""Parses the given server_string and returns a tuple of host and port.
|
||||
If it's not a combination of host part and port, the port element
|
||||
is an empty string. If the input is invalid expression, return a tuple of
|
||||
two empty strings.
|
||||
"""
|
||||
try:
|
||||
# First of all, exclude pure IPv6 address (w/o port).
|
||||
if netaddr.valid_ipv6(server_str):
|
||||
return (server_str, '')
|
||||
|
||||
# Next, check if this is IPv6 address with a port number combination.
|
||||
if server_str.find("]:") != -1:
|
||||
(address, port) = server_str.replace('[', '', 1).split(']:')
|
||||
return (address, port)
|
||||
|
||||
# Third, check if this is a combination of an address and a port
|
||||
if server_str.find(':') == -1:
|
||||
return (server_str, '')
|
||||
|
||||
# This must be a combination of an address and a port
|
||||
(address, port) = server_str.split(':')
|
||||
return (address, port)
|
||||
|
||||
except (ValueError, netaddr.AddrFormatError):
|
||||
LOG.error(_LE('Invalid server_string: %s'), server_str)
|
||||
return ('', '')
|
||||
|
||||
|
||||
def get_wrapped_function(function):
|
||||
"""Get the method at the bottom of a stack of decorators."""
|
||||
if not hasattr(function, '__closure__') or not function.__closure__:
|
||||
return function
|
||||
|
||||
def _get_wrapped_function(function):
|
||||
if not hasattr(function, '__closure__') or not function.__closure__:
|
||||
return None
|
||||
|
||||
for closure in function.__closure__:
|
||||
func = closure.cell_contents
|
||||
|
||||
deeper_func = _get_wrapped_function(func)
|
||||
if deeper_func:
|
||||
return deeper_func
|
||||
elif isinstance(closure.cell_contents, types.FunctionType):
|
||||
return closure.cell_contents
|
||||
|
||||
return _get_wrapped_function(function)
|
||||
|
||||
|
||||
def retry_decorator(max_retry_count=5, timeout=None, inc_sleep_time=1,
|
||||
max_sleep_time=1, exceptions=(), error_codes=()):
|
||||
"""Retries invoking the decorated method in case of expected exceptions.
|
||||
|
||||
:param max_retry_count: The maximum number of retries performed. If 0, no
|
||||
retry is performed. If None, there will be no limit
|
||||
on the number of retries.
|
||||
:param timeout: The maximum time for which we'll retry invoking the method.
|
||||
If 0 or None, there will be no time limit.
|
||||
:param inc_sleep_time: The time sleep increment used between retries.
|
||||
:param max_sleep_time: The maximum time to wait between retries.
|
||||
:param exceptions: A list of expected exceptions for which retries will be
|
||||
performed.
|
||||
:param error_codes: A list of expected error codes. The error code is
|
||||
retrieved from the 'error_code' exception attribute,
|
||||
for example in case of Win32Exception. If this argument
|
||||
is not passed, retries will be performed for any of the
|
||||
expected exceptions.
|
||||
"""
|
||||
|
||||
if isinstance(error_codes, six.integer_types):
|
||||
error_codes = (error_codes, )
|
||||
|
||||
def wrapper(f):
|
||||
def inner(*args, **kwargs):
|
||||
try_count = 0
|
||||
sleep_time = 0
|
||||
time_start = time.time()
|
||||
|
||||
while True:
|
||||
try:
|
||||
return f(*args, **kwargs)
|
||||
except exceptions as exc:
|
||||
with excutils.save_and_reraise_exception() as ctxt:
|
||||
err_code = getattr(exc, 'error_code', None)
|
||||
expected_err_code = (err_code in error_codes
|
||||
or not error_codes)
|
||||
|
||||
time_elapsed = time.time() - time_start
|
||||
time_left = (timeout - time_elapsed
|
||||
if timeout else 'undefined')
|
||||
tries_left = (max_retry_count - try_count
|
||||
if max_retry_count is not None
|
||||
else 'undefined')
|
||||
|
||||
should_retry = (
|
||||
expected_err_code
|
||||
and tries_left
|
||||
and (time_left == 'undefined'
|
||||
or time_left > 0))
|
||||
ctxt.reraise = not should_retry
|
||||
|
||||
if should_retry:
|
||||
try_count += 1
|
||||
func_name = reflection.get_callable_name(f)
|
||||
|
||||
sleep_time = min(sleep_time + inc_sleep_time,
|
||||
max_sleep_time)
|
||||
if timeout:
|
||||
sleep_time = min(sleep_time, time_left)
|
||||
|
||||
LOG.debug("Got expected exception %(exc)s while "
|
||||
"calling function %(func_name)s. "
|
||||
"Retries left: %(retries_left)s. "
|
||||
"Time left: %(time_left)s. "
|
||||
"Time elapsed: %(time_elapsed)s "
|
||||
"Retrying in %(sleep_time)s seconds.",
|
||||
dict(exc=exc,
|
||||
func_name=func_name,
|
||||
retries_left=tries_left,
|
||||
time_left=time_left,
|
||||
time_elapsed=time_elapsed,
|
||||
sleep_time=sleep_time))
|
||||
time.sleep(sleep_time)
|
||||
return inner
|
||||
return wrapper
|
||||
|
||||
|
||||
def get_ips(addr):
|
||||
addr_info = socket.getaddrinfo(addr, None, 0, 0, 0)
|
||||
# Returns IPv4 and IPv6 addresses, ordered by protocol family
|
||||
addr_info.sort()
|
||||
return [a[4][0] for a in addr_info]
|
@ -1,163 +0,0 @@
|
||||
# Copyright 2012 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Hyper-V / Windows related constants.
|
||||
"""
|
||||
|
||||
HYPERV_VM_STATE_OTHER = 1
|
||||
HYPERV_VM_STATE_ENABLED = 2
|
||||
HYPERV_VM_STATE_DISABLED = 3
|
||||
HYPERV_VM_STATE_SHUTTING_DOWN = 4
|
||||
HYPERV_VM_STATE_REBOOT = 10
|
||||
HYPERV_VM_STATE_PAUSED = 32768
|
||||
HYPERV_VM_STATE_SUSPENDED = 32769
|
||||
|
||||
|
||||
WMI_JOB_STATUS_STARTED = 4096
|
||||
WMI_JOB_STATE_RUNNING = 4
|
||||
WMI_JOB_STATE_COMPLETED = 7
|
||||
|
||||
VM_SUMMARY_NUM_PROCS = 4
|
||||
VM_SUMMARY_ENABLED_STATE = 100
|
||||
VM_SUMMARY_MEMORY_USAGE = 103
|
||||
VM_SUMMARY_UPTIME = 105
|
||||
|
||||
|
||||
ARCH_I686 = 0
|
||||
ARCH_MIPS = 1
|
||||
ARCH_ALPHA = 2
|
||||
ARCH_PPC = 3
|
||||
ARCH_ARMV7 = 5
|
||||
ARCH_IA64 = 6
|
||||
ARCH_X86_64 = 9
|
||||
|
||||
|
||||
PROCESSOR_FEATURE = {
|
||||
3: 'mmx',
|
||||
6: 'sse',
|
||||
7: '3dnow',
|
||||
8: 'rdtsc',
|
||||
9: 'pae',
|
||||
10: 'sse2',
|
||||
12: 'nx',
|
||||
13: 'sse3',
|
||||
17: 'xsave',
|
||||
20: 'slat',
|
||||
21: 'vmx',
|
||||
}
|
||||
|
||||
|
||||
CTRL_TYPE_IDE = "IDE"
|
||||
CTRL_TYPE_SCSI = "SCSI"
|
||||
|
||||
DISK = "VHD"
|
||||
DISK_FORMAT = DISK
|
||||
DVD = "DVD"
|
||||
DVD_FORMAT = "ISO"
|
||||
VOLUME = "VOLUME"
|
||||
|
||||
DISK_FORMAT_MAP = {
|
||||
DISK_FORMAT.lower(): DISK,
|
||||
DVD_FORMAT.lower(): DVD
|
||||
}
|
||||
|
||||
DISK_FORMAT_VHD = "VHD"
|
||||
DISK_FORMAT_VHDX = "VHDX"
|
||||
|
||||
VHD_TYPE_FIXED = 2
|
||||
VHD_TYPE_DYNAMIC = 3
|
||||
VHD_TYPE_DIFFERENCING = 4
|
||||
|
||||
SCSI_CONTROLLER_SLOTS_NUMBER = 64
|
||||
IDE_CONTROLLER_SLOTS_NUMBER = 2
|
||||
|
||||
_BDI_DEVICE_TYPE_TO_DRIVE_TYPE = {'disk': DISK,
|
||||
'cdrom': DVD}
|
||||
|
||||
|
||||
HOST_POWER_ACTION_SHUTDOWN = "shutdown"
|
||||
HOST_POWER_ACTION_REBOOT = "reboot"
|
||||
HOST_POWER_ACTION_STARTUP = "startup"
|
||||
|
||||
IMAGE_PROP_VM_GEN = "hw_machine_type"
|
||||
IMAGE_PROP_VM_GEN_1 = "hyperv-gen1"
|
||||
IMAGE_PROP_VM_GEN_2 = "hyperv-gen2"
|
||||
|
||||
VM_GEN_1 = 1
|
||||
VM_GEN_2 = 2
|
||||
|
||||
JOB_STATE_COMPLETED = 7
|
||||
JOB_STATE_TERMINATED = 8
|
||||
JOB_STATE_KILLED = 9
|
||||
JOB_STATE_EXCEPTION = 10
|
||||
JOB_STATE_COMPLETED_WITH_WARNINGS = 32768
|
||||
|
||||
# Special vlan_id value in ovs_vlan_allocations table indicating flat network
|
||||
FLAT_VLAN_ID = -1
|
||||
TRUNK_ENDPOINT_MODE = 5
|
||||
|
||||
TYPE_FLAT = 'flat'
|
||||
TYPE_LOCAL = 'local'
|
||||
TYPE_VLAN = 'vlan'
|
||||
|
||||
SERIAL_CONSOLE_BUFFER_SIZE = 4 << 10
|
||||
MAX_CONSOLE_LOG_FILE_SIZE = 1 << 19 # 512kB
|
||||
|
||||
BOOT_DEVICE_FLOPPY = 0
|
||||
BOOT_DEVICE_CDROM = 1
|
||||
BOOT_DEVICE_HARDDISK = 2
|
||||
BOOT_DEVICE_NETWORK = 3
|
||||
|
||||
ISCSI_NO_AUTH_TYPE = 0
|
||||
ISCSI_CHAP_AUTH_TYPE = 1
|
||||
ISCSI_MUTUAL_CHAP_AUTH_TYPE = 2
|
||||
|
||||
REMOTEFX_MAX_RES_1024x768 = "1024x768"
|
||||
REMOTEFX_MAX_RES_1280x1024 = "1280x1024"
|
||||
REMOTEFX_MAX_RES_1600x1200 = "1600x1200"
|
||||
REMOTEFX_MAX_RES_1920x1200 = "1920x1200"
|
||||
REMOTEFX_MAX_RES_2560x1600 = "2560x1600"
|
||||
REMOTEFX_MAX_RES_3840x2160 = "3840x2160"
|
||||
|
||||
IPV4_DEFAULT = '0.0.0.0'
|
||||
|
||||
# The unattended file used when creating the .pdk file may contain substitution
|
||||
# strings. The substitution string along with their corresponding values will
|
||||
# be passed as metadata and added to a fsk file.
|
||||
# FSK_COMPUTERNAME represents the substitution string for ComputerName and will
|
||||
# set the hostname during vm provisioning.
|
||||
FSK_COMPUTERNAME = 'ComputerName'
|
||||
|
||||
VTPM_SUPPORTED_OS = ['windows']
|
||||
|
||||
# DNSUtils constants
|
||||
DNS_ZONE_TYPE_PRIMARY = 0
|
||||
DNS_ZONE_TYPE_SECONDARY = 1
|
||||
DNS_ZONE_TYPE_STUB = 2
|
||||
DNS_ZONE_TYPE_FORWARD = 3
|
||||
|
||||
DNS_ZONE_NO_UPDATES_ALLOWED = 0
|
||||
DNS_ZONE_SECURE_NONSECURE_UPDATES = 1
|
||||
DNS_ZONE_SECURE_UPDATES_ONLY = 2
|
||||
|
||||
DNS_ZONE_DO_NOT_NOTIFY = 0
|
||||
DNS_ZONE_NOTIFY_NAME_SERVERS_TAB = 1
|
||||
DNS_ZONE_NOTIFY_SPECIFIED_SERVERS = 2
|
||||
|
||||
DNS_ZONE_TRANSFER_ALLOWED_ANY_HOST = 0
|
||||
DNS_ZONE_TRANSFER_ALLOWED_NAME_SERVERS = 1
|
||||
DNS_ZONE_TRANSFER_ALLOWED_SECONDARY_SERVERS = 2
|
||||
DNS_ZONE_TRANSFER_NOT_ALLOWED = 3
|
@ -1,158 +0,0 @@
|
||||
# Copyright 2015 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Utility class for VM related operations on Hyper-V.
|
||||
"""
|
||||
|
||||
from os_win._i18n import _
|
||||
|
||||
|
||||
class OSWinException(Exception):
|
||||
msg_fmt = 'An exception has been encountered.'
|
||||
|
||||
def __init__(self, message=None, **kwargs):
|
||||
self.kwargs = kwargs
|
||||
|
||||
if not message:
|
||||
message = self.msg_fmt % kwargs
|
||||
|
||||
self.message = message
|
||||
super(OSWinException, self).__init__(message)
|
||||
|
||||
|
||||
class NotFound(OSWinException):
|
||||
msg_fmt = _("Resource could not be found: %(resource)s")
|
||||
|
||||
|
||||
class HyperVException(OSWinException):
|
||||
pass
|
||||
|
||||
|
||||
# TODO(alexpilotti): Add a storage exception base class
|
||||
class VHDResizeException(HyperVException):
|
||||
msg_fmt = _("Exception encountered while resizing the VHD %(vhd_path)s."
|
||||
"Reason: %(reason)s")
|
||||
|
||||
|
||||
class HyperVAuthorizationException(HyperVException):
|
||||
msg_fmt = _("The Windows account running nova-compute on this Hyper-V "
|
||||
"host doesn't have the required permissions to perform "
|
||||
"Hyper-V related operations.")
|
||||
|
||||
|
||||
class HyperVVMNotFoundException(NotFound, HyperVException):
|
||||
msg_fmt = _("VM not found: %(vm_name)s")
|
||||
|
||||
|
||||
class HyperVPortNotFoundException(NotFound, HyperVException):
|
||||
msg_fmt = _("Switch port not found: %(port_name)s")
|
||||
|
||||
|
||||
class SMBException(OSWinException):
|
||||
pass
|
||||
|
||||
|
||||
class Win32Exception(OSWinException):
|
||||
msg_fmt = _("Executing Win32 API function %(func_name)s failed. "
|
||||
"Error code: %(error_code)s. "
|
||||
"Error message: %(error_message)s")
|
||||
|
||||
def __init__(self, message=None, **kwargs):
|
||||
self.error_code = kwargs.get('error_code')
|
||||
super(Win32Exception, self).__init__(message=message, **kwargs)
|
||||
|
||||
|
||||
class VHDException(OSWinException):
|
||||
pass
|
||||
|
||||
|
||||
class VHDWin32APIException(VHDException, Win32Exception):
|
||||
pass
|
||||
|
||||
|
||||
class FCException(OSWinException):
|
||||
pass
|
||||
|
||||
|
||||
class FCWin32Exception(FCException, Win32Exception):
|
||||
pass
|
||||
|
||||
|
||||
class WMIException(OSWinException):
|
||||
def __init__(self, message=None, wmi_exc=None):
|
||||
if wmi_exc:
|
||||
try:
|
||||
wmi_exc_message = wmi_exc.com_error.excepinfo[2].strip()
|
||||
message = "%s WMI exception message: %s" % (message,
|
||||
wmi_exc_message)
|
||||
except AttributeError:
|
||||
pass
|
||||
except IndexError:
|
||||
pass
|
||||
super(WMIException, self).__init__(message)
|
||||
|
||||
|
||||
class WqlException(OSWinException):
|
||||
pass
|
||||
|
||||
|
||||
class ISCSITargetException(OSWinException):
|
||||
pass
|
||||
|
||||
|
||||
class ISCSITargetWMIException(ISCSITargetException, WMIException):
|
||||
pass
|
||||
|
||||
|
||||
class ISCSIInitiatorAPIException(Win32Exception):
|
||||
pass
|
||||
|
||||
|
||||
class ISCSILunNotAvailable(ISCSITargetException):
|
||||
msg_fmt = _("Could not find lun %(target_lun)s "
|
||||
"for iSCSI target %(target_iqn)s.")
|
||||
|
||||
|
||||
class Win32IOException(Win32Exception):
|
||||
pass
|
||||
|
||||
|
||||
class DiskNotFound(NotFound):
|
||||
pass
|
||||
|
||||
|
||||
class HyperVRemoteFXException(HyperVException):
|
||||
pass
|
||||
|
||||
|
||||
class HyperVClusterException(HyperVException):
|
||||
pass
|
||||
|
||||
|
||||
class DNSException(OSWinException):
|
||||
pass
|
||||
|
||||
|
||||
class DNSZoneNotFound(NotFound, DNSException):
|
||||
msg_fmt = _("DNS Zone not found: %(zone_name)s")
|
||||
|
||||
|
||||
class DNSZoneAlreadyExists(DNSException):
|
||||
msg_fmt = _("DNS Zone already exists: %(zone_name)s")
|
||||
|
||||
|
||||
class JobTerminateFailed(HyperVException):
|
||||
msg_fmt = _("Could not terminate the requested job(s).")
|
@ -1,42 +0,0 @@
|
||||
# Copyright 2015 Cloudbase Solutions Srl
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
from oslotest import base
|
||||
from six.moves import builtins
|
||||
|
||||
|
||||
class FakeWMIExc(Exception):
|
||||
def __init__(self, hresult=None):
|
||||
excepinfo = [None] * 5 + [hresult]
|
||||
self.com_error = mock.Mock(excepinfo=excepinfo)
|
||||
super(FakeWMIExc, self).__init__()
|
||||
|
||||
|
||||
class OsWinBaseTestCase(base.BaseTestCase):
|
||||
def setUp(self):
|
||||
super(OsWinBaseTestCase, self).setUp()
|
||||
|
||||
self._mock_wmi = mock.MagicMock()
|
||||
self._mock_wmi.x_wmi = FakeWMIExc
|
||||
|
||||
mock_os = mock.MagicMock(Version='6.3.0')
|
||||
self._mock_wmi.WMI.return_value.Win32_OperatingSystem.return_value = (
|
||||
[mock_os])
|
||||
wmi_patcher = mock.patch.object(builtins, 'wmi', create=True,
|
||||
new=self._mock_wmi)
|
||||
wmi_patcher.start()
|
||||
self.addCleanup(mock.patch.stopall)
|
@ -1,172 +0,0 @@
|
||||
# Copyright 2015 Cloudbase Solutions SRL
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Unit tests for the os_win._utils module.
|
||||
"""
|
||||
|
||||
import mock
|
||||
from oslotest import base
|
||||
|
||||
from os_win import _utils
|
||||
from os_win import exceptions
|
||||
|
||||
|
||||
class UtilsTestCase(base.BaseTestCase):
|
||||
|
||||
@mock.patch('oslo_concurrency.processutils.execute')
|
||||
def test_execute(self, mock_execute):
|
||||
_utils.execute(mock.sentinel.cmd, kwarg=mock.sentinel.kwarg)
|
||||
mock_execute.assert_called_once_with(mock.sentinel.cmd,
|
||||
kwarg=mock.sentinel.kwarg)
|
||||
|
||||
def test_parse_server_string(self):
|
||||
result = _utils.parse_server_string('::1')
|
||||
self.assertEqual(('::1', ''), result)
|
||||
result = _utils.parse_server_string('[::1]:8773')
|
||||
self.assertEqual(('::1', '8773'), result)
|
||||
result = _utils.parse_server_string('2001:db8::192.168.1.1')
|
||||
self.assertEqual(('2001:db8::192.168.1.1', ''), result)
|
||||
result = _utils.parse_server_string('[2001:db8::192.168.1.1]:8773')
|
||||
self.assertEqual(('2001:db8::192.168.1.1', '8773'), result)
|
||||
result = _utils.parse_server_string('192.168.1.1')
|
||||
self.assertEqual(('192.168.1.1', ''), result)
|
||||
result = _utils.parse_server_string('192.168.1.2:8773')
|
||||
self.assertEqual(('192.168.1.2', '8773'), result)
|
||||
result = _utils.parse_server_string('192.168.1.3')
|
||||
self.assertEqual(('192.168.1.3', ''), result)
|
||||
result = _utils.parse_server_string('www.example.com:8443')
|
||||
self.assertEqual(('www.example.com', '8443'), result)
|
||||
result = _utils.parse_server_string('www.example.com')
|
||||
self.assertEqual(('www.example.com', ''), result)
|
||||
# error case
|
||||
result = _utils.parse_server_string('www.exa:mple.com:8443')
|
||||
self.assertEqual(('', ''), result)
|
||||
result = _utils.parse_server_string('')
|
||||
self.assertEqual(('', ''), result)
|
||||
|
||||
def _get_fake_func_with_retry_decorator(self, side_effect,
|
||||
*args, **kwargs):
|
||||
func_side_effect = mock.Mock(side_effect=side_effect)
|
||||
|
||||
@_utils.retry_decorator(*args, **kwargs)
|
||||
def fake_func(*_args, **_kwargs):
|
||||
return func_side_effect(*_args, **_kwargs)
|
||||
|
||||
return fake_func, func_side_effect
|
||||
|
||||
@mock.patch.object(_utils, 'time')
|
||||
def test_retry_decorator(self, mock_time):
|
||||
err_code = 1
|
||||
max_retry_count = 5
|
||||
max_sleep_time = 2
|
||||
timeout = max_retry_count + 1
|
||||
mock_time.time.side_effect = range(timeout)
|
||||
|
||||
raised_exc = exceptions.Win32Exception(message='fake_exc',
|
||||
error_code=err_code)
|
||||
side_effect = [raised_exc] * max_retry_count
|
||||
side_effect.append(mock.sentinel.ret_val)
|
||||
|
||||
(fake_func,
|
||||
fake_func_side_effect) = self._get_fake_func_with_retry_decorator(
|
||||
error_codes=err_code,
|
||||
exceptions=exceptions.Win32Exception,
|
||||
max_retry_count=max_retry_count,
|
||||
max_sleep_time=max_sleep_time,
|
||||
timeout=timeout,
|
||||
side_effect=side_effect)
|
||||
|
||||
ret_val = fake_func(mock.sentinel.arg,
|
||||
kwarg=mock.sentinel.kwarg)
|
||||
self.assertEqual(mock.sentinel.ret_val, ret_val)
|
||||
fake_func_side_effect.assert_has_calls(
|
||||
[mock.call(mock.sentinel.arg, kwarg=mock.sentinel.kwarg)] *
|
||||
(max_retry_count + 1))
|
||||
self.assertEqual(max_retry_count + 1, mock_time.time.call_count)
|
||||
mock_time.sleep.assert_has_calls(
|
||||
[mock.call(sleep_time)
|
||||
for sleep_time in [1, 2, 2, 2, 1]])
|
||||
|
||||
@mock.patch.object(_utils, 'time')
|
||||
def _test_retry_decorator_exceeded(self, mock_time, expected_try_count,
|
||||
mock_time_side_eff=None,
|
||||
timeout=None, max_retry_count=None):
|
||||
raised_exc = exceptions.Win32Exception(message='fake_exc')
|
||||
mock_time.time.side_effect = mock_time_side_eff
|
||||
|
||||
(fake_func,
|
||||
fake_func_side_effect) = self._get_fake_func_with_retry_decorator(
|
||||
exceptions=exceptions.Win32Exception,
|
||||
timeout=timeout,
|
||||
side_effect=raised_exc)
|
||||
|
||||
self.assertRaises(exceptions.Win32Exception, fake_func)
|
||||
fake_func_side_effect.assert_has_calls(
|
||||
[mock.call()] * expected_try_count)
|
||||
|
||||
def test_retry_decorator_tries_exceeded(self):
|
||||
self._test_retry_decorator_exceeded(
|
||||
max_retry_count=2,
|
||||
expected_try_count=3)
|
||||
|
||||
def test_retry_decorator_time_exceeded(self):
|
||||
self._test_retry_decorator_exceeded(
|
||||
mock_time_side_eff=[0, 1, 4],
|
||||
timeout=3,
|
||||
expected_try_count=1)
|
||||
|
||||
@mock.patch('time.sleep')
|
||||
def _test_retry_decorator_no_retry(self, mock_sleep,
|
||||
expected_exceptions=(),
|
||||
expected_error_codes=()):
|
||||
err_code = 1
|
||||
raised_exc = exceptions.Win32Exception(message='fake_exc',
|
||||
error_code=err_code)
|
||||
fake_func, fake_func_side_effect = (
|
||||
self._get_fake_func_with_retry_decorator(
|
||||
error_codes=expected_error_codes,
|
||||
exceptions=expected_exceptions,
|
||||
side_effect=raised_exc))
|
||||
|
||||
self.assertRaises(exceptions.Win32Exception,
|
||||
fake_func, mock.sentinel.arg,
|
||||
fake_kwarg=mock.sentinel.kwarg)
|
||||
|
||||
self.assertFalse(mock_sleep.called)
|
||||
fake_func_side_effect.assert_called_once_with(
|
||||
mock.sentinel.arg, fake_kwarg=mock.sentinel.kwarg)
|
||||
|
||||
def test_retry_decorator_unexpected_err_code(self):
|
||||
self._test_retry_decorator_no_retry(
|
||||
expected_exceptions=exceptions.Win32Exception,
|
||||
expected_error_codes=2)
|
||||
|
||||
def test_retry_decorator_unexpected_exc(self):
|
||||
self._test_retry_decorator_no_retry(
|
||||
expected_exceptions=(IOError, AttributeError))
|
||||
|
||||
@mock.patch('socket.getaddrinfo')
|
||||
def test_get_ips(self, mock_getaddrinfo):
|
||||
ips = ['1.2.3.4', '5.6.7.8']
|
||||
mock_getaddrinfo.return_value = [
|
||||
(None, None, None, None, (ip, 0)) for ip in ips]
|
||||
|
||||
resulted_ips = _utils.get_ips(mock.sentinel.addr)
|
||||
self.assertEqual(ips, resulted_ips)
|
||||
|
||||
mock_getaddrinfo.assert_called_once_with(
|
||||
mock.sentinel.addr, None, 0, 0, 0)
|
@ -1,144 +0,0 @@
|
||||
# Copyright 2014 Cloudbase Solutions SRL
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Unit tests for the Hyper-V utils factory.
|
||||
"""
|
||||
|
||||
import mock
|
||||
from oslo_config import cfg
|
||||
|
||||
from os_win import exceptions
|
||||
from os_win.tests import test_base
|
||||
from os_win.utils.compute import clusterutils
|
||||
from os_win.utils.compute import livemigrationutils
|
||||
from os_win.utils.compute import rdpconsoleutils
|
||||
from os_win.utils.compute import vmutils
|
||||
from os_win.utils.dns import dnsutils
|
||||
from os_win.utils import hostutils
|
||||
from os_win.utils.network import networkutils
|
||||
from os_win.utils import pathutils
|
||||
from os_win.utils.storage import diskutils
|
||||
from os_win.utils.storage.initiator import iscsi_cli_utils
|
||||
from os_win.utils.storage.initiator import iscsi_utils
|
||||
from os_win.utils.storage import smbutils
|
||||
from os_win.utils.storage.virtdisk import vhdutils
|
||||
from os_win import utilsfactory
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
class TestHyperVUtilsFactory(test_base.OsWinBaseTestCase):
    """Unit tests for the os_win utils factory.

    Each test fakes the reported Windows version where relevant, invokes
    the matching ``utilsfactory.get_*`` method and checks that the factory
    instantiated the expected utils implementation.
    """

    @mock.patch.object(utilsfactory.utils, 'get_windows_version')
    def test_get_class_unsupported_win_version(self, mock_get_win_version):
        # Versions older than 6.2 (Windows Server 2012) are unsupported.
        mock_get_win_version.return_value = '5.2'
        self.assertRaises(exceptions.HyperVException, utilsfactory._get_class,
                          'hostutils')

    def test_get_class_unsupported_class_type(self):
        # An unknown utils class name must raise, not return something bogus.
        self.assertRaises(exceptions.HyperVException,
                          utilsfactory._get_class,
                          'invalid_class_type')

    @mock.patch.object(utilsfactory.utils, 'get_windows_version')
    def _check_get_class(self, mock_get_win_version, expected_class,
                         class_type, windows_version='6.2'):
        # Helper: fake the OS version, call the corresponding factory
        # method and assert the type of the returned instance.
        mock_get_win_version.return_value = windows_version

        factory_method = getattr(utilsfactory, 'get_%s' % class_type)
        returned = factory_method()
        self.assertEqual(expected_class, type(returned))

    def test_get_vmutils(self):
        self._check_get_class(expected_class=vmutils.VMUtils,
                              class_type='vmutils')

    def test_get_vhdutils(self):
        self._check_get_class(expected_class=vhdutils.VHDUtils,
                              class_type='vhdutils')

    def test_get_networkutils(self):
        self._check_get_class(expected_class=networkutils.NetworkUtils,
                              class_type='networkutils')

    def test_get_networkutilsr2(self):
        # Windows 6.3+ gets the R2 flavor of the network utils.
        self._check_get_class(expected_class=networkutils.NetworkUtilsR2,
                              class_type='networkutils',
                              windows_version='6.3')

    def test_get_hostutils(self):
        self._check_get_class(expected_class=hostutils.HostUtils,
                              class_type='hostutils')

    def test_get_pathutils(self):
        self._check_get_class(expected_class=pathutils.PathUtils,
                              class_type='pathutils')

    def test_get_livemigrationutils(self):
        self._check_get_class(
            expected_class=livemigrationutils.LiveMigrationUtils,
            class_type='livemigrationutils')

    # SMBUtils.__init__ touches the host at construction time, so it is
    # stubbed out and only the returned type is verified.
    @mock.patch.object(smbutils.SMBUtils, '__init__',
                       lambda *args, **kwargs: None)
    def test_get_smbutils(self):
        self._check_get_class(expected_class=smbutils.SMBUtils,
                              class_type='smbutils')

    def test_get_rdpconsoleutils(self):
        self._check_get_class(expected_class=rdpconsoleutils.RDPConsoleUtils,
                              class_type='rdpconsoleutils')

    @mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, '__init__',
                       lambda *args, **kwargs: None)
    def test_get_iscsi_initiator_utils(self):
        self._test_get_initiator_utils(
            expected_class=iscsi_utils.ISCSIInitiatorUtils)

    def test_get_iscsi_initiator_utils_force_v1(self):
        # force_volumeutils_v1 selects the legacy CLI based implementation.
        self._test_get_initiator_utils(
            expected_class=iscsi_cli_utils.ISCSIInitiatorCLIUtils,
            force_v1=True)

    @mock.patch.object(utilsfactory.utils, 'get_windows_version')
    def _test_get_initiator_utils(self, mock_get_win_version,
                                  expected_class, force_v1=False):
        # Helper: drive the force_volumeutils_v1 config switch and check
        # which iSCSI initiator implementation the factory hands back.
        CONF.set_override('force_volumeutils_v1', force_v1, 'hyperv')
        mock_get_win_version.return_value = '6.2'

        actual_class = type(utilsfactory.get_iscsi_initiator_utils())
        self.assertEqual(expected_class, actual_class)

    @mock.patch('os_win.utils.storage.initiator.fc_utils.FCUtils')
    def test_get_fc_utils(self, mock_cls_fcutils):
        # FCUtils is patched wholesale; compare against the mock's type.
        self._check_get_class(
            expected_class=type(mock_cls_fcutils.return_value),
            class_type='fc_utils')

    def test_get_diskutils(self):
        self._check_get_class(
            expected_class=diskutils.DiskUtils,
            class_type='diskutils')

    def test_get_clusterutils(self):
        self._check_get_class(
            expected_class=clusterutils.ClusterUtils,
            class_type='clusterutils')

    def test_get_dnsutils(self):
        self._check_get_class(
            expected_class=dnsutils.DNSUtils,
            class_type='dnsutils')
@ -1,335 +0,0 @@
|
||||
# Copyright 2016 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
|
||||
from os_win import exceptions
|
||||
from os_win.tests import test_base
|
||||
from os_win.utils.compute import clusterutils
|
||||
|
||||
|
||||
class ClusterUtilsTestCase(test_base.OsWinBaseTestCase):
    """Unit tests for the Hyper-V ClusterUtils class.

    The WMI cluster connection and the cluster object are replaced with
    mocks in setUp(), so each test only verifies how ClusterUtils drives
    those objects.
    """

    _FAKE_RES_NAME = "fake_res_name"
    _FAKE_HOST = "fake_host"
    _FAKE_PREV_HOST = "fake_prev_host"
    _FAKE_VM_NAME = 'instance-00000001'
    # Cluster resource groups wrapping a VM are named after the VM.
    _FAKE_RESOURCEGROUP_NAME = 'Virtual Machine %s' % _FAKE_VM_NAME

    def setUp(self):
        super(ClusterUtilsTestCase, self).setUp()
        self._clusterutils = clusterutils.ClusterUtils()
        # Stub out the WMI connection and the cluster handle.
        self._clusterutils._conn_cluster = mock.MagicMock()
        self._clusterutils._cluster = mock.MagicMock()

    def test_init_hyperv_conn(self):
        fake_cluster_name = "fake_cluster"
        mock_cluster = mock.MagicMock()
        mock_cluster.path_.return_value = r"\\%s\root" % fake_cluster_name

        mock_conn = mock.MagicMock()
        mock_conn.MSCluster_Cluster.return_value = [mock_cluster]

        self._clusterutils._get_wmi_conn = mock.MagicMock()
        self._clusterutils._get_wmi_conn.return_value = mock_conn

        # Should not raise: a single cluster is found on the connection.
        self._clusterutils._init_hyperv_conn("fake_host")

    def test_init_hyperv_conn_exception(self):
        # A broken WMI connection is wrapped in HyperVClusterException.
        self._clusterutils._get_wmi_conn = mock.MagicMock()
        self._clusterutils._get_wmi_conn.side_effect = AttributeError
        self.assertRaises(exceptions.HyperVClusterException,
                          self._clusterutils._init_hyperv_conn, "fake_host")

    @mock.patch.object(clusterutils.ClusterUtils,
                       '_get_cluster_nodes')
    def test_check_cluster_state_not_enough_nodes(self, mock_get_nodes):
        # The default MagicMock node list reports too few nodes.
        self.assertRaises(exceptions.HyperVClusterException,
                          self._clusterutils.check_cluster_state)

    def test_get_node_name(self):
        self._clusterutils._this_node = mock.sentinel.fake_node_name
        self.assertEqual(mock.sentinel.fake_node_name,
                         self._clusterutils.get_node_name())

    def test_get_cluster_nodes(self):
        # _get_cluster_nodes returns the Dependent end of each association.
        fake_node1 = mock.MagicMock(Dependent=mock.sentinel.cluster_node1)
        fake_node2 = mock.MagicMock(Dependent=mock.sentinel.cluster_node2)
        node_list = [fake_node1, fake_node2]
        expected = [mock.sentinel.cluster_node1, mock.sentinel.cluster_node2]
        fake_class = self._clusterutils._conn_cluster.MSCluster_ClusterToNode
        fake_class.return_value = node_list

        self.assertEqual(expected, self._clusterutils._get_cluster_nodes())

    def test_get_vm_groups(self):
        # Only groups with the VM group type must be yielded.
        vm_gr1 = mock.MagicMock(GroupType=self._clusterutils._VM_GROUP_TYPE)
        vm_gr2 = mock.MagicMock()
        vm_gr3 = mock.MagicMock(GroupType=self._clusterutils._VM_GROUP_TYPE)

        fake_assoc1 = mock.MagicMock(PartComponent=vm_gr1)
        fake_assoc2 = mock.MagicMock(PartComponent=vm_gr2)
        fake_assoc3 = mock.MagicMock(PartComponent=vm_gr3)

        assoc_list = [fake_assoc1, fake_assoc2, fake_assoc3]
        fake_conn = self._clusterutils._conn_cluster
        fake_conn.MSCluster_ClusterToResourceGroup.return_value = assoc_list

        res = list(self._clusterutils._get_vm_groups())

        self.assertIn(vm_gr1, res)
        self.assertNotIn(vm_gr2, res)
        self.assertIn(vm_gr3, res)

    @mock.patch.object(clusterutils.ClusterUtils,
                       '_lookup_vm_group')
    def test_lookup_vm_group_check(self, mock_lookup_vm_group):
        mock_lookup_vm_group.return_value = mock.sentinel.fake_vm

        ret = self._clusterutils._lookup_vm_group_check(
            self._FAKE_VM_NAME)
        self.assertEqual(mock.sentinel.fake_vm, ret)

    @mock.patch.object(clusterutils.ClusterUtils,
                       '_lookup_vm_group')
    def test_lookup_vm_group_check_no_vm(self, mock_lookup_vm_group):
        # A missing group must surface as HyperVVMNotFoundException.
        mock_lookup_vm_group.return_value = None

        self.assertRaises(exceptions.HyperVVMNotFoundException,
                          self._clusterutils._lookup_vm_group_check,
                          self._FAKE_VM_NAME)

    @mock.patch.object(clusterutils.ClusterUtils,
                       '_lookup_res')
    def test_lookup_vm_group(self, mock_lookup_res):
        self._clusterutils._lookup_vm_group(self._FAKE_VM_NAME)
        mock_lookup_res.assert_called_once_with(
            self._clusterutils._conn_cluster.MSCluster_ResourceGroup,
            self._FAKE_VM_NAME)

    @mock.patch.object(clusterutils.ClusterUtils,
                       '_lookup_vm')
    def test_lookup_vm_check(self, mock_lookup_vm):
        mock_lookup_vm.return_value = mock.sentinel.fake_vm

        ret = self._clusterutils._lookup_vm_check(
            self._FAKE_VM_NAME)
        self.assertEqual(mock.sentinel.fake_vm, ret)

    @mock.patch.object(clusterutils.ClusterUtils,
                       '_lookup_vm')
    def test_lookup_vm_check_no_vm(self, mock_lookup_vm):
        mock_lookup_vm.return_value = None

        self.assertRaises(exceptions.HyperVVMNotFoundException,
                          self._clusterutils._lookup_vm_check,
                          self._FAKE_VM_NAME)

    @mock.patch.object(clusterutils.ClusterUtils,
                       '_lookup_res')
    def test_lookup_vm(self, mock_lookup_res):
        # VM resources are named '<base name> <vm name>'.
        self._clusterutils._lookup_vm(self._FAKE_VM_NAME)
        mock_lookup_res.assert_called_once_with(
            self._clusterutils._conn_cluster.MSCluster_Resource,
            self._clusterutils._VM_BASE_NAME % self._FAKE_VM_NAME)

    def test_lookup_res_no_res(self):
        # No match -> None is returned rather than raising.
        res_list = []
        resource_source = mock.MagicMock()
        resource_source.return_value = res_list

        self.assertIsNone(
            self._clusterutils._lookup_res(resource_source,
                                           self._FAKE_RES_NAME))
        resource_source.assert_called_once_with(
            Name=self._FAKE_RES_NAME)

    def test_lookup_res_duplicate_res(self):
        # More than one match for the same name is an error.
        res_list = [mock.sentinel.r1,
                    mock.sentinel.r1]
        resource_source = mock.MagicMock()
        resource_source.return_value = res_list

        self.assertRaises(exceptions.HyperVClusterException,
                          self._clusterutils._lookup_res,
                          resource_source,
                          self._FAKE_RES_NAME)
        resource_source.assert_called_once_with(
            Name=self._FAKE_RES_NAME)

    def test_lookup_res(self):
        res_list = [mock.sentinel.r1]
        resource_source = mock.MagicMock()
        resource_source.return_value = res_list

        self.assertEqual(
            mock.sentinel.r1,
            self._clusterutils._lookup_res(resource_source,
                                           self._FAKE_RES_NAME))
        resource_source.assert_called_once_with(
            Name=self._FAKE_RES_NAME)

    @mock.patch.object(clusterutils.ClusterUtils,
                       '_get_cluster_nodes')
    def test_get_cluster_node_names(self, mock_get_cluster_nodes):
        cluster_nodes = [mock.Mock(Name='node1'),
                         mock.Mock(Name='node2')]
        mock_get_cluster_nodes.return_value = cluster_nodes

        ret = self._clusterutils.get_cluster_node_names()

        # assertItemsEqual only exists on Python 2 (Python 3 renamed it to
        # assertCountEqual); compare sorted lists for the same
        # order-insensitive check on both versions.
        self.assertEqual(sorted(['node1', 'node2']), sorted(ret))

    @mock.patch.object(clusterutils.ClusterUtils,
                       '_lookup_vm_group_check')
    def test_get_vm_host(self, mock_lookup_vm_group_check):
        owner_node = "fake_owner_node"
        vm = mock.Mock(OwnerNode=owner_node)
        mock_lookup_vm_group_check.return_value = vm

        self.assertEqual(
            owner_node,
            self._clusterutils.get_vm_host(self._FAKE_VM_NAME))

    @mock.patch.object(clusterutils.ClusterUtils, '_get_vm_groups')
    def test_list_instances(self, mock_get_vm_groups):
        mock_get_vm_groups.return_value = [mock.Mock(Name='vm1'),
                                           mock.Mock(Name='vm2')]
        ret = self._clusterutils.list_instances()
        # Order-insensitive, Python 3 compatible comparison (see
        # test_get_cluster_node_names).
        self.assertEqual(sorted(['vm1', 'vm2']), sorted(ret))

    @mock.patch.object(clusterutils.ClusterUtils, '_get_vm_groups')
    def test_list_instance_uuids(self, mock_get_vm_groups):
        mock_get_vm_groups.return_value = [mock.Mock(Id='uuid1'),
                                           mock.Mock(Id='uuid2')]
        ret = self._clusterutils.list_instance_uuids()
        # Order-insensitive, Python 3 compatible comparison (see
        # test_get_cluster_node_names).
        self.assertEqual(sorted(['uuid1', 'uuid2']), sorted(ret))

    @mock.patch.object(clusterutils.ClusterUtils,
                       '_lookup_vm_group_check')
    def test_add_vm_to_cluster(self, mock_lookup_vm_group_check):
        self._clusterutils._cluster.AddVirtualMachine = mock.MagicMock()
        vm_group = mock.Mock()
        mock_lookup_vm_group_check.return_value = vm_group

        self._clusterutils.add_vm_to_cluster(self._FAKE_VM_NAME)

        # Failback must be enabled across the full failback window, and the
        # updated group settings must be persisted via put().
        self.assertTrue(vm_group.PersistentState)
        self.assertEqual(self._clusterutils._FAILBACK_TRUE,
                         vm_group.AutoFailbackType)
        self.assertEqual(self._clusterutils._FAILBACK_WINDOW_MIN,
                         vm_group.FailbackWindowStart)
        self.assertEqual(self._clusterutils._FAILBACK_WINDOW_MAX,
                         vm_group.FailbackWindowEnd)
        vm_group.put.assert_called_once_with()

    @mock.patch.object(clusterutils.ClusterUtils, '_lookup_vm_check')
    def test_bring_online(self, mock_lookup_vm_check):
        vm = mock.MagicMock()
        mock_lookup_vm_check.return_value = vm

        self._clusterutils.bring_online(self._FAKE_VM_NAME)
        vm.BringOnline.assert_called_once_with()

    @mock.patch.object(clusterutils.ClusterUtils, '_lookup_vm')
    def test_take_offline(self, mock_lookup_vm):
        vm = mock.MagicMock()
        mock_lookup_vm.return_value = vm

        self._clusterutils.take_offline(self._FAKE_VM_NAME)
        vm.TakeOffline.assert_called_once_with()

    @mock.patch.object(clusterutils.ClusterUtils, '_lookup_vm_group')
    def test_delete(self, mock_lookup_vm_group):
        vm = mock.MagicMock()
        mock_lookup_vm_group.return_value = vm

        self._clusterutils.delete(self._FAKE_VM_NAME)
        vm.DestroyGroup.assert_called_once_with(
            self._clusterutils._DESTROY_GROUP)

    @mock.patch.object(clusterutils.ClusterUtils, '_lookup_vm')
    def test_vm_exists_true(self, mock_lookup_vm):
        vm = mock.MagicMock()
        mock_lookup_vm.return_value = vm

        self.assertTrue(self._clusterutils.vm_exists(self._FAKE_VM_NAME))

    @mock.patch.object(clusterutils.ClusterUtils, '_lookup_vm')
    def test_vm_exists_false(self, mock_lookup_vm):
        mock_lookup_vm.return_value = None

        self.assertFalse(self._clusterutils.vm_exists(self._FAKE_VM_NAME))

    @mock.patch.object(clusterutils.ClusterUtils, '_migrate_vm')
    def test_live_migrate_vm(self, mock_migrate_vm):
        self._clusterutils.live_migrate_vm(self._FAKE_VM_NAME,
                                           self._FAKE_HOST)
        mock_migrate_vm.assert_called_once_with(
            self._FAKE_VM_NAME, self._FAKE_HOST,
            self._clusterutils._LIVE_MIGRATION_TYPE)

    @mock.patch.object(clusterutils.ClusterUtils, '_lookup_vm_group_check')
    def test_migrate_vm(self, mock_lookup_vm_group_check):
        vm_group = mock.MagicMock()
        mock_lookup_vm_group_check.return_value = vm_group

        self._clusterutils._migrate_vm(
            self._FAKE_VM_NAME, self._FAKE_HOST,
            self._clusterutils._LIVE_MIGRATION_TYPE)

        vm_group.MoveToNewNodeParams.assert_called_once_with(
            self._clusterutils._IGNORE_LOCKED,
            self._FAKE_HOST,
            [self._clusterutils._LIVE_MIGRATION_TYPE])

    @mock.patch.object(clusterutils, 'tpool')
    @mock.patch.object(clusterutils, 'patcher')
    def test_monitor_vm_failover_no_vm(self, mock_patcher, mock_tpool):
        # The event's group name does not match the VM resource group
        # pattern, so the callback must not fire.
        self._clusterutils._watcher = mock.MagicMock()
        fake_prev = mock.MagicMock(OwnerNode=self._FAKE_PREV_HOST)
        fake_wmi_object = mock.MagicMock(OwnerNode=self._FAKE_HOST,
                                         Name='Virtual Machine',
                                         previous=fake_prev)
        mock_tpool.execute.return_value = fake_wmi_object
        fake_callback = mock.MagicMock()

        self._clusterutils.monitor_vm_failover(fake_callback)

        mock_tpool.execute.assert_called_once_with(
            self._clusterutils._watcher,
            self._clusterutils._WMI_EVENT_TIMEOUT_MS)
        fake_callback.assert_not_called()

    @mock.patch.object(clusterutils, 'tpool')
    @mock.patch.object(clusterutils, 'patcher')
    def test_monitor_vm_failover(self, mock_patcher, mock_tpool):
        # A matching 'Virtual Machine <name>' event must invoke the
        # callback with (vm_name, previous_host, new_host).
        self._clusterutils._watcher = mock.MagicMock()
        fake_prev = mock.MagicMock(OwnerNode=self._FAKE_PREV_HOST)
        fake_wmi_object = mock.MagicMock(OwnerNode=self._FAKE_HOST,
                                         Name=self._FAKE_RESOURCEGROUP_NAME,
                                         previous=fake_prev)
        mock_tpool.execute.return_value = fake_wmi_object
        fake_callback = mock.MagicMock()

        self._clusterutils.monitor_vm_failover(fake_callback)

        mock_tpool.execute.assert_called_once_with(
            self._clusterutils._watcher,
            self._clusterutils._WMI_EVENT_TIMEOUT_MS)
        fake_callback.assert_called_once_with(self._FAKE_VM_NAME,
                                              self._FAKE_PREV_HOST,
                                              self._FAKE_HOST)
@ -1,455 +0,0 @@
|
||||
# Copyright 2014 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
import platform
|
||||
|
||||
from os_win import exceptions
|
||||
from os_win.tests import test_base
|
||||
from os_win.utils import _wqlutils
|
||||
from os_win.utils.compute import livemigrationutils
|
||||
from os_win.utils.compute import vmutils
|
||||
|
||||
|
||||
class LiveMigrationUtilsTestCase(test_base.OsWinBaseTestCase):
|
||||
"""Unit tests for the Hyper-V LiveMigrationUtils class."""
|
||||
|
||||
_FAKE_VM_NAME = 'fake_vm_name'
|
||||
_FAKE_RET_VAL = 0
|
||||
|
||||
_RESOURCE_TYPE_VHD = 31
|
||||
_RESOURCE_TYPE_DISK = 17
|
||||
_RESOURCE_SUB_TYPE_VHD = 'Microsoft:Hyper-V:Virtual Hard Disk'
|
||||
_RESOURCE_SUB_TYPE_DISK = 'Microsoft:Hyper-V:Physical Disk Drive'
|
||||
|
||||
def setUp(self):
|
||||
super(LiveMigrationUtilsTestCase, self).setUp()
|
||||
self.liveutils = livemigrationutils.LiveMigrationUtils()
|
||||
self._conn = mock.MagicMock()
|
||||
self.liveutils._conn_attr = self._conn
|
||||
self.liveutils._vmutils = mock.MagicMock()
|
||||
self.liveutils._iscsi_initiator = mock.MagicMock()
|
||||
self.liveutils._jobutils = mock.Mock()
|
||||
|
||||
self.liveutils._get_conn_v2 = mock.MagicMock(return_value=self._conn)
|
||||
self.liveutils._conn_v2 = self._conn
|
||||
|
||||
def test_check_live_migration_config(self):
|
||||
mock_migr_svc = (
|
||||
self._conn.Msvm_VirtualSystemMigrationService.return_value[0])
|
||||
conn_vsmssd = self._conn.Msvm_VirtualSystemMigrationServiceSettingData
|
||||
|
||||
vsmssd = mock.MagicMock()
|
||||
vsmssd.EnableVirtualSystemMigration = True
|
||||
conn_vsmssd.return_value = [vsmssd]
|
||||
mock_migr_svc.MigrationServiceListenerIPAdressList.return_value = [
|
||||
mock.sentinel.FAKE_HOST]
|
||||
|
||||
self.liveutils.check_live_migration_config()
|
||||
conn_vsmssd.assert_called_once_with()
|
||||
self._conn.Msvm_VirtualSystemMigrationService.assert_called_once_with()
|
||||
|
||||
def test_get_vm(self):
|
||||
expected_vm = mock.MagicMock()
|
||||
mock_conn_v2 = mock.MagicMock()
|
||||
mock_conn_v2.Msvm_ComputerSystem.return_value = [expected_vm]
|
||||
|
||||
found_vm = self.liveutils._get_vm(mock_conn_v2, self._FAKE_VM_NAME)
|
||||
|
||||
self.assertEqual(expected_vm, found_vm)
|
||||
|
||||
def test_get_vm_duplicate(self):
|
||||
mock_vm = mock.MagicMock()
|
||||
mock_conn_v2 = mock.MagicMock()
|
||||
mock_conn_v2.Msvm_ComputerSystem.return_value = [mock_vm, mock_vm]
|
||||
|
||||
self.assertRaises(exceptions.HyperVException, self.liveutils._get_vm,
|
||||
mock_conn_v2, self._FAKE_VM_NAME)
|
||||
|
||||
def test_get_vm_not_found(self):
|
||||
mock_conn_v2 = mock.MagicMock()
|
||||
mock_conn_v2.Msvm_ComputerSystem.return_value = []
|
||||
|
||||
self.assertRaises(exceptions.HyperVVMNotFoundException,
|
||||
self.liveutils._get_vm,
|
||||
mock_conn_v2, self._FAKE_VM_NAME)
|
||||
|
||||
def test_destroy_planned_vm(self):
|
||||
mock_conn_v2 = mock.MagicMock()
|
||||
mock_planned_vm = mock.MagicMock()
|
||||
mock_vs_man_svc = mock.MagicMock()
|
||||
mock_conn_v2.Msvm_VirtualSystemManagementService.return_value = [
|
||||
mock_vs_man_svc]
|
||||
mock_planned_vm.path_.return_value = mock.sentinel.planned_vm_path
|
||||
mock_vs_man_svc.DestroySystem.return_value = (
|
||||
mock.sentinel.job_path, mock.sentinel.ret_val)
|
||||
|
||||
self.liveutils._destroy_planned_vm(mock_conn_v2, mock_planned_vm)
|
||||
|
||||
mock_msvms_cls = mock_conn_v2.Msvm_VirtualSystemManagementService
|
||||
mock_msvms_cls.assert_called_once_with()
|
||||
mock_vs_man_svc.DestroySystem.assert_called_once_with(
|
||||
mock.sentinel.planned_vm_path)
|
||||
self.liveutils._jobutils.check_ret_val.assert_called_once_with(
|
||||
mock.sentinel.ret_val,
|
||||
mock.sentinel.job_path)
|
||||
|
||||
def test_get_planned_vms(self):
|
||||
mock_conn_v2 = mock.MagicMock()
|
||||
mock_vm = self._get_vm()
|
||||
mock_conn_v2.Msvm_PlannedComputerSystem.return_value = (
|
||||
mock.sentinel.planned_vms)
|
||||
|
||||
planned_vms = self.liveutils._get_planned_vms(mock_conn_v2, mock_vm)
|
||||
|
||||
mock_conn_v2.Msvm_PlannedComputerSystem.assert_called_once_with(
|
||||
Name=self._FAKE_VM_NAME)
|
||||
self.assertEqual(mock.sentinel.planned_vms, planned_vms)
|
||||
|
||||
@mock.patch.object(livemigrationutils.LiveMigrationUtils,
|
||||
'_destroy_planned_vm')
|
||||
@mock.patch.object(livemigrationutils.LiveMigrationUtils,
|
||||
'_get_planned_vms')
|
||||
def test_destroy_existing_planned_vms(self, mock_get_planned_vms,
|
||||
mock_destroy_planned_vm):
|
||||
mock_conn_v2 = mock.sentinel.conn_v2
|
||||
mock_planned_vms = [mock.sentinel.planned_vm,
|
||||
mock.sentinel.another_planned_vm]
|
||||
mock_get_planned_vms.return_value = mock_planned_vms
|
||||
|
||||
self.liveutils._destroy_existing_planned_vms(mock_conn_v2,
|
||||
mock.sentinel.vm)
|
||||
|
||||
mock_get_planned_vms.assert_called_once_with(mock_conn_v2,
|
||||
mock.sentinel.vm)
|
||||
mock_destroy_planned_vm.assert_has_calls(
|
||||
[mock.call(mock_conn_v2, mock.sentinel.planned_vm),
|
||||
mock.call(mock_conn_v2, mock.sentinel.another_planned_vm)])
|
||||
|
||||
def test_create_planned_vm_helper(self):
|
||||
mock_vm = mock.MagicMock()
|
||||
mock_v2 = mock.MagicMock()
|
||||
mock_vsmsd = mock_v2.query()[0]
|
||||
self._conn.Msvm_PlannedComputerSystem.return_value = [mock_vm]
|
||||
|
||||
migr_svc = mock_v2.Msvm_VirtualSystemMigrationService()[0]
|
||||
migr_svc.MigrateVirtualSystemToHost.return_value = (
|
||||
self._FAKE_RET_VAL, mock.sentinel.FAKE_JOB_PATH)
|
||||
|
||||
resulted_vm = self.liveutils._create_planned_vm(
|
||||
self._conn, mock_v2, mock_vm, [mock.sentinel.FAKE_REMOTE_IP_ADDR],
|
||||
mock.sentinel.FAKE_HOST)
|
||||
|
||||
self.assertEqual(mock_vm, resulted_vm)
|
||||
|
||||
migr_svc.MigrateVirtualSystemToHost.assert_called_once_with(
|
||||
ComputerSystem=mock_vm.path_.return_value,
|
||||
DestinationHost=mock.sentinel.FAKE_HOST,
|
||||
MigrationSettingData=mock_vsmsd.GetText_.return_value)
|
||||
self.liveutils._jobutils.check_ret_val.assert_called_once_with(
|
||||
mock.sentinel.FAKE_JOB_PATH,
|
||||
self._FAKE_RET_VAL)
|
||||
|
||||
def test_get_physical_disk_paths(self):
|
||||
ide_path = {mock.sentinel.IDE_PATH: mock.sentinel.IDE_HOST_RESOURCE}
|
||||
scsi_path = {mock.sentinel.SCSI_PATH: mock.sentinel.SCSI_HOST_RESOURCE}
|
||||
ide_ctrl = self.liveutils._vmutils.get_vm_ide_controller.return_value
|
||||
scsi_ctrl = self.liveutils._vmutils.get_vm_scsi_controller.return_value
|
||||
mock_get_controller_paths = (
|
||||
self.liveutils._vmutils.get_controller_volume_paths)
|
||||
|
||||
mock_get_controller_paths.side_effect = [ide_path, scsi_path]
|
||||
|
||||
result = self.liveutils._get_physical_disk_paths(mock.sentinel.VM_NAME)
|
||||
|
||||
expected = dict(ide_path)
|
||||
expected.update(scsi_path)
|
||||
self.assertDictContainsSubset(expected, result)
|
||||
calls = [mock.call(ide_ctrl), mock.call(scsi_ctrl)]
|
||||
mock_get_controller_paths.assert_has_calls(calls)
|
||||
|
||||
def test_get_physical_disk_paths_no_ide(self):
|
||||
scsi_path = {mock.sentinel.SCSI_PATH: mock.sentinel.SCSI_HOST_RESOURCE}
|
||||
scsi_ctrl = self.liveutils._vmutils.get_vm_scsi_controller.return_value
|
||||
mock_get_controller_paths = (
|
||||
self.liveutils._vmutils.get_controller_volume_paths)
|
||||
|
||||
self.liveutils._vmutils.get_vm_ide_controller.return_value = None
|
||||
mock_get_controller_paths.return_value = scsi_path
|
||||
|
||||
result = self.liveutils._get_physical_disk_paths(mock.sentinel.VM_NAME)
|
||||
|
||||
self.assertEqual(scsi_path, result)
|
||||
mock_get_controller_paths.assert_called_once_with(scsi_ctrl)
|
||||
|
||||
@mock.patch.object(livemigrationutils.iscsi_wmi_utils,
|
||||
'ISCSIInitiatorWMIUtils')
|
||||
def test_get_remote_disk_data(self, mock_iscsi_initiator_class):
|
||||
m_remote_iscsi_init = mock_iscsi_initiator_class.return_value
|
||||
m_local_iscsi_init = self.liveutils._iscsi_initiator
|
||||
|
||||
mock_vm_utils = mock.MagicMock()
|
||||
disk_paths = {
|
||||
mock.sentinel.FAKE_RASD_PATH: mock.sentinel.FAKE_DISK_PATH}
|
||||
m_local_iscsi_init.get_target_from_disk_path.return_value = (
|
||||
mock.sentinel.FAKE_IQN, mock.sentinel.FAKE_LUN)
|
||||
m_remote_iscsi_init.get_device_number_for_target.return_value = (
|
||||
mock.sentinel.FAKE_DEV_NUM)
|
||||
mock_vm_utils.get_mounted_disk_by_drive_number.return_value = (
|
||||
mock.sentinel.FAKE_DISK_PATH)
|
||||
|
||||
disk_paths = self.liveutils._get_remote_disk_data(
|
||||
mock_vm_utils, disk_paths, mock.sentinel.FAKE_HOST)
|
||||
|
||||
m_local_iscsi_init.get_target_from_disk_path.assert_called_with(
|
||||
mock.sentinel.FAKE_DISK_PATH)
|
||||
m_remote_iscsi_init.get_device_number_for_target.assert_called_with(
|
||||
mock.sentinel.FAKE_IQN, mock.sentinel.FAKE_LUN)
|
||||
mock_vm_utils.get_mounted_disk_by_drive_number.assert_called_once_with(
|
||||
mock.sentinel.FAKE_DEV_NUM)
|
||||
|
||||
self.assertEqual(
|
||||
{mock.sentinel.FAKE_RASD_PATH: mock.sentinel.FAKE_DISK_PATH},
|
||||
disk_paths)
|
||||
|
||||
def test_get_disk_data(self):
|
||||
mock_vmutils_remote = mock.MagicMock()
|
||||
mock_disk = mock.MagicMock()
|
||||
mock_disk_path_mapping = {
|
||||
mock.sentinel.serial: mock.sentinel.disk_path}
|
||||
|
||||
mock_disk.path.return_value.RelPath = mock.sentinel.rel_path
|
||||
mock_vmutils_remote.get_vm_disks.return_value = [
|
||||
None, [mock_disk]]
|
||||
mock_disk.ElementName = mock.sentinel.serial
|
||||
|
||||
resulted_disk_paths = self.liveutils._get_disk_data(
|
||||
self._FAKE_VM_NAME, mock_vmutils_remote, mock_disk_path_mapping)
|
||||
|
||||
mock_vmutils_remote.get_vm_disks.assert_called_once_with(
|
||||
self._FAKE_VM_NAME)
|
||||
mock_disk.path.assert_called_once_with()
|
||||
expected_disk_paths = {mock.sentinel.rel_path: mock.sentinel.disk_path}
|
||||
self.assertEqual(expected_disk_paths, resulted_disk_paths)
|
||||
|
||||
@mock.patch.object(_wqlutils, 'get_element_associated_class')
|
||||
def test_update_planned_vm_disk_resources(self,
|
||||
mock_get_elem_associated_class):
|
||||
self._prepare_vm_mocks(self._RESOURCE_TYPE_DISK,
|
||||
self._RESOURCE_SUB_TYPE_DISK,
|
||||
mock_get_elem_associated_class)
|
||||
mock_vm = mock.Mock(Name='fake_name')
|
||||
sasd = mock_get_elem_associated_class.return_value[0]
|
||||
|
||||
mock_vsmsvc = self._conn.Msvm_VirtualSystemManagementService()[0]
|
||||
|
||||
self.liveutils._update_planned_vm_disk_resources(
|
||||
self._conn, mock_vm, mock.sentinel.FAKE_VM_NAME,
|
||||
{sasd.path.return_value.RelPath: mock.sentinel.FAKE_RASD_PATH})
|
||||
|
||||
mock_vsmsvc.ModifyResourceSettings.assert_called_once_with(
|
||||
ResourceSettings=[sasd.GetText_.return_value])
|
||||
mock_get_elem_associated_class.assert_called_once_with(
|
||||
self._conn, self.liveutils._CIM_RES_ALLOC_SETTING_DATA_CLASS,
|
||||
element_uuid=mock_vm.Name)
|
||||
|
||||
@mock.patch.object(_wqlutils, 'get_element_associated_class')
|
||||
def test_get_vhd_setting_data(self, mock_get_elem_associated_class):
|
||||
self._prepare_vm_mocks(self._RESOURCE_TYPE_VHD,
|
||||
self._RESOURCE_SUB_TYPE_VHD,
|
||||
mock_get_elem_associated_class)
|
||||
mock_vm = mock.Mock(Name='fake_vm_name')
|
||||
mock_sasd = mock_get_elem_associated_class.return_value[0]
|
||||
|
||||
vhd_sds = self.liveutils._get_vhd_setting_data(mock_vm)
|
||||
self.assertEqual([mock_sasd.GetText_.return_value], vhd_sds)
|
||||
mock_get_elem_associated_class.assert_called_once_with(
|
||||
self._conn, self.liveutils._STORAGE_ALLOC_SETTING_DATA_CLASS,
|
||||
element_uuid=mock_vm.Name)
|
||||
|
||||
def test_live_migrate_vm_helper(self):
|
||||
mock_conn_local = mock.MagicMock()
|
||||
mock_vm = mock.MagicMock()
|
||||
mock_vsmsd = mock_conn_local.query()[0]
|
||||
|
||||
mock_vsmsvc = mock_conn_local.Msvm_VirtualSystemMigrationService()[0]
|
||||
mock_vsmsvc.MigrateVirtualSystemToHost.return_value = (
|
||||
self._FAKE_RET_VAL, mock.sentinel.FAKE_JOB_PATH)
|
||||
|
||||
self.liveutils._live_migrate_vm(
|
||||
mock_conn_local, mock_vm, None,
|
||||
[mock.sentinel.FAKE_REMOTE_IP_ADDR],
|
||||
mock.sentinel.FAKE_RASD_PATH, mock.sentinel.FAKE_HOST)
|
||||
|
||||
mock_vsmsvc.MigrateVirtualSystemToHost.assert_called_once_with(
|
||||
ComputerSystem=mock_vm.path_.return_value,
|
||||
DestinationHost=mock.sentinel.FAKE_HOST,
|
||||
MigrationSettingData=mock_vsmsd.GetText_.return_value,
|
||||
NewResourceSettingData=mock.sentinel.FAKE_RASD_PATH)
|
||||
|
||||
def test_live_migrate_multiple_planned_vms(self):
|
||||
mock_vm = self._get_vm()
|
||||
self._conn.Msvm_PlannedComputerSystem.return_value = [
|
||||
mock_vm, mock_vm]
|
||||
|
||||
self.assertRaises(exceptions.OSWinException,
|
||||
self.liveutils.live_migrate_vm,
|
||||
mock.sentinel.vm_name,
|
||||
mock.sentinel.host)
|
||||
|
||||
@mock.patch.object(livemigrationutils, 'vmutils')
|
||||
def test_live_migrate_no_planned_vm(self, mock_vm_utils):
|
||||
mock_vm_utils_remote = mock_vm_utils.VMUtils.return_value
|
||||
mock_vm = self._get_vm()
|
||||
|
||||
mock_migr_svc = self._conn.Msvm_VirtualSystemMigrationService()[0]
|
||||
mock_migr_svc.MigrationServiceListenerIPAddressList = [
|
||||
mock.sentinel.FAKE_REMOTE_IP_ADDR]
|
||||
|
||||
# patches, call and assertions.
|
||||
with mock.patch.multiple(
|
||||
self.liveutils,
|
||||
_get_physical_disk_paths=mock.DEFAULT,
|
||||
_get_remote_disk_data=mock.DEFAULT,
|
||||
_create_planned_vm=mock.DEFAULT,
|
||||
_update_planned_vm_disk_resources=mock.DEFAULT,
|
||||
_get_vhd_setting_data=mock.DEFAULT,
|
||||
_live_migrate_vm=mock.DEFAULT):
|
||||
|
||||
self._conn.Msvm_PlannedComputerSystem.return_value = []
|
||||
disk_paths = {
|
||||
mock.sentinel.FAKE_IDE_PATH: mock.sentinel.FAKE_SASD_RESOURCE}
|
||||
self.liveutils._get_physical_disk_paths.return_value = disk_paths
|
||||
mock_disk_paths = [mock.sentinel.FAKE_DISK_PATH]
|
||||
self.liveutils._get_remote_disk_data.return_value = (
|
||||
mock_disk_paths)
|
||||
self.liveutils._create_planned_vm.return_value = mock_vm
|
||||
|
||||
self.liveutils.live_migrate_vm(mock.sentinel.vm_name,
|
||||
mock.sentinel.FAKE_HOST)
|
||||
|
||||
self.liveutils._get_remote_disk_data.assert_called_once_with(
|
||||
mock_vm_utils_remote, disk_paths, mock.sentinel.FAKE_HOST)
|
||||
self.liveutils._create_planned_vm.assert_called_once_with(
|
||||
self._conn, self._conn, mock_vm,
|
||||
[mock.sentinel.FAKE_REMOTE_IP_ADDR], mock.sentinel.FAKE_HOST)
|
||||
mocked_method = self.liveutils._update_planned_vm_disk_resources
|
||||
mocked_method.assert_called_once_with(
|
||||
self._conn, mock_vm, mock.sentinel.vm_name,
|
||||
mock_disk_paths)
|
||||
self.liveutils._live_migrate_vm.assert_called_once_with(
|
||||
self._conn, mock_vm, mock_vm,
|
||||
[mock.sentinel.FAKE_REMOTE_IP_ADDR],
|
||||
self.liveutils._get_vhd_setting_data.return_value,
|
||||
mock.sentinel.FAKE_HOST)
|
||||
|
||||
def test_live_migrate_single_planned_vm(self):
|
||||
mock_vm = self._get_vm()
|
||||
|
||||
mock_migr_svc = self._conn.Msvm_VirtualSystemMigrationService()[0]
|
||||
mock_migr_svc.MigrationServiceListenerIPAddressList = [
|
||||
mock.sentinel.FAKE_REMOTE_IP_ADDR]
|
||||
|
||||
# patches, call and assertions.
|
||||
with mock.patch.multiple(
|
||||
self.liveutils,
|
||||
_get_vhd_setting_data=mock.DEFAULT,
|
||||
_live_migrate_vm=mock.DEFAULT):
|
||||
|
||||
self._conn.Msvm_PlannedComputerSystem.return_value = [mock_vm]
|
||||
self.liveutils.live_migrate_vm(mock.sentinel.vm_name,
|
||||
mock.sentinel.FAKE_HOST)
|
||||
self.liveutils._live_migrate_vm.assert_called_once_with(
|
||||
self._conn, mock_vm, mock_vm,
|
||||
[mock.sentinel.FAKE_REMOTE_IP_ADDR],
|
||||
self.liveutils._get_vhd_setting_data.return_value,
|
||||
mock.sentinel.FAKE_HOST)
|
||||
|
||||
@mock.patch.object(vmutils, 'VMUtils')
|
||||
@mock.patch.object(livemigrationutils.LiveMigrationUtils, '_get_vm')
|
||||
@mock.patch.object(livemigrationutils.LiveMigrationUtils,
|
||||
'_get_ip_address_list')
|
||||
@mock.patch.object(livemigrationutils.LiveMigrationUtils,
|
||||
'_update_planned_vm_disk_resources')
|
||||
@mock.patch.object(livemigrationutils.LiveMigrationUtils,
|
||||
'_create_planned_vm')
|
||||
@mock.patch.object(livemigrationutils.LiveMigrationUtils,
|
||||
'_destroy_existing_planned_vms')
|
||||
@mock.patch.object(livemigrationutils.LiveMigrationUtils,
|
||||
'_get_disk_data')
|
||||
def test_create_planned_vm(self, mock_get_disk_data,
|
||||
mock_destroy_existing_planned_vm,
|
||||
mock_create_planned_vm,
|
||||
mock_update_planned_vm_disk_resources,
|
||||
mock_get_ip_address_list, mock_get_vm,
|
||||
mock_cls_vmutils):
|
||||
dest_host = platform.node()
|
||||
mock_vm = mock.MagicMock()
|
||||
mock_get_vm.return_value = mock_vm
|
||||
mock_conn_v2 = mock.MagicMock()
|
||||
self.liveutils._get_conn_v2.return_value = mock_conn_v2
|
||||
|
||||
mock_get_disk_data.return_value = mock.sentinel.disk_data
|
||||
mock_get_ip_address_list.return_value = mock.sentinel.ip_address_list
|
||||
|
||||
mock_vsmsvc = self._conn.Msvm_VirtualSystemManagementService()[0]
|
||||
mock_vsmsvc.ModifyResourceSettings.return_value = (
|
||||
mock.sentinel.res_setting,
|
||||
mock.sentinel.job_path,
|
||||
self._FAKE_RET_VAL)
|
||||
|
||||
self.liveutils.create_planned_vm(mock.sentinel.vm_name,
|
||||
mock.sentinel.host,
|
||||
mock.sentinel.disk_path_mapping)
|
||||
|
||||
mock_destroy_existing_planned_vm.assert_called_once_with(self._conn,
|
||||
mock_vm)
|
||||
mock_get_ip_address_list.assert_called_once_with(self._conn, dest_host)
|
||||
mock_get_disk_data.assert_called_once_with(
|
||||
mock.sentinel.vm_name,
|
||||
mock_cls_vmutils.return_value,
|
||||
mock.sentinel.disk_path_mapping)
|
||||
mock_create_planned_vm.assert_called_once_with(
|
||||
self._conn, mock_conn_v2, mock_vm,
|
||||
mock.sentinel.ip_address_list, dest_host)
|
||||
mock_update_planned_vm_disk_resources.assert_called_once_with(
|
||||
self._conn, mock_create_planned_vm.return_value,
|
||||
mock.sentinel.vm_name, mock.sentinel.disk_data)
|
||||
|
||||
def _prepare_vm_mocks(self, resource_type, resource_sub_type,
|
||||
mock_get_elem_associated_class):
|
||||
mock_vm_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
|
||||
vm = self._get_vm()
|
||||
self._conn.Msvm_PlannedComputerSystem.return_value = [vm]
|
||||
mock_vm_svc.DestroySystem.return_value = (mock.sentinel.FAKE_JOB_PATH,
|
||||
self._FAKE_RET_VAL)
|
||||
mock_vm_svc.ModifyResourceSettings.return_value = (
|
||||
None, mock.sentinel.FAKE_JOB_PATH, self._FAKE_RET_VAL)
|
||||
|
||||
sasd = mock.MagicMock()
|
||||
other_sasd = mock.MagicMock()
|
||||
sasd.ResourceType = resource_type
|
||||
sasd.ResourceSubType = resource_sub_type
|
||||
sasd.HostResource = [mock.sentinel.FAKE_SASD_RESOURCE]
|
||||
sasd.path.return_value.RelPath = mock.sentinel.FAKE_DISK_PATH
|
||||
|
||||
mock_get_elem_associated_class.return_value = [sasd, other_sasd]
|
||||
|
||||
def _get_vm(self):
|
||||
mock_vm = mock.MagicMock()
|
||||
self._conn.Msvm_ComputerSystem.return_value = [mock_vm]
|
||||
mock_vm.path_.return_value = mock.sentinel.FAKE_VM_PATH
|
||||
mock_vm.Name = self._FAKE_VM_NAME
|
||||
return mock_vm
|
@ -1,37 +0,0 @@
|
||||
# Copyright 2013 Cloudbase Solutions Srl
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
|
||||
from os_win.tests import test_base
|
||||
from os_win.utils.compute import rdpconsoleutils
|
||||
|
||||
|
||||
class RDPConsoleUtilsTestCase(test_base.OsWinBaseTestCase):
|
||||
_FAKE_RDP_PORT = 1000
|
||||
|
||||
def setUp(self):
|
||||
self._rdpconsoleutils = rdpconsoleutils.RDPConsoleUtils()
|
||||
self._rdpconsoleutils._conn_attr = mock.MagicMock()
|
||||
|
||||
super(RDPConsoleUtilsTestCase, self).setUp()
|
||||
|
||||
def test_get_rdp_console_port(self):
|
||||
conn = self._rdpconsoleutils._conn
|
||||
mock_rdp_setting_data = conn.Msvm_TerminalServiceSettingData()[0]
|
||||
mock_rdp_setting_data.ListenerPort = self._FAKE_RDP_PORT
|
||||
|
||||
listener_port = self._rdpconsoleutils.get_rdp_console_port()
|
||||
|
||||
self.assertEqual(self._FAKE_RDP_PORT, listener_port)
|
File diff suppressed because it is too large
Load Diff
@ -1,228 +0,0 @@
|
||||
# Copyright 2015 Cloudbase Solutions Srl
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
|
||||
from os_win import constants
|
||||
from os_win import exceptions
|
||||
from os_win.tests import test_base
|
||||
from os_win.utils import _wqlutils
|
||||
from os_win.utils.compute import vmutils10
|
||||
|
||||
|
||||
class VMUtils10TestCase(test_base.OsWinBaseTestCase):
|
||||
"""Unit tests for the Hyper-V VMUtils10 class."""
|
||||
|
||||
def setUp(self):
|
||||
super(VMUtils10TestCase, self).setUp()
|
||||
self._vmutils = vmutils10.VMUtils10()
|
||||
self._vmutils._conn_attr = mock.MagicMock()
|
||||
self._vmutils._conn_msps_attr = mock.MagicMock()
|
||||
self._vmutils._jobutils = mock.MagicMock()
|
||||
|
||||
@mock.patch.object(vmutils10.VMUtils10, '_get_wmi_conn')
|
||||
def test_conn_msps(self, mock_get_wmi_conn):
|
||||
self._vmutils._conn_msps_attr = None
|
||||
self.assertEqual(mock_get_wmi_conn.return_value,
|
||||
self._vmutils._conn_msps)
|
||||
|
||||
mock_get_wmi_conn.assert_called_with(
|
||||
self._vmutils._MSPS_NAMESPACE % self._vmutils._host)
|
||||
|
||||
@mock.patch.object(vmutils10.VMUtils10, '_get_wmi_conn')
|
||||
def test_conn_msps_no_namespace(self, mock_get_wmi_conn):
|
||||
self._vmutils._conn_msps_attr = None
|
||||
|
||||
mock_get_wmi_conn.side_effect = [exceptions.OSWinException]
|
||||
self.assertRaises(exceptions.OSWinException,
|
||||
lambda: self._vmutils._conn_msps)
|
||||
mock_get_wmi_conn.assert_called_with(
|
||||
self._vmutils._MSPS_NAMESPACE % self._vmutils._host)
|
||||
|
||||
def test_sec_svc(self):
|
||||
self._vmutils._sec_svc_attr = None
|
||||
self.assertEqual(
|
||||
self._vmutils._conn.Msvm_SecurityService.return_value[0],
|
||||
self._vmutils._sec_svc)
|
||||
|
||||
self._vmutils._conn.Msvm_SecurityService.assert_called_with()
|
||||
|
||||
def test_set_secure_boot_CA_required(self):
|
||||
vs_data = mock.MagicMock()
|
||||
mock_vssd = self._vmutils._conn.Msvm_VirtualSystemSettingData
|
||||
mock_vssd.return_value = [
|
||||
mock.MagicMock(SecureBootTemplateId=mock.sentinel.template_id)]
|
||||
|
||||
self._vmutils._set_secure_boot(vs_data, msft_ca_required=True)
|
||||
|
||||
self.assertTrue(vs_data.SecureBootEnabled)
|
||||
self.assertEqual(mock.sentinel.template_id,
|
||||
vs_data.SecureBootTemplateId)
|
||||
mock_vssd.assert_called_once_with(
|
||||
ElementName=self._vmutils._UEFI_CERTIFICATE_AUTH)
|
||||
|
||||
def test_vm_gen_supports_remotefx(self):
|
||||
ret = self._vmutils.vm_gen_supports_remotefx(mock.sentinel.VM_GEN)
|
||||
|
||||
self.assertTrue(ret)
|
||||
|
||||
def test_validate_remotefx_monitor_count(self):
|
||||
self.assertRaises(exceptions.HyperVRemoteFXException,
|
||||
self._vmutils._validate_remotefx_params,
|
||||
10, constants.REMOTEFX_MAX_RES_1024x768)
|
||||
|
||||
def test_validate_remotefx_max_resolution(self):
|
||||
self.assertRaises(exceptions.HyperVRemoteFXException,
|
||||
self._vmutils._validate_remotefx_params,
|
||||
1, '1024x700')
|
||||
|
||||
def test_validate_remotefx_vram(self):
|
||||
self.assertRaises(exceptions.HyperVRemoteFXException,
|
||||
self._vmutils._validate_remotefx_params,
|
||||
1, constants.REMOTEFX_MAX_RES_1024x768,
|
||||
vram_bytes=10000)
|
||||
|
||||
@mock.patch.object(vmutils10.VMUtils10, 'get_vm_generation')
|
||||
def _test_vm_has_s3_controller(self, vm_gen, mock_get_vm_gen):
|
||||
mock_get_vm_gen.return_value = vm_gen
|
||||
return self._vmutils._vm_has_s3_controller(mock.sentinel.fake_vm_name)
|
||||
|
||||
def test_vm_has_s3_controller_gen1(self):
|
||||
self.assertTrue(self._test_vm_has_s3_controller(constants.VM_GEN_1))
|
||||
|
||||
def test_vm_has_s3_controller_gen2(self):
|
||||
self.assertFalse(self._test_vm_has_s3_controller(constants.VM_GEN_2))
|
||||
|
||||
def test_populate_fsk(self):
|
||||
fsk_pairs = {mock.sentinel.computer: mock.sentinel.computer_value}
|
||||
|
||||
mock_fabricdata = (
|
||||
self._vmutils._conn_msps.Msps_FabricData.new.return_value)
|
||||
|
||||
fsk = self._vmutils._conn_msps.Msps_FSK.new.return_value
|
||||
mock_msps_pfp = self._vmutils._conn_msps.Msps_ProvisioningFileProcessor
|
||||
|
||||
self._vmutils.populate_fsk(mock.sentinel.fsk_filepath, fsk_pairs)
|
||||
|
||||
mock_msps_pfp.SerializeToFile.assert_called_once_with(
|
||||
mock.sentinel.fsk_filepath, fsk)
|
||||
self.assertEqual([mock_fabricdata], fsk.FabricDataPairs)
|
||||
self.assertEqual(mock.sentinel.computer, mock_fabricdata.key)
|
||||
self.assertEqual(mock.sentinel.computer_value,
|
||||
mock_fabricdata.Value)
|
||||
|
||||
@mock.patch.object(_wqlutils, 'get_element_associated_class')
|
||||
@mock.patch.object(vmutils10.VMUtils10, '_lookup_vm_check')
|
||||
def test_add_vtpm(self, mock_lookup_vm_check,
|
||||
mock_get_element_associated_class):
|
||||
mock_lookup_vm_check.return_value = mock.Mock(
|
||||
ConfigurationID=mock.sentinel.configuration_id)
|
||||
|
||||
mock_msps_pfp = self._vmutils._conn_msps.Msps_ProvisioningFileProcessor
|
||||
provisioning_file = mock.Mock(KeyProtector=mock.sentinel.keyprotector,
|
||||
PolicyData=mock.sentinel.policy)
|
||||
mock_msps_pfp.PopulateFromFile.return_value = [provisioning_file]
|
||||
security_profile = mock.Mock()
|
||||
|
||||
mock_get_element_associated_class.return_value = [security_profile]
|
||||
sec_profile_serialization = security_profile.GetText_.return_value
|
||||
|
||||
mock_sec_svc = self._vmutils._sec_svc
|
||||
mock_sec_svc.SetKeyProtector.return_value = (
|
||||
mock.sentinel.job_path_SetKeyProtector,
|
||||
mock.sentinel.ret_val_SetKeyProtector)
|
||||
mock_sec_svc.SetSecurityPolicy.return_value = (
|
||||
mock.sentinel.job_path_SetSecurityPolicy,
|
||||
mock.sentinel.ret_val_SetSecurityPolicy)
|
||||
mock_sec_svc.ModifySecuritySettings.return_value = (
|
||||
mock.sentinel.job_path_ModifySecuritySettings,
|
||||
mock.sentinel.ret_val_ModifySecuritySettings)
|
||||
|
||||
self._vmutils.add_vtpm(mock.sentinel.VM_NAME,
|
||||
mock.sentinel.pdk_filepath,
|
||||
shielded=True)
|
||||
|
||||
mock_lookup_vm_check.assert_called_with(mock.sentinel.VM_NAME)
|
||||
mock_msps_pfp.PopulateFromFile.assert_called_once_with(
|
||||
mock.sentinel.pdk_filepath)
|
||||
mock_get_element_associated_class.assert_called_once_with(
|
||||
self._vmutils._conn,
|
||||
self._vmutils._SECURITY_SETTING_DATA,
|
||||
element_uuid=mock.sentinel.configuration_id)
|
||||
mock_sec_svc.SetKeyProtector.assert_called_once_with(
|
||||
mock.sentinel.keyprotector,
|
||||
sec_profile_serialization)
|
||||
mock_sec_svc.SetSecurityPolicy.assert_called_once_with(
|
||||
mock.sentinel.policy, sec_profile_serialization)
|
||||
mock_sec_svc.ModifySecuritySettings.assert_called_once_with(
|
||||
sec_profile_serialization)
|
||||
|
||||
expected_call = [
|
||||
mock.call(mock.sentinel.job_path_SetKeyProtector,
|
||||
mock.sentinel.ret_val_SetKeyProtector),
|
||||
mock.call(mock.sentinel.job_path_SetSecurityPolicy,
|
||||
mock.sentinel.ret_val_SetSecurityPolicy),
|
||||
mock.call(mock.sentinel.job_path_ModifySecuritySettings,
|
||||
mock.sentinel.ret_val_ModifySecuritySettings)]
|
||||
self._vmutils._jobutils.check_ret_val.has_calls(expected_call)
|
||||
self.assertTrue(security_profile.EncryptStateAndVmMigrationTraffic)
|
||||
self.assertTrue(security_profile.TpmEnabled)
|
||||
self.assertTrue(security_profile.ShieldingRequested)
|
||||
|
||||
@mock.patch.object(vmutils10.VMUtils10, '_lookup_vm_check')
|
||||
def test_provision_vm(self, mock_lookup_vm_check):
|
||||
mock_vm = mock_lookup_vm_check.return_value
|
||||
provisioning_srv = self._vmutils._conn_msps.Msps_ProvisioningService
|
||||
|
||||
provisioning_srv.ProvisionMachine.return_value = (
|
||||
mock.sentinel.job_path_ProvisionMachine,
|
||||
mock.sentinel.ret_val_ProvisionMachine)
|
||||
|
||||
self._vmutils.provision_vm(mock.sentinel.vm_name,
|
||||
mock.sentinel.fsk_file,
|
||||
mock.sentinel.pdk_file)
|
||||
|
||||
provisioning_srv.ProvisionMachine.assert_called_once_with(
|
||||
mock.sentinel.fsk_file,
|
||||
mock_vm.ConfigurationID,
|
||||
mock.sentinel.pdk_file)
|
||||
self._vmutils._jobutils.check_ret_val.assert_called_once_with(
|
||||
mock.sentinel.ret_val_ProvisionMachine,
|
||||
mock.sentinel.job_path_ProvisionMachine)
|
||||
|
||||
mock_lookup_vm_check.assert_called_with(mock.sentinel.vm_name)
|
||||
|
||||
@mock.patch.object(_wqlutils, 'get_element_associated_class')
|
||||
@mock.patch.object(vmutils10.VMUtils10, 'get_vm_id')
|
||||
def _test_secure_vm(self, mock_get_vm_id,
|
||||
mock_get_element_associated_class,
|
||||
is_encrypted_vm=True):
|
||||
inst_id = mock_get_vm_id.return_value
|
||||
security_profile = mock.MagicMock()
|
||||
mock_get_element_associated_class.return_value = [security_profile]
|
||||
security_profile.EncryptStateAndVmMigrationTraffic = is_encrypted_vm
|
||||
|
||||
response = self._vmutils.is_secure_vm(mock.sentinel.instance_name)
|
||||
self.assertEqual(is_encrypted_vm, response)
|
||||
|
||||
mock_get_element_associated_class.assert_called_once_with(
|
||||
self._vmutils._conn,
|
||||
self._vmutils._SECURITY_SETTING_DATA,
|
||||
element_uuid=inst_id)
|
||||
|
||||
def test_is_secure_shielded_vm(self):
|
||||
self._test_secure_vm()
|
||||
|
||||
def test_not_secure_vm(self):
|
||||
self._test_secure_vm(is_encrypted_vm=False)
|
@ -1,261 +0,0 @@
|
||||
# Copyright 2016 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
|
||||
from os_win import constants
|
||||
from os_win import exceptions
|
||||
from os_win.tests import test_base
|
||||
from os_win.utils.dns import dnsutils
|
||||
|
||||
|
||||
class DNSUtilsTestCase(test_base.OsWinBaseTestCase):
|
||||
"""Unit tests for the Hyper-V DNSUtils class."""
|
||||
|
||||
def setUp(self):
|
||||
super(DNSUtilsTestCase, self).setUp()
|
||||
self._dnsutils = dnsutils.DNSUtils()
|
||||
self._dnsutils._dns_manager_attr = mock.MagicMock()
|
||||
|
||||
@mock.patch.object(dnsutils.DNSUtils, '_get_wmi_obj')
|
||||
def test_dns_manager(self, mock_get_wmi_obj):
|
||||
self._dnsutils._dns_manager_attr = None
|
||||
|
||||
self.assertEqual(mock_get_wmi_obj.return_value,
|
||||
self._dnsutils._dns_manager)
|
||||
|
||||
mock_get_wmi_obj.assert_called_once_with(
|
||||
self._dnsutils._DNS_NAMESPACE % self._dnsutils._host)
|
||||
|
||||
@mock.patch.object(dnsutils.DNSUtils, '_get_wmi_obj')
|
||||
def test_dns_manager_fail(self, mock_get_wmi_obj):
|
||||
self._dnsutils._dns_manager_attr = None
|
||||
expected_exception = exceptions.DNSException
|
||||
mock_get_wmi_obj.side_effect = expected_exception
|
||||
|
||||
self.assertRaises(expected_exception,
|
||||
lambda: self._dnsutils._dns_manager)
|
||||
|
||||
mock_get_wmi_obj.assert_called_once_with(
|
||||
self._dnsutils._DNS_NAMESPACE % self._dnsutils._host)
|
||||
|
||||
def test_get_zone(self):
|
||||
zone_manager = self._dnsutils._dns_manager.MicrosoftDNS_Zone
|
||||
zone_manager.return_value = [mock.sentinel.zone]
|
||||
|
||||
zone_found = self._dnsutils._get_zone(mock.sentinel.zone_name)
|
||||
|
||||
zone_manager.assert_called_once_with(Name=mock.sentinel.zone_name)
|
||||
self.assertEqual(mock.sentinel.zone, zone_found)
|
||||
|
||||
def test_get_zone_ignore_missing(self):
|
||||
zone_manager = self._dnsutils._dns_manager.MicrosoftDNS_Zone
|
||||
zone_manager.return_value = []
|
||||
|
||||
zone_found = self._dnsutils._get_zone(mock.sentinel.zone_name)
|
||||
|
||||
zone_manager.assert_called_once_with(Name=mock.sentinel.zone_name)
|
||||
self.assertIsNone(zone_found)
|
||||
|
||||
def test_get_zone_missing(self):
|
||||
zone_manager = self._dnsutils._dns_manager.MicrosoftDNS_Zone
|
||||
zone_manager.return_value = []
|
||||
|
||||
self.assertRaises(exceptions.DNSZoneNotFound,
|
||||
self._dnsutils._get_zone,
|
||||
mock.sentinel.zone_name,
|
||||
ignore_missing=False)
|
||||
zone_manager.assert_called_once_with(Name=mock.sentinel.zone_name)
|
||||
|
||||
def test_zone_list(self):
|
||||
zone_manager = self._dnsutils._dns_manager.MicrosoftDNS_Zone
|
||||
zone_manager.return_value = [mock.Mock(Name=mock.sentinel.fake_name1),
|
||||
mock.Mock(Name=mock.sentinel.fake_name2)]
|
||||
|
||||
zone_list = self._dnsutils.zone_list()
|
||||
|
||||
expected_zone_list = [mock.sentinel.fake_name1,
|
||||
mock.sentinel.fake_name2]
|
||||
self.assertEqual(expected_zone_list, zone_list)
|
||||
zone_manager.assert_called_once_with()
|
||||
|
||||
@mock.patch.object(dnsutils.DNSUtils, '_get_zone')
|
||||
def test_zone_exists(self, mock_get_zone):
|
||||
zone_already_exists = self._dnsutils.zone_exists(
|
||||
mock.sentinel.zone_name)
|
||||
mock_get_zone.assert_called_once_with(mock.sentinel.zone_name)
|
||||
|
||||
self.assertTrue(zone_already_exists)
|
||||
|
||||
@mock.patch.object(dnsutils.DNSUtils, '_get_zone')
|
||||
def test_zone_exists_false(self, mock_get_zone):
|
||||
mock_get_zone.return_value = None
|
||||
|
||||
zone_already_exists = self._dnsutils.zone_exists(
|
||||
mock.sentinel.zone_name)
|
||||
mock_get_zone.assert_called_once_with(mock.sentinel.zone_name)
|
||||
|
||||
self.assertFalse(zone_already_exists)
|
||||
|
||||
@mock.patch.object(dnsutils.DNSUtils, '_get_zone')
|
||||
def test_get_zone_properties(self, mock_get_zone):
|
||||
mock_get_zone.return_value = mock.Mock(
|
||||
ZoneType=mock.sentinel.zone_type,
|
||||
DsIntegrated=mock.sentinel.ds_integrated,
|
||||
DataFile=mock.sentinel.data_file_name,
|
||||
MasterServers=[mock.sentinel.ip_addrs])
|
||||
|
||||
zone_properties = self._dnsutils.get_zone_properties(
|
||||
mock.sentinel.zone_name)
|
||||
expected_zone_props = {
|
||||
'zone_type': mock.sentinel.zone_type,
|
||||
'ds_integrated': mock.sentinel.ds_integrated,
|
||||
'master_servers': [mock.sentinel.ip_addrs],
|
||||
'data_file_name': mock.sentinel.data_file_name
|
||||
}
|
||||
self.assertDictEqual(expected_zone_props, zone_properties)
|
||||
mock_get_zone.assert_called_once_with(mock.sentinel.zone_name,
|
||||
ignore_missing=False)
|
||||
|
||||
@mock.patch.object(dnsutils.DNSUtils, 'zone_exists')
|
||||
def test_zone_create(self, mock_zone_exists):
|
||||
mock_zone_exists.return_value = False
|
||||
zone_manager = self._dnsutils._dns_manager.MicrosoftDNS_Zone
|
||||
zone_manager.CreateZone.return_value = (mock.sentinel.zone_path,)
|
||||
|
||||
zone_path = self._dnsutils.zone_create(
|
||||
zone_name=mock.sentinel.zone_name,
|
||||
zone_type=mock.sentinel.zone_type,
|
||||
ds_integrated=mock.sentinel.ds_integrated,
|
||||
data_file_name=mock.sentinel.data_file_name,
|
||||
ip_addrs=mock.sentinel.ip_addrs,
|
||||
admin_email_name=mock.sentinel.admin_email_name)
|
||||
|
||||
zone_manager.CreateZone.assert_called_once_with(
|
||||
ZoneName=mock.sentinel.zone_name,
|
||||
ZoneType=mock.sentinel.zone_type,
|
||||
DsIntegrated=mock.sentinel.ds_integrated,
|
||||
DataFileName=mock.sentinel.data_file_name,
|
||||
IpAddr=mock.sentinel.ip_addrs,
|
||||
AdminEmailname=mock.sentinel.admin_email_name)
|
||||
mock_zone_exists.assert_called_once_with(mock.sentinel.zone_name)
|
||||
self.assertEqual(mock.sentinel.zone_path, zone_path)
|
||||
|
||||
@mock.patch.object(dnsutils.DNSUtils, 'zone_exists')
|
||||
def test_zone_create_existing_zone(self, mock_zone_exists):
|
||||
self.assertRaises(exceptions.DNSZoneAlreadyExists,
|
||||
self._dnsutils.zone_create,
|
||||
zone_name=mock.sentinel.zone_name,
|
||||
zone_type=mock.sentinel.zone_type,
|
||||
ds_integrated=mock.sentinel.ds_integrated)
|
||||
mock_zone_exists.assert_called_once_with(mock.sentinel.zone_name)
|
||||
|
||||
@mock.patch.object(dnsutils.DNSUtils, '_get_zone')
|
||||
def test_zone_delete(self, mock_get_zone):
|
||||
self._dnsutils.zone_delete(mock.sentinel.zone_name)
|
||||
|
||||
mock_get_zone.assert_called_once_with(mock.sentinel.zone_name)
|
||||
mock_get_zone.return_value.Delete_.assert_called_once_with()
|
||||
|
||||
@mock.patch.object(dnsutils.DNSUtils, '_get_zone')
|
||||
def test_zone_modify(self, mock_get_zone):
|
||||
mock_zone = mock.MagicMock(
|
||||
AllowUpdate=mock.sentinel.allowupdate,
|
||||
DisableWINSRecordReplication=mock.sentinel.disablewins,
|
||||
Notify=mock.sentinel.notify,
|
||||
SecureSecondaries=mock.sentinel.securesecondaries)
|
||||
mock_get_zone.return_value = mock_zone
|
||||
|
||||
self._dnsutils.zone_modify(
|
||||
mock.sentinel.zone_name,
|
||||
allow_update=None,
|
||||
disable_wins=mock.sentinel.disable_wins,
|
||||
notify=None,
|
||||
reverse=mock.sentinel.reverse,
|
||||
secure_secondaries=None)
|
||||
|
||||
self.assertEqual(mock.sentinel.allowupdate, mock_zone.AllowUpdate)
|
||||
self.assertEqual(mock.sentinel.disable_wins,
|
||||
mock_zone.DisableWINSRecordReplication)
|
||||
self.assertEqual(mock.sentinel.notify, mock_zone.Notify)
|
||||
self.assertEqual(mock.sentinel.reverse,
|
||||
mock_zone.Reverse)
|
||||
self.assertEqual(mock.sentinel.securesecondaries,
|
||||
mock_zone.SecureSecondaries)
|
||||
mock_zone.put.assert_called_once_with()
|
||||
|
||||
@mock.patch.object(dnsutils.DNSUtils, '_get_zone')
|
||||
def test_zone_update_force_refresh(self, mock_get_zone):
|
||||
mock_zone = mock.MagicMock(DsIntegrated=False,
|
||||
ZoneType=constants.DNS_ZONE_TYPE_SECONDARY)
|
||||
mock_get_zone.return_value = mock_zone
|
||||
|
||||
self._dnsutils.zone_update(mock.sentinel.zone_name)
|
||||
|
||||
mock_get_zone.assert_called_once_with(
|
||||
mock.sentinel.zone_name,
|
||||
ignore_missing=False)
|
||||
mock_zone.ForceRefresh.assert_called_once_with()
|
||||
|
||||
@mock.patch.object(dnsutils.DNSUtils, '_get_zone')
|
||||
def test_zone_update_from_ds(self, mock_get_zone):
|
||||
mock_zone = mock.MagicMock(DsIntegrated=True,
|
||||
ZoneType=constants.DNS_ZONE_TYPE_PRIMARY)
|
||||
mock_get_zone.return_value = mock_zone
|
||||
|
||||
self._dnsutils.zone_update(mock.sentinel.zone_name)
|
||||
|
||||
mock_get_zone.assert_called_once_with(
|
||||
mock.sentinel.zone_name,
|
||||
ignore_missing=False)
|
||||
mock_zone.UpdateFromDS.assert_called_once_with()
|
||||
|
||||
@mock.patch.object(dnsutils.DNSUtils, '_get_zone')
|
||||
def test_zone_update_reload_zone(self, mock_get_zone):
|
||||
mock_zone = mock.MagicMock(DsIntegrated=False,
|
||||
ZoneType=constants.DNS_ZONE_TYPE_PRIMARY)
|
||||
mock_get_zone.return_value = mock_zone
|
||||
|
||||
self._dnsutils.zone_update(mock.sentinel.zone_name)
|
||||
|
||||
mock_get_zone.assert_called_once_with(
|
||||
mock.sentinel.zone_name,
|
||||
ignore_missing=False)
|
||||
mock_zone.ReloadZone.assert_called_once_with()
|
||||
|
||||
@mock.patch.object(dnsutils.DNSUtils, 'zone_exists')
|
||||
def test_get_zone_serial(self, mock_zone_exists):
|
||||
mock_zone_exists.return_value = True
|
||||
fake_serial_number = 1
|
||||
msdns_soatype = self._dnsutils._dns_manager.MicrosoftDNS_SOAType
|
||||
msdns_soatype.return_value = [
|
||||
mock.Mock(SerialNumber=fake_serial_number)]
|
||||
|
||||
serial_number = self._dnsutils.get_zone_serial(mock.sentinel.zone_name)
|
||||
|
||||
expected_serial_number = fake_serial_number
|
||||
self.assertEqual(expected_serial_number, serial_number)
|
||||
msdns_soatype.assert_called_once_with(
|
||||
ContainerName=mock.sentinel.zone_name)
|
||||
mock_zone_exists.assert_called_once_with(mock.sentinel.zone_name)
|
||||
|
||||
@mock.patch.object(dnsutils.DNSUtils, 'zone_exists')
|
||||
def test_get_zone_serial_zone_not_found(self, mock_zone_exists):
|
||||
mock_zone_exists.return_value = False
|
||||
|
||||
serial_number = self._dnsutils.get_zone_serial(mock.sentinel.zone_name)
|
||||
|
||||
self.assertIsNone(serial_number)
|
||||
mock_zone_exists.assert_called_once_with(mock.sentinel.zone_name)
|
@ -1,364 +0,0 @@
|
||||
# Copyright 2014 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.import mock
|
||||
|
||||
import mock
|
||||
from oslotest import base
|
||||
|
||||
import os
|
||||
import six
|
||||
|
||||
from os_win import constants
|
||||
from os_win import exceptions
|
||||
from os_win.utils.io import ioutils
|
||||
|
||||
|
||||
class IOThreadTestCase(base.BaseTestCase):
|
||||
_FAKE_SRC = r'fake_source_file'
|
||||
_FAKE_DEST = r'fake_dest_file'
|
||||
_FAKE_MAX_BYTES = 1
|
||||
|
||||
def setUp(self):
|
||||
self._iothread = ioutils.IOThread(
|
||||
self._FAKE_SRC, self._FAKE_DEST, self._FAKE_MAX_BYTES)
|
||||
super(IOThreadTestCase, self).setUp()
|
||||
|
||||
@mock.patch.object(six.moves.builtins, 'open')
|
||||
@mock.patch('os.rename')
|
||||
@mock.patch('os.path.exists')
|
||||
@mock.patch('os.remove')
|
||||
def test_copy(self, fake_remove, fake_exists, fake_rename, fake_open):
|
||||
fake_data = 'a'
|
||||
fake_src = mock.Mock()
|
||||
fake_dest = mock.Mock()
|
||||
|
||||
fake_src.read.return_value = fake_data
|
||||
fake_dest.tell.return_value = 0
|
||||
fake_exists.return_value = True
|
||||
|
||||
mock_context_manager = mock.MagicMock()
|
||||
fake_open.return_value = mock_context_manager
|
||||
mock_context_manager.__enter__.side_effect = [fake_src, fake_dest]
|
||||
self._iothread._stopped.isSet = mock.Mock(side_effect=[False, True])
|
||||
|
||||
self._iothread._copy()
|
||||
|
||||
fake_dest.seek.assert_called_once_with(0, os.SEEK_END)
|
||||
fake_dest.write.assert_called_once_with(fake_data)
|
||||
fake_dest.close.assert_called_once_with()
|
||||
fake_rename.assert_called_once_with(
|
||||
self._iothread._dest, self._iothread._dest_archive)
|
||||
fake_remove.assert_called_once_with(
|
||||
self._iothread._dest_archive)
|
||||
self.assertEqual(3, fake_open.call_count)
|
||||
|
||||
|
||||
class IOUtilsTestCase(base.BaseTestCase):
|
||||
def setUp(self):
|
||||
super(IOUtilsTestCase, self).setUp()
|
||||
self._setup_lib_mocks()
|
||||
|
||||
self._ioutils = ioutils.IOUtils()
|
||||
self._ioutils._win32_utils = mock.Mock()
|
||||
|
||||
self._mock_run = self._ioutils._win32_utils.run_and_check_output
|
||||
self._run_args = dict(kernel32_lib_func=True,
|
||||
failure_exc=exceptions.Win32IOException)
|
||||
|
||||
self.addCleanup(mock.patch.stopall)
|
||||
|
||||
def _setup_lib_mocks(self):
|
||||
self._ctypes = mock.Mock()
|
||||
# This is used in order to easily make assertions on the variables
|
||||
# passed by reference.
|
||||
self._ctypes.byref = lambda x: (x, "byref")
|
||||
self._ctypes.c_wchar_p = lambda x: (x, "c_wchar_p")
|
||||
|
||||
mock.patch.multiple(ioutils,
|
||||
ctypes=self._ctypes, kernel32=mock.DEFAULT,
|
||||
create=True).start()
|
||||
|
||||
def test_run_and_check_output(self):
|
||||
ret_val = self._ioutils._run_and_check_output(
|
||||
mock.sentinel.func, mock.sentinel.arg)
|
||||
|
||||
self._mock_run.assert_called_once_with(mock.sentinel.func,
|
||||
mock.sentinel.arg,
|
||||
**self._run_args)
|
||||
self.assertEqual(self._mock_run.return_value, ret_val)
|
||||
|
||||
def test_wait_named_pipe(self):
|
||||
fake_timeout_s = 10
|
||||
self._ioutils.wait_named_pipe(mock.sentinel.pipe_name,
|
||||
timeout=fake_timeout_s)
|
||||
|
||||
self._mock_run.assert_called_once_with(
|
||||
ioutils.kernel32.WaitNamedPipeW,
|
||||
self._ctypes.c_wchar_p(mock.sentinel.pipe_name),
|
||||
fake_timeout_s * 1000,
|
||||
**self._run_args)
|
||||
|
||||
def test_open(self):
|
||||
handle = self._ioutils.open(mock.sentinel.path,
|
||||
mock.sentinel.access,
|
||||
mock.sentinel.share_mode,
|
||||
mock.sentinel.create_disposition,
|
||||
mock.sentinel.flags)
|
||||
|
||||
self._mock_run.assert_called_once_with(
|
||||
ioutils.kernel32.CreateFileW,
|
||||
self._ctypes.c_wchar_p(mock.sentinel.path),
|
||||
mock.sentinel.access,
|
||||
mock.sentinel.share_mode,
|
||||
None,
|
||||
mock.sentinel.create_disposition,
|
||||
mock.sentinel.flags,
|
||||
None,
|
||||
error_ret_vals=[ioutils.INVALID_HANDLE_VALUE],
|
||||
**self._run_args)
|
||||
self.assertEqual(self._mock_run.return_value, handle)
|
||||
|
||||
def test_cancel_io(self):
|
||||
self._ioutils.cancel_io(mock.sentinel.handle,
|
||||
mock.sentinel.overlapped_struct,
|
||||
ignore_invalid_handle=True)
|
||||
|
||||
expected_ignored_err_codes = [ioutils.ERROR_NOT_FOUND,
|
||||
ioutils.ERROR_INVALID_HANDLE]
|
||||
|
||||
self._mock_run.assert_called_once_with(
|
||||
ioutils.kernel32.CancelIoEx,
|
||||
mock.sentinel.handle,
|
||||
self._ctypes.byref(mock.sentinel.overlapped_struct),
|
||||
ignored_error_codes=expected_ignored_err_codes,
|
||||
**self._run_args)
|
||||
|
||||
def test_close_handle(self):
|
||||
self._ioutils.close_handle(mock.sentinel.handle)
|
||||
|
||||
self._mock_run.assert_called_once_with(ioutils.kernel32.CloseHandle,
|
||||
mock.sentinel.handle,
|
||||
**self._run_args)
|
||||
|
||||
def test_wait_io_completion(self):
|
||||
self._ioutils._wait_io_completion(mock.sentinel.event)
|
||||
|
||||
self._mock_run.assert_called_once_with(
|
||||
ioutils.kernel32.WaitForSingleObjectEx,
|
||||
mock.sentinel.event,
|
||||
ioutils.WAIT_INFINITE_TIMEOUT,
|
||||
True,
|
||||
error_ret_vals=[ioutils.WAIT_FAILED],
|
||||
**self._run_args)
|
||||
|
||||
def test_set_event(self):
|
||||
self._ioutils.set_event(mock.sentinel.event)
|
||||
|
||||
self._mock_run.assert_called_once_with(ioutils.kernel32.SetEvent,
|
||||
mock.sentinel.event,
|
||||
**self._run_args)
|
||||
|
||||
def test_reset_event(self):
|
||||
self._ioutils._reset_event(mock.sentinel.event)
|
||||
|
||||
self._mock_run.assert_called_once_with(ioutils.kernel32.ResetEvent,
|
||||
mock.sentinel.event,
|
||||
**self._run_args)
|
||||
|
||||
def test_create_event(self):
|
||||
event = self._ioutils._create_event(mock.sentinel.event_attributes,
|
||||
mock.sentinel.manual_reset,
|
||||
mock.sentinel.initial_state,
|
||||
mock.sentinel.name)
|
||||
|
||||
self._mock_run.assert_called_once_with(ioutils.kernel32.CreateEventW,
|
||||
mock.sentinel.event_attributes,
|
||||
mock.sentinel.manual_reset,
|
||||
mock.sentinel.initial_state,
|
||||
mock.sentinel.name,
|
||||
error_ret_vals=[None],
|
||||
**self._run_args)
|
||||
self.assertEqual(self._mock_run.return_value, event)
|
||||
|
||||
@mock.patch.object(ioutils, 'LPOVERLAPPED', create=True)
|
||||
@mock.patch.object(ioutils, 'LPOVERLAPPED_COMPLETION_ROUTINE',
|
||||
lambda x: x, create=True)
|
||||
@mock.patch.object(ioutils.IOUtils, 'set_event')
|
||||
def test_get_completion_routine(self, mock_set_event,
|
||||
mock_LPOVERLAPPED):
|
||||
mock_callback = mock.Mock()
|
||||
|
||||
compl_routine = self._ioutils.get_completion_routine(mock_callback)
|
||||
compl_routine(mock.sentinel.error_code,
|
||||
mock.sentinel.num_bytes,
|
||||
mock.sentinel.lpOverLapped)
|
||||
|
||||
self._ctypes.cast.assert_called_once_with(mock.sentinel.lpOverLapped,
|
||||
ioutils.LPOVERLAPPED)
|
||||
mock_overlapped_struct = self._ctypes.cast.return_value.contents
|
||||
mock_set_event.assert_called_once_with(mock_overlapped_struct.hEvent)
|
||||
mock_callback.assert_called_once_with(mock.sentinel.num_bytes)
|
||||
|
||||
@mock.patch.object(ioutils, 'OVERLAPPED', create=True)
|
||||
@mock.patch.object(ioutils.IOUtils, '_create_event')
|
||||
def test_get_new_overlapped_structure(self, mock_create_event,
|
||||
mock_OVERLAPPED):
|
||||
overlapped_struct = self._ioutils.get_new_overlapped_structure()
|
||||
|
||||
self.assertEqual(mock_OVERLAPPED.return_value, overlapped_struct)
|
||||
self.assertEqual(mock_create_event.return_value,
|
||||
overlapped_struct.hEvent)
|
||||
|
||||
@mock.patch.object(ioutils.IOUtils, '_reset_event')
|
||||
@mock.patch.object(ioutils.IOUtils, '_wait_io_completion')
|
||||
def test_read(self, mock_wait_io_completion, mock_reset_event):
|
||||
mock_overlapped_struct = mock.Mock()
|
||||
mock_event = mock_overlapped_struct.hEvent
|
||||
self._ioutils.read(mock.sentinel.handle, mock.sentinel.buff,
|
||||
mock.sentinel.num_bytes,
|
||||
mock_overlapped_struct,
|
||||
mock.sentinel.compl_routine)
|
||||
|
||||
mock_reset_event.assert_called_once_with(mock_event)
|
||||
self._mock_run.assert_called_once_with(ioutils.kernel32.ReadFileEx,
|
||||
mock.sentinel.handle,
|
||||
mock.sentinel.buff,
|
||||
mock.sentinel.num_bytes,
|
||||
self._ctypes.byref(
|
||||
mock_overlapped_struct),
|
||||
mock.sentinel.compl_routine,
|
||||
**self._run_args)
|
||||
mock_wait_io_completion.assert_called_once_with(mock_event)
|
||||
|
||||
@mock.patch.object(ioutils.IOUtils, '_reset_event')
|
||||
@mock.patch.object(ioutils.IOUtils, '_wait_io_completion')
|
||||
def test_write(self, mock_wait_io_completion, mock_reset_event):
|
||||
mock_overlapped_struct = mock.Mock()
|
||||
mock_event = mock_overlapped_struct.hEvent
|
||||
self._ioutils.write(mock.sentinel.handle, mock.sentinel.buff,
|
||||
mock.sentinel.num_bytes,
|
||||
mock_overlapped_struct,
|
||||
mock.sentinel.compl_routine)
|
||||
|
||||
mock_reset_event.assert_called_once_with(mock_event)
|
||||
self._mock_run.assert_called_once_with(ioutils.kernel32.WriteFileEx,
|
||||
mock.sentinel.handle,
|
||||
mock.sentinel.buff,
|
||||
mock.sentinel.num_bytes,
|
||||
self._ctypes.byref(
|
||||
mock_overlapped_struct),
|
||||
mock.sentinel.compl_routine,
|
||||
**self._run_args)
|
||||
mock_wait_io_completion.assert_called_once_with(mock_event)
|
||||
|
||||
def test_buffer_ops(self):
|
||||
mock.patch.stopall()
|
||||
|
||||
fake_data = 'fake data'
|
||||
|
||||
buff = self._ioutils.get_buffer(len(fake_data), data=fake_data)
|
||||
buff_data = self._ioutils.get_buffer_data(buff, len(fake_data))
|
||||
|
||||
self.assertEqual(six.b(fake_data), buff_data)
|
||||
|
||||
|
||||
class IOQueueTestCase(base.BaseTestCase):
|
||||
def setUp(self):
|
||||
super(IOQueueTestCase, self).setUp()
|
||||
|
||||
self._mock_queue = mock.Mock()
|
||||
queue_patcher = mock.patch.object(ioutils.Queue, 'Queue',
|
||||
new=self._mock_queue)
|
||||
queue_patcher.start()
|
||||
self.addCleanup(queue_patcher.stop)
|
||||
|
||||
self._mock_client_connected = mock.Mock()
|
||||
self._ioqueue = ioutils.IOQueue(self._mock_client_connected)
|
||||
|
||||
def test_get(self):
|
||||
self._mock_client_connected.isSet.return_value = True
|
||||
self._mock_queue.get.return_value = mock.sentinel.item
|
||||
|
||||
queue_item = self._ioqueue.get(timeout=mock.sentinel.timeout)
|
||||
|
||||
self._mock_queue.get.assert_called_once_with(
|
||||
self._ioqueue, timeout=mock.sentinel.timeout)
|
||||
self.assertEqual(mock.sentinel.item, queue_item)
|
||||
|
||||
def _test_get_timeout(self, continue_on_timeout=True):
|
||||
self._mock_client_connected.isSet.side_effect = [True, True, False]
|
||||
self._mock_queue.get.side_effect = ioutils.Queue.Empty
|
||||
|
||||
queue_item = self._ioqueue.get(timeout=mock.sentinel.timeout,
|
||||
continue_on_timeout=continue_on_timeout)
|
||||
|
||||
expected_calls_number = 2 if continue_on_timeout else 1
|
||||
self._mock_queue.get.assert_has_calls(
|
||||
[mock.call(self._ioqueue, timeout=mock.sentinel.timeout)] *
|
||||
expected_calls_number)
|
||||
self.assertIsNone(queue_item)
|
||||
|
||||
def test_get_continue_on_timeout(self):
|
||||
# Test that the queue blocks as long
|
||||
# as the client connected event is set.
|
||||
self._test_get_timeout()
|
||||
|
||||
def test_get_break_on_timeout(self):
|
||||
self._test_get_timeout(continue_on_timeout=False)
|
||||
|
||||
def test_put(self):
|
||||
self._mock_client_connected.isSet.side_effect = [True, True, False]
|
||||
self._mock_queue.put.side_effect = ioutils.Queue.Full
|
||||
|
||||
self._ioqueue.put(mock.sentinel.item,
|
||||
timeout=mock.sentinel.timeout)
|
||||
|
||||
self._mock_queue.put.assert_has_calls(
|
||||
[mock.call(self._ioqueue, mock.sentinel.item,
|
||||
timeout=mock.sentinel.timeout)] * 2)
|
||||
|
||||
@mock.patch.object(ioutils.IOQueue, 'get')
|
||||
def _test_get_burst(self, mock_get,
|
||||
exceeded_max_size=False):
|
||||
fake_data = 'fake_data'
|
||||
|
||||
mock_get.side_effect = [fake_data, fake_data, None]
|
||||
|
||||
if exceeded_max_size:
|
||||
max_size = 0
|
||||
else:
|
||||
max_size = constants.SERIAL_CONSOLE_BUFFER_SIZE
|
||||
|
||||
ret_val = self._ioqueue.get_burst(
|
||||
timeout=mock.sentinel.timeout,
|
||||
burst_timeout=mock.sentinel.burst_timeout,
|
||||
max_size=max_size)
|
||||
|
||||
expected_calls = [mock.call(timeout=mock.sentinel.timeout)]
|
||||
expected_ret_val = fake_data
|
||||
|
||||
if not exceeded_max_size:
|
||||
expected_calls.append(
|
||||
mock.call(timeout=mock.sentinel.burst_timeout,
|
||||
continue_on_timeout=False))
|
||||
expected_ret_val += fake_data
|
||||
|
||||
mock_get.assert_has_calls(expected_calls)
|
||||
self.assertEqual(expected_ret_val, ret_val)
|
||||
|
||||
def test_get_burst(self):
|
||||
self._test_get_burst()
|
||||
|
||||
def test_get_burst_exceeded_size(self):
|
||||
self._test_get_burst(exceeded_max_size=True)
|
@ -1,368 +0,0 @@
|
||||
# Copyright 2015 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import errno
|
||||
import mock
|
||||
from oslotest import base
|
||||
from six.moves import builtins
|
||||
|
||||
from os_win import constants
|
||||
from os_win import exceptions
|
||||
from os_win.utils.io import ioutils
|
||||
from os_win.utils.io import namedpipe
|
||||
|
||||
|
||||
class NamedPipeTestCase(base.BaseTestCase):
|
||||
_FAKE_LOG_PATH = 'fake_log_path'
|
||||
|
||||
@mock.patch.object(namedpipe.NamedPipeHandler, '_setup_io_structures')
|
||||
def setUp(self, mock_setup_structures):
|
||||
super(NamedPipeTestCase, self).setUp()
|
||||
|
||||
self._mock_input_queue = mock.Mock()
|
||||
self._mock_output_queue = mock.Mock()
|
||||
self._mock_client_connected = mock.Mock()
|
||||
|
||||
self._ioutils = mock.Mock()
|
||||
|
||||
threading_patcher = mock.patch.object(namedpipe, 'threading')
|
||||
threading_patcher.start()
|
||||
self.addCleanup(threading_patcher.stop)
|
||||
|
||||
self._handler = namedpipe.NamedPipeHandler(
|
||||
mock.sentinel.pipe_name,
|
||||
self._mock_input_queue,
|
||||
self._mock_output_queue,
|
||||
self._mock_client_connected,
|
||||
self._FAKE_LOG_PATH)
|
||||
self._handler._ioutils = self._ioutils
|
||||
|
||||
def _mock_setup_pipe_handler(self):
|
||||
self._handler._log_file_handle = mock.Mock()
|
||||
self._handler._pipe_handle = mock.sentinel.pipe_handle
|
||||
self._r_worker = mock.Mock()
|
||||
self._w_worker = mock.Mock()
|
||||
self._handler._workers = [self._r_worker, self._w_worker]
|
||||
self._handler._r_buffer = mock.Mock()
|
||||
self._handler._w_buffer = mock.Mock()
|
||||
self._handler._r_overlapped = mock.Mock()
|
||||
self._handler._w_overlapped = mock.Mock()
|
||||
self._handler._r_completion_routine = mock.Mock()
|
||||
self._handler._w_completion_routine = mock.Mock()
|
||||
|
||||
@mock.patch.object(builtins, 'open')
|
||||
@mock.patch.object(namedpipe.NamedPipeHandler, '_open_pipe')
|
||||
def test_start_pipe_handler(self, mock_open_pipe, mock_open):
|
||||
self._handler.start()
|
||||
|
||||
mock_open_pipe.assert_called_once_with()
|
||||
mock_open.assert_called_once_with(self._FAKE_LOG_PATH, 'ab', 1)
|
||||
self.assertEqual(mock_open.return_value,
|
||||
self._handler._log_file_handle)
|
||||
|
||||
thread = namedpipe.threading.Thread
|
||||
thread.assert_has_calls(
|
||||
[mock.call(target=self._handler._read_from_pipe),
|
||||
mock.call().setDaemon(True),
|
||||
mock.call().start(),
|
||||
mock.call(target=self._handler._write_to_pipe),
|
||||
mock.call().setDaemon(True),
|
||||
mock.call().start()])
|
||||
|
||||
@mock.patch.object(namedpipe.NamedPipeHandler, 'stop')
|
||||
@mock.patch.object(namedpipe.NamedPipeHandler, '_open_pipe')
|
||||
def test_start_pipe_handler_exception(self, mock_open_pipe,
|
||||
mock_stop_handler):
|
||||
mock_open_pipe.side_effect = Exception
|
||||
|
||||
self.assertRaises(exceptions.OSWinException,
|
||||
self._handler.start)
|
||||
|
||||
mock_stop_handler.assert_called_once_with()
|
||||
|
||||
@mock.patch.object(namedpipe.NamedPipeHandler, '_cleanup_handles')
|
||||
@mock.patch.object(namedpipe.NamedPipeHandler, '_cancel_io')
|
||||
def _test_stop_pipe_handler(self, mock_cancel_io,
|
||||
mock_cleanup_handles,
|
||||
workers_started=True):
|
||||
self._mock_setup_pipe_handler()
|
||||
if not workers_started:
|
||||
handler_workers = []
|
||||
self._handler._workers = handler_workers
|
||||
else:
|
||||
handler_workers = self._handler._workers
|
||||
self._r_worker.is_alive.side_effect = (True, False)
|
||||
self._w_worker.is_alive.return_value = False
|
||||
|
||||
self._handler.stop()
|
||||
|
||||
self._handler._stopped.set.assert_called_once_with()
|
||||
if not workers_started:
|
||||
mock_cleanup_handles.assert_called_once_with()
|
||||
else:
|
||||
self.assertFalse(mock_cleanup_handles.called)
|
||||
|
||||
if workers_started:
|
||||
mock_cancel_io.assert_called_once_with()
|
||||
self._r_worker.join.assert_called_once_with(0.5)
|
||||
self.assertFalse(self._w_worker.join.called)
|
||||
|
||||
self.assertEqual([], self._handler._workers)
|
||||
|
||||
def test_stop_pipe_handler_workers_started(self):
|
||||
self._test_stop_pipe_handler()
|
||||
|
||||
def test_stop_pipe_handler_workers_not_started(self):
|
||||
self._test_stop_pipe_handler(workers_started=False)
|
||||
|
||||
@mock.patch.object(namedpipe.NamedPipeHandler, '_close_pipe')
|
||||
def test_cleanup_handles(self, mock_close_pipe):
|
||||
self._mock_setup_pipe_handler()
|
||||
log_handle = self._handler._log_file_handle
|
||||
r_event = self._handler._r_overlapped.hEvent
|
||||
w_event = self._handler._w_overlapped.hEvent
|
||||
|
||||
self._handler._cleanup_handles()
|
||||
|
||||
mock_close_pipe.assert_called_once_with()
|
||||
log_handle.close.assert_called_once_with()
|
||||
self._ioutils.close_handle.assert_has_calls(
|
||||
[mock.call(r_event), mock.call(w_event)])
|
||||
|
||||
self.assertIsNone(self._handler._log_file_handle)
|
||||
self.assertIsNone(self._handler._r_overlapped.hEvent)
|
||||
self.assertIsNone(self._handler._w_overlapped.hEvent)
|
||||
|
||||
def test_setup_io_structures(self):
|
||||
self._handler._setup_io_structures()
|
||||
|
||||
self.assertEqual(self._ioutils.get_buffer.return_value,
|
||||
self._handler._r_buffer)
|
||||
self.assertEqual(self._ioutils.get_buffer.return_value,
|
||||
self._handler._w_buffer)
|
||||
self.assertEqual(
|
||||
self._ioutils.get_new_overlapped_structure.return_value,
|
||||
self._handler._r_overlapped)
|
||||
self.assertEqual(
|
||||
self._ioutils.get_new_overlapped_structure.return_value,
|
||||
self._handler._w_overlapped)
|
||||
self.assertEqual(
|
||||
self._ioutils.get_completion_routine.return_value,
|
||||
self._handler._r_completion_routine)
|
||||
self.assertEqual(
|
||||
self._ioutils.get_completion_routine.return_value,
|
||||
self._handler._w_completion_routine)
|
||||
self.assertIsNone(self._handler._log_file_handle)
|
||||
|
||||
self._ioutils.get_buffer.assert_has_calls(
|
||||
[mock.call(constants.SERIAL_CONSOLE_BUFFER_SIZE)] * 2)
|
||||
self._ioutils.get_completion_routine.assert_has_calls(
|
||||
[mock.call(self._handler._read_callback),
|
||||
mock.call()])
|
||||
|
||||
def test_open_pipe(self):
|
||||
self._handler._open_pipe()
|
||||
|
||||
self._ioutils.wait_named_pipe.assert_called_once_with(
|
||||
mock.sentinel.pipe_name)
|
||||
self._ioutils.open.assert_called_once_with(
|
||||
mock.sentinel.pipe_name,
|
||||
desired_access=(ioutils.GENERIC_READ | ioutils.GENERIC_WRITE),
|
||||
share_mode=(ioutils.FILE_SHARE_READ | ioutils.FILE_SHARE_WRITE),
|
||||
creation_disposition=ioutils.OPEN_EXISTING,
|
||||
flags_and_attributes=ioutils.FILE_FLAG_OVERLAPPED)
|
||||
|
||||
self.assertEqual(self._ioutils.open.return_value,
|
||||
self._handler._pipe_handle)
|
||||
|
||||
def test_close_pipe(self):
|
||||
self._mock_setup_pipe_handler()
|
||||
|
||||
self._handler._close_pipe()
|
||||
|
||||
self._ioutils.close_handle.assert_called_once_with(
|
||||
mock.sentinel.pipe_handle)
|
||||
self.assertIsNone(self._handler._pipe_handle)
|
||||
|
||||
def test_cancel_io(self):
|
||||
self._mock_setup_pipe_handler()
|
||||
|
||||
self._handler._cancel_io()
|
||||
|
||||
overlapped_structures = [self._handler._r_overlapped,
|
||||
self._handler._w_overlapped]
|
||||
|
||||
self._ioutils.cancel_io.assert_has_calls(
|
||||
[mock.call(self._handler._pipe_handle,
|
||||
overlapped_structure,
|
||||
ignore_invalid_handle=True)
|
||||
for overlapped_structure in overlapped_structures])
|
||||
|
||||
@mock.patch.object(namedpipe.NamedPipeHandler, '_start_io_worker')
|
||||
def test_read_from_pipe(self, mock_start_worker):
|
||||
self._mock_setup_pipe_handler()
|
||||
|
||||
self._handler._read_from_pipe()
|
||||
|
||||
mock_start_worker.assert_called_once_with(
|
||||
self._ioutils.read,
|
||||
self._handler._r_buffer,
|
||||
self._handler._r_overlapped,
|
||||
self._handler._r_completion_routine)
|
||||
|
||||
@mock.patch.object(namedpipe.NamedPipeHandler, '_start_io_worker')
|
||||
def test_write_to_pipe(self, mock_start_worker):
|
||||
self._mock_setup_pipe_handler()
|
||||
|
||||
self._handler._write_to_pipe()
|
||||
|
||||
mock_start_worker.assert_called_once_with(
|
||||
self._ioutils.write,
|
||||
self._handler._w_buffer,
|
||||
self._handler._w_overlapped,
|
||||
self._handler._w_completion_routine,
|
||||
self._handler._get_data_to_write)
|
||||
|
||||
@mock.patch.object(namedpipe.NamedPipeHandler, '_cleanup_handles')
|
||||
def _test_start_io_worker(self, mock_cleanup_handles,
|
||||
buff_update_func=None, exception=None):
|
||||
self._handler._stopped.isSet.side_effect = [False, True]
|
||||
self._handler._pipe_handle = mock.sentinel.pipe_handle
|
||||
self._handler.stop = mock.Mock()
|
||||
|
||||
io_func = mock.Mock(side_effect=exception)
|
||||
fake_buffer = 'fake_buffer'
|
||||
|
||||
self._handler._start_io_worker(io_func, fake_buffer,
|
||||
mock.sentinel.overlapped_structure,
|
||||
mock.sentinel.completion_routine,
|
||||
buff_update_func)
|
||||
|
||||
if buff_update_func:
|
||||
num_bytes = buff_update_func()
|
||||
else:
|
||||
num_bytes = len(fake_buffer)
|
||||
|
||||
io_func.assert_called_once_with(mock.sentinel.pipe_handle,
|
||||
fake_buffer, num_bytes,
|
||||
mock.sentinel.overlapped_structure,
|
||||
mock.sentinel.completion_routine)
|
||||
|
||||
if exception:
|
||||
self._handler._stopped.set.assert_called_once_with()
|
||||
mock_cleanup_handles.assert_called_once_with()
|
||||
|
||||
def test_start_io_worker(self):
|
||||
self._test_start_io_worker()
|
||||
|
||||
def test_start_io_worker_with_buffer_update_method(self):
|
||||
self._test_start_io_worker(buff_update_func=mock.Mock())
|
||||
|
||||
def test_start_io_worker_exception(self):
|
||||
self._test_start_io_worker(exception=IOError)
|
||||
|
||||
@mock.patch.object(namedpipe.NamedPipeHandler, '_write_to_log')
|
||||
def test_read_callback(self, mock_write_to_log):
|
||||
self._mock_setup_pipe_handler()
|
||||
fake_data = self._ioutils.get_buffer_data.return_value
|
||||
|
||||
self._handler._read_callback(mock.sentinel.num_bytes)
|
||||
|
||||
self._ioutils.get_buffer_data.assert_called_once_with(
|
||||
self._handler._r_buffer, mock.sentinel.num_bytes)
|
||||
self._mock_output_queue.put.assert_called_once_with(fake_data)
|
||||
mock_write_to_log.assert_called_once_with(fake_data)
|
||||
|
||||
@mock.patch.object(namedpipe, 'time')
|
||||
def test_get_data_to_write(self, mock_time):
|
||||
self._mock_setup_pipe_handler()
|
||||
self._handler._stopped.isSet.side_effect = [False, False]
|
||||
self._mock_client_connected.isSet.side_effect = [False, True]
|
||||
fake_data = 'fake input data'
|
||||
self._mock_input_queue.get.return_value = fake_data
|
||||
|
||||
num_bytes = self._handler._get_data_to_write()
|
||||
|
||||
mock_time.sleep.assert_called_once_with(1)
|
||||
self._ioutils.write_buffer_data.assert_called_once_with(
|
||||
self._handler._w_buffer, fake_data)
|
||||
self.assertEqual(len(fake_data), num_bytes)
|
||||
|
||||
@mock.patch.object(namedpipe.NamedPipeHandler, '_rotate_logs')
|
||||
def _test_write_to_log(self, mock_rotate_logs, size_exceeded=False):
|
||||
self._mock_setup_pipe_handler()
|
||||
self._handler._stopped.isSet.return_value = False
|
||||
fake_handle = self._handler._log_file_handle
|
||||
fake_handle.tell.return_value = (constants.MAX_CONSOLE_LOG_FILE_SIZE
|
||||
if size_exceeded else 0)
|
||||
fake_data = 'fake_data'
|
||||
|
||||
self._handler._write_to_log(fake_data)
|
||||
|
||||
if size_exceeded:
|
||||
mock_rotate_logs.assert_called_once_with()
|
||||
|
||||
self._handler._log_file_handle.write.assert_called_once_with(
|
||||
fake_data)
|
||||
|
||||
def test_write_to_log(self):
|
||||
self._test_write_to_log()
|
||||
|
||||
def test_write_to_log_size_exceeded(self):
|
||||
self._test_write_to_log(size_exceeded=True)
|
||||
|
||||
@mock.patch.object(namedpipe.NamedPipeHandler, '_retry_if_file_in_use')
|
||||
@mock.patch.object(builtins, 'open')
|
||||
@mock.patch.object(namedpipe, 'os')
|
||||
def test_rotate_logs(self, mock_os, mock_open, mock_exec_retry):
|
||||
fake_archived_log_path = self._FAKE_LOG_PATH + '.1'
|
||||
mock_os.path.exists.return_value = True
|
||||
|
||||
self._mock_setup_pipe_handler()
|
||||
fake_handle = self._handler._log_file_handle
|
||||
|
||||
self._handler._rotate_logs()
|
||||
|
||||
fake_handle.flush.assert_called_once_with()
|
||||
fake_handle.close.assert_called_once_with()
|
||||
mock_os.path.exists.assert_called_once_with(
|
||||
fake_archived_log_path)
|
||||
|
||||
mock_exec_retry.assert_has_calls([mock.call(mock_os.remove,
|
||||
fake_archived_log_path),
|
||||
mock.call(mock_os.rename,
|
||||
self._FAKE_LOG_PATH,
|
||||
fake_archived_log_path)])
|
||||
|
||||
mock_open.assert_called_once_with(self._FAKE_LOG_PATH, 'ab', 1)
|
||||
self.assertEqual(mock_open.return_value,
|
||||
self._handler._log_file_handle)
|
||||
|
||||
@mock.patch.object(namedpipe, 'time')
|
||||
def test_retry_if_file_in_use_exceeded_retries(self, mock_time):
|
||||
class FakeWindowsException(Exception):
|
||||
errno = errno.EACCES
|
||||
|
||||
raise_count = self._handler._MAX_LOG_ROTATE_RETRIES + 1
|
||||
mock_func_side_eff = [FakeWindowsException] * raise_count
|
||||
mock_func = mock.Mock(side_effect=mock_func_side_eff)
|
||||
|
||||
with mock.patch.object(namedpipe, 'WindowsError',
|
||||
FakeWindowsException, create=True):
|
||||
self.assertRaises(FakeWindowsException,
|
||||
self._handler._retry_if_file_in_use,
|
||||
mock_func, mock.sentinel.arg)
|
||||
mock_time.sleep.assert_has_calls(
|
||||
[mock.call(1)] * self._handler._MAX_LOG_ROTATE_RETRIES)
|
@ -1,409 +0,0 @@
|
||||
# Copyright 2015 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
|
||||
from os_win import exceptions
|
||||
from os_win.tests import test_base
|
||||
from os_win.utils import _wqlutils
|
||||
from os_win.utils.metrics import metricsutils
|
||||
|
||||
|
||||
class MetricsUtilsTestCase(test_base.OsWinBaseTestCase):
|
||||
"""Unit tests for the Hyper-V MetricsUtils class."""
|
||||
|
||||
_FAKE_RET_VAL = 0
|
||||
_FAKE_PORT = "fake's port name"
|
||||
|
||||
def setUp(self):
|
||||
super(MetricsUtilsTestCase, self).setUp()
|
||||
self.utils = metricsutils.MetricsUtils()
|
||||
self.utils._conn_attr = mock.MagicMock()
|
||||
|
||||
def test_cache_metrics_defs(self):
|
||||
mock_metric_def = mock.Mock(ElementName=mock.sentinel.elementname)
|
||||
self.utils._conn.CIM_BaseMetricDefinition.return_value = [
|
||||
mock_metric_def]
|
||||
self.utils._cache_metrics_defs()
|
||||
expected_cache_metrics = {mock.sentinel.elementname: mock_metric_def}
|
||||
self.assertEqual(expected_cache_metrics, self.utils._metrics_defs_obj)
|
||||
|
||||
@mock.patch.object(metricsutils.MetricsUtils, '_enable_metrics')
|
||||
@mock.patch.object(metricsutils.MetricsUtils, '_get_vm_resources')
|
||||
@mock.patch.object(metricsutils.MetricsUtils, '_get_vm')
|
||||
def test_enable_vm_metrics_collection(
|
||||
self, mock_get_vm, mock_get_vm_resources, mock_enable_metrics):
|
||||
mock_vm = mock_get_vm.return_value
|
||||
mock_disk = mock.MagicMock()
|
||||
mock_dvd = mock.MagicMock(
|
||||
ResourceSubType=self.utils._DVD_DISK_RES_SUB_TYPE)
|
||||
mock_get_vm_resources.return_value = [mock_disk, mock_dvd]
|
||||
|
||||
self.utils.enable_vm_metrics_collection(mock.sentinel.vm_name)
|
||||
|
||||
metrics_names = [self.utils._CPU_METRICS,
|
||||
self.utils._MEMORY_METRICS]
|
||||
mock_enable_metrics.assert_has_calls(
|
||||
[mock.call(mock_disk), mock.call(mock_vm, metrics_names)])
|
||||
|
||||
@mock.patch.object(metricsutils.MetricsUtils, '_enable_metrics')
|
||||
@mock.patch.object(metricsutils.MetricsUtils, '_get_switch_port')
|
||||
def test_enable_switch_port_metrics_collection(self, mock_get_port,
|
||||
mock_enable_metrics):
|
||||
self.utils.enable_port_metrics_collection(mock.sentinel.port_name)
|
||||
|
||||
mock_get_port.assert_called_once_with(mock.sentinel.port_name)
|
||||
metrics = [self.utils._NET_IN_METRICS,
|
||||
self.utils._NET_OUT_METRICS]
|
||||
mock_enable_metrics.assert_called_once_with(
|
||||
mock_get_port.return_value, metrics)
|
||||
|
||||
def _check_enable_metrics(self, metrics=None, definition=None):
|
||||
mock_element = mock.MagicMock()
|
||||
|
||||
self.utils._enable_metrics(mock_element, metrics)
|
||||
|
||||
self.utils._metrics_svc.ControlMetrics.assert_called_once_with(
|
||||
Subject=mock_element.path_.return_value,
|
||||
Definition=definition,
|
||||
MetricCollectionEnabled=self.utils._METRICS_ENABLED)
|
||||
|
||||
def test_enable_metrics_no_metrics(self):
|
||||
self._check_enable_metrics()
|
||||
|
||||
def test_enable_metrics(self):
|
||||
metrics_name = self.utils._CPU_METRICS
|
||||
metrics_def = mock.MagicMock()
|
||||
self.utils._metrics_defs_obj = {metrics_name: metrics_def}
|
||||
self._check_enable_metrics([metrics_name, mock.sentinel.metrics_name],
|
||||
metrics_def.path_.return_value)
|
||||
|
||||
@mock.patch.object(metricsutils.MetricsUtils, '_get_metrics')
|
||||
@mock.patch.object(metricsutils.MetricsUtils, '_get_vm_resources')
|
||||
@mock.patch.object(metricsutils.MetricsUtils, '_get_vm')
|
||||
def test_get_cpu_metrics(self, mock_get_vm, mock_get_vm_resources,
|
||||
mock_get_metrics):
|
||||
fake_cpu_count = 2
|
||||
fake_uptime = 1000
|
||||
fake_cpu_metrics_val = 2000
|
||||
|
||||
self.utils._metrics_defs_obj = {
|
||||
self.utils._CPU_METRICS: mock.sentinel.metrics}
|
||||
|
||||
mock_vm = mock_get_vm.return_value
|
||||
mock_vm.OnTimeInMilliseconds = fake_uptime
|
||||
mock_cpu = mock.MagicMock(VirtualQuantity=fake_cpu_count)
|
||||
mock_get_vm_resources.return_value = [mock_cpu]
|
||||
|
||||
mock_metric = mock.MagicMock(MetricValue=fake_cpu_metrics_val)
|
||||
mock_get_metrics.return_value = [mock_metric]
|
||||
|
||||
cpu_metrics = self.utils.get_cpu_metrics(mock.sentinel.vm_name)
|
||||
|
||||
self.assertEqual(3, len(cpu_metrics))
|
||||
self.assertEqual(fake_cpu_metrics_val, cpu_metrics[0])
|
||||
self.assertEqual(fake_cpu_count, cpu_metrics[1])
|
||||
self.assertEqual(fake_uptime, cpu_metrics[2])
|
||||
|
||||
mock_get_vm.assert_called_once_with(mock.sentinel.vm_name)
|
||||
mock_get_vm_resources.assert_called_once_with(
|
||||
mock.sentinel.vm_name, self.utils._PROCESSOR_SETTING_DATA_CLASS)
|
||||
mock_get_metrics.assert_called_once_with(mock_vm,
|
||||
mock.sentinel.metrics)
|
||||
|
||||
@mock.patch.object(metricsutils.MetricsUtils, '_get_metrics')
|
||||
@mock.patch.object(metricsutils.MetricsUtils, '_get_vm')
|
||||
def test_get_memory_metrics(self, mock_get_vm, mock_get_metrics):
|
||||
mock_vm = mock_get_vm.return_value
|
||||
self.utils._metrics_defs_obj = {
|
||||
self.utils._MEMORY_METRICS: mock.sentinel.metrics}
|
||||
|
||||
metrics_memory = mock.MagicMock()
|
||||
metrics_memory.MetricValue = 3
|
||||
mock_get_metrics.return_value = [metrics_memory]
|
||||
|
||||
response = self.utils.get_memory_metrics(mock.sentinel.vm_name)
|
||||
|
||||
self.assertEqual(3, response)
|
||||
mock_get_vm.assert_called_once_with(mock.sentinel.vm_name)
|
||||
mock_get_metrics.assert_called_once_with(mock_vm,
|
||||
mock.sentinel.metrics)
|
||||
|
||||
@mock.patch.object(_wqlutils, 'get_element_associated_class')
|
||||
@mock.patch.object(metricsutils.MetricsUtils,
|
||||
'_sum_metrics_values_by_defs')
|
||||
@mock.patch.object(metricsutils.MetricsUtils,
|
||||
'_get_metrics_value_instances')
|
||||
@mock.patch.object(metricsutils.MetricsUtils, '_get_vm_resources')
|
||||
def test_get_vnic_metrics(self, mock_get_vm_resources,
|
||||
mock_get_value_instances, mock_sum_by_defs,
|
||||
mock_get_element_associated_class):
|
||||
fake_rx_mb = 1000
|
||||
fake_tx_mb = 2000
|
||||
|
||||
self.utils._metrics_defs_obj = {
|
||||
self.utils._NET_IN_METRICS: mock.sentinel.net_in_metrics,
|
||||
self.utils._NET_OUT_METRICS: mock.sentinel.net_out_metrics}
|
||||
|
||||
mock_port = mock.MagicMock(Parent=mock.sentinel.vnic_path)
|
||||
mock_vnic = mock.MagicMock(ElementName=mock.sentinel.element_name,
|
||||
Address=mock.sentinel.address)
|
||||
mock_vnic.path_.return_value = mock.sentinel.vnic_path
|
||||
mock_get_vm_resources.side_effect = [[mock_port], [mock_vnic]]
|
||||
mock_sum_by_defs.return_value = [fake_rx_mb, fake_tx_mb]
|
||||
|
||||
vnic_metrics = list(
|
||||
self.utils.get_vnic_metrics(mock.sentinel.vm_name))
|
||||
|
||||
self.assertEqual(1, len(vnic_metrics))
|
||||
self.assertEqual(fake_rx_mb, vnic_metrics[0]['rx_mb'])
|
||||
self.assertEqual(fake_tx_mb, vnic_metrics[0]['tx_mb'])
|
||||
self.assertEqual(mock.sentinel.element_name,
|
||||
vnic_metrics[0]['element_name'])
|
||||
self.assertEqual(mock.sentinel.address, vnic_metrics[0]['address'])
|
||||
|
||||
mock_get_vm_resources.assert_has_calls([
|
||||
mock.call(mock.sentinel.vm_name, self.utils._PORT_ALLOC_SET_DATA),
|
||||
mock.call(mock.sentinel.vm_name,
|
||||
self.utils._SYNTH_ETH_PORT_SET_DATA)])
|
||||
mock_get_value_instances.assert_called_once_with(
|
||||
mock_get_element_associated_class.return_value,
|
||||
self.utils._BASE_METRICS_VALUE)
|
||||
mock_sum_by_defs.assert_called_once_with(
|
||||
mock_get_value_instances.return_value,
|
||||
[mock.sentinel.net_in_metrics, mock.sentinel.net_out_metrics])
|
||||
|
||||
@mock.patch.object(metricsutils.MetricsUtils, '_get_metrics_values')
|
||||
@mock.patch.object(metricsutils.MetricsUtils, '_get_vm_resources')
|
||||
def test_get_disk_metrics(self, mock_get_vm_resources,
|
||||
mock_get_metrics_values):
|
||||
fake_read_mb = 1000
|
||||
fake_write_mb = 2000
|
||||
|
||||
self.utils._metrics_defs_obj = {
|
||||
self.utils._DISK_RD_METRICS: mock.sentinel.disk_rd_metrics,
|
||||
self.utils._DISK_WR_METRICS: mock.sentinel.disk_wr_metrics}
|
||||
|
||||
mock_disk = mock.MagicMock(HostResource=[mock.sentinel.host_resource],
|
||||
InstanceID=mock.sentinel.instance_id)
|
||||
mock_get_vm_resources.return_value = [mock_disk]
|
||||
mock_get_metrics_values.return_value = [fake_read_mb, fake_write_mb]
|
||||
|
||||
disk_metrics = list(
|
||||
self.utils.get_disk_metrics(mock.sentinel.vm_name))
|
||||
|
||||
self.assertEqual(1, len(disk_metrics))
|
||||
self.assertEqual(fake_read_mb, disk_metrics[0]['read_mb'])
|
||||
self.assertEqual(fake_write_mb, disk_metrics[0]['write_mb'])
|
||||
self.assertEqual(mock.sentinel.instance_id,
|
||||
disk_metrics[0]['instance_id'])
|
||||
self.assertEqual(mock.sentinel.host_resource,
|
||||
disk_metrics[0]['host_resource'])
|
||||
|
||||
mock_get_vm_resources.assert_called_once_with(
|
||||
mock.sentinel.vm_name,
|
||||
self.utils._STORAGE_ALLOC_SETTING_DATA_CLASS)
|
||||
metrics = [mock.sentinel.disk_rd_metrics,
|
||||
mock.sentinel.disk_wr_metrics]
|
||||
mock_get_metrics_values.assert_called_once_with(mock_disk, metrics)
|
||||
|
||||
@mock.patch.object(metricsutils.MetricsUtils, '_get_metrics_values')
|
||||
@mock.patch.object(metricsutils.MetricsUtils, '_get_vm_resources')
|
||||
def test_get_disk_latency_metrics(self, mock_get_vm_resources,
|
||||
mock_get_metrics_values):
|
||||
self.utils._metrics_defs_obj = {
|
||||
self.utils._DISK_LATENCY_METRICS: mock.sentinel.metrics}
|
||||
|
||||
mock_disk = mock.MagicMock(HostResource=[mock.sentinel.host_resource],
|
||||
InstanceID=mock.sentinel.instance_id)
|
||||
mock_get_vm_resources.return_value = [mock_disk]
|
||||
mock_get_metrics_values.return_value = [mock.sentinel.latency]
|
||||
|
||||
disk_metrics = list(
|
||||
self.utils.get_disk_latency_metrics(mock.sentinel.vm_name))
|
||||
|
||||
self.assertEqual(1, len(disk_metrics))
|
||||
self.assertEqual(mock.sentinel.latency,
|
||||
disk_metrics[0]['disk_latency'])
|
||||
self.assertEqual(mock.sentinel.instance_id,
|
||||
disk_metrics[0]['instance_id'])
|
||||
mock_get_vm_resources.assert_called_once_with(
|
||||
mock.sentinel.vm_name,
|
||||
self.utils._STORAGE_ALLOC_SETTING_DATA_CLASS)
|
||||
mock_get_metrics_values.assert_called_once_with(
|
||||
mock_disk, [mock.sentinel.metrics])
|
||||
|
||||
@mock.patch.object(metricsutils.MetricsUtils, '_get_metrics_values')
|
||||
@mock.patch.object(metricsutils.MetricsUtils, '_get_vm_resources')
|
||||
def test_get_disk_iops_metrics(self, mock_get_vm_resources,
|
||||
mock_get_metrics_values):
|
||||
self.utils._metrics_defs_obj = {
|
||||
self.utils._DISK_IOPS_METRICS: mock.sentinel.metrics}
|
||||
mock_disk = mock.MagicMock(HostResource=[mock.sentinel.host_resource],
|
||||
InstanceID=mock.sentinel.instance_id)
|
||||
mock_get_vm_resources.return_value = [mock_disk]
|
||||
mock_get_metrics_values.return_value = [mock.sentinel.iops]
|
||||
|
||||
disk_metrics = list(
|
||||
self.utils.get_disk_iops_count(mock.sentinel.vm_name))
|
||||
|
||||
self.assertEqual(1, len(disk_metrics))
|
||||
self.assertEqual(mock.sentinel.iops,
|
||||
disk_metrics[0]['iops_count'])
|
||||
self.assertEqual(mock.sentinel.instance_id,
|
||||
disk_metrics[0]['instance_id'])
|
||||
mock_get_vm_resources.assert_called_once_with(
|
||||
mock.sentinel.vm_name,
|
||||
self.utils._STORAGE_ALLOC_SETTING_DATA_CLASS)
|
||||
mock_get_metrics_values.assert_called_once_with(
|
||||
mock_disk, [mock.sentinel.metrics])
|
||||
|
||||
def test_sum_metrics_values(self):
|
||||
mock_metric = mock.MagicMock(MetricValue='100')
|
||||
result = self.utils._sum_metrics_values([mock_metric] * 2)
|
||||
self.assertEqual(200, result)
|
||||
|
||||
def test_sum_metrics_values_by_defs(self):
|
||||
mock_metric = mock.MagicMock(MetricDefinitionId=mock.sentinel.def_id,
|
||||
MetricValue='100')
|
||||
mock_metric_useless = mock.MagicMock(MetricValue='200')
|
||||
mock_metric_def = mock.MagicMock(Id=mock.sentinel.def_id)
|
||||
|
||||
result = self.utils._sum_metrics_values_by_defs(
|
||||
[mock_metric, mock_metric_useless], [None, mock_metric_def])
|
||||
|
||||
self.assertEqual([0, 100], result)
|
||||
|
||||
def test_get_metrics_value_instances(self):
|
||||
FAKE_CLASS_NAME = "FAKE_CLASS"
|
||||
mock_el_metric = mock.MagicMock()
|
||||
mock_el_metric_2 = mock.MagicMock()
|
||||
mock_el_metric_2.path.return_value = mock.Mock(Class=FAKE_CLASS_NAME)
|
||||
|
||||
self.utils._conn.Msvm_MetricForME.side_effect = [
|
||||
[], [mock.Mock(Dependent=mock_el_metric_2)]]
|
||||
|
||||
returned = self.utils._get_metrics_value_instances(
|
||||
[mock_el_metric, mock_el_metric_2], FAKE_CLASS_NAME)
|
||||
|
||||
expected_return = [mock_el_metric_2]
|
||||
self.assertEqual(expected_return, returned)
|
||||
|
||||
@mock.patch.object(metricsutils.MetricsUtils,
|
||||
'_sum_metrics_values_by_defs')
|
||||
def test_get_metrics_values(self, mock_sum_by_defs):
|
||||
mock_element = mock.MagicMock()
|
||||
self.utils._conn.Msvm_MetricForME.return_value = [
|
||||
mock.Mock(Dependent=mock.sentinel.metric),
|
||||
mock.Mock(Dependent=mock.sentinel.another_metric)]
|
||||
|
||||
resulted_metrics_sum = self.utils._get_metrics_values(
|
||||
mock_element, mock.sentinel.metrics_defs)
|
||||
|
||||
self.utils._conn.Msvm_MetricForME.assert_called_once_with(
|
||||
Antecedent=mock_element.path_.return_value)
|
||||
mock_sum_by_defs.assert_called_once_with(
|
||||
[mock.sentinel.metric, mock.sentinel.another_metric],
|
||||
mock.sentinel.metrics_defs)
|
||||
expected_metrics_sum = mock_sum_by_defs.return_value
|
||||
self.assertEqual(expected_metrics_sum, resulted_metrics_sum)
|
||||
|
||||
@mock.patch.object(metricsutils.MetricsUtils, '_filter_metrics')
|
||||
def test_get_metrics(self, mock_filter_metrics):
|
||||
mock_metric = mock.MagicMock()
|
||||
mock_element = mock.MagicMock()
|
||||
self.utils._conn.Msvm_MetricForME.return_value = [mock_metric]
|
||||
|
||||
result = self.utils._get_metrics(mock_element,
|
||||
mock.sentinel.metrics_def)
|
||||
|
||||
self.assertEqual(mock_filter_metrics.return_value, result)
|
||||
self.utils._conn.Msvm_MetricForME.assert_called_once_with(
|
||||
Antecedent=mock_element.path_.return_value)
|
||||
mock_filter_metrics.assert_called_once_with(
|
||||
[mock_metric.Dependent],
|
||||
mock.sentinel.metrics_def)
|
||||
|
||||
def test_filter_metrics(self):
|
||||
mock_metric = mock.MagicMock(MetricDefinitionId=mock.sentinel.def_id)
|
||||
mock_bad_metric = mock.MagicMock()
|
||||
mock_metric_def = mock.MagicMock(Id=mock.sentinel.def_id)
|
||||
|
||||
result = self.utils._filter_metrics([mock_bad_metric, mock_metric],
|
||||
mock_metric_def)
|
||||
|
||||
self.assertEqual([mock_metric], result)
|
||||
|
||||
@mock.patch.object(_wqlutils, 'get_element_associated_class')
|
||||
@mock.patch.object(metricsutils.MetricsUtils, '_get_vm_setting_data')
|
||||
def test_get_vm_resources(self, mock_get_vm_setting_data,
|
||||
mock_get_element_associated_class):
|
||||
result = self.utils._get_vm_resources(mock.sentinel.vm_name,
|
||||
mock.sentinel.resource_class)
|
||||
|
||||
mock_get_vm_setting_data.assert_called_once_with(mock.sentinel.vm_name)
|
||||
vm_setting_data = mock_get_vm_setting_data.return_value
|
||||
mock_get_element_associated_class.assert_called_once_with(
|
||||
self.utils._conn, mock.sentinel.resource_class,
|
||||
element_instance_id=vm_setting_data.InstanceID)
|
||||
self.assertEqual(mock_get_element_associated_class.return_value,
|
||||
result)
|
||||
|
||||
@mock.patch.object(metricsutils.MetricsUtils, '_unique_result')
|
||||
def test_get_vm(self, mock_unique_result):
|
||||
result = self.utils._get_vm(mock.sentinel.vm_name)
|
||||
|
||||
self.assertEqual(mock_unique_result.return_value, result)
|
||||
conn_class = self.utils._conn.Msvm_ComputerSystem
|
||||
conn_class.assert_called_once_with(ElementName=mock.sentinel.vm_name)
|
||||
mock_unique_result.assert_called_once_with(conn_class.return_value,
|
||||
mock.sentinel.vm_name)
|
||||
|
||||
@mock.patch.object(metricsutils.MetricsUtils, '_unique_result')
|
||||
def test_get_switch_port(self, mock_unique_result):
|
||||
result = self.utils._get_switch_port(mock.sentinel.port_name)
|
||||
|
||||
self.assertEqual(mock_unique_result.return_value, result)
|
||||
conn_class = self.utils._conn.Msvm_SyntheticEthernetPortSettingData
|
||||
conn_class.assert_called_once_with(ElementName=mock.sentinel.port_name)
|
||||
mock_unique_result.assert_called_once_with(conn_class.return_value,
|
||||
mock.sentinel.port_name)
|
||||
|
||||
@mock.patch.object(metricsutils.MetricsUtils, '_unique_result')
|
||||
def test_get_vm_setting_data(self, mock_unique_result):
|
||||
result = self.utils._get_vm_setting_data(mock.sentinel.vm_name)
|
||||
|
||||
self.assertEqual(mock_unique_result.return_value, result)
|
||||
conn_class = self.utils._conn.Msvm_VirtualSystemSettingData
|
||||
conn_class.assert_called_once_with(
|
||||
ElementName=mock.sentinel.vm_name,
|
||||
VirtualSystemType=self.utils._VIRTUAL_SYSTEM_TYPE_REALIZED)
|
||||
mock_unique_result.assert_called_once_with(conn_class.return_value,
|
||||
mock.sentinel.vm_name)
|
||||
|
||||
def test_unique_result_not_found(self):
|
||||
self.assertRaises(exceptions.NotFound,
|
||||
self.utils._unique_result,
|
||||
[], mock.sentinel.resource_name)
|
||||
|
||||
def test_unique_result_duplicate(self):
|
||||
self.assertRaises(exceptions.OSWinException,
|
||||
self.utils._unique_result,
|
||||
[mock.ANY, mock.ANY], mock.sentinel.resource_name)
|
||||
|
||||
def test_unique_result(self):
|
||||
result = self.utils._unique_result([mock.sentinel.obj],
|
||||
mock.sentinel.resource_name)
|
||||
self.assertEqual(mock.sentinel.obj, result)
|
@ -1,770 +0,0 @@
|
||||
# Copyright 2014 Cloudbase Solutions Srl
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
|
||||
from os_win import exceptions
|
||||
from os_win.tests import test_base
|
||||
from os_win.utils import _wqlutils
|
||||
from os_win.utils.network import networkutils
|
||||
|
||||
|
||||
class NetworkUtilsTestCase(test_base.OsWinBaseTestCase):
|
||||
"""Unit tests for the Hyper-V NetworkUtils class."""
|
||||
|
||||
_FAKE_VSWITCH_NAME = "fake_vswitch_name"
|
||||
_FAKE_PORT_NAME = "fake_port_name"
|
||||
_FAKE_JOB_PATH = 'fake_job_path'
|
||||
_FAKE_RET_VAL = 0
|
||||
_FAKE_RES_PATH = "fake_res_path"
|
||||
_FAKE_VSWITCH = "fake_vswitch"
|
||||
_FAKE_VLAN_ID = "fake_vlan_id"
|
||||
_FAKE_CLASS_NAME = "fake_class_name"
|
||||
_FAKE_ELEMENT_NAME = "fake_element_name"
|
||||
_FAKE_HYPERV_VM_STATE = 'fake_hyperv_state'
|
||||
|
||||
_FAKE_ACL_ACT = 'fake_acl_action'
|
||||
_FAKE_ACL_DIR = 'fake_acl_dir'
|
||||
_FAKE_ACL_TYPE = 'fake_acl_type'
|
||||
_FAKE_LOCAL_PORT = 'fake_local_port'
|
||||
_FAKE_PROTOCOL = 'fake_port_protocol'
|
||||
_FAKE_REMOTE_ADDR = '0.0.0.0/0'
|
||||
_FAKE_WEIGHT = 'fake_weight'
|
||||
|
||||
_FAKE_BAD_INSTANCE_ID = 'bad_instance_id'
|
||||
_FAKE_INSTANCE_ID = (
|
||||
r"Microsoft:609CBAAD-BC13-4A65-AADE-AD95861FE394\\55349F56-72AB-4FA3-"
|
||||
"B5FE-6A30A511A419\\C\\776E0BA7-94A1-41C8-8F28-951F524251B5\\77A43184-"
|
||||
"5444-49BF-ABE0-2210B72ABA73")
|
||||
|
||||
_MSVM_VIRTUAL_SWITCH = 'Msvm_VirtualEthernetSwitch'
|
||||
|
||||
def setUp(self):
|
||||
super(NetworkUtilsTestCase, self).setUp()
|
||||
self.netutils = networkutils.NetworkUtils()
|
||||
self.netutils._conn_attr = mock.MagicMock()
|
||||
self.netutils._jobutils = mock.MagicMock()
|
||||
|
||||
def test_init_caches(self):
|
||||
self.netutils._switches = {}
|
||||
self.netutils._switch_ports = {}
|
||||
self.netutils._vlan_sds = {}
|
||||
self.netutils._vsid_sds = {}
|
||||
conn = self.netutils._conn
|
||||
|
||||
mock_vswitch = mock.MagicMock(ElementName=mock.sentinel.vswitch_name)
|
||||
conn.Msvm_VirtualEthernetSwitch.return_value = [mock_vswitch]
|
||||
|
||||
mock_port = mock.MagicMock(ElementName=mock.sentinel.port_name)
|
||||
conn.Msvm_EthernetPortAllocationSettingData.return_value = [
|
||||
mock_port]
|
||||
|
||||
mock_sd = mock.MagicMock(InstanceID=self._FAKE_INSTANCE_ID)
|
||||
mock_bad_sd = mock.MagicMock(InstanceID=self._FAKE_BAD_INSTANCE_ID)
|
||||
conn.Msvm_EthernetSwitchPortVlanSettingData.return_value = [
|
||||
mock_bad_sd, mock_sd]
|
||||
conn.Msvm_EthernetSwitchPortSecuritySettingData.return_value = [
|
||||
mock_bad_sd, mock_sd]
|
||||
|
||||
self.netutils.init_caches()
|
||||
|
||||
self.assertEqual({mock.sentinel.vswitch_name: mock_vswitch},
|
||||
self.netutils._switches)
|
||||
self.assertEqual({mock.sentinel.port_name: mock_port},
|
||||
self.netutils._switch_ports)
|
||||
self.assertEqual([mock_sd], list(self.netutils._vlan_sds.values()))
|
||||
self.assertEqual([mock_sd], list(self.netutils._vsid_sds.values()))
|
||||
|
||||
def test_update_cache(self):
|
||||
conn = self.netutils._conn
|
||||
mock_port = mock.MagicMock(ElementName=mock.sentinel.port_name)
|
||||
conn.Msvm_EthernetPortAllocationSettingData.return_value = [
|
||||
mock_port]
|
||||
|
||||
self.netutils.update_cache()
|
||||
|
||||
self.assertEqual({mock.sentinel.port_name: mock_port},
|
||||
self.netutils._switch_ports)
|
||||
|
||||
def test_clear_port_sg_acls_cache(self):
|
||||
self.netutils._sg_acl_sds[mock.sentinel.port_id] = [mock.sentinel.acl]
|
||||
self.netutils.clear_port_sg_acls_cache(mock.sentinel.port_id)
|
||||
self.assertNotIn(mock.sentinel.acl, self.netutils._sg_acl_sds)
|
||||
|
||||
@mock.patch.object(networkutils.NetworkUtils, '_get_vswitch_external_port')
|
||||
def test_get_vswitch_external_network_name(self, mock_get_vswitch_port):
|
||||
mock_get_vswitch_port.return_value.ElementName = (
|
||||
mock.sentinel.network_name)
|
||||
result = self.netutils.get_vswitch_external_network_name(
|
||||
mock.sentinel.vswitch_name)
|
||||
self.assertEqual(mock.sentinel.network_name, result)
|
||||
|
||||
def test_get_vswitch_external_port(self):
|
||||
vswitch = mock.MagicMock(Name=mock.sentinel.vswitch_name)
|
||||
self.netutils._conn.Msvm_VirtualEthernetSwitch.return_value = [vswitch]
|
||||
|
||||
conn = self.netutils._conn
|
||||
ext_port = mock.MagicMock()
|
||||
lan_endpoint_assoc1 = mock.MagicMock()
|
||||
lan_endpoint_assoc2 = mock.Mock(SystemName=mock.sentinel.vswitch_name)
|
||||
self.netutils._conn.Msvm_ExternalEthernetPort.return_value = [ext_port]
|
||||
conn.Msvm_EthernetDeviceSAPImplementation.return_value = [
|
||||
lan_endpoint_assoc1]
|
||||
conn.Msvm_ActiveConnection.return_value = [
|
||||
mock.Mock(Antecedent=lan_endpoint_assoc2)]
|
||||
|
||||
result = self.netutils._get_vswitch_external_port(mock.sentinel.name)
|
||||
self.assertEqual(ext_port, result)
|
||||
conn.Msvm_EthernetDeviceSAPImplementation.assert_called_once_with(
|
||||
Antecedent=ext_port.path_.return_value)
|
||||
conn.Msvm_ActiveConnection.assert_called_once_with(
|
||||
Dependent=lan_endpoint_assoc1.Dependent.path_.return_value)
|
||||
|
||||
def test_vswitch_port_needed(self):
|
||||
self.assertFalse(self.netutils.vswitch_port_needed())
|
||||
|
||||
@mock.patch.object(networkutils.NetworkUtils, '_get_vnic_settings')
|
||||
def test_get_vnic_mac_address(self, mock_get_vnic_settings):
|
||||
mock_vnic = mock.MagicMock(Address=mock.sentinel.mac_address)
|
||||
mock_get_vnic_settings.return_value = mock_vnic
|
||||
|
||||
actual_mac_address = self.netutils.get_vnic_mac_address(
|
||||
mock.sentinel.switch_port_name)
|
||||
self.assertEqual(mock.sentinel.mac_address, actual_mac_address)
|
||||
|
||||
@mock.patch.object(networkutils, 'patcher')
|
||||
@mock.patch.object(networkutils.tpool, 'execute')
|
||||
@mock.patch.object(networkutils, 'wmi', create=True)
|
||||
@mock.patch.object(networkutils.NetworkUtils, '_get_event_wql_query')
|
||||
def test_get_vnic_event_listener(self, mock_get_event_query, mock_wmi,
|
||||
mock_execute, mock_patcher):
|
||||
mock_wmi.x_wmi_timed_out = ValueError
|
||||
event = mock.MagicMock()
|
||||
port_class = self.netutils._conn.Msvm_SyntheticEthernetPortSettingData
|
||||
wmi_event_listener = port_class.watch_for.return_value
|
||||
mock_execute.side_effect = [mock_wmi.x_wmi_timed_out, event]
|
||||
|
||||
# callback will raise an exception in order to stop iteration in the
|
||||
# listener.
|
||||
callback = mock.MagicMock(side_effect=TypeError)
|
||||
|
||||
returned_listener = self.netutils.get_vnic_event_listener(
|
||||
self.netutils.EVENT_TYPE_CREATE)
|
||||
self.assertRaises(TypeError, returned_listener, callback)
|
||||
|
||||
mock_get_event_query.assert_called_once_with(
|
||||
cls=self.netutils._VNIC_SET_DATA,
|
||||
event_type=self.netutils.EVENT_TYPE_CREATE,
|
||||
timeframe=2)
|
||||
port_class.watch_for.assert_called_once_with(
|
||||
mock_get_event_query.return_value)
|
||||
mock_execute.assert_has_calls(
|
||||
[mock.call(wmi_event_listener,
|
||||
self.netutils._VNIC_LISTENER_TIMEOUT_MS)] * 2)
|
||||
callback.assert_called_once_with(event.ElementName)
|
||||
|
||||
def test_get_event_wql_query(self):
|
||||
expected = ("SELECT * FROM %(event_type)s WITHIN %(timeframe)s "
|
||||
"WHERE TargetInstance ISA '%(class)s' AND "
|
||||
"%(like)s" % {
|
||||
'class': "FakeClass",
|
||||
'event_type': self.netutils.EVENT_TYPE_CREATE,
|
||||
'like': "TargetInstance.foo LIKE 'bar%'",
|
||||
'timeframe': 2})
|
||||
|
||||
query = self.netutils._get_event_wql_query(
|
||||
"FakeClass", self.netutils.EVENT_TYPE_CREATE, like=dict(foo="bar"))
|
||||
|
||||
self.assertEqual(expected, query)
|
||||
|
||||
def test_connect_vnic_to_vswitch_found(self):
|
||||
self._test_connect_vnic_to_vswitch(True)
|
||||
|
||||
def test_connect_vnic_to_vswitch_not_found(self):
|
||||
self._test_connect_vnic_to_vswitch(False)
|
||||
|
||||
def _test_connect_vnic_to_vswitch(self, found):
|
||||
self.netutils._get_vnic_settings = mock.MagicMock()
|
||||
|
||||
if not found:
|
||||
mock_vm = mock.MagicMock()
|
||||
self.netutils._get_vm_from_res_setting_data = mock.MagicMock(
|
||||
return_value=mock_vm)
|
||||
self.netutils._add_virt_resource = mock.MagicMock()
|
||||
else:
|
||||
self.netutils._modify_virt_resource = mock.MagicMock()
|
||||
|
||||
self.netutils._get_vswitch = mock.MagicMock()
|
||||
mock_port = self._mock_get_switch_port_alloc(found=found)
|
||||
mock_port.HostResource = []
|
||||
|
||||
self.netutils.connect_vnic_to_vswitch(self._FAKE_VSWITCH_NAME,
|
||||
self._FAKE_PORT_NAME)
|
||||
|
||||
if not found:
|
||||
mock_add_resource = self.netutils._jobutils.add_virt_resource
|
||||
mock_add_resource.assert_called_once_with(mock_port, mock_vm)
|
||||
else:
|
||||
mock_modify_resource = self.netutils._jobutils.modify_virt_resource
|
||||
mock_modify_resource.assert_called_once_with(mock_port)
|
||||
|
||||
def test_connect_vnic_to_vswitch_already_connected(self):
|
||||
mock_port = self._mock_get_switch_port_alloc()
|
||||
mock_port.HostResource = [mock.sentinel.vswitch_path]
|
||||
|
||||
self.netutils.connect_vnic_to_vswitch(mock.sentinel.switch_name,
|
||||
mock.sentinel.port_name)
|
||||
|
||||
self.assertFalse(self.netutils._jobutils.modify_virt_resource.called)
|
||||
|
||||
def _mock_get_switch_port_alloc(self, found=True):
|
||||
mock_port = mock.MagicMock()
|
||||
patched = mock.patch.object(
|
||||
self.netutils, '_get_switch_port_allocation',
|
||||
return_value=(mock_port, found))
|
||||
patched.start()
|
||||
self.addCleanup(patched.stop)
|
||||
return mock_port
|
||||
|
||||
def test_get_vm_from_res_setting_data(self):
|
||||
fake_res_set_instance_id = "Microsoft:GUID\\SpecificData"
|
||||
fake_vm_set_instance_id = "Microsoft:GUID"
|
||||
res_setting_data = mock.Mock(InstanceID=fake_res_set_instance_id)
|
||||
conn = self.netutils._conn
|
||||
mock_setting_data = conn.Msvm_VirtualSystemSettingData.return_value
|
||||
|
||||
resulted_vm = self.netutils._get_vm_from_res_setting_data(
|
||||
res_setting_data)
|
||||
|
||||
conn.Msvm_VirtualSystemSettingData.assert_called_once_with(
|
||||
InstanceID=fake_vm_set_instance_id)
|
||||
conn.Msvm_ComputerSystem.assert_called_once_with(
|
||||
Name=mock_setting_data[0].ConfigurationID)
|
||||
expected_result = conn.Msvm_ComputerSystem.return_value[0]
|
||||
self.assertEqual(expected_result, resulted_vm)
|
||||
|
||||
@mock.patch.object(networkutils, 'wmi', create=True)
|
||||
def test_remove_switch_port(self, mock_wmi):
|
||||
mock_sw_port = self._mock_get_switch_port_alloc()
|
||||
self.netutils._switch_ports[self._FAKE_PORT_NAME] = mock_sw_port
|
||||
self.netutils._vlan_sds[mock_sw_port.InstanceID] = mock.MagicMock()
|
||||
mock_wmi.x_wmi = Exception
|
||||
self.netutils._jobutils.remove_virt_resource.side_effect = (
|
||||
mock_wmi.x_wmi)
|
||||
|
||||
self.netutils.remove_switch_port(self._FAKE_PORT_NAME, False)
|
||||
|
||||
self.netutils._jobutils.remove_virt_resource.assert_called_once_with(
|
||||
mock_sw_port)
|
||||
self.assertNotIn(self._FAKE_PORT_NAME, self.netutils._switch_ports)
|
||||
self.assertNotIn(mock_sw_port.InstanceID, self.netutils._vlan_sds)
|
||||
|
||||
def test_get_vswitch(self):
|
||||
self.netutils._conn.Msvm_VirtualEthernetSwitch.return_value = [
|
||||
self._FAKE_VSWITCH]
|
||||
vswitch = self.netutils._get_vswitch(self._FAKE_VSWITCH_NAME)
|
||||
|
||||
self.assertEqual({self._FAKE_VSWITCH_NAME: self._FAKE_VSWITCH},
|
||||
self.netutils._switches)
|
||||
self.assertEqual(self._FAKE_VSWITCH, vswitch)
|
||||
|
||||
def test_get_vswitch_not_found(self):
|
||||
self.netutils._switches = {}
|
||||
self.netutils._conn.Msvm_VirtualEthernetSwitch.return_value = []
|
||||
self.assertRaises(exceptions.HyperVException,
|
||||
self.netutils._get_vswitch,
|
||||
self._FAKE_VSWITCH_NAME)
|
||||
|
||||
@mock.patch.object(networkutils.NetworkUtils,
|
||||
'_create_default_setting_data')
|
||||
def _check_set_vswitch_port_vlan_id(self, mock_create_default_sd,
|
||||
missing_vlan=False):
|
||||
mock_port = self._mock_get_switch_port_alloc(found=True)
|
||||
old_vlan_settings = mock.MagicMock()
|
||||
if missing_vlan:
|
||||
side_effect = [old_vlan_settings, None]
|
||||
else:
|
||||
side_effect = [old_vlan_settings, old_vlan_settings]
|
||||
self.netutils._get_vlan_setting_data_from_port_alloc = mock.MagicMock(
|
||||
side_effect=side_effect)
|
||||
mock_vlan_settings = mock.MagicMock()
|
||||
mock_create_default_sd.return_value = mock_vlan_settings
|
||||
|
||||
if missing_vlan:
|
||||
self.assertRaises(exceptions.HyperVException,
|
||||
self.netutils.set_vswitch_port_vlan_id,
|
||||
self._FAKE_VLAN_ID, self._FAKE_PORT_NAME)
|
||||
else:
|
||||
self.netutils.set_vswitch_port_vlan_id(self._FAKE_VLAN_ID,
|
||||
self._FAKE_PORT_NAME)
|
||||
|
||||
mock_remove_feature = self.netutils._jobutils.remove_virt_feature
|
||||
mock_remove_feature.assert_called_once_with(old_vlan_settings)
|
||||
mock_add_feature = self.netutils._jobutils.add_virt_feature
|
||||
mock_add_feature.assert_called_once_with(mock_vlan_settings, mock_port)
|
||||
|
||||
def test_set_vswitch_port_vlan_id(self):
|
||||
self._check_set_vswitch_port_vlan_id()
|
||||
|
||||
def test_set_vswitch_port_vlan_id_missing(self):
|
||||
self._check_set_vswitch_port_vlan_id(missing_vlan=True)
|
||||
|
||||
@mock.patch.object(networkutils.NetworkUtils,
|
||||
'_get_vlan_setting_data_from_port_alloc')
|
||||
def test_set_vswitch_port_vlan_id_already_set(self, mock_get_vlan_sd):
|
||||
self._mock_get_switch_port_alloc()
|
||||
mock_get_vlan_sd.return_value = mock.MagicMock(
|
||||
AccessVlanId=mock.sentinel.vlan_id,
|
||||
OperationMode=self.netutils._OPERATION_MODE_ACCESS)
|
||||
|
||||
self.netutils.set_vswitch_port_vlan_id(mock.sentinel.vlan_id,
|
||||
mock.sentinel.port_name)
|
||||
|
||||
mock_remove_feature = self.netutils._jobutils.remove_virt_feature
|
||||
self.assertFalse(mock_remove_feature.called)
|
||||
mock_add_feature = self.netutils._jobutils.add_virt_feature
|
||||
self.assertFalse(mock_add_feature.called)
|
||||
|
||||
@mock.patch.object(networkutils.NetworkUtils,
|
||||
'_get_security_setting_data_from_port_alloc')
|
||||
@mock.patch.object(networkutils.NetworkUtils,
|
||||
'_create_default_setting_data')
|
||||
def _check_set_vswitch_port_vsid(self, mock_create_default_sd,
|
||||
mock_get_security_sd, missing_vsid=False):
|
||||
mock_port_alloc = self._mock_get_switch_port_alloc()
|
||||
|
||||
mock_vsid_settings = mock.MagicMock()
|
||||
if missing_vsid:
|
||||
side_effect = [mock_vsid_settings, None]
|
||||
else:
|
||||
side_effect = [mock_vsid_settings, mock_vsid_settings]
|
||||
|
||||
mock_get_security_sd.side_effect = side_effect
|
||||
mock_create_default_sd.return_value = mock_vsid_settings
|
||||
|
||||
if missing_vsid:
|
||||
self.assertRaises(exceptions.HyperVException,
|
||||
self.netutils.set_vswitch_port_vsid,
|
||||
mock.sentinel.vsid,
|
||||
mock.sentinel.switch_port_name)
|
||||
else:
|
||||
self.netutils.set_vswitch_port_vsid(mock.sentinel.vsid,
|
||||
mock.sentinel.switch_port_name)
|
||||
|
||||
mock_remove_feature = self.netutils._jobutils.remove_virt_feature
|
||||
mock_remove_feature.assert_called_once_with(mock_vsid_settings)
|
||||
mock_add_feature = self.netutils._jobutils.add_virt_feature
|
||||
mock_add_feature.assert_called_once_with(mock_vsid_settings,
|
||||
mock_port_alloc)
|
||||
|
||||
def test_set_vswitch_port_vsid(self):
|
||||
self._check_set_vswitch_port_vsid()
|
||||
|
||||
def test_set_vswitch_port_vsid_missing(self):
|
||||
self._check_set_vswitch_port_vsid(missing_vsid=True)
|
||||
|
||||
@mock.patch.object(_wqlutils, 'get_element_associated_class')
|
||||
def test_set_vswitch_port_vsid_already_set(self, mock_get_elem_assoc_cls):
|
||||
self._mock_get_switch_port_alloc()
|
||||
|
||||
mock_vsid_settings = mock.MagicMock(VirtualSubnetId=mock.sentinel.vsid)
|
||||
mock_get_elem_assoc_cls.return_value = (mock_vsid_settings, True)
|
||||
|
||||
self.netutils.set_vswitch_port_vsid(mock.sentinel.vsid,
|
||||
mock.sentinel.switch_port_name)
|
||||
|
||||
self.assertFalse(self.netutils._jobutils.add_virt_feature.called)
|
||||
|
||||
@mock.patch.object(networkutils.NetworkUtils,
|
||||
'_get_setting_data_from_port_alloc')
|
||||
def test_get_vlan_setting_data_from_port_alloc(self, mock_get_sd):
|
||||
mock_port = mock.MagicMock()
|
||||
result = self.netutils._get_vlan_setting_data_from_port_alloc(
|
||||
mock_port)
|
||||
|
||||
self.assertEqual(mock_get_sd.return_value, result)
|
||||
mock_get_sd.assert_called_once_with(mock_port, self.netutils._vsid_sds,
|
||||
self.netutils._PORT_VLAN_SET_DATA)
|
||||
|
||||
@mock.patch.object(networkutils.NetworkUtils,
|
||||
'_get_setting_data_from_port_alloc')
|
||||
def test_get_security_setting_data_from_port_alloc(self, mock_get_sd):
|
||||
mock_port = mock.MagicMock()
|
||||
result = self.netutils._get_security_setting_data_from_port_alloc(
|
||||
mock_port)
|
||||
|
||||
self.assertEqual(mock_get_sd.return_value, result)
|
||||
mock_get_sd.assert_called_once_with(
|
||||
mock_port, self.netutils._vsid_sds,
|
||||
self.netutils._PORT_SECURITY_SET_DATA)
|
||||
|
||||
def test_get_setting_data_from_port_alloc_cached(self):
|
||||
mock_port = mock.MagicMock(InstanceID=mock.sentinel.InstanceID)
|
||||
cache = {mock_port.InstanceID: mock.sentinel.sd_object}
|
||||
|
||||
result = self.netutils._get_setting_data_from_port_alloc(
|
||||
mock_port, cache, mock.sentinel.data_class)
|
||||
|
||||
self.assertEqual(mock.sentinel.sd_object, result)
|
||||
|
||||
@mock.patch.object(_wqlutils, 'get_element_associated_class')
|
||||
def test_get_setting_data_from_port_alloc(self, mock_get_elem_assoc_cls):
|
||||
sd_object = mock.MagicMock()
|
||||
mock_port = mock.MagicMock(InstanceID=mock.sentinel.InstanceID)
|
||||
mock_get_elem_assoc_cls.return_value = [sd_object]
|
||||
cache = {}
|
||||
result = self.netutils._get_setting_data_from_port_alloc(
|
||||
mock_port, cache, mock.sentinel.data_class)
|
||||
|
||||
mock_get_elem_assoc_cls.assert_called_once_with(
|
||||
self.netutils._conn, mock.sentinel.data_class,
|
||||
element_instance_id=mock.sentinel.InstanceID)
|
||||
self.assertEqual(sd_object, result)
|
||||
self.assertEqual(sd_object, cache[mock.sentinel.InstanceID])
|
||||
|
||||
def test_get_switch_port_allocation_cached(self):
|
||||
self.netutils._switch_ports[mock.sentinel.port_name] = (
|
||||
mock.sentinel.port)
|
||||
|
||||
port, found = self.netutils._get_switch_port_allocation(
|
||||
mock.sentinel.port_name)
|
||||
|
||||
self.assertEqual(mock.sentinel.port, port)
|
||||
self.assertTrue(found)
|
||||
|
||||
@mock.patch.object(networkutils.NetworkUtils, '_get_setting_data')
|
||||
def test_get_switch_port_allocation(self, mock_get_set_data):
|
||||
mock_get_set_data.return_value = (mock.sentinel.port, True)
|
||||
|
||||
port, found = self.netutils._get_switch_port_allocation(
|
||||
mock.sentinel.port_name)
|
||||
|
||||
self.assertEqual(mock.sentinel.port, port)
|
||||
self.assertTrue(found)
|
||||
self.assertIn(mock.sentinel.port_name, self.netutils._switch_ports)
|
||||
mock_get_set_data.assert_called_once_with(
|
||||
self.netutils._PORT_ALLOC_SET_DATA, mock.sentinel.port_name, False)
|
||||
|
||||
@mock.patch.object(networkutils.NetworkUtils, '_get_setting_data')
|
||||
def test_get_switch_port_allocation_expected(self, mock_get_set_data):
|
||||
self.netutils._switch_ports = {}
|
||||
mock_get_set_data.return_value = (None, False)
|
||||
|
||||
self.assertRaises(exceptions.HyperVPortNotFoundException,
|
||||
self.netutils._get_switch_port_allocation,
|
||||
mock.sentinel.port_name, expected=True)
|
||||
mock_get_set_data.assert_called_once_with(
|
||||
self.netutils._PORT_ALLOC_SET_DATA, mock.sentinel.port_name, False)
|
||||
|
||||
def test_get_setting_data(self):
|
||||
self.netutils._get_first_item = mock.MagicMock(return_value=None)
|
||||
|
||||
mock_data = mock.MagicMock()
|
||||
self.netutils._get_default_setting_data = mock.MagicMock(
|
||||
return_value=mock_data)
|
||||
|
||||
ret_val = self.netutils._get_setting_data(self._FAKE_CLASS_NAME,
|
||||
self._FAKE_ELEMENT_NAME,
|
||||
True)
|
||||
|
||||
self.assertEqual(ret_val, (mock_data, False))
|
||||
|
||||
def test_create_default_setting_data(self):
|
||||
result = self.netutils._create_default_setting_data('FakeClass')
|
||||
|
||||
fake_class = self.netutils._conn.FakeClass
|
||||
self.assertEqual(fake_class.new.return_value, result)
|
||||
fake_class.new.assert_called_once_with()
|
||||
|
||||
def test_add_metrics_collection_acls(self):
|
||||
mock_port = self._mock_get_switch_port_alloc()
|
||||
mock_acl = mock.MagicMock()
|
||||
|
||||
with mock.patch.multiple(
|
||||
self.netutils,
|
||||
_create_default_setting_data=mock.Mock(return_value=mock_acl)):
|
||||
|
||||
self.netutils.add_metrics_collection_acls(self._FAKE_PORT_NAME)
|
||||
|
||||
mock_add_feature = self.netutils._jobutils.add_virt_feature
|
||||
actual_calls = len(mock_add_feature.mock_calls)
|
||||
self.assertEqual(4, actual_calls)
|
||||
mock_add_feature.assert_called_with(mock_acl, mock_port)
|
||||
|
||||
@mock.patch.object(networkutils.NetworkUtils, '_is_port_vm_started')
|
||||
def test_is_metrics_collection_allowed_true(self, mock_is_started):
|
||||
mock_acl = mock.MagicMock()
|
||||
mock_acl.Action = self.netutils._ACL_ACTION_METER
|
||||
self._test_is_metrics_collection_allowed(
|
||||
mock_vm_started=mock_is_started,
|
||||
acls=[mock_acl, mock_acl],
|
||||
expected_result=True)
|
||||
|
||||
@mock.patch.object(networkutils.NetworkUtils, '_is_port_vm_started')
|
||||
def test_test_is_metrics_collection_allowed_false(self, mock_is_started):
|
||||
self._test_is_metrics_collection_allowed(
|
||||
mock_vm_started=mock_is_started,
|
||||
acls=[],
|
||||
expected_result=False)
|
||||
|
||||
@mock.patch.object(_wqlutils, 'get_element_associated_class')
|
||||
def _test_is_metrics_collection_allowed(self, mock_get_elem_assoc_cls,
|
||||
mock_vm_started, acls,
|
||||
expected_result):
|
||||
mock_port = self._mock_get_switch_port_alloc()
|
||||
mock_acl = mock.MagicMock()
|
||||
mock_acl.Action = self.netutils._ACL_ACTION_METER
|
||||
|
||||
mock_get_elem_assoc_cls.return_value = acls
|
||||
mock_vm_started.return_value = True
|
||||
|
||||
result = self.netutils.is_metrics_collection_allowed(
|
||||
self._FAKE_PORT_NAME)
|
||||
self.assertEqual(expected_result, result)
|
||||
mock_get_elem_assoc_cls.assert_called_once_with(
|
||||
self.netutils._conn, self.netutils._PORT_ALLOC_ACL_SET_DATA,
|
||||
element_instance_id=mock_port.InstanceID)
|
||||
|
||||
def test_is_port_vm_started_true(self):
|
||||
self._test_is_port_vm_started(self.netutils._HYPERV_VM_STATE_ENABLED,
|
||||
True)
|
||||
|
||||
def test_is_port_vm_started_false(self):
|
||||
self._test_is_port_vm_started(self._FAKE_HYPERV_VM_STATE, False)
|
||||
|
||||
def _test_is_port_vm_started(self, vm_state, expected_result):
|
||||
mock_svc = self.netutils._conn.Msvm_VirtualSystemManagementService()[0]
|
||||
mock_port = mock.MagicMock()
|
||||
mock_vmsettings = mock.MagicMock()
|
||||
mock_summary = mock.MagicMock()
|
||||
mock_summary.EnabledState = vm_state
|
||||
mock_vmsettings.path_.return_value = self._FAKE_RES_PATH
|
||||
|
||||
self.netutils._conn.Msvm_VirtualSystemSettingData.return_value = [
|
||||
mock_vmsettings]
|
||||
mock_svc.GetSummaryInformation.return_value = (self._FAKE_RET_VAL,
|
||||
[mock_summary])
|
||||
|
||||
result = self.netutils._is_port_vm_started(mock_port)
|
||||
self.assertEqual(expected_result, result)
|
||||
mock_svc.GetSummaryInformation.assert_called_once_with(
|
||||
[self.netutils._VM_SUMMARY_ENABLED_STATE],
|
||||
[self._FAKE_RES_PATH])
|
||||
|
||||
@mock.patch.object(_wqlutils, 'get_element_associated_class')
|
||||
@mock.patch.object(networkutils.NetworkUtils, '_bind_security_rules')
|
||||
def test_create_security_rules(self, mock_bind, mock_get_elem_assoc_cls):
|
||||
(m_port, m_acl) = self._setup_security_rule_test(
|
||||
mock_get_elem_assoc_cls)
|
||||
fake_rule = mock.MagicMock()
|
||||
|
||||
self.netutils.create_security_rules(self._FAKE_PORT_NAME, fake_rule)
|
||||
mock_bind.assert_called_once_with(m_port, fake_rule)
|
||||
|
||||
@mock.patch.object(_wqlutils, 'get_element_associated_class')
|
||||
@mock.patch.object(networkutils.NetworkUtils, '_create_security_acl')
|
||||
@mock.patch.object(networkutils.NetworkUtils, '_get_new_weights')
|
||||
@mock.patch.object(networkutils.NetworkUtils, '_filter_security_acls')
|
||||
def test_bind_security_rules(self, mock_filtered_acls, mock_get_weights,
|
||||
mock_create_acl, mock_get_elem_assoc_cls):
|
||||
m_port = mock.MagicMock()
|
||||
m_acl = mock.MagicMock()
|
||||
mock_get_elem_assoc_cls.return_value = [m_acl]
|
||||
mock_filtered_acls.return_value = []
|
||||
mock_get_weights.return_value = [mock.sentinel.FAKE_WEIGHT]
|
||||
mock_create_acl.return_value = m_acl
|
||||
fake_rule = mock.MagicMock()
|
||||
|
||||
self.netutils._bind_security_rules(m_port, [fake_rule])
|
||||
|
||||
mock_create_acl.assert_called_once_with(fake_rule,
|
||||
mock.sentinel.FAKE_WEIGHT)
|
||||
mock_add_features = self.netutils._jobutils.add_multiple_virt_features
|
||||
mock_add_features.assert_called_once_with([m_acl], m_port)
|
||||
mock_get_elem_assoc_cls.assert_called_once_with(
|
||||
self.netutils._conn, self.netutils._PORT_EXT_ACL_SET_DATA,
|
||||
element_instance_id=m_port.InstanceID)
|
||||
|
||||
@mock.patch.object(_wqlutils, 'get_element_associated_class')
|
||||
@mock.patch.object(networkutils.NetworkUtils, '_get_new_weights')
|
||||
@mock.patch.object(networkutils.NetworkUtils, '_filter_security_acls')
|
||||
def test_bind_security_rules_existent(self, mock_filtered_acls,
|
||||
mock_get_weights,
|
||||
mock_get_elem_assoc_cls):
|
||||
m_port = mock.MagicMock()
|
||||
m_acl = mock.MagicMock()
|
||||
mock_get_elem_assoc_cls.return_value = [m_acl]
|
||||
mock_filtered_acls.return_value = [m_acl]
|
||||
fake_rule = mock.MagicMock()
|
||||
|
||||
self.netutils._bind_security_rules(m_port, [fake_rule])
|
||||
mock_filtered_acls.assert_called_once_with(fake_rule, [m_acl])
|
||||
mock_get_weights.assert_called_once_with([fake_rule], [m_acl])
|
||||
mock_get_elem_assoc_cls.assert_called_once_with(
|
||||
self.netutils._conn, self.netutils._PORT_EXT_ACL_SET_DATA,
|
||||
element_instance_id=m_port.InstanceID)
|
||||
|
||||
def test_get_port_security_acls_cached(self):
|
||||
mock_port = mock.MagicMock(ElementName=mock.sentinel.port_name)
|
||||
self.netutils._sg_acl_sds = {
|
||||
mock.sentinel.port_name: [mock.sentinel.fake_acl]}
|
||||
|
||||
acls = self.netutils._get_port_security_acls(mock_port)
|
||||
|
||||
self.assertEqual([mock.sentinel.fake_acl], acls)
|
||||
|
||||
@mock.patch.object(_wqlutils, 'get_element_associated_class')
|
||||
def test_get_port_security_acls(self, mock_get_elem_assoc_cls):
|
||||
self.netutils._sg_acl_sds = {}
|
||||
mock_port = mock.MagicMock()
|
||||
mock_get_elem_assoc_cls.return_value = [mock.sentinel.fake_acl]
|
||||
|
||||
acls = self.netutils._get_port_security_acls(mock_port)
|
||||
|
||||
self.assertEqual([mock.sentinel.fake_acl], acls)
|
||||
self.assertEqual({mock_port.ElementName: [mock.sentinel.fake_acl]},
|
||||
self.netutils._sg_acl_sds)
|
||||
mock_get_elem_assoc_cls.assert_called_once_with(
|
||||
self.netutils._conn, self.netutils._PORT_EXT_ACL_SET_DATA,
|
||||
element_instance_id=mock_port.InstanceID)
|
||||
|
||||
@mock.patch.object(_wqlutils, 'get_element_associated_class')
|
||||
@mock.patch.object(networkutils.NetworkUtils, '_filter_security_acls')
|
||||
def test_remove_security_rules(self, mock_filter, mock_get_elem_assoc_cls):
|
||||
mock_acl = self._setup_security_rule_test(mock_get_elem_assoc_cls)[1]
|
||||
fake_rule = mock.MagicMock()
|
||||
mock_filter.return_value = [mock_acl]
|
||||
|
||||
self.netutils.remove_security_rules(self._FAKE_PORT_NAME, [fake_rule])
|
||||
|
||||
mock_remove_features = (
|
||||
self.netutils._jobutils.remove_multiple_virt_features)
|
||||
mock_remove_features.assert_called_once_with([mock_acl])
|
||||
|
||||
@mock.patch.object(_wqlutils, 'get_element_associated_class')
|
||||
def test_remove_all_security_rules(self, mock_get_elem_assoc_cls):
|
||||
mock_acl = self._setup_security_rule_test(mock_get_elem_assoc_cls)[1]
|
||||
self.netutils.remove_all_security_rules(self._FAKE_PORT_NAME)
|
||||
mock_remove_features = (
|
||||
self.netutils._jobutils.remove_multiple_virt_features)
|
||||
mock_remove_features.assert_called_once_with([mock_acl])
|
||||
|
||||
@mock.patch.object(networkutils.NetworkUtils,
|
||||
'_create_default_setting_data')
|
||||
def test_create_security_acl(self, mock_get_set_data):
|
||||
mock_acl = mock_get_set_data.return_value
|
||||
fake_rule = mock.MagicMock()
|
||||
fake_rule.to_dict.return_value = {"Action": self._FAKE_ACL_ACT}
|
||||
|
||||
self.netutils._create_security_acl(fake_rule, self._FAKE_WEIGHT)
|
||||
mock_acl.set.assert_called_once_with(Action=self._FAKE_ACL_ACT)
|
||||
|
||||
def _setup_security_rule_test(self, mock_get_elem_assoc_cls):
|
||||
mock_port = self._mock_get_switch_port_alloc()
|
||||
mock_acl = mock.MagicMock()
|
||||
mock_get_elem_assoc_cls.return_value = [mock_acl]
|
||||
|
||||
self.netutils._filter_security_acls = mock.MagicMock(
|
||||
return_value=[mock_acl])
|
||||
|
||||
return (mock_port, mock_acl)
|
||||
|
||||
def test_filter_acls(self):
|
||||
mock_acl = mock.MagicMock()
|
||||
mock_acl.Action = self._FAKE_ACL_ACT
|
||||
mock_acl.Applicability = self.netutils._ACL_APPLICABILITY_LOCAL
|
||||
mock_acl.Direction = self._FAKE_ACL_DIR
|
||||
mock_acl.AclType = self._FAKE_ACL_TYPE
|
||||
mock_acl.RemoteAddress = self._FAKE_REMOTE_ADDR
|
||||
|
||||
acls = [mock_acl, mock_acl]
|
||||
good_acls = self.netutils._filter_acls(
|
||||
acls, self._FAKE_ACL_ACT, self._FAKE_ACL_DIR,
|
||||
self._FAKE_ACL_TYPE, self._FAKE_REMOTE_ADDR)
|
||||
bad_acls = self.netutils._filter_acls(
|
||||
acls, self._FAKE_ACL_ACT, self._FAKE_ACL_DIR, self._FAKE_ACL_TYPE)
|
||||
|
||||
self.assertEqual(acls, good_acls)
|
||||
self.assertEqual([], bad_acls)
|
||||
|
||||
def test_get_new_weights_allow(self):
|
||||
actual = self.netutils._get_new_weights([mock.ANY, mock.ANY], mock.ANY)
|
||||
self.assertEqual([0, 0], actual)
|
||||
|
||||
|
||||
class TestNetworkUtilsR2(test_base.OsWinBaseTestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(TestNetworkUtilsR2, self).setUp()
|
||||
self.netutils = networkutils.NetworkUtilsR2()
|
||||
self.netutils._conn_attr = mock.MagicMock()
|
||||
|
||||
@mock.patch.object(networkutils.NetworkUtilsR2,
|
||||
'_create_default_setting_data')
|
||||
def test_create_security_acl(self, mock_create_default_setting_data):
|
||||
sg_rule = mock.MagicMock()
|
||||
sg_rule.to_dict.return_value = {}
|
||||
|
||||
acl = self.netutils._create_security_acl(sg_rule, mock.sentinel.weight)
|
||||
|
||||
self.assertEqual(mock.sentinel.weight, acl.Weight)
|
||||
|
||||
def test_get_new_weights_no_acls_deny(self):
|
||||
mock_rule = mock.MagicMock(Action=self.netutils._ACL_ACTION_DENY)
|
||||
actual = self.netutils._get_new_weights([mock_rule], [])
|
||||
self.assertEqual([1], actual)
|
||||
|
||||
def test_get_new_weights_no_acls_allow(self):
|
||||
mock_rule = mock.MagicMock(Action=self.netutils._ACL_ACTION_ALLOW)
|
||||
actual = self.netutils._get_new_weights([mock_rule, mock_rule], [])
|
||||
|
||||
expected = [self.netutils._MAX_WEIGHT - 1,
|
||||
self.netutils._MAX_WEIGHT - 2]
|
||||
self.assertEqual(expected, actual)
|
||||
|
||||
def test_get_new_weights_deny(self):
|
||||
mock_rule = mock.MagicMock(Action=self.netutils._ACL_ACTION_DENY)
|
||||
mockacl1 = mock.MagicMock(Action=self.netutils._ACL_ACTION_DENY,
|
||||
Weight=1)
|
||||
mockacl2 = mock.MagicMock(Action=self.netutils._ACL_ACTION_DENY,
|
||||
Weight=3)
|
||||
|
||||
actual = self.netutils._get_new_weights([mock_rule, mock_rule],
|
||||
[mockacl1, mockacl2])
|
||||
|
||||
self.assertEqual([2, 4], actual)
|
||||
|
||||
def test_get_new_weights_allow(self):
|
||||
mock_rule = mock.MagicMock(Action=self.netutils._ACL_ACTION_ALLOW)
|
||||
mockacl = mock.MagicMock(Action=self.netutils._ACL_ACTION_ALLOW,
|
||||
Weight=self.netutils._MAX_WEIGHT - 3)
|
||||
|
||||
actual = self.netutils._get_new_weights([mock_rule, mock_rule],
|
||||
[mockacl])
|
||||
|
||||
expected = [self.netutils._MAX_WEIGHT - 4,
|
||||
self.netutils._MAX_WEIGHT - 5]
|
||||
self.assertEqual(expected, actual)
|
||||
|
||||
def test_get_new_weights_search_available(self):
|
||||
mock_rule = mock.MagicMock(Action=self.netutils._ACL_ACTION_ALLOW)
|
||||
mockacl1 = mock.MagicMock(Action=self.netutils._ACL_ACTION_ALLOW,
|
||||
Weight=self.netutils._REJECT_ACLS_COUNT + 1)
|
||||
mockacl2 = mock.MagicMock(Action=self.netutils._ACL_ACTION_ALLOW,
|
||||
Weight=self.netutils._MAX_WEIGHT - 1)
|
||||
|
||||
actual = self.netutils._get_new_weights([mock_rule],
|
||||
[mockacl1, mockacl2])
|
||||
|
||||
self.assertEqual([self.netutils._MAX_WEIGHT - 2], actual)
|
@ -1,259 +0,0 @@
|
||||
# Copyright 2015 Cloudbase Solutions SRL
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Unit tests for the Hyper-V NVGRE support.
|
||||
"""
|
||||
|
||||
import mock
|
||||
|
||||
from os_win import constants
|
||||
from os_win import exceptions
|
||||
from os_win.tests import test_base
|
||||
from os_win.utils.network import nvgreutils
|
||||
|
||||
|
||||
class TestNvgreUtils(test_base.OsWinBaseTestCase):
|
||||
|
||||
_FAKE_RDID = 'fake_rdid'
|
||||
_FAKE_NETWORK_NAME = 'fake_network_name'
|
||||
_FAKE_VSID = 9001
|
||||
_FAKE_DEST_PREFIX = 'fake_dest_prefix'
|
||||
_FAKE_GW_BAD = '10.0.0.1'
|
||||
_FAKE_GW = '10.0.0.2'
|
||||
|
||||
def setUp(self):
|
||||
super(TestNvgreUtils, self).setUp()
|
||||
self.utils = nvgreutils.NvgreUtils()
|
||||
self.utils._utils = mock.MagicMock()
|
||||
self.utils._scimv2 = mock.MagicMock()
|
||||
|
||||
def _create_mock_binding(self):
|
||||
binding = mock.MagicMock()
|
||||
binding.BindName = self.utils._WNV_BIND_NAME
|
||||
binding.Name = mock.sentinel.fake_network
|
||||
|
||||
net_binds = self.utils._scimv2.MSFT_NetAdapterBindingSettingData
|
||||
net_binds.return_value = [binding]
|
||||
return binding
|
||||
|
||||
@mock.patch.object(nvgreutils.NvgreUtils, 'get_network_iface_ip')
|
||||
@mock.patch.object(nvgreutils.NvgreUtils, '_get_network_iface_index')
|
||||
def test_create_provider_address(self, mock_get_iface_index,
|
||||
mock_get_iface_ip):
|
||||
mock_get_iface_index.return_value = mock.sentinel.iface_index
|
||||
mock_get_iface_ip.return_value = (mock.sentinel.iface_ip,
|
||||
mock.sentinel.prefix_len)
|
||||
|
||||
provider_addr = mock.MagicMock()
|
||||
scimv2 = self.utils._scimv2
|
||||
obj_class = scimv2.MSFT_NetVirtualizationProviderAddressSettingData
|
||||
obj_class.return_value = [provider_addr]
|
||||
|
||||
self.utils.create_provider_address(mock.sentinel.fake_network,
|
||||
mock.sentinel.fake_vlan_id)
|
||||
|
||||
self.assertTrue(provider_addr.Delete_.called)
|
||||
obj_class.new.assert_called_once_with(
|
||||
ProviderAddress=mock.sentinel.iface_ip,
|
||||
VlanID=mock.sentinel.fake_vlan_id,
|
||||
InterfaceIndex=mock.sentinel.iface_index,
|
||||
PrefixLength=mock.sentinel.prefix_len)
|
||||
|
||||
@mock.patch.object(nvgreutils.NvgreUtils, 'get_network_iface_ip')
|
||||
@mock.patch.object(nvgreutils.NvgreUtils, '_get_network_iface_index')
|
||||
def test_create_provider_address_exc(self, mock_get_iface_index,
|
||||
mock_get_iface_ip):
|
||||
mock_get_iface_ip.return_value = (None, None)
|
||||
|
||||
self.assertRaises(exceptions.NotFound,
|
||||
self.utils.create_provider_address,
|
||||
mock.sentinel.fake_network,
|
||||
mock.sentinel.fake_vlan_id)
|
||||
|
||||
@mock.patch.object(nvgreutils.NvgreUtils, 'get_network_iface_ip')
|
||||
@mock.patch.object(nvgreutils.NvgreUtils, '_get_network_iface_index')
|
||||
def test_create_provider_address_exists(self, mock_get_iface_index,
|
||||
mock_get_iface_ip):
|
||||
mock_get_iface_index.return_value = mock.sentinel.iface_index
|
||||
mock_get_iface_ip.return_value = (mock.sentinel.iface_ip,
|
||||
mock.sentinel.prefix_len)
|
||||
|
||||
provider_addr = mock.MagicMock(
|
||||
VlanID=mock.sentinel.fake_vlan_id,
|
||||
InterfaceIndex=mock.sentinel.iface_index)
|
||||
scimv2 = self.utils._scimv2
|
||||
obj_class = scimv2.MSFT_NetVirtualizationProviderAddressSettingData
|
||||
obj_class.return_value = [provider_addr]
|
||||
|
||||
self.utils.create_provider_address(mock.sentinel.fake_network,
|
||||
mock.sentinel.fake_vlan_id)
|
||||
|
||||
self.assertFalse(obj_class.new.called)
|
||||
|
||||
@mock.patch.object(nvgreutils.NvgreUtils, '_get_network_iface_index')
|
||||
def test_create_provider_route(self, mock_get_iface_index):
|
||||
mock_get_iface_index.return_value = mock.sentinel.iface_index
|
||||
self.utils._scimv2.MSFT_NetVirtualizationProviderRouteSettingData = (
|
||||
mock.MagicMock(return_value=[]))
|
||||
|
||||
self.utils.create_provider_route(mock.sentinel.fake_network)
|
||||
|
||||
scimv2 = self.utils._scimv2
|
||||
obj_class = scimv2.MSFT_NetVirtualizationProviderRouteSettingData
|
||||
obj_class.new.assert_called_once_with(
|
||||
InterfaceIndex=mock.sentinel.iface_index,
|
||||
DestinationPrefix='%s/0' % constants.IPV4_DEFAULT,
|
||||
NextHop=constants.IPV4_DEFAULT)
|
||||
|
||||
@mock.patch.object(nvgreutils.NvgreUtils, '_get_network_iface_index')
|
||||
def test_create_provider_route_none(self, mock_get_iface_index):
|
||||
mock_get_iface_index.return_value = None
|
||||
|
||||
self.utils.create_provider_route(mock.sentinel.fake_network)
|
||||
scimv2 = self.utils._scimv2
|
||||
self.assertFalse(
|
||||
scimv2.MSFT_NetVirtualizationProviderRouteSettingData.new.called)
|
||||
|
||||
@mock.patch.object(nvgreutils.NvgreUtils, '_get_network_iface_index')
|
||||
def test_create_provider_route_exists(self, mock_get_iface_index):
|
||||
mock_get_iface_index.return_value = mock.sentinel.iface_index
|
||||
self.utils._scimv2.MSFT_NetVirtualizationProviderRouteSettingData = (
|
||||
mock.MagicMock(return_value=[mock.MagicMock()]))
|
||||
|
||||
self.utils.create_provider_route(mock.sentinel.fake_network)
|
||||
|
||||
scimv2 = self.utils._scimv2
|
||||
self.assertFalse(
|
||||
scimv2.MSFT_NetVirtualizationProviderRouteSettingData.new.called)
|
||||
|
||||
def test_clear_customer_routes(self):
|
||||
cls = self.utils._scimv2.MSFT_NetVirtualizationCustomerRouteSettingData
|
||||
route = mock.MagicMock()
|
||||
cls.return_value = [route]
|
||||
|
||||
self.utils.clear_customer_routes(mock.sentinel.vsid)
|
||||
|
||||
cls.assert_called_once_with(VirtualSubnetID=mock.sentinel.vsid)
|
||||
route.Delete_.assert_called_once_with()
|
||||
|
||||
def test_create_customer_route(self):
|
||||
self.utils.create_customer_route(
|
||||
mock.sentinel.fake_vsid, mock.sentinel.dest_prefix,
|
||||
mock.sentinel.next_hop, self._FAKE_RDID)
|
||||
|
||||
scimv2 = self.utils._scimv2
|
||||
obj_class = scimv2.MSFT_NetVirtualizationCustomerRouteSettingData
|
||||
obj_class.new.assert_called_once_with(
|
||||
VirtualSubnetID=mock.sentinel.fake_vsid,
|
||||
DestinationPrefix=mock.sentinel.dest_prefix,
|
||||
NextHop=mock.sentinel.next_hop,
|
||||
Metric=255,
|
||||
RoutingDomainID='{%s}' % self._FAKE_RDID)
|
||||
|
||||
def _check_create_lookup_record(self, customer_addr, expected_type):
|
||||
lookup = mock.MagicMock()
|
||||
scimv2 = self.utils._scimv2
|
||||
obj_class = scimv2.MSFT_NetVirtualizationLookupRecordSettingData
|
||||
obj_class.return_value = [lookup]
|
||||
|
||||
self.utils.create_lookup_record(mock.sentinel.provider_addr,
|
||||
customer_addr,
|
||||
mock.sentinel.mac_addr,
|
||||
mock.sentinel.fake_vsid)
|
||||
|
||||
self.assertTrue(lookup.Delete_.called)
|
||||
obj_class.new.assert_called_once_with(
|
||||
VirtualSubnetID=mock.sentinel.fake_vsid,
|
||||
Rule=self.utils._TRANSLATE_ENCAP,
|
||||
Type=expected_type,
|
||||
MACAddress=mock.sentinel.mac_addr,
|
||||
CustomerAddress=customer_addr,
|
||||
ProviderAddress=mock.sentinel.provider_addr)
|
||||
|
||||
def test_create_lookup_record_l2_only(self):
|
||||
self._check_create_lookup_record(
|
||||
constants.IPV4_DEFAULT,
|
||||
self.utils._LOOKUP_RECORD_TYPE_L2_ONLY)
|
||||
|
||||
def test_create_lookup_record_static(self):
|
||||
self._check_create_lookup_record(
|
||||
mock.sentinel.customer_addr,
|
||||
self.utils._LOOKUP_RECORD_TYPE_STATIC)
|
||||
|
||||
def test_create_lookup_record_exists(self):
|
||||
lookup = mock.MagicMock(VirtualSubnetID=mock.sentinel.fake_vsid,
|
||||
ProviderAddress=mock.sentinel.provider_addr,
|
||||
CustomerAddress=mock.sentinel.customer_addr,
|
||||
MACAddress=mock.sentinel.mac_addr)
|
||||
scimv2 = self.utils._scimv2
|
||||
obj_class = scimv2.MSFT_NetVirtualizationLookupRecordSettingData
|
||||
obj_class.return_value = [lookup]
|
||||
|
||||
self.utils.create_lookup_record(mock.sentinel.provider_addr,
|
||||
mock.sentinel.customer_addr,
|
||||
mock.sentinel.mac_addr,
|
||||
mock.sentinel.fake_vsid)
|
||||
self.assertFalse(obj_class.new.called)
|
||||
|
||||
def test_get_network_iface_index_cached(self):
|
||||
self.utils._net_if_indexes[mock.sentinel.fake_network] = (
|
||||
mock.sentinel.iface_index)
|
||||
|
||||
index = self.utils._get_network_iface_index(mock.sentinel.fake_network)
|
||||
|
||||
self.assertEqual(mock.sentinel.iface_index, index)
|
||||
self.assertFalse(self.utils._scimv2.MSFT_NetAdapter.called)
|
||||
|
||||
def test_get_network_iface_index_not_found(self):
|
||||
self.utils._scimv2.MSFT_NetAdapter.return_value = []
|
||||
self.assertRaises(exceptions.NotFound,
|
||||
self.utils._get_network_iface_index,
|
||||
mock.sentinel.network_name)
|
||||
|
||||
def test_get_network_iface_index(self):
|
||||
fake_network = mock.MagicMock(InterfaceIndex=mock.sentinel.iface_index)
|
||||
self.utils._scimv2.MSFT_NetAdapter.return_value = [fake_network]
|
||||
description = (
|
||||
self.utils._utils.get_vswitch_external_network_name.return_value)
|
||||
|
||||
index = self.utils._get_network_iface_index(mock.sentinel.fake_network)
|
||||
|
||||
self.assertEqual(mock.sentinel.iface_index, index)
|
||||
self.assertIn(mock.sentinel.fake_network, self.utils._net_if_indexes)
|
||||
self.utils._scimv2.MSFT_NetAdapter.assert_called_once_with(
|
||||
InterfaceDescription=description)
|
||||
|
||||
@mock.patch.object(nvgreutils.NvgreUtils, '_get_network_ifaces_by_name')
|
||||
def test_get_network_iface_ip(self, mock_get_net_ifaces):
|
||||
fake_network = mock.MagicMock(
|
||||
InterfaceIndex=mock.sentinel.iface_index,
|
||||
DriverDescription=self.utils._HYPERV_VIRT_ADAPTER)
|
||||
mock_get_net_ifaces.return_value = [fake_network]
|
||||
|
||||
fake_netip = mock.MagicMock(IPAddress=mock.sentinel.provider_addr,
|
||||
PrefixLength=mock.sentinel.prefix_len)
|
||||
self.utils._scimv2.MSFT_NetIPAddress.return_value = [fake_netip]
|
||||
|
||||
pair = self.utils.get_network_iface_ip(mock.sentinel.fake_network)
|
||||
|
||||
self.assertEqual(
|
||||
(mock.sentinel.provider_addr, mock.sentinel.prefix_len), pair)
|
||||
|
||||
@mock.patch.object(nvgreutils.NvgreUtils, '_get_network_ifaces_by_name')
|
||||
def test_get_network_iface_ip_none(self, mock_get_net_ifaces):
|
||||
mock_get_net_ifaces.return_value = []
|
||||
pair = self.utils.get_network_iface_ip(mock.sentinel.fake_network)
|
||||
self.assertEqual((None, None), pair)
|
@ -1,181 +0,0 @@
|
||||
# Copyright 2014 Cloudbase Solutions Srl
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
|
||||
from os_win.tests import test_base
|
||||
from os_win.utils.storage.initiator import base_iscsi_utils
|
||||
|
||||
|
||||
class BaseISCSIInitiatorUtilsTestCase(test_base.OsWinBaseTestCase):
|
||||
"""Unit tests for the Hyper-V BaseISCSIInitiatorUtils class."""
|
||||
|
||||
_FAKE_COMPUTER_NAME = "fake_computer_name"
|
||||
_FAKE_DOMAIN_NAME = "fake_domain_name"
|
||||
_FAKE_INITIATOR_NAME = "fake_initiator_name"
|
||||
_FAKE_INITIATOR_IQN_NAME = "iqn.1991-05.com.microsoft:fake_computer_name"
|
||||
_FAKE_DISK_PATH = 'fake_path DeviceID="123\\\\2"'
|
||||
_FAKE_MOUNT_DEVICE = '/dev/fake/mount'
|
||||
_FAKE_DEVICE_NAME = '/dev/fake/path'
|
||||
_FAKE_SWAP = {'device_name': _FAKE_DISK_PATH}
|
||||
|
||||
def setUp(self):
|
||||
self._utils = base_iscsi_utils.BaseISCSIInitiatorUtils()
|
||||
self._utils._conn_wmi = mock.MagicMock()
|
||||
self._utils._conn_cimv2 = mock.MagicMock()
|
||||
|
||||
super(BaseISCSIInitiatorUtilsTestCase, self).setUp()
|
||||
|
||||
def test_get_iscsi_initiator_ok(self):
|
||||
self._check_get_iscsi_initiator(
|
||||
self._FAKE_INITIATOR_NAME)
|
||||
|
||||
def test_get_iscsi_initiator_exception(self):
|
||||
initiator_name = "%(iqn)s.%(domain)s" % {
|
||||
'iqn': self._FAKE_INITIATOR_IQN_NAME,
|
||||
'domain': self._FAKE_DOMAIN_NAME
|
||||
}
|
||||
|
||||
self._check_get_iscsi_initiator(initiator_name,
|
||||
side_effect=Exception)
|
||||
|
||||
def _check_get_iscsi_initiator(self, expected=None, side_effect=None):
|
||||
mock_computer = mock.MagicMock()
|
||||
mock_computer.name = self._FAKE_COMPUTER_NAME
|
||||
mock_computer.Domain = self._FAKE_DOMAIN_NAME
|
||||
self._utils._conn_cimv2.Win32_ComputerSystem.return_value = [
|
||||
mock_computer]
|
||||
|
||||
expected_key_path = (
|
||||
"SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\"
|
||||
"iSCSI\\Discovery")
|
||||
|
||||
with mock.patch.object(base_iscsi_utils,
|
||||
'winreg', create=True) as mock_winreg:
|
||||
mock_winreg.CloseKey.side_effect = side_effect
|
||||
mock_winreg.QueryValueEx.return_value = [expected]
|
||||
mock_winreg.OpenKey.return_value = mock.sentinel.key
|
||||
|
||||
initiator_name = self._utils.get_iscsi_initiator()
|
||||
self.assertEqual(expected, initiator_name)
|
||||
mock_winreg.OpenKey.assert_called_once_with(
|
||||
mock_winreg.HKEY_LOCAL_MACHINE,
|
||||
expected_key_path,
|
||||
0,
|
||||
mock_winreg.KEY_WOW64_64KEY + mock_winreg.KEY_ALL_ACCESS)
|
||||
mock_winreg.QueryValueEx.assert_called_once_with(
|
||||
mock.sentinel.key, "DefaultInitiatorName")
|
||||
mock_winreg.CloseKey.assert_called_once_with(mock.sentinel.key)
|
||||
|
||||
def test_get_drive_number_from_disk_path(self):
|
||||
fake_disk_path = (
|
||||
'\\\\WIN-I5BTVHOIFGK\\root\\virtualization\\v2:Msvm_DiskDrive.'
|
||||
'CreationClassName="Msvm_DiskDrive",DeviceID="Microsoft:353B3BE8-'
|
||||
'310C-4cf4-839E-4E1B14616136\\\\1",SystemCreationClassName='
|
||||
'"Msvm_ComputerSystem",SystemName="WIN-I5BTVHOIFGK"')
|
||||
expected_disk_number = 1
|
||||
|
||||
ret_val = self._utils._get_drive_number_from_disk_path(
|
||||
fake_disk_path)
|
||||
|
||||
self.assertEqual(expected_disk_number, ret_val)
|
||||
|
||||
def test_get_drive_number_not_found(self):
|
||||
fake_disk_path = 'fake_disk_path'
|
||||
|
||||
ret_val = self._utils._get_drive_number_from_disk_path(
|
||||
fake_disk_path)
|
||||
|
||||
self.assertFalse(ret_val)
|
||||
|
||||
@mock.patch.object(base_iscsi_utils.BaseISCSIInitiatorUtils,
|
||||
"_get_drive_number_from_disk_path")
|
||||
def test_get_session_id_from_mounted_disk(self, mock_get_session_id):
|
||||
mock_get_session_id.return_value = mock.sentinel.FAKE_DEVICE_NUMBER
|
||||
mock_initiator_session = self._create_initiator_session()
|
||||
mock_ses_class = self._utils._conn_wmi.MSiSCSIInitiator_SessionClass
|
||||
mock_ses_class.return_value = [mock_initiator_session]
|
||||
|
||||
session_id = self._utils.get_session_id_from_mounted_disk(
|
||||
self._FAKE_DISK_PATH)
|
||||
|
||||
self.assertEqual(mock.sentinel.FAKE_SESSION_ID, session_id)
|
||||
|
||||
def test_get_devices_for_target(self):
|
||||
init_session = self._create_initiator_session()
|
||||
mock_ses_class = self._utils._conn_wmi.MSiSCSIInitiator_SessionClass
|
||||
mock_ses_class.return_value = [init_session]
|
||||
devices = self._utils._get_devices_for_target(
|
||||
mock.sentinel.FAKE_IQN)
|
||||
|
||||
self.assertEqual(init_session.Devices, devices)
|
||||
|
||||
def test_get_devices_for_target_not_found(self):
|
||||
mock_ses_class = self._utils._conn_wmi.MSiSCSIInitiator_SessionClass
|
||||
mock_ses_class.return_value = []
|
||||
devices = self._utils._get_devices_for_target(mock.sentinel.FAKE_IQN)
|
||||
|
||||
self.assertEqual(0, len(devices))
|
||||
|
||||
@mock.patch.object(base_iscsi_utils.BaseISCSIInitiatorUtils,
|
||||
'_get_devices_for_target')
|
||||
def test_get_device_number_for_target(self, fake_get_devices):
|
||||
init_session = self._create_initiator_session()
|
||||
fake_get_devices.return_value = init_session.Devices
|
||||
mock_ses_class = self._utils._conn_wmi.MSiSCSIInitiator_SessionClass
|
||||
mock_ses_class.return_value = [init_session]
|
||||
device_number = self._utils.get_device_number_for_target(
|
||||
mock.sentinel.FAKE_IQN, mock.sentinel.FAKE_LUN)
|
||||
|
||||
self.assertEqual(mock.sentinel.FAKE_DEVICE_NUMBER, device_number)
|
||||
|
||||
@mock.patch.object(base_iscsi_utils.BaseISCSIInitiatorUtils,
|
||||
'_get_devices_for_target')
|
||||
def test_get_target_lun_count(self, fake_get_devices):
|
||||
init_session = self._create_initiator_session()
|
||||
# Only disk devices are being counted.
|
||||
disk_device = mock.Mock(DeviceType=self._utils._FILE_DEVICE_DISK)
|
||||
init_session.Devices.append(disk_device)
|
||||
fake_get_devices.return_value = init_session.Devices
|
||||
|
||||
lun_count = self._utils.get_target_lun_count(mock.sentinel.FAKE_IQN)
|
||||
|
||||
self.assertEqual(1, lun_count)
|
||||
|
||||
@mock.patch.object(base_iscsi_utils.BaseISCSIInitiatorUtils,
|
||||
"_get_drive_number_from_disk_path")
|
||||
def test_get_target_from_disk_path(self, mock_get_session_id):
|
||||
mock_get_session_id.return_value = mock.sentinel.FAKE_DEVICE_NUMBER
|
||||
init_sess = self._create_initiator_session()
|
||||
mock_ses_class = self._utils._conn_wmi.MSiSCSIInitiator_SessionClass
|
||||
mock_ses_class.return_value = [init_sess]
|
||||
|
||||
(target_name, scsi_lun) = self._utils.get_target_from_disk_path(
|
||||
self._FAKE_DISK_PATH)
|
||||
|
||||
self.assertEqual(mock.sentinel.FAKE_TARGET_NAME, target_name)
|
||||
self.assertEqual(mock.sentinel.FAKE_LUN, scsi_lun)
|
||||
|
||||
def _create_initiator_session(self):
|
||||
device = mock.MagicMock()
|
||||
device.ScsiLun = mock.sentinel.FAKE_LUN
|
||||
device.DeviceNumber = mock.sentinel.FAKE_DEVICE_NUMBER
|
||||
device.TargetName = mock.sentinel.FAKE_TARGET_NAME
|
||||
init_session = mock.MagicMock()
|
||||
init_session.Devices = [device]
|
||||
init_session.SessionId = mock.sentinel.FAKE_SESSION_ID
|
||||
|
||||
return init_session
|
@ -1,327 +0,0 @@
|
||||
# Copyright 2015 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import ctypes
|
||||
import mock
|
||||
from oslotest import base
|
||||
import six
|
||||
|
||||
from os_win import exceptions
|
||||
from os_win.utils.storage.initiator import fc_structures as fc_struct
|
||||
from os_win.utils.storage.initiator import fc_utils
|
||||
|
||||
|
||||
class FCUtilsTestCase(base.BaseTestCase):
    """Unit tests for the Hyper-V FCUtils class."""

    _FAKE_ADAPTER_NAME = 'fake_adapter_name'
    _FAKE_ADAPTER_WWN = list(range(8))

    @mock.patch.object(fc_utils, 'wmi', create=True)
    def setUp(self, mock_wmi):
        super(FCUtilsTestCase, self).setUp()
        self._setup_lib_mocks()

        self._fc_utils = fc_utils.FCUtils()
        # Most tests only need assertions on the arguments passed to
        # _run_and_check_output, so it's mocked out by default. Tests
        # exercising the real helper stop this mocker explicitly.
        self._run_mocker = mock.patch.object(self._fc_utils,
                                             '_run_and_check_output')
        self._run_mocker.start()

        self._mock_run = self._fc_utils._run_and_check_output

        self.addCleanup(mock.patch.stopall)

    def _setup_lib_mocks(self):
        self._ctypes = mock.Mock()
        # This is used in order to easily make assertions on the variables
        # passed by reference.
        self._ctypes.byref = lambda x: (x, "byref")

        mock.patch.object(fc_utils, 'hbaapi', create=True).start()
        self._ctypes_mocker = mock.patch.object(fc_utils, 'ctypes',
                                                self._ctypes)
        self._ctypes_mocker.start()

    def test_run_and_check_output(self):
        # The helper must delegate to the Win32Utils helper, requesting
        # FC specific exceptions on failure.
        self._run_mocker.stop()
        with mock.patch.object(fc_utils.win32utils.Win32Utils,
                               'run_and_check_output') as mock_win32_run:
            self._fc_utils._run_and_check_output(
                adapter_name=self._FAKE_ADAPTER_NAME)

            mock_win32_run.assert_called_once_with(
                adapter_name=self._FAKE_ADAPTER_NAME,
                failure_exc=exceptions.FCWin32Exception)

    def test_get_fc_hba_count(self):
        hba_count = self._fc_utils.get_fc_hba_count()

        fc_utils.hbaapi.HBA_GetNumberOfAdapters.assert_called_once_with()
        self.assertEqual(fc_utils.hbaapi.HBA_GetNumberOfAdapters.return_value,
                         hba_count)

    def _test_open_adapter(self, adapter_name=None, adapter_wwn=None):
        # The real ctypes module is needed here in order to inspect the
        # values wrapped in ctypes types by the tested method.
        self._ctypes_mocker.stop()
        self._mock_run.return_value = mock.sentinel.handle

        if adapter_name:
            expected_func = fc_utils.hbaapi.HBA_OpenAdapter
        elif adapter_wwn:
            expected_func = fc_utils.hbaapi.HBA_OpenAdapterByWWN

        resulted_handle = self._fc_utils._open_adapter(
            adapter_name=adapter_name, adapter_wwn=adapter_wwn)

        args_list = self._mock_run.call_args_list[0][0]
        self.assertEqual(expected_func, args_list[0])
        if adapter_name:
            self.assertEqual(six.b(adapter_name),
                             args_list[1].value)
        else:
            self.assertEqual(adapter_wwn, list(args_list[1]))

        self.assertEqual(mock.sentinel.handle, resulted_handle)

    def test_open_adapter_by_name(self):
        self._test_open_adapter(adapter_name=self._FAKE_ADAPTER_NAME)

    def test_open_adapter_by_wwn(self):
        self._test_open_adapter(adapter_wwn=self._FAKE_ADAPTER_WWN)

    def test_open_adapter_not_specified(self):
        # An adapter name or WWN is mandatory.
        self.assertRaises(exceptions.FCException,
                          self._fc_utils._open_adapter)

    def test_close_adapter(self):
        self._fc_utils._close_adapter(mock.sentinel.hba_handle)
        fc_utils.hbaapi.HBA_CloseAdapter.assert_called_once_with(
            mock.sentinel.hba_handle)

    @mock.patch.object(fc_utils.FCUtils, '_open_adapter')
    @mock.patch.object(fc_utils.FCUtils, '_close_adapter')
    def test_get_hba_handle(self, mock_close_adapter, mock_open_adapter):
        # The context manager must open the adapter on entry and close
        # it when leaving the block.
        with self._fc_utils._get_hba_handle(
                adapter_name=self._FAKE_ADAPTER_NAME):
            mock_open_adapter.assert_called_once_with(
                adapter_name=self._FAKE_ADAPTER_NAME)
        mock_close_adapter.assert_called_once_with(
            mock_open_adapter.return_value)

    @mock.patch.object(ctypes, 'byref')
    def test_get_adapter_name(self, mock_byref):
        self._ctypes_mocker.stop()
        fake_adapter_index = 1

        def update_buff(buff):
            buff.value = six.b(self._FAKE_ADAPTER_NAME)

        # Emulate the hbaapi function filling in the out buffer.
        mock_byref.side_effect = update_buff

        resulted_adapter_name = self._fc_utils._get_adapter_name(
            fake_adapter_index)

        args_list = self._mock_run.call_args_list[0][0]

        self.assertEqual(fc_utils.hbaapi.HBA_GetAdapterName,
                         args_list[0])
        self.assertIsInstance(args_list[1], ctypes.c_uint32)
        self.assertEqual(fake_adapter_index, args_list[1].value)

        arg_byref = mock_byref.call_args_list[0][0][0]
        buff = ctypes.cast(arg_byref, ctypes.POINTER(
            ctypes.c_char * 256)).contents
        self.assertIsInstance(buff, ctypes.c_char * 256)
        self.assertEqual(self._FAKE_ADAPTER_NAME, resulted_adapter_name)

    @mock.patch.object(fc_struct, 'get_target_mapping_struct')
    def test_get_target_mapping(self, mock_get_target_mapping):
        fake_entry_count = 10
        # The first call is expected to report that a larger buffer is
        # needed, after which the tested method should retry using the
        # returned entry count.
        hresults = [fc_utils.HBA_STATUS_ERROR_MORE_DATA,
                    fc_utils.HBA_STATUS_OK]
        mock_mapping = mock.Mock(NumberOfEntries=fake_entry_count)
        mock_get_target_mapping.return_value = mock_mapping
        self._mock_run.side_effect = hresults

        resulted_mapping = self._fc_utils._get_target_mapping(
            mock.sentinel.hba_handle)

        expected_calls = [
            mock.call(fc_utils.hbaapi.HBA_GetFcpTargetMapping,
                      mock.sentinel.hba_handle,
                      self._ctypes.byref(mock_mapping),
                      ignored_error_codes=[fc_utils.HBA_STATUS_ERROR_MORE_DATA]
                      )] * 2
        self._mock_run.assert_has_calls(expected_calls)
        self.assertEqual(mock_mapping, resulted_mapping)
        mock_get_target_mapping.assert_has_calls([mock.call(0),
                                                  mock.call(fake_entry_count)])

    @mock.patch.object(fc_struct, 'HBA_PortAttributes')
    def test_get_adapter_port_attributes(self, mock_class_HBA_PortAttributes):
        resulted_port_attributes = self._fc_utils._get_adapter_port_attributes(
            mock.sentinel.hba_handle, mock.sentinel.port_index)

        self._mock_run.assert_called_once_with(
            fc_utils.hbaapi.HBA_GetAdapterPortAttributes,
            mock.sentinel.hba_handle,
            mock.sentinel.port_index,
            self._ctypes.byref(mock_class_HBA_PortAttributes.return_value))

        self.assertEqual(mock_class_HBA_PortAttributes.return_value,
                         resulted_port_attributes)

    @mock.patch.object(fc_struct, 'HBA_AdapterAttributes')
    def test_get_adapter_attributes(self, mock_class_HBA_AdapterAttributes):
        resulted_hba_attributes = self._fc_utils._get_adapter_attributes(
            mock.sentinel.hba_handle)

        self._mock_run.assert_called_once_with(
            fc_utils.hbaapi.HBA_GetAdapterAttributes,
            mock.sentinel.hba_handle,
            self._ctypes.byref(mock_class_HBA_AdapterAttributes.return_value))

        self.assertEqual(mock_class_HBA_AdapterAttributes.return_value,
                         resulted_hba_attributes)

    @mock.patch.object(fc_utils.FCUtils, 'get_fc_hba_count')
    def test_get_fc_hba_ports_missing_hbas(self, mock_get_fc_hba_count):
        mock_get_fc_hba_count.return_value = 0

        resulted_hba_ports = self._fc_utils.get_fc_hba_ports()

        self.assertEqual([], resulted_hba_ports)

    @mock.patch.object(fc_utils.FCUtils, '_get_fc_hba_adapter_ports')
    @mock.patch.object(fc_utils.FCUtils, '_get_adapter_name')
    @mock.patch.object(fc_utils.FCUtils, 'get_fc_hba_count')
    def test_get_fc_hba_ports(self, mock_get_fc_hba_count,
                              mock_get_adapter_name,
                              mock_get_adapter_ports):
        fake_adapter_count = 2

        mock_get_adapter_name.return_value = mock.sentinel.adapter_name
        mock_get_fc_hba_count.return_value = fake_adapter_count
        # The first adapter fails to report its ports. The tested method
        # is expected to skip it and still return the other adapter's
        # ports.
        mock_get_adapter_ports.side_effect = [Exception,
                                              [mock.sentinel.port]]

        expected_hba_ports = [mock.sentinel.port]
        resulted_hba_ports = self._fc_utils.get_fc_hba_ports()
        self.assertEqual(expected_hba_ports, resulted_hba_ports)

        mock_get_adapter_name.assert_has_calls(
            [mock.call(index) for index in range(fake_adapter_count)])
        mock_get_adapter_ports.assert_has_calls(
            [mock.call(mock.sentinel.adapter_name)] * fake_adapter_count)

    @mock.patch.object(fc_utils.FCUtils, '_open_adapter')
    @mock.patch.object(fc_utils.FCUtils, '_close_adapter')
    @mock.patch.object(fc_utils.FCUtils, '_get_adapter_port_attributes')
    @mock.patch.object(fc_utils.FCUtils, '_get_adapter_attributes')
    def test_get_fc_hba_adapter_ports(self, mock_get_adapter_attributes,
                                      mock_get_adapter_port_attributes,
                                      mock_close_adapter,
                                      mock_open_adapter):
        fake_port_count = 1
        fake_port_index = 0
        # Local WWNs
        fake_node_wwn = list(range(3))
        fake_port_wwn = list(range(3))

        mock_adapter_attributes = mock.MagicMock()
        mock_adapter_attributes.NumberOfPorts = fake_port_count
        mock_port_attributes = mock.MagicMock()
        mock_port_attributes.NodeWWN = fake_node_wwn
        mock_port_attributes.PortWWN = fake_port_wwn

        mock_get_adapter_attributes.return_value = mock_adapter_attributes
        mock_get_adapter_port_attributes.return_value = mock_port_attributes

        resulted_hba_ports = self._fc_utils._get_fc_hba_adapter_ports(
            mock.sentinel.adapter_name)

        expected_hba_ports = [{
            'node_name': self._fc_utils._wwn_array_to_hex_str(fake_node_wwn),
            'port_name': self._fc_utils._wwn_array_to_hex_str(fake_port_wwn)
        }]
        self.assertEqual(expected_hba_ports, resulted_hba_ports)

        mock_open_adapter.assert_called_once_with(
            adapter_name=mock.sentinel.adapter_name)
        mock_close_adapter.assert_called_once_with(
            mock_open_adapter.return_value)
        mock_get_adapter_attributes.assert_called_once_with(
            mock_open_adapter.return_value)
        mock_get_adapter_port_attributes.assert_called_once_with(
            mock_open_adapter.return_value, fake_port_index)

    def test_wwn_hex_string_to_array(self):
        fake_wwn_hex_string = '000102'

        resulted_array = self._fc_utils._wwn_hex_string_to_array(
            fake_wwn_hex_string)

        expected_wwn_hex_array = list(range(3))
        self.assertEqual(expected_wwn_hex_array, resulted_array)

    def test_wwn_array_to_hex_str(self):
        fake_wwn_array = list(range(3))

        resulted_string = self._fc_utils._wwn_array_to_hex_str(fake_wwn_array)

        expected_string = '000102'
        self.assertEqual(expected_string, resulted_string)

    @mock.patch.object(fc_utils.FCUtils, '_open_adapter')
    @mock.patch.object(fc_utils.FCUtils, '_close_adapter')
    @mock.patch.object(fc_utils.FCUtils, '_get_target_mapping')
    def test_get_fc_target_mapping(self, mock_get_target_mapping,
                                   mock_close_adapter, mock_open_adapter):
        # Local WWNN
        fake_node_wwn_string = "123"
        # Remote WWNs
        fake_node_wwn = list(range(3))
        fake_port_wwn = list(range(3))

        mock_fcp_mappings = mock.MagicMock()
        mock_entry = mock.MagicMock()
        mock_entry.FcpId.NodeWWN = fake_node_wwn
        mock_entry.FcpId.PortWWN = fake_port_wwn
        mock_entry.ScsiId.OSDeviceName = mock.sentinel.OSDeviceName
        mock_entry.ScsiId.ScsiOSLun = mock.sentinel.ScsiOSLun
        mock_fcp_mappings.Entries = [mock_entry]
        mock_get_target_mapping.return_value = mock_fcp_mappings
        mock_node_wwn = self._fc_utils._wwn_hex_string_to_array(
            fake_node_wwn_string)

        resulted_mappings = self._fc_utils.get_fc_target_mappings(
            fake_node_wwn_string)

        expected_mappings = [{
            'node_name': self._fc_utils._wwn_array_to_hex_str(fake_node_wwn),
            'port_name': self._fc_utils._wwn_array_to_hex_str(fake_port_wwn),
            'device_name': mock.sentinel.OSDeviceName,
            'lun': mock.sentinel.ScsiOSLun
        }]
        self.assertEqual(expected_mappings, resulted_mappings)
        mock_open_adapter.assert_called_once_with(adapter_wwn=mock_node_wwn)
        mock_close_adapter.assert_called_once_with(
            mock_open_adapter.return_value)

    def test_refresh_hba_configuration(self):
        self._fc_utils.refresh_hba_configuration()

        expected_func = fc_utils.hbaapi.HBA_RefreshAdapterConfiguration
        expected_func.assert_called_once_with()
|
@ -1,160 +0,0 @@
|
||||
# Copyright 2014 Cloudbase Solutions Srl
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
from oslotest import base
|
||||
|
||||
from os_win import exceptions
|
||||
from os_win.utils.storage.initiator import iscsi_cli_utils
|
||||
|
||||
|
||||
class ISCSIInitiatorCLIUtilsTestCase(base.BaseTestCase):
    """Unit tests for the Hyper-V ISCSIInitiatorCLIUtils class."""

    _FAKE_PORTAL_ADDR = '10.1.1.1'
    _FAKE_PORTAL_PORT = '3260'
    _FAKE_LUN = 0
    _FAKE_TARGET = 'iqn.2010-10.org.openstack:fake_target'

    _FAKE_STDOUT_VALUE = 'The operation completed successfully'

    def setUp(self):
        super(ISCSIInitiatorCLIUtilsTestCase, self).setUp()
        self._initiator = iscsi_cli_utils.ISCSIInitiatorCLIUtils()
        self._initiator._conn_wmi = mock.MagicMock()
        self._initiator._conn_cimv2 = mock.MagicMock()

    def _test_login_target_portal(self, portal_connected):
        # An already connected portal is expected to merely be refreshed,
        # while a new one must be added.
        fake_portal = '%s:%s' % (self._FAKE_PORTAL_ADDR,
                                 self._FAKE_PORTAL_PORT)

        self._initiator.execute = mock.MagicMock()
        if portal_connected:
            exec_output = 'Address and Socket: %s %s' % (
                self._FAKE_PORTAL_ADDR, self._FAKE_PORTAL_PORT)
        else:
            exec_output = ''

        self._initiator.execute.return_value = exec_output

        self._initiator._login_target_portal(fake_portal)

        call_list = self._initiator.execute.call_args_list
        all_call_args = [arg for call in call_list for arg in call[0]]

        if portal_connected:
            self.assertIn('RefreshTargetPortal', all_call_args)
        else:
            self.assertIn('AddTargetPortal', all_call_args)

    def test_login_connected_portal(self):
        self._test_login_target_portal(True)

    def test_login_new_portal(self):
        self._test_login_target_portal(False)

    @mock.patch.object(iscsi_cli_utils, 'CONF')
    def _test_login_target(self, mock_CONF, target_connected=False,
                           raise_exception=False, use_chap=False):
        mock_CONF.hyperv.volume_attach_retry_count = 4
        mock_CONF.hyperv.volume_attach_retry_interval = 0
        fake_portal = '%s:%s' % (self._FAKE_PORTAL_ADDR,
                                 self._FAKE_PORTAL_PORT)
        self._initiator.execute = mock.MagicMock()
        self._initiator._login_target_portal = mock.MagicMock()

        if use_chap:
            username, password = (mock.sentinel.username,
                                  mock.sentinel.password)
        else:
            username, password = None, None

        if target_connected:
            self._initiator.execute.return_value = self._FAKE_TARGET
        elif raise_exception:
            # The target never shows up, exhausting the retries.
            self._initiator.execute.return_value = ''
        else:
            # The target shows up only after a few retries.
            self._initiator.execute.side_effect = (
                ['', '', '', self._FAKE_TARGET, ''])

        if raise_exception:
            self.assertRaises(exceptions.HyperVException,
                              self._initiator.login_storage_target,
                              self._FAKE_LUN, self._FAKE_TARGET,
                              fake_portal, username, password)
        else:
            self._initiator.login_storage_target(self._FAKE_LUN,
                                                 self._FAKE_TARGET,
                                                 fake_portal,
                                                 username, password)

        if target_connected:
            # No login attempt is expected for already connected targets.
            call_list = self._initiator.execute.call_args_list
            all_call_args = [arg for call in call_list for arg in call[0]]
            self.assertNotIn('qlogintarget', all_call_args)
        else:
            self._initiator.execute.assert_any_call(
                'iscsicli.exe', 'qlogintarget',
                self._FAKE_TARGET, username, password)

    def test_login_connected_target(self):
        self._test_login_target(target_connected=True)

    def test_login_disconnected_target(self):
        self._test_login_target()

    def test_login_target_exception(self):
        self._test_login_target(raise_exception=True)

    def test_login_target_using_chap(self):
        self._test_login_target(use_chap=True)

    def _test_execute_wrapper(self, raise_exception):
        # The wrapper checks the command output for the expected success
        # message, raising an exception when it's missing.
        fake_cmd = ('iscsicli.exe', 'ListTargetPortals')

        if raise_exception:
            output = 'fake error'
        else:
            output = 'The operation completed successfully'

        with mock.patch('os_win._utils.execute') as fake_execute:
            fake_execute.return_value = (output, None)

            if raise_exception:
                self.assertRaises(exceptions.HyperVException,
                                  self._initiator.execute,
                                  *fake_cmd)
            else:
                ret_val = self._initiator.execute(*fake_cmd)
                self.assertEqual(output, ret_val)

    def test_execute_raise_exception(self):
        self._test_execute_wrapper(True)

    def test_execute_exception(self):
        self._test_execute_wrapper(False)

    @mock.patch.object(iscsi_cli_utils, '_utils')
    def test_logout_storage_target(self, mock_utils):
        mock_utils.execute.return_value = (self._FAKE_STDOUT_VALUE,
                                           mock.sentinel.FAKE_STDERR_VALUE)
        session = mock.MagicMock()
        session.SessionId = mock.sentinel.FAKE_SESSION_ID
        self._initiator._conn_wmi.query.return_value = [session]

        self._initiator.logout_storage_target(mock.sentinel.FAKE_IQN)
        mock_utils.execute.assert_called_once_with(
            'iscsicli.exe', 'logouttarget', mock.sentinel.FAKE_SESSION_ID)
|
@ -1,56 +0,0 @@
|
||||
# Copyright 2015 Cloudbase Solutions Srl
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import ctypes
|
||||
|
||||
import six
|
||||
|
||||
from os_win import constants
|
||||
from os_win.tests import test_base
|
||||
from os_win.utils.storage.initiator import iscsidsc_structures as iscsi_struct
|
||||
|
||||
|
||||
class ISCSIStructTestCase(test_base.OsWinBaseTestCase):
    """Tests for the iSCSI discovery ctypes structure wrappers."""

    @staticmethod
    def _buff_as_bytes(puchar_field, length):
        # Dereference a PUCHAR field as a fixed size char array, returning
        # its raw byte contents.
        char_arr_p = ctypes.POINTER(ctypes.c_char * length)
        return ctypes.cast(puchar_field, char_arr_p).contents.value

    def test_iscsi_login_opts_setup(self):
        # Check that ISCSI_LOGIN_OPTIONS properly wraps the CHAP
        # credentials passed to its constructor.
        chap_username = 'fake_chap_username'
        chap_secret = 'fake_chap_secret'

        login_opts = iscsi_struct.ISCSI_LOGIN_OPTIONS(
            Username=chap_username,
            Password=chap_secret,
            AuthType=constants.ISCSI_CHAP_AUTH_TYPE)

        for credential_field in (login_opts.Username, login_opts.Password):
            self.assertIsInstance(credential_field, iscsi_struct.PUCHAR)

        self.assertEqual(len(chap_username), login_opts.UsernameLength)
        self.assertEqual(len(chap_secret), login_opts.PasswordLength)

        self.assertEqual(
            six.b(chap_username),
            self._buff_as_bytes(login_opts.Username, len(chap_username)))
        self.assertEqual(
            six.b(chap_secret),
            self._buff_as_bytes(login_opts.Password, len(chap_secret)))

        expected_bitmap = (iscsi_struct.ISCSI_LOGIN_OPTIONS_USERNAME |
                           iscsi_struct.ISCSI_LOGIN_OPTIONS_PASSWORD |
                           iscsi_struct.ISCSI_LOGIN_OPTIONS_AUTH_TYPE)
        self.assertEqual(expected_bitmap, login_opts.InformationSpecified)
|
@ -1,813 +0,0 @@
|
||||
# Copyright 2016 Cloudbase Solutions Srl
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import collections
|
||||
import ctypes
|
||||
import mock
|
||||
|
||||
from os_win import _utils
|
||||
from os_win import constants
|
||||
from os_win import exceptions
|
||||
from os_win.tests import test_base
|
||||
from os_win.utils.storage.initiator import iscsi_utils
|
||||
from os_win.utils.storage.initiator import iscsidsc_structures as iscsi_struct
|
||||
from os_win.utils.storage.initiator import iscsierr
|
||||
|
||||
|
||||
class ISCSIInitiatorUtilsTestCase(test_base.OsWinBaseTestCase):
|
||||
"""Unit tests for the Hyper-V ISCSIInitiatorUtils class."""
|
||||
|
||||
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, '__init__',
|
||||
lambda *args, **kwargs: None)
|
||||
def setUp(self):
|
||||
super(ISCSIInitiatorUtilsTestCase, self).setUp()
|
||||
|
||||
self._initiator = iscsi_utils.ISCSIInitiatorUtils()
|
||||
self._initiator._win32utils = mock.Mock()
|
||||
self._initiator._diskutils = mock.Mock()
|
||||
|
||||
self._iscsidsc = mock.patch.object(
|
||||
iscsi_utils, 'iscsidsc', create=True).start()
|
||||
|
||||
self._run_mocker = mock.patch.object(self._initiator,
|
||||
'_run_and_check_output')
|
||||
self._mock_run = self._run_mocker.start()
|
||||
|
||||
iscsi_utils.portal_map = collections.defaultdict(set)
|
||||
|
||||
def _mock_ctypes(self):
|
||||
self._ctypes = mock.Mock()
|
||||
# This is used in order to easily make assertions on the variables
|
||||
# passed by reference.
|
||||
self._ctypes.byref = lambda x: (x, "byref")
|
||||
|
||||
mock.patch.object(iscsi_utils, 'ctypes', self._ctypes).start()
|
||||
|
||||
def _get_fake_iscsi_utils_getter_func(self, func_side_effect,
|
||||
decorator_args,
|
||||
returned_element_count=None,
|
||||
required_buff_sz=None):
|
||||
@iscsi_utils.ensure_buff_and_retrieve_items(**decorator_args)
|
||||
def fake_func(inst, buff=None, buff_size=None,
|
||||
element_count=None, *args, **kwargs):
|
||||
raised_exc = None
|
||||
try:
|
||||
# Those arguments will always be ULONGs, as requested
|
||||
# by the iscsidsc functions.
|
||||
self.assertIsInstance(buff_size, ctypes.c_ulong)
|
||||
self.assertIsInstance(element_count, ctypes.c_ulong)
|
||||
func_side_effect(buff=buff, buff_size_val=buff_size.value,
|
||||
element_count_val=element_count.value,
|
||||
*args, **kwargs)
|
||||
except Exception as ex:
|
||||
raised_exc = ex
|
||||
|
||||
if returned_element_count:
|
||||
element_count.value = returned_element_count
|
||||
if required_buff_sz:
|
||||
buff_size.value = required_buff_sz
|
||||
|
||||
if raised_exc:
|
||||
raise raised_exc
|
||||
return mock.sentinel.ret_val
|
||||
return fake_func
|
||||
|
||||
@mock.patch.object(iscsi_utils, '_get_items_from_buff')
|
||||
def _test_ensure_buff_decorator(self, mock_get_items,
|
||||
required_buff_sz=None,
|
||||
returned_element_count=None,
|
||||
parse_output=False):
|
||||
insufficient_buff_exc = exceptions.Win32Exception(
|
||||
message='fake_err_msg',
|
||||
error_code=iscsi_utils.ERROR_INSUFFICIENT_BUFFER)
|
||||
func_requests_buff_sz = required_buff_sz is not None
|
||||
struct_type = ctypes.c_uint
|
||||
|
||||
decorator_args = dict(struct_type=struct_type,
|
||||
parse_output=parse_output,
|
||||
func_requests_buff_sz=func_requests_buff_sz)
|
||||
|
||||
func_side_effect = mock.Mock(side_effect=(insufficient_buff_exc, None))
|
||||
fake_func = self._get_fake_iscsi_utils_getter_func(
|
||||
returned_element_count=returned_element_count,
|
||||
required_buff_sz=required_buff_sz,
|
||||
func_side_effect=func_side_effect,
|
||||
decorator_args=decorator_args)
|
||||
|
||||
ret_val = fake_func(self._initiator, fake_arg=mock.sentinel.arg)
|
||||
if parse_output:
|
||||
self.assertEqual(mock_get_items.return_value, ret_val)
|
||||
else:
|
||||
self.assertEqual(mock.sentinel.ret_val, ret_val)
|
||||
|
||||
# We expect our decorated method to be called exactly two times.
|
||||
first_call_args_dict = func_side_effect.call_args_list[0][1]
|
||||
self.assertIsInstance(first_call_args_dict['buff'],
|
||||
ctypes.c_ubyte * 0)
|
||||
self.assertEqual(first_call_args_dict['buff_size_val'], 0)
|
||||
self.assertEqual(first_call_args_dict['element_count_val'], 0)
|
||||
|
||||
if required_buff_sz:
|
||||
expected_buff_sz = required_buff_sz
|
||||
else:
|
||||
expected_buff_sz = ctypes.sizeof(
|
||||
struct_type) * returned_element_count
|
||||
|
||||
second_call_args_dict = func_side_effect.call_args_list[1][1]
|
||||
self.assertIsInstance(second_call_args_dict['buff'],
|
||||
ctypes.c_ubyte * expected_buff_sz)
|
||||
self.assertEqual(second_call_args_dict['buff_size_val'],
|
||||
required_buff_sz or 0)
|
||||
self.assertEqual(second_call_args_dict['element_count_val'],
|
||||
returned_element_count or 0)
|
||||
|
||||
def test_ensure_buff_func_requests_buff_sz(self):
|
||||
self._test_ensure_buff_decorator(required_buff_sz=10,
|
||||
parse_output=True)
|
||||
|
||||
def test_ensure_buff_func_requests_el_count(self):
|
||||
self._test_ensure_buff_decorator(returned_element_count=5)
|
||||
|
||||
def test_ensure_buff_func_unexpected_exception(self):
|
||||
fake_exc = exceptions.Win32Exception(message='fake_message',
|
||||
error_code=1)
|
||||
|
||||
func_side_effect = mock.Mock(side_effect=fake_exc)
|
||||
fake_func = self._get_fake_iscsi_utils_getter_func(
|
||||
func_side_effect=func_side_effect,
|
||||
decorator_args={})
|
||||
|
||||
self.assertRaises(exceptions.Win32Exception, fake_func,
|
||||
self._initiator)
|
||||
|
||||
def test_get_items_from_buff(self):
|
||||
fake_buff_contents = 'fake_buff_contents'
|
||||
fake_buff = (ctypes.c_wchar * len(fake_buff_contents))()
|
||||
fake_buff.value = fake_buff_contents
|
||||
|
||||
fake_buff = ctypes.cast(fake_buff, ctypes.POINTER(ctypes.c_ubyte))
|
||||
|
||||
result = iscsi_utils._get_items_from_buff(fake_buff, ctypes.c_wchar,
|
||||
len(fake_buff_contents))
|
||||
|
||||
self.assertEqual(fake_buff_contents, result.value)
|
||||
|
||||
def test_run_and_check_output(self):
|
||||
self._run_mocker.stop()
|
||||
self._initiator._win32utils = mock.Mock()
|
||||
mock_win32utils_run_and_check_output = (
|
||||
self._initiator._win32utils.run_and_check_output)
|
||||
|
||||
self._initiator._run_and_check_output(mock.sentinel.func,
|
||||
mock.sentinel.arg,
|
||||
fake_kwarg=mock.sentinel.kwarg)
|
||||
|
||||
mock_win32utils_run_and_check_output.assert_called_once_with(
|
||||
mock.sentinel.func,
|
||||
mock.sentinel.arg,
|
||||
fake_kwarg=mock.sentinel.kwarg,
|
||||
error_msg_src=iscsierr.err_msg_dict,
|
||||
failure_exc=exceptions.ISCSIInitiatorAPIException)
|
||||
|
||||
def test_get_iscsi_persistent_logins(self):
|
||||
self._mock_ctypes()
|
||||
|
||||
_get_iscsi_persistent_logins = _utils.get_wrapped_function(
|
||||
self._initiator._get_iscsi_persistent_logins)
|
||||
_get_iscsi_persistent_logins(
|
||||
self._initiator,
|
||||
buff=mock.sentinel.buff,
|
||||
buff_size=mock.sentinel.buff_size,
|
||||
element_count=mock.sentinel.element_count)
|
||||
|
||||
self._mock_run.assert_called_once_with(
|
||||
self._iscsidsc.ReportIScsiPersistentLoginsW,
|
||||
self._ctypes.byref(mock.sentinel.element_count),
|
||||
self._ctypes.byref(mock.sentinel.buff),
|
||||
self._ctypes.byref(mock.sentinel.buff_size))
|
||||
|
||||
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
|
||||
'_parse_string_list')
|
||||
def test_get_targets(self, mock_parse_string_list):
|
||||
self._mock_ctypes()
|
||||
|
||||
get_targets = _utils.get_wrapped_function(
|
||||
self._initiator.get_targets)
|
||||
mock_el_count = mock.Mock(value=mock.sentinel.element_count)
|
||||
|
||||
resulted_target_list = get_targets(
|
||||
self._initiator,
|
||||
forced_update=mock.sentinel.forced_update,
|
||||
element_count=mock_el_count,
|
||||
buff=mock.sentinel.buff)
|
||||
self.assertEqual(mock_parse_string_list.return_value,
|
||||
resulted_target_list)
|
||||
|
||||
self._mock_run.assert_called_once_with(
|
||||
self._iscsidsc.ReportIScsiTargetsW,
|
||||
mock.sentinel.forced_update,
|
||||
self._ctypes.byref(mock_el_count),
|
||||
self._ctypes.byref(mock.sentinel.buff))
|
||||
mock_parse_string_list.assert_called_once_with(
|
||||
mock.sentinel.buff, mock.sentinel.element_count)
|
||||
|
||||
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
|
||||
'_parse_string_list')
|
||||
def test_get_initiators(self, mock_parse_string_list):
|
||||
self._mock_ctypes()
|
||||
|
||||
get_initiators = _utils.get_wrapped_function(
|
||||
self._initiator.get_iscsi_initiators)
|
||||
mock_el_count = mock.Mock(value=mock.sentinel.element_count)
|
||||
|
||||
resulted_initator_list = get_initiators(
|
||||
self._initiator,
|
||||
element_count=mock_el_count,
|
||||
buff=mock.sentinel.buff)
|
||||
self.assertEqual(mock_parse_string_list.return_value,
|
||||
resulted_initator_list)
|
||||
|
||||
self._mock_run.assert_called_once_with(
|
||||
self._iscsidsc.ReportIScsiInitiatorListW,
|
||||
self._ctypes.byref(mock_el_count),
|
||||
self._ctypes.byref(mock.sentinel.buff))
|
||||
mock_parse_string_list.assert_called_once_with(
|
||||
mock.sentinel.buff, mock.sentinel.element_count)
|
||||
|
||||
def test_parse_string_list(self):
|
||||
self._mock_ctypes()
|
||||
|
||||
fake_buff = 'fake\x00buff\x00\x00'
|
||||
self._ctypes.cast.return_value = fake_buff
|
||||
|
||||
str_list = self._initiator._parse_string_list(fake_buff,
|
||||
len(fake_buff))
|
||||
|
||||
self.assertEqual(['fake', 'buff'], str_list)
|
||||
|
||||
self._ctypes.cast.assert_called_once_with(fake_buff,
|
||||
self._ctypes.POINTER.return_value)
|
||||
self._ctypes.POINTER.assert_called_once_with(self._ctypes.c_wchar)
|
||||
|
||||
def test_get_iscsi_initiator(self):
|
||||
self._mock_ctypes()
|
||||
|
||||
self._ctypes.c_wchar = mock.MagicMock()
|
||||
fake_buff = (self._ctypes.c_wchar * (
|
||||
iscsi_struct.MAX_ISCSI_NAME_LEN + 1))()
|
||||
fake_buff.value = mock.sentinel.buff_value
|
||||
|
||||
resulted_iscsi_initiator = self._initiator.get_iscsi_initiator()
|
||||
|
||||
self._mock_run.assert_called_once_with(
|
||||
self._iscsidsc.GetIScsiInitiatorNodeNameW,
|
||||
self._ctypes.byref(fake_buff))
|
||||
self.assertEqual(mock.sentinel.buff_value,
|
||||
resulted_iscsi_initiator)
|
||||
|
||||
@mock.patch('socket.getfqdn')
|
||||
def test_get_iscsi_initiator_exception(self, mock_get_fqdn):
|
||||
fake_fqdn = 'fakehost.FAKE-DOMAIN.com'
|
||||
fake_exc = exceptions.ISCSIInitiatorAPIException(
|
||||
message='fake_message',
|
||||
error_code=1,
|
||||
func_name='fake_func')
|
||||
|
||||
self._mock_run.side_effect = fake_exc
|
||||
mock_get_fqdn.return_value = fake_fqdn
|
||||
|
||||
resulted_iqn = self._initiator.get_iscsi_initiator()
|
||||
|
||||
expected_iqn = "%s:%s" % (self._initiator._MS_IQN_PREFIX,
|
||||
fake_fqdn.lower())
|
||||
self.assertEqual(expected_iqn, resulted_iqn)
|
||||
|
||||
@mock.patch.object(ctypes, 'byref')
|
||||
@mock.patch.object(iscsi_struct, 'ISCSI_UNIQUE_CONNECTION_ID')
|
||||
@mock.patch.object(iscsi_struct, 'ISCSI_UNIQUE_SESSION_ID')
|
||||
def test_login_iscsi_target(self, mock_cls_ISCSI_UNIQUE_SESSION_ID,
|
||||
mock_cls_ISCSI_UNIQUE_CONNECTION_ID,
|
||||
mock_byref):
|
||||
fake_target_name = 'fake_target_name'
|
||||
|
||||
resulted_session_id, resulted_conection_id = (
|
||||
self._initiator._login_iscsi_target(fake_target_name))
|
||||
|
||||
args_list = self._mock_run.call_args_list[0][0]
|
||||
|
||||
self.assertIsInstance(args_list[1], ctypes.c_wchar_p)
|
||||
self.assertEqual(fake_target_name, args_list[1].value)
|
||||
self.assertIsInstance(args_list[4], ctypes.c_ulong)
|
||||
self.assertEqual(
|
||||
ctypes.c_ulong(iscsi_struct.ISCSI_ANY_INITIATOR_PORT).value,
|
||||
args_list[4].value)
|
||||
self.assertIsInstance(args_list[6], ctypes.c_ulonglong)
|
||||
self.assertEqual(iscsi_struct.ISCSI_DEFAULT_SECURITY_FLAGS,
|
||||
args_list[6].value)
|
||||
self.assertIsInstance(args_list[9], ctypes.c_ulong)
|
||||
self.assertEqual(0, args_list[9].value)
|
||||
|
||||
mock_byref.assert_has_calls([
|
||||
mock.call(mock_cls_ISCSI_UNIQUE_SESSION_ID.return_value),
|
||||
mock.call(mock_cls_ISCSI_UNIQUE_CONNECTION_ID.return_value)])
|
||||
self.assertEqual(
|
||||
mock_cls_ISCSI_UNIQUE_SESSION_ID.return_value,
|
||||
resulted_session_id)
|
||||
self.assertEqual(
|
||||
mock_cls_ISCSI_UNIQUE_CONNECTION_ID.return_value,
|
||||
resulted_conection_id)
|
||||
|
||||
def test_get_iscsi_sessions(self):
|
||||
self._mock_ctypes()
|
||||
|
||||
_get_iscsi_sessions = _utils.get_wrapped_function(
|
||||
self._initiator._get_iscsi_sessions)
|
||||
_get_iscsi_sessions(
|
||||
self._initiator,
|
||||
buff=mock.sentinel.buff,
|
||||
buff_size=mock.sentinel.buff_size,
|
||||
element_count=mock.sentinel.element_count)
|
||||
|
||||
self._mock_run.assert_called_once_with(
|
||||
self._iscsidsc.GetIScsiSessionListW,
|
||||
self._ctypes.byref(mock.sentinel.buff_size),
|
||||
self._ctypes.byref(mock.sentinel.element_count),
|
||||
self._ctypes.byref(mock.sentinel.buff))
|
||||
|
||||
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
|
||||
'_get_iscsi_sessions')
|
||||
def test_get_iscsi_target_sessions(self, mock_get_iscsi_sessions,
|
||||
target_sessions_found=True):
|
||||
fake_session = mock.Mock(TargetNodeName=mock.sentinel.target_name,
|
||||
ConnectionCount=1)
|
||||
fake_disconn_session = mock.Mock(
|
||||
TargetNodeName=mock.sentinel.target_name,
|
||||
ConnectionCount=0)
|
||||
other_session = mock.Mock(TargetNodeName=mock.sentinel.other_target,
|
||||
ConnectionCount=1)
|
||||
|
||||
sessions = [fake_session, fake_disconn_session, other_session]
|
||||
mock_get_iscsi_sessions.return_value = sessions
|
||||
|
||||
resulted_tgt_sessions = self._initiator._get_iscsi_target_sessions(
|
||||
mock.sentinel.target_name)
|
||||
|
||||
self.assertEqual([fake_session], resulted_tgt_sessions)
|
||||
|
||||
def test_get_iscsi_session_devices(self):
|
||||
self._mock_ctypes()
|
||||
|
||||
_get_iscsi_session_devices = _utils.get_wrapped_function(
|
||||
self._initiator._get_iscsi_session_devices)
|
||||
_get_iscsi_session_devices(
|
||||
self._initiator,
|
||||
mock.sentinel.session_id,
|
||||
buff=mock.sentinel.buff,
|
||||
element_count=mock.sentinel.element_count)
|
||||
|
||||
self._mock_run.assert_called_once_with(
|
||||
self._iscsidsc.GetDevicesForIScsiSessionW,
|
||||
self._ctypes.byref(mock.sentinel.session_id),
|
||||
self._ctypes.byref(mock.sentinel.element_count),
|
||||
self._ctypes.byref(mock.sentinel.buff))
|
||||
|
||||
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
|
||||
'_get_iscsi_session_devices')
|
||||
def test_get_iscsi_session_luns(self, mock_get_iscsi_session_devices):
|
||||
fake_device = mock.Mock()
|
||||
fake_device.StorageDeviceNumber.DeviceType = (
|
||||
iscsi_struct.FILE_DEVICE_DISK)
|
||||
mock_get_iscsi_session_devices.return_value = [fake_device,
|
||||
mock.Mock()]
|
||||
|
||||
resulted_luns = self._initiator._get_iscsi_session_disk_luns(
|
||||
mock.sentinel.session_id)
|
||||
expected_luns = [fake_device.ScsiAddress.Lun]
|
||||
|
||||
mock_get_iscsi_session_devices.assert_called_once_with(
|
||||
mock.sentinel.session_id)
|
||||
self.assertEqual(expected_luns, resulted_luns)
|
||||
|
||||
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
|
||||
'_get_iscsi_session_devices')
|
||||
def test_get_iscsi_device_from_session(self,
|
||||
mock_get_iscsi_session_devices):
|
||||
fake_device = mock.Mock()
|
||||
fake_device.ScsiAddress.Lun = mock.sentinel.target_lun
|
||||
mock_get_iscsi_session_devices.return_value = [mock.Mock(),
|
||||
fake_device]
|
||||
|
||||
resulted_device = self._initiator._get_iscsi_device_from_session(
|
||||
mock.sentinel.session_id,
|
||||
mock.sentinel.target_lun)
|
||||
|
||||
mock_get_iscsi_session_devices.assert_called_once_with(
|
||||
mock.sentinel.session_id)
|
||||
self.assertEqual(fake_device, resulted_device)
|
||||
|
||||
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
|
||||
'_get_iscsi_device_from_session')
|
||||
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
|
||||
'_get_iscsi_target_sessions')
|
||||
def test_get_iscsi_device(self, mock_get_iscsi_target_sessions,
|
||||
mock_get_iscsi_session_devices):
|
||||
fake_sessions = [mock.Mock(), mock.Mock()]
|
||||
|
||||
mock_get_iscsi_target_sessions.return_value = fake_sessions
|
||||
mock_get_iscsi_session_devices.side_effect = [None,
|
||||
mock.sentinel.device]
|
||||
|
||||
resulted_device = self._initiator._get_iscsi_device(
|
||||
mock.sentinel.target_name,
|
||||
mock.sentinel.target_lun)
|
||||
|
||||
mock_get_iscsi_target_sessions.assert_called_once_with(
|
||||
mock.sentinel.target_name)
|
||||
mock_get_iscsi_session_devices.assert_has_calls(
|
||||
[mock.call(session.SessionId, mock.sentinel.target_lun)
|
||||
for session in fake_sessions])
|
||||
self.assertEqual(mock.sentinel.device, resulted_device)
|
||||
|
||||
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, '_get_iscsi_device')
|
||||
def test_get_device_number_for_target(self, mock_get_iscsi_device):
|
||||
mock_dev = mock_get_iscsi_device.return_value
|
||||
|
||||
dev_num = self._initiator.get_device_number_for_target(
|
||||
mock.sentinel.target_name, mock.sentinel.lun)
|
||||
|
||||
mock_get_iscsi_device.assert_called_once_with(
|
||||
mock.sentinel.target_name, mock.sentinel.lun)
|
||||
self.assertEqual(mock_dev.StorageDeviceNumber.DeviceNumber, dev_num)
|
||||
|
||||
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, '_get_iscsi_device')
|
||||
def get_device_number_and_path(self, mock_get_iscsi_device):
|
||||
mock_dev = mock_get_iscsi_device.return_value
|
||||
|
||||
dev_num, dev_path = self._initiator.get_device_path(
|
||||
mock.sentinel.target_name, mock.sentinel.lun)
|
||||
|
||||
mock_get_iscsi_device.assert_called_once_with(
|
||||
mock.sentinel.target_name, mock.sentinel.lun)
|
||||
|
||||
self.assertEqual(mock_dev.StorageDeviceNumber.DeviceNumber, dev_num)
|
||||
self.assertEqual(mock_dev.LegacyName, dev_path)
|
||||
|
||||
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
|
||||
'_get_iscsi_target_sessions')
|
||||
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
|
||||
'_get_iscsi_session_disk_luns')
|
||||
def test_get_target_luns(self, mock_get_iscsi_session_disk_luns,
|
||||
mock_get_iscsi_target_sessions):
|
||||
fake_session = mock.Mock()
|
||||
mock_get_iscsi_target_sessions.return_value = [fake_session]
|
||||
|
||||
retrieved_luns = [mock.sentinel.lun_0]
|
||||
mock_get_iscsi_session_disk_luns.return_value = retrieved_luns
|
||||
|
||||
resulted_luns = self._initiator.get_target_luns(
|
||||
mock.sentinel.target_name)
|
||||
|
||||
mock_get_iscsi_target_sessions.assert_called_once_with(
|
||||
mock.sentinel.target_name)
|
||||
mock_get_iscsi_session_disk_luns.assert_called_once_with(
|
||||
fake_session.SessionId)
|
||||
self.assertEqual(retrieved_luns, resulted_luns)
|
||||
|
||||
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
|
||||
'get_target_luns')
|
||||
def test_get_target_lun_count(self, mock_get_target_luns):
|
||||
target_luns = [mock.sentinel.lun0, mock.sentinel.lun1]
|
||||
mock_get_target_luns.return_value = target_luns
|
||||
|
||||
lun_count = self._initiator.get_target_lun_count(
|
||||
mock.sentinel.target_name)
|
||||
|
||||
self.assertEqual(len(target_luns), lun_count)
|
||||
mock_get_target_luns.assert_called_once_with(
|
||||
mock.sentinel.target_name)
|
||||
|
||||
def test_logout_iscsi_target(self):
|
||||
self._mock_ctypes()
|
||||
|
||||
self._initiator._logout_iscsi_target(mock.sentinel.session_id)
|
||||
|
||||
self._mock_run.assert_called_once_with(
|
||||
self._iscsidsc.LogoutIScsiTarget,
|
||||
self._ctypes.byref(mock.sentinel.session_id))
|
||||
|
||||
def test_add_static_target(self):
|
||||
self._mock_ctypes()
|
||||
|
||||
is_persistent = True
|
||||
self._initiator._add_static_target(mock.sentinel.target_name,
|
||||
is_persistent=is_persistent)
|
||||
|
||||
self._mock_run.assert_called_once_with(
|
||||
self._iscsidsc.AddIScsiStaticTargetW,
|
||||
self._ctypes.c_wchar_p(mock.sentinel.target_name),
|
||||
None, 0, is_persistent, None, None, None)
|
||||
|
||||
def test_remove_static_target(self):
|
||||
self._mock_ctypes()
|
||||
|
||||
self._initiator._remove_static_target(mock.sentinel.target_name)
|
||||
|
||||
expected_ignored_err_codes = [iscsierr.ISDSC_TARGET_NOT_FOUND]
|
||||
self._mock_run.assert_called_once_with(
|
||||
self._iscsidsc.RemoveIScsiStaticTargetW,
|
||||
self._ctypes.c_wchar_p(mock.sentinel.target_name),
|
||||
ignored_error_codes=expected_ignored_err_codes)
|
||||
|
||||
@mock.patch.object(iscsi_struct, 'ISCSI_LOGIN_OPTIONS')
|
||||
def _test_get_login_opts(self, mock_cls_ISCSI_LOGIN_OPTIONS,
|
||||
auth_type=None, creds_specified=False):
|
||||
auth_user = mock.sentinel.auth_user if creds_specified else None
|
||||
auth_pwd = mock.sentinel.auth_pwd if creds_specified else None
|
||||
|
||||
if not auth_type:
|
||||
expected_auth_type = (constants.ISCSI_CHAP_AUTH_TYPE
|
||||
if creds_specified
|
||||
else constants.ISCSI_NO_AUTH_TYPE)
|
||||
else:
|
||||
expected_auth_type = auth_type
|
||||
|
||||
resulted_login_opts = self._initiator._get_login_opts(
|
||||
auth_user, auth_pwd, auth_type,
|
||||
mock.sentinel.login_flags)
|
||||
|
||||
expected_login_opts = mock_cls_ISCSI_LOGIN_OPTIONS.return_value
|
||||
mock_cls_ISCSI_LOGIN_OPTIONS.assert_called_once_with(
|
||||
Username=auth_user,
|
||||
Password=auth_pwd,
|
||||
AuthType=expected_auth_type,
|
||||
LoginFlags=mock.sentinel.login_flags)
|
||||
self.assertEqual(expected_login_opts, resulted_login_opts)
|
||||
|
||||
def test_get_login_opts_without_creds_and_explicit_auth_type(self):
|
||||
self._test_get_login_opts()
|
||||
|
||||
def test_get_login_opts_with_creds_and_without_explicit_auth_type(self):
|
||||
self._test_get_login_opts(creds_specified=True)
|
||||
|
||||
def test_get_login_opts_with_explicit_auth_type(self):
|
||||
self._test_get_login_opts(auth_type=mock.sentinel.auth_type)
|
||||
|
||||
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
|
||||
'_get_iscsi_session_devices')
|
||||
def test_session_on_path_exists(self, mock_get_iscsi_session_devices):
|
||||
mock_device = mock.Mock(InitiatorName=mock.sentinel.initiator_name)
|
||||
mock_get_iscsi_session_devices.return_value = [mock_device]
|
||||
|
||||
fake_connection = mock.Mock(TargetAddress=mock.sentinel.portal_addr,
|
||||
TargetSocket=mock.sentinel.portal_port)
|
||||
fake_connections = [mock.Mock(), fake_connection]
|
||||
fake_session = mock.Mock(ConnectionCount=len(fake_connections),
|
||||
Connections=fake_connections)
|
||||
fake_sessions = [mock.Mock(Connections=[], ConnectionCount=0),
|
||||
fake_session]
|
||||
|
||||
session_on_path_exists = self._initiator._session_on_path_exists(
|
||||
fake_sessions, mock.sentinel.portal_addr,
|
||||
mock.sentinel.portal_port,
|
||||
mock.sentinel.initiator_name)
|
||||
self.assertTrue(session_on_path_exists)
|
||||
mock_get_iscsi_session_devices.assert_has_calls(
|
||||
[mock.call(session.SessionId) for session in fake_sessions])
|
||||
|
||||
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
|
||||
'_get_iscsi_target_sessions')
|
||||
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
|
||||
'_session_on_path_exists')
|
||||
def _test_new_session_required(self, mock_session_on_path_exists,
|
||||
mock_get_iscsi_target_sessions,
|
||||
sessions=None,
|
||||
mpio_enabled=False,
|
||||
session_on_path_exists=False):
|
||||
mock_get_iscsi_target_sessions.return_value = sessions
|
||||
mock_session_on_path_exists.return_value = session_on_path_exists
|
||||
|
||||
expected_result = (not sessions or
|
||||
(mpio_enabled and not session_on_path_exists))
|
||||
result = self._initiator._new_session_required(
|
||||
mock.sentinel.target_iqn,
|
||||
mock.sentinel.portal_addr,
|
||||
mock.sentinel.portal_port,
|
||||
mock.sentinel.initiator_name,
|
||||
mpio_enabled)
|
||||
self.assertEqual(expected_result, result)
|
||||
|
||||
if sessions and mpio_enabled:
|
||||
mock_session_on_path_exists.assert_called_once_with(
|
||||
sessions,
|
||||
mock.sentinel.portal_addr,
|
||||
mock.sentinel.portal_port,
|
||||
mock.sentinel.initiator_name)
|
||||
|
||||
def test_new_session_required_no_sessions(self):
|
||||
self._test_new_session_required()
|
||||
|
||||
def test_new_session_required_existing_sessions_no_mpio(self):
|
||||
self._test_new_session_required(sessions=mock.sentinel.sessions)
|
||||
|
||||
def test_new_session_required_existing_sessions_mpio_enabled(self):
|
||||
self._test_new_session_required(sessions=mock.sentinel.sessions,
|
||||
mpio_enabled=True)
|
||||
|
||||
def test_new_session_required_session_on_path_exists(self):
|
||||
self._test_new_session_required(sessions=mock.sentinel.sessions,
|
||||
mpio_enabled=True,
|
||||
session_on_path_exists=True)
|
||||
|
||||
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
|
||||
'_get_login_opts')
|
||||
@mock.patch.object(iscsi_struct, 'ISCSI_TARGET_PORTAL')
|
||||
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
|
||||
'_new_session_required')
|
||||
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, 'get_targets')
|
||||
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils, '_login_iscsi_target')
|
||||
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
|
||||
'ensure_lun_available')
|
||||
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
|
||||
'_add_static_target')
|
||||
def _test_login_storage_target(self, mock_add_static_target,
|
||||
mock_ensure_lun_available,
|
||||
mock_login_iscsi_target,
|
||||
mock_get_targets,
|
||||
mock_session_required,
|
||||
mock_cls_ISCSI_TARGET_PORTAL,
|
||||
mock_get_login_opts,
|
||||
mpio_enabled=False,
|
||||
login_required=True):
|
||||
fake_portal_addr = '127.0.0.1'
|
||||
fake_portal_port = 3260
|
||||
fake_target_portal = '%s:%s' % (fake_portal_addr, fake_portal_port)
|
||||
|
||||
fake_portal = mock_cls_ISCSI_TARGET_PORTAL.return_value
|
||||
fake_login_opts = mock_get_login_opts.return_value
|
||||
|
||||
mock_get_targets.return_value = []
|
||||
mock_login_iscsi_target.return_value = (mock.sentinel.session_id,
|
||||
mock.sentinel.conn_id)
|
||||
mock_session_required.return_value = login_required
|
||||
|
||||
self._initiator.login_storage_target(
|
||||
mock.sentinel.target_lun,
|
||||
mock.sentinel.target_iqn,
|
||||
fake_target_portal,
|
||||
auth_username=mock.sentinel.auth_username,
|
||||
auth_password=mock.sentinel.auth_password,
|
||||
auth_type=mock.sentinel.auth_type,
|
||||
mpio_enabled=mpio_enabled,
|
||||
rescan_attempts=mock.sentinel.rescan_attempts)
|
||||
|
||||
mock_get_targets.assert_called_once_with()
|
||||
mock_add_static_target.assert_called_once_with(
|
||||
mock.sentinel.target_iqn)
|
||||
|
||||
if login_required:
|
||||
expected_login_flags = (
|
||||
iscsi_struct.ISCSI_LOGIN_FLAG_MULTIPATH_ENABLED
|
||||
if mpio_enabled else 0)
|
||||
mock_get_login_opts.assert_called_once_with(
|
||||
mock.sentinel.auth_username,
|
||||
mock.sentinel.auth_password,
|
||||
mock.sentinel.auth_type,
|
||||
expected_login_flags)
|
||||
mock_cls_ISCSI_TARGET_PORTAL.assert_called_once_with(
|
||||
Address=fake_portal_addr,
|
||||
Socket=fake_portal_port)
|
||||
mock_login_iscsi_target.assert_has_calls([
|
||||
mock.call(mock.sentinel.target_iqn,
|
||||
fake_portal,
|
||||
fake_login_opts,
|
||||
is_persistent=True),
|
||||
mock.call(mock.sentinel.target_iqn,
|
||||
fake_portal,
|
||||
fake_login_opts,
|
||||
is_persistent=False)])
|
||||
else:
|
||||
self.assertFalse(mock_login_iscsi_target.called)
|
||||
|
||||
mock_ensure_lun_available.assert_called_once_with(
|
||||
mock.sentinel.target_iqn,
|
||||
mock.sentinel.target_lun,
|
||||
mock.sentinel.rescan_attempts)
|
||||
|
||||
def test_login_storage_target_path_exists(self):
|
||||
self._test_login_storage_target(login_required=False)
|
||||
|
||||
def test_login_new_storage_target_no_mpio(self):
|
||||
self._test_login_storage_target()
|
||||
|
||||
def test_login_storage_target_new_path_using_mpio(self):
|
||||
self._test_login_storage_target(mpio_enabled=True)
|
||||
|
||||
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
|
||||
'_get_iscsi_device_from_session')
|
||||
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
|
||||
'_get_iscsi_target_sessions')
|
||||
def test_ensure_lun_available(self,
|
||||
mock_get_iscsi_target_sessions,
|
||||
mock_get_iscsi_device_from_session):
|
||||
expected_try_count = 5
|
||||
mock_get_iscsi_target_sessions.return_value = [
|
||||
mock.Mock(SessionId=mock.sentinel.session_id)]
|
||||
|
||||
fake_exc = exceptions.ISCSIInitiatorAPIException(
|
||||
message='fake_message',
|
||||
error_code=1,
|
||||
func_name='fake_func')
|
||||
dev_num_side_eff = [None, -1, fake_exc, mock.sentinel.dev_num]
|
||||
fake_device = mock.Mock()
|
||||
type(fake_device.StorageDeviceNumber).DeviceNumber = (
|
||||
mock.PropertyMock(side_effect=dev_num_side_eff))
|
||||
|
||||
mock_get_dev_side_eff = [None] + [fake_device] * 4
|
||||
mock_get_iscsi_device_from_session.side_effect = mock_get_dev_side_eff
|
||||
|
||||
self._initiator.ensure_lun_available(
|
||||
mock.sentinel.target_iqn,
|
||||
mock.sentinel.target_lun,
|
||||
rescan_attempts=6)
|
||||
|
||||
mock_get_iscsi_target_sessions.assert_has_calls(
|
||||
[mock.call(mock.sentinel.target_iqn)] * expected_try_count)
|
||||
mock_get_iscsi_device_from_session.assert_has_calls(
|
||||
[mock.call(mock.sentinel.session_id,
|
||||
mock.sentinel.target_lun)] * 4)
|
||||
self.assertEqual(
|
||||
4,
|
||||
self._initiator._diskutils.rescan_disks.call_count)
|
||||
|
||||
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
|
||||
'_get_iscsi_target_sessions')
|
||||
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
|
||||
'_logout_iscsi_target')
|
||||
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
|
||||
'_remove_target_persistent_logins')
|
||||
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
|
||||
'_remove_static_target')
|
||||
def test_logout_storage_target(self, mock_remove_static_target,
|
||||
mock_remove_target_persistent_logins,
|
||||
mock_logout_iscsi_target,
|
||||
mock_get_iscsi_target_sessions):
|
||||
fake_session = mock.Mock(SessionId=mock.sentinel.session_id)
|
||||
mock_get_iscsi_target_sessions.return_value = [fake_session]
|
||||
|
||||
self._initiator.logout_storage_target(mock.sentinel.target_iqn)
|
||||
|
||||
mock_get_iscsi_target_sessions.assert_called_once_with(
|
||||
mock.sentinel.target_iqn, connected_only=False)
|
||||
mock_logout_iscsi_target.assert_called_once_with(
|
||||
mock.sentinel.session_id)
|
||||
mock_remove_target_persistent_logins.assert_called_once_with(
|
||||
mock.sentinel.target_iqn)
|
||||
mock_remove_static_target.assert_called_once_with(
|
||||
mock.sentinel.target_iqn)
|
||||
|
||||
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
|
||||
'_remove_persistent_login')
|
||||
@mock.patch.object(iscsi_utils.ISCSIInitiatorUtils,
|
||||
'_get_iscsi_persistent_logins')
|
||||
def test_remove_target_persistent_logins(self,
|
||||
mock_get_iscsi_persistent_logins,
|
||||
mock_remove_persistent_login):
|
||||
fake_persistent_login = mock.Mock(TargetName=mock.sentinel.target_iqn)
|
||||
mock_get_iscsi_persistent_logins.return_value = [fake_persistent_login]
|
||||
|
||||
self._initiator._remove_target_persistent_logins(
|
||||
mock.sentinel.target_iqn)
|
||||
|
||||
mock_remove_persistent_login.assert_called_once_with(
|
||||
fake_persistent_login)
|
||||
mock_get_iscsi_persistent_logins.assert_called_once_with()
|
||||
|
||||
@mock.patch.object(ctypes, 'byref')
|
||||
def test_remove_persistent_login(self, mock_byref):
|
||||
fake_persistent_login = mock.Mock()
|
||||
fake_persistent_login.InitiatorInstance = 'fake_initiator_instance'
|
||||
fake_persistent_login.TargetName = 'fake_target_name'
|
||||
|
||||
self._initiator._remove_persistent_login(fake_persistent_login)
|
||||
|
||||
args_list = self._mock_run.call_args_list[0][0]
|
||||
self.assertIsInstance(args_list[1], ctypes.c_wchar_p)
|
||||
self.assertEqual(fake_persistent_login.InitiatorInstance,
|
||||
args_list[1].value)
|
||||
self.assertIsInstance(args_list[3], ctypes.c_wchar_p)
|
||||
self.assertEqual(fake_persistent_login.TargetName,
|
||||
args_list[3].value)
|
||||
mock_byref.assert_called_once_with(fake_persistent_login.TargetPortal)
|
@ -1,161 +0,0 @@
|
||||
# Copyright 2014 Cloudbase Solutions Srl
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
|
||||
from os_win import exceptions
|
||||
from os_win.tests import test_base
|
||||
from os_win.utils.storage.initiator import iscsi_wmi_utils
|
||||
|
||||
|
||||
class ISCSIInitiatorWMIUtilsTestCase(test_base.OsWinBaseTestCase):
|
||||
"""Unit tests for the Hyper-V ISCSIInitiatorWMIUtils class."""
|
||||
|
||||
_FAKE_PORTAL_ADDR = '10.1.1.1'
|
||||
_FAKE_PORTAL_PORT = '3260'
|
||||
_FAKE_LUN = 0
|
||||
_FAKE_TARGET = 'iqn.2010-10.org.openstack:fake_target'
|
||||
|
||||
def setUp(self):
|
||||
super(ISCSIInitiatorWMIUtilsTestCase, self).setUp()
|
||||
self._initiator = iscsi_wmi_utils.ISCSIInitiatorWMIUtils()
|
||||
self._initiator._conn_storage = mock.MagicMock()
|
||||
self._initiator._conn_wmi = mock.MagicMock()
|
||||
|
||||
def _test_login_target_portal(self, portal_connected):
|
||||
fake_portal = '%s:%s' % (self._FAKE_PORTAL_ADDR,
|
||||
self._FAKE_PORTAL_PORT)
|
||||
fake_portal_object = mock.MagicMock()
|
||||
_query = self._initiator._conn_storage.query
|
||||
self._initiator._conn_storage.MSFT_iSCSITargetPortal = (
|
||||
fake_portal_object)
|
||||
|
||||
if portal_connected:
|
||||
_query.return_value = [fake_portal_object]
|
||||
else:
|
||||
_query.return_value = None
|
||||
|
||||
self._initiator._login_target_portal(fake_portal)
|
||||
|
||||
if portal_connected:
|
||||
fake_portal_object.Update.assert_called_once_with()
|
||||
else:
|
||||
fake_portal_object.New.assert_called_once_with(
|
||||
TargetPortalAddress=self._FAKE_PORTAL_ADDR,
|
||||
TargetPortalPortNumber=self._FAKE_PORTAL_PORT)
|
||||
|
||||
def test_login_connected_portal(self):
|
||||
self._test_login_target_portal(True)
|
||||
|
||||
def test_login_new_portal(self):
|
||||
self._test_login_target_portal(False)
|
||||
|
||||
@mock.patch.object(iscsi_wmi_utils, 'CONF')
|
||||
def _test_login_target(self, mock_CONF, target_connected=False,
|
||||
raise_exception=False, use_chap=False):
|
||||
mock_CONF.hyperv.volume_attach_retry_count = 4
|
||||
mock_CONF.hyperv.volume_attach_retry_interval = 0
|
||||
fake_portal = '%s:%s' % (self._FAKE_PORTAL_ADDR,
|
||||
self._FAKE_PORTAL_PORT)
|
||||
|
||||
fake_target_object = mock.MagicMock()
|
||||
|
||||
if target_connected:
|
||||
fake_target_object.IsConnected = True
|
||||
elif not raise_exception:
|
||||
type(fake_target_object).IsConnected = mock.PropertyMock(
|
||||
side_effect=[False, True])
|
||||
else:
|
||||
fake_target_object.IsConnected = False
|
||||
|
||||
_query = self._initiator._conn_storage.query
|
||||
_query.return_value = [fake_target_object]
|
||||
|
||||
self._initiator._conn_storage.MSFT_iSCSITarget = (
|
||||
fake_target_object)
|
||||
|
||||
if use_chap:
|
||||
username, password = (mock.sentinel.username,
|
||||
mock.sentinel.password)
|
||||
auth = {
|
||||
'AuthenticationType': self._initiator._CHAP_AUTH_TYPE,
|
||||
'ChapUsername': username,
|
||||
'ChapSecret': password,
|
||||
}
|
||||
else:
|
||||
username, password = None, None
|
||||
auth = {}
|
||||
|
||||
if raise_exception:
|
||||
self.assertRaises(exceptions.HyperVException,
|
||||
self._initiator.login_storage_target,
|
||||
self._FAKE_LUN, self._FAKE_TARGET, fake_portal)
|
||||
else:
|
||||
self._initiator.login_storage_target(self._FAKE_LUN,
|
||||
self._FAKE_TARGET,
|
||||
fake_portal,
|
||||
username, password)
|
||||
|
||||
if target_connected:
|
||||
fake_target_object.Update.assert_called_with()
|
||||
else:
|
||||
fake_target_object.Connect.assert_called_once_with(
|
||||
IsPersistent=True, NodeAddress=self._FAKE_TARGET, **auth)
|
||||
|
||||
def test_login_connected_target(self):
|
||||
self._test_login_target(target_connected=True)
|
||||
|
||||
def test_login_disconncted_target(self):
|
||||
self._test_login_target()
|
||||
|
||||
def test_login_target_exception(self):
|
||||
self._test_login_target(raise_exception=True)
|
||||
|
||||
def test_login_target_using_chap(self):
|
||||
self._test_login_target(use_chap=True)
|
||||
|
||||
def test_logout_storage_target(self):
|
||||
mock_msft_target = self._initiator._conn_storage.MSFT_iSCSITarget
|
||||
mock_msft_session = self._initiator._conn_storage.MSFT_iSCSISession
|
||||
|
||||
mock_target = mock.MagicMock()
|
||||
mock_target.IsConnected = True
|
||||
mock_msft_target.return_value = [mock_target]
|
||||
|
||||
mock_session = mock.MagicMock()
|
||||
mock_session.IsPersistent = True
|
||||
mock_msft_session.return_value = [mock_session]
|
||||
|
||||
self._initiator.logout_storage_target(self._FAKE_TARGET)
|
||||
|
||||
mock_msft_target.assert_called_once_with(NodeAddress=self._FAKE_TARGET)
|
||||
mock_msft_session.assert_called_once_with(
|
||||
TargetNodeAddress=self._FAKE_TARGET)
|
||||
|
||||
mock_session.Unregister.assert_called_once_with()
|
||||
mock_target.Disconnect.assert_called_once_with()
|
||||
|
||||
@mock.patch.object(iscsi_wmi_utils.ISCSIInitiatorWMIUtils,
|
||||
'logout_storage_target')
|
||||
def test_execute_log_out(self, mock_logout_target):
|
||||
sess_class = self._initiator._conn_wmi.MSiSCSIInitiator_SessionClass
|
||||
|
||||
mock_session = mock.MagicMock()
|
||||
sess_class.return_value = [mock_session]
|
||||
|
||||
self._initiator.execute_log_out(mock.sentinel.FAKE_SESSION_ID)
|
||||
|
||||
sess_class.assert_called_once_with(
|
||||
SessionId=mock.sentinel.FAKE_SESSION_ID)
|
||||
mock_logout_target.assert_called_once_with(mock_session.TargetName)
|
@ -1,56 +0,0 @@
|
||||
# Copyright 2015 Cloudbase Solutions Srl
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import ctypes
|
||||
|
||||
import six
|
||||
|
||||
from os_win import constants
|
||||
from os_win.tests import test_base
|
||||
from os_win.utils.storage.initiator import iscsidsc_structures as iscsi_struct
|
||||
|
||||
|
||||
class ISCSIStructTestCase(test_base.OsWinBaseTestCase):
|
||||
def test_iscsi_login_opts_setup(self):
|
||||
fake_username = 'fake_chap_username'
|
||||
fake_password = 'fake_chap_secret'
|
||||
auth_type = constants.ISCSI_CHAP_AUTH_TYPE
|
||||
|
||||
login_opts = iscsi_struct.ISCSI_LOGIN_OPTIONS(Username=fake_username,
|
||||
Password=fake_password,
|
||||
AuthType=auth_type)
|
||||
|
||||
self.assertIsInstance(login_opts.Username, iscsi_struct.PUCHAR)
|
||||
self.assertIsInstance(login_opts.Password, iscsi_struct.PUCHAR)
|
||||
|
||||
self.assertEqual(len(fake_username), login_opts.UsernameLength)
|
||||
self.assertEqual(len(fake_password), login_opts.PasswordLength)
|
||||
|
||||
username_struct_contents = ctypes.cast(
|
||||
login_opts.Username,
|
||||
ctypes.POINTER(ctypes.c_char * len(fake_username))).contents.value
|
||||
pwd_struct_contents = ctypes.cast(
|
||||
login_opts.Password,
|
||||
ctypes.POINTER(ctypes.c_char * len(fake_password))).contents.value
|
||||
|
||||
self.assertEqual(six.b(fake_username), username_struct_contents)
|
||||
self.assertEqual(six.b(fake_password), pwd_struct_contents)
|
||||
|
||||
expected_info_bitmap = (iscsi_struct.ISCSI_LOGIN_OPTIONS_USERNAME |
|
||||
iscsi_struct.ISCSI_LOGIN_OPTIONS_PASSWORD |
|
||||
iscsi_struct.ISCSI_LOGIN_OPTIONS_AUTH_TYPE)
|
||||
self.assertEqual(expected_info_bitmap,
|
||||
login_opts.InformationSpecified)
|
@ -1,488 +0,0 @@
|
||||
# Copyright 2015 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
|
||||
from os_win import constants
|
||||
from os_win import exceptions
|
||||
from os_win.tests import test_base
|
||||
from os_win.utils.storage.target import iscsi_target_utils as tg_utils
|
||||
|
||||
|
||||
class ISCSITargetUtilsTestCase(test_base.OsWinBaseTestCase):
|
||||
@mock.patch.object(tg_utils, 'hostutils')
|
||||
def setUp(self, mock_hostutils):
|
||||
super(ISCSITargetUtilsTestCase, self).setUp()
|
||||
|
||||
self._tgutils = tg_utils.ISCSITargetUtils()
|
||||
self._tgutils._pathutils = mock.Mock()
|
||||
|
||||
def test_ensure_wt_provider_unavailable(self):
|
||||
self._tgutils._conn_wmi = None
|
||||
self.assertRaises(exceptions.ISCSITargetException,
|
||||
self._tgutils._ensure_wt_provider_available)
|
||||
|
||||
def test_get_supported_disk_format_6_2(self):
|
||||
self._tgutils._win_gteq_6_3 = False
|
||||
fmt = self._tgutils.get_supported_disk_format()
|
||||
self.assertEqual(constants.DISK_FORMAT_VHD, fmt)
|
||||
|
||||
def test_get_supported_disk_format_6_3(self):
|
||||
self._tgutils._win_gteq_6_3 = True
|
||||
fmt = self._tgutils.get_supported_disk_format()
|
||||
self.assertEqual(constants.DISK_FORMAT_VHDX, fmt)
|
||||
|
||||
def test_get_supported_vhd_type_6_2(self):
|
||||
self._tgutils._win_gteq_6_3 = False
|
||||
vhd_type = self._tgutils.get_supported_vhd_type()
|
||||
self.assertEqual(constants.VHD_TYPE_FIXED, vhd_type)
|
||||
|
||||
def test_get_supported_vhd_type_6_3(self):
|
||||
self._tgutils._win_gteq_6_3 = True
|
||||
vhd_type = self._tgutils.get_supported_vhd_type()
|
||||
self.assertEqual(constants.VHD_TYPE_DYNAMIC, vhd_type)
|
||||
|
||||
def _test_get_portal_locations(self, available_only=False,
|
||||
fail_if_none_found=False):
|
||||
mock_portal = mock.Mock(Listen=False,
|
||||
Address=mock.sentinel.address,
|
||||
Port=mock.sentinel.port)
|
||||
mock_portal_location = "%s:%s" % (mock.sentinel.address,
|
||||
mock.sentinel.port)
|
||||
|
||||
mock_wt_portal_cls = self._tgutils._conn_wmi.WT_Portal
|
||||
mock_wt_portal_cls.return_value = [mock_portal]
|
||||
|
||||
if available_only and fail_if_none_found:
|
||||
self.assertRaises(exceptions.ISCSITargetException,
|
||||
self._tgutils.get_portal_locations,
|
||||
available_only=available_only,
|
||||
fail_if_none_found=fail_if_none_found)
|
||||
else:
|
||||
portals = self._tgutils.get_portal_locations(
|
||||
available_only=available_only,
|
||||
fail_if_none_found=fail_if_none_found)
|
||||
|
||||
expected_retrieved_portals = []
|
||||
if not available_only:
|
||||
expected_retrieved_portals.append(mock_portal_location)
|
||||
|
||||
self.assertEqual(expected_retrieved_portals,
|
||||
portals)
|
||||
|
||||
def test_get_portal_locations(self):
|
||||
self._test_get_portal_locations()
|
||||
|
||||
def test_get_available_portal_locations(self):
|
||||
self._test_get_portal_locations(available_only=True)
|
||||
|
||||
def test_get_portal_locations_failing_if_none(self):
|
||||
self._test_get_portal_locations(available_only=True,
|
||||
fail_if_none_found=True)
|
||||
|
||||
def _test_get_wt_host(self, host_found=True, fail_if_not_found=False):
|
||||
mock_wt_host = mock.Mock()
|
||||
mock_wt_host_cls = self._tgutils._conn_wmi.WT_Host
|
||||
mock_wt_host_cls.return_value = [mock_wt_host] if host_found else []
|
||||
|
||||
if not host_found and fail_if_not_found:
|
||||
self.assertRaises(exceptions.ISCSITargetException,
|
||||
self._tgutils._get_wt_host,
|
||||
mock.sentinel.target_name,
|
||||
fail_if_not_found=fail_if_not_found)
|
||||
else:
|
||||
wt_host = self._tgutils._get_wt_host(
|
||||
mock.sentinel.target_name,
|
||||
fail_if_not_found=fail_if_not_found)
|
||||
|
||||
expected_wt_host = mock_wt_host if host_found else None
|
||||
self.assertEqual(expected_wt_host, wt_host)
|
||||
|
||||
mock_wt_host_cls.assert_called_once_with(
|
||||
HostName=mock.sentinel.target_name)
|
||||
|
||||
def test_get_wt_host(self):
|
||||
self._test_get_wt_host()
|
||||
|
||||
def test_get_wt_host_not_found(self):
|
||||
self._test_get_wt_host(host_found=False)
|
||||
|
||||
def test_get_wt_host_not_found_exception(self):
|
||||
self._test_get_wt_host(host_found=False,
|
||||
fail_if_not_found=True)
|
||||
|
||||
def _test_get_wt_disk(self, disk_found=True, fail_if_not_found=False):
|
||||
mock_wt_disk = mock.Mock()
|
||||
mock_wt_disk_cls = self._tgutils._conn_wmi.WT_Disk
|
||||
mock_wt_disk_cls.return_value = [mock_wt_disk] if disk_found else []
|
||||
|
||||
if not disk_found and fail_if_not_found:
|
||||
self.assertRaises(exceptions.ISCSITargetException,
|
||||
self._tgutils._get_wt_disk,
|
||||
mock.sentinel.disk_description,
|
||||
fail_if_not_found=fail_if_not_found)
|
||||
else:
|
||||
wt_disk = self._tgutils._get_wt_disk(
|
||||
mock.sentinel.disk_description,
|
||||
fail_if_not_found=fail_if_not_found)
|
||||
|
||||
expected_wt_disk = mock_wt_disk if disk_found else None
|
||||
self.assertEqual(expected_wt_disk, wt_disk)
|
||||
|
||||
mock_wt_disk_cls.assert_called_once_with(
|
||||
Description=mock.sentinel.disk_description)
|
||||
|
||||
def test_get_wt_disk(self):
|
||||
self._test_get_wt_disk()
|
||||
|
||||
def test_get_wt_disk_not_found(self):
|
||||
self._test_get_wt_disk(disk_found=False)
|
||||
|
||||
def test_get_wt_disk_not_found_exception(self):
|
||||
self._test_get_wt_disk(disk_found=False,
|
||||
fail_if_not_found=True)
|
||||
|
||||
def _test_get_wt_snap(self, snap_found=True, fail_if_not_found=False):
|
||||
mock_wt_snap = mock.Mock()
|
||||
mock_wt_snap_cls = self._tgutils._conn_wmi.WT_Snapshot
|
||||
mock_wt_snap_cls.return_value = [mock_wt_snap] if snap_found else []
|
||||
|
||||
if not snap_found and fail_if_not_found:
|
||||
self.assertRaises(exceptions.ISCSITargetException,
|
||||
self._tgutils._get_wt_snapshot,
|
||||
mock.sentinel.snap_description,
|
||||
fail_if_not_found=fail_if_not_found)
|
||||
else:
|
||||
wt_snap = self._tgutils._get_wt_snapshot(
|
||||
mock.sentinel.snap_description,
|
||||
fail_if_not_found=fail_if_not_found)
|
||||
|
||||
expected_wt_snap = mock_wt_snap if snap_found else None
|
||||
self.assertEqual(expected_wt_snap, wt_snap)
|
||||
|
||||
mock_wt_snap_cls.assert_called_once_with(
|
||||
Description=mock.sentinel.snap_description)
|
||||
|
||||
def test_get_wt_snap(self):
|
||||
self._test_get_wt_snap()
|
||||
|
||||
def test_get_wt_snap_not_found(self):
|
||||
self._test_get_wt_snap(snap_found=False)
|
||||
|
||||
def test_get_wt_snap_not_found_exception(self):
|
||||
self._test_get_wt_snap(snap_found=False,
|
||||
fail_if_not_found=True)
|
||||
|
||||
def _test_get_wt_idmethod(self, idmeth_found=True):
|
||||
mock_wt_idmeth = mock.Mock()
|
||||
mock_wt_idmeth_cls = self._tgutils._conn_wmi.WT_IDMethod
|
||||
mock_wt_idmeth_cls.return_value = ([mock_wt_idmeth]
|
||||
if idmeth_found else [])
|
||||
|
||||
wt_idmeth = self._tgutils._get_wt_idmethod(mock.sentinel.initiator,
|
||||
mock.sentinel.target_name)
|
||||
|
||||
expected_wt_idmeth = mock_wt_idmeth if idmeth_found else None
|
||||
self.assertEqual(expected_wt_idmeth, wt_idmeth)
|
||||
|
||||
mock_wt_idmeth_cls.assert_called_once_with(
|
||||
HostName=mock.sentinel.target_name,
|
||||
Value=mock.sentinel.initiator)
|
||||
|
||||
def test_get_wt_idmethod(self):
|
||||
self._test_get_wt_idmethod()
|
||||
|
||||
def test_get_wt_idmethod_not_found(self):
|
||||
self._test_get_wt_idmethod(idmeth_found=False)
|
||||
|
||||
def _test_create_iscsi_target_exception(self, target_exists=False,
|
||||
fail_if_exists=False):
|
||||
fake_file_exists_hres = -0x7ff8ffb0
|
||||
fake_hres = fake_file_exists_hres if target_exists else 1
|
||||
mock_wt_host_cls = self._tgutils._conn_wmi.WT_Host
|
||||
mock_wt_host_cls.NewHost.side_effect = test_base.FakeWMIExc(
|
||||
hresult=fake_hres)
|
||||
|
||||
if target_exists and not fail_if_exists:
|
||||
self._tgutils.create_iscsi_target(mock.sentinel.target_name,
|
||||
fail_if_exists=fail_if_exists)
|
||||
else:
|
||||
self.assertRaises(exceptions.ISCSITargetException,
|
||||
self._tgutils.create_iscsi_target,
|
||||
mock.sentinel.target_name,
|
||||
fail_if_exists=fail_if_exists)
|
||||
|
||||
mock_wt_host_cls.NewHost.assert_called_once_with(
|
||||
HostName=mock.sentinel.target_name)
|
||||
|
||||
def test_create_iscsi_target_exception(self):
|
||||
self._test_create_iscsi_target_exception()
|
||||
|
||||
def test_create_iscsi_target_already_exists_skipping(self):
|
||||
self._test_create_iscsi_target_exception(target_exists=True)
|
||||
|
||||
def test_create_iscsi_target_already_exists_failing(self):
|
||||
self._test_create_iscsi_target_exception(target_exists=True,
|
||||
fail_if_exists=True)
|
||||
|
||||
@mock.patch.object(tg_utils.ISCSITargetUtils, '_get_wt_host')
|
||||
def test_delete_iscsi_target_exception(self, mock_get_wt_host):
|
||||
mock_wt_host = mock_get_wt_host.return_value
|
||||
mock_wt_host.Delete_.side_effect = test_base.FakeWMIExc
|
||||
|
||||
self.assertRaises(exceptions.ISCSITargetException,
|
||||
self._tgutils.delete_iscsi_target,
|
||||
mock.sentinel.target_name)
|
||||
|
||||
mock_wt_host.RemoveAllWTDisks.assert_called_once_with()
|
||||
mock_get_wt_host.assert_called_once_with(mock.sentinel.target_name,
|
||||
fail_if_not_found=False)
|
||||
|
||||
@mock.patch.object(tg_utils.ISCSITargetUtils, '_get_wt_host')
|
||||
def _test_iscsi_target_exists(self, mock_get_wt_host, target_exists=True):
|
||||
mock_get_wt_host.return_value = (mock.sentinel.wt_host
|
||||
if target_exists else None)
|
||||
|
||||
result = self._tgutils.iscsi_target_exists(mock.sentinel.target_name)
|
||||
|
||||
self.assertEqual(target_exists, result)
|
||||
mock_get_wt_host.assert_called_once_with(mock.sentinel.target_name,
|
||||
fail_if_not_found=False)
|
||||
|
||||
def test_iscsi_target_exists(self):
|
||||
self._test_iscsi_target_exists()
|
||||
|
||||
def test_iscsi_target_unexisting(self):
|
||||
self._test_iscsi_target_exists(target_exists=False)
|
||||
|
||||
@mock.patch.object(tg_utils.ISCSITargetUtils, '_get_wt_host')
|
||||
def test_get_target_information(self, mock_get_wt_host):
|
||||
mock_wt_host = mock_get_wt_host.return_value
|
||||
mock_wt_host.EnableCHAP = True
|
||||
mock_wt_host.Status = 1 # connected
|
||||
|
||||
target_info = self._tgutils.get_target_information(
|
||||
mock.sentinel.target_name)
|
||||
|
||||
expected_info = dict(target_iqn=mock_wt_host.TargetIQN,
|
||||
enabled=mock_wt_host.Enabled,
|
||||
connected=True,
|
||||
auth_method='CHAP',
|
||||
auth_username=mock_wt_host.CHAPUserName,
|
||||
auth_password=mock_wt_host.CHAPSecret)
|
||||
self.assertEqual(expected_info, target_info)
|
||||
mock_get_wt_host.assert_called_once_with(mock.sentinel.target_name)
|
||||
|
||||
@mock.patch.object(tg_utils.ISCSITargetUtils, '_get_wt_host')
|
||||
def test_set_chap_credentials_exception(self, mock_get_wt_host):
|
||||
mock_wt_host = mock_get_wt_host.return_value
|
||||
mock_wt_host.put.side_effect = test_base.FakeWMIExc
|
||||
|
||||
self.assertRaises(exceptions.ISCSITargetException,
|
||||
self._tgutils.set_chap_credentials,
|
||||
mock.sentinel.target_name,
|
||||
mock.sentinel.chap_username,
|
||||
mock.sentinel.chap_password)
|
||||
|
||||
mock_get_wt_host.assert_called_once_with(mock.sentinel.target_name)
|
||||
self.assertTrue(mock_wt_host.EnableCHAP),
|
||||
self.assertEqual(mock.sentinel.chap_username,
|
||||
mock_wt_host.CHAPUserName)
|
||||
self.assertEqual(mock.sentinel.chap_password,
|
||||
mock_wt_host.CHAPSecret)
|
||||
mock_wt_host.put.assert_called_once_with()
|
||||
|
||||
@mock.patch.object(tg_utils.ISCSITargetUtils, '_get_wt_idmethod')
|
||||
def test_associate_initiator_exception(self, mock_get_wtidmethod):
|
||||
mock_get_wtidmethod.return_value = None
|
||||
mock_wt_idmeth_cls = self._tgutils._conn_wmi.WT_IDMethod
|
||||
mock_wt_idmetod = mock_wt_idmeth_cls.new.return_value
|
||||
mock_wt_idmetod.put.side_effect = test_base.FakeWMIExc
|
||||
|
||||
self.assertRaises(exceptions.ISCSITargetException,
|
||||
self._tgutils.associate_initiator_with_iscsi_target,
|
||||
mock.sentinel.initiator, mock.sentinel.target_name,
|
||||
id_method=mock.sentinel.id_method)
|
||||
|
||||
self.assertEqual(mock.sentinel.target_name, mock_wt_idmetod.HostName)
|
||||
self.assertEqual(mock.sentinel.initiator, mock_wt_idmetod.Value)
|
||||
self.assertEqual(mock.sentinel.id_method, mock_wt_idmetod.Method)
|
||||
mock_get_wtidmethod.assert_called_once_with(mock.sentinel.initiator,
|
||||
mock.sentinel.target_name)
|
||||
|
||||
@mock.patch.object(tg_utils.ISCSITargetUtils, '_get_wt_idmethod')
|
||||
def test_already_associated_initiator(self, mock_get_wtidmethod):
|
||||
mock_wt_idmeth_cls = self._tgutils._conn_wmi.WT_IDMethod
|
||||
|
||||
self._tgutils.associate_initiator_with_iscsi_target(
|
||||
mock.sentinel.initiator, mock.sentinel.target_name,
|
||||
id_method=mock.sentinel.id_method)
|
||||
|
||||
self.assertFalse(mock_wt_idmeth_cls.new.called)
|
||||
|
||||
@mock.patch.object(tg_utils.ISCSITargetUtils, '_get_wt_idmethod')
|
||||
def test_deassociate_initiator_exception(self, mock_get_wtidmethod):
|
||||
mock_wt_idmetod = mock_get_wtidmethod.return_value
|
||||
mock_wt_idmetod.Delete_.side_effect = test_base.FakeWMIExc
|
||||
|
||||
self.assertRaises(exceptions.ISCSITargetException,
|
||||
self._tgutils.deassociate_initiator,
|
||||
mock.sentinel.initiator, mock.sentinel.target_name)
|
||||
|
||||
mock_get_wtidmethod.assert_called_once_with(mock.sentinel.initiator,
|
||||
mock.sentinel.target_name)
|
||||
|
||||
def test_create_wt_disk_exception(self):
|
||||
mock_wt_disk_cls = self._tgutils._conn_wmi.WT_Disk
|
||||
mock_wt_disk_cls.NewWTDisk.side_effect = test_base.FakeWMIExc
|
||||
|
||||
self.assertRaises(exceptions.ISCSITargetException,
|
||||
self._tgutils.create_wt_disk,
|
||||
mock.sentinel.vhd_path, mock.sentinel.wtd_name,
|
||||
mock.sentinel.size_mb)
|
||||
|
||||
mock_wt_disk_cls.NewWTDisk.assert_called_once_with(
|
||||
DevicePath=mock.sentinel.vhd_path,
|
||||
Description=mock.sentinel.wtd_name,
|
||||
SizeInMB=mock.sentinel.size_mb)
|
||||
|
||||
def test_import_wt_disk_exception(self):
|
||||
mock_wt_disk_cls = self._tgutils._conn_wmi.WT_Disk
|
||||
mock_wt_disk_cls.ImportWTDisk.side_effect = test_base.FakeWMIExc
|
||||
|
||||
self.assertRaises(exceptions.ISCSITargetException,
|
||||
self._tgutils.import_wt_disk,
|
||||
mock.sentinel.vhd_path, mock.sentinel.wtd_name)
|
||||
|
||||
mock_wt_disk_cls.ImportWTDisk.assert_called_once_with(
|
||||
DevicePath=mock.sentinel.vhd_path,
|
||||
Description=mock.sentinel.wtd_name)
|
||||
|
||||
@mock.patch.object(tg_utils.ISCSITargetUtils, '_get_wt_disk')
|
||||
def test_change_wt_disk_status_exception(self, mock_get_wt_disk):
|
||||
mock_wt_disk = mock_get_wt_disk.return_value
|
||||
mock_wt_disk.put.side_effect = test_base.FakeWMIExc
|
||||
wt_disk_enabled = True
|
||||
|
||||
self.assertRaises(exceptions.ISCSITargetException,
|
||||
self._tgutils.change_wt_disk_status,
|
||||
mock.sentinel.wtd_name,
|
||||
enabled=wt_disk_enabled)
|
||||
|
||||
mock_get_wt_disk.assert_called_once_with(mock.sentinel.wtd_name)
|
||||
self.assertEqual(wt_disk_enabled, mock_wt_disk.Enabled)
|
||||
|
||||
@mock.patch.object(tg_utils.ISCSITargetUtils, '_get_wt_disk')
|
||||
def test_remove_wt_disk_exception(self, mock_get_wt_disk):
|
||||
mock_wt_disk = mock_get_wt_disk.return_value
|
||||
mock_wt_disk.Delete_.side_effect = test_base.FakeWMIExc
|
||||
|
||||
self.assertRaises(exceptions.ISCSITargetException,
|
||||
self._tgutils.remove_wt_disk,
|
||||
mock.sentinel.wtd_name)
|
||||
|
||||
mock_get_wt_disk.assert_called_once_with(mock.sentinel.wtd_name,
|
||||
fail_if_not_found=False)
|
||||
|
||||
@mock.patch.object(tg_utils.ISCSITargetUtils, '_get_wt_disk')
|
||||
def test_extend_wt_disk_exception(self, mock_get_wt_disk):
|
||||
mock_wt_disk = mock_get_wt_disk.return_value
|
||||
mock_wt_disk.Extend.side_effect = test_base.FakeWMIExc
|
||||
|
||||
self.assertRaises(exceptions.ISCSITargetException,
|
||||
self._tgutils.extend_wt_disk,
|
||||
mock.sentinel.wtd_name,
|
||||
mock.sentinel.additional_mb)
|
||||
|
||||
mock_get_wt_disk.assert_called_once_with(mock.sentinel.wtd_name)
|
||||
mock_wt_disk.Extend.assert_called_once_with(
|
||||
mock.sentinel.additional_mb)
|
||||
|
||||
@mock.patch.object(tg_utils.ISCSITargetUtils, '_get_wt_host')
|
||||
@mock.patch.object(tg_utils.ISCSITargetUtils, '_get_wt_disk')
|
||||
def test_add_disk_to_target_exception(self, mock_get_wt_disk,
|
||||
mock_get_wt_host):
|
||||
mock_wt_disk = mock_get_wt_disk.return_value
|
||||
mock_wt_host = mock_get_wt_host.return_value
|
||||
mock_wt_host.AddWTDisk.side_effect = test_base.FakeWMIExc
|
||||
|
||||
self.assertRaises(exceptions.ISCSITargetException,
|
||||
self._tgutils.add_disk_to_target,
|
||||
mock.sentinel.wtd_name,
|
||||
mock.sentinel.target_name)
|
||||
|
||||
mock_get_wt_disk.assert_called_once_with(mock.sentinel.wtd_name)
|
||||
mock_get_wt_host.assert_called_once_with(mock.sentinel.target_name)
|
||||
mock_wt_host.AddWTDisk.assert_called_once_with(mock_wt_disk.WTD)
|
||||
|
||||
@mock.patch.object(tg_utils.ISCSITargetUtils, '_get_wt_disk')
|
||||
def test_create_snapshot_exception(self, mock_get_wt_disk):
|
||||
mock_wt_disk = mock_get_wt_disk.return_value
|
||||
mock_wt_snap = mock.Mock()
|
||||
mock_wt_snap.put.side_effect = test_base.FakeWMIExc
|
||||
mock_wt_snap_cls = self._tgutils._conn_wmi.WT_Snapshot
|
||||
mock_wt_snap_cls.return_value = [mock_wt_snap]
|
||||
mock_wt_snap_cls.Create.return_value = [mock.sentinel.snap_id]
|
||||
|
||||
self.assertRaises(exceptions.ISCSITargetException,
|
||||
self._tgutils.create_snapshot,
|
||||
mock.sentinel.wtd_name,
|
||||
mock.sentinel.snap_name)
|
||||
|
||||
mock_get_wt_disk.assert_called_once_with(mock.sentinel.wtd_name)
|
||||
mock_wt_snap_cls.Create.assert_called_once_with(WTD=mock_wt_disk.WTD)
|
||||
mock_wt_snap_cls.assert_called_once_with(Id=mock.sentinel.snap_id)
|
||||
self.assertEqual(mock.sentinel.snap_name, mock_wt_snap.Description)
|
||||
|
||||
@mock.patch.object(tg_utils.ISCSITargetUtils, '_get_wt_snapshot')
|
||||
def test_delete_snapshot_exception(self, mock_get_wt_snap):
|
||||
mock_wt_snap = mock_get_wt_snap.return_value
|
||||
mock_wt_snap.Delete_.side_effect = test_base.FakeWMIExc
|
||||
|
||||
self.assertRaises(exceptions.ISCSITargetException,
|
||||
self._tgutils.delete_snapshot,
|
||||
mock.sentinel.snap_name)
|
||||
|
||||
mock_get_wt_snap.assert_called_once_with(mock.sentinel.snap_name,
|
||||
fail_if_not_found=False)
|
||||
|
||||
@mock.patch.object(tg_utils.ISCSITargetUtils, '_get_wt_snapshot')
|
||||
def test_export_snapshot_exception(self, mock_get_wt_snap):
|
||||
mock_wt_disk_cls = self._tgutils._conn_wmi.WT_Disk
|
||||
mock_wt_disk = mock.Mock()
|
||||
mock_wt_disk_cls.return_value = [mock_wt_disk]
|
||||
mock_wt_disk.Delete_.side_effect = test_base.FakeWMIExc
|
||||
mock_wt_snap = mock_get_wt_snap.return_value
|
||||
mock_wt_snap.Export.return_value = [mock.sentinel.wt_disk_id]
|
||||
|
||||
self.assertRaises(exceptions.ISCSITargetException,
|
||||
self._tgutils.export_snapshot,
|
||||
mock.sentinel.snap_name,
|
||||
mock.sentinel.dest_path)
|
||||
|
||||
mock_get_wt_snap.assert_called_once_with(mock.sentinel.snap_name)
|
||||
mock_wt_snap.Export.assert_called_once_with()
|
||||
mock_wt_disk_cls.assert_called_once_with(WTD=mock.sentinel.wt_disk_id)
|
||||
|
||||
expected_wt_disk_description = "%s-%s-temp" % (
|
||||
mock.sentinel.snap_name,
|
||||
mock.sentinel.wt_disk_id)
|
||||
self.assertEqual(expected_wt_disk_description,
|
||||
mock_wt_disk.Description)
|
||||
|
||||
mock_wt_disk.put.assert_called_once_with()
|
||||
mock_wt_disk.Delete_.assert_called_once_with()
|
||||
self._tgutils._pathutils.copy.assert_called_once_with(
|
||||
mock_wt_disk.DevicePath, mock.sentinel.dest_path)
|
@ -1,156 +0,0 @@
|
||||
# Copyright 2016 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
|
||||
from os_win import _utils
|
||||
from os_win import exceptions
|
||||
from os_win.tests import test_base
|
||||
from os_win.utils.storage import diskutils
|
||||
|
||||
|
||||
class DiskUtilsTestCase(test_base.OsWinBaseTestCase):
|
||||
def setUp(self):
|
||||
super(DiskUtilsTestCase, self).setUp()
|
||||
self._diskutils = diskutils.DiskUtils()
|
||||
self._diskutils._conn_storage = mock.MagicMock()
|
||||
self._diskutils._win32_utils = mock.MagicMock()
|
||||
self._mock_run = self._diskutils._win32_utils.run_and_check_output
|
||||
|
||||
def test_get_disk(self):
|
||||
mock_msft_disk_cls = self._diskutils._conn_storage.Msft_Disk
|
||||
mock_disk = mock_msft_disk_cls.return_value[0]
|
||||
|
||||
resulted_disk = self._diskutils._get_disk(mock.sentinel.disk_number)
|
||||
|
||||
mock_msft_disk_cls.assert_called_once_with(
|
||||
Number=mock.sentinel.disk_number)
|
||||
self.assertEqual(mock_disk, resulted_disk)
|
||||
|
||||
def test_get_unexisting_disk(self):
|
||||
mock_msft_disk_cls = self._diskutils._conn_storage.Msft_Disk
|
||||
mock_msft_disk_cls.return_value = []
|
||||
|
||||
self.assertRaises(exceptions.DiskNotFound,
|
||||
self._diskutils._get_disk,
|
||||
mock.sentinel.disk_number)
|
||||
|
||||
mock_msft_disk_cls.assert_called_once_with(
|
||||
Number=mock.sentinel.disk_number)
|
||||
|
||||
@mock.patch.object(diskutils.DiskUtils, '_get_disk')
|
||||
def test_get_disk_uid_and_uid_type(self, mock_get_disk):
|
||||
mock_disk = mock_get_disk.return_value
|
||||
|
||||
uid, uid_type = self._diskutils.get_disk_uid_and_uid_type(
|
||||
mock.sentinel.disk_number)
|
||||
|
||||
mock_get_disk.assert_called_once_with(mock.sentinel.disk_number)
|
||||
self.assertEqual(mock_disk.UniqueId, uid)
|
||||
self.assertEqual(mock_disk.UniqueIdFormat, uid_type)
|
||||
|
||||
def test_get_disk_uid_and_uid_type_not_found(self):
|
||||
mock_msft_disk_cls = self._diskutils._conn_storage.Msft_Disk
|
||||
mock_msft_disk_cls.return_value = []
|
||||
|
||||
self.assertRaises(exceptions.DiskNotFound,
|
||||
self._diskutils.get_disk_uid_and_uid_type,
|
||||
mock.sentinel.disk_number)
|
||||
|
||||
@mock.patch.object(diskutils.DiskUtils, '_get_disk')
|
||||
def test_refresh_disk(self, mock_get_disk):
|
||||
mock_disk = mock_get_disk.return_value
|
||||
|
||||
self._diskutils.refresh_disk(mock.sentinel.disk_number)
|
||||
|
||||
mock_get_disk.assert_called_once_with(mock.sentinel.disk_number)
|
||||
mock_disk.Refresh.assert_called_once_with()
|
||||
|
||||
def test_get_dev_number_from_dev_name(self):
|
||||
fake_physical_device_name = r'\\.\PhysicalDrive15'
|
||||
expected_device_number = '15'
|
||||
|
||||
get_dev_number = self._diskutils.get_device_number_from_device_name
|
||||
resulted_dev_number = get_dev_number(fake_physical_device_name)
|
||||
self.assertEqual(expected_device_number, resulted_dev_number)
|
||||
|
||||
def test_get_device_number_from_invalid_device_name(self):
|
||||
fake_physical_device_name = ''
|
||||
|
||||
self.assertRaises(exceptions.DiskNotFound,
|
||||
self._diskutils.get_device_number_from_device_name,
|
||||
fake_physical_device_name)
|
||||
|
||||
@mock.patch.object(_utils, 'execute')
|
||||
def test_rescan_disks(self, mock_execute):
|
||||
cmd = ("cmd", "/c", "echo", "rescan", "|", "diskpart.exe")
|
||||
|
||||
self._diskutils.rescan_disks()
|
||||
|
||||
mock_execute.assert_called_once_with(*cmd)
|
||||
self._diskutils._conn_storage.Msft_Disk.assert_called_once_with()
|
||||
|
||||
@mock.patch.object(diskutils, 'ctypes')
|
||||
@mock.patch.object(diskutils, 'kernel32', create=True)
|
||||
@mock.patch('os.path.abspath')
|
||||
def _test_get_disk_capacity(self, mock_abspath,
|
||||
mock_kernel32, mock_ctypes,
|
||||
raised_exc=None, ignore_errors=False):
|
||||
expected_values = ('total_bytes', 'free_bytes')
|
||||
|
||||
mock_params = [mock.Mock(value=value) for value in expected_values]
|
||||
mock_ctypes.c_ulonglong.side_effect = mock_params
|
||||
mock_ctypes.c_wchar_p = lambda x: (x, 'c_wchar_p')
|
||||
|
||||
self._mock_run.side_effect = raised_exc(
|
||||
func_name='fake_func_name',
|
||||
error_code='fake_error_code',
|
||||
error_message='fake_error_message') if raised_exc else None
|
||||
|
||||
if raised_exc and not ignore_errors:
|
||||
self.assertRaises(raised_exc,
|
||||
self._diskutils.get_disk_capacity,
|
||||
mock.sentinel.disk_path,
|
||||
ignore_errors=ignore_errors)
|
||||
else:
|
||||
ret_val = self._diskutils.get_disk_capacity(
|
||||
mock.sentinel.disk_path,
|
||||
ignore_errors=ignore_errors)
|
||||
expected_ret_val = (0, 0) if raised_exc else expected_values
|
||||
|
||||
self.assertEqual(expected_ret_val, ret_val)
|
||||
|
||||
mock_abspath.assert_called_once_with(mock.sentinel.disk_path)
|
||||
mock_ctypes.pointer.assert_has_calls(
|
||||
[mock.call(param) for param in mock_params])
|
||||
self._mock_run.assert_called_once_with(
|
||||
mock_kernel32.GetDiskFreeSpaceExW,
|
||||
mock_ctypes.c_wchar_p(mock_abspath.return_value),
|
||||
None,
|
||||
mock_ctypes.pointer.return_value,
|
||||
mock_ctypes.pointer.return_value,
|
||||
kernel32_lib_func=True)
|
||||
|
||||
def test_get_disk_capacity_successfully(self):
|
||||
self._test_get_disk_capacity()
|
||||
|
||||
def test_get_disk_capacity_ignored_error(self):
|
||||
self._test_get_disk_capacity(
|
||||
raised_exc=exceptions.Win32Exception,
|
||||
ignore_errors=True)
|
||||
|
||||
def test_get_disk_capacity_raised_exc(self):
|
||||
self._test_get_disk_capacity(
|
||||
raised_exc=exceptions.Win32Exception)
|
@ -1,215 +0,0 @@
|
||||
# Copyright 2015 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import ddt
|
||||
import mock
|
||||
|
||||
from os_win import exceptions
|
||||
from os_win.tests import test_base
|
||||
from os_win.utils.storage import smbutils
|
||||
|
||||
|
||||
@ddt.ddt
|
||||
class SMBUtilsTestCase(test_base.OsWinBaseTestCase):
|
||||
def setUp(self):
|
||||
super(SMBUtilsTestCase, self).setUp()
|
||||
|
||||
self._smbutils = smbutils.SMBUtils()
|
||||
self._smbutils._win32_utils = mock.Mock()
|
||||
self._smbutils._smb_conn = mock.Mock()
|
||||
self._mock_run = self._smbutils._win32_utils.run_and_check_output
|
||||
self._smb_conn = self._smbutils._smb_conn
|
||||
|
||||
@mock.patch.object(smbutils.SMBUtils, 'unmount_smb_share')
|
||||
@mock.patch('os.path.exists')
|
||||
def _test_check_smb_mapping(self, mock_exists, mock_unmount_smb_share,
|
||||
existing_mappings=True, share_available=False):
|
||||
mock_exists.return_value = share_available
|
||||
|
||||
fake_mappings = (
|
||||
[mock.sentinel.smb_mapping] if existing_mappings else [])
|
||||
|
||||
self._smb_conn.Msft_SmbMapping.return_value = fake_mappings
|
||||
|
||||
ret_val = self._smbutils.check_smb_mapping(
|
||||
mock.sentinel.share_path, remove_unavailable_mapping=True)
|
||||
|
||||
self.assertEqual(existing_mappings and share_available, ret_val)
|
||||
if existing_mappings and not share_available:
|
||||
mock_unmount_smb_share.assert_called_once_with(
|
||||
mock.sentinel.share_path, force=True)
|
||||
|
||||
def test_check_mapping(self):
|
||||
self._test_check_smb_mapping()
|
||||
|
||||
def test_remake_unavailable_mapping(self):
|
||||
self._test_check_smb_mapping(existing_mappings=True,
|
||||
share_available=False)
|
||||
|
||||
def test_available_mapping(self):
|
||||
self._test_check_smb_mapping(existing_mappings=True,
|
||||
share_available=True)
|
||||
|
||||
def test_mount_smb_share(self):
|
||||
fake_create = self._smb_conn.Msft_SmbMapping.Create
|
||||
self._smbutils.mount_smb_share(mock.sentinel.share_path,
|
||||
mock.sentinel.username,
|
||||
mock.sentinel.password)
|
||||
fake_create.assert_called_once_with(
|
||||
RemotePath=mock.sentinel.share_path,
|
||||
UserName=mock.sentinel.username,
|
||||
Password=mock.sentinel.password)
|
||||
|
||||
@mock.patch.object(smbutils, 'wmi', create=True)
|
||||
def test_mount_smb_share_failed(self, mock_wmi):
|
||||
mock_wmi.x_wmi = Exception
|
||||
self._smb_conn.Msft_SmbMapping.Create.side_effect = mock_wmi.x_wmi
|
||||
|
||||
self.assertRaises(exceptions.SMBException,
|
||||
self._smbutils.mount_smb_share,
|
||||
mock.sentinel.share_path)
|
||||
|
||||
def _test_unmount_smb_share(self, force=False):
|
||||
fake_mapping = mock.Mock()
|
||||
fake_mapping_attr_err = mock.Mock()
|
||||
fake_mapping_attr_err.side_effect = AttributeError
|
||||
smb_mapping_class = self._smb_conn.Msft_SmbMapping
|
||||
smb_mapping_class.return_value = [fake_mapping, fake_mapping_attr_err]
|
||||
|
||||
self._smbutils.unmount_smb_share(mock.sentinel.share_path,
|
||||
force)
|
||||
|
||||
smb_mapping_class.assert_called_once_with(
|
||||
RemotePath=mock.sentinel.share_path)
|
||||
fake_mapping.Remove.assert_called_once_with(Force=force)
|
||||
|
||||
def test_soft_unmount_smb_share(self):
|
||||
self._test_unmount_smb_share()
|
||||
|
||||
def test_force_unmount_smb_share(self):
|
||||
self._test_unmount_smb_share(force=True)
|
||||
|
||||
@mock.patch.object(smbutils, 'wmi', create=True)
|
||||
def test_unmount_smb_share_wmi_exception(self, mock_wmi):
|
||||
mock_wmi.x_wmi = Exception
|
||||
fake_mapping = mock.Mock()
|
||||
fake_mapping.Remove.side_effect = mock_wmi.x_wmi
|
||||
self._smb_conn.Msft_SmbMapping.return_value = [fake_mapping]
|
||||
|
||||
self.assertRaises(mock_wmi.x_wmi, self._smbutils.unmount_smb_share,
|
||||
mock.sentinel.share_path, force=True)
|
||||
|
||||
@mock.patch.object(smbutils, 'ctypes')
|
||||
@mock.patch.object(smbutils, 'kernel32', create=True)
|
||||
@mock.patch('os.path.abspath')
|
||||
def _test_get_share_capacity_info(self, mock_abspath,
|
||||
mock_kernel32, mock_ctypes,
|
||||
raised_exc=None, ignore_errors=False):
|
||||
expected_values = ('total_bytes', 'free_bytes')
|
||||
|
||||
mock_params = [mock.Mock(value=value) for value in expected_values]
|
||||
mock_ctypes.c_ulonglong.side_effect = mock_params
|
||||
mock_ctypes.c_wchar_p = lambda x: (x, 'c_wchar_p')
|
||||
|
||||
self._mock_run.side_effect = raised_exc(
|
||||
func_name='fake_func_name',
|
||||
error_code='fake_error_code',
|
||||
error_message='fake_error_message') if raised_exc else None
|
||||
|
||||
if raised_exc and not ignore_errors:
|
||||
self.assertRaises(raised_exc,
|
||||
self._smbutils.get_share_capacity_info,
|
||||
mock.sentinel.share_path,
|
||||
ignore_errors=ignore_errors)
|
||||
else:
|
||||
ret_val = self._smbutils.get_share_capacity_info(
|
||||
mock.sentinel.share_path,
|
||||
ignore_errors=ignore_errors)
|
||||
expected_ret_val = (0, 0) if raised_exc else expected_values
|
||||
|
||||
self.assertEqual(expected_ret_val, ret_val)
|
||||
|
||||
mock_abspath.assert_called_once_with(mock.sentinel.share_path)
|
||||
mock_ctypes.pointer.assert_has_calls(
|
||||
[mock.call(param) for param in mock_params])
|
||||
self._mock_run.assert_called_once_with(
|
||||
mock_kernel32.GetDiskFreeSpaceExW,
|
||||
mock_ctypes.c_wchar_p(mock_abspath.return_value),
|
||||
None,
|
||||
mock_ctypes.pointer.return_value,
|
||||
mock_ctypes.pointer.return_value,
|
||||
kernel32_lib_func=True)
|
||||
|
||||
def test_get_share_capacity_info_successfully(self):
|
||||
self._test_get_share_capacity_info()
|
||||
|
||||
def test_get_share_capacity_info_ignored_error(self):
|
||||
self._test_get_share_capacity_info(
|
||||
raised_exc=exceptions.Win32Exception,
|
||||
ignore_errors=True)
|
||||
|
||||
def test_get_share_capacity_info_raised_exc(self):
|
||||
self._test_get_share_capacity_info(
|
||||
raised_exc=exceptions.Win32Exception)
|
||||
|
||||
def test_get_smb_share_path(self):
|
||||
fake_share = mock.Mock(Path=mock.sentinel.share_path)
|
||||
self._smb_conn.Msft_SmbShare.return_value = [fake_share]
|
||||
|
||||
share_path = self._smbutils.get_smb_share_path(
|
||||
mock.sentinel.share_name)
|
||||
|
||||
self.assertEqual(mock.sentinel.share_path, share_path)
|
||||
self._smb_conn.Msft_SmbShare.assert_called_once_with(
|
||||
Name=mock.sentinel.share_name)
|
||||
|
||||
def test_get_unexisting_smb_share_path(self):
|
||||
self._smb_conn.Msft_SmbShare.return_value = []
|
||||
|
||||
share_path = self._smbutils.get_smb_share_path(
|
||||
mock.sentinel.share_name)
|
||||
|
||||
self.assertIsNone(share_path)
|
||||
self._smb_conn.Msft_SmbShare.assert_called_once_with(
|
||||
Name=mock.sentinel.share_name)
|
||||
|
||||
@ddt.data({'local_ips': [mock.sentinel.ip0, mock.sentinel.ip1],
|
||||
'dest_ips': [mock.sentinel.ip2, mock.sentinel.ip3],
|
||||
'expected_local': False},
|
||||
{'local_ips': [mock.sentinel.ip0, mock.sentinel.ip1],
|
||||
'dest_ips': [mock.sentinel.ip1, mock.sentinel.ip3],
|
||||
'expected_local': True})
|
||||
@ddt.unpack
|
||||
@mock.patch('os_win._utils.get_ips')
|
||||
@mock.patch('socket.gethostname')
|
||||
def test_is_local_share(self, mock_gethostname, mock_get_ips,
|
||||
local_ips, dest_ips, expected_local):
|
||||
fake_share_server = 'fake_share_server'
|
||||
fake_share = '\\\\%s\\fake_share' % fake_share_server
|
||||
mock_get_ips.side_effect = (local_ips, dest_ips)
|
||||
self._smbutils._loopback_share_map = {}
|
||||
|
||||
is_local = self._smbutils.is_local_share(fake_share)
|
||||
self.assertEqual(expected_local, is_local)
|
||||
|
||||
# We ensure that this value is cached, calling it again
|
||||
# and making sure that we have attempted to resolve the
|
||||
# address only once.
|
||||
self._smbutils.is_local_share(fake_share)
|
||||
|
||||
mock_gethostname.assert_called_once_with()
|
||||
mock_get_ips.assert_has_calls(
|
||||
[mock.call(mock_gethostname.return_value),
|
||||
mock.call(fake_share_server)])
|
@ -1,727 +0,0 @@
|
||||
# Copyright 2013 Cloudbase Solutions Srl
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import os
|
||||
|
||||
import mock
|
||||
from oslotest import base
|
||||
import six
|
||||
|
||||
from os_win import constants
|
||||
from os_win import exceptions
|
||||
from os_win.utils.storage.virtdisk import (
|
||||
virtdisk_constants as vdisk_const)
|
||||
from os_win.utils.storage.virtdisk import vhdutils
|
||||
|
||||
|
||||
class VHDUtilsTestCase(base.BaseTestCase):
|
||||
"""Unit tests for the Hyper-V VHDUtils class."""
|
||||
|
||||
def setUp(self):
|
||||
super(VHDUtilsTestCase, self).setUp()
|
||||
self._setup_lib_mocks()
|
||||
|
||||
self._fake_vst_struct = self._vdisk_struct.Win32_VIRTUAL_STORAGE_TYPE
|
||||
|
||||
self._vhdutils = vhdutils.VHDUtils()
|
||||
self._vhdutils._win32_utils = mock.Mock()
|
||||
|
||||
self._mock_run = self._vhdutils._win32_utils.run_and_check_output
|
||||
self._run_args = self._vhdutils._virtdisk_run_args
|
||||
|
||||
self.addCleanup(mock.patch.stopall)
|
||||
|
||||
def _setup_lib_mocks(self):
|
||||
self._vdisk_struct = mock.Mock()
|
||||
self._ctypes = mock.Mock()
|
||||
# This is used in order to easily make assertions on the variables
|
||||
# passed by reference.
|
||||
self._ctypes.byref = lambda x: (x, "byref")
|
||||
self._ctypes.c_wchar_p = lambda x: (x, "c_wchar_p")
|
||||
self._ctypes.c_ulong = lambda x: (x, "c_ulong")
|
||||
|
||||
mock.patch.multiple(vhdutils,
|
||||
ctypes=self._ctypes, kernel32=mock.DEFAULT,
|
||||
wintypes=mock.DEFAULT, virtdisk=mock.DEFAULT,
|
||||
vdisk_struct=self._vdisk_struct,
|
||||
create=True).start()
|
||||
|
||||
@mock.patch.object(vhdutils.VHDUtils, '_close')
|
||||
def _test_run_and_check_output(self, mock_close, raised_exc=None):
|
||||
self._mock_run.side_effect = raised_exc(
|
||||
func_name='fake_func_name',
|
||||
error_code='fake_error_code',
|
||||
error_message='fake_error_message') if raised_exc else None
|
||||
|
||||
if raised_exc:
|
||||
self.assertRaises(
|
||||
raised_exc,
|
||||
self._vhdutils._run_and_check_output,
|
||||
mock.sentinel.func,
|
||||
mock.sentinel.arg,
|
||||
cleanup_handle=mock.sentinel.handle)
|
||||
else:
|
||||
ret_val = self._vhdutils._run_and_check_output(
|
||||
mock.sentinel.func,
|
||||
mock.sentinel.arg,
|
||||
cleanup_handle=mock.sentinel.handle)
|
||||
self.assertEqual(self._mock_run.return_value, ret_val)
|
||||
|
||||
self._mock_run.assert_called_once_with(
|
||||
mock.sentinel.func, mock.sentinel.arg, **self._run_args)
|
||||
mock_close.assert_called_once_with(mock.sentinel.handle)
|
||||
|
||||
def test_run_and_check_output(self):
|
||||
self._test_run_and_check_output()
|
||||
|
||||
def test_run_and_check_output_raising_error(self):
|
||||
self._test_run_and_check_output(
|
||||
raised_exc=exceptions.VHDWin32APIException)
|
||||
|
||||
@mock.patch.object(vhdutils.VHDUtils, '_get_vhd_device_id')
|
||||
def test_open(self, mock_get_dev_id):
|
||||
fake_vst = self._fake_vst_struct.return_value
|
||||
|
||||
mock_get_dev_id.return_value = mock.sentinel.device_id
|
||||
|
||||
handle = self._vhdutils._open(
|
||||
vhd_path=mock.sentinel.vhd_path,
|
||||
open_flag=mock.sentinel.open_flag,
|
||||
open_access_mask=mock.sentinel.access_mask,
|
||||
open_params=mock.sentinel.open_params)
|
||||
|
||||
self.assertEqual(vhdutils.wintypes.HANDLE.return_value, handle)
|
||||
self._fake_vst_struct.assert_called_once_with(
|
||||
DeviceId=mock.sentinel.device_id)
|
||||
|
||||
self._mock_run.assert_called_once_with(
|
||||
vhdutils.virtdisk.OpenVirtualDisk,
|
||||
self._ctypes.byref(fake_vst),
|
||||
self._ctypes.c_wchar_p(mock.sentinel.vhd_path),
|
||||
mock.sentinel.access_mask,
|
||||
mock.sentinel.open_flag,
|
||||
mock.sentinel.open_params,
|
||||
self._ctypes.byref(vhdutils.wintypes.HANDLE.return_value),
|
||||
**self._run_args)
|
||||
|
||||
def test_close(self):
|
||||
self._vhdutils._close(mock.sentinel.handle)
|
||||
vhdutils.kernel32.CloseHandle.assert_called_once_with(
|
||||
mock.sentinel.handle)
|
||||
|
||||
@mock.patch.object(vhdutils.VHDUtils, '_get_vhd_device_id')
|
||||
@mock.patch.object(vhdutils.VHDUtils, '_close')
|
||||
def _test_create_vhd(self, mock_close, mock_get_dev_id, new_vhd_type):
|
||||
create_params_struct = (
|
||||
self._vdisk_struct.Win32_CREATE_VIRTUAL_DISK_PARAMETERS)
|
||||
mock_handle = vhdutils.wintypes.HANDLE.return_value
|
||||
|
||||
expected_create_vhd_flag = (
|
||||
vdisk_const.CREATE_VIRTUAL_DISK_FLAGS.get(new_vhd_type))
|
||||
|
||||
self._vhdutils.create_vhd(
|
||||
new_vhd_path=mock.sentinel.new_vhd_path,
|
||||
new_vhd_type=new_vhd_type,
|
||||
src_path=mock.sentinel.src_path,
|
||||
max_internal_size=mock.sentinel.max_internal_size,
|
||||
parent_path=mock.sentinel.parent_path)
|
||||
|
||||
self._fake_vst_struct.assert_called_once_with(
|
||||
DeviceId=mock_get_dev_id.return_value)
|
||||
create_params_struct.assert_called_once_with(
|
||||
MaximumSize=mock.sentinel.max_internal_size,
|
||||
ParentPath=mock.sentinel.parent_path,
|
||||
SourcePath=mock.sentinel.src_path)
|
||||
|
||||
self._mock_run.assert_called_once_with(
|
||||
vhdutils.virtdisk.CreateVirtualDisk,
|
||||
self._ctypes.byref(self._fake_vst_struct.return_value),
|
||||
self._ctypes.c_wchar_p(mock.sentinel.new_vhd_path), None,
|
||||
None, expected_create_vhd_flag, None,
|
||||
self._ctypes.byref(create_params_struct.return_value), None,
|
||||
self._ctypes.byref(mock_handle),
|
||||
**self._run_args)
|
||||
|
||||
mock_close.assert_called_once_with(mock_handle)
|
||||
|
||||
def test_create_dynamic_vhd(self):
|
||||
self._test_create_vhd(new_vhd_type=constants.VHD_TYPE_DYNAMIC)
|
||||
|
||||
def test_create_fixed_vhd(self):
|
||||
self._test_create_vhd(new_vhd_type=constants.VHD_TYPE_FIXED)
|
||||
|
||||
@mock.patch.object(vhdutils.VHDUtils, 'create_vhd')
|
||||
def test_create_dynamic_vhd_helper(self, mock_create_vhd):
|
||||
self._vhdutils.create_dynamic_vhd(mock.sentinel.path,
|
||||
mock.sentinel.size)
|
||||
|
||||
mock_create_vhd.assert_called_once_with(
|
||||
mock.sentinel.path,
|
||||
constants.VHD_TYPE_DYNAMIC,
|
||||
max_internal_size=mock.sentinel.size)
|
||||
|
||||
@mock.patch.object(vhdutils.VHDUtils, 'create_vhd')
|
||||
def test_create_differencing_vhd_helper(self, mock_create_vhd):
|
||||
self._vhdutils.create_differencing_vhd(mock.sentinel.path,
|
||||
mock.sentinel.parent_path)
|
||||
|
||||
mock_create_vhd.assert_called_once_with(
|
||||
mock.sentinel.path,
|
||||
constants.VHD_TYPE_DIFFERENCING,
|
||||
parent_path=mock.sentinel.parent_path)
|
||||
|
||||
@mock.patch.object(vhdutils.VHDUtils, 'create_vhd')
|
||||
def test_convert_vhd(self, mock_create_vhd):
|
||||
self._vhdutils.convert_vhd(mock.sentinel.src,
|
||||
mock.sentinel.dest,
|
||||
mock.sentinel.vhd_type)
|
||||
|
||||
mock_create_vhd.assert_called_once_with(
|
||||
mock.sentinel.dest,
|
||||
mock.sentinel.vhd_type,
|
||||
src_path=mock.sentinel.src)
|
||||
|
||||
def test_get_vhd_format_found_by_ext(self):
|
||||
fake_vhd_path = 'C:\\test.vhd'
|
||||
|
||||
ret_val = self._vhdutils.get_vhd_format(fake_vhd_path)
|
||||
|
||||
self.assertEqual(constants.DISK_FORMAT_VHD, ret_val)
|
||||
|
||||
@mock.patch.object(vhdutils.VHDUtils, '_get_vhd_format_by_signature')
|
||||
@mock.patch('os.path.exists')
|
||||
def _test_vhd_format_unrecognized_ext(self, mock_exists,
|
||||
mock_get_vhd_fmt_by_sign,
|
||||
signature_available=False):
|
||||
mock_exists.return_value = True
|
||||
fake_vhd_path = 'C:\\test_vhd'
|
||||
mock_get_vhd_fmt_by_sign.return_value = (
|
||||
constants.DISK_FORMAT_VHD if signature_available else None)
|
||||
|
||||
if signature_available:
|
||||
ret_val = self._vhdutils.get_vhd_format(fake_vhd_path)
|
||||
self.assertEqual(constants.DISK_FORMAT_VHD, ret_val)
|
||||
else:
|
||||
self.assertRaises(exceptions.VHDException,
|
||||
self._vhdutils.get_vhd_format,
|
||||
fake_vhd_path)
|
||||
|
||||
def test_get_vhd_format_unrecognised_ext_unavailable_signature(self):
|
||||
self._test_vhd_format_unrecognized_ext()
|
||||
|
||||
def test_get_vhd_format_unrecognised_ext_available_signature(self):
|
||||
self._test_vhd_format_unrecognized_ext(signature_available=True)
|
||||
|
||||
@mock.patch.object(vhdutils.VHDUtils, 'get_vhd_format')
|
||||
def test_get_vhd_device_id(self, mock_get_vhd_fmt):
|
||||
mock_get_vhd_fmt.return_value = constants.DISK_FORMAT_VHD
|
||||
|
||||
dev_id = self._vhdutils._get_vhd_device_id(mock.sentinel.vhd_path)
|
||||
|
||||
mock_get_vhd_fmt.assert_called_once_with(mock.sentinel.vhd_path)
|
||||
self.assertEqual(vdisk_const.VIRTUAL_STORAGE_TYPE_DEVICE_VHD,
|
||||
dev_id)
|
||||
|
||||
def _mock_open(self, read_data=None, curr_f_pos=0):
|
||||
mock_open = mock.mock_open()
|
||||
mock.patch.object(vhdutils, 'open', mock_open,
|
||||
create=True).start()
|
||||
|
||||
f = mock_open.return_value
|
||||
f.read.side_effect = read_data
|
||||
f.tell.return_value = curr_f_pos
|
||||
|
||||
return mock_open
|
||||
|
||||
def test_get_vhd_format_by_sig_vhdx(self):
|
||||
read_data = (vdisk_const.VHDX_SIGNATURE, )
|
||||
self._mock_open(read_data=read_data)
|
||||
|
||||
fmt = self._vhdutils._get_vhd_format_by_signature(
|
||||
mock.sentinel.vhd_path)
|
||||
|
||||
self.assertEqual(constants.DISK_FORMAT_VHDX, fmt)
|
||||
|
||||
def test_get_vhd_format_by_sig_vhd(self):
|
||||
read_data = ('notthesig', vdisk_const.VHD_SIGNATURE)
|
||||
mock_open = self._mock_open(read_data=read_data, curr_f_pos=1024)
|
||||
|
||||
fmt = self._vhdutils._get_vhd_format_by_signature(
|
||||
mock.sentinel.vhd_path)
|
||||
|
||||
self.assertEqual(constants.DISK_FORMAT_VHD, fmt)
|
||||
mock_open.return_value.seek.assert_has_calls([mock.call(0, 2),
|
||||
mock.call(-512, 2)])
|
||||
|
||||
def test_get_vhd_format_by_sig_invalid_format(self):
|
||||
self._mock_open(read_data='notthesig', curr_f_pos=1024)
|
||||
|
||||
fmt = self._vhdutils._get_vhd_format_by_signature(
|
||||
mock.sentinel.vhd_path)
|
||||
|
||||
self.assertIsNone(fmt)
|
||||
|
||||
def test_get_vhd_format_by_sig_zero_length_file(self):
|
||||
mock_open = self._mock_open(read_data=('', ''))
|
||||
|
||||
fmt = self._vhdutils._get_vhd_format_by_signature(
|
||||
mock.sentinel.vhd_path)
|
||||
|
||||
self.assertIsNone(fmt)
|
||||
mock_open.return_value.seek.assert_called_once_with(0, 2)
|
||||
|
||||
@mock.patch.object(vhdutils.VHDUtils, '_open')
|
||||
@mock.patch.object(vhdutils.VHDUtils, '_close')
|
||||
@mock.patch.object(vhdutils.VHDUtils, '_get_vhd_info_member')
|
||||
def test_get_vhd_info(self, mock_get_vhd_info_member,
|
||||
mock_close, mock_open):
|
||||
fake_info_member = vdisk_const.GET_VIRTUAL_DISK_INFO_SIZE
|
||||
fake_vhd_info = {'VirtualSize': mock.sentinel.virtual_size}
|
||||
|
||||
mock_open.return_value = mock.sentinel.handle
|
||||
mock_get_vhd_info_member.return_value = fake_vhd_info
|
||||
|
||||
expected_access_mask = (vdisk_const.VIRTUAL_DISK_ACCESS_GET_INFO |
|
||||
vdisk_const.VIRTUAL_DISK_ACCESS_DETACH)
|
||||
|
||||
ret_val = self._vhdutils.get_vhd_info(mock.sentinel.vhd_path,
|
||||
[fake_info_member])
|
||||
|
||||
self.assertEqual(fake_vhd_info, ret_val)
|
||||
mock_open.assert_called_once_with(
|
||||
mock.sentinel.vhd_path,
|
||||
open_access_mask=expected_access_mask)
|
||||
self._vhdutils._get_vhd_info_member.assert_called_once_with(
|
||||
mock.sentinel.handle,
|
||||
fake_info_member)
|
||||
mock_close.assert_called_once_with(mock.sentinel.handle)
|
||||
|
||||
@mock.patch.object(vhdutils.VHDUtils, '_parse_vhd_info')
|
||||
def test_get_vhd_info_member(self, mock_parse_vhd_info):
|
||||
get_vd_info_struct = (
|
||||
self._vdisk_struct.Win32_GET_VIRTUAL_DISK_INFO_PARAMETERS)
|
||||
fake_params = get_vd_info_struct.return_value
|
||||
fake_info_size = self._ctypes.sizeof.return_value
|
||||
|
||||
info_member = vdisk_const.GET_VIRTUAL_DISK_INFO_PARENT_LOCATION
|
||||
|
||||
vhd_info = self._vhdutils._get_vhd_info_member(
|
||||
mock.sentinel.vhd_path,
|
||||
info_member)
|
||||
|
||||
self._mock_run.assert_called_once_with(
|
||||
vhdutils.virtdisk.GetVirtualDiskInformation,
|
||||
mock.sentinel.vhd_path,
|
||||
self._ctypes.byref(
|
||||
self._ctypes.c_ulong(fake_info_size)),
|
||||
self._ctypes.byref(fake_params), None,
|
||||
ignored_error_codes=[vdisk_const.ERROR_VHD_INVALID_TYPE],
|
||||
**self._run_args)
|
||||
|
||||
self.assertEqual(mock_parse_vhd_info.return_value, vhd_info)
|
||||
mock_parse_vhd_info.assert_called_once_with(fake_params,
|
||||
info_member)
|
||||
|
||||
def test_parse_vhd_info(self):
|
||||
fake_info_member = vdisk_const.GET_VIRTUAL_DISK_INFO_SIZE
|
||||
fake_info = mock.Mock()
|
||||
fake_info.VhdInfo.Size._fields_ = [
|
||||
("VirtualSize", vhdutils.wintypes.ULARGE_INTEGER),
|
||||
("PhysicalSize", vhdutils.wintypes.ULARGE_INTEGER)]
|
||||
fake_info.VhdInfo.Size.VirtualSize = mock.sentinel.virt_size
|
||||
fake_info.VhdInfo.Size.PhysicalSize = mock.sentinel.phys_size
|
||||
|
||||
ret_val = self._vhdutils._parse_vhd_info(fake_info,
|
||||
fake_info_member)
|
||||
expected = {'VirtualSize': mock.sentinel.virt_size,
|
||||
'PhysicalSize': mock.sentinel.phys_size}
|
||||
|
||||
self.assertEqual(expected, ret_val)
|
||||
|
||||
def test_parse_vhd_provider_subtype_member(self):
|
||||
fake_info_member = (
|
||||
vdisk_const.GET_VIRTUAL_DISK_INFO_PROVIDER_SUBTYPE)
|
||||
fake_info = mock.Mock()
|
||||
fake_info.VhdInfo.ProviderSubtype = mock.sentinel.provider_subtype
|
||||
|
||||
ret_val = self._vhdutils._parse_vhd_info(fake_info, fake_info_member)
|
||||
expected = {'ProviderSubtype': mock.sentinel.provider_subtype}
|
||||
|
||||
self.assertEqual(expected, ret_val)
|
||||
|
||||
@mock.patch.object(vhdutils.VHDUtils, 'get_vhd_info')
|
||||
def test_get_vhd_size(self, mock_get_vhd_info):
|
||||
ret_val = self._vhdutils.get_vhd_size(mock.sentinel.vhd_path)
|
||||
|
||||
self.assertEqual(mock_get_vhd_info.return_value, ret_val)
|
||||
mock_get_vhd_info.assert_called_once_with(
|
||||
mock.sentinel.vhd_path,
|
||||
[vdisk_const.GET_VIRTUAL_DISK_INFO_SIZE])
|
||||
|
||||
@mock.patch.object(vhdutils.VHDUtils, 'get_vhd_info')
|
||||
def test_get_vhd_parent_path(self, mock_get_vhd_info):
|
||||
mock_get_vhd_info.return_value = {
|
||||
'ParentPath': mock.sentinel.parent_path}
|
||||
|
||||
ret_val = self._vhdutils.get_vhd_parent_path(mock.sentinel.vhd_path)
|
||||
|
||||
self.assertEqual(mock.sentinel.parent_path, ret_val)
|
||||
mock_get_vhd_info.assert_called_once_with(
|
||||
mock.sentinel.vhd_path,
|
||||
[vdisk_const.GET_VIRTUAL_DISK_INFO_PARENT_LOCATION])
|
||||
|
||||
@mock.patch.object(vhdutils.VHDUtils, 'get_vhd_info')
|
||||
def test_get_vhd_type(self, mock_get_vhd_info):
|
||||
mock_get_vhd_info.return_value = {
|
||||
'ProviderSubtype': mock.sentinel.provider_subtype}
|
||||
|
||||
ret_val = self._vhdutils.get_vhd_type(mock.sentinel.vhd_path)
|
||||
|
||||
self.assertEqual(mock.sentinel.provider_subtype, ret_val)
|
||||
mock_get_vhd_info.assert_called_once_with(
|
||||
mock.sentinel.vhd_path,
|
||||
[vdisk_const.GET_VIRTUAL_DISK_INFO_PROVIDER_SUBTYPE])
|
||||
|
||||
@mock.patch.object(vhdutils.VHDUtils, '_open')
|
||||
@mock.patch.object(vhdutils.VHDUtils, '_close')
|
||||
@mock.patch('os.remove')
|
||||
def test_merge_vhd(self, mock_remove, mock_close, mock_open):
|
||||
open_params_struct = (
|
||||
self._vdisk_struct.Win32_OPEN_VIRTUAL_DISK_PARAMETERS_V1)
|
||||
merge_params_struct = (
|
||||
self._vdisk_struct.Win32_MERGE_VIRTUAL_DISK_PARAMETERS)
|
||||
|
||||
fake_open_params = open_params_struct.return_value
|
||||
fake_merge_params = merge_params_struct.return_value
|
||||
mock_open.return_value = mock.sentinel.handle
|
||||
|
||||
self._vhdutils.merge_vhd(mock.sentinel.vhd_path)
|
||||
|
||||
open_params_struct.assert_called_once_with(RWDepth=2)
|
||||
mock_open.assert_called_once_with(
|
||||
mock.sentinel.vhd_path,
|
||||
open_params=self._ctypes.byref(fake_open_params))
|
||||
merge_params_struct.assert_called_once_with(MergeDepth=1)
|
||||
|
||||
self._mock_run.assert_called_once_with(
|
||||
vhdutils.virtdisk.MergeVirtualDisk,
|
||||
mock.sentinel.handle,
|
||||
None,
|
||||
self._ctypes.byref(fake_merge_params),
|
||||
None,
|
||||
**self._run_args)
|
||||
mock_remove.assert_called_once_with(
|
||||
mock.sentinel.vhd_path)
|
||||
mock_close.assert_called_once_with(mock.sentinel.handle)
|
||||
|
||||
@mock.patch.object(vhdutils.VHDUtils, '_open')
|
||||
@mock.patch.object(vhdutils.VHDUtils, '_close')
|
||||
def test_reconnect_parent_vhd(self, mock_close, mock_open):
|
||||
set_vdisk_info_struct = (
|
||||
self._vdisk_struct.Win32_SET_VIRTUAL_DISK_INFO_PARAMETERS)
|
||||
open_params_struct = (
|
||||
self._vdisk_struct.Win32_OPEN_VIRTUAL_DISK_PARAMETERS_V2)
|
||||
|
||||
fake_set_params = set_vdisk_info_struct.return_value
|
||||
fake_open_params = open_params_struct.return_value
|
||||
mock_open.return_value = mock.sentinel.handle
|
||||
|
||||
self._vhdutils.reconnect_parent_vhd(mock.sentinel.vhd_path,
|
||||
mock.sentinel.parent_path)
|
||||
|
||||
open_params_struct.assert_called_once_with(GetInfoOnly=False)
|
||||
self._vhdutils._open.assert_called_once_with(
|
||||
mock.sentinel.vhd_path,
|
||||
open_flag=vdisk_const.OPEN_VIRTUAL_DISK_FLAG_NO_PARENTS,
|
||||
open_access_mask=None,
|
||||
open_params=vhdutils.ctypes.byref(fake_open_params))
|
||||
set_vdisk_info_struct.assert_called_once_with(
|
||||
ParentFilePath=mock.sentinel.parent_path)
|
||||
|
||||
self._mock_run.assert_called_once_with(
|
||||
vhdutils.virtdisk.SetVirtualDiskInformation,
|
||||
mock.sentinel.handle,
|
||||
vhdutils.ctypes.byref(fake_set_params),
|
||||
**self._run_args)
|
||||
mock_close.assert_called_once_with(mock.sentinel.handle)
|
||||
|
||||
@mock.patch.object(vhdutils.VHDUtils, 'get_internal_vhd_size_by_file_size')
|
||||
@mock.patch.object(vhdutils.VHDUtils, '_resize_vhd')
|
||||
@mock.patch.object(vhdutils.VHDUtils, '_check_resize_needed')
|
||||
def _test_resize_vhd(self, mock_check_resize_needed,
|
||||
mock_resize_helper, mock_get_internal_size,
|
||||
is_file_max_size=True, resize_needed=True):
|
||||
mock_check_resize_needed.return_value = resize_needed
|
||||
|
||||
self._vhdutils.resize_vhd(mock.sentinel.vhd_path,
|
||||
mock.sentinel.new_size,
|
||||
is_file_max_size,
|
||||
validate_new_size=True)
|
||||
|
||||
if is_file_max_size:
|
||||
mock_get_internal_size.assert_called_once_with(
|
||||
mock.sentinel.vhd_path, mock.sentinel.new_size)
|
||||
expected_new_size = mock_get_internal_size.return_value
|
||||
else:
|
||||
expected_new_size = mock.sentinel.new_size
|
||||
|
||||
mock_check_resize_needed.assert_called_once_with(
|
||||
mock.sentinel.vhd_path, expected_new_size)
|
||||
if resize_needed:
|
||||
mock_resize_helper.assert_called_once_with(mock.sentinel.vhd_path,
|
||||
expected_new_size)
|
||||
else:
|
||||
self.assertFalse(mock_resize_helper.called)
|
||||
|
||||
def test_resize_vhd_specifying_internal_size(self):
|
||||
self._test_resize_vhd(is_file_max_size=False)
|
||||
|
||||
def test_resize_vhd_specifying_file_max_size(self):
|
||||
self._test_resize_vhd()
|
||||
|
||||
def test_resize_vhd_already_having_requested_size(self):
|
||||
self._test_resize_vhd(resize_needed=False)
|
||||
|
||||
@mock.patch.object(vhdutils.VHDUtils, 'get_vhd_size')
|
||||
def _test_check_resize_needed(self, mock_get_vhd_size,
|
||||
current_size=1, new_size=2):
|
||||
mock_get_vhd_size.return_value = dict(VirtualSize=current_size)
|
||||
|
||||
if current_size > new_size:
|
||||
self.assertRaises(exceptions.VHDException,
|
||||
self._vhdutils._check_resize_needed,
|
||||
mock.sentinel.vhd_path,
|
||||
new_size)
|
||||
else:
|
||||
resize_needed = self._vhdutils._check_resize_needed(
|
||||
mock.sentinel.vhd_path, new_size)
|
||||
self.assertEqual(current_size < new_size, resize_needed)
|
||||
|
||||
def test_check_resize_needed_smaller_new_size(self):
|
||||
self._test_check_resize_needed(current_size=2, new_size=1)
|
||||
|
||||
def test_check_resize_needed_bigger_new_size(self):
|
||||
self._test_check_resize_needed()
|
||||
|
||||
def test_check_resize_needed_smaller_equal_size(self):
|
||||
self._test_check_resize_needed(current_size=1, new_size=1)
|
||||
|
||||
@mock.patch.object(vhdutils.VHDUtils, '_open')
|
||||
@mock.patch.object(vhdutils.VHDUtils, '_close')
|
||||
def test_resize_vhd_helper(self, mock_close, mock_open):
|
||||
resize_vdisk_struct = (
|
||||
self._vdisk_struct.Win32_RESIZE_VIRTUAL_DISK_PARAMETERS)
|
||||
fake_params = resize_vdisk_struct.return_value
|
||||
|
||||
mock_open.return_value = mock.sentinel.handle
|
||||
|
||||
self._vhdutils._resize_vhd(mock.sentinel.vhd_path,
|
||||
mock.sentinel.new_size)
|
||||
|
||||
resize_vdisk_struct.assert_called_once_with(
|
||||
NewSize=mock.sentinel.new_size)
|
||||
self._mock_run.assert_called_once_with(
|
||||
vhdutils.virtdisk.ResizeVirtualDisk,
|
||||
mock.sentinel.handle,
|
||||
None,
|
||||
vhdutils.ctypes.byref(fake_params),
|
||||
None,
|
||||
**self._run_args)
|
||||
mock_close.assert_called_once_with(mock.sentinel.handle)
|
||||
|
||||
@mock.patch.object(vhdutils.VHDUtils, 'get_vhd_info')
|
||||
@mock.patch.object(vhdutils.VHDUtils,
|
||||
'_get_internal_vhd_size_by_file_size')
|
||||
@mock.patch.object(vhdutils.VHDUtils,
|
||||
'_get_internal_vhdx_size_by_file_size')
|
||||
def _test_get_int_sz_by_file_size(
|
||||
self, mock_get_vhdx_int_size,
|
||||
mock_get_vhd_int_size, mock_get_vhd_info,
|
||||
vhd_dev_id=vdisk_const.VIRTUAL_STORAGE_TYPE_DEVICE_VHD,
|
||||
vhd_type=constants.VHD_TYPE_DYNAMIC):
|
||||
fake_vhd_info = dict(ProviderSubtype=vhd_type,
|
||||
ParentPath=mock.sentinel.parent_path,
|
||||
DeviceId=vhd_dev_id)
|
||||
mock_get_vhd_info.side_effect = [fake_vhd_info]
|
||||
exppected_vhd_info_calls = [mock.call(mock.sentinel.vhd_path)]
|
||||
expected_vhd_checked = mock.sentinel.vhd_path
|
||||
expected_checked_vhd_info = fake_vhd_info
|
||||
|
||||
if vhd_type == constants.VHD_TYPE_DIFFERENCING:
|
||||
expected_checked_vhd_info = dict(
|
||||
fake_vhd_info, vhd_type=constants.VHD_TYPE_DYNAMIC)
|
||||
mock_get_vhd_info.side_effect.append(
|
||||
expected_checked_vhd_info)
|
||||
exppected_vhd_info_calls.append(
|
||||
mock.call(mock.sentinel.parent_path))
|
||||
expected_vhd_checked = mock.sentinel.parent_path
|
||||
|
||||
is_vhd = vhd_dev_id == vdisk_const.VIRTUAL_STORAGE_TYPE_DEVICE_VHD
|
||||
expected_helper = (mock_get_vhd_int_size
|
||||
if is_vhd
|
||||
else mock_get_vhdx_int_size)
|
||||
|
||||
ret_val = self._vhdutils.get_internal_vhd_size_by_file_size(
|
||||
mock.sentinel.vhd_path, mock.sentinel.vhd_size)
|
||||
|
||||
mock_get_vhd_info.assert_has_calls(exppected_vhd_info_calls)
|
||||
expected_helper.assert_called_once_with(expected_vhd_checked,
|
||||
mock.sentinel.vhd_size,
|
||||
expected_checked_vhd_info)
|
||||
self.assertEqual(expected_helper.return_value, ret_val)
|
||||
|
||||
def test_get_int_sz_by_file_size_vhd(self):
|
||||
self._test_get_int_sz_by_file_size()
|
||||
|
||||
def test_get_int_sz_by_file_size_vhdx(self):
|
||||
self._test_get_int_sz_by_file_size(
|
||||
vhd_dev_id=vdisk_const.VIRTUAL_STORAGE_TYPE_DEVICE_VHDX)
|
||||
|
||||
def test_get_int_sz_by_file_size_differencing(self):
|
||||
self._test_get_int_sz_by_file_size(
|
||||
vhd_dev_id=vdisk_const.VIRTUAL_STORAGE_TYPE_DEVICE_VHDX)
|
||||
|
||||
def _mocked_get_internal_vhd_size(self, root_vhd_size, vhd_type):
|
||||
fake_vhd_info = dict(ProviderSubtype=vhd_type,
|
||||
BlockSize=2097152,
|
||||
ParentPath=mock.sentinel.parent_path)
|
||||
|
||||
return self._vhdutils._get_internal_vhd_size_by_file_size(
|
||||
mock.sentinel.vhd_path, root_vhd_size, fake_vhd_info)
|
||||
|
||||
def test_get_internal_vhd_size_by_file_size_fixed(self):
|
||||
root_vhd_size = 1 << 30
|
||||
real_size = self._mocked_get_internal_vhd_size(
|
||||
root_vhd_size=root_vhd_size,
|
||||
vhd_type=constants.VHD_TYPE_FIXED)
|
||||
|
||||
expected_vhd_size = root_vhd_size - 512
|
||||
self.assertEqual(expected_vhd_size, real_size)
|
||||
|
||||
def test_get_internal_vhd_size_by_file_size_dynamic(self):
|
||||
root_vhd_size = 20 << 30
|
||||
real_size = self._mocked_get_internal_vhd_size(
|
||||
root_vhd_size=root_vhd_size,
|
||||
vhd_type=constants.VHD_TYPE_DYNAMIC)
|
||||
|
||||
expected_md_size = 43008
|
||||
expected_vhd_size = root_vhd_size - expected_md_size
|
||||
self.assertEqual(expected_vhd_size, real_size)
|
||||
|
||||
@mock.patch.object(vhdutils.VHDUtils, '_get_vhdx_block_size')
|
||||
@mock.patch.object(vhdutils.VHDUtils, '_get_vhdx_log_size')
|
||||
@mock.patch.object(vhdutils.VHDUtils, '_get_vhdx_metadata_size_and_offset')
|
||||
def test_get_vhdx_internal_size(self, mock_get_vhdx_md_sz_and_off,
|
||||
mock_get_vhdx_log_sz,
|
||||
mock_get_vhdx_block_size):
|
||||
self._mock_open()
|
||||
fake_log_sz = 1 << 20
|
||||
fake_block_sz = 32 << 20
|
||||
fake_md_sz = 1 << 20
|
||||
fake_logical_sector_sz = 4096
|
||||
new_vhd_sz = 1 << 30
|
||||
# We expect less than a block to be reserved for internal metadata.
|
||||
expected_max_int_sz = new_vhd_sz - fake_block_sz
|
||||
|
||||
fake_vhd_info = dict(SectorSize=fake_logical_sector_sz)
|
||||
|
||||
mock_get_vhdx_block_size.return_value = fake_block_sz
|
||||
mock_get_vhdx_log_sz.return_value = fake_log_sz
|
||||
mock_get_vhdx_md_sz_and_off.return_value = fake_md_sz, None
|
||||
|
||||
internal_size = self._vhdutils._get_internal_vhdx_size_by_file_size(
|
||||
mock.sentinel.vhd_path, new_vhd_sz, fake_vhd_info)
|
||||
|
||||
self.assertIn(type(internal_size), six.integer_types)
|
||||
self.assertEqual(expected_max_int_sz, internal_size)
|
||||
|
||||
def test_get_vhdx_internal_size_exception(self):
|
||||
mock_open = self._mock_open()
|
||||
mock_open.side_effect = IOError
|
||||
func = self._vhdutils._get_internal_vhdx_size_by_file_size
|
||||
self.assertRaises(exceptions.VHDException,
|
||||
func,
|
||||
mock.sentinel.vhd_path,
|
||||
mock.sentinel.vhd_size,
|
||||
mock.sentinel.vhd_info)
|
||||
|
||||
def _get_mock_file_handle(self, *args):
|
||||
mock_file_handle = mock.Mock()
|
||||
mock_file_handle.read.side_effect = args
|
||||
return mock_file_handle
|
||||
|
||||
def test_get_vhdx_current_header(self):
|
||||
# The current header has the maximum sequence number.
|
||||
fake_seq_numbers = [
|
||||
bytearray(b'\x01\x00\x00\x00\x00\x00\x00\x00'),
|
||||
bytearray(b'\x02\x00\x00\x00\x00\x00\x00\x00')]
|
||||
mock_handle = self._get_mock_file_handle(*fake_seq_numbers)
|
||||
|
||||
offset = self._vhdutils._get_vhdx_current_header_offset(mock_handle)
|
||||
|
||||
self.assertEqual(vdisk_const.VHDX_HEADER_OFFSETS[1], offset)
|
||||
|
||||
@mock.patch.object(vhdutils.VHDUtils, '_get_vhdx_current_header_offset')
|
||||
def test_get_log_size(self, mock_get_vhdx_curr_hd_offset):
|
||||
fake_curr_header_offset = vdisk_const.VHDX_HEADER_OFFSETS[0]
|
||||
fake_log_sz = bytearray(b'\x01\x00\x00\x00')
|
||||
|
||||
mock_get_vhdx_curr_hd_offset.return_value = fake_curr_header_offset
|
||||
mock_handle = self._get_mock_file_handle(fake_log_sz)
|
||||
|
||||
log_size = self._vhdutils._get_vhdx_log_size(mock_handle)
|
||||
|
||||
self.assertEqual(log_size, 1)
|
||||
|
||||
def test_get_vhdx_metadata_size(self):
|
||||
fake_md_offset = bytearray(b'\x01\x00\x00\x00\x00\x00\x00\x00')
|
||||
fake_md_sz = bytearray(b'\x01\x00\x00\x00')
|
||||
|
||||
mock_handle = self._get_mock_file_handle(fake_md_offset,
|
||||
fake_md_sz)
|
||||
|
||||
md_sz, md_offset = self._vhdutils._get_vhdx_metadata_size_and_offset(
|
||||
mock_handle)
|
||||
|
||||
self.assertEqual(1, md_sz)
|
||||
self.assertEqual(1, md_offset)
|
||||
|
||||
@mock.patch.object(vhdutils.VHDUtils,
|
||||
'_get_vhdx_metadata_size_and_offset')
|
||||
def test_get_block_size(self, mock_get_md_sz_and_offset):
|
||||
mock_get_md_sz_and_offset.return_value = (mock.sentinel.md_sz, 1024)
|
||||
fake_block_size = bytearray(b'\x01\x00\x00\x00')
|
||||
fake_offset = bytearray(b'\x02\x00\x00\x00')
|
||||
mock_handle = self._get_mock_file_handle(fake_offset,
|
||||
fake_block_size)
|
||||
|
||||
block_size = self._vhdutils._get_vhdx_block_size(mock_handle)
|
||||
self.assertEqual(block_size, 1)
|
||||
|
||||
@mock.patch.object(vhdutils.VHDUtils, 'convert_vhd')
|
||||
@mock.patch.object(os, 'unlink')
|
||||
@mock.patch.object(os, 'rename')
|
||||
def test_flatten_vhd(self, mock_rename, mock_unlink, mock_convert):
|
||||
fake_vhd_path = r'C:\test.vhd'
|
||||
expected_tmp_path = r'C:\test.tmp.vhd'
|
||||
|
||||
self._vhdutils.flatten_vhd(fake_vhd_path)
|
||||
|
||||
mock_convert.assert_called_once_with(fake_vhd_path, expected_tmp_path)
|
||||
mock_unlink.assert_called_once_with(fake_vhd_path)
|
||||
mock_rename.assert_called_once_with(expected_tmp_path, fake_vhd_path)
|
||||
|
||||
def test_get_best_supported_vhd_format(self):
|
||||
fmt = self._vhdutils.get_best_supported_vhd_format()
|
||||
self.assertEqual(constants.DISK_FORMAT_VHDX, fmt)
|
@ -1,132 +0,0 @@
|
||||
# Copyright 2016 Cloudbase Solutions Srl
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
|
||||
from os_win.tests import test_base
|
||||
from os_win.utils import baseutils
|
||||
|
||||
|
||||
class BaseUtilsTestCase(test_base.OsWinBaseTestCase):
    """Unit tests for the os-win BaseUtils class."""

    def setUp(self):
        super(BaseUtilsTestCase, self).setUp()
        self.utils = baseutils.BaseUtils()
        self.utils._conn = mock.MagicMock()

    @mock.patch.object(baseutils, 'wmi', create=True)
    def test_get_wmi_obj(self, mock_wmi):
        """_get_wmi_obj returns a WMI proxy built from the given moniker."""
        result = self.utils._get_wmi_obj(mock.sentinel.moniker)

        self.assertEqual(mock_wmi.WMI.return_value, result)
        mock_wmi.WMI.assert_called_once_with(moniker=mock.sentinel.moniker)

    @mock.patch.object(baseutils.BaseUtils, '_get_wmi_obj')
    @mock.patch.object(baseutils, 'sys')
    def _check_get_wmi_conn(self, mock_sys, mock_get_wmi_obj, **kwargs):
        # Helper: simulate a Windows host and request a WMI connection,
        # checking that it is obtained through _get_wmi_obj.
        mock_sys.platform = 'win32'
        result = self.utils._get_wmi_conn(mock.sentinel.moniker, **kwargs)

        self.assertEqual(mock_get_wmi_obj.return_value, result)
        mock_get_wmi_obj.assert_called_once_with(mock.sentinel.moniker,
                                                 **kwargs)

    def test_get_wmi_conn_kwargs(self):
        """Connections requested with extra kwargs must not be cached."""
        self.utils._WMI_CONS.clear()
        self._check_get_wmi_conn(privileges=mock.sentinel.privileges)
        self.assertNotIn(mock.sentinel.moniker, baseutils.BaseUtils._WMI_CONS)

    def test_get_wmi_conn(self):
        """Plain connections must be added to the class-level cache."""
        self._check_get_wmi_conn()
        self.assertIn(mock.sentinel.moniker, baseutils.BaseUtils._WMI_CONS)

    @mock.patch.object(baseutils.BaseUtils, '_get_wmi_obj')
    @mock.patch.object(baseutils, 'sys')
    def test_get_wmi_conn_cached(self, mock_sys, mock_get_wmi_obj):
        """A cached connection is reused instead of creating a new one."""
        mock_sys.platform = 'win32'
        baseutils.BaseUtils._WMI_CONS[mock.sentinel.moniker] = (
            mock.sentinel.conn)
        result = self.utils._get_wmi_conn(mock.sentinel.moniker)

        self.assertEqual(mock.sentinel.conn, result)
        self.assertFalse(mock_get_wmi_obj.called)

    @mock.patch.object(baseutils, 'sys')
    def test_get_wmi_conn_linux(self, mock_sys):
        """No WMI connection can be obtained on non-Windows platforms."""
        mock_sys.platform = 'linux'
        result = self.utils._get_wmi_conn(mock.sentinel.moniker)

        self.assertIsNone(result)
|
||||
|
||||
|
||||
class BaseUtilsVirtTestCase(test_base.OsWinBaseTestCase):
    """Unit tests for the os-win BaseUtilsVirt class."""

    def setUp(self):
        super(BaseUtilsVirtTestCase, self).setUp()
        self.utils = baseutils.BaseUtilsVirt()
        self.utils._conn_attr = mock.MagicMock()
        # Reset the cached OS version so each test starts clean.
        baseutils.BaseUtilsVirt._os_version = None

    @mock.patch.object(baseutils.BaseUtilsVirt, '_get_wmi_conn')
    def test_conn(self, mock_get_wmi_conn):
        """The lazy _conn property opens a namespace-scoped connection."""
        self.utils._conn_attr = None

        self.assertEqual(mock_get_wmi_conn.return_value, self.utils._conn)
        mock_get_wmi_conn.assert_called_once_with(
            self.utils._wmi_namespace % '.')

    def test_vs_man_svc(self):
        """On >= 6.3 the management service comes from the normal conn."""
        mock_os = mock.MagicMock(Version='6.3.0')
        self._mock_wmi.WMI.return_value.Win32_OperatingSystem.return_value = [
            mock_os]
        expected = self.utils._conn.Msvm_VirtualSystemManagementService()[0]
        self.assertEqual(expected, self.utils._vs_man_svc)

    @mock.patch.object(baseutils, 'imp')
    @mock.patch.object(baseutils, 'wmi', create=True)
    def test_vs_man_svc_2012(self, mock_wmi, mock_imp):
        """On 6.2 (Server 2012) the old wmi module is loaded via imp."""
        mock_os = mock.MagicMock(Version='6.2.0')
        mock_wmi.WMI.return_value.Win32_OperatingSystem.return_value = [
            mock_os]
        fake_module_path = '/fake/path/to/module'
        mock_wmi.__path__ = [fake_module_path]
        old_conn = mock_imp.load_source.return_value.WMI.return_value

        expected = old_conn.Msvm_VirtualSystemManagementService()[0]
        self.assertEqual(expected, self.utils._vs_man_svc)
        mock_imp.load_source.assert_called_once_with(
            'old_wmi', '%s.py' % fake_module_path)

    @mock.patch.object(baseutils.BaseUtilsVirt, '_get_wmi_compat_conn')
    def test_get_wmi_obj_compatibility_6_3(self, mock_get_wmi_compat):
        """Compat mode on >= 6.3 still uses the regular WMI proxy."""
        mock_os = mock.MagicMock(Version='6.3.0')
        self._mock_wmi.WMI.return_value.Win32_OperatingSystem.return_value = [
            mock_os]

        result = self.utils._get_wmi_obj(mock.sentinel.moniker, True)
        self.assertEqual(self._mock_wmi.WMI.return_value, result)

    @mock.patch.object(baseutils.BaseUtilsVirt, '_get_wmi_compat_conn')
    def test_get_wmi_obj_no_compatibility_6_2(self, mock_get_wmi_compat):
        """Without compat requested, 6.2 uses the regular WMI proxy."""
        baseutils.BaseUtilsVirt._os_version = [6, 2]
        result = self.utils._get_wmi_obj(mock.sentinel.moniker, False)
        self.assertEqual(self._mock_wmi.WMI.return_value, result)

    @mock.patch.object(baseutils.BaseUtilsVirt, '_get_wmi_compat_conn')
    def test_get_wmi_obj_compatibility_6_2(self, mock_get_wmi_compat):
        """With compat requested, 6.2 falls back to the compat conn."""
        baseutils.BaseUtilsVirt._os_version = [6, 2]
        result = self.utils._get_wmi_obj(mock.sentinel.moniker, True)
        self.assertEqual(mock_get_wmi_compat.return_value, result)
|
@ -1,295 +0,0 @@
|
||||
# Copyright 2014 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
|
||||
from os_win import constants
|
||||
from os_win import exceptions
|
||||
from os_win.tests import test_base
|
||||
from os_win.utils import hostutils
|
||||
|
||||
|
||||
class FakeCPUSpec(object):
    """Fake CPU spec fixture mirroring the Win32_Processor attributes
    that hostutils.get_cpus_info() reads.
    """

    Architecture = mock.sentinel.cpu_arch
    Name = mock.sentinel.cpu_name
    Manufacturer = mock.sentinel.cpu_man
    MaxClockSpeed = mock.sentinel.max_clock_speed
    NumberOfCores = mock.sentinel.cpu_cores
    NumberOfLogicalProcessors = mock.sentinel.cpu_procs
|
||||
|
||||
|
||||
class HostUtilsTestCase(test_base.OsWinBaseTestCase):
    """Unit tests for the Hyper-V hostutils class."""

    _DEVICE_ID = "Microsoft:UUID\\0\\0"
    _NODE_ID = "Microsoft:PhysicalNode\\0"

    _FAKE_MEMORY_TOTAL = 1024
    _FAKE_MEMORY_FREE = 512
    _FAKE_DISK_SIZE = 1024
    _FAKE_DISK_FREE = 512
    _FAKE_VERSION_GOOD = '6.2.0'
    _FAKE_VERSION_BAD = '6.1.9'

    def setUp(self):
        # The mocks must be in place before the base setUp runs.
        self._hostutils = hostutils.HostUtils()
        self._hostutils._conn_cimv2 = mock.MagicMock()
        self._hostutils._conn_attr = mock.MagicMock()

        super(HostUtilsTestCase, self).setUp()

    @mock.patch('os_win.utils.hostutils.ctypes')
    def test_get_host_tick_count64(self, mock_ctypes):
        """The kernel32 GetTickCount64 value is returned unchanged."""
        tick_count64 = "100"
        mock_ctypes.windll.kernel32.GetTickCount64.return_value = tick_count64
        response = self._hostutils.get_host_tick_count64()
        self.assertEqual(tick_count64, response)

    def test_get_cpus_info(self):
        """CPU info is returned as a list of attribute dicts."""
        mock_cpu = mock.MagicMock(spec=FakeCPUSpec)
        self._hostutils._conn_cimv2.query.return_value = [mock_cpu]
        cpu_list = self._hostutils.get_cpus_info()
        self.assertEqual([mock_cpu._mock_children], cpu_list)

    def test_get_memory_info(self):
        """Total and free physical memory are read from Win32 WMI."""
        mock_memory = mock.MagicMock()
        type(mock_memory).TotalVisibleMemorySize = mock.PropertyMock(
            return_value=self._FAKE_MEMORY_TOTAL)
        type(mock_memory).FreePhysicalMemory = mock.PropertyMock(
            return_value=self._FAKE_MEMORY_FREE)

        self._hostutils._conn_cimv2.query.return_value = [mock_memory]
        total_memory, free_memory = self._hostutils.get_memory_info()

        self.assertEqual(self._FAKE_MEMORY_TOTAL, total_memory)
        self.assertEqual(self._FAKE_MEMORY_FREE, free_memory)

    def test_get_volume_info(self):
        """Disk size and free space are read for the requested drive."""
        mock_disk = mock.MagicMock()
        type(mock_disk).Size = mock.PropertyMock(
            return_value=self._FAKE_DISK_SIZE)
        type(mock_disk).FreeSpace = mock.PropertyMock(
            return_value=self._FAKE_DISK_FREE)

        self._hostutils._conn_cimv2.query.return_value = [mock_disk]
        (total_size, free_space) = self._hostutils.get_volume_info(
            mock.sentinel.FAKE_DRIVE)

        self.assertEqual(self._FAKE_DISK_SIZE, total_size)
        self.assertEqual(self._FAKE_DISK_FREE, free_space)

    def test_check_min_windows_version_true(self):
        self._test_check_min_windows_version(self._FAKE_VERSION_GOOD, True)

    def test_check_min_windows_version_false(self):
        self._test_check_min_windows_version(self._FAKE_VERSION_BAD, False)

    def _test_check_min_windows_version(self, version, expected):
        # Helper: fake the reported OS version and check the 6.2 gate.
        mock_os = mock.MagicMock()
        mock_os.Version = version
        self._hostutils._conn_cimv2.Win32_OperatingSystem.return_value = [
            mock_os]
        hostutils.HostUtils._windows_version = None
        self.assertEqual(expected,
                         self._hostutils.check_min_windows_version(6, 2))

    def test_get_windows_version(self):
        """The raw Windows version string is returned."""
        mock_os = mock.MagicMock()
        mock_os.Version = self._FAKE_VERSION_GOOD
        self._hostutils._conn_cimv2.Win32_OperatingSystem.return_value = [
            mock_os]
        hostutils.HostUtils._windows_version = None
        self.assertEqual(self._FAKE_VERSION_GOOD,
                         self._hostutils.get_windows_version())

    @mock.patch('socket.gethostname')
    @mock.patch('os_win._utils.get_ips')
    def test_get_local_ips(self, mock_get_ips, mock_gethostname):
        """Local IPs are resolved from the local hostname."""
        local_ips = self._hostutils.get_local_ips()

        self.assertEqual(mock_get_ips.return_value, local_ips)
        mock_gethostname.assert_called_once_with()
        mock_get_ips.assert_called_once_with(mock_gethostname.return_value)

    def _test_host_power_action(self, action):
        # Helper: shutdown/reboot map to Win32Shutdown flags; anything
        # else raises NotImplementedError.
        fake_win32 = mock.MagicMock()
        fake_win32.Win32Shutdown = mock.MagicMock()

        self._hostutils._conn_cimv2.Win32_OperatingSystem.return_value = [
            fake_win32]

        if action == constants.HOST_POWER_ACTION_SHUTDOWN:
            self._hostutils.host_power_action(action)
            fake_win32.Win32Shutdown.assert_called_with(
                self._hostutils._HOST_FORCED_SHUTDOWN)
        elif action == constants.HOST_POWER_ACTION_REBOOT:
            self._hostutils.host_power_action(action)
            fake_win32.Win32Shutdown.assert_called_with(
                self._hostutils._HOST_FORCED_REBOOT)
        else:
            self.assertRaises(NotImplementedError,
                              self._hostutils.host_power_action, action)

    def test_host_shutdown(self):
        self._test_host_power_action(constants.HOST_POWER_ACTION_SHUTDOWN)

    def test_host_reboot(self):
        self._test_host_power_action(constants.HOST_POWER_ACTION_REBOOT)

    def test_host_startup(self):
        self._test_host_power_action(constants.HOST_POWER_ACTION_STARTUP)

    def test_get_supported_vm_types_2012_r2(self):
        """On 2012 R2 or newer, both VM generations are supported."""
        with mock.patch.object(self._hostutils,
                               'check_min_windows_version') as mock_check_win:
            mock_check_win.return_value = True
            result = self._hostutils.get_supported_vm_types()
            self.assertEqual([constants.IMAGE_PROP_VM_GEN_1,
                              constants.IMAGE_PROP_VM_GEN_2], result)

    def test_get_supported_vm_types(self):
        """Older hosts only support generation 1 VMs."""
        with mock.patch.object(self._hostutils,
                               'check_min_windows_version') as mock_check_win:
            mock_check_win.return_value = False
            result = self._hostutils.get_supported_vm_types()
            self.assertEqual([constants.IMAGE_PROP_VM_GEN_1], result)

    def test_check_server_feature(self):
        """A feature is enabled when Win32_ServerFeature lists it."""
        mock_sv_feature_cls = self._hostutils._conn_cimv2.Win32_ServerFeature
        mock_sv_feature_cls.return_value = [mock.sentinel.sv_feature]

        feature_enabled = self._hostutils.check_server_feature(
            mock.sentinel.feature_id)
        self.assertTrue(feature_enabled)

        mock_sv_feature_cls.assert_called_once_with(
            ID=mock.sentinel.feature_id)

    def _check_get_numa_nodes_missing_info(self):
        # Helper: nodes with incomplete info must yield an empty list.
        mock_numa_node = mock.MagicMock()
        self._hostutils._conn.Msvm_NumaNode.return_value = [
            mock_numa_node, mock_numa_node]

        nodes_info = self._hostutils.get_numa_nodes()
        self.assertEqual([], nodes_info)

    @mock.patch.object(hostutils.HostUtils, '_get_numa_memory_info')
    def test_get_numa_nodes_missing_memory_info(self, mock_get_memory_info):
        mock_get_memory_info.return_value = None
        self._check_get_numa_nodes_missing_info()

    @mock.patch.object(hostutils.HostUtils, '_get_numa_cpu_info')
    @mock.patch.object(hostutils.HostUtils, '_get_numa_memory_info')
    def test_get_numa_nodes_missing_cpu_info(self, mock_get_memory_info,
                                             mock_get_cpu_info):
        mock_get_cpu_info.return_value = None
        self._check_get_numa_nodes_missing_info()

    @mock.patch.object(hostutils.HostUtils, '_get_numa_cpu_info')
    @mock.patch.object(hostutils.HostUtils, '_get_numa_memory_info')
    def test_get_numa_nodes(self, mock_get_memory_info, mock_get_cpu_info):
        """NUMA node info is assembled from memory and CPU associations."""
        numa_memory = mock_get_memory_info.return_value
        host_cpu = mock.MagicMock(DeviceID=self._DEVICE_ID)
        mock_get_cpu_info.return_value = [host_cpu]
        numa_node = mock.MagicMock(NodeID=self._NODE_ID)
        self._hostutils._conn.Msvm_NumaNode.return_value = [
            numa_node, numa_node]

        nodes_info = self._hostutils.get_numa_nodes()

        expected_info = {
            'id': self._DEVICE_ID.split('\\')[-1],
            'memory': numa_memory.NumberOfBlocks,
            'memory_usage': numa_node.CurrentlyConsumableMemoryBlocks,
            'cpuset': set([self._DEVICE_ID.split('\\')[-1]]),
            'cpu_usage': 0,
        }

        self.assertEqual([expected_info, expected_info], nodes_info)

    def test_get_numa_memory_info(self):
        """Only memory objects present in the association are returned."""
        system_memory = mock.MagicMock()
        system_memory.path_.return_value = 'fake_wmi_obj_path'
        numa_node_memory = mock.MagicMock()
        numa_node_memory.path_.return_value = 'fake_wmi_obj_path1'
        numa_node_assoc = [system_memory]
        memory_info = self._hostutils._get_numa_memory_info(
            numa_node_assoc, [system_memory, numa_node_memory])

        self.assertEqual(system_memory, memory_info)

    def test_get_numa_memory_info_not_found(self):
        unrelated = mock.MagicMock()
        memory_info = self._hostutils._get_numa_memory_info([], [unrelated])

        self.assertIsNone(memory_info)

    def test_get_numa_cpu_info(self):
        """Only CPU objects present in the association are returned."""
        host_cpu = mock.MagicMock()
        host_cpu.path_.return_value = 'fake_wmi_obj_path'
        vm_cpu = mock.MagicMock()
        vm_cpu.path_.return_value = 'fake_wmi_obj_path1'
        numa_node_assoc = [host_cpu]
        cpu_info = self._hostutils._get_numa_cpu_info(numa_node_assoc,
                                                      [host_cpu, vm_cpu])

        self.assertEqual([host_cpu], cpu_info)

    def test_get_numa_cpu_info_not_found(self):
        unrelated = mock.MagicMock()
        cpu_info = self._hostutils._get_numa_cpu_info([], [unrelated])

        self.assertEqual([], cpu_info)

    def test_get_remotefx_gpu_info(self):
        """RemoteFX GPU properties are mapped into plain dicts."""
        fake_gpu = mock.MagicMock()
        fake_gpu.Name = mock.sentinel.Fake_gpu_name
        fake_gpu.TotalVideoMemory = mock.sentinel.Fake_gpu_total_memory
        fake_gpu.AvailableVideoMemory = mock.sentinel.Fake_gpu_available_memory
        fake_gpu.DirectXVersion = mock.sentinel.Fake_gpu_directx
        fake_gpu.DriverVersion = mock.sentinel.Fake_gpu_driver_version

        mock_phys_3d_proc = (
            self._hostutils._conn.Msvm_Physical3dGraphicsProcessor)
        mock_phys_3d_proc.return_value = [fake_gpu]

        return_gpus = self._hostutils.get_remotefx_gpu_info()
        self.assertEqual(mock.sentinel.Fake_gpu_name, return_gpus[0]['name'])
        self.assertEqual(mock.sentinel.Fake_gpu_driver_version,
                         return_gpus[0]['driver_version'])
        self.assertEqual(mock.sentinel.Fake_gpu_total_memory,
                         return_gpus[0]['total_video_ram'])
        self.assertEqual(mock.sentinel.Fake_gpu_available_memory,
                         return_gpus[0]['available_video_ram'])
        self.assertEqual(mock.sentinel.Fake_gpu_directx,
                         return_gpus[0]['directx_version'])

    def _set_verify_host_remotefx_capability_mocks(self, isGpuCapable=True,
                                                   isSlatCapable=True):
        # Helper: configure the Synth3dVideoPool capability flags.
        s3d_video_pool = self._hostutils._conn.Msvm_Synth3dVideoPool()[0]
        s3d_video_pool.IsGpuCapable = isGpuCapable
        s3d_video_pool.IsSlatCapable = isSlatCapable

    def test_verify_host_remotefx_capability_unsupported_gpu(self):
        self._set_verify_host_remotefx_capability_mocks(isGpuCapable=False)
        self.assertRaises(exceptions.HyperVRemoteFXException,
                          self._hostutils.verify_host_remotefx_capability)

    def test_verify_host_remotefx_capability_no_slat(self):
        self._set_verify_host_remotefx_capability_mocks(isSlatCapable=False)
        self.assertRaises(exceptions.HyperVRemoteFXException,
                          self._hostutils.verify_host_remotefx_capability)

    def test_verify_host_remotefx_capability(self):
        self._set_verify_host_remotefx_capability_mocks()
        self._hostutils.verify_host_remotefx_capability()
|
@ -1,66 +0,0 @@
|
||||
# Copyright 2016 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
|
||||
from os_win import exceptions
|
||||
from os_win.tests import test_base
|
||||
from os_win.utils import hostutils10
|
||||
|
||||
|
||||
class HostUtils10TestCase(test_base.OsWinBaseTestCase):
    """Unit tests for the Hyper-V HostUtils10 class."""

    def setUp(self):
        super(HostUtils10TestCase, self).setUp()
        self._hostutils = hostutils10.HostUtils10()
        self._hostutils._conn_hgs_attr = mock.MagicMock()

    @mock.patch.object(hostutils10.HostUtils10, '_get_wmi_conn')
    def test_conn_hgs(self, mock_get_wmi_conn):
        """The lazy HGS connection is opened against the HGS namespace."""
        self._hostutils._conn_hgs_attr = None
        self.assertEqual(mock_get_wmi_conn.return_value,
                         self._hostutils._conn_hgs)

        mock_get_wmi_conn.assert_called_once_with(
            self._hostutils._HGS_NAMESPACE % self._hostutils._host)

    @mock.patch.object(hostutils10.HostUtils10, '_get_wmi_conn')
    def test_conn_hgs_no_namespace(self, mock_get_wmi_conn):
        """A missing HGS namespace propagates as an OSWinException."""
        self._hostutils._conn_hgs_attr = None

        mock_get_wmi_conn.side_effect = [exceptions.OSWinException]
        self.assertRaises(exceptions.OSWinException,
                          lambda: self._hostutils._conn_hgs)
        mock_get_wmi_conn.assert_called_once_with(
            self._hostutils._HGS_NAMESPACE % self._hostutils._host)

    def _test_is_host_guarded(self, return_code=0, is_host_guarded=True):
        # Helper: the host counts as guarded only when the query succeeds
        # (return_code falsy) and the config flag is set.
        hgs_config = self._hostutils._conn_hgs.MSFT_HgsClientConfiguration
        hgs_config.Get.return_value = (return_code,
                                       mock.MagicMock(
                                           IsHostGuarded=is_host_guarded))
        expected_result = is_host_guarded and not return_code

        result = self._hostutils.is_host_guarded()
        self.assertEqual(expected_result, result)

    def test_is_guarded_host_config_error(self):
        self._test_is_host_guarded(return_code=mock.sentinel.return_code)

    def test_is_guarded_host(self):
        self._test_is_host_guarded()

    def test_is_not_guarded_host(self):
        self._test_is_host_guarded(is_host_guarded=False)
|
@ -1,266 +0,0 @@
|
||||
# Copyright 2015 Cloudbase Solutions Srl
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import ddt
|
||||
import mock
|
||||
|
||||
from os_win import constants
|
||||
from os_win import exceptions
|
||||
from os_win.tests import test_base
|
||||
from os_win.utils import jobutils
|
||||
|
||||
|
||||
@ddt.ddt
class JobUtilsTestCase(test_base.OsWinBaseTestCase):
    """Unit tests for the Hyper-V JobUtils class."""

    _FAKE_RET_VAL = 0

    _FAKE_JOB_STATUS_BAD = -1
    _FAKE_JOB_DESCRIPTION = "fake_job_description"
    _FAKE_JOB_PATH = 'fake_job_path'
    _FAKE_ERROR = "fake_error"
    _FAKE_ELAPSED_TIME = 0
    _CONCRETE_JOB = "Msvm_ConcreteJob"

    def setUp(self):
        super(JobUtilsTestCase, self).setUp()
        self.jobutils = jobutils.JobUtils()
        self.jobutils._conn_attr = mock.MagicMock()

    @mock.patch.object(jobutils.JobUtils, '_wait_for_job')
    def test_check_ret_val_started(self, mock_wait_for_job):
        """A 'started' return value means we must wait for the job."""
        self.jobutils.check_ret_val(constants.WMI_JOB_STATUS_STARTED,
                                    mock.sentinel.job_path)
        mock_wait_for_job.assert_called_once_with(mock.sentinel.job_path)

    @mock.patch.object(jobutils.JobUtils, '_wait_for_job')
    def test_check_ret_val_ok(self, mock_wait_for_job):
        """A success return value requires no waiting."""
        self.jobutils.check_ret_val(self._FAKE_RET_VAL,
                                    mock.sentinel.job_path)
        self.assertFalse(mock_wait_for_job.called)

    def test_check_ret_val_exception(self):
        """An unexpected return value raises HyperVException."""
        self.assertRaises(exceptions.HyperVException,
                          self.jobutils.check_ret_val,
                          mock.sentinel.ret_val_bad,
                          mock.sentinel.job_path)

    def test_wait_for_job_exception_concrete_job(self):
        mock_job = self._prepare_wait_for_job()
        mock_job.path.return_value.Class = self._CONCRETE_JOB
        self.assertRaises(exceptions.HyperVException,
                          self.jobutils._wait_for_job,
                          self._FAKE_JOB_PATH)

    def test_wait_for_job_exception_with_error(self):
        mock_job = self._prepare_wait_for_job()
        mock_job.GetError.return_value = (self._FAKE_ERROR, self._FAKE_RET_VAL)
        self.assertRaises(exceptions.HyperVException,
                          self.jobutils._wait_for_job,
                          self._FAKE_JOB_PATH)
        mock_job.GetError.assert_called_once_with()

    def test_wait_for_job_exception_no_error_details(self):
        mock_job = self._prepare_wait_for_job()
        mock_job.GetError.return_value = (None, None)
        self.assertRaises(exceptions.HyperVException,
                          self.jobutils._wait_for_job,
                          self._FAKE_JOB_PATH)

    def test_wait_for_job_ok(self):
        mock_job = self._prepare_wait_for_job(
            constants.WMI_JOB_STATE_COMPLETED)
        job = self.jobutils._wait_for_job(self._FAKE_JOB_PATH)
        self.assertEqual(mock_job, job)

    @ddt.data(True, False)
    def test_get_pending_jobs(self, ignore_error_state):
        """Only running (and optionally errored) jobs count as pending."""
        mock_killed_job = mock.Mock(JobState=constants.JOB_STATE_KILLED)
        mock_running_job = mock.Mock(JobState=constants.WMI_JOB_STATE_RUNNING)
        mock_error_st_job = mock.Mock(JobState=constants.JOB_STATE_EXCEPTION)
        mappings = [mock.Mock(AffectingElement=None),
                    mock.Mock(AffectingElement=mock_killed_job),
                    mock.Mock(AffectingElement=mock_running_job),
                    mock.Mock(AffectingElement=mock_error_st_job)]
        self.jobutils._conn.Msvm_AffectedJobElement.return_value = mappings

        mock_affected_element = mock.Mock()

        expected_pending_jobs = [mock_running_job]
        if not ignore_error_state:
            expected_pending_jobs.append(mock_error_st_job)

        pending_jobs = self.jobutils._get_pending_jobs_affecting_element(
            mock_affected_element,
            ignore_error_state=ignore_error_state)
        self.assertEqual(expected_pending_jobs, pending_jobs)

        self.jobutils._conn.Msvm_AffectedJobElement.assert_called_once_with(
            AffectedElement=mock_affected_element.path_.return_value)

    @ddt.data(True, False)
    @mock.patch.object(jobutils.JobUtils,
                       '_get_pending_jobs_affecting_element')
    def test_stop_jobs_helper(self, jobs_ended, mock_get_pending_jobs):
        """Cancellable jobs get a kill request; leftovers raise."""
        mock_job1 = mock.Mock(Cancellable=True)
        mock_job2 = mock.Mock(Cancellable=True)
        mock_job3 = mock.Mock(Cancellable=False)

        pending_jobs = [mock_job1, mock_job2, mock_job3]
        mock_get_pending_jobs.side_effect = (
            pending_jobs,
            pending_jobs if not jobs_ended else [])

        mock_job1.RequestStateChange.side_effect = (
            test_base.FakeWMIExc(hresult=jobutils.JobUtils._WBEM_E_NOT_FOUND))
        mock_job2.RequestStateChange.side_effect = (
            test_base.FakeWMIExc(hresult=mock.sentinel.hresult))

        if jobs_ended:
            self.jobutils._stop_jobs(mock.sentinel.vm)
        else:
            self.assertRaises(exceptions.JobTerminateFailed,
                              self.jobutils._stop_jobs,
                              mock.sentinel.vm)

        mock_get_pending_jobs.assert_has_calls(
            [mock.call(mock.sentinel.vm, ignore_error_state=False),
             mock.call(mock.sentinel.vm)])

        mock_job1.RequestStateChange.assert_called_once_with(
            self.jobutils._KILL_JOB_STATE_CHANGE_REQUEST)
        mock_job2.RequestStateChange.assert_called_once_with(
            self.jobutils._KILL_JOB_STATE_CHANGE_REQUEST)
        # BUG FIX: this previously read 'RequestStateqqChange', which on a
        # Mock silently creates a fresh child mock whose .called is always
        # False, making the assertion pass vacuously.
        self.assertFalse(mock_job3.RequestStateChange.called)

    @mock.patch.object(jobutils.JobUtils, '_stop_jobs')
    def test_stop_jobs(self, mock_stop_jobs_helper):
        fake_timeout = 1
        self.jobutils.stop_jobs(mock.sentinel.element, fake_timeout)
        mock_stop_jobs_helper.assert_called_once_with(mock.sentinel.element)

    def test_is_job_completed_true(self):
        job = mock.MagicMock(JobState=constants.WMI_JOB_STATE_COMPLETED)

        self.assertTrue(self.jobutils._is_job_completed(job))

    def test_is_job_completed_false(self):
        job = mock.MagicMock(JobState=constants.WMI_JOB_STATE_RUNNING)

        self.assertFalse(self.jobutils._is_job_completed(job))

    def _prepare_wait_for_job(self, state=_FAKE_JOB_STATUS_BAD):
        # Helper: patch _get_wmi_obj so _wait_for_job sees a job in the
        # requested state.
        mock_job = mock.MagicMock()
        mock_job.JobState = state
        mock_job.Description = self._FAKE_JOB_DESCRIPTION
        mock_job.ElapsedTime = self._FAKE_ELAPSED_TIME

        wmi_patcher = mock.patch.object(jobutils.JobUtils, '_get_wmi_obj')
        mock_wmi = wmi_patcher.start()
        self.addCleanup(wmi_patcher.stop)
        mock_wmi.return_value = mock_job
        return mock_job

    def test_modify_virt_resource(self):
        side_effect = [
            (self._FAKE_JOB_PATH, mock.MagicMock(), self._FAKE_RET_VAL)]
        self._check_modify_virt_resource_max_retries(side_effect=side_effect)

    def test_modify_virt_resource_max_retries_exception(self):
        side_effect = exceptions.HyperVException('expected failure.')
        self._check_modify_virt_resource_max_retries(
            side_effect=side_effect, num_calls=6, expected_fail=True)

    def test_modify_virt_resource_max_retries(self):
        side_effect = [exceptions.HyperVException('expected failure.')] * 5 + [
            (self._FAKE_JOB_PATH, mock.MagicMock(), self._FAKE_RET_VAL)]
        self._check_modify_virt_resource_max_retries(side_effect=side_effect,
                                                     num_calls=5)

    @mock.patch('time.sleep')
    def _check_modify_virt_resource_max_retries(
            self, mock_sleep, side_effect, num_calls=1, expected_fail=False):
        # Helper: drive modify_virt_resource through its retry loop.
        mock_svc = mock.MagicMock()
        self.jobutils._vs_man_svc_attr = mock_svc
        mock_svc.ModifyResourceSettings.side_effect = side_effect
        mock_res_setting_data = mock.MagicMock()
        mock_res_setting_data.GetText_.return_value = mock.sentinel.res_data

        if expected_fail:
            self.assertRaises(exceptions.HyperVException,
                              self.jobutils.modify_virt_resource,
                              mock_res_setting_data)
        else:
            self.jobutils.modify_virt_resource(mock_res_setting_data)

        mock_calls = [
            mock.call(ResourceSettings=[mock.sentinel.res_data])] * num_calls
        # BUG FIX: 'has_calls' is not a Mock assertion method; calling it
        # was a silent no-op. Use assert_has_calls so the checks are real.
        mock_svc.ModifyResourceSettings.assert_has_calls(mock_calls)
        # Every retry sleep (if any occurred) must use a one second delay.
        mock_sleep.assert_has_calls([mock.call(1)] * mock_sleep.call_count)

    def test_add_virt_resource(self):
        self._test_virt_method('AddResourceSettings', 3, 'add_virt_resource',
                               True, mock.sentinel.vm_path,
                               [mock.sentinel.res_data])

    def test_remove_virt_resource(self):
        self._test_virt_method('RemoveResourceSettings', 2,
                               'remove_virt_resource', False,
                               ResourceSettings=[mock.sentinel.res_path])

    def test_add_virt_feature(self):
        self._test_virt_method('AddFeatureSettings', 3, 'add_virt_feature',
                               True, mock.sentinel.vm_path,
                               [mock.sentinel.res_data])

    def test_remove_virt_feature(self):
        self._test_virt_method('RemoveFeatureSettings', 2,
                               'remove_virt_feature', False,
                               FeatureSettings=[mock.sentinel.res_path])

    def _test_virt_method(self, vsms_method_name, return_count,
                          utils_method_name, with_mock_vm, *args, **kwargs):
        # Helper: check that a jobutils wrapper forwards its arguments to
        # the matching virtual system management service method.
        mock_svc = mock.MagicMock()
        self.jobutils._vs_man_svc_attr = mock_svc
        vsms_method = getattr(mock_svc, vsms_method_name)
        mock_rsd = self._mock_vsms_method(vsms_method, return_count)
        if with_mock_vm:
            mock_vm = mock.MagicMock()
            mock_vm.path_.return_value = mock.sentinel.vm_path
            getattr(self.jobutils, utils_method_name)(mock_rsd, mock_vm)
        else:
            getattr(self.jobutils, utils_method_name)(mock_rsd)

        if args:
            vsms_method.assert_called_once_with(*args)
        else:
            vsms_method.assert_called_once_with(**kwargs)

    def _mock_vsms_method(self, vsms_method, return_count):
        # Helper: configure a VSMS method mock to return either a
        # (job_path, out, ret_val) triple or a (job_path, ret_val) pair.
        if return_count == 3:
            args = (
                mock.sentinel.job_path, mock.MagicMock(), self._FAKE_RET_VAL)
        else:
            args = (mock.sentinel.job_path, self._FAKE_RET_VAL)

        vsms_method.return_value = args
        mock_res_setting_data = mock.MagicMock()
        mock_res_setting_data.GetText_.return_value = mock.sentinel.res_data
        mock_res_setting_data.path_.return_value = mock.sentinel.res_path

        self.jobutils.check_ret_val = mock.MagicMock()

        return mock_res_setting_data
|
@ -1,214 +0,0 @@
|
||||
# Copyright 2014 IBM Corp.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import os
|
||||
import six
|
||||
|
||||
import mock
|
||||
|
||||
from os_win import exceptions
|
||||
from os_win.tests import test_base
|
||||
from os_win.utils import pathutils
|
||||
|
||||
|
||||
class PathUtilsTestCase(test_base.OsWinBaseTestCase):
|
||||
"""Unit tests for the Hyper-V PathUtils class."""
|
||||
|
||||
def setUp(self):
|
||||
super(PathUtilsTestCase, self).setUp()
|
||||
self._setup_lib_mocks()
|
||||
|
||||
self._pathutils = pathutils.PathUtils()
|
||||
self._pathutils._win32_utils = mock.Mock()
|
||||
self._mock_run = self._pathutils._win32_utils.run_and_check_output
|
||||
|
||||
def _setup_lib_mocks(self):
|
||||
self._ctypes = mock.Mock()
|
||||
self._wintypes = mock.Mock()
|
||||
|
||||
self._wintypes.BOOL = lambda x: (x, 'BOOL')
|
||||
self._ctypes.c_wchar_p = lambda x: (x, "c_wchar_p")
|
||||
|
||||
mock.patch.multiple(pathutils,
|
||||
wintypes=self._wintypes,
|
||||
ctypes=self._ctypes, kernel32=mock.DEFAULT,
|
||||
create=True).start()
|
||||
|
||||
@mock.patch.object(pathutils.PathUtils, 'rename')
|
||||
@mock.patch.object(os.path, 'isfile')
|
||||
@mock.patch.object(os, 'listdir')
|
||||
def test_move_folder_files(self, mock_listdir, mock_isfile, mock_rename):
|
||||
src_dir = 'src'
|
||||
dest_dir = 'dest'
|
||||
fname = 'tmp_file.txt'
|
||||
subdir = 'tmp_folder'
|
||||
src_fname = os.path.join(src_dir, fname)
|
||||
dest_fname = os.path.join(dest_dir, fname)
|
||||
|
||||
# making sure src_subdir is not moved.
|
||||
mock_listdir.return_value = [fname, subdir]
|
||||
mock_isfile.side_effect = [True, False]
|
||||
|
||||
self._pathutils.move_folder_files(src_dir, dest_dir)
|
||||
mock_rename.assert_called_once_with(src_fname, dest_fname)
|
||||
|
||||
@mock.patch('shutil.rmtree')
|
||||
@mock.patch('time.sleep')
|
||||
def test_rmtree(self, mock_sleep, mock_rmtree):
|
||||
class WindowsError(Exception):
|
||||
def __init__(self, winerror=None):
|
||||
self.winerror = winerror
|
||||
|
||||
mock_rmtree.side_effect = [WindowsError(
|
||||
pathutils.ERROR_DIR_IS_NOT_EMPTY), True]
|
||||
fake_windows_error = WindowsError
|
||||
with mock.patch.object(six.moves.builtins, 'WindowsError',
|
||||
fake_windows_error, create=True):
|
||||
self._pathutils.rmtree(mock.sentinel.FAKE_PATH)
|
||||
|
||||
mock_sleep.assert_called_once_with(1)
|
||||
mock_rmtree.assert_has_calls([mock.call(mock.sentinel.FAKE_PATH),
|
||||
mock.call(mock.sentinel.FAKE_PATH)])
|
||||
|
||||
@mock.patch.object(pathutils.PathUtils, 'makedirs')
|
||||
@mock.patch.object(pathutils.PathUtils, 'exists')
|
||||
def test_check_create_dir(self, mock_exists, mock_makedirs):
|
||||
fake_dir = 'dir'
|
||||
mock_exists.return_value = False
|
||||
self._pathutils.check_create_dir(fake_dir)
|
||||
|
||||
mock_exists.assert_called_once_with(fake_dir)
|
||||
mock_makedirs.assert_called_once_with(fake_dir)
|
||||
|
||||
@mock.patch.object(pathutils.PathUtils, 'rmtree')
|
||||
@mock.patch.object(pathutils.PathUtils, 'exists')
|
||||
def test_check_remove_dir(self, mock_exists, mock_rmtree):
|
||||
fake_dir = 'dir'
|
||||
self._pathutils.check_remove_dir(fake_dir)
|
||||
|
||||
mock_exists.assert_called_once_with(fake_dir)
|
||||
mock_rmtree.assert_called_once_with(fake_dir)
|
||||
|
||||
@mock.patch('os.path.isdir')
|
||||
@mock.patch('os.path.islink')
|
||||
def _test_check_symlink(self, mock_is_symlink, mock_is_dir,
|
||||
is_symlink=True, python_version=(2, 7),
|
||||
is_dir=True):
|
||||
fake_path = r'c:\\fake_path'
|
||||
if is_symlink:
|
||||
f_attr = 0x400
|
||||
else:
|
||||
f_attr = 0x80
|
||||
|
||||
mock_is_dir.return_value = is_dir
|
||||
mock_is_symlink.return_value = is_symlink
|
||||
self._mock_run.return_value = f_attr
|
||||
|
||||
with mock.patch('sys.version_info', python_version):
|
||||
ret_value = self._pathutils.is_symlink(fake_path)
|
||||
|
||||
if python_version >= (3, 2):
|
||||
mock_is_symlink.assert_called_once_with(fake_path)
|
||||
else:
|
||||
self._mock_run.assert_called_once_with(
|
||||
pathutils.kernel32.GetFileAttributesW,
|
||||
fake_path,
|
||||
kernel32_lib_func=True)
|
||||
|
||||
self.assertEqual(is_symlink, ret_value)
|
||||
|
||||
def test_is_symlink(self):
|
||||
self._test_check_symlink()
|
||||
|
||||
def test_is_not_symlink(self):
|
||||
self._test_check_symlink(is_symlink=False)
|
||||
|
||||
def test_is_symlink_python_gt_3_2(self):
|
||||
self._test_check_symlink(python_version=(3, 3))
|
||||
|
||||
def test_create_sym_link(self):
|
||||
tg_is_dir = False
|
||||
self._pathutils.create_sym_link(mock.sentinel.path,
|
||||
mock.sentinel.target,
|
||||
target_is_dir=tg_is_dir)
|
||||
|
||||
self._mock_run.assert_called_once_with(
|
||||
pathutils.kernel32.CreateSymbolicLinkW,
|
||||
mock.sentinel.path,
|
||||
mock.sentinel.target,
|
||||
tg_is_dir,
|
||||
kernel32_lib_func=True)
|
||||
|
||||
@mock.patch('os.path.isdir')
|
||||
def _test_copy(self, mock_isdir, dest_isdir=False):
|
||||
mock_isdir.return_value = dest_isdir
|
||||
fail_if_exists = False
|
||||
|
||||
fake_src = r'fake_src_fname'
|
||||
fake_dest = r'fake_dest'
|
||||
|
||||
expected_dest = (os.path.join(fake_dest, fake_src)
|
||||
if dest_isdir else fake_dest)
|
||||
|
||||
self._pathutils.copy(fake_src, fake_dest,
|
||||
fail_if_exists=fail_if_exists)
|
||||
|
||||
self._mock_run.assert_called_once_with(
|
||||
pathutils.kernel32.CopyFileW,
|
||||
self._ctypes.c_wchar_p(fake_src),
|
||||
self._ctypes.c_wchar_p(expected_dest),
|
||||
self._wintypes.BOOL(fail_if_exists),
|
||||
kernel32_lib_func=True)
|
||||
|
||||
def test_copy_dest_is_fpath(self):
|
||||
self._test_copy()
|
||||
|
||||
def test_copy_dest_is_dir(self):
|
||||
self._test_copy(dest_isdir=True)
|
||||
|
||||
@mock.patch('os.path.isdir')
|
||||
def test_copy_exc(self, mock_isdir):
|
||||
mock_isdir.return_value = False
|
||||
self._mock_run.side_effect = exceptions.Win32Exception(
|
||||
func_name='mock_copy',
|
||||
error_code='fake_error_code',
|
||||
error_message='fake_error_msg')
|
||||
self.assertRaises(IOError,
|
||||
self._pathutils.copy,
|
||||
mock.sentinel.src,
|
||||
mock.sentinel.dest)
|
||||
|
||||
@mock.patch('os.close')
|
||||
@mock.patch('tempfile.mkstemp')
|
||||
def test_create_temporary_file(self, mock_mkstemp, mock_close):
|
||||
fd = mock.sentinel.file_descriptor
|
||||
path = mock.sentinel.absolute_pathname
|
||||
mock_mkstemp.return_value = (fd, path)
|
||||
|
||||
output = self._pathutils.create_temporary_file(
|
||||
suffix=mock.sentinel.suffix)
|
||||
|
||||
self.assertEqual(path, output)
|
||||
mock_close.assert_called_once_with(fd)
|
||||
mock_mkstemp.assert_called_once_with(suffix=mock.sentinel.suffix)
|
||||
|
||||
@mock.patch('oslo_utils.fileutils.delete_if_exists')
|
||||
def test_temporary_file(self, mock_delete):
|
||||
self._pathutils.create_temporary_file = mock.MagicMock()
|
||||
self._pathutils.create_temporary_file.return_value = (
|
||||
mock.sentinel.temporary_file)
|
||||
with self._pathutils.temporary_file() as tmp_file:
|
||||
self.assertEqual(mock.sentinel.temporary_file, tmp_file)
|
||||
self.assertFalse(mock_delete.called)
|
||||
mock_delete.assert_called_once_with(mock.sentinel.temporary_file)
|
@ -1,189 +0,0 @@
|
||||
# Copyright 2015 Cloudbase Solutions Srl
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
from oslotest import base
|
||||
|
||||
from os_win import exceptions
|
||||
from os_win.utils import win32utils
|
||||
|
||||
|
||||
class Win32UtilsTestCase(base.BaseTestCase):
|
||||
def setUp(self):
|
||||
super(Win32UtilsTestCase, self).setUp()
|
||||
self._setup_lib_mocks()
|
||||
|
||||
self._win32_utils = win32utils.Win32Utils()
|
||||
|
||||
self.addCleanup(mock.patch.stopall)
|
||||
|
||||
def _setup_lib_mocks(self):
|
||||
self._ctypes = mock.Mock()
|
||||
# This is used in order to easily make assertions on the variables
|
||||
# passed by reference.
|
||||
self._ctypes.byref = lambda x: (x, "byref")
|
||||
|
||||
self._ctypes_patcher = mock.patch.multiple(
|
||||
win32utils, ctypes=self._ctypes)
|
||||
self._ctypes_patcher.start()
|
||||
|
||||
mock.patch.multiple(win32utils,
|
||||
kernel32=mock.DEFAULT,
|
||||
wintypes=mock.DEFAULT,
|
||||
create=True).start()
|
||||
|
||||
@mock.patch.object(win32utils.Win32Utils, 'get_error_message')
|
||||
@mock.patch.object(win32utils.Win32Utils, 'get_last_error')
|
||||
def _test_run_and_check_output(self, mock_get_last_err, mock_get_err_msg,
|
||||
ret_val=None, expected_exc=None,
|
||||
**kwargs):
|
||||
mock_func = mock.Mock()
|
||||
mock_func.return_value = ret_val
|
||||
self._ctypes_patcher.stop()
|
||||
|
||||
if expected_exc:
|
||||
self.assertRaises(expected_exc,
|
||||
self._win32_utils.run_and_check_output,
|
||||
mock_func,
|
||||
mock.sentinel.arg,
|
||||
kwarg=mock.sentinel.kwarg,
|
||||
**kwargs)
|
||||
else:
|
||||
actual_ret_val = self._win32_utils.run_and_check_output(
|
||||
mock_func,
|
||||
mock.sentinel.arg,
|
||||
kwarg=mock.sentinel.kwarg,
|
||||
**kwargs)
|
||||
self.assertEqual(ret_val, actual_ret_val)
|
||||
|
||||
mock_func.assert_called_once_with(mock.sentinel.arg,
|
||||
kwarg=mock.sentinel.kwarg)
|
||||
|
||||
return mock_get_last_err, mock_get_err_msg
|
||||
|
||||
def test_run_and_check_output(self):
|
||||
self._test_run_and_check_output(ret_val=0)
|
||||
|
||||
def test_run_and_check_output_fail_on_nonzero_ret_val(self):
|
||||
ret_val = 1
|
||||
|
||||
(mock_get_last_err,
|
||||
mock_get_err_msg) = self._test_run_and_check_output(
|
||||
ret_val=ret_val,
|
||||
expected_exc=exceptions.VHDWin32APIException,
|
||||
failure_exc=exceptions.VHDWin32APIException)
|
||||
|
||||
mock_get_err_msg.assert_called_once_with(ret_val)
|
||||
|
||||
def test_run_and_check_output_explicit_error_ret_vals(self):
|
||||
ret_val = 1
|
||||
error_ret_vals = [ret_val]
|
||||
|
||||
(mock_get_last_err,
|
||||
mock_get_err_msg) = self._test_run_and_check_output(
|
||||
ret_val=ret_val,
|
||||
error_ret_vals=error_ret_vals,
|
||||
ret_val_is_err_code=False,
|
||||
expected_exc=exceptions.Win32Exception)
|
||||
|
||||
mock_get_err_msg.assert_called_once_with(
|
||||
win32utils.ctypes.c_ulong(mock_get_last_err).value)
|
||||
|
||||
def test_run_and_check_output_ignored_error(self):
|
||||
ret_val = 1
|
||||
ignored_err_codes = [ret_val]
|
||||
|
||||
self._test_run_and_check_output(ret_val=ret_val,
|
||||
ignored_error_codes=ignored_err_codes)
|
||||
|
||||
def test_run_and_check_output_kernel32_lib_func(self):
|
||||
ret_val = 0
|
||||
self._test_run_and_check_output(ret_val=ret_val,
|
||||
expected_exc=exceptions.Win32Exception,
|
||||
kernel32_lib_func=True)
|
||||
|
||||
def test_run_and_check_output_with_err_msg_dict(self):
|
||||
self._ctypes_patcher.stop()
|
||||
|
||||
err_code = 1
|
||||
err_msg = 'fake_err_msg'
|
||||
err_msg_dict = {err_code: err_msg}
|
||||
|
||||
mock_func = mock.Mock()
|
||||
mock_func.return_value = err_code
|
||||
|
||||
try:
|
||||
self._win32_utils.run_and_check_output(mock_func,
|
||||
mock.sentinel.arg,
|
||||
error_msg_src=err_msg_dict)
|
||||
except Exception as ex:
|
||||
self.assertIsInstance(ex, exceptions.Win32Exception)
|
||||
self.assertIn(err_msg, ex.message)
|
||||
|
||||
def test_get_error_message(self):
|
||||
err_msg = self._win32_utils.get_error_message(mock.sentinel.err_code)
|
||||
|
||||
fake_msg_buff = win32utils.ctypes.c_char_p.return_value
|
||||
|
||||
expected_flags = (win32utils.FORMAT_MESSAGE_FROM_SYSTEM |
|
||||
win32utils.FORMAT_MESSAGE_ALLOCATE_BUFFER |
|
||||
win32utils.FORMAT_MESSAGE_IGNORE_INSERTS)
|
||||
|
||||
win32utils.kernel32.FormatMessageA.assert_called_once_with(
|
||||
expected_flags, None, mock.sentinel.err_code, 0,
|
||||
win32utils.ctypes.byref(fake_msg_buff), 0, None)
|
||||
self.assertEqual(fake_msg_buff.value, err_msg)
|
||||
|
||||
def test_get_last_error(self):
|
||||
last_err = self._win32_utils.get_last_error()
|
||||
|
||||
self.assertEqual(win32utils.kernel32.GetLastError.return_value,
|
||||
last_err)
|
||||
win32utils.kernel32.SetLastError.assert_called_once_with(0)
|
||||
|
||||
def test_hresult_to_err_code(self):
|
||||
# This could differ based on the error source.
|
||||
# Only the last 2 bytes of the hresult the error code.
|
||||
fake_file_exists_hres = -0x7ff8ffb0
|
||||
file_exists_err_code = 0x50
|
||||
|
||||
ret_val = self._win32_utils.hresult_to_err_code(fake_file_exists_hres)
|
||||
self.assertEqual(file_exists_err_code, ret_val)
|
||||
|
||||
@mock.patch.object(win32utils.Win32Utils, 'get_com_error_hresult')
|
||||
@mock.patch.object(win32utils.Win32Utils, 'hresult_to_err_code')
|
||||
def test_get_com_err_code(self, mock_hres_to_err_code, mock_get_hresult):
|
||||
ret_val = self._win32_utils.get_com_err_code(mock.sentinel.com_err)
|
||||
|
||||
self.assertEqual(mock_hres_to_err_code.return_value, ret_val)
|
||||
mock_get_hresult.assert_called_once_with(mock.sentinel.com_err)
|
||||
mock_hres_to_err_code.assert_called_once_with(
|
||||
mock_get_hresult.return_value)
|
||||
|
||||
def test_get_com_error_hresult(self):
|
||||
self._ctypes_patcher.stop()
|
||||
fake_hres = -5
|
||||
expected_hres = (1 << 32) + fake_hres
|
||||
mock_excepinfo = [None] * 5 + [fake_hres]
|
||||
mock_com_err = mock.Mock(excepinfo=mock_excepinfo)
|
||||
|
||||
ret_val = self._win32_utils.get_com_error_hresult(mock_com_err)
|
||||
|
||||
self.assertEqual(expected_hres, ret_val)
|
||||
|
||||
def get_com_error_hresult_missing_excepinfo(self):
|
||||
ret_val = self._win32_utils.get_com_error_hresult(None)
|
||||
self.assertIsNone(ret_val)
|
@ -1,52 +0,0 @@
|
||||
# Copyright 2016 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
|
||||
from os_win import exceptions
|
||||
from os_win.tests import test_base
|
||||
from os_win.utils import _wqlutils
|
||||
|
||||
|
||||
class WqlUtilsTestCase(test_base.OsWinBaseTestCase):
|
||||
def _test_get_element_associated_class(self, fields=None):
|
||||
mock_conn = mock.MagicMock()
|
||||
_wqlutils.get_element_associated_class(
|
||||
mock_conn, mock.sentinel.class_name,
|
||||
element_instance_id=mock.sentinel.instance_id,
|
||||
fields=fields)
|
||||
|
||||
expected_fields = ", ".join(fields) if fields else '*'
|
||||
expected_query = (
|
||||
"SELECT %(expected_fields)s FROM %(class_name)s "
|
||||
"WHERE InstanceID LIKE '%(instance_id)s%%'" %
|
||||
{'expected_fields': expected_fields,
|
||||
'class_name': mock.sentinel.class_name,
|
||||
'instance_id': mock.sentinel.instance_id})
|
||||
mock_conn.query.assert_called_once_with(expected_query)
|
||||
|
||||
def test_get_element_associated_class(self):
|
||||
self._test_get_element_associated_class()
|
||||
|
||||
def test_get_element_associated_class_specific_fields(self):
|
||||
self._test_get_element_associated_class(
|
||||
fields=['field', 'another_field'])
|
||||
|
||||
def test_get_element_associated_class_invalid_element(self):
|
||||
self.assertRaises(
|
||||
exceptions.WqlException,
|
||||
_wqlutils.get_element_associated_class,
|
||||
mock.sentinel.conn,
|
||||
mock.sentinel.class_name)
|
@ -1,44 +0,0 @@
|
||||
# Copyright 2016 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from os_win._i18n import _
|
||||
from os_win import exceptions
|
||||
|
||||
|
||||
def get_element_associated_class(conn, class_name, element_instance_id=None,
|
||||
element_uuid=None, fields=None):
|
||||
"""Returns the objects associated to an element as a list.
|
||||
|
||||
:param conn: connection to be used to execute the query
|
||||
:param class_name: object's class type name to be retrieved
|
||||
:param element_instance_id: element class InstanceID
|
||||
:param element_uuid: UUID of the element
|
||||
:param fields: specific class attributes to be retrieved
|
||||
"""
|
||||
if element_instance_id:
|
||||
instance_id = element_instance_id
|
||||
elif element_uuid:
|
||||
instance_id = "Microsoft:%s" % element_uuid
|
||||
else:
|
||||
err_msg = _("Could not get element associated class. Either element "
|
||||
"instance id or element uuid must be specified.")
|
||||
raise exceptions.WqlException(err_msg)
|
||||
fields = ", ".join(fields) if fields else "*"
|
||||
return conn.query(
|
||||
"SELECT %(fields)s FROM %(class_name)s WHERE InstanceID "
|
||||
"LIKE '%(instance_id)s%%'" % {
|
||||
'fields': fields,
|
||||
'class_name': class_name,
|
||||
'instance_id': instance_id})
|
@ -1,107 +0,0 @@
|
||||
# Copyright 2016 Cloudbase Solutions Srl
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Base WMI utility class.
|
||||
"""
|
||||
|
||||
import imp
|
||||
import sys
|
||||
|
||||
if sys.platform == 'win32':
|
||||
import wmi
|
||||
|
||||
|
||||
class BaseUtils(object):
|
||||
|
||||
_WMI_CONS = {}
|
||||
|
||||
def _get_wmi_obj(self, moniker, **kwargs):
|
||||
return wmi.WMI(moniker=moniker, **kwargs)
|
||||
|
||||
def _get_wmi_conn(self, moniker, **kwargs):
|
||||
if sys.platform != 'win32':
|
||||
return None
|
||||
if kwargs:
|
||||
return self._get_wmi_obj(moniker, **kwargs)
|
||||
if moniker in self._WMI_CONS:
|
||||
return self._WMI_CONS[moniker]
|
||||
|
||||
wmi_conn = self._get_wmi_obj(moniker)
|
||||
self._WMI_CONS[moniker] = wmi_conn
|
||||
return wmi_conn
|
||||
|
||||
|
||||
class BaseUtilsVirt(BaseUtils):
|
||||
|
||||
_wmi_namespace = '//%s/root/virtualization/v2'
|
||||
_os_version = None
|
||||
_old_wmi = None
|
||||
|
||||
def __init__(self, host='.'):
|
||||
self._vs_man_svc_attr = None
|
||||
self._host = host
|
||||
self._conn_attr = None
|
||||
self._compat_conn_attr = None
|
||||
|
||||
@property
|
||||
def _conn(self):
|
||||
if not self._conn_attr:
|
||||
self._conn_attr = self._get_wmi_conn(
|
||||
self._wmi_namespace % self._host)
|
||||
return self._conn_attr
|
||||
|
||||
@property
|
||||
def _compat_conn(self):
|
||||
if not self._compat_conn_attr:
|
||||
if not BaseUtilsVirt._os_version:
|
||||
# hostutils cannot be used for this, it would end up in
|
||||
# a circular import.
|
||||
os_version = wmi.WMI().Win32_OperatingSystem()[0].Version
|
||||
BaseUtilsVirt._os_version = list(
|
||||
map(int, os_version.split('.')))
|
||||
|
||||
if BaseUtilsVirt._os_version >= [6, 3]:
|
||||
self._compat_conn_attr = self._conn
|
||||
else:
|
||||
self._compat_conn_attr = self._get_wmi_compat_conn(
|
||||
moniker=self._wmi_namespace % self._host)
|
||||
|
||||
return self._compat_conn_attr
|
||||
|
||||
@property
|
||||
def _vs_man_svc(self):
|
||||
if not self._vs_man_svc_attr:
|
||||
self._vs_man_svc_attr = (
|
||||
self._compat_conn.Msvm_VirtualSystemManagementService()[0])
|
||||
return self._vs_man_svc_attr
|
||||
|
||||
def _get_wmi_compat_conn(self, moniker, **kwargs):
|
||||
if not BaseUtilsVirt._old_wmi:
|
||||
old_wmi_path = "%s.py" % wmi.__path__[0]
|
||||
BaseUtilsVirt._old_wmi = imp.load_source('old_wmi', old_wmi_path)
|
||||
return BaseUtilsVirt._old_wmi.WMI(moniker=moniker, **kwargs)
|
||||
|
||||
def _get_wmi_obj(self, moniker, compatibility_mode=False, **kwargs):
|
||||
if not BaseUtilsVirt._os_version:
|
||||
# hostutils cannot be used for this, it would end up in
|
||||
# a circular import.
|
||||
os_version = wmi.WMI().Win32_OperatingSystem()[0].Version
|
||||
BaseUtilsVirt._os_version = list(map(int, os_version.split('.')))
|
||||
|
||||
if not compatibility_mode or BaseUtilsVirt._os_version >= [6, 3]:
|
||||
return wmi.WMI(moniker=moniker, **kwargs)
|
||||
return self._get_wmi_compat_conn(moniker=moniker, **kwargs)
|
@ -1,238 +0,0 @@
|
||||
# Copyright 2016 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Utility class for VM related operations on Hyper-V Clusters.
|
||||
"""
|
||||
|
||||
import re
|
||||
import sys
|
||||
|
||||
if sys.platform == 'win32':
|
||||
import wmi
|
||||
|
||||
from eventlet import patcher
|
||||
from eventlet import tpool
|
||||
from oslo_log import log as logging
|
||||
|
||||
from os_win._i18n import _, _LE
|
||||
from os_win import exceptions
|
||||
from os_win.utils import baseutils
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ClusterUtils(baseutils.BaseUtils):
|
||||
|
||||
_MSCLUSTER_NODE = 'MSCluster_Node'
|
||||
_MSCLUSTER_RES = 'MSCluster_Resource'
|
||||
|
||||
_VM_BASE_NAME = 'Virtual Machine %s'
|
||||
_VM_TYPE = 'Virtual Machine'
|
||||
_VM_GROUP_TYPE = 111
|
||||
|
||||
_MS_CLUSTER_NAMESPACE = '//%s/root/MSCluster'
|
||||
|
||||
_LIVE_MIGRATION_TYPE = 4
|
||||
_IGNORE_LOCKED = 1
|
||||
_DESTROY_GROUP = 1
|
||||
|
||||
_FAILBACK_TRUE = 1
|
||||
_FAILBACK_WINDOW_MIN = 0
|
||||
_FAILBACK_WINDOW_MAX = 23
|
||||
|
||||
_WMI_EVENT_TIMEOUT_MS = 100
|
||||
_WMI_EVENT_CHECK_INTERVAL = 2
|
||||
|
||||
def __init__(self, host='.'):
|
||||
self._instance_name_regex = re.compile('Virtual Machine (.*)')
|
||||
|
||||
if sys.platform == 'win32':
|
||||
self._init_hyperv_conn(host)
|
||||
self._watcher = self._get_failover_watcher()
|
||||
|
||||
def _init_hyperv_conn(self, host):
|
||||
try:
|
||||
self._conn_cluster = self._get_wmi_conn(
|
||||
self._MS_CLUSTER_NAMESPACE % host)
|
||||
self._cluster = self._conn_cluster.MSCluster_Cluster()[0]
|
||||
|
||||
# extract this node name from cluster's path
|
||||
path = self._cluster.path_()
|
||||
self._this_node = re.search(r'\\\\(.*)\\root', path,
|
||||
re.IGNORECASE).group(1)
|
||||
except AttributeError:
|
||||
raise exceptions.HyperVClusterException(
|
||||
_("Could not initialize cluster wmi connection."))
|
||||
|
||||
def _get_failover_watcher(self):
|
||||
raw_query = (
|
||||
"SELECT * FROM __InstanceModificationEvent "
|
||||
"WITHIN %(wmi_check_interv)s WHERE TargetInstance ISA "
|
||||
"'%(cluster_res)s' AND "
|
||||
"TargetInstance.Type='%(cluster_res_type)s' AND "
|
||||
"TargetInstance.OwnerNode != PreviousInstance.OwnerNode" %
|
||||
{'wmi_check_interv': self._WMI_EVENT_CHECK_INTERVAL,
|
||||
'cluster_res': self._MSCLUSTER_RES,
|
||||
'cluster_res_type': self._VM_TYPE})
|
||||
return self._conn_cluster.watch_for(raw_wql=raw_query)
|
||||
|
||||
def check_cluster_state(self):
|
||||
if len(self._get_cluster_nodes()) < 1:
|
||||
raise exceptions.HyperVClusterException(
|
||||
_("Not enough cluster nodes."))
|
||||
|
||||
def get_node_name(self):
|
||||
return self._this_node
|
||||
|
||||
def _get_cluster_nodes(self):
|
||||
cluster_assoc = self._conn_cluster.MSCluster_ClusterToNode(
|
||||
Antecedent=self._cluster.path_())
|
||||
return [x.Dependent for x in cluster_assoc]
|
||||
|
||||
def _get_vm_groups(self):
|
||||
assocs = self._conn_cluster.MSCluster_ClusterToResourceGroup(
|
||||
GroupComponent=self._cluster.path_())
|
||||
resources = [a.PartComponent for a in assocs]
|
||||
return (r for r in resources if
|
||||
hasattr(r, 'GroupType') and
|
||||
r.GroupType == self._VM_GROUP_TYPE)
|
||||
|
||||
def _lookup_vm_group_check(self, vm_name):
|
||||
vm = self._lookup_vm_group(vm_name)
|
||||
if not vm:
|
||||
raise exceptions.HyperVVMNotFoundException(vm_name=vm_name)
|
||||
return vm
|
||||
|
||||
def _lookup_vm_group(self, vm_name):
|
||||
return self._lookup_res(self._conn_cluster.MSCluster_ResourceGroup,
|
||||
vm_name)
|
||||
|
||||
def _lookup_vm_check(self, vm_name):
|
||||
vm = self._lookup_vm(vm_name)
|
||||
if not vm:
|
||||
raise exceptions.HyperVVMNotFoundException(vm_name=vm_name)
|
||||
return vm
|
||||
|
||||
def _lookup_vm(self, vm_name):
|
||||
vm_name = self._VM_BASE_NAME % vm_name
|
||||
return self._lookup_res(self._conn_cluster.MSCluster_Resource, vm_name)
|
||||
|
||||
def _lookup_res(self, resource_source, res_name):
|
||||
res = resource_source(Name=res_name)
|
||||
n = len(res)
|
||||
if n == 0:
|
||||
return None
|
||||
elif n > 1:
|
||||
raise exceptions.HyperVClusterException(
|
||||
_('Duplicate resource name %s found.') % res_name)
|
||||
else:
|
||||
return res[0]
|
||||
|
||||
def get_cluster_node_names(self):
|
||||
nodes = self._get_cluster_nodes()
|
||||
return [n.Name for n in nodes]
|
||||
|
||||
def get_vm_host(self, vm_name):
|
||||
return self._lookup_vm_group_check(vm_name).OwnerNode
|
||||
|
||||
def list_instances(self):
|
||||
return [r.Name for r in self._get_vm_groups()]
|
||||
|
||||
def list_instance_uuids(self):
|
||||
return [r.Id for r in self._get_vm_groups()]
|
||||
|
||||
def add_vm_to_cluster(self, vm_name):
|
||||
LOG.debug("Add vm to cluster called for vm %s" % vm_name)
|
||||
self._cluster.AddVirtualMachine(vm_name)
|
||||
|
||||
vm_group = self._lookup_vm_group_check(vm_name)
|
||||
vm_group.PersistentState = True
|
||||
vm_group.AutoFailbackType = self._FAILBACK_TRUE
|
||||
# set the earliest and latest time that the group can be moved
|
||||
# back to its preferred node. The unit is in hours.
|
||||
vm_group.FailbackWindowStart = self._FAILBACK_WINDOW_MIN
|
||||
vm_group.FailbackWindowEnd = self._FAILBACK_WINDOW_MAX
|
||||
vm_group.put()
|
||||
|
||||
def bring_online(self, vm_name):
|
||||
vm = self._lookup_vm_check(vm_name)
|
||||
vm.BringOnline()
|
||||
|
||||
def take_offline(self, vm_name):
|
||||
vm = self._lookup_vm_check(vm_name)
|
||||
vm.TakeOffline()
|
||||
|
||||
def delete(self, vm_name):
|
||||
vm = self._lookup_vm_group_check(vm_name)
|
||||
vm.DestroyGroup(self._DESTROY_GROUP)
|
||||
|
||||
def vm_exists(self, vm_name):
|
||||
return self._lookup_vm(vm_name) is not None
|
||||
|
||||
def live_migrate_vm(self, vm_name, new_host):
|
||||
self._migrate_vm(vm_name, new_host, self._LIVE_MIGRATION_TYPE)
|
||||
|
||||
def _migrate_vm(self, vm_name, new_host, migration_type):
|
||||
vm_group = self._lookup_vm_group_check(vm_name)
|
||||
try:
|
||||
vm_group.MoveToNewNodeParams(self._IGNORE_LOCKED, new_host,
|
||||
[migration_type])
|
||||
except Exception as e:
|
||||
LOG.error(_LE('Exception during cluster live migration of '
|
||||
'%(vm_name)s to %(host)s: %(exception)s'),
|
||||
{'vm_name': vm_name,
|
||||
'host': new_host,
|
||||
'exception': e})
|
||||
|
||||
def monitor_vm_failover(self, callback):
|
||||
"""Creates a monitor to check for new WMI MSCluster_Resource
|
||||
events.
|
||||
|
||||
This method will poll the last _WMI_EVENT_CHECK_INTERVAL + 1
|
||||
seconds for new events and listens for _WMI_EVENT_TIMEOUT_MS
|
||||
miliseconds, since listening is a thread blocking action.
|
||||
|
||||
Any event object caught will then be processed.
|
||||
"""
|
||||
vm_name = None
|
||||
new_host = None
|
||||
try:
|
||||
# wait for new event for _WMI_EVENT_TIMEOUT_MS miliseconds.
|
||||
if patcher.is_monkey_patched('thread'):
|
||||
wmi_object = tpool.execute(self._watcher,
|
||||
self._WMI_EVENT_TIMEOUT_MS)
|
||||
else:
|
||||
wmi_object = self._watcher(self._WMI_EVENT_TIMEOUT_MS)
|
||||
|
||||
old_host = wmi_object.previous.OwnerNode
|
||||
new_host = wmi_object.OwnerNode
|
||||
# wmi_object.Name field is of the form:
|
||||
# 'Virtual Machine nova-instance-template'
|
||||
# wmi_object.Name filed is a key and as such is not affected
|
||||
# by locale, so it will always be 'Virtual Machine'
|
||||
match = self._instance_name_regex.search(wmi_object.Name)
|
||||
if match:
|
||||
vm_name = match.group(1)
|
||||
|
||||
if vm_name:
|
||||
try:
|
||||
callback(vm_name, old_host, new_host)
|
||||
except Exception:
|
||||
LOG.exception(
|
||||
_LE("Exception during failover callback."))
|
||||
except wmi.x_wmi_timed_out:
|
||||
pass
|
@ -1,297 +0,0 @@
|
||||
# Copyright 2013 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import platform
|
||||
import sys
|
||||
|
||||
if sys.platform == 'win32':
|
||||
import wmi
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
from os_win._i18n import _, _LE
|
||||
from os_win import exceptions
|
||||
from os_win.utils import _wqlutils
|
||||
from os_win.utils import baseutils
|
||||
from os_win.utils.compute import vmutils
|
||||
from os_win.utils import jobutils
|
||||
from os_win.utils.storage.initiator import iscsi_wmi_utils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class LiveMigrationUtils(baseutils.BaseUtilsVirt):
|
||||
_STORAGE_ALLOC_SETTING_DATA_CLASS = 'Msvm_StorageAllocationSettingData'
|
||||
_CIM_RES_ALLOC_SETTING_DATA_CLASS = 'CIM_ResourceAllocationSettingData'
|
||||
|
||||
def __init__(self):
|
||||
super(LiveMigrationUtils, self).__init__()
|
||||
self._vmutils = vmutils.VMUtils()
|
||||
self._jobutils = jobutils.JobUtils()
|
||||
self._iscsi_initiator = iscsi_wmi_utils.ISCSIInitiatorWMIUtils()
|
||||
|
||||
def _get_conn_v2(self, host='localhost'):
|
||||
try:
|
||||
return self._get_wmi_obj(self._wmi_namespace % host)
|
||||
except wmi.x_wmi as ex:
|
||||
LOG.exception(_LE('Get version 2 connection error'))
|
||||
if ex.com_error.hresult == -2147217394:
|
||||
msg = (_('Live migration is not supported on target host "%s"')
|
||||
% host)
|
||||
elif ex.com_error.hresult == -2147023174:
|
||||
msg = (_('Target live migration host "%s" is unreachable')
|
||||
% host)
|
||||
else:
|
||||
msg = _('Live migration failed: %s') % ex.message
|
||||
raise exceptions.HyperVException(msg)
|
||||
|
||||
def check_live_migration_config(self):
|
||||
migration_svc = (
|
||||
self._compat_conn.Msvm_VirtualSystemMigrationService()[0])
|
||||
vsmssd = (
|
||||
self._compat_conn.Msvm_VirtualSystemMigrationServiceSettingData())
|
||||
vsmssd = vsmssd[0]
|
||||
if not vsmssd.EnableVirtualSystemMigration:
|
||||
raise exceptions.HyperVException(
|
||||
_('Live migration is not enabled on this host'))
|
||||
if not migration_svc.MigrationServiceListenerIPAddressList:
|
||||
raise exceptions.HyperVException(
|
||||
_('Live migration networks are not configured on this host'))
|
||||
|
||||
def _get_vm(self, conn_v2, vm_name):
|
||||
vms = conn_v2.Msvm_ComputerSystem(ElementName=vm_name)
|
||||
n = len(vms)
|
||||
if not n:
|
||||
raise exceptions.HyperVVMNotFoundException(vm_name=vm_name)
|
||||
elif n > 1:
|
||||
raise exceptions.HyperVException(_('Duplicate VM name found: %s')
|
||||
% vm_name)
|
||||
return vms[0]
|
||||
|
||||
def _destroy_planned_vm(self, conn_v2, planned_vm):
|
||||
LOG.debug("Destroying existing planned VM: %s",
|
||||
planned_vm.ElementName)
|
||||
vs_man_svc = conn_v2.Msvm_VirtualSystemManagementService()[0]
|
||||
(job_path, ret_val) = vs_man_svc.DestroySystem(planned_vm.path_())
|
||||
self._jobutils.check_ret_val(ret_val, job_path)
|
||||
|
||||
def _get_planned_vms(self, conn_v2, vm):
|
||||
return conn_v2.Msvm_PlannedComputerSystem(Name=vm.Name)
|
||||
|
||||
def _destroy_existing_planned_vms(self, conn_v2, vm):
|
||||
planned_vms = self._get_planned_vms(conn_v2, vm)
|
||||
for planned_vm in planned_vms:
|
||||
self._destroy_planned_vm(conn_v2, planned_vm)
|
||||
|
||||
def _create_planned_vm(self, conn_v2_local, conn_v2_remote,
                       vm, ip_addr_list, dest_host):
    """Create a planned VM on dest_host using a staged migration.

    :returns: the resulting Msvm_PlannedComputerSystem fetched through
        the local connection.
    """
    # MigrationType 32770 ("Staged"): only creates the planned VM,
    # without moving state or storage.
    vsmsd = conn_v2_remote.query("select * from "
                                 "Msvm_VirtualSystemMigrationSettingData "
                                 "where MigrationType = 32770")[0]
    vsmsd.DestinationIPAddressList = ip_addr_list
    migration_setting_data = vsmsd.GetText_(1)

    LOG.debug("Creating planned VM for VM: %s", vm.ElementName)
    migr_svc = conn_v2_remote.Msvm_VirtualSystemMigrationService()[0]
    job_path, ret_val = migr_svc.MigrateVirtualSystemToHost(
        ComputerSystem=vm.path_(),
        DestinationHost=dest_host,
        MigrationSettingData=migration_setting_data)
    self._jobutils.check_ret_val(ret_val, job_path)

    return conn_v2_local.Msvm_PlannedComputerSystem(Name=vm.Name)[0]
|
||||
|
||||
def _get_physical_disk_paths(self, vm_name):
|
||||
# TODO(claudiub): Remove this after the livemigrationutils usage has
|
||||
# been updated to create planned VM on the destination host beforehand.
|
||||
ide_ctrl_path = self._vmutils.get_vm_ide_controller(vm_name, 0)
|
||||
if ide_ctrl_path:
|
||||
ide_paths = self._vmutils.get_controller_volume_paths(
|
||||
ide_ctrl_path)
|
||||
else:
|
||||
ide_paths = {}
|
||||
|
||||
scsi_ctrl_path = self._vmutils.get_vm_scsi_controller(vm_name)
|
||||
scsi_paths = self._vmutils.get_controller_volume_paths(scsi_ctrl_path)
|
||||
|
||||
return dict(list(ide_paths.items()) + list(scsi_paths.items()))
|
||||
|
||||
def _get_remote_disk_data(self, vmutils_remote, disk_paths, dest_host):
    """Translate local passthrough disk paths to their remote equivalents.

    For each local disk backed by an iSCSI target, locate the matching
    device on dest_host and map the RASD relative path to the remote
    mounted disk path. Disks without a resolvable target are skipped.
    """
    # TODO(claudiub): Remove this after the livemigrationutils usage has
    # been updated to create planned VM on the destination host beforehand.
    remote_initiator = iscsi_wmi_utils.ISCSIInitiatorWMIUtils(dest_host)

    remote_paths = {}
    for rasd_rel_path, disk_path in disk_paths.items():
        target = self._iscsi_initiator.get_target_from_disk_path(disk_path)
        if not target:
            LOG.debug("Could not retrieve iSCSI target "
                      "from disk path: %s", disk_path)
            continue
        target_iqn, target_lun = target
        dev_num = remote_initiator.get_device_number_for_target(
            target_iqn, target_lun)
        remote_paths[rasd_rel_path] = (
            vmutils_remote.get_mounted_disk_by_drive_number(dev_num))
    return remote_paths
|
||||
|
||||
def _get_disk_data(self, vm_name, vmutils_remote, disk_path_mapping):
|
||||
disk_paths = {}
|
||||
phys_disk_resources = vmutils_remote.get_vm_disks(vm_name)[1]
|
||||
|
||||
for disk in phys_disk_resources:
|
||||
rasd_rel_path = disk.path().RelPath
|
||||
# We set this when volumes are attached.
|
||||
serial = disk.ElementName
|
||||
disk_paths[rasd_rel_path] = disk_path_mapping[serial]
|
||||
return disk_paths
|
||||
|
||||
def _update_planned_vm_disk_resources(self, conn_v2_local,
                                      planned_vm, vm_name,
                                      disk_paths_remote):
    """Point the planned VM's physical disk resources at remote disks.

    Replaces each passthrough disk host resource with the matching path
    from disk_paths_remote, then applies the modified settings through
    the VM management service.
    """
    updated_rsd = []
    sasds = _wqlutils.get_element_associated_class(
        self._compat_conn, self._CIM_RES_ALLOC_SETTING_DATA_CLASS,
        element_uuid=planned_vm.Name)
    for sasd in sasds:
        is_phys_disk = (
            sasd.ResourceType == 17 and
            sasd.ResourceSubType ==
            "Microsoft:Hyper-V:Physical Disk Drive" and
            sasd.HostResource)
        if not is_phys_disk:
            continue
        # Replace the local disk target with the correct remote one.
        old_disk_path = sasd.HostResource[0]
        new_disk_path = disk_paths_remote.pop(sasd.path().RelPath)

        LOG.debug("Replacing host resource "
                  "%(old_disk_path)s with "
                  "%(new_disk_path)s on planned VM %(vm_name)s",
                  {'old_disk_path': old_disk_path,
                   'new_disk_path': new_disk_path,
                   'vm_name': vm_name})
        sasd.HostResource = [new_disk_path]
        updated_rsd.append(sasd.GetText_(1))

    LOG.debug("Updating remote planned VM disk paths for VM: %s",
              vm_name)
    vs_man_svc = conn_v2_local.Msvm_VirtualSystemManagementService()[0]
    (res_settings, job_path, ret_val) = vs_man_svc.ModifyResourceSettings(
        ResourceSettings=updated_rsd)
    self._jobutils.check_ret_val(ret_val, job_path)
|
||||
|
||||
def _get_vhd_setting_data(self, vm):
    """Return the serialized VHD setting data entries of the given VM."""
    sasds = _wqlutils.get_element_associated_class(
        self._compat_conn, self._STORAGE_ALLOC_SETTING_DATA_CLASS,
        element_uuid=vm.Name)
    return [sasd.GetText_(1) for sasd in sasds
            if sasd.ResourceType == 31 and
            sasd.ResourceSubType == "Microsoft:Hyper-V:Virtual Hard Disk"]
|
||||
|
||||
def _live_migrate_vm(self, conn_v2_local, vm, planned_vm, rmt_ip_addr_list,
                     new_resource_setting_data, dest_host):
    """Start the live migration of a VM towards dest_host."""
    # MigrationType 32771 ("VirtualSystemAndStorage"): migrates the VM
    # state together with its storage.
    vsmsd = conn_v2_local.query("select * from "
                                "Msvm_VirtualSystemMigrationSettingData "
                                "where MigrationType = 32771")[0]
    vsmsd.DestinationIPAddressList = rmt_ip_addr_list
    if planned_vm:
        vsmsd.DestinationPlannedVirtualSystemId = planned_vm.Name
    migration_setting_data = vsmsd.GetText_(1)

    migr_svc = conn_v2_local.Msvm_VirtualSystemMigrationService()[0]

    LOG.debug("Starting live migration for VM: %s", vm.ElementName)
    job_path, ret_val = migr_svc.MigrateVirtualSystemToHost(
        ComputerSystem=vm.path_(),
        DestinationHost=dest_host,
        MigrationSettingData=migration_setting_data,
        NewResourceSettingData=new_resource_setting_data)
    self._jobutils.check_ret_val(ret_val, job_path)
|
||||
|
||||
def _get_ip_address_list(self, conn_v2, hostname):
    """Return the migration listener IP addresses of the given host."""
    LOG.debug("Getting live migration networks for host: %s",
              hostname)
    migration_svc = conn_v2.Msvm_VirtualSystemMigrationService()[0]
    return migration_svc.MigrationServiceListenerIPAddressList
|
||||
|
||||
def live_migrate_vm(self, vm_name, dest_host):
    """Live migrate the named VM to dest_host.

    When a planned VM already exists on the destination it is reused;
    otherwise one is created here for VMs with passthrough disks
    (legacy behavior).

    :raises OSWinException: if multiple planned VMs exist for this VM
        on the destination host.
    """
    self.check_live_migration_config()

    conn_v2_remote = self._get_conn_v2(dest_host)
    vm = self._get_vm(self._compat_conn, vm_name)
    rmt_ip_addr_list = self._get_ip_address_list(conn_v2_remote,
                                                 dest_host)

    planned_vms = self._get_planned_vms(conn_v2_remote, vm)
    if len(planned_vms) > 1:
        err_msg = _("Multiple planned VMs were found for VM %(vm_name)s "
                    "on host %(dest_host)s")
        raise exceptions.OSWinException(
            err_msg % dict(vm_name=vm_name, dest_host=dest_host))

    if planned_vms:
        planned_vm = planned_vms[0]
    else:
        # TODO(claudiub): Remove this branch after the livemigrationutils
        # usage has been updated to create planned VM on the destination
        # host beforehand.
        planned_vm = None
        disk_paths = self._get_physical_disk_paths(vm_name)
        if disk_paths:
            vmutils_remote = vmutils.VMUtils(dest_host)
            disk_paths_remote = self._get_remote_disk_data(
                vmutils_remote, disk_paths, dest_host)
            planned_vm = self._create_planned_vm(
                conn_v2_remote, self._compat_conn, vm, rmt_ip_addr_list,
                dest_host)
            self._update_planned_vm_disk_resources(
                conn_v2_remote, planned_vm, vm_name, disk_paths_remote)

    new_resource_setting_data = self._get_vhd_setting_data(vm)
    self._live_migrate_vm(self._compat_conn, vm, planned_vm,
                          rmt_ip_addr_list, new_resource_setting_data,
                          dest_host)
|
||||
|
||||
def create_planned_vm(self, vm_name, src_host, disk_path_mapping):
    """Create a planned VM on this host (the migration destination).

    Connects back to the source host to read the VM definition, removes
    any stale planned VMs, stages a new planned VM locally, and points
    its disk resources at the local disk paths.
    """
    # This is run on the destination host.
    dest_host = platform.node()
    vmutils_src = vmutils.VMUtils(src_host)

    conn_v2_src = self._get_conn_v2(src_host)
    vm = self._get_vm(conn_v2_src, vm_name)

    # Make sure there are no planned VMs already.
    self._destroy_existing_planned_vms(self._compat_conn, vm)

    ip_addr_list = self._get_ip_address_list(self._compat_conn,
                                             dest_host)
    disk_paths = self._get_disk_data(vm_name, vmutils_src,
                                     disk_path_mapping)

    planned_vm = self._create_planned_vm(self._compat_conn,
                                         conn_v2_src,
                                         vm, ip_addr_list,
                                         dest_host)
    self._update_planned_vm_disk_resources(self._compat_conn, planned_vm,
                                           vm_name, disk_paths)
|
@ -1,22 +0,0 @@
|
||||
# Copyright 2013 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from os_win.utils import baseutils
|
||||
|
||||
|
||||
class RDPConsoleUtils(baseutils.BaseUtilsVirt):
    """Helper for retrieving Hyper-V RDP console information."""

    def get_rdp_console_port(self):
        """Return the listener port of the Hyper-V terminal service."""
        ts_settings = self._conn.Msvm_TerminalServiceSettingData()[0]
        return ts_settings.ListenerPort
|
File diff suppressed because it is too large
Load Diff
@ -1,189 +0,0 @@
|
||||
# Copyright 2015 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from os_win._i18n import _
|
||||
from os_win import constants
|
||||
from os_win import exceptions
|
||||
from os_win.utils import _wqlutils
|
||||
from os_win.utils.compute import vmutils
|
||||
from oslo_utils import units
|
||||
|
||||
|
||||
class VMUtils10(vmutils.VMUtils):
    """VM utils for Windows 10 / Windows Server 2016 and newer.

    Extends the base VM utils with RemoteFX validation, secure boot and
    shielded VM (vTPM) support available starting with these versions.
    """

    _UEFI_CERTIFICATE_AUTH = 'MicrosoftUEFICertificateAuthority'
    _SERIAL_PORT_SETTING_DATA_CLASS = "Msvm_SerialPortSettingData"
    _SECURITY_SETTING_DATA = 'Msvm_SecuritySettingData'
    _MSPS_NAMESPACE = '//%s/root/msps'

    # RemoteFX maximum resolution -> WMI enum value.
    _remote_fx_res_map = {
        constants.REMOTEFX_MAX_RES_1024x768: 0,
        constants.REMOTEFX_MAX_RES_1280x1024: 1,
        constants.REMOTEFX_MAX_RES_1600x1200: 2,
        constants.REMOTEFX_MAX_RES_1920x1200: 3,
        constants.REMOTEFX_MAX_RES_2560x1600: 4,
        constants.REMOTEFX_MAX_RES_3840x2160: 5
    }

    # Defines the maximum number of monitors for a given resolution.
    _remotefx_max_monitors_map = {
        constants.REMOTEFX_MAX_RES_1024x768: 8,
        constants.REMOTEFX_MAX_RES_1280x1024: 8,
        constants.REMOTEFX_MAX_RES_1600x1200: 4,
        constants.REMOTEFX_MAX_RES_1920x1200: 4,
        constants.REMOTEFX_MAX_RES_2560x1600: 2,
        constants.REMOTEFX_MAX_RES_3840x2160: 1
    }

    # Accepted RemoteFX VRAM amounts, in bytes.
    _remotefx_vram_vals = [64 * units.Mi, 128 * units.Mi, 256 * units.Mi,
                           512 * units.Mi, 1024 * units.Mi]

    def __init__(self, host='.'):
        super(VMUtils10, self).__init__(host)
        # Lazily initialized (see the _conn_msps / _sec_svc properties).
        self._conn_msps_attr = None
        self._sec_svc_attr = None

    @property
    def _conn_msps(self):
        """WMI connection to root/msps (Fabric Shielded Tools).

        :raises OSWinException: if the namespace is unavailable,
            typically because the FabricShieldedTools feature is
            not installed.
        """
        if not self._conn_msps_attr:
            # Build the namespace outside the 'try' so it is always
            # bound when formatting the error message below.
            namespace = self._MSPS_NAMESPACE % self._host
            try:
                self._conn_msps_attr = self._get_wmi_conn(namespace)
            except Exception:
                raise exceptions.OSWinException(
                    _("Namespace %(namespace)s not found. Make sure "
                      "FabricShieldedTools feature is installed.") %
                    {'namespace': namespace})

        return self._conn_msps_attr

    @property
    def _sec_svc(self):
        """The host's Msvm_SecurityService, fetched on first use."""
        if not self._sec_svc_attr:
            self._sec_svc_attr = self._conn.Msvm_SecurityService()[0]
        return self._sec_svc_attr

    def vm_gen_supports_remotefx(self, vm_gen):
        """RemoteFX is supported on both generation 1 and 2 virtual
        machines for Windows 10 / Windows Server 2016.

        :returns: True
        """
        return True

    def _validate_remotefx_params(self, monitor_count, max_resolution,
                                  vram_bytes=None):
        """Validate RemoteFX settings, including the optional VRAM amount.

        :raises HyperVRemoteFXException: if vram_bytes is given and not
            one of the supported values.
        """
        super(VMUtils10, self)._validate_remotefx_params(monitor_count,
                                                         max_resolution)
        # vram_bytes is optional; only validate it when provided.
        # Previously the default None value would always fail this check.
        if vram_bytes and vram_bytes not in self._remotefx_vram_vals:
            raise exceptions.HyperVRemoteFXException(
                _("Unsupported RemoteFX VRAM value: %(requested_value)s."
                  "The supported VRAM values are: %(supported_values)s") %
                {'requested_value': vram_bytes,
                 'supported_values': self._remotefx_vram_vals})

    def _add_3d_display_controller(self, vm, monitor_count,
                                   max_resolution, vram_bytes=None):
        """Attach a RemoteFX 3D display controller resource to the VM."""
        synth_3d_disp_ctrl_res = self._get_new_resource_setting_data(
            self._SYNTH_3D_DISP_CTRL_RES_SUB_TYPE,
            self._SYNTH_3D_DISP_ALLOCATION_SETTING_DATA_CLASS)

        synth_3d_disp_ctrl_res.MaximumMonitors = monitor_count
        synth_3d_disp_ctrl_res.MaximumScreenResolution = max_resolution

        if vram_bytes:
            # The WMI property takes a string value. NOTE: 'unicode' does
            # not exist on Python 3; 'str' is the portable equivalent.
            synth_3d_disp_ctrl_res.VRAMSizeBytes = str(vram_bytes)

        self._jobutils.add_virt_resource(synth_3d_disp_ctrl_res, vm)

    def _vm_has_s3_controller(self, vm_name):
        # Only generation 1 VMs have an S3 video controller.
        return self.get_vm_generation(vm_name) == constants.VM_GEN_1

    def _set_secure_boot(self, vs_data, msft_ca_required):
        """Enable secure boot, optionally with the Microsoft UEFI CA
        template (required for non-Windows guests).
        """
        vs_data.SecureBootEnabled = True
        if msft_ca_required:
            uefi_data = self._conn.Msvm_VirtualSystemSettingData(
                ElementName=self._UEFI_CERTIFICATE_AUTH)[0]
            vs_data.SecureBootTemplateId = uefi_data.SecureBootTemplateId

    def populate_fsk(self, fsk_filepath, fsk_pairs):
        """Writes in the fsk file all the substitution strings and their
        values which will populate the unattended file used when
        creating the pdk.
        """
        fabric_data_pairs = []
        for fsk_key, fsk_value in fsk_pairs.items():
            fabricdata = self._conn_msps.Msps_FabricData.new()
            fabricdata.key = fsk_key
            fabricdata.Value = fsk_value
            fabric_data_pairs.append(fabricdata)

        fsk = self._conn_msps.Msps_FSK.new()
        fsk.FabricDataPairs = fabric_data_pairs
        msps_pfp = self._conn_msps.Msps_ProvisioningFileProcessor

        msps_pfp.SerializeToFile(fsk_filepath, fsk)

    def add_vtpm(self, vm_name, pdk_filepath, shielded):
        """Adds a vtpm and enables it with encryption or shielded option."""
        vm = self._lookup_vm_check(vm_name)

        msps_pfp = self._conn_msps.Msps_ProvisioningFileProcessor
        provisioning_file = msps_pfp.PopulateFromFile(pdk_filepath)[0]
        # key_protector: array of bytes
        key_protector = provisioning_file.KeyProtector
        # policy_data: array of bytes
        policy_data = provisioning_file.PolicyData

        security_profile = _wqlutils.get_element_associated_class(
            self._conn, self._SECURITY_SETTING_DATA,
            element_uuid=vm.ConfigurationID)[0]

        security_profile.EncryptStateAndVmMigrationTraffic = True
        security_profile.TpmEnabled = True
        security_profile.ShieldingRequested = shielded

        sec_profile_serialized = security_profile.GetText_(1)
        (job_path, ret_val) = self._sec_svc.SetKeyProtector(
            key_protector, sec_profile_serialized)
        self._jobutils.check_ret_val(ret_val, job_path)

        (job_path, ret_val) = self._sec_svc.SetSecurityPolicy(
            policy_data, sec_profile_serialized)
        self._jobutils.check_ret_val(ret_val, job_path)

        (job_path, ret_val) = self._sec_svc.ModifySecuritySettings(
            sec_profile_serialized)
        self._jobutils.check_ret_val(ret_val, job_path)

    def provision_vm(self, vm_name, fsk_filepath, pdk_filepath):
        """Provision a shielded VM using the given fsk and pdk files."""
        vm = self._lookup_vm_check(vm_name)
        provisioning_service = self._conn_msps.Msps_ProvisioningService

        (job_path, ret_val) = provisioning_service.ProvisionMachine(
            fsk_filepath, vm.ConfigurationID, pdk_filepath)
        self._jobutils.check_ret_val(ret_val, job_path)

    def is_secure_vm(self, instance_name):
        """Return True if the instance's migration traffic is encrypted
        (i.e. the VM has a security profile with encryption enabled).
        """
        inst_id = self.get_vm_id(instance_name)
        security_profile = _wqlutils.get_element_associated_class(
            self._conn, self._SECURITY_SETTING_DATA,
            element_uuid=inst_id)
        if security_profile:
            return security_profile[0].EncryptStateAndVmMigrationTraffic
        return False
|
@ -1,184 +0,0 @@
|
||||
# Copyright 2016 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
from os_win._i18n import _
|
||||
from os_win import constants
|
||||
from os_win import exceptions
|
||||
from os_win.utils import baseutils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class DNSUtils(baseutils.BaseUtils):
    """Utility class for managing a Microsoft DNS Server through WMI."""

    _DNS_NAMESPACE = '//%s/root/MicrosoftDNS'

    def __init__(self, host='.'):
        # Lazily initialized WMI connection (see _dns_manager).
        self._dns_manager_attr = None
        self._host = host

    @property
    def _dns_manager(self):
        """WMI connection to the MicrosoftDNS namespace, fetched on use.

        :raises DNSException: if the namespace is unavailable, typically
            because the DNS Server feature is not installed.
        """
        if not self._dns_manager_attr:
            # Build the namespace outside the 'try' so it is always
            # bound when formatting the error message below.
            namespace = self._DNS_NAMESPACE % self._host
            try:
                self._dns_manager_attr = self._get_wmi_obj(namespace)
            except Exception:
                raise exceptions.DNSException(
                    _("Namespace %(namespace)s not found. Make sure "
                      "DNS Server feature is installed.") %
                    {'namespace': namespace})

        return self._dns_manager_attr

    def _get_zone(self, zone_name, ignore_missing=True):
        """Return the MicrosoftDNS_Zone with the given name.

        :returns: the zone object, or None when it is missing and
            ignore_missing is True.
        :raises DNSZoneNotFound: when the zone is missing and
            ignore_missing is False.
        """
        zones = self._dns_manager.MicrosoftDNS_Zone(Name=zone_name)
        if zones:
            return zones[0]
        if not ignore_missing:
            raise exceptions.DNSZoneNotFound(zone_name=zone_name)

    def zone_list(self):
        """Returns the current list of DNS Zones.
        """
        zones = self._dns_manager.MicrosoftDNS_Zone()
        return [x.Name for x in zones]

    def zone_exists(self, zone_name):
        """Return True if a DNS zone with the given name exists."""
        return self._get_zone(zone_name) is not None

    def get_zone_properties(self, zone_name):
        """Return a dict describing the zone's type and storage.

        :raises DNSZoneNotFound: if the zone does not exist.
        """
        zone = self._get_zone(zone_name, ignore_missing=False)

        zone_properties = {}
        zone_properties['zone_type'] = zone.ZoneType
        zone_properties['ds_integrated'] = zone.DsIntegrated
        zone_properties['data_file_name'] = zone.DataFile
        zone_properties['master_servers'] = zone.MasterServers or []

        return zone_properties

    def zone_create(self, zone_name, zone_type, ds_integrated,
                    data_file_name=None, ip_addrs=None,
                    admin_email_name=None):
        """Creates a DNS Zone and returns the path to the associated object.

        :param zone_name: string representing the name of the zone.
        :param zone_type: type of zone
            0 = Primary zone
            1 = Secondary zone, MUST include at least one master IP
            2 = Stub zone, MUST include at least one master IP
            3 = Zone forwarder, MUST include at least one master IP
        :param ds_integrated: Only Primary zones can be stored in AD
            True = the zone data is stored in the Active Directory
            False = the data zone is stored in files
        :param data_file_name(Optional): name of the data file associated
            with the zone.
        :param ip_addrs(Optional): IP addresses of the master DNS servers
            for this zone. Parameter type MUST be list
        :param admin_email_name(Optional): email address of the
            administrator responsible for the zone.
        :raises DNSZoneAlreadyExists: if the zone already exists.
        """
        # NOTE: pass the zone name as a lazy logging argument instead of
        # eagerly %-formatting it.
        LOG.debug("Creating DNS Zone '%s'", zone_name)
        if self.zone_exists(zone_name):
            raise exceptions.DNSZoneAlreadyExists(zone_name=zone_name)

        dns_zone_manager = self._dns_manager.MicrosoftDNS_Zone
        (zone_path,) = dns_zone_manager.CreateZone(
            ZoneName=zone_name,
            ZoneType=zone_type,
            DsIntegrated=ds_integrated,
            DataFileName=data_file_name,
            IpAddr=ip_addrs,
            AdminEmailname=admin_email_name)
        return zone_path

    def zone_delete(self, zone_name):
        """Delete the named zone; a no-op if it does not exist."""
        LOG.debug("Deleting DNS Zone '%s'", zone_name)

        zone_to_be_deleted = self._get_zone(zone_name)
        if zone_to_be_deleted:
            zone_to_be_deleted.Delete_()

    def zone_modify(self, zone_name, allow_update=None, disable_wins=None,
                    notify=None, reverse=None, secure_secondaries=None):
        """Modifies properties of an existing zone. If any parameter is
        None, then that parameter will be skipped and will not be taken
        into consideration.

        :param zone_name: string representing the name of the zone.
        :param allow_update:
            0 = No updates allowed.
            1 = Zone accepts both secure and nonsecure updates.
            2 = Zone accepts secure updates only.
        :param disable_wins: Indicates whether the WINS record is
            replicated. If set to TRUE, WINS record replication is
            disabled.
        :param notify:
            0 = Do not notify secondaries
            1 = Notify Servers listed on the Name Servers Tab
            2 = Notify the specified servers
        :param reverse: Indicates whether the Zone is reverse (TRUE)
            or forward (FALSE).
        :param secure_secondaries:
            0 = Allowed to Any host
            1 = Only to the Servers listed on the Name Servers tab
            2 = To the following servers (destination servers IP
                addresses are specified in SecondaryServers value)
            3 = Zone transfers not allowed
        :raises DNSZoneNotFound: if the zone does not exist.
        """
        zone = self._get_zone(zone_name, ignore_missing=False)

        if allow_update is not None:
            zone.AllowUpdate = allow_update
        if disable_wins is not None:
            zone.DisableWINSRecordReplication = disable_wins
        if notify is not None:
            zone.Notify = notify
        if reverse is not None:
            zone.Reverse = reverse
        if secure_secondaries is not None:
            zone.SecureSecondaries = secure_secondaries

        zone.put()

    def zone_update(self, zone_name):
        """Refresh/reload the zone from its authoritative source.

        The action depends on the zone type: AD-integrated primaries are
        updated from DS, secondaries/stubs are force-refreshed, and
        file-backed primaries/forwarders are reloaded.

        :raises DNSZoneNotFound: if the zone does not exist.
        """
        LOG.debug("Updating DNS Zone '%s'", zone_name)

        zone = self._get_zone(zone_name, ignore_missing=False)
        if (zone.DsIntegrated and
                zone.ZoneType == constants.DNS_ZONE_TYPE_PRIMARY):
            zone.UpdateFromDS()
        elif zone.ZoneType in [constants.DNS_ZONE_TYPE_SECONDARY,
                               constants.DNS_ZONE_TYPE_STUB]:
            zone.ForceRefresh()
        elif zone.ZoneType in [constants.DNS_ZONE_TYPE_PRIMARY,
                               constants.DNS_ZONE_TYPE_FORWARD]:
            zone.ReloadZone()

    def get_zone_serial(self, zone_name):
        """Return the serial number of the zone's SOA record, or None
        if the zone does not exist.
        """
        # Performing a manual check to make sure the zone exists before
        # trying to retrieve the MicrosoftDNS_SOAType object. Otherwise,
        # the query for MicrosoftDNS_SOAType will fail with
        # "Generic Failure".
        if not self.zone_exists(zone_name):
            # Return None if zone was not found
            return None

        zone_soatype = self._dns_manager.MicrosoftDNS_SOAType(
            ContainerName=zone_name)
        # Serial number of the SOA record
        SOA = zone_soatype[0].SerialNumber
        return int(SOA)
|
@ -1,241 +0,0 @@
|
||||
# Copyright 2013 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import ctypes
|
||||
import socket
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
from os_win._i18n import _, _LW
|
||||
from os_win import _utils
|
||||
from os_win import constants
|
||||
from os_win import exceptions
|
||||
from os_win.utils import baseutils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class HostUtils(baseutils.BaseUtilsVirt):
|
||||
|
||||
_windows_version = None
|
||||
|
||||
_MSVM_PROCESSOR = 'Msvm_Processor'
|
||||
_MSVM_MEMORY = 'Msvm_Memory'
|
||||
_MSVM_NUMA_NODE = 'Msvm_NumaNode'
|
||||
|
||||
_CENTRAL_PROCESSOR = 'Central Processor'
|
||||
|
||||
_HOST_FORCED_REBOOT = 6
|
||||
_HOST_FORCED_SHUTDOWN = 12
|
||||
_DEFAULT_VM_GENERATION = constants.IMAGE_PROP_VM_GEN_1
|
||||
|
||||
FEATURE_RDS_VIRTUALIZATION = 322
|
||||
FEATURE_MPIO = 57
|
||||
|
||||
_wmi_cimv2_namespace = '//./root/cimv2'
|
||||
|
||||
def __init__(self, host='.'):
|
||||
super(HostUtils, self).__init__(host)
|
||||
self._conn_cimv2 = self._get_wmi_conn(self._wmi_cimv2_namespace,
|
||||
privileges=["Shutdown"])
|
||||
|
||||
def get_cpus_info(self):
|
||||
# NOTE(abalutoiu): Specifying exactly the fields that we need
|
||||
# improves the speed of the query. The LoadPercentage field
|
||||
# is the load capacity of each processor averaged to the last
|
||||
# second, which is time wasted.
|
||||
cpus = self._conn_cimv2.query(
|
||||
"SELECT Architecture, Name, Manufacturer, MaxClockSpeed, "
|
||||
"NumberOfCores, NumberOfLogicalProcessors FROM Win32_Processor "
|
||||
"WHERE ProcessorType = 3")
|
||||
cpus_list = []
|
||||
for cpu in cpus:
|
||||
cpu_info = {'Architecture': cpu.Architecture,
|
||||
'Name': cpu.Name,
|
||||
'Manufacturer': cpu.Manufacturer,
|
||||
'MaxClockSpeed': cpu.MaxClockSpeed,
|
||||
'NumberOfCores': cpu.NumberOfCores,
|
||||
'NumberOfLogicalProcessors':
|
||||
cpu.NumberOfLogicalProcessors}
|
||||
cpus_list.append(cpu_info)
|
||||
return cpus_list
|
||||
|
||||
def is_cpu_feature_present(self, feature_key):
|
||||
return ctypes.windll.kernel32.IsProcessorFeaturePresent(feature_key)
|
||||
|
||||
def get_memory_info(self):
|
||||
"""Returns a tuple with total visible memory and free physical memory
|
||||
expressed in kB.
|
||||
"""
|
||||
mem_info = self._conn_cimv2.query("SELECT TotalVisibleMemorySize, "
|
||||
"FreePhysicalMemory "
|
||||
"FROM win32_operatingsystem")[0]
|
||||
return (int(mem_info.TotalVisibleMemorySize),
|
||||
int(mem_info.FreePhysicalMemory))
|
||||
|
||||
# TODO(atuvenie) This method should be removed once all the callers have
|
||||
# changed to use the get_disk_capacity method from diskutils.
|
||||
def get_volume_info(self, drive):
|
||||
"""Returns a tuple with total size and free space
|
||||
expressed in bytes.
|
||||
"""
|
||||
logical_disk = self._conn_cimv2.query("SELECT Size, FreeSpace "
|
||||
"FROM win32_logicaldisk "
|
||||
"WHERE DeviceID='%s'"
|
||||
% drive)[0]
|
||||
return (int(logical_disk.Size), int(logical_disk.FreeSpace))
|
||||
|
||||
def check_min_windows_version(self, major, minor, build=0):
|
||||
version_str = self.get_windows_version()
|
||||
return list(map(int, version_str.split('.'))) >= [major, minor, build]
|
||||
|
||||
def get_windows_version(self):
|
||||
if not HostUtils._windows_version:
|
||||
Win32_OperatingSystem = self._conn_cimv2.Win32_OperatingSystem()[0]
|
||||
HostUtils._windows_version = Win32_OperatingSystem.Version
|
||||
return HostUtils._windows_version
|
||||
|
||||
def get_local_ips(self):
|
||||
hostname = socket.gethostname()
|
||||
return _utils.get_ips(hostname)
|
||||
|
||||
def get_host_tick_count64(self):
|
||||
return ctypes.windll.kernel32.GetTickCount64()
|
||||
|
||||
def host_power_action(self, action):
|
||||
win32_os = self._conn_cimv2.Win32_OperatingSystem()[0]
|
||||
|
||||
if action == constants.HOST_POWER_ACTION_SHUTDOWN:
|
||||
win32_os.Win32Shutdown(self._HOST_FORCED_SHUTDOWN)
|
||||
elif action == constants.HOST_POWER_ACTION_REBOOT:
|
||||
win32_os.Win32Shutdown(self._HOST_FORCED_REBOOT)
|
||||
else:
|
||||
raise NotImplementedError(
|
||||
_("Host %(action)s is not supported by the Hyper-V driver") %
|
||||
{"action": action})
|
||||
|
||||
def get_supported_vm_types(self):
|
||||
"""Get the supported Hyper-V VM generations.
|
||||
Hyper-V Generation 2 VMs are supported in Windows 8.1,
|
||||
Windows Server / Hyper-V Server 2012 R2 or newer.
|
||||
|
||||
:returns: array of supported VM generations (ex. ['hyperv-gen1'])
|
||||
"""
|
||||
if self.check_min_windows_version(6, 3):
|
||||
return [constants.IMAGE_PROP_VM_GEN_1,
|
||||
constants.IMAGE_PROP_VM_GEN_2]
|
||||
else:
|
||||
return [constants.IMAGE_PROP_VM_GEN_1]
|
||||
|
||||
def get_default_vm_generation(self):
|
||||
return self._DEFAULT_VM_GENERATION
|
||||
|
||||
def check_server_feature(self, feature_id):
|
||||
return len(self._conn_cimv2.Win32_ServerFeature(ID=feature_id)) > 0
|
||||
|
||||
def get_numa_nodes(self):
|
||||
numa_nodes = self._conn.Msvm_NumaNode()
|
||||
nodes_info = []
|
||||
system_memory = self._conn.Msvm_Memory(['NumberOfBlocks'])
|
||||
processors = self._conn.Msvm_Processor(['DeviceID'])
|
||||
|
||||
for node in numa_nodes:
|
||||
# Due to a bug in vmms, getting Msvm_Processor for the numa
|
||||
# node associators resulted in a vmms crash.
|
||||
# As an alternative to using associators we have to manually get
|
||||
# the related Msvm_Processor classes.
|
||||
# Msvm_HostedDependency is the association class between
|
||||
# Msvm_NumaNode and Msvm_Processor. We need to use this class to
|
||||
# relate the two because using associators on Msvm_Processor
|
||||
# will also result in a crash.
|
||||
numa_assoc = self._conn.Msvm_HostedDependency(
|
||||
Antecedent=node.path_())
|
||||
numa_node_assoc = [item.Dependent for item in numa_assoc]
|
||||
|
||||
memory_info = self._get_numa_memory_info(numa_node_assoc,
|
||||
system_memory)
|
||||
if not memory_info:
|
||||
LOG.warning(_LW("Could not find memory information for NUMA "
|
||||
"node. Skipping node measurements."))
|
||||
continue
|
||||
|
||||
cpu_info = self._get_numa_cpu_info(numa_node_assoc, processors)
|
||||
if not cpu_info:
|
||||
LOG.warning(_LW("Could not find CPU information for NUMA "
|
||||
"node. Skipping node measurements."))
|
||||
continue
|
||||
|
||||
node_info = {
|
||||
# NodeID has the format: Microsoft:PhysicalNode\<NODE_ID>
|
||||
'id': node.NodeID.split('\\')[-1],
|
||||
|
||||
# memory block size is 1MB.
|
||||
'memory': memory_info.NumberOfBlocks,
|
||||
'memory_usage': node.CurrentlyConsumableMemoryBlocks,
|
||||
|
||||
# DeviceID has the format: Microsoft:UUID\0\<DEV_ID>
|
||||
'cpuset': set([c.DeviceID.split('\\')[-1] for c in cpu_info]),
|
||||
# cpu_usage can be set, each CPU has a "LoadPercentage"
|
||||
'cpu_usage': 0,
|
||||
}
|
||||
|
||||
nodes_info.append(node_info)
|
||||
|
||||
return nodes_info
|
||||
|
||||
def _get_numa_memory_info(self, numa_node_assoc, system_memory):
|
||||
memory_info = []
|
||||
paths = [x.path_().upper() for x in numa_node_assoc]
|
||||
for memory in system_memory:
|
||||
if memory.path_().upper() in paths:
|
||||
memory_info.append(memory)
|
||||
|
||||
if memory_info:
|
||||
return memory_info[0]
|
||||
|
||||
def _get_numa_cpu_info(self, numa_node_assoc, processors):
|
||||
cpu_info = []
|
||||
paths = [x.path_().upper() for x in numa_node_assoc]
|
||||
for proc in processors:
|
||||
if proc.path_().upper() in paths:
|
||||
cpu_info.append(proc)
|
||||
|
||||
return cpu_info
|
||||
|
||||
def get_remotefx_gpu_info(self):
|
||||
gpus = []
|
||||
all_gpus = self._conn.Msvm_Physical3dGraphicsProcessor(
|
||||
EnabledForVirtualization=True)
|
||||
for gpu in all_gpus:
|
||||
gpus.append({'name': gpu.Name,
|
||||
'driver_version': gpu.DriverVersion,
|
||||
'total_video_ram': gpu.TotalVideoMemory,
|
||||
'available_video_ram': gpu.AvailableVideoMemory,
|
||||
'directx_version': gpu.DirectXVersion})
|
||||
return gpus
|
||||
|
||||
    def verify_host_remotefx_capability(self):
        """Validate that this host can offer RemoteFX to guests.

        :raises exceptions.HyperVRemoteFXException: if no GPU in the
            Synth3dVideoPool is DirectX 11 capable, or the GPUs do not
            support SLAT.
        """
        synth_3d_video_pool = self._conn.Msvm_Synth3dVideoPool()[0]
        if not synth_3d_video_pool.IsGpuCapable:
            raise exceptions.HyperVRemoteFXException(
                _("To enable RemoteFX on Hyper-V at least one GPU supporting "
                  "DirectX 11 is required."))
        if not synth_3d_video_pool.IsSlatCapable:
            raise exceptions.HyperVRemoteFXException(
                _("To enable RemoteFX on Hyper-V it is required that the host "
                  "GPUs support SLAT."))
|
||||
|
||||
    def is_host_guarded(self):
        """Return whether the host can run shielded VMs.

        Always False at this level; HostUtils10 (see below in this
        module set) overrides it to query the Host Guardian Service.
        """
        return False
|
@ -1,56 +0,0 @@
|
||||
# Copyright 2016 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from os_win._i18n import _, _LW
|
||||
from os_win import exceptions
|
||||
from os_win.utils import hostutils
|
||||
from oslo_log import log as logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class HostUtils10(hostutils.HostUtils):
    """Host utils using WMI namespaces introduced with Windows 10 /
    Windows Server 2016, notably the Host Guardian Service (HGS).
    """

    # Namespace exposing the local HGS client configuration.
    _HGS_NAMESPACE = '//%s/Root/Microsoft/Windows/Hgs'

    def __init__(self, host='.'):
        super(HostUtils10, self).__init__(host)
        # Lazily initialized HGS WMI connection (see _conn_hgs).
        self._conn_hgs_attr = None

    @property
    def _conn_hgs(self):
        """Lazily open and cache a connection to the HGS WMI namespace.

        :raises exceptions.OSWinException: if the namespace is not
            available on this Windows version.
        """
        if not self._conn_hgs_attr:
            # Compute the namespace before entering the try block. The
            # original computed it inside, so a failure while formatting
            # would leave 'namespace' unbound in the handler below and
            # raise NameError instead of the intended OSWinException.
            namespace = self._HGS_NAMESPACE % self._host
            try:
                self._conn_hgs_attr = self._get_wmi_conn(namespace)
            except Exception:
                raise exceptions.OSWinException(
                    _("Namespace %(namespace)s is not supported on this "
                      "Windows version.") %
                    {'namespace': namespace})

        return self._conn_hgs_attr

    def is_host_guarded(self):
        """Checks the host is guarded so it can run Shielded VMs"""

        (return_code,
         host_config) = self._conn_hgs.MSFT_HgsClientConfiguration.Get()
        if return_code:
            # Best effort: report unguarded rather than fail the caller.
            LOG.warning(_LW('Retrieving the local Host Guardian Service '
                            'Client configuration failed with code: %s'),
                        return_code)
            return False
        return host_config.IsHostGuarded
|
@ -1,305 +0,0 @@
|
||||
# Copyright 2014 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import ctypes
|
||||
import os
|
||||
import six
|
||||
import struct
|
||||
import sys
|
||||
|
||||
from eventlet import patcher
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils import units
|
||||
|
||||
from os_win import constants
|
||||
from os_win import exceptions
|
||||
from os_win.utils import win32utils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
# Real (non monkey patched) threading module: these helpers run native
# OS threads performing blocking Windows I/O.
native_threading = patcher.original('threading')

# Avoid using six.moves.queue as we need a non monkey patched class
if sys.version_info > (3, 0):
    Queue = patcher.original('queue')
else:
    # The module was named 'Queue' on Python 2.
    Queue = patcher.original('Queue')
|
||||
|
||||
if sys.platform == 'win32':
    from ctypes import wintypes

    kernel32 = ctypes.windll.kernel32

    class OVERLAPPED(ctypes.Structure):
        # Mirrors the Win32 OVERLAPPED structure used for overlapped
        # (asynchronous) ReadFileEx/WriteFileEx calls.
        _fields_ = [
            ('Internal', wintypes.ULONG),
            ('InternalHigh', wintypes.ULONG),
            ('Offset', wintypes.DWORD),
            ('OffsetHigh', wintypes.DWORD),
            ('hEvent', wintypes.HANDLE)
        ]

        def __init__(self):
            # Start I/O at offset zero; hEvent is filled in later by
            # IOUtils.get_new_overlapped_structure.
            self.Offset = 0
            self.OffsetHigh = 0

    LPOVERLAPPED = ctypes.POINTER(OVERLAPPED)
    # Prototype of the completion routine ReadFileEx/WriteFileEx invoke
    # when the overlapped operation finishes.
    LPOVERLAPPED_COMPLETION_ROUTINE = ctypes.WINFUNCTYPE(
        None, wintypes.DWORD, wintypes.DWORD, LPOVERLAPPED)

    # Declare argtypes so ctypes marshals the arguments correctly.
    kernel32.ReadFileEx.argtypes = [
        wintypes.HANDLE, wintypes.LPVOID, wintypes.DWORD,
        LPOVERLAPPED, LPOVERLAPPED_COMPLETION_ROUTINE]
    kernel32.WriteFileEx.argtypes = [
        wintypes.HANDLE, wintypes.LPCVOID, wintypes.DWORD,
        LPOVERLAPPED, LPOVERLAPPED_COMPLETION_ROUTINE]
|
||||
|
||||
|
||||
# CreateFileW access / share / disposition flags (Win32 File API).
FILE_FLAG_OVERLAPPED = 0x40000000
FILE_SHARE_READ = 1
FILE_SHARE_WRITE = 2
GENERIC_READ = 0x80000000
GENERIC_WRITE = 0x40000000
OPEN_EXISTING = 3

# FormatMessage flags.
FORMAT_MESSAGE_FROM_SYSTEM = 0x00001000
FORMAT_MESSAGE_ALLOCATE_BUFFER = 0x00000100
FORMAT_MESSAGE_IGNORE_INSERTS = 0x00000200

# Win32 handle / wait / error codes.
INVALID_HANDLE_VALUE = -1
WAIT_FAILED = 0xFFFFFFFF
WAIT_FINISHED = 0
ERROR_INVALID_HANDLE = 6
ERROR_PIPE_BUSY = 231
ERROR_PIPE_NOT_CONNECTED = 233
ERROR_NOT_FOUND = 1168

WAIT_PIPE_DEFAULT_TIMEOUT = 5  # seconds
# NOTE(review): 2 * units.k -- presumably milliseconds, matching the
# Win32 wait APIs; not referenced in this chunk, confirm against callers.
WAIT_IO_COMPLETION_TIMEOUT = 2 * units.k
WAIT_INFINITE_TIMEOUT = 0xFFFFFFFF

# Queue polling timeouts (seconds), used by IOQueue below.
IO_QUEUE_TIMEOUT = 2
IO_QUEUE_BURST_TIMEOUT = 0.05
|
||||
|
||||
|
||||
# TODO(lpetrut): Remove this class after the patch which
# interactively handles serial ports merges in Nova.
class IOThread(native_threading.Thread):
    """Daemon thread tailing one file into a size-capped destination.

    Bytes read from `src` are appended to `dest`. When `dest` reaches
    `max_bytes`, it is rotated to `dest + '.1'` (replacing any previous
    archive) and a fresh `dest` file is started.
    """

    def __init__(self, src, dest, max_bytes):
        super(IOThread, self).__init__()
        # Use the `daemon` attribute instead of the deprecated
        # setDaemon() call (same behavior, available since Python 2.6).
        self.daemon = True
        self._src = src
        self._dest = dest
        self._dest_archive = dest + '.1'
        self._max_bytes = max_bytes
        self._stopped = native_threading.Event()

    def run(self):
        try:
            self._copy()
        except Exception:
            # Best-effort copier: any I/O error simply stops the worker.
            self._stopped.set()

    def _copy(self):
        with open(self._src, 'rb') as src:
            # The destination is reopened on every rotation, so it cannot
            # live in a `with` block; try/finally guarantees whichever
            # handle is current gets closed. (The original code leaked the
            # post-rotation handle: its `with` statement only closed the
            # first file object it had opened.)
            dest = open(self._dest, 'ab', 0)
            try:
                dest.seek(0, os.SEEK_END)
                log_size = dest.tell()
                while not self._stopped.is_set():
                    # Read one byte at a time to avoid blocking.
                    data = src.read(1)
                    dest.write(data)
                    log_size += len(data)
                    if log_size >= self._max_bytes:
                        # Rotate: current log replaces the old archive
                        # and a new, empty log is started.
                        dest.close()
                        if os.path.exists(self._dest_archive):
                            os.remove(self._dest_archive)
                        os.rename(self._dest, self._dest_archive)
                        dest = open(self._dest, 'ab', 0)
                        log_size = 0
            finally:
                dest.close()

    def join(self):
        """Signal the copy loop to stop, then wait for the thread."""
        self._stopped.set()
        super(IOThread, self).join()

    def is_active(self):
        return not self._stopped.is_set()
|
||||
|
||||
|
||||
class IOUtils(object):
    """Asynchronous IO helper class wrapping overlapped Win32 file APIs."""

    def __init__(self):
        self._win32_utils = win32utils.Win32Utils()

    def _run_and_check_output(self, *args, **kwargs):
        # Delegate to Win32Utils, raising Win32IOException on failure.
        kwargs.update(kernel32_lib_func=True,
                      failure_exc=exceptions.Win32IOException)
        return self._win32_utils.run_and_check_output(*args, **kwargs)

    def wait_named_pipe(self, pipe_name, timeout=WAIT_PIPE_DEFAULT_TIMEOUT):
        """Wait a given amount of time (seconds) for a pipe to become
        available.
        """
        # WaitNamedPipeW takes milliseconds, hence the units.k factor.
        self._run_and_check_output(kernel32.WaitNamedPipeW,
                                   ctypes.c_wchar_p(pipe_name),
                                   timeout * units.k)

    def open(self, path, desired_access=None, share_mode=None,
             creation_disposition=None, flags_and_attributes=None):
        """Open `path` via CreateFileW and return the raw handle.

        :raises exceptions.Win32IOException: on INVALID_HANDLE_VALUE.
        """
        error_ret_vals = [INVALID_HANDLE_VALUE]
        handle = self._run_and_check_output(kernel32.CreateFileW,
                                            ctypes.c_wchar_p(path),
                                            desired_access,
                                            share_mode,
                                            None,
                                            creation_disposition,
                                            flags_and_attributes,
                                            None,
                                            error_ret_vals=error_ret_vals)
        return handle

    def close_handle(self, handle):
        self._run_and_check_output(kernel32.CloseHandle, handle)

    def cancel_io(self, handle, overlapped_structure=None,
                  ignore_invalid_handle=False):
        """Cancels pending IO on specified handle.

        If an overlapped structure is passed, only the IO requests that
        were issued with the specified overlapped structure are canceled.
        """
        # Ignore errors thrown when there are no requests
        # to be canceled.
        ignored_error_codes = [ERROR_NOT_FOUND]
        if ignore_invalid_handle:
            ignored_error_codes.append(ERROR_INVALID_HANDLE)
        lp_overlapped = (ctypes.byref(overlapped_structure)
                         if overlapped_structure else None)

        self._run_and_check_output(kernel32.CancelIoEx,
                                   handle,
                                   lp_overlapped,
                                   ignored_error_codes=ignored_error_codes)

    def _wait_io_completion(self, event):
        # In order to cancel this, we simply set the event.
        # Alertable wait, so queued completion routines can run.
        self._run_and_check_output(kernel32.WaitForSingleObjectEx,
                                   event, WAIT_INFINITE_TIMEOUT,
                                   True, error_ret_vals=[WAIT_FAILED])

    def set_event(self, event):
        self._run_and_check_output(kernel32.SetEvent, event)

    def _reset_event(self, event):
        self._run_and_check_output(kernel32.ResetEvent, event)

    def _create_event(self, event_attributes=None, manual_reset=True,
                      initial_state=False, name=None):
        # CreateEventW returns NULL on failure, hence error_ret_vals.
        return self._run_and_check_output(kernel32.CreateEventW,
                                          event_attributes, manual_reset,
                                          initial_state, name,
                                          error_ret_vals=[None])

    def get_completion_routine(self, callback=None):
        def _completion_routine(error_code, num_bytes, lpOverLapped):
            """Sets the completion event and executes callback, if passed."""
            overlapped = ctypes.cast(lpOverLapped, LPOVERLAPPED).contents
            self.set_event(overlapped.hEvent)

            if callback:
                callback(num_bytes)

        # Keep a ctypes function pointer; caller must hold a reference
        # for as long as the routine may be invoked.
        return LPOVERLAPPED_COMPLETION_ROUTINE(_completion_routine)

    def get_new_overlapped_structure(self):
        """Structure used for asynchronous IO operations."""
        # Event used for signaling IO completion
        hEvent = self._create_event()

        overlapped_structure = OVERLAPPED()
        overlapped_structure.hEvent = hEvent
        return overlapped_structure

    def read(self, handle, buff, num_bytes,
             overlapped_structure, completion_routine):
        # Reset the completion event, issue the overlapped read, then
        # block (alertably) until the completion routine sets the event.
        self._reset_event(overlapped_structure.hEvent)
        self._run_and_check_output(kernel32.ReadFileEx,
                                   handle, buff, num_bytes,
                                   ctypes.byref(overlapped_structure),
                                   completion_routine)
        self._wait_io_completion(overlapped_structure.hEvent)

    def write(self, handle, buff, num_bytes,
              overlapped_structure, completion_routine):
        # Same pattern as read(), using WriteFileEx.
        self._reset_event(overlapped_structure.hEvent)
        self._run_and_check_output(kernel32.WriteFileEx,
                                   handle, buff, num_bytes,
                                   ctypes.byref(overlapped_structure),
                                   completion_routine)
        self._wait_io_completion(overlapped_structure.hEvent)

    @classmethod
    def get_buffer(cls, buff_size, data=None):
        # Raw ctypes byte buffer, optionally pre-filled with `data`.
        buff = (ctypes.c_ubyte * buff_size)()
        if data:
            cls.write_buffer_data(buff, data)
        return buff

    @staticmethod
    def get_buffer_data(buff, num_bytes):
        # Copy the first num_bytes of the ctypes buffer out as bytes.
        return bytes(bytearray(buff[:num_bytes]))

    @staticmethod
    def write_buffer_data(buff, data):
        # Byte-wise copy; six.b keeps this Python 2/3 compatible.
        for i, c in enumerate(data):
            buff[i] = struct.unpack('B', six.b(c))[0]
|
||||
|
||||
|
||||
class IOQueue(Queue.Queue):
    """Queue whose blocking operations give up once the client
    disconnects (i.e. the `client_connected` event is cleared).
    """

    def __init__(self, client_connected):
        Queue.Queue.__init__(self)
        self._client_connected = client_connected

    def get(self, timeout=IO_QUEUE_TIMEOUT, continue_on_timeout=True):
        """Pop an item, retrying on timeout while the client is connected.

        Returns None once the client disconnects (or on a timeout when
        `continue_on_timeout` is False).
        """
        while self._client_connected.isSet():
            try:
                return Queue.Queue.get(self, timeout=timeout)
            except Queue.Empty:
                if not continue_on_timeout:
                    break

    def put(self, item, timeout=IO_QUEUE_TIMEOUT):
        """Push an item, retrying on timeout while the client is connected."""
        while self._client_connected.isSet():
            try:
                return Queue.Queue.put(self, item, timeout=timeout)
            except Queue.Full:
                pass

    def get_burst(self, timeout=IO_QUEUE_TIMEOUT,
                  burst_timeout=IO_QUEUE_BURST_TIMEOUT,
                  max_size=constants.SERIAL_CONSOLE_BUFFER_SIZE):
        """Drain as much queued data as possible into one chunk,
        avoiding sending many small pieces.
        """
        data = self.get(timeout=timeout)

        while data and len(data) <= max_size:
            extra_chunk = self.get(timeout=burst_timeout,
                                   continue_on_timeout=False)
            if not extra_chunk:
                break
            data += extra_chunk
        return data
|
@ -1,253 +0,0 @@
|
||||
# Copyright 2015 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import errno
|
||||
import os
|
||||
|
||||
from eventlet import patcher
|
||||
from oslo_log import log as logging
|
||||
|
||||
from os_win._i18n import _
|
||||
from os_win import constants
|
||||
from os_win import exceptions
|
||||
from os_win.utils.io import ioutils
|
||||
|
||||
threading = patcher.original('threading')
|
||||
time = patcher.original('time')
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class NamedPipeHandler(object):
    """Handles asynchronous I/O operations on a specified named pipe."""

    # Max retries when a log file is locked during rotation.
    _MAX_LOG_ROTATE_RETRIES = 5

    def __init__(self, pipe_name, input_queue=None, output_queue=None,
                 connect_event=None, log_file=None):
        self._pipe_name = pipe_name
        self._input_queue = input_queue
        self._output_queue = output_queue
        self._log_file_path = log_file

        self._connect_event = connect_event
        self._stopped = threading.Event()
        self._workers = []
        self._pipe_handle = None
        # Serializes handle cleanup between the reader/writer workers.
        self._lock = threading.Lock()

        self._ioutils = ioutils.IOUtils()

        self._setup_io_structures()

    def start(self):
        """Open the pipe and spawn the reader (and optionally writer)
        worker threads.

        :raises exceptions.OSWinException: if initialization fails.
        """
        try:
            self._open_pipe()

            if self._log_file_path:
                # Line-buffered append, so console logs land promptly.
                self._log_file_handle = open(self._log_file_path, 'ab', 1)

            jobs = [self._read_from_pipe]
            # Writing requires both an input queue and a connect event.
            if self._input_queue and self._connect_event:
                jobs.append(self._write_to_pipe)

            for job in jobs:
                worker = threading.Thread(target=job)
                worker.setDaemon(True)
                worker.start()
                self._workers.append(worker)
        except Exception as err:
            msg = (_("Named pipe handler failed to initialize. "
                     "Pipe Name: %(pipe_name)s "
                     "Error: %(err)s") %
                   {'pipe_name': self._pipe_name,
                    'err': err})
            LOG.error(msg)
            self.stop()
            raise exceptions.OSWinException(msg)

    def stop(self):
        self._stopped.set()

        # If any worker has been spawned already, we rely on it to have
        # cleaned up the handles before ending its execution.
        # Note that we expect the caller to synchronize the start/stop calls.
        if not self._workers:
            self._cleanup_handles()

        for worker in self._workers:
            # It may happen that another IO request was issued right after
            # we've set the stopped event and canceled pending requests.
            # In this case, retrying will ensure that the IO workers are
            # stopped properly and that there are no more outstanding IO
            # operations.
            while (worker.is_alive() and
                   worker is not threading.current_thread()):
                self._cancel_io()
                worker.join(0.5)

        self._workers = []

    def _cleanup_handles(self):
        # Close the pipe, log file and both completion events.
        self._close_pipe()

        if self._log_file_handle:
            self._log_file_handle.close()
            self._log_file_handle = None

        if self._r_overlapped.hEvent:
            self._ioutils.close_handle(self._r_overlapped.hEvent)
            self._r_overlapped.hEvent = None

        if self._w_overlapped.hEvent:
            self._ioutils.close_handle(self._w_overlapped.hEvent)
            self._w_overlapped.hEvent = None

    def _setup_io_structures(self):
        # Separate buffers / overlapped structures for read and write,
        # so each direction can be canceled independently.
        self._r_buffer = self._ioutils.get_buffer(
            constants.SERIAL_CONSOLE_BUFFER_SIZE)
        self._w_buffer = self._ioutils.get_buffer(
            constants.SERIAL_CONSOLE_BUFFER_SIZE)

        self._r_overlapped = self._ioutils.get_new_overlapped_structure()
        self._w_overlapped = self._ioutils.get_new_overlapped_structure()

        self._r_completion_routine = self._ioutils.get_completion_routine(
            self._read_callback)
        self._w_completion_routine = self._ioutils.get_completion_routine()

        self._log_file_handle = None

    def _open_pipe(self):
        """Opens a named pipe in overlapped mode for asynchronous I/O."""
        self._ioutils.wait_named_pipe(self._pipe_name)

        self._pipe_handle = self._ioutils.open(
            self._pipe_name,
            desired_access=(ioutils.GENERIC_READ | ioutils.GENERIC_WRITE),
            share_mode=(ioutils.FILE_SHARE_READ | ioutils.FILE_SHARE_WRITE),
            creation_disposition=ioutils.OPEN_EXISTING,
            flags_and_attributes=ioutils.FILE_FLAG_OVERLAPPED)

    def _close_pipe(self):
        if self._pipe_handle:
            self._ioutils.close_handle(self._pipe_handle)
            self._pipe_handle = None

    def _cancel_io(self):
        if self._pipe_handle:
            # We ignore invalid handle errors. Even if the pipe is closed
            # and the handle reused, by specifing the overlapped structures
            # we ensure that we don't cancel IO operations other than the
            # ones that we care about.
            self._ioutils.cancel_io(self._pipe_handle, self._r_overlapped,
                                    ignore_invalid_handle=True)
            self._ioutils.cancel_io(self._pipe_handle, self._w_overlapped,
                                    ignore_invalid_handle=True)

    def _read_from_pipe(self):
        # Reader worker: pipe -> output queue / log file.
        self._start_io_worker(self._ioutils.read,
                              self._r_buffer,
                              self._r_overlapped,
                              self._r_completion_routine)

    def _write_to_pipe(self):
        # Writer worker: input queue -> pipe.
        self._start_io_worker(self._ioutils.write,
                              self._w_buffer,
                              self._w_overlapped,
                              self._w_completion_routine,
                              self._get_data_to_write)

    def _start_io_worker(self, func, buff, overlapped_structure,
                         completion_routine, buff_update_func=None):
        # Generic worker loop shared by the reader and the writer.
        try:
            while not self._stopped.isSet():
                if buff_update_func:
                    num_bytes = buff_update_func()
                    if not num_bytes:
                        continue
                else:
                    num_bytes = len(buff)

                func(self._pipe_handle, buff, num_bytes,
                     overlapped_structure, completion_routine)
        except Exception:
            # Any I/O failure stops both workers.
            self._stopped.set()
        finally:
            with self._lock:
                self._cleanup_handles()

    def _read_callback(self, num_bytes):
        # Invoked by the read completion routine with the bytes read.
        data = self._ioutils.get_buffer_data(self._r_buffer,
                                             num_bytes)
        if self._output_queue:
            self._output_queue.put(data)

        if self._log_file_handle:
            self._write_to_log(data)

    def _get_data_to_write(self):
        # Block until a client connects (or we are stopped), then fill
        # the write buffer with the next queued chunk.
        while not (self._stopped.isSet() or self._connect_event.isSet()):
            time.sleep(1)

        data = self._input_queue.get()
        if data:
            self._ioutils.write_buffer_data(self._w_buffer, data)
            return len(data)
        return 0

    def _write_to_log(self, data):
        if self._stopped.isSet():
            return

        try:
            log_size = self._log_file_handle.tell() + len(data)
            if log_size >= constants.MAX_CONSOLE_LOG_FILE_SIZE:
                self._rotate_logs()
            self._log_file_handle.write(data)
        except Exception:
            # Logging failures stop the handler rather than raise from
            # inside a completion callback.
            self._stopped.set()

    def _rotate_logs(self):
        self._log_file_handle.flush()
        self._log_file_handle.close()

        log_archive_path = self._log_file_path + '.1'

        if os.path.exists(log_archive_path):
            self._retry_if_file_in_use(os.remove,
                                       log_archive_path)

        self._retry_if_file_in_use(os.rename,
                                   self._log_file_path,
                                   log_archive_path)

        self._log_file_handle = open(
            self._log_file_path, 'ab', 1)

    def _retry_if_file_in_use(self, f, *args, **kwargs):
        # The log files might be in use if the console log is requested
        # while a log rotation is attempted.
        retry_count = 0
        while True:
            try:
                return f(*args, **kwargs)
            except WindowsError as err:
                if (err.errno == errno.EACCES and
                        retry_count < self._MAX_LOG_ROTATE_RETRIES):
                    retry_count += 1
                    time.sleep(1)
                else:
                    raise
|
@ -1,199 +0,0 @@
|
||||
# Copyright 2015 Cloudbase Solutions Srl
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Base Utility class for operations on Hyper-V.
|
||||
"""
|
||||
|
||||
import sys
|
||||
import time
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
from os_win._i18n import _
|
||||
from os_win import _utils
|
||||
from os_win import constants
|
||||
from os_win import exceptions
|
||||
from os_win.utils import baseutils
|
||||
from os_win.utils import win32utils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
if sys.platform == 'win32':
|
||||
import wmi
|
||||
|
||||
|
||||
class JobUtils(baseutils.BaseUtilsVirt):
    """Utility class for handling Hyper-V WMI jobs."""

    _CONCRETE_JOB_CLASS = "Msvm_ConcreteJob"

    _DEFAULT_JOB_TERMINATE_TIMEOUT = 15  # seconds
    _KILL_JOB_STATE_CHANGE_REQUEST = 5
    _WBEM_E_NOT_FOUND = 0x80041002

    _completed_job_states = [constants.JOB_STATE_COMPLETED,
                             constants.JOB_STATE_TERMINATED,
                             constants.JOB_STATE_KILLED,
                             constants.JOB_STATE_COMPLETED_WITH_WARNINGS]

    def check_ret_val(self, ret_val, job_path, success_values=(0,)):
        """Validate a WMI method return value, waiting on async jobs.

        NOTE: the default was changed from the mutable list literal [0]
        to an equivalent tuple, avoiding the shared-mutable-default
        pitfall; callers passing their own sequence are unaffected.

        :param ret_val: the WMI method's return value.
        :param job_path: path of the job spawned by the method, if any.
        :param success_values: return values treated as success.
        :returns: the completed job object when a job was spawned,
            otherwise None.
        :raises exceptions.HyperVException: on a failure return value.
        """
        if ret_val in [constants.WMI_JOB_STATUS_STARTED,
                       constants.WMI_JOB_STATE_RUNNING]:
            return self._wait_for_job(job_path)
        elif ret_val not in success_values:
            raise exceptions.HyperVException(
                _('Operation failed with return value: %s') % ret_val)

    def _wait_for_job(self, job_path):
        """Poll WMI job state and wait for completion."""

        job_wmi_path = job_path.replace('\\', '/')
        job = self._get_wmi_obj(job_wmi_path)

        while job.JobState == constants.WMI_JOB_STATE_RUNNING:
            time.sleep(0.1)
            job = self._get_wmi_obj(job_wmi_path)

        if job.JobState != constants.WMI_JOB_STATE_COMPLETED:
            job_state = job.JobState
            # Consistency fix: use the declared _CONCRETE_JOB_CLASS
            # constant instead of repeating the string literal.
            if job.path().Class == self._CONCRETE_JOB_CLASS:
                err_sum_desc = job.ErrorSummaryDescription
                err_desc = job.ErrorDescription
                err_code = job.ErrorCode
                data = {'job_state': job_state,
                        'err_sum_desc': err_sum_desc,
                        'err_desc': err_desc,
                        'err_code': err_code}
                raise exceptions.HyperVException(
                    _("WMI job failed with status %(job_state)d. "
                      "Error details: %(err_sum_desc)s - %(err_desc)s - "
                      "Error code: %(err_code)d") % data)
            else:
                (error, ret_val) = job.GetError()
                if not ret_val and error:
                    data = {'job_state': job_state,
                            'error': error}
                    raise exceptions.HyperVException(
                        _("WMI job failed with status %(job_state)d. "
                          "Error details: %(error)s") % data)
                else:
                    raise exceptions.HyperVException(
                        _("WMI job failed with status %d. No error "
                          "description available") % job_state)
        desc = job.Description
        elap = job.ElapsedTime
        LOG.debug("WMI job succeeded: %(desc)s, Elapsed=%(elap)s",
                  {'desc': desc, 'elap': elap})
        return job

    def _get_pending_jobs_affecting_element(self, element,
                                            ignore_error_state=True):
        """Return the still-running jobs affecting the given element."""
        # Msvm_AffectedJobElement is in fact an association between
        # the affected element and the affecting job.
        mappings = self._conn.Msvm_AffectedJobElement(
            AffectedElement=element.path_())
        pending_jobs = [
            mapping.AffectingElement
            for mapping in mappings
            if (mapping.AffectingElement and not
                self._is_job_completed(mapping.AffectingElement,
                                       ignore_error_state))]
        return pending_jobs

    def _stop_jobs(self, element):
        """Request termination of the pending jobs affecting an element.

        :raises exceptions.JobTerminateFailed: if jobs are still pending
            after the termination requests.
        """
        pending_jobs = self._get_pending_jobs_affecting_element(
            element, ignore_error_state=False)
        for job in pending_jobs:
            try:
                if not job.Cancellable:
                    LOG.debug("Got request to terminate "
                              "non-cancelable job.")
                    continue
                elif job.JobState == constants.JOB_STATE_EXCEPTION:
                    LOG.debug("Attempting to terminate exception state job.")

                job.RequestStateChange(
                    self._KILL_JOB_STATE_CHANGE_REQUEST)
            except wmi.x_wmi as ex:
                hresult = win32utils.Win32Utils.get_com_error_hresult(
                    ex.com_error)
                # The job may had been completed right before we've
                # attempted to kill it.
                if not hresult == self._WBEM_E_NOT_FOUND:
                    LOG.debug("Failed to stop job. Exception: %s", ex)

        pending_jobs = self._get_pending_jobs_affecting_element(element)
        if pending_jobs:
            LOG.debug("Attempted to terminate jobs "
                      "affecting element %(element)s but "
                      "%(pending_count)s jobs are still pending.",
                      dict(element=element,
                           pending_count=len(pending_jobs)))
            raise exceptions.JobTerminateFailed()

    def _is_job_completed(self, job, ignore_error_state=True):
        # A job in exception state counts as completed unless the caller
        # asked otherwise (e.g. when it wants to try terminating it).
        return (job.JobState in self._completed_job_states or
                (job.JobState == constants.JOB_STATE_EXCEPTION
                 and ignore_error_state))

    def stop_jobs(self, element, timeout=_DEFAULT_JOB_TERMINATE_TIMEOUT):
        """Stop the jobs affecting an element, retrying until `timeout`."""
        @_utils.retry_decorator(exceptions=exceptions.JobTerminateFailed,
                                timeout=timeout, max_retry_count=None)
        def _stop_jobs_with_timeout():
            self._stop_jobs(element)

        _stop_jobs_with_timeout()

    @_utils.retry_decorator(exceptions=exceptions.HyperVException)
    def add_virt_resource(self, virt_resource, parent):
        """Add a virtual resource setting to `parent`, returning the
        newly created resource paths.
        """
        (job_path, new_resources,
         ret_val) = self._vs_man_svc.AddResourceSettings(
            parent.path_(), [virt_resource.GetText_(1)])
        self.check_ret_val(ret_val, job_path)
        return new_resources

    # modify_virt_resource can fail, especially while setting up the VM's
    # serial port connection. Retrying the operation will yield success.
    @_utils.retry_decorator(exceptions=exceptions.HyperVException)
    def modify_virt_resource(self, virt_resource):
        (job_path, out_set_data,
         ret_val) = self._vs_man_svc.ModifyResourceSettings(
            ResourceSettings=[virt_resource.GetText_(1)])
        self.check_ret_val(ret_val, job_path)

    @_utils.retry_decorator(exceptions=exceptions.HyperVException)
    def remove_virt_resource(self, virt_resource):
        (job, ret_val) = self._vs_man_svc.RemoveResourceSettings(
            ResourceSettings=[virt_resource.path_()])
        self.check_ret_val(ret_val, job)

    def add_virt_feature(self, virt_feature, parent):
        # Convenience wrapper over add_multiple_virt_features.
        self.add_multiple_virt_features([virt_feature], parent)

    @_utils.retry_decorator(exceptions=exceptions.HyperVException)
    def add_multiple_virt_features(self, virt_features, parent):
        (job_path, out_set_data,
         ret_val) = self._vs_man_svc.AddFeatureSettings(
            parent.path_(), [f.GetText_(1) for f in virt_features])
        self.check_ret_val(ret_val, job_path)

    def remove_virt_feature(self, virt_feature):
        # Convenience wrapper over remove_multiple_virt_features.
        self.remove_multiple_virt_features([virt_feature])

    def remove_multiple_virt_features(self, virt_features):
        (job_path, ret_val) = self._vs_man_svc.RemoveFeatureSettings(
            FeatureSettings=[f.path_() for f in virt_features])
        self.check_ret_val(ret_val, job_path)
|
@ -1,286 +0,0 @@
|
||||
# Copyright 2016 Cloudbase Solutions Srl
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
Utility class for metrics related operations.
|
||||
Based on the "root/virtualization/v2" namespace available starting with
|
||||
Hyper-V Server / Windows Server 2012.
|
||||
"""
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
from os_win._i18n import _, _LW
|
||||
from os_win import exceptions
|
||||
from os_win.utils import _wqlutils
|
||||
from os_win.utils import baseutils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MetricsUtils(baseutils.BaseUtilsVirt):
|
||||
|
||||
_VIRTUAL_SYSTEM_TYPE_REALIZED = 'Microsoft:Hyper-V:System:Realized'
|
||||
_DVD_DISK_RES_SUB_TYPE = 'Microsoft:Hyper-V:Virtual CD/DVD Disk'
|
||||
_STORAGE_ALLOC_SETTING_DATA_CLASS = 'Msvm_StorageAllocationSettingData'
|
||||
_PROCESSOR_SETTING_DATA_CLASS = 'Msvm_ProcessorSettingData'
|
||||
_SYNTH_ETH_PORT_SET_DATA = 'Msvm_SyntheticEthernetPortSettingData'
|
||||
_PORT_ALLOC_SET_DATA = 'Msvm_EthernetPortAllocationSettingData'
|
||||
_PORT_ALLOC_ACL_SET_DATA = 'Msvm_EthernetSwitchPortAclSettingData'
|
||||
_BASE_METRICS_VALUE = 'Msvm_BaseMetricValue'
|
||||
|
||||
_CPU_METRICS = 'Aggregated Average CPU Utilization'
|
||||
_MEMORY_METRICS = 'Aggregated Average Memory Utilization'
|
||||
_NET_IN_METRICS = 'Filtered Incoming Network Traffic'
|
||||
_NET_OUT_METRICS = 'Filtered Outgoing Network Traffic'
|
||||
# Disk metrics are supported from Hyper-V 2012 R2
|
||||
_DISK_RD_METRICS = 'Disk Data Read'
|
||||
_DISK_WR_METRICS = 'Disk Data Written'
|
||||
_DISK_LATENCY_METRICS = 'Average Disk Latency'
|
||||
_DISK_IOPS_METRICS = 'Average Normalized Disk Throughput'
|
||||
|
||||
_METRICS_ENABLED = 2
|
||||
|
||||
def __init__(self, host='.'):
    """Initialize the metrics utils.

    :param host: target host of the WMI connection ('.' means local).
    """
    super(MetricsUtils, self).__init__(host)
    # Lazily populated caches; see the _metrics_svc and _metrics_defs
    # properties below.
    self._metrics_svc_obj = None
    self._metrics_defs_obj = {}
|
||||
|
||||
@property
def _metrics_svc(self):
    """Lazily fetched Msvm_MetricService singleton WMI object."""
    if not self._metrics_svc_obj:
        self._metrics_svc_obj = self._compat_conn.Msvm_MetricService()[0]
    return self._metrics_svc_obj
|
||||
|
||||
@property
def _metrics_defs(self):
    """Mapping of metric ElementName -> metric definition WMI object.

    Populated on first access by _cache_metrics_defs.
    """
    if not self._metrics_defs_obj:
        self._cache_metrics_defs()
    return self._metrics_defs_obj
|
||||
|
||||
def _cache_metrics_defs(self):
    """Populate the metric definition cache, keyed by ElementName."""
    definitions = self._conn.CIM_BaseMetricDefinition()
    self._metrics_defs_obj.update(
        (definition.ElementName, definition) for definition in definitions)
|
||||
|
||||
def enable_vm_metrics_collection(self, vm_name):
    """Enable metrics collection for a VM's disks, CPU and memory.

    DVD drive resources are excluded, since only actual disks have
    meaningful I/O metrics.

    :param vm_name: the VM's ElementName.
    """
    vm = self._get_vm(vm_name)
    disks = self._get_vm_resources(vm_name,
                                   self._STORAGE_ALLOC_SETTING_DATA_CLASS)
    # BUG FIX: the original used "is not", which compares object
    # identity, not string equality, so DVD drives were never actually
    # filtered out. A value comparison (!=) is required here.
    filtered_disks = [d for d in disks if
                      d.ResourceSubType != self._DVD_DISK_RES_SUB_TYPE]

    # enable metrics for disk.
    for disk in filtered_disks:
        self._enable_metrics(disk)

    metrics_names = [self._CPU_METRICS, self._MEMORY_METRICS]
    self._enable_metrics(vm, metrics_names)
|
||||
|
||||
def enable_port_metrics_collection(self, switch_port_name):
    """Enable inbound/outbound traffic metrics on a vswitch port."""
    port = self._get_switch_port(switch_port_name)
    metrics_names = [self._NET_IN_METRICS, self._NET_OUT_METRICS]
    self._enable_metrics(port, metrics_names)
|
||||
|
||||
def _enable_metrics(self, element, metrics_names=None):
    """Turn on metric collection for *element*.

    :param element: WMI object the metrics apply to.
    :param metrics_names: names of the metric definitions to enable.
        If omitted, ControlMetrics is called with a null Definition —
        presumably enabling all metrics for the element; confirm
        against the Msvm_MetricService documentation.
    """
    if not metrics_names:
        definition_paths = [None]
    else:
        definition_paths = []
        for metrics_name in metrics_names:
            metrics_def = self._metrics_defs.get(metrics_name)
            # Metric definitions vary by host / Hyper-V version; skip
            # (with a warning) any that are not defined here.
            if not metrics_def:
                LOG.warning(_LW("Metric not found: %s"), metrics_name)
                continue
            definition_paths.append(metrics_def.path_())

    element_path = element.path_()
    for definition_path in definition_paths:
        self._metrics_svc.ControlMetrics(
            Subject=element_path,
            Definition=definition_path,
            MetricCollectionEnabled=self._METRICS_ENABLED)
|
||||
|
||||
def get_cpu_metrics(self, vm_name):
    """Return CPU usage information for the given VM.

    :returns: tuple of (aggregated average CPU utilization, virtual
        CPU count, VM uptime in milliseconds).
    """
    vm = self._get_vm(vm_name)
    cpu_sd = self._get_vm_resources(vm_name,
                                    self._PROCESSOR_SETTING_DATA_CLASS)[0]
    cpu_metrics_def = self._metrics_defs[self._CPU_METRICS]
    cpu_metrics_aggr = self._get_metrics(vm, cpu_metrics_def)

    # Defaults to 0 when no metric value has been collected yet.
    cpu_used = 0
    if cpu_metrics_aggr:
        cpu_used = int(cpu_metrics_aggr[0].MetricValue)

    return (cpu_used,
            int(cpu_sd.VirtualQuantity),
            int(vm.OnTimeInMilliseconds))
|
||||
|
||||
def get_memory_metrics(self, vm_name):
    """Return the aggregated average memory usage of the given VM.

    Returns 0 when no metric value has been collected yet.
    """
    vm = self._get_vm(vm_name)
    memory_def = self._metrics_defs[self._MEMORY_METRICS]
    collected = self._get_metrics(vm, memory_def)
    if not collected:
        return 0
    return int(collected[0].MetricValue)
|
||||
|
||||
def get_vnic_metrics(self, vm_name):
    """Yield network traffic metrics for each of the VM's vNICs.

    Yields dicts with 'rx_mb', 'tx_mb', 'element_name' and 'address'
    keys for every port allocation bound to a vNIC of the VM.
    """
    ports = self._get_vm_resources(vm_name, self._PORT_ALLOC_SET_DATA)
    vnics = self._get_vm_resources(vm_name, self._SYNTH_ETH_PORT_SET_DATA)

    metrics_def_in = self._metrics_defs[self._NET_IN_METRICS]
    metrics_def_out = self._metrics_defs[self._NET_OUT_METRICS]

    for port in ports:
        # The port's Parent is the path of its vNIC setting data.
        # NOTE(review): assumes every port has a matching vNIC;
        # [0] raises IndexError otherwise.
        vnic = [v for v in vnics if port.Parent == v.path_()][0]
        # Traffic metrics are maintained on the port's ACL setting
        # data, not on the port itself.
        port_acls = _wqlutils.get_element_associated_class(
            self._conn, self._PORT_ALLOC_ACL_SET_DATA,
            element_instance_id=port.InstanceID)

        metrics_value_instances = self._get_metrics_value_instances(
            port_acls, self._BASE_METRICS_VALUE)
        metrics_values = self._sum_metrics_values_by_defs(
            metrics_value_instances, [metrics_def_in, metrics_def_out])

        yield {
            'rx_mb': metrics_values[0],
            'tx_mb': metrics_values[1],
            'element_name': vnic.ElementName,
            'address': vnic.Address
        }
|
||||
|
||||
def get_disk_metrics(self, vm_name):
    """Yield read/write metrics for each disk of the given VM.

    Yields dicts with 'read_mb', 'write_mb', 'instance_id' and
    'host_resource' keys.
    """
    metrics_def_r = self._metrics_defs[self._DISK_RD_METRICS]
    metrics_def_w = self._metrics_defs[self._DISK_WR_METRICS]

    disks = self._get_vm_resources(vm_name,
                                   self._STORAGE_ALLOC_SETTING_DATA_CLASS)
    for disk in disks:
        metrics_values = self._get_metrics_values(
            disk, [metrics_def_r, metrics_def_w])

        yield {
            # Values are in megabytes
            'read_mb': metrics_values[0],
            'write_mb': metrics_values[1],
            'instance_id': disk.InstanceID,
            'host_resource': disk.HostResource[0]
        }
|
||||
|
||||
def get_disk_latency_metrics(self, vm_name):
    """Yield average disk latency metrics for each disk of the VM.

    Yields dicts with 'disk_latency' and 'instance_id' keys.
    """
    metrics_latency_def = self._metrics_defs[self._DISK_LATENCY_METRICS]

    disks = self._get_vm_resources(vm_name,
                                   self._STORAGE_ALLOC_SETTING_DATA_CLASS)
    for disk in disks:
        metrics_values = self._get_metrics_values(
            disk, [metrics_latency_def])

        yield {
            'disk_latency': metrics_values[0],
            'instance_id': disk.InstanceID,
        }
|
||||
|
||||
def get_disk_iops_count(self, vm_name):
    """Yield normalized IOPS metrics for each disk of the VM.

    Yields dicts with 'iops_count' and 'instance_id' keys.
    """
    metrics_def_iops = self._metrics_defs[self._DISK_IOPS_METRICS]

    disks = self._get_vm_resources(vm_name,
                                   self._STORAGE_ALLOC_SETTING_DATA_CLASS)
    for disk in disks:
        metrics_values = self._get_metrics_values(
            disk, [metrics_def_iops])

        yield {
            'iops_count': metrics_values[0],
            'instance_id': disk.InstanceID,
        }
|
||||
|
||||
@staticmethod
def _sum_metrics_values(metrics):
    """Return the integer sum of the metrics' MetricValue fields."""
    total = 0
    for metric in metrics:
        total += int(metric.MetricValue)
    return total
|
||||
|
||||
def _sum_metrics_values_by_defs(self, element_metrics, metrics_defs):
    """Return one summed value per definition in *metrics_defs*.

    A definition that is None/falsy (i.e. not defined on this host)
    contributes 0.
    """
    return [
        self._sum_metrics_values(
            self._filter_metrics(element_metrics, metrics_def))
        if metrics_def else 0
        for metrics_def in metrics_defs]
|
||||
|
||||
def _get_metrics_value_instances(self, elements, result_class):
    """Return the first metric value of class *result_class* per element.

    :param elements: WMI setting data objects to inspect.
    :param result_class: WMI class name the metric values must have.
    """
    instances = []
    for el in elements:
        # NOTE(abalutoiu): Msvm_MetricForME is the association between
        # an element and all the metric values maintained for it.
        el_metric = [
            x.Dependent for x in self._conn.Msvm_MetricForME(
                Antecedent=el.path_())]
        el_metric = [
            x for x in el_metric if x.path().Class == result_class]
        if el_metric:
            instances.append(el_metric[0])

    return instances
|
||||
|
||||
def _get_metrics_values(self, element, metrics_defs):
    """Return summed metric values of *element*, one per definition."""
    element_metrics = [
        x.Dependent for x in self._conn.Msvm_MetricForME(
            Antecedent=element.path_())]
    return self._sum_metrics_values_by_defs(element_metrics, metrics_defs)
|
||||
|
||||
def _get_metrics(self, element, metrics_def):
    """Return the metric values of *element* matching *metrics_def*."""
    metrics = [
        x.Dependent for x in self._conn.Msvm_MetricForME(
            Antecedent=element.path_())]
    return self._filter_metrics(metrics, metrics_def)
|
||||
|
||||
@staticmethod
def _filter_metrics(all_metrics, metrics_def):
    """Return only the metric values matching the given definition."""
    matching = []
    for metric_value in all_metrics:
        if metric_value.MetricDefinitionId == metrics_def.Id:
            matching.append(metric_value)
    return matching
|
||||
|
||||
def _get_vm_resources(self, vm_name, resource_class):
    """Return the VM's resources of the given WMI class."""
    setting_data = self._get_vm_setting_data(vm_name)
    return _wqlutils.get_element_associated_class(
        self._conn, resource_class,
        element_instance_id=setting_data.InstanceID)
|
||||
|
||||
def _get_vm(self, vm_name):
    """Return the single Msvm_ComputerSystem named *vm_name*.

    Raises via _unique_result when missing or duplicated.
    """
    vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
    return self._unique_result(vms, vm_name)
|
||||
|
||||
def _get_switch_port(self, port_name):
    """Return the single vNIC setting data named *port_name*."""
    ports = self._conn.Msvm_SyntheticEthernetPortSettingData(
        ElementName=port_name)
    return self._unique_result(ports, port_name)
|
||||
|
||||
def _get_vm_setting_data(self, vm_name):
    """Return the realized virtual system setting data of the VM."""
    vssds = self._conn.Msvm_VirtualSystemSettingData(
        ElementName=vm_name,
        VirtualSystemType=self._VIRTUAL_SYSTEM_TYPE_REALIZED)
    return self._unique_result(vssds, vm_name)
|
||||
|
||||
@staticmethod
def _unique_result(objects, resource_name):
    """Return the single element of *objects*.

    :raises exceptions.NotFound: when the list is empty.
    :raises exceptions.OSWinException: when more than one match exists.
    """
    if not objects:
        raise exceptions.NotFound(resource=resource_name)
    if len(objects) > 1:
        raise exceptions.OSWinException(
            _('Duplicate resource name found: %s') % resource_name)
    return objects[0]
|
@ -1,647 +0,0 @@
|
||||
# Copyright 2013 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Utility class for network related operations.
|
||||
Based on the "root/virtualization/v2" namespace available starting with
|
||||
Hyper-V Server / Windows Server 2012.
|
||||
"""
|
||||
import functools
|
||||
import re
|
||||
|
||||
from eventlet import patcher
|
||||
from eventlet import tpool
|
||||
import sys
|
||||
|
||||
if sys.platform == 'win32':
|
||||
import wmi
|
||||
|
||||
from os_win._i18n import _
|
||||
from os_win import exceptions
|
||||
from os_win.utils import _wqlutils
|
||||
from os_win.utils import baseutils
|
||||
from os_win.utils import jobutils
|
||||
|
||||
|
||||
class NetworkUtils(baseutils.BaseUtilsVirt):
|
||||
|
||||
EVENT_TYPE_CREATE = "__InstanceCreationEvent"
|
||||
EVENT_TYPE_DELETE = "__InstanceDeletionEvent"
|
||||
|
||||
_VNIC_SET_DATA = 'Msvm_SyntheticEthernetPortSettingData'
|
||||
_EXTERNAL_PORT = 'Msvm_ExternalEthernetPort'
|
||||
_ETHERNET_SWITCH_PORT = 'Msvm_EthernetSwitchPort'
|
||||
_PORT_ALLOC_SET_DATA = 'Msvm_EthernetPortAllocationSettingData'
|
||||
_PORT_VLAN_SET_DATA = 'Msvm_EthernetSwitchPortVlanSettingData'
|
||||
_PORT_SECURITY_SET_DATA = 'Msvm_EthernetSwitchPortSecuritySettingData'
|
||||
_PORT_ALLOC_ACL_SET_DATA = 'Msvm_EthernetSwitchPortAclSettingData'
|
||||
_PORT_EXT_ACL_SET_DATA = _PORT_ALLOC_ACL_SET_DATA
|
||||
_LAN_ENDPOINT = 'Msvm_LANEndpoint'
|
||||
_STATE_DISABLED = 3
|
||||
_OPERATION_MODE_ACCESS = 1
|
||||
|
||||
_VIRTUAL_SYSTEM_SETTING_DATA = 'Msvm_VirtualSystemSettingData'
|
||||
_VM_SUMMARY_ENABLED_STATE = 100
|
||||
_HYPERV_VM_STATE_ENABLED = 2
|
||||
|
||||
_ACL_DIR_IN = 1
|
||||
_ACL_DIR_OUT = 2
|
||||
|
||||
_ACL_TYPE_IPV4 = 2
|
||||
_ACL_TYPE_IPV6 = 3
|
||||
|
||||
_ACL_ACTION_ALLOW = 1
|
||||
_ACL_ACTION_DENY = 2
|
||||
_ACL_ACTION_METER = 3
|
||||
|
||||
_ACL_APPLICABILITY_LOCAL = 1
|
||||
_ACL_APPLICABILITY_REMOTE = 2
|
||||
|
||||
_ACL_DEFAULT = 'ANY'
|
||||
_IPV4_ANY = '0.0.0.0/0'
|
||||
_IPV6_ANY = '::/0'
|
||||
_TCP_PROTOCOL = 'tcp'
|
||||
_UDP_PROTOCOL = 'udp'
|
||||
_ICMP_PROTOCOL = '1'
|
||||
_ICMPV6_PROTOCOL = '58'
|
||||
_MAX_WEIGHT = 65500
|
||||
|
||||
# 2 directions x 2 address types = 4 ACLs
|
||||
_REJECT_ACLS_COUNT = 4
|
||||
|
||||
_VNIC_LISTENER_TIMEOUT_MS = 2000
|
||||
|
||||
_switches = {}
|
||||
_switch_ports = {}
|
||||
_vlan_sds = {}
|
||||
_vsid_sds = {}
|
||||
_sg_acl_sds = {}
|
||||
|
||||
def __init__(self):
    """Initialize the network utils and their job helper."""
    super(NetworkUtils, self).__init__()
    self._jobutils = jobutils.JobUtils()
|
||||
|
||||
def init_caches(self):
    """Populate the vswitch / switch port / VLAN / VSID caches.

    Note: the caches are class-level dicts, so they are shared by all
    NetworkUtils instances in the process.
    """
    for vswitch in self._conn.Msvm_VirtualEthernetSwitch():
        self._switches[vswitch.ElementName] = vswitch

    # map between switch port ID and switch port WMI object.
    for port in self._conn.Msvm_EthernetPortAllocationSettingData():
        self._switch_ports[port.ElementName] = port

    # VLAN and VSID setting data's InstanceID will contain the switch
    # port's InstanceID.
    switch_port_id_regex = re.compile(
        "Microsoft:[0-9A-F-]*\\\\[0-9A-F-]*\\\\[0-9A-F-]",
        flags=re.IGNORECASE)

    # map between switch port's InstanceID and their VLAN setting data WMI
    # objects.
    for vlan_sd in self._conn.Msvm_EthernetSwitchPortVlanSettingData():
        match = switch_port_id_regex.match(vlan_sd.InstanceID)
        if match:
            self._vlan_sds[match.group()] = vlan_sd

    # map between switch port's InstanceID and their VSID setting data WMI
    # objects.
    for vsid_sd in self._conn.Msvm_EthernetSwitchPortSecuritySettingData():
        match = switch_port_id_regex.match(vsid_sd.InstanceID)
        if match:
            self._vsid_sds[match.group()] = vsid_sd
|
||||
|
||||
def update_cache(self):
    """Refresh the switch port cache (ElementName -> port object)."""
    refreshed = {}
    for port in self._conn.Msvm_EthernetPortAllocationSettingData():
        refreshed[port.ElementName] = port
    self._switch_ports = refreshed
|
||||
|
||||
def clear_port_sg_acls_cache(self, switch_port_name):
    """Drop the cached security group ACLs of the given port, if any."""
    self._sg_acl_sds.pop(switch_port_name, None)
|
||||
|
||||
def get_vswitch_id(self, vswitch_name):
    """Return the Name (GUID) of the vswitch with the given ElementName."""
    vswitch = self._get_vswitch(vswitch_name)
    return vswitch.Name
|
||||
|
||||
def get_vswitch_external_network_name(self, vswitch_name):
    """Return the external port's name for the vswitch, or None."""
    ext_port = self._get_vswitch_external_port(vswitch_name)
    if ext_port:
        return ext_port.ElementName
|
||||
|
||||
def _get_vswitch(self, vswitch_name):
    """Return the vswitch with the given ElementName, caching it.

    :raises exceptions.HyperVException: if no such vswitch exists.
    """
    if vswitch_name in self._switches:
        return self._switches[vswitch_name]

    vswitch = self._conn.Msvm_VirtualEthernetSwitch(
        ElementName=vswitch_name)
    if not len(vswitch):
        raise exceptions.HyperVException(_('VSwitch not found: %s') %
                                         vswitch_name)

    self._switches[vswitch_name] = vswitch[0]
    return vswitch[0]
|
||||
|
||||
def _get_vswitch_external_port(self, vswitch_name):
    """Return the external Ethernet port bound to the vswitch, or None.

    Walks external port -> SAP implementation -> active connection ->
    LAN endpoint, and matches the endpoint's SystemName against the
    vswitch's Name.
    """
    vswitch = self._get_vswitch(vswitch_name)
    ext_ports = self._conn.Msvm_ExternalEthernetPort()
    for ext_port in ext_ports:
        lan_endpoint_assoc_list = (
            self._conn.Msvm_EthernetDeviceSAPImplementation(
                Antecedent=ext_port.path_()))
        if lan_endpoint_assoc_list:
            lan_endpoint_assoc_list = self._conn.Msvm_ActiveConnection(
                Dependent=lan_endpoint_assoc_list[0].Dependent.path_())
            if lan_endpoint_assoc_list:
                lan_endpoint = lan_endpoint_assoc_list[0].Antecedent
                if lan_endpoint.SystemName == vswitch.Name:
                    return ext_port
|
||||
|
||||
def vswitch_port_needed(self):
    """Always False on this WMI namespace version."""
    return False
|
||||
|
||||
def get_switch_ports(self, vswitch_name):
    """Return the set of port Names belonging to the given vswitch."""
    vswitch = self._get_vswitch(vswitch_name)
    vswitch_ports = self._conn.Msvm_EthernetSwitchPort(
        SystemName=vswitch.Name)
    return set(p.Name for p in vswitch_ports)
|
||||
|
||||
def get_port_by_id(self, port_id, vswitch_name):
    """Return the switch port whose ElementName equals *port_id*.

    Returns None when no port of the vswitch matches.
    """
    vswitch = self._get_vswitch(vswitch_name)
    candidates = self._conn.Msvm_EthernetSwitchPort(
        SystemName=vswitch.Name)
    matching = [p for p in candidates if p.ElementName == port_id]
    if matching:
        return matching[0]
|
||||
|
||||
def vnic_port_exists(self, port_id):
    """Return True if vNIC setting data with the given name exists.

    The broad except is deliberate: any lookup failure is treated as
    "does not exist".
    """
    try:
        self._get_vnic_settings(port_id)
    except Exception:
        return False
    return True
|
||||
|
||||
def get_vnic_ids(self):
    """Return the set of all named vNIC ElementNames."""
    vnic_ids = set()
    for sd in self._conn.Msvm_SyntheticEthernetPortSettingData():
        if sd.ElementName is not None:
            vnic_ids.add(sd.ElementName)
    return vnic_ids
|
||||
|
||||
def get_vnic_mac_address(self, switch_port_name):
    """Return the MAC address of the given vNIC."""
    vnic = self._get_vnic_settings(switch_port_name)
    return vnic.Address
|
||||
|
||||
def _get_vnic_settings(self, vnic_name):
    """Return the vNIC setting data with the given ElementName.

    :raises exceptions.HyperVException: if no such vNIC exists.
    """
    vnic_settings = self._conn.Msvm_SyntheticEthernetPortSettingData(
        ElementName=vnic_name)
    if not vnic_settings:
        raise exceptions.HyperVException(
            message=_('Vnic not found: %s') % vnic_name)
    return vnic_settings[0]
|
||||
|
||||
def get_vnic_event_listener(self, event_type):
    """Return a blocking poller for vNIC creation/deletion events.

    :param event_type: EVENT_TYPE_CREATE or EVENT_TYPE_DELETE.
    :returns: a function taking a callback; it loops forever, invoking
        the callback with the ElementName of each event received.
    """
    query = self._get_event_wql_query(cls=self._VNIC_SET_DATA,
                                      event_type=event_type,
                                      timeframe=2)
    listener = self._conn.Msvm_SyntheticEthernetPortSettingData.watch_for(
        query)

    def _poll_events(callback):
        # When eventlet has monkey-patched threading, run the blocking
        # WMI wait in a native thread via tpool so greenlets are not
        # starved.
        if patcher.is_monkey_patched('thread'):
            listen = functools.partial(tpool.execute, listener,
                                       self._VNIC_LISTENER_TIMEOUT_MS)
        else:
            listen = functools.partial(listener,
                                       self._VNIC_LISTENER_TIMEOUT_MS)

        while True:
            # Retrieve one by one all the events that occurred in
            # the checked interval.
            try:
                event = listen()
                callback(event.ElementName)
            except wmi.x_wmi_timed_out:
                # no new event published.
                pass

    return _poll_events
|
||||
|
||||
def _get_event_wql_query(self, cls, event_type, timeframe=2, **where):
    """Return a WQL query used for polling WMI events.

    :param cls: the Hyper-V class polled for events.
    :param event_type: the type of event expected.
    :param timeframe: check for events that occurred in
        the specified timeframe.
    :param where: key-value arguments which are to be included in the
        query. For example: like=dict(foo="bar").
    """
    like = where.pop('like', {})
    like_str = " AND ".join("TargetInstance.%s LIKE '%s%%'" % (k, v)
                            for k, v in like.items())
    like_str = "AND " + like_str if like_str else ""

    query = ("SELECT * FROM %(event_type)s WITHIN %(timeframe)s "
             "WHERE TargetInstance ISA '%(class)s' %(like)s" % {
                 'class': cls,
                 'event_type': event_type,
                 'like': like_str,
                 'timeframe': timeframe})
    return query
|
||||
|
||||
def connect_vnic_to_vswitch(self, vswitch_name, switch_port_name):
    """Connect the named vNIC to the given vswitch.

    A no-op if the port already exists and is connected. Otherwise the
    port allocation is either created (added to the VM) or modified.
    """
    port, found = self._get_switch_port_allocation(
        switch_port_name, create=True, expected=False)
    if found and port.HostResource and port.HostResource[0]:
        # vswitch port already exists and is connected to vswitch.
        return

    vswitch = self._get_vswitch(vswitch_name)
    vnic = self._get_vnic_settings(switch_port_name)

    port.HostResource = [vswitch.path_()]
    port.Parent = vnic.path_()
    if not found:
        vm = self._get_vm_from_res_setting_data(vnic)
        self._jobutils.add_virt_resource(port, vm)
    else:
        self._jobutils.modify_virt_resource(port)
|
||||
|
||||
def _get_vm_from_res_setting_data(self, res_setting_data):
    """Return the VM owning the given resource setting data.

    The VM settings' InstanceID is the first backslash-separated
    component of the resource's InstanceID.
    """
    vmsettings_instance_id = res_setting_data.InstanceID.split('\\')[0]
    sd = self._conn.Msvm_VirtualSystemSettingData(
        InstanceID=vmsettings_instance_id)
    vm = self._conn.Msvm_ComputerSystem(Name=sd[0].ConfigurationID)
    return vm[0]
|
||||
|
||||
def remove_switch_port(self, switch_port_name, vnic_deleted=False):
    """Removes the switch port.

    :param vnic_deleted: when True, skip the WMI removal (the vNIC,
        and hence the port, is already gone) and only clean the caches.
    """
    sw_port, found = self._get_switch_port_allocation(switch_port_name,
                                                      expected=False)
    if not sw_port:
        # Port not found. It happens when the VM was already deleted.
        return

    if not vnic_deleted:
        try:
            self._jobutils.remove_virt_resource(sw_port)
        except wmi.x_wmi:
            # port may have already been destroyed by Hyper-V
            pass

    # Keep the caches consistent with the removal.
    self._switch_ports.pop(switch_port_name, None)
    self._vlan_sds.pop(sw_port.InstanceID, None)
    self._vsid_sds.pop(sw_port.InstanceID, None)
|
||||
|
||||
def set_vswitch_port_vlan_id(self, vlan_id, switch_port_name):
    """Set the access-mode VLAN id on the given switch port.

    Existing VLAN settings with a different value are removed and
    recreated, since they cannot be modified in place.

    :raises exceptions.HyperVException: if the VLAN setting cannot be
        found after being added.
    """
    port_alloc = self._get_switch_port_allocation(switch_port_name)[0]

    vlan_settings = self._get_vlan_setting_data_from_port_alloc(port_alloc)
    if vlan_settings:
        if (vlan_settings.OperationMode == self._OPERATION_MODE_ACCESS and
                vlan_settings.AccessVlanId == vlan_id):
            # VLAN already set to correct value, no need to change it.
            return

        # Removing the feature because it cannot be modified
        # due to a wmi exception.
        self._jobutils.remove_virt_feature(vlan_settings)

        # remove from cache.
        self._vlan_sds.pop(port_alloc.InstanceID, None)

    vlan_settings = self._create_default_setting_data(
        self._PORT_VLAN_SET_DATA)
    vlan_settings.AccessVlanId = vlan_id
    vlan_settings.OperationMode = self._OPERATION_MODE_ACCESS
    self._jobutils.add_virt_feature(vlan_settings, port_alloc)

    # TODO(claudiub): This will help solve the missing VLAN issue, but it
    # comes with a performance cost. The root cause of the problem must
    # be solved.
    vlan_settings = self._get_vlan_setting_data_from_port_alloc(port_alloc)
    if not vlan_settings:
        raise exceptions.HyperVException(
            _('Port VLAN not found: %s') % switch_port_name)
|
||||
|
||||
def set_vswitch_port_vsid(self, vsid, switch_port_name):
    """Set the virtual subnet id (VSID) on the given switch port.

    Existing security settings with a different VSID are removed and
    recreated, since they cannot be modified in place.

    :raises exceptions.HyperVException: if the VSID setting cannot be
        found after being added.
    """
    port_alloc = self._get_switch_port_allocation(switch_port_name)[0]

    vsid_settings = self._get_security_setting_data_from_port_alloc(
        port_alloc)

    if vsid_settings:
        if vsid_settings.VirtualSubnetId == vsid:
            # VSID already added, no need to readd it.
            return
        # Removing the feature because it cannot be modified
        # due to a wmi exception.
        self._jobutils.remove_virt_feature(vsid_settings)

        # remove from cache.
        self._vsid_sds.pop(port_alloc.InstanceID, None)

    vsid_settings = self._create_default_setting_data(
        self._PORT_SECURITY_SET_DATA)
    vsid_settings.VirtualSubnetId = vsid
    self._jobutils.add_virt_feature(vsid_settings, port_alloc)

    # TODO(claudiub): This will help solve the missing VSID issue, but it
    # comes with a performance cost. The root cause of the problem must
    # be solved.
    vsid_settings = self._get_security_setting_data_from_port_alloc(
        port_alloc)
    if not vsid_settings:
        raise exceptions.HyperVException(
            _('Port VSID not found: %s') % switch_port_name)
|
||||
|
||||
def _get_vlan_setting_data_from_port_alloc(self, port_alloc):
    """Return the port's VLAN setting data, using the VLAN cache."""
    return self._get_setting_data_from_port_alloc(
        port_alloc, self._vlan_sds, self._PORT_VLAN_SET_DATA)
|
||||
|
||||
def _get_security_setting_data_from_port_alloc(self, port_alloc):
    """Return the port's security setting data, using the VSID cache."""
    return self._get_setting_data_from_port_alloc(
        port_alloc, self._vsid_sds, self._PORT_SECURITY_SET_DATA)
|
||||
|
||||
def _get_setting_data_from_port_alloc(self, port_alloc, cache, data_class):
    """Return (and cache) the *data_class* setting data of the port.

    :param cache: dict keyed by port InstanceID; hit before WMI lookup.
    :returns: the setting data object, or None if not found.
    """
    if port_alloc.InstanceID in cache:
        return cache[port_alloc.InstanceID]

    setting_data = self._get_first_item(
        _wqlutils.get_element_associated_class(
            self._conn, data_class,
            element_instance_id=port_alloc.InstanceID))
    if setting_data:
        cache[port_alloc.InstanceID] = setting_data
    return setting_data
|
||||
|
||||
def _get_switch_port_allocation(self, switch_port_name, create=False,
                                expected=True):
    """Return (port allocation setting data, found) for the port name.

    :param create: create default setting data when no port exists.
    :param expected: raise HyperVPortNotFoundException when the port
        does not exist instead of returning (data, False).
    """
    if switch_port_name in self._switch_ports:
        return self._switch_ports[switch_port_name], True

    switch_port, found = self._get_setting_data(
        self._PORT_ALLOC_SET_DATA,
        switch_port_name, create)

    if found:
        # newly created setting data cannot be cached, they do not
        # represent real objects yet.
        # if it was found, it means that it was not created.
        self._switch_ports[switch_port_name] = switch_port
    elif expected:
        raise exceptions.HyperVPortNotFoundException(
            port_name=switch_port_name)
    return switch_port, found
|
||||
|
||||
def _get_setting_data(self, class_name, element_name, create=True):
    """Return (setting data, found) for the element of *class_name*.

    When not found and *create* is True, a default setting data object
    is created with the given ElementName (single quotes are replaced
    with double quotes, since they would break the WQL literal).
    """
    element_name = element_name.replace("'", '"')
    q = self._compat_conn.query("SELECT * FROM %(class_name)s WHERE "
                                "ElementName = '%(element_name)s'" %
                                {"class_name": class_name,
                                 "element_name": element_name})
    data = self._get_first_item(q)
    found = data is not None
    if not data and create:
        data = self._get_default_setting_data(class_name)
        data.ElementName = element_name
    return data, found
|
||||
|
||||
def _get_default_setting_data(self, class_name):
    """Return the class's default setting data instance (\\Default)."""
    return self._compat_conn.query("SELECT * FROM %s WHERE InstanceID "
                                   "LIKE '%%\\Default'" % class_name)[0]
|
||||
|
||||
def _create_default_setting_data(self, class_name):
    """Return a new, unsaved instance of the given WMI class."""
    return getattr(self._compat_conn, class_name).new()
|
||||
|
||||
def _get_first_item(self, obj):
    """Return the first element of *obj*, or None when empty/falsy."""
    if not obj:
        return None
    return obj[0]
|
||||
|
||||
def add_metrics_collection_acls(self, switch_port_name):
    """Ensure metering ACLs exist on the port (IPv4/IPv6 x in/out)."""
    port = self._get_switch_port_allocation(switch_port_name)[0]

    # Add the ACLs only if they don't already exist
    acls = _wqlutils.get_element_associated_class(
        self._conn, self._PORT_ALLOC_ACL_SET_DATA,
        element_instance_id=port.InstanceID)
    for acl_type in [self._ACL_TYPE_IPV4, self._ACL_TYPE_IPV6]:
        for acl_dir in [self._ACL_DIR_IN, self._ACL_DIR_OUT]:
            _acls = self._filter_acls(
                acls, self._ACL_ACTION_METER, acl_dir, acl_type)

            if not _acls:
                acl = self._create_acl(
                    acl_dir, acl_type, self._ACL_ACTION_METER)
                self._jobutils.add_virt_feature(acl, port)
|
||||
|
||||
def is_metrics_collection_allowed(self, switch_port_name):
    """Return True if the port's VM is running and meter ACLs exist.

    NOTE(review): the comment mentions 4 meter ACLs but the check only
    requires 2 — possibly intentional (one per direction); confirm.
    """
    port = self._get_switch_port_allocation(switch_port_name)[0]

    if not self._is_port_vm_started(port):
        return False

    # all 4 meter ACLs must be existent first. (2 x direction)
    acls = _wqlutils.get_element_associated_class(
        self._conn, self._PORT_ALLOC_ACL_SET_DATA,
        element_instance_id=port.InstanceID)
    acls = [a for a in acls if a.Action == self._ACL_ACTION_METER]
    if len(acls) < 2:
        return False
    return True
|
||||
|
||||
def _is_port_vm_started(self, port):
    """Return True if the VM owning the given port is running.

    :raises exceptions.HyperVException: if the VM summary information
        cannot be retrieved.
    """
    vmsettings_instance_id = port.InstanceID.split('\\')[0]
    vmsettings = self._conn.Msvm_VirtualSystemSettingData(
        InstanceID=vmsettings_instance_id)
    # See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx
    (ret_val, summary_info) = self._vs_man_svc.GetSummaryInformation(
        [self._VM_SUMMARY_ENABLED_STATE],
        [v.path_() for v in vmsettings])
    if ret_val or not summary_info:
        raise exceptions.HyperVException(_('Cannot get VM summary data '
                                           'for: %s') % port.ElementName)

    # BUG FIX: the original compared with "is", which tests object
    # identity and only works for CPython's cached small ints; use a
    # value comparison instead.
    return summary_info[0].EnabledState == self._HYPERV_VM_STATE_ENABLED
|
||||
|
||||
def create_security_rules(self, switch_port_name, sg_rules):
    """Bind the given security group rules to the named port."""
    port = self._get_switch_port_allocation(switch_port_name)[0]

    self._bind_security_rules(port, sg_rules)
|
||||
|
||||
def remove_security_rules(self, switch_port_name, sg_rules):
    """Remove the ACLs matching the given rules from the named port."""
    port = self._get_switch_port_allocation(switch_port_name)[0]

    acls = _wqlutils.get_element_associated_class(
        self._conn, self._PORT_EXT_ACL_SET_DATA,
        element_instance_id=port.InstanceID)
    remove_acls = []
    for sg_rule in sg_rules:
        filtered_acls = self._filter_security_acls(sg_rule, acls)
        remove_acls.extend(filtered_acls)

    if remove_acls:
        self._jobutils.remove_multiple_virt_features(remove_acls)

        # remove the old ACLs from the cache.
        new_acls = [a for a in acls if a not in remove_acls]
        self._sg_acl_sds[port.ElementName] = new_acls
|
||||
|
||||
def remove_all_security_rules(self, switch_port_name):
    """Remove all non-metering ACLs from the named port.

    Metering ACLs are preserved so traffic metrics keep working.
    """
    port = self._get_switch_port_allocation(switch_port_name)[0]

    acls = _wqlutils.get_element_associated_class(
        self._conn, self._PORT_EXT_ACL_SET_DATA,
        element_instance_id=port.InstanceID)
    # BUG FIX: the original used "is not", which tests object identity
    # and only works for CPython's cached small ints; metering ACLs
    # could wrongly be selected for removal. Use != instead.
    filtered_acls = [a for a in acls if
                     a.Action != self._ACL_ACTION_METER]

    if filtered_acls:
        self._jobutils.remove_multiple_virt_features(filtered_acls)

    # clear the cache.
    self._sg_acl_sds[port.ElementName] = []
|
||||
|
||||
def _bind_security_rules(self, port, sg_rules):
    """Create and attach ACLs for the given rules, skipping duplicates."""
    acls = _wqlutils.get_element_associated_class(
        self._conn, self._PORT_EXT_ACL_SET_DATA,
        element_instance_id=port.InstanceID)

    # Add an ACL only if it doesn't already exist.
    add_acls = []
    processed_sg_rules = []
    weights = self._get_new_weights(sg_rules, acls)
    index = 0

    for sg_rule in sg_rules:
        filtered_acls = self._filter_security_acls(sg_rule, acls)
        if filtered_acls:
            # ACL already exists.
            continue

        acl = self._create_security_acl(sg_rule, weights[index])
        add_acls.append(acl)
        index += 1

        # append sg_rule the acls list, to make sure that the same rule
        # is not processed twice.
        processed_sg_rules.append(sg_rule)

    if add_acls:
        self._jobutils.add_multiple_virt_features(add_acls, port)

        # caching the Security Group Rules that have been processed and
        # added to the port. The list should only be used to check the
        # existence of rules, nothing else.
        acls.extend(processed_sg_rules)
|
||||
|
||||
def _get_port_security_acls(self, port):
    """Returns a mutable list of Security Group Rule objects.

    Returns the list of Security Group Rule objects from the cache,
    otherwise it fetches and caches from the port's associated class.
    """

    if port.ElementName in self._sg_acl_sds:
        return self._sg_acl_sds[port.ElementName]

    acls = _wqlutils.get_element_associated_class(
        self._conn, self._PORT_EXT_ACL_SET_DATA,
        element_instance_id=port.InstanceID)
    self._sg_acl_sds[port.ElementName] = acls

    return acls
|
||||
|
||||
def _create_acl(self, direction, acl_type, action):
    """Return a new, unsaved ACL setting data with the given fields."""
    acl = self._create_default_setting_data(self._PORT_ALLOC_ACL_SET_DATA)
    acl.set(Direction=direction,
            AclType=acl_type,
            Action=action,
            Applicability=self._ACL_APPLICABILITY_LOCAL)
    return acl
|
||||
|
||||
def _create_security_acl(self, sg_rule, weight):
    """Return a new, unsaved ACL built from the given rule.

    Note: *weight* is unused here; the NetworkUtilsR2 override applies
    it.
    """
    # Acl instance can be created new each time, the object should be
    # of type ExtendedEthernetSettingsData.
    acl = self._create_default_setting_data(self._PORT_EXT_ACL_SET_DATA)
    acl.set(**sg_rule.to_dict())
    return acl
|
||||
|
||||
def _filter_acls(self, acls, action, direction, acl_type, remote_addr=""):
    """Return the ACLs matching all of the given attribute values."""
    matching = []
    for acl in acls:
        if (acl.Action == action and
                acl.Direction == direction and
                acl.AclType == acl_type and
                acl.RemoteAddress == remote_addr):
            matching.append(acl)
    return matching
|
||||
|
||||
def _filter_security_acls(self, sg_rule, acls):
|
||||
return [a for a in acls if sg_rule == a]
|
||||
|
||||
def _get_new_weights(self, sg_rules, existent_acls):
|
||||
"""Computes the weights needed for given sg_rules.
|
||||
|
||||
:param sg_rules: ACLs to be added. They must have the same Action.
|
||||
:existent_acls: ACLs already bound to a switch port.
|
||||
:return: list of weights which will be used to create ACLs. List will
|
||||
have the recommended order for sg_rules' Action.
|
||||
"""
|
||||
return [0] * len(sg_rules)
|
||||
|
||||
|
||||
class NetworkUtilsR2(NetworkUtils):
    """Network utility variant using extended, weighted port ACLs.

    Differs from the base class by assigning explicit weights to ACLs:
    deny rules get the lowest weights, allow rules the highest, so that
    deny rules take precedence.
    """

    _PORT_EXT_ACL_SET_DATA = 'Msvm_EthernetSwitchPortExtendedAclSettingData'
    # Upper bound used when allocating weights for allow rules
    # (allocation counts downwards from _MAX_WEIGHT - 1).
    _MAX_WEIGHT = 65500

    # 2 directions x 2 address types x 4 protocols = 16 ACLs
    _REJECT_ACLS_COUNT = 16

    def _create_security_acl(self, sg_rule, weight):
        """Create an extended ACL and stamp the weight on it.

        The weight is also written back onto the originating security
        group rule object.
        """
        acl = super(NetworkUtilsR2, self)._create_security_acl(sg_rule,
                                                               weight)
        acl.Weight = weight
        sg_rule.Weight = weight
        return acl

    def _get_new_weights(self, sg_rules, existent_acls):
        """Compute unused ACL weights for the given security group rules.

        :param sg_rules: ACLs to be added. They must share the same
            Action (only the first rule's Action is inspected).
        :param existent_acls: ACLs already bound to the switch port.
        :return: a list of len(sg_rules) weights, none of which collide
            with the weights already in use for the same Action.
        """
        sg_rule = sg_rules[0]
        num_rules = len(sg_rules)
        # Only ACLs with the same Action compete for the same weight range.
        existent_acls = [a for a in existent_acls
                         if a.Action == sg_rule.Action]
        if not existent_acls:
            if sg_rule.Action == self._ACL_ACTION_DENY:
                # Deny rules occupy the low end: 1..num_rules.
                return list(range(1, 1 + num_rules))
            else:
                # Allow rules count down from the top of the range.
                return list(range(self._MAX_WEIGHT - 1,
                                  self._MAX_WEIGHT - 1 - num_rules, - 1))

        # there are existent ACLs.
        weights = [a.Weight for a in existent_acls]
        if sg_rule.Action == self._ACL_ACTION_DENY:
            # Pick the first free slots within the reserved deny range.
            return [i for i in list(range(1, self._REJECT_ACLS_COUNT + 1))
                    if i not in weights][:num_rules]

        min_weight = min(weights)
        last_weight = min_weight - num_rules - 1
        if last_weight > self._REJECT_ACLS_COUNT:
            # Enough contiguous room below the lowest allow weight while
            # staying clear of the deny range.
            return list(range(min_weight - 1, last_weight, - 1))

        # not enough weights. Must search for available weights.
        # if it is this case, num_rules is a small number.
        current_weight = self._MAX_WEIGHT - 1
        new_weights = []
        for i in list(range(num_rules)):
            while current_weight in weights:
                current_weight -= 1
            new_weights.append(current_weight)

        return new_weights
|
@ -1,173 +0,0 @@
|
||||
# Copyright 2015 Cloudbase Solutions SRL
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
from os_win._i18n import _, _LI, _LW, _LE # noqa
|
||||
from os_win import constants
|
||||
from os_win import exceptions
|
||||
from os_win.utils import baseutils
|
||||
from os_win.utils.network import networkutils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class NvgreUtils(baseutils.BaseUtils):
    """NVGRE (network virtualization) helper.

    Manages provider addresses, provider/customer routes and lookup
    records through the MSFT_NetVirtualization* WMI classes in the
    root/StandardCimv2 namespace.
    """

    _HYPERV_VIRT_ADAPTER = 'Hyper-V Virtual Ethernet Adapter'
    # MSFT_NetIPAddress AddressFamily value for IPv4.
    _IPV4_ADDRESS_FAMILY = 2

    # Lookup record 'Rule' values (NAT vs. encapsulation).
    _TRANSLATE_NAT = 0
    _TRANSLATE_ENCAP = 1

    # Lookup record 'Type' values.
    _LOOKUP_RECORD_TYPE_STATIC = 0
    _LOOKUP_RECORD_TYPE_L2_ONLY = 3

    _STDCIMV2_NAMESPACE = '//./root/StandardCimv2'

    def __init__(self):
        super(NvgreUtils, self).__init__()
        self._utils = networkutils.NetworkUtils()
        # Cache of network name -> interface index lookups.
        self._net_if_indexes = {}
        self._scimv2 = self._get_wmi_conn(moniker=self._STDCIMV2_NAMESPACE)

    def create_provider_address(self, network_name, provider_vlan_id):
        """Ensure a provider address exists for the given network / VLAN.

        If an address exists with a different VLAN ID or interface index,
        it is deleted and recreated.

        :raises exceptions.NotFound: if the network has no usable IP.
        """
        iface_index = self._get_network_iface_index(network_name)
        (provider_addr, prefix_len) = self.get_network_iface_ip(network_name)

        if not provider_addr:
            # logging is already provided by get_network_iface_ip.
            raise exceptions.NotFound(resource=network_name)

        provider = (
            self._scimv2.MSFT_NetVirtualizationProviderAddressSettingData(
                ProviderAddress=provider_addr))

        if provider:
            if (provider[0].VlanID == provider_vlan_id and
                    provider[0].InterfaceIndex == iface_index):
                # ProviderAddress already exists.
                return
            # ProviderAddress exists, but with different VlanID or iface index.
            provider[0].Delete_()

        self._create_new_object(
            self._scimv2.MSFT_NetVirtualizationProviderAddressSettingData,
            ProviderAddress=provider_addr,
            VlanID=provider_vlan_id,
            InterfaceIndex=iface_index,
            PrefixLength=prefix_len)

    def create_provider_route(self, network_name):
        """Create a default provider route for the network, if missing."""
        iface_index = self._get_network_iface_index(network_name)

        routes = self._scimv2.MSFT_NetVirtualizationProviderRouteSettingData(
            InterfaceIndex=iface_index, NextHop=constants.IPV4_DEFAULT)

        if not routes:
            self._create_new_object(
                self._scimv2.MSFT_NetVirtualizationProviderRouteSettingData,
                InterfaceIndex=iface_index,
                DestinationPrefix='%s/0' % constants.IPV4_DEFAULT,
                NextHop=constants.IPV4_DEFAULT)

    def clear_customer_routes(self, vsid):
        """Delete all customer routes of the given virtual subnet."""
        routes = self._scimv2.MSFT_NetVirtualizationCustomerRouteSettingData(
            VirtualSubnetID=vsid)

        for route in routes:
            route.Delete_()

    def create_customer_route(self, vsid, dest_prefix, next_hop, rdid_uuid):
        """Create a customer route in the given routing domain."""
        self._create_new_object(
            self._scimv2.MSFT_NetVirtualizationCustomerRouteSettingData,
            VirtualSubnetID=vsid,
            DestinationPrefix=dest_prefix,
            NextHop=next_hop,
            Metric=255,
            RoutingDomainID='{%s}' % rdid_uuid)

    def create_lookup_record(self, provider_addr, customer_addr, mac, vsid):
        """Ensure a lookup record mapping customer -> provider address.

        An existing record with matching subnet, provider address and MAC
        is left as is; any other record for the same customer address is
        replaced.
        """
        # check for existing entry.
        lrec = self._scimv2.MSFT_NetVirtualizationLookupRecordSettingData(
            CustomerAddress=customer_addr, VirtualSubnetID=vsid)
        if (lrec and lrec[0].VirtualSubnetID == vsid and
                lrec[0].ProviderAddress == provider_addr and
                lrec[0].MACAddress == mac):
            # lookup record already exists, nothing to do.
            return

        # create new lookup record.
        if lrec:
            lrec[0].Delete_()

        if constants.IPV4_DEFAULT == customer_addr:
            # customer address used for DHCP requests.
            record_type = self._LOOKUP_RECORD_TYPE_L2_ONLY
        else:
            record_type = self._LOOKUP_RECORD_TYPE_STATIC

        self._create_new_object(
            self._scimv2.MSFT_NetVirtualizationLookupRecordSettingData,
            VirtualSubnetID=vsid,
            Rule=self._TRANSLATE_ENCAP,
            Type=record_type,
            MACAddress=mac,
            CustomerAddress=customer_addr,
            ProviderAddress=provider_addr)

    def _create_new_object(self, object_class, **args):
        """Instantiate and persist a new WMI object of the given class."""
        new_obj = object_class.new(**args)
        new_obj.Put_()
        return new_obj

    def _get_network_ifaces_by_name(self, network_name):
        """Return adapters whose Name contains the given network name."""
        return [n for n in self._scimv2.MSFT_NetAdapter() if
                n.Name.find(network_name) >= 0]

    def _get_network_iface_index(self, network_name):
        """Return (and cache) the interface index of the given network.

        :raises exceptions.NotFound: if no adapter matches the vswitch's
            external network description.
        """
        if self._net_if_indexes.get(network_name):
            return self._net_if_indexes[network_name]

        description = (
            self._utils.get_vswitch_external_network_name(network_name))

        # physical NIC and vswitch must have the same MAC address.
        networks = self._scimv2.MSFT_NetAdapter(
            InterfaceDescription=description)

        if not networks:
            raise exceptions.NotFound(resource=network_name)

        self._net_if_indexes[network_name] = networks[0].InterfaceIndex
        return networks[0].InterfaceIndex

    def get_network_iface_ip(self, network_name):
        """Return the (IPv4 address, prefix length) of the given vswitch.

        Returns (None, None) when the vswitch or its IPv4 address cannot
        be found; failures are logged, not raised.
        """
        networks = [n for n in self._get_network_ifaces_by_name(network_name)
                    if n.DriverDescription == self._HYPERV_VIRT_ADAPTER]

        if not networks:
            LOG.error(_LE('No vswitch was found with name: %s'), network_name)
            return None, None

        ip_addr = self._scimv2.MSFT_NetIPAddress(
            InterfaceIndex=networks[0].InterfaceIndex,
            AddressFamily=self._IPV4_ADDRESS_FAMILY)

        if not ip_addr:
            LOG.error(_LE('No IP Address could be found for network: %s'),
                      network_name)
            return None, None

        return ip_addr[0].IPAddress, ip_addr[0].PrefixLength
|
@ -1,177 +0,0 @@
|
||||
# Copyright 2013 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import contextlib
|
||||
import ctypes
|
||||
import os
|
||||
import shutil
|
||||
import sys
|
||||
import tempfile
|
||||
import time
|
||||
|
||||
if sys.platform == 'win32':
|
||||
from ctypes import wintypes
|
||||
kernel32 = ctypes.windll.kernel32
|
||||
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils import fileutils
|
||||
import six
|
||||
|
||||
from os_win._i18n import _
|
||||
from os_win import exceptions
|
||||
from os_win.utils import win32utils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
# Win32 error code raised when removing a directory that is not empty.
ERROR_DIR_IS_NOT_EMPTY = 145
|
||||
|
||||
|
||||
class PathUtils(object):
    """Filesystem path helpers, including Win32-specific operations."""

    # FILE_ATTRIBUTE_REPARSE_POINT: set on junctions and symbolic links.
    _FILE_ATTRIBUTE_REPARSE_POINT = 0x0400

    def __init__(self):
        self._win32_utils = win32utils.Win32Utils()

    def open(self, path, mode):
        """Wrapper on __builtin__.open used to simplify unit testing."""
        from six.moves import builtins
        return builtins.open(path, mode)

    def exists(self, path):
        """Return True if the given path exists."""
        return os.path.exists(path)

    def makedirs(self, path):
        """Recursively create the given directory."""
        os.makedirs(path)

    def remove(self, path):
        """Delete the given file."""
        os.remove(path)

    def rename(self, src, dest):
        """Rename (move) src to dest."""
        os.rename(src, dest)

    def copyfile(self, src, dest):
        """Copy src to dest, failing if the destination exists."""
        self.copy(src, dest)

    def copy(self, src, dest, fail_if_exists=True):
        """Copies a file to a specified location.

        :param fail_if_exists: if set to True, the method fails if the
                               destination path exists.
        :raises IOError: if the underlying Win32 copy fails.
        """
        # With large files this is 2x-3x faster than shutil.copy(src, dest),
        # especially when copying to a UNC target.
        if os.path.isdir(dest):
            src_fname = os.path.basename(src)
            dest = os.path.join(dest, src_fname)

        try:
            self._win32_utils.run_and_check_output(
                kernel32.CopyFileW,
                ctypes.c_wchar_p(src),
                ctypes.c_wchar_p(dest),
                wintypes.BOOL(fail_if_exists),
                kernel32_lib_func=True)
        except exceptions.Win32Exception as exc:
            # Fixed: the two sentences previously ran together
            # ("failed.Exception:").
            err_msg = _('The file copy from %(src)s to %(dest)s failed. '
                        'Exception: %(exc)s')
            raise IOError(err_msg % dict(src=src, dest=dest, exc=exc))

    def move_folder_files(self, src_dir, dest_dir):
        """Moves the files of the given src_dir to dest_dir.
        It will ignore any nested folders.

        :param src_dir: Given folder from which to move files.
        :param dest_dir: Folder to which to move files.
        """
        for fname in os.listdir(src_dir):
            src = os.path.join(src_dir, fname)
            # ignore subdirs.
            if os.path.isfile(src):
                self.rename(src, os.path.join(dest_dir, fname))

    def rmtree(self, path):
        """Recursively delete a directory tree, retrying transient errors.

        ERROR_DIR_IS_NOT_EMPTY may be reported transiently while handles
        are being released, so the removal is retried a few times.
        This will be removed once support for Windows Server 2008R2 is
        stopped.

        :raises WindowsError: if the directory is still not removable
            after all retries, or on any other Win32 failure.
        """
        for attempt in range(5):
            try:
                shutil.rmtree(path)
                return
            except WindowsError as e:
                if e.winerror == ERROR_DIR_IS_NOT_EMPTY:
                    last_exc = e
                    time.sleep(1)
                else:
                    # Bare raise preserves the original traceback.
                    raise
        # Previously the failure was silently swallowed once the retries
        # were exhausted; surface the last error instead.
        raise last_exc

    def check_create_dir(self, path):
        """Create the directory if it does not already exist."""
        if not self.exists(path):
            LOG.debug('Creating directory: %s', path)
            self.makedirs(path)

    def check_remove_dir(self, path):
        """Remove the directory tree if it exists."""
        if self.exists(path):
            LOG.debug('Removing directory: %s', path)
            self.rmtree(path)

    def is_symlink(self, path):
        """Return True if the path is a symlink or directory junction."""
        if sys.version_info >= (3, 2):
            # os.path.islink is reparse-point aware on these versions.
            return os.path.islink(path)

        file_attr = self._win32_utils.run_and_check_output(
            kernel32.GetFileAttributesW,
            six.text_type(path),
            kernel32_lib_func=True)

        # Junctions are directories carrying the reparse point attribute.
        return bool(os.path.isdir(path) and (
            file_attr & self._FILE_ATTRIBUTE_REPARSE_POINT))

    def create_sym_link(self, link, target, target_is_dir=True):
        """If target_is_dir is True, a junction will be created.

        NOTE: Junctions only work on same filesystem.
        """
        create_symlink = kernel32.CreateSymbolicLinkW
        create_symlink.argtypes = (
            ctypes.c_wchar_p,
            ctypes.c_wchar_p,
            ctypes.c_ulong,
        )
        create_symlink.restype = ctypes.c_ubyte

        self._win32_utils.run_and_check_output(create_symlink,
                                               link,
                                               target,
                                               target_is_dir,
                                               kernel32_lib_func=True)

    def create_temporary_file(self, suffix=None, *args, **kwargs):
        """Create a closed temporary file and return its path."""
        fd, tmp_file_path = tempfile.mkstemp(suffix=suffix, *args, **kwargs)
        # Close the descriptor right away; callers only need the path.
        os.close(fd)
        return tmp_file_path

    @contextlib.contextmanager
    def temporary_file(self, suffix=None, *args, **kwargs):
        """Creates a random, temporary, closed file, returning the file's
        path. It's different from tempfile.NamedTemporaryFile which returns
        an open file descriptor.
        """
        tmp_file_path = None
        try:
            tmp_file_path = self.create_temporary_file(suffix, *args, **kwargs)
            yield tmp_file_path
        finally:
            if tmp_file_path:
                fileutils.delete_if_exists(tmp_file_path)
|
@ -1,99 +0,0 @@
|
||||
# Copyright 2016 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import ctypes
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
from os_win._i18n import _, _LE
|
||||
from os_win import _utils
|
||||
from os_win import exceptions
|
||||
from os_win.utils import baseutils
|
||||
from os_win.utils import win32utils
|
||||
|
||||
if sys.platform == 'win32':
|
||||
kernel32 = ctypes.windll.kernel32
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class DiskUtils(baseutils.BaseUtils):
    """Disk management helpers based on the Windows Storage WMI namespace."""

    _wmi_namespace = 'root/microsoft/windows/storage'

    def __init__(self):
        self._conn_storage = self._get_wmi_conn(self._wmi_namespace)
        self._win32_utils = win32utils.Win32Utils()

        # Physical device names look like \\.\PHYSICALDRIVE1
        self._phys_dev_name_regex = re.compile(r'\\\\.*\\[a-zA-Z]*([\d]+)')

    def _get_disk(self, disk_number):
        """Return the Msft_Disk WMI object with the given number.

        :raises exceptions.DiskNotFound: if no such disk exists.
        """
        disk = self._conn_storage.Msft_Disk(Number=disk_number)
        if not disk:
            err_msg = _("Could not find the disk number %s")
            raise exceptions.DiskNotFound(err_msg % disk_number)
        return disk[0]

    def get_disk_uid_and_uid_type(self, disk_number):
        """Return the (UniqueId, UniqueIdFormat) pair of the given disk."""
        disk = self._get_disk(disk_number)
        return disk.UniqueId, disk.UniqueIdFormat

    def refresh_disk(self, disk_number):
        """Refresh the WMI view of the given disk."""
        disk = self._get_disk(disk_number)
        disk.Refresh()

    def get_device_number_from_device_name(self, device_name):
        """Extract the device number from a physical device name.

        NOTE(review): the number is returned as the regex capture, i.e. a
        string, not an int — confirm callers rely on this before changing.

        :raises exceptions.DiskNotFound: if the name does not match the
            physical device name pattern.
        """
        matches = self._phys_dev_name_regex.findall(device_name)
        if matches:
            return matches[0]

        err_msg = _("Could not find device number for device: %s")
        raise exceptions.DiskNotFound(err_msg % device_name)

    def rescan_disks(self):
        """Force a disk rescan by piping 'rescan' into diskpart."""
        # TODO(lpetrut): find a better way to do this.
        cmd = ("cmd", "/c", "echo", "rescan", "|", "diskpart.exe")
        _utils.execute(*cmd)

        # NOTE(review): this query presumably refreshes the storage
        # provider's view after the rescan — confirm.
        self._conn_storage.Msft_Disk()

    def get_disk_capacity(self, path, ignore_errors=False):
        """Return (total bytes, free bytes) for the volume holding path.

        :param ignore_errors: when True, failures return (0, 0) instead
            of raising.
        :raises exceptions.Win32Exception: on failure, unless
            ignore_errors is set.
        """
        norm_path = os.path.abspath(path)

        total_bytes = ctypes.c_ulonglong(0)
        free_bytes = ctypes.c_ulonglong(0)

        try:
            self._win32_utils.run_and_check_output(
                kernel32.GetDiskFreeSpaceExW,
                ctypes.c_wchar_p(norm_path),
                None,
                ctypes.pointer(total_bytes),
                ctypes.pointer(free_bytes),
                kernel32_lib_func=True)
            return total_bytes.value, free_bytes.value
        except exceptions.Win32Exception as exc:
            LOG.error(_LE("Could not get disk %(path)s capacity info. "
                          "Exception: %(exc)s"),
                      dict(path=path,
                           exc=exc))
            if ignore_errors:
                return 0, 0
            else:
                raise exc
|
@ -1,132 +0,0 @@
|
||||
#
|
||||
# Copyright 2012 Pedro Navarro Perez
|
||||
# Copyright 2013 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Helper methods for operations related to the management of volumes,
|
||||
and storage repositories
|
||||
"""
|
||||
|
||||
import abc
|
||||
import re
|
||||
import sys
|
||||
|
||||
if sys.platform == 'win32':
|
||||
from six.moves import winreg
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
from os_win._i18n import _LI
|
||||
from os_win.utils import baseutils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class BaseISCSIInitiatorUtils(baseutils.BaseUtils):
    """Base class for iSCSI initiator helpers.

    Provides the WMI-based queries shared by concrete initiator
    implementations; the actual login/logout operations are abstract.
    """

    # Device type value identifying disk devices among a session's devices.
    _FILE_DEVICE_DISK = 7

    def __init__(self, host='.'):
        self._conn_wmi = self._get_wmi_conn('//%s/root/wmi' % host)
        self._conn_cimv2 = self._get_wmi_conn('//%s/root/cimv2' % host)
        # Extracts the drive number from a device path such as
        # DeviceID="...\\<number>".
        self._drive_number_regex = re.compile(r'DeviceID=\"[^,]*\\(\d+)\"')

    @abc.abstractmethod
    def login_storage_target(self, target_lun, target_iqn, target_portal):
        pass

    @abc.abstractmethod
    def logout_storage_target(self, target_iqn):
        pass

    @abc.abstractmethod
    def execute_log_out(self, session_id):
        pass

    def get_iscsi_initiator(self):
        """Get iscsi initiator name for this machine."""
        computer_system = self._conn_cimv2.Win32_ComputerSystem()[0]
        hostname = computer_system.name
        keypath = ("SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\"
                   "iSCSI\\Discovery")
        try:
            # Read the configured initiator name from the registry.
            key = winreg.OpenKey(
                winreg.HKEY_LOCAL_MACHINE,
                keypath,
                0,
                winreg.KEY_WOW64_64KEY + winreg.KEY_ALL_ACCESS)
            temp = winreg.QueryValueEx(key, 'DefaultInitiatorName')
            initiator_name = str(temp[0])
            winreg.CloseKey(key)
        except Exception:
            # Fall back to the Microsoft default naming scheme.
            LOG.info(_LI("The ISCSI initiator name can't be found. "
                         "Choosing the default one"))
            initiator_name = "iqn.1991-05.com.microsoft:" + hostname.lower()
            if computer_system.PartofDomain:
                initiator_name += '.' + computer_system.Domain.lower()
        return initiator_name

    def _get_drive_number_from_disk_path(self, disk_path):
        """Return the drive number parsed from a disk path, else None."""
        drive_number = self._drive_number_regex.findall(disk_path)
        if drive_number:
            return int(drive_number[0])

    def get_session_id_from_mounted_disk(self, physical_drive_path):
        """Return the iSCSI session id owning the given mounted disk.

        Returns None if the path cannot be parsed or no session matches.
        """
        drive_number = self._get_drive_number_from_disk_path(
            physical_drive_path)
        if not drive_number:
            return None

        initiator_sessions = self._conn_wmi.MSiSCSIInitiator_SessionClass()
        for initiator_session in initiator_sessions:
            devices = initiator_session.Devices
            for device in devices:
                device_number = device.DeviceNumber
                if device_number == drive_number:
                    return initiator_session.SessionId

    def _get_devices_for_target(self, target_iqn):
        """Return the devices of the first session bound to the target."""
        initiator_sessions = self._conn_wmi.MSiSCSIInitiator_SessionClass(
            TargetName=target_iqn)
        if not initiator_sessions:
            return []

        return initiator_sessions[0].Devices

    def get_device_number_for_target(self, target_iqn, target_lun):
        """Return the device number of the target's given LUN, else None."""
        devices = self._get_devices_for_target(target_iqn)

        for device in devices:
            if device.ScsiLun == target_lun:
                return device.DeviceNumber

    def get_target_lun_count(self, target_iqn):
        """Return the number of disk devices exposed by the target."""
        devices = self._get_devices_for_target(target_iqn)
        disk_devices = [device for device in devices
                        if device.DeviceType == self._FILE_DEVICE_DISK]
        return len(disk_devices)

    def get_target_from_disk_path(self, disk_path):
        """Return (target IQN, SCSI LUN) for the given disk path.

        Returns None if the path cannot be parsed or no session device
        matches the drive number.
        """
        initiator_sessions = self._conn_wmi.MSiSCSIInitiator_SessionClass()
        drive_number = self._get_drive_number_from_disk_path(disk_path)
        if not drive_number:
            return None

        for initiator_session in initiator_sessions:
            devices = initiator_session.Devices
            for device in devices:
                if device.DeviceNumber == drive_number:
                    return (device.TargetName, device.ScsiLun)
|
@ -1,90 +0,0 @@
|
||||
# Copyright 2015 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import ctypes
|
||||
|
||||
|
||||
# ctypes aliases mirroring the scalar typedefs used by the HBA API
# library (hbaapi.dll).
HBA_HANDLE = ctypes.c_uint32
HBA_PortType = ctypes.c_uint32
HBA_PortSpeed = ctypes.c_uint32
HBA_PortState = ctypes.c_uint32
HBA_COS = ctypes.c_uint32
# World Wide Name: 8 raw bytes.
HBA_WWN = ctypes.c_ubyte * 8
# Array of 32 uint32 values describing supported FC-4 types.
HBA_FC4Types = ctypes.c_uint32 * 32
HBA_FCPBindingType = ctypes.c_int
|
||||
|
||||
|
||||
class HBA_PortAttributes(ctypes.Structure):
    """ctypes mapping of the HBA API port attributes structure.

    Field names, order and types must match the C structure layout
    exactly; do not reorder.
    """
    _fields_ = [('NodeWWN', HBA_WWN),
                ('PortWWN', HBA_WWN),
                ('PortFcId', ctypes.c_uint32),
                ('PortType', HBA_PortType),
                ('PortState', HBA_PortState),
                ('PortSupportedClassofService', HBA_COS),
                ('PortSupportedFc4Types', HBA_FC4Types),
                ('PortSymbolicName', ctypes.c_char * 256),
                ('OSDeviceName', ctypes.c_char * 256),
                ('PortSupportedSpeed', HBA_PortSpeed),
                ('PortSpeed', HBA_PortSpeed),
                ('PortMaxFrameSize', ctypes.c_uint32),
                ('FabricName', HBA_WWN),
                ('NumberOfDiscoveredPorts', ctypes.c_uint32)]
|
||||
|
||||
|
||||
class HBA_FCPId(ctypes.Structure):
    """ctypes mapping of the HBA API FCP identifier structure.

    Field layout must match the C structure exactly.
    """
    _fields_ = [('FcId', ctypes.c_uint32),
                ('NodeWWN', HBA_WWN),
                ('PortWWN', HBA_WWN),
                ('FcpLun', ctypes.c_uint64)]
|
||||
|
||||
|
||||
class HBA_ScsiId(ctypes.Structure):
    """ctypes mapping of the HBA API SCSI identifier structure.

    Field layout must match the C structure exactly.
    """
    _fields_ = [('OSDeviceName', ctypes.c_char * 256),
                ('ScsiBusNumber', ctypes.c_uint32),
                ('ScsiTargetNumber', ctypes.c_uint32),
                ('ScsiOSLun', ctypes.c_uint32)]
|
||||
|
||||
|
||||
class HBA_FCPScsiEntry(ctypes.Structure):
    """A single SCSI <-> FCP mapping entry of the HBA API."""
    _fields_ = [('ScsiId', HBA_ScsiId),
                ('FcpId', HBA_FCPId)]
|
||||
|
||||
|
||||
def get_target_mapping_struct(entry_count=0):
    """Build an HBA_FCPTargetMapping structure sized for entry_count.

    The structure type has to be generated dynamically, since the length
    of the 'Entries' array is part of the ctypes type itself.
    """
    entries_array_type = HBA_FCPScsiEntry * entry_count

    class HBA_FCPTargetMapping(ctypes.Structure):
        _fields_ = [('NumberOfEntries', ctypes.c_uint32),
                    ('Entries', entries_array_type)]

        def __init__(self, entry_count):
            self.NumberOfEntries = entry_count
            self.Entries = entries_array_type()

    return HBA_FCPTargetMapping(entry_count)
|
||||
|
||||
|
||||
class HBA_AdapterAttributes(ctypes.Structure):
    """ctypes mapping of the HBA API adapter attributes structure.

    Field names, order and types must match the C structure layout
    exactly; do not reorder.
    """
    _fields_ = [('Manufacturer', ctypes.c_char * 64),
                ('SerialNumber', ctypes.c_char * 64),
                ('Model', ctypes.c_char * 256),
                ('ModelDescription', ctypes.c_char * 256),
                ('NodeWWN', HBA_WWN),
                ('NodeSymbolicName', ctypes.c_char * 256),
                ('HardwareVersion', ctypes.c_char * 256),
                ('DriverVersion', ctypes.c_char * 256),
                ('OptionROMVersion', ctypes.c_char * 256),
                ('FirmwareVersion', ctypes.c_char * 256),
                ('VendorSpecificID', ctypes.c_uint32),
                ('NumberOfPorts', ctypes.c_uint32),
                ('DriverName', ctypes.c_char * 256)]
|
@ -1,176 +0,0 @@
|
||||
# Copyright 2015 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import contextlib
|
||||
import ctypes
|
||||
import six
|
||||
import sys
|
||||
import textwrap
|
||||
|
||||
if sys.platform == 'win32':
|
||||
hbaapi = ctypes.cdll.hbaapi
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
from os_win._i18n import _, _LW
|
||||
from os_win import exceptions
|
||||
from os_win.utils.storage.initiator import fc_structures as fc_struct
|
||||
from os_win.utils import win32utils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
# HBA API status codes.
HBA_STATUS_OK = 0
# Returned when more data is available than the supplied buffer can
# hold; the call is expected to be retried with a larger buffer.
HBA_STATUS_ERROR_MORE_DATA = 7
|
||||
|
||||
|
||||
class FCUtils(object):
|
||||
def __init__(self):
|
||||
self._win32_utils = win32utils.Win32Utils()
|
||||
|
||||
def _run_and_check_output(self, *args, **kwargs):
|
||||
kwargs['failure_exc'] = exceptions.FCWin32Exception
|
||||
return self._win32_utils.run_and_check_output(*args, **kwargs)
|
||||
|
||||
def get_fc_hba_count(self):
|
||||
return hbaapi.HBA_GetNumberOfAdapters()
|
||||
|
||||
def _open_adapter(self, adapter_name=None, adapter_wwn=None):
|
||||
if adapter_name:
|
||||
func = hbaapi.HBA_OpenAdapter
|
||||
arg = ctypes.c_char_p(six.b(adapter_name))
|
||||
elif adapter_wwn:
|
||||
func = hbaapi.HBA_OpenAdapterByWWN
|
||||
arg = fc_struct.HBA_WWN(*adapter_wwn)
|
||||
else:
|
||||
err_msg = _("Could not open HBA adapter. "
|
||||
"No HBA name or WWN was specified")
|
||||
raise exceptions.FCException(err_msg)
|
||||
|
||||
handle = self._run_and_check_output(func, arg,
|
||||
ret_val_is_err_code=False,
|
||||
error_on_nonzero_ret_val=False,
|
||||
error_ret_vals=[0])
|
||||
return handle
|
||||
|
||||
def _close_adapter(self, hba_handle):
|
||||
hbaapi.HBA_CloseAdapter(hba_handle)
|
||||
|
||||
@contextlib.contextmanager
|
||||
def _get_hba_handle(self, *args, **kwargs):
|
||||
hba_handle = self._open_adapter(*args, **kwargs)
|
||||
try:
|
||||
yield hba_handle
|
||||
finally:
|
||||
self._close_adapter(hba_handle)
|
||||
|
||||
def _get_adapter_name(self, adapter_index):
|
||||
buff = (ctypes.c_char * 256)()
|
||||
self._run_and_check_output(hbaapi.HBA_GetAdapterName,
|
||||
ctypes.c_uint32(adapter_index),
|
||||
ctypes.byref(buff))
|
||||
|
||||
return buff.value.decode('utf-8')
|
||||
|
||||
def _get_target_mapping(self, hba_handle):
|
||||
entry_count = 0
|
||||
hba_status = HBA_STATUS_ERROR_MORE_DATA
|
||||
|
||||
while hba_status == HBA_STATUS_ERROR_MORE_DATA:
|
||||
mapping = fc_struct.get_target_mapping_struct(entry_count)
|
||||
hba_status = self._run_and_check_output(
|
||||
hbaapi.HBA_GetFcpTargetMapping,
|
||||
hba_handle,
|
||||
ctypes.byref(mapping),
|
||||
ignored_error_codes=[HBA_STATUS_ERROR_MORE_DATA])
|
||||
entry_count = mapping.NumberOfEntries
|
||||
|
||||
return mapping
|
||||
|
||||
def _get_adapter_port_attributes(self, hba_handle, port_index):
|
||||
port_attributes = fc_struct.HBA_PortAttributes()
|
||||
|
||||
self._run_and_check_output(
|
||||
hbaapi.HBA_GetAdapterPortAttributes,
|
||||
hba_handle, port_index,
|
||||
ctypes.byref(port_attributes))
|
||||
return port_attributes
|
||||
|
||||
def _get_adapter_attributes(self, hba_handle):
|
||||
hba_attributes = fc_struct.HBA_AdapterAttributes()
|
||||
|
||||
self._run_and_check_output(
|
||||
hbaapi.HBA_GetAdapterAttributes,
|
||||
hba_handle, ctypes.byref(hba_attributes))
|
||||
return hba_attributes
|
||||
|
||||
def _get_fc_hba_adapter_ports(self, adapter_name):
|
||||
hba_ports = []
|
||||
with self._get_hba_handle(
|
||||
adapter_name=adapter_name) as hba_handle:
|
||||
adapter_attributes = self._get_adapter_attributes(hba_handle)
|
||||
port_count = adapter_attributes.NumberOfPorts
|
||||
|
||||
for port_index in range(port_count):
|
||||
port_attributes = self._get_adapter_port_attributes(
|
||||
hba_handle,
|
||||
port_index)
|
||||
wwnn = self._wwn_array_to_hex_str(port_attributes.NodeWWN)
|
||||
wwpn = self._wwn_array_to_hex_str(port_attributes.PortWWN)
|
||||
|
||||
hba_port_info = dict(node_name=wwnn,
|
||||
port_name=wwpn)
|
||||
hba_ports.append(hba_port_info)
|
||||
return hba_ports
|
||||
|
||||
def get_fc_hba_ports(self):
|
||||
hba_ports = []
|
||||
|
||||
adapter_count = self.get_fc_hba_count()
|
||||
for adapter_index in range(adapter_count):
|
||||
adapter_name = self._get_adapter_name(adapter_index)
|
||||
try:
|
||||
hba_ports += self._get_fc_hba_adapter_ports(adapter_name)
|
||||
except Exception as exc:
|
||||
msg = _LW("Could not retrieve FC HBA ports for "
|
||||
"adapter: %(adapter_name)s. "
|
||||
"Exception: %(exc)s")
|
||||
LOG.warning(msg, dict(adapter_name=adapter_name, exc=exc))
|
||||
|
||||
return hba_ports
|
||||
|
||||
def _wwn_hex_string_to_array(self, wwn):
|
||||
return [int(hex_byte, 16) for hex_byte in textwrap.wrap(wwn, 2)]
|
||||
|
||||
def _wwn_array_to_hex_str(self, wwn):
|
||||
return ''.join('{:02X}'.format(b) for b in wwn)
|
||||
|
||||
def get_fc_target_mappings(self, node_wwn):
    """List FCP target mappings seen through the HBA identified by node_wwn.

    :param node_wwn: hex string WWN of the local HBA node.
    :returns: a list of dicts describing each remote node/port WWN pair,
        along with the OS device name and LUN of the mapped unit.
    """
    wwn_bytes = self._wwn_hex_string_to_array(node_wwn)
    results = []
    with self._get_hba_handle(adapter_wwn=wwn_bytes) as hba_handle:
        for entry in self._get_target_mapping(hba_handle).Entries:
            results.append({
                'node_name': self._wwn_array_to_hex_str(entry.FcpId.NodeWWN),
                'port_name': self._wwn_array_to_hex_str(entry.FcpId.PortWWN),
                'device_name': entry.ScsiId.OSDeviceName,
                'lun': entry.ScsiId.ScsiOSLun,
            })
    return results
|
||||
|
||||
def refresh_hba_configuration(self):
    """Ask the HBA API to re-enumerate adapters, ports and discovered targets."""
    hbaapi.HBA_RefreshAdapterConfiguration()
|
@ -1,119 +0,0 @@
|
||||
# Copyright 2012 Pedro Navarro Perez
|
||||
# Copyright 2013 Cloudbase Solutions Srl
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Helper methods for operations related to the management of volumes,
|
||||
and storage repositories
|
||||
|
||||
Official Microsoft iSCSI Initiator and iSCSI command line interface
|
||||
documentation can be retrieved at:
|
||||
http://www.microsoft.com/en-us/download/details.aspx?id=34750
|
||||
"""
|
||||
|
||||
import re
|
||||
import time
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
from six.moves import range # noqa
|
||||
|
||||
from os_win._i18n import _
|
||||
from os_win import _utils
|
||||
from os_win import exceptions
|
||||
from os_win.utils.storage.initiator import base_iscsi_utils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
class ISCSIInitiatorCLIUtils(base_iscsi_utils.BaseISCSIInitiatorUtils):
    """iSCSI initiator helper relying on the iscsicli.exe command line tool."""

    def execute(self, *args, **kwargs):
        """Run a command, requiring iscsicli's success marker in its output.

        :returns: the command's stdout.
        :raises exceptions.HyperVException: if the success marker is missing.
        """
        stdout_value, stderr_value = _utils.execute(*args, **kwargs)
        # iscsicli reports errors on stdout, so check for its success marker.
        if 'The operation completed successfully' not in stdout_value:
            raise exceptions.HyperVException(
                _('An error has occurred when calling the iscsi initiator: %s')
                % stdout_value)
        return stdout_value

    def _login_target_portal(self, target_portal):
        """Ensure the initiator is connected to the given target portal.

        Refreshes the portal if it is already registered, otherwise adds it.
        """
        (target_address,
         target_port) = _utils.parse_server_string(target_portal)

        output = self.execute('iscsicli.exe', 'ListTargetPortals')
        pattern = r'Address and Socket *: (.*)'
        portals = [addr.split() for addr in re.findall(pattern, output)]
        # Lazy formatting: the message is only built if debug is enabled.
        LOG.debug("Ensuring connection to portal: %s", target_portal)
        if [target_address, str(target_port)] in portals:
            self.execute('iscsicli.exe', 'RefreshTargetPortal',
                         target_address, target_port)
        else:
            # Adding target portal to iscsi initiator. Sending targets
            self.execute('iscsicli.exe', 'AddTargetPortal',
                         target_address, target_port,
                         '*', '*', '*', '*', '*', '*', '*', '*', '*', '*', '*',
                         '*', '*')

    def login_storage_target(self, target_lun, target_iqn, target_portal,
                             auth_username=None, auth_password=None):
        """Ensure that the target is logged in."""
        self._login_target_portal(target_portal)
        # Listing targets
        self.execute('iscsicli.exe', 'ListTargets')

        retry_count = CONF.hyperv.volume_attach_retry_count

        # If the target is not connected, at least two iterations are needed:
        # one for performing the login and another one for checking if the
        # target was logged in successfully.
        if retry_count < 2:
            retry_count = 2

        for attempt in range(retry_count):
            try:
                session_info = self.execute('iscsicli.exe', 'SessionList')
                if target_iqn not in session_info:
                    # Sending login
                    self.execute('iscsicli.exe', 'qlogintarget', target_iqn,
                                 auth_username, auth_password)
                else:
                    return
            except exceptions.HyperVException as exc:
                # Fixed typo in the log message ("Exceptipn" -> "Exception").
                LOG.debug("Attempt %(attempt)d to connect to target "
                          "%(target_iqn)s failed. Retrying. "
                          "Exception: %(exc)s ",
                          {'target_iqn': target_iqn,
                           'exc': exc,
                           'attempt': attempt})
            time.sleep(CONF.hyperv.volume_attach_retry_interval)

        raise exceptions.HyperVException(_('Failed to login target %s') %
                                         target_iqn)

    def logout_storage_target(self, target_iqn):
        """Logs out storage target through its session id."""
        # NOTE(review): target_iqn is interpolated into the WQL query; it is
        # expected to be an internally supplied IQN, not untrusted input.
        sessions = self._conn_wmi.query("SELECT * FROM "
                                        "MSiSCSIInitiator_SessionClass "
                                        "WHERE TargetName='%s'" % target_iqn)
        for session in sessions:
            self.execute_log_out(session.SessionId)

    def execute_log_out(self, session_id):
        """Executes log out of the session described by its session ID."""
        self.execute('iscsicli.exe', 'logouttarget', session_id)
|
@ -1,439 +0,0 @@
|
||||
# Copyright 2016 Cloudbase Solutions Srl
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import ctypes
|
||||
import functools
|
||||
import inspect
|
||||
import socket
|
||||
import sys
|
||||
|
||||
if sys.platform == 'win32':
|
||||
iscsidsc = ctypes.windll.iscsidsc
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
from os_win._i18n import _LI
|
||||
from os_win import _utils
|
||||
from os_win import constants
|
||||
from os_win import exceptions
|
||||
from os_win.utils.storage import diskutils
|
||||
from os_win.utils.storage.initiator import iscsidsc_structures as iscsi_struct
|
||||
from os_win.utils.storage.initiator import iscsierr
|
||||
from os_win.utils import win32utils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
ERROR_INSUFFICIENT_BUFFER = 0x7a
|
||||
|
||||
|
||||
def ensure_buff_and_retrieve_items(struct_type=None,
                                   func_requests_buff_sz=True,
                                   parse_output=True):
    """Decorator factory handling variable-size iscsidsc output buffers.

    The iscsidsc.dll functions retrieving data accept a buffer, which will
    be used for passing back the requested data. If the buffer is too small,
    the error code will show it. In this case, the decorator will adjust the
    buffer size based on the buffer size or the element count provided by
    the function, attempting to call it again.

    :param struct_type: ctypes type of the retrieved elements, used to size
        the buffer and to parse the output.
    :param func_requests_buff_sz: whether the wrapped call reports the needed
        size via its 'buff_size' argument (otherwise it is computed from the
        reported element count).
    :param parse_output: if set, return the parsed array of struct_type items
        instead of the wrapped function's return value.
    """
    def wrapper(f):
        @functools.wraps(f)
        def inner(*args, **kwargs):
            # Map the caller's arguments by name so the buffer-related ones
            # can be (re)injected before each attempt.
            call_args = inspect.getcallargs(f, *args, **kwargs)
            call_args['element_count'] = ctypes.c_ulong(0)
            call_args['buff'] = (ctypes.c_ubyte * 0)()
            call_args['buff_size'] = ctypes.c_ulong(0)

            while True:
                try:
                    ret_val = f(**call_args)
                    if parse_output:
                        return _get_items_from_buff(
                            call_args['buff'],
                            struct_type,
                            call_args['element_count'].value)
                    else:
                        return ret_val
                except exceptions.Win32Exception as ex:
                    # Only the low word of the error code is significant here.
                    if (ex.error_code & 0xFFFF) == ERROR_INSUFFICIENT_BUFFER:
                        if func_requests_buff_sz:
                            buff_size = call_args['buff_size'].value
                        else:
                            buff_size = (ctypes.sizeof(struct_type) *
                                         call_args['element_count'].value)
                        # Grow the buffer to the requested size and retry.
                        call_args['buff'] = (ctypes.c_ubyte * buff_size)()
                    else:
                        raise
        return inner
    return wrapper
|
||||
|
||||
|
||||
def _get_items_from_buff(buff, item_type, element_count):
|
||||
array_type = item_type * element_count
|
||||
return ctypes.cast(buff, ctypes.POINTER(array_type)).contents
|
||||
|
||||
|
||||
# Preconfigured retry decorator: iscsidsc calls may fail transiently
# (e.g. busy sessions), so retry up to 10 times on initiator API errors.
retry_decorator = functools.partial(
    _utils.retry_decorator,
    max_retry_count=10,
    exceptions=exceptions.ISCSIInitiatorAPIException)
|
||||
|
||||
|
||||
class ISCSIInitiatorUtils(object):
    """iSCSI initiator helper using the native iscsidsc.dll API."""

    _DEFAULT_RESCAN_ATTEMPTS = 3
    # Prefix used when building a fallback initiator node name.
    _MS_IQN_PREFIX = "iqn.1991-05.com.microsoft"

    def __init__(self):
        self._win32utils = win32utils.Win32Utils()
        self._diskutils = diskutils.DiskUtils()

    def _run_and_check_output(self, *args, **kwargs):
        """Run an iscsidsc function, translating failures.

        Errors are raised as ISCSIInitiatorAPIException, using the
        iscsidsc-specific error message table.
        """
        kwargs['error_msg_src'] = iscsierr.err_msg_dict
        kwargs['failure_exc'] = exceptions.ISCSIInitiatorAPIException
        # Return the underlying result so callers may use it if needed
        # (the original discarded it; no caller relied on that).
        return self._win32utils.run_and_check_output(*args, **kwargs)

    @ensure_buff_and_retrieve_items(
        struct_type=iscsi_struct.PERSISTENT_ISCSI_LOGIN_INFO)
    def _get_iscsi_persistent_logins(self, buff=None, buff_size=None,
                                     element_count=None):
        """Return the persistent login structures known to the initiator."""
        self._run_and_check_output(
            iscsidsc.ReportIScsiPersistentLoginsW,
            ctypes.byref(element_count),
            ctypes.byref(buff),
            ctypes.byref(buff_size))

    @ensure_buff_and_retrieve_items(
        struct_type=ctypes.c_wchar,
        func_requests_buff_sz=False,
        parse_output=False)
    def get_targets(self, forced_update=False, buff=None,
                    buff_size=None, element_count=None):
        """Get the list of iSCSI targets seen by the initiator service."""
        self._run_and_check_output(
            iscsidsc.ReportIScsiTargetsW,
            forced_update,
            ctypes.byref(element_count),
            ctypes.byref(buff))
        return self._parse_string_list(buff, element_count.value)

    def get_iscsi_initiator(self):
        """Returns the initiator node name."""
        try:
            buff = (ctypes.c_wchar * (iscsi_struct.MAX_ISCSI_NAME_LEN + 1))()
            self._run_and_check_output(iscsidsc.GetIScsiInitiatorNodeNameW,
                                       ctypes.byref(buff))
            return buff.value
        except exceptions.ISCSIInitiatorAPIException as ex:
            # Fall back to the well known Microsoft IQN naming scheme.
            LOG.info(_LI("The ISCSI initiator node name can't be found. "
                         "Choosing the default one. Exception: %s"), ex)
            return "%s:%s" % (self._MS_IQN_PREFIX, socket.getfqdn().lower())

    @ensure_buff_and_retrieve_items(
        struct_type=ctypes.c_wchar,
        func_requests_buff_sz=False,
        parse_output=False)
    def get_iscsi_initiators(self, buff=None, buff_size=None,
                             element_count=None):
        """Get the list of available iSCSI initiator HBAs."""
        self._run_and_check_output(
            iscsidsc.ReportIScsiInitiatorListW,
            ctypes.byref(element_count),
            ctypes.byref(buff))
        return self._parse_string_list(buff, element_count.value)

    @staticmethod
    def _parse_string_list(buff, element_count):
        """Split a NUL-separated wide-char buffer into a list of strings."""
        buff = ctypes.cast(buff, ctypes.POINTER(ctypes.c_wchar))
        str_list = buff[:element_count].strip('\x00')
        # Avoid returning a list with an empty string
        str_list = str_list.split('\x00') if str_list else []
        return str_list

    def _login_iscsi_target(self, target_name, portal=None, login_opts=None,
                            is_persistent=True, initiator_name=None):
        """Login (or persist credentials for) the given iSCSI target.

        :returns: a (session_id, connection_id) tuple of unique id structs.
        """
        session_id = iscsi_struct.ISCSI_UNIQUE_SESSION_ID()
        connection_id = iscsi_struct.ISCSI_UNIQUE_CONNECTION_ID()
        portal_ref = ctypes.byref(portal) if portal else None
        login_opts_ref = ctypes.byref(login_opts) if login_opts else None
        initiator_name_ref = (ctypes.c_wchar_p(initiator_name)
                              if initiator_name else None)

        # If the portal is not provided, the initiator will try to reach any
        # portal exporting the requested target.
        self._run_and_check_output(
            iscsidsc.LoginIScsiTargetW,
            ctypes.c_wchar_p(target_name),
            False,  # IsInformationalSession
            initiator_name_ref,
            ctypes.c_ulong(iscsi_struct.ISCSI_ANY_INITIATOR_PORT),
            portal_ref,
            iscsi_struct.ISCSI_SECURITY_FLAGS(
                iscsi_struct.ISCSI_DEFAULT_SECURITY_FLAGS),
            None,  # Security flags / mappings (using default / auto)
            login_opts_ref,
            ctypes.c_ulong(0),
            None,  # Preshared key size / key
            is_persistent,
            ctypes.byref(session_id),
            ctypes.byref(connection_id),
            ignored_error_codes=[iscsierr.ISDSC_TARGET_ALREADY_LOGGED_IN])
        return session_id, connection_id

    @ensure_buff_and_retrieve_items(
        struct_type=iscsi_struct.ISCSI_SESSION_INFO)
    def _get_iscsi_sessions(self, buff=None, buff_size=None,
                            element_count=None):
        """Return all active iSCSI session structures."""
        self._run_and_check_output(
            iscsidsc.GetIScsiSessionListW,
            ctypes.byref(buff_size),
            ctypes.byref(element_count),
            ctypes.byref(buff))

    def _get_iscsi_target_sessions(self, target_name, connected_only=True):
        """Return the sessions established with the given target.

        :param connected_only: if set, skip sessions without connections.
        """
        sessions = self._get_iscsi_sessions()
        return [session for session in sessions
                if session.TargetNodeName == target_name
                and (session.ConnectionCount > 0 or not connected_only)]

    @retry_decorator(error_codes=iscsierr.ISDSC_SESSION_BUSY)
    @ensure_buff_and_retrieve_items(
        struct_type=iscsi_struct.ISCSI_DEVICE_ON_SESSION,
        func_requests_buff_sz=False)
    def _get_iscsi_session_devices(self, session_id,
                                   buff=None, buff_size=None,
                                   element_count=None):
        """Return the device structures exposed by the given session."""
        self._run_and_check_output(
            iscsidsc.GetDevicesForIScsiSessionW,
            ctypes.byref(session_id),
            ctypes.byref(element_count),
            ctypes.byref(buff))

    def _get_iscsi_session_disk_luns(self, session_id):
        """Return the LUNs of the disk devices on the given session."""
        devices = self._get_iscsi_session_devices(session_id)
        luns = [device.ScsiAddress.Lun for device in devices
                if (device.StorageDeviceNumber.DeviceType ==
                    iscsi_struct.FILE_DEVICE_DISK)]
        return luns

    def _get_iscsi_device_from_session(self, session_id, target_lun):
        """Return the session device matching target_lun, or None."""
        devices = self._get_iscsi_session_devices(session_id)
        for device in devices:
            if device.ScsiAddress.Lun == target_lun:
                return device

    def _get_iscsi_device(self, target_name, target_lun):
        """Return the device behind target_name/target_lun, or None."""
        sessions = self._get_iscsi_target_sessions(target_name)
        for session in sessions:
            device = self._get_iscsi_device_from_session(session.SessionId,
                                                         target_lun)
            if device:
                return device

    def get_device_number_for_target(self, target_name, target_lun):
        """Return the disk device number for a target LUN, or None."""
        # This method is preserved as it's used by the Hyper-V Nova driver.
        device = self._get_iscsi_device(target_name, target_lun)
        return device.StorageDeviceNumber.DeviceNumber if device else None

    def get_device_number_and_path(self, target_name, target_lun):
        """Return (device_number, device_path) for a target LUN.

        :returns: (None, None) when the device is not found.
        """
        # We try to avoid the need to seek the disk twice as this may take
        # unnecessary time.
        device = self._get_iscsi_device(target_name, target_lun)
        if device:
            return device.StorageDeviceNumber.DeviceNumber, device.LegacyName
        return None, None

    def get_target_luns(self, target_name):
        """Return the disk LUNs exposed by the given target."""
        # We only care about disk LUNs.
        sessions = self._get_iscsi_target_sessions(target_name)
        if sessions:
            luns = self._get_iscsi_session_disk_luns(sessions[0].SessionId)
            return luns
        return []

    def get_target_lun_count(self, target_name):
        """Return the number of disk LUNs exposed by the given target."""
        return len(self.get_target_luns(target_name))

    @retry_decorator(error_codes=iscsierr.ISDSC_SESSION_BUSY)
    def _logout_iscsi_target(self, session_id):
        """Log out the session identified by session_id."""
        self._run_and_check_output(
            iscsidsc.LogoutIScsiTarget,
            ctypes.byref(session_id))

    def _add_static_target(self, target_name, is_persistent=True):
        """Register a target with the initiator without logging in."""
        self._run_and_check_output(iscsidsc.AddIScsiStaticTargetW,
                                   ctypes.c_wchar_p(target_name),
                                   None,  # Target alias
                                   0,  # Target flags
                                   is_persistent,
                                   None,  # Predefined mappings
                                   None,  # Login opts
                                   None)  # Portal group

    def _remove_static_target(self, target_name):
        """Unregister a target; missing targets are silently ignored."""
        ignored_error_codes = [iscsierr.ISDSC_TARGET_NOT_FOUND]
        self._run_and_check_output(iscsidsc.RemoveIScsiStaticTargetW,
                                   ctypes.c_wchar_p(target_name),
                                   ignored_error_codes=ignored_error_codes)

    def _get_login_opts(self, auth_username, auth_password, auth_type,
                        login_flags=0):
        """Build an ISCSI_LOGIN_OPTIONS struct, inferring the auth type.

        CHAP is assumed when credentials are supplied and no explicit
        auth_type is requested.
        """
        if auth_type is None:
            auth_type = (constants.ISCSI_CHAP_AUTH_TYPE
                         if auth_username and auth_password
                         else constants.ISCSI_NO_AUTH_TYPE)
        login_opts = iscsi_struct.ISCSI_LOGIN_OPTIONS(Username=auth_username,
                                                      Password=auth_password,
                                                      AuthType=auth_type,
                                                      LoginFlags=login_flags)
        return login_opts

    def _session_on_path_exists(self, target_sessions, portal_addr,
                                portal_port, initiator_name):
        """Check if any session already uses the requested portal/initiator."""
        for session in target_sessions:
            connections = session.Connections[:session.ConnectionCount]
            uses_requested_initiator = False
            # Note(lpetrut): unfortunately, the InitiatorName field of the
            # session structure actually represents the initiator node name.
            #
            # We assume that an active path should present at least one device
            # so we get the initiator name from the device info.
            if initiator_name:
                devices = self._get_iscsi_session_devices(session.SessionId)
                for device in devices:
                    if device.InitiatorName == initiator_name:
                        uses_requested_initiator = True
                        break
            else:
                uses_requested_initiator = True

            for conn in connections:
                is_requested_path = (uses_requested_initiator and
                                     conn.TargetAddress == portal_addr and
                                     conn.TargetSocket == portal_port)
                if is_requested_path:
                    return True
        return False

    def _new_session_required(self, target_iqn, portal_addr, portal_port,
                              initiator_name, mpio_enabled):
        """Decide whether a new session must be created for this path."""
        login_required = False
        sessions = self._get_iscsi_target_sessions(target_iqn)

        if not sessions:
            login_required = True
        elif mpio_enabled:
            # With MPIO, a session per path is expected.
            login_required = not self._session_on_path_exists(
                sessions, portal_addr, portal_port, initiator_name)
        return login_required

    def login_storage_target(self, target_lun, target_iqn, target_portal,
                             auth_username=None, auth_password=None,
                             auth_type=None,
                             mpio_enabled=False,
                             ensure_lun_available=True,
                             initiator_name=None,
                             rescan_attempts=_DEFAULT_RESCAN_ATTEMPTS):
        """Ensure a (possibly multipath) session to the given target exists."""
        portal_addr, portal_port = _utils.parse_server_string(target_portal)
        portal_port = (int(portal_port)
                       if portal_port else iscsi_struct.DEFAULT_ISCSI_PORT)

        known_targets = self.get_targets()
        if target_iqn not in known_targets:
            self._add_static_target(target_iqn)

        login_required = self._new_session_required(
            target_iqn, portal_addr, portal_port,
            initiator_name, mpio_enabled)

        if login_required:
            LOG.debug("Logging in iSCSI target %(target_iqn)s",
                      dict(target_iqn=target_iqn))
            # If the multipath flag is set, multiple sessions to the same
            # target may be established. MPIO must be enabled and configured
            # to claim iSCSI disks, otherwise data corruption can occur.
            login_flags = (iscsi_struct.ISCSI_LOGIN_FLAG_MULTIPATH_ENABLED
                           if mpio_enabled else 0)
            login_opts = self._get_login_opts(auth_username,
                                              auth_password,
                                              auth_type,
                                              login_flags)
            portal = iscsi_struct.ISCSI_TARGET_PORTAL(Address=portal_addr,
                                                      Socket=portal_port)
            # Note(lpetrut): The iscsidsc documentation states that if a
            # persistent session is requested, the initiator should login
            # the target only after saving the credentials.
            #
            # The issue is that although the Microsoft iSCSI service saves
            # the credentials, it does not automatically login the target,
            # for which reason we have two calls, one meant to save the
            # credentials and another one actually creating the session.
            self._login_iscsi_target(target_iqn, portal, login_opts,
                                     is_persistent=True)
            self._login_iscsi_target(target_iqn, portal,
                                     login_opts,
                                     is_persistent=False)

        if ensure_lun_available:
            self.ensure_lun_available(target_iqn, target_lun, rescan_attempts)

    def ensure_lun_available(self, target_iqn, target_lun,
                             rescan_attempts=_DEFAULT_RESCAN_ATTEMPTS):
        """Wait for a LUN to be reported, rescanning disks between checks.

        :raises exceptions.ISCSILunNotAvailable: if the LUN is still not
            visible after the requested number of attempts.
        """
        for attempt in range(rescan_attempts):
            sessions = self._get_iscsi_target_sessions(target_iqn)
            for session in sessions:
                try:
                    sid = session.SessionId
                    device = self._get_iscsi_device_from_session(sid,
                                                                 target_lun)
                    if device and (device.StorageDeviceNumber.DeviceNumber
                                   not in (None, -1)):
                        return
                except exceptions.ISCSIInitiatorAPIException as ex:
                    LOG.exception(ex)
                    continue
            # Fixed: the original condition 'attempt <= rescan_attempts' was
            # always true, triggering a useless disk rescan after the final
            # check, right before raising. Only rescan between attempts.
            if attempt < rescan_attempts - 1:
                self._diskutils.rescan_disks()

        raise exceptions.ISCSILunNotAvailable(target_lun=target_lun,
                                              target_iqn=target_iqn)

    @retry_decorator(error_codes=(iscsierr.ISDSC_SESSION_BUSY,
                                  iscsierr.ISDSC_DEVICE_BUSY_ON_SESSION))
    def logout_storage_target(self, target_iqn):
        """Log out all sessions to a target and forget it entirely."""
        LOG.debug("Logging out iSCSI target %(target_iqn)s",
                  dict(target_iqn=target_iqn))
        sessions = self._get_iscsi_target_sessions(target_iqn,
                                                   connected_only=False)
        for session in sessions:
            self._logout_iscsi_target(session.SessionId)

        self._remove_target_persistent_logins(target_iqn)
        self._remove_static_target(target_iqn)

    def _remove_target_persistent_logins(self, target_iqn):
        """Drop any persistent login entries matching the target."""
        persistent_logins = self._get_iscsi_persistent_logins()
        for persistent_login in persistent_logins:
            if persistent_login.TargetName == target_iqn:
                LOG.debug("Removing iSCSI target "
                          "persistent login: %(target_iqn)s",
                          dict(target_iqn=target_iqn))
                self._remove_persistent_login(persistent_login)

    def _remove_persistent_login(self, persistent_login):
        """Remove a single persistent login entry from the initiator."""
        self._run_and_check_output(
            iscsidsc.RemoveIScsiPersistentTargetW,
            ctypes.c_wchar_p(persistent_login.InitiatorInstance),
            persistent_login.InitiatorPortNumber,
            ctypes.c_wchar_p(persistent_login.TargetName),
            ctypes.byref(persistent_login.TargetPortal))
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user