Support HEALTHY status for db instance
- 'HEALTHY' means the db service is responsive, 'ACTIVE' means the db
  service is alive.
- Remove the fakemodetests CI job; a similar testing task will be added
  in the future.
- Fix the periodic CI job.
- Remove MongoDB and related jobs.

Change-Id: I5abe9091ba203297dc87db5fba139179166321f7
parent f16020392d, commit a0a10f0b94
.zuul.yaml (36 lines changed)
@@ -14,7 +14,6 @@
         - openstack-tox-pylint
         - trove-tox-bandit-baseline:
             voting: false
-        - trove-tox-fakemodetests
         - trove-tempest
         - trove-functional-mysql
         - trove-scenario-mysql-single
@@ -26,17 +25,12 @@
             voting: false
         - trove-scenario-mariadb-multi:
             voting: false
         - trove-scenario-redis-single:
             voting: false
         - trove-scenario-redis-multi:
             voting: false
         - trove-tempest-ipv6-only:
             voting: false
     gate:
       queue: trove
       jobs:
         - openstack-tox-pylint
-        - trove-tox-fakemodetests
         - trove-functional-mysql
         - trove-scenario-mysql-single
         - trove-scenario-mysql-multi
@@ -47,12 +41,12 @@
         - trove-scenario-cassandra-multi
         - trove-scenario-couchbase-single
         - trove-scenario-couchdb-single
-        - trove-scenario-mongodb-single
-        - trove-scenario-mongodb-multi
         - trove-scenario-percona-single
         - trove-scenario-percona-multi
         - trove-scenario-pxc-single
         - trove-scenario-pxc-multi
         - trove-scenario-redis-single
         - trove-scenario-redis-multi
     periodic:
       jobs:
         - publish-trove-guest-image-mysql-ubuntu-xenial:
@@ -214,24 +208,6 @@
       devstack_localrc:
         TROVE_ENABLE_IMAGE_BUILD: false

-- job:
-    name: trove-scenario-mongodb-single
-    parent: trove-devstack-base
-    vars:
-      trove_test_datastore: mongodb
-      trove_test_group: mongodb-supported-single
-      devstack_localrc:
-        TROVE_ENABLE_IMAGE_BUILD: false
-
-- job:
-    name: trove-scenario-mongodb-multi
-    parent: trove-devstack-base
-    vars:
-      trove_test_datastore: mongodb
-      trove_test_group: mongodb-supported-multi
-      devstack_localrc:
-        TROVE_ENABLE_IMAGE_BUILD: false
-
 - job:
     name: trove-scenario-mysql-single
     parent: trove-devstack-base
@@ -318,12 +294,6 @@
       devstack_localrc:
         TROVE_ENABLE_IMAGE_BUILD: false

-- job:
-    name: trove-tox-fakemodetests
-    parent: trove-fakemodetests-base
-    vars:
-      tox_envlist: fakemodetests
-
 - job:
     name: trove-tox-bandit-baseline
     parent: openstack-tox
@@ -419,6 +389,7 @@
     guest_username: ubuntu
     branch: master
     dev_mode: false
+    image_suffix: ""

 - job:
     name: publish-trove-guest-image-mysql-ubuntu-xenial-dev
@@ -433,3 +404,4 @@
     guest_username: ubuntu
     branch: master
     dev_mode: true
+    image_suffix: "-dev"
@@ -53,22 +53,12 @@ Shows database instance details.
 Lists the status and details of the database instance.

 Lists the volume size in gigabytes (GB) and the approximate GB
-used.
-
-After instance creation, the ``used`` value is greater than 0, which
+used. After instance creation, the ``used`` value is greater than 0, which
 is expected as databases may create some basic (non empty) files to
 represent an empty schema. The response does not include the ``used``
 attribute when the instance status is ``BUILD``, ``REBOOT``,
 ``RESIZE``, or ``ERROR``.

-The list operations return a DNS-resolvable host name for the
-database instance rather than an IP address. Because the host name
-always resolves to the correct IP address for the database
-instance, you do not need to maintain the mapping. Although the IP
-address might change when you resize, migrate, or perform other
-operations, the host name always resolves to the correct database
-instance.
-
 Normal response codes: 200

 Error response codes: badRequest(400), unauthorized(401), forbidden(403),

@@ -380,7 +370,7 @@ Upgrade datastore version
 Upgrade datastore version.

 During datastore version upgrading, the instance status changes to ``UPGRADE``,
-and changes back to ``ACTIVE`` after upgrading finishes, otherwise changes to
+and changes back to ``HEALTHY`` after upgrading finishes, otherwise changes to
 ``ERROR`` if the upgrading fails.

 Normal response codes: 202
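The UPGRADE-to-HEALTHY transition described above can be watched from any client that polls the instance. A minimal console sketch; the instance ID and the exact table layout are made up for illustration:

    $ # Poll until the upgrade settles (instance ID is hypothetical).
    $ openstack database instance show 3f2c1b4a -c status
    +--------+---------+
    | Field  | Value   |
    +--------+---------+
    | status | UPGRADE |
    +--------+---------+
    $ # ... after the upgrade finishes:
    $ openstack database instance show 3f2c1b4a -c status -f value
    HEALTHY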
@@ -32,7 +32,7 @@
     ],
     "name": "json_rack_instance",
     "region": "RegionOne",
-    "status": "ACTIVE",
+    "status": "HEALTHY",
     "updated": "2014-10-30T12:30:00",
     "volume": {
         "size": 2,

@@ -32,7 +32,7 @@
     ],
     "name": "json_rack_instance",
     "region": "RegionOne",
-    "status": "ACTIVE",
+    "status": "HEALTHY",
     "volume": {
         "size": 2
     }
@@ -217,6 +217,7 @@ function configure_trove {
     iniset_conditional $TROVE_CONF DEFAULT resize_time_out $TROVE_RESIZE_TIME_OUT
     iniset_conditional $TROVE_CONF DEFAULT usage_timeout $TROVE_USAGE_TIMEOUT
     iniset_conditional $TROVE_CONF DEFAULT state_change_wait_time $TROVE_STATE_CHANGE_WAIT_TIME
+    iniset_conditional $TROVE_CONF DEFAULT reboot_time_out 300

     configure_keystone_authtoken_middleware $TROVE_CONF trove
     iniset $TROVE_CONF service_credentials username trove
@@ -489,7 +490,7 @@ function create_guest_image {
     $TROVE_MANAGE datastore_version_update "$TROVE_DATASTORE_TYPE" "inactive_version" "manager1" $glance_image_id "" $INACTIVE
     $TROVE_MANAGE datastore_update Test_Datastore_1 ""

-    echo "Add validation rules if available"
+    echo "Add parameter validation rules if available"
     if [ -f $DEST/trove/trove/templates/$TROVE_DATASTORE_TYPE/validation-rules.json ]; then
         $TROVE_MANAGE db_load_datastore_config_parameters "$TROVE_DATASTORE_TYPE" "$TROVE_DATASTORE_VERSION" \
             $DEST/trove/trove/templates/$TROVE_DATASTORE_TYPE/validation-rules.json
@@ -163,7 +163,7 @@ This example shows you how to back up and restore a MySQL database.

    .. code-block:: console

-      $ openstack database instance INSTANCE_ID
+      $ openstack database instance show INSTANCE_ID

       +-------------------+--------------------------------------+
       | Property          | Value                                |
@@ -10,6 +10,7 @@ handling complex administrative tasks.
 .. toctree::
    :maxdepth: 1

+   instance-status.rst
    create-db.rst
    manage-db-and-users.rst
    backup-db.rst
doc/source/user/instance-status.rst (new file, 41 lines)
@@ -0,0 +1,41 @@
+========================
+Database instance status
+========================
+
+HEALTHY
+    The database service is functional, e.g. a table is accessible.
+
+RUNNING
+    The database service is alive, but may not be functional yet.
+
+SHUTDOWN
+    The database service is stopped.
+
+NEW
+    The database service creation request has just been received by Trove.
+
+BUILD
+    The database service is being installed.
+
+BLOCKED
+    The database service process exists, but the service is not accessible
+    for some reason.
+
+PROMOTE
+    Trove is replicating data within a replication group in order to promote
+    a new master instance.
+
+EJECT
+    The master election is happening within a replication group.
+
+RESTART_REQUIRED
+    The database service needs a restart, e.g. due to a configuration change.
+
+FAILED
+    The database service failed to spawn.
+
+ERROR
+    There are some errors in a running database service.
+
+DELETED
+    The database service is deleted.
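For a quick look at these values in practice, the Status column of the instance listing is enough. A hedged example; the instance name, ID, and column set are made up:

    $ openstack database instance list
    +--------------+------+---------+
    | ID           | Name | Status  |
    +--------------+------+---------+
    | 3f2c1b4a-... | db1  | HEALTHY |
    +--------------+------+---------+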
@@ -4,6 +4,9 @@
   file:
     path: '{{ ansible_user_dir }}/images'
     state: directory
+- name: Get output image name
 - name: Build Trove guest image
   shell: >-
     ./trovestack build-image \
@@ -12,7 +15,7 @@
     {{ guest_os_release }} \
     {{ dev_mode }} \
     {{ guest_username }} \
-    {{ ansible_user_dir }}/images/trove-{{ branch }}-{{ datastore_type }}-{{ guest_os }}-{{ guest_os_release }}
+    {{ ansible_user_dir }}/images/trove-{{ branch }}-{{ datastore_type }}-{{ guest_os }}-{{ guest_os_release }}{{ image_suffix }}
   args:
     chdir: "{{ ansible_user_dir }}/src/opendev.org/openstack/trove/integration/scripts"
   tags:
@@ -1,6 +1,7 @@
 ---
-deprecations: |
-  - The following config options are deprecated in favor of a separate
+deprecations:
+  - |
+    The following config options are deprecated in favor of a separate
     configuration section ``service_credentials`` introduced to define the
     Trove service user credentials for communication with other OpenStack
     services.
@@ -0,0 +1,8 @@
+---
+features:
+  - A new database service status ``HEALTHY`` is introduced to indicate that
+    the service is responsive. ``HEALTHY`` is the final status after
+    ``ACTIVE``.
+upgrade:
+  - Any existing scripts that rely on the database instance ``ACTIVE`` status
+    should now rely on ``HEALTHY`` status.
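The upgrade note above is the operational takeaway of this commit. A minimal shell sketch of the migration it asks for — a wait loop that tolerates both the old and the new "running" status, which is useful while clouds on either side of the change coexist; the instance ID is hypothetical:

    #!/bin/bash
    # Wait until a Trove instance is usable; accept both the old (ACTIVE)
    # and the new (HEALTHY) running statuses.
    instance_id="3f2c1b4a"
    while true; do
        status=$(openstack database instance show "$instance_id" -f value -c status)
        case "$status" in
            HEALTHY|ACTIVE) echo "instance is up ($status)"; break ;;
            ERROR)          echo "instance failed";          exit 1 ;;
            *)              sleep 10 ;;
        esac
    done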
run_tests.sh (deleted, 159 lines)
@@ -1,159 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-function usage {
-  echo "Usage: $0 [OPTION]..."
-  echo "Run Trove's test suite(s)"
-  echo ""
-  echo "  -V, --virtual-env        Always use virtualenv. Install automatically if not present"
-  echo "  -N, --no-virtual-env     Don't use virtualenv. Run tests in local environment"
-  echo "  -r, --recreate-db        Recreate the test database (deprecated, as this is now the default)."
-  echo "  -n, --no-recreate-db     Don't recreate the test database."
-  echo "  -x, --stop               Stop running tests after the first error or failure."
-  echo "  -f, --force              Force a clean re-build of the virtual environment. Useful when dependencies have been added."
-  echo "  -p, --pep8               Just run pep8"
-  echo "  -P, --no-pep8            Don't run pep8"
-  echo "  -c, --coverage           Generate coverage report"
-  echo "  -h, --help               Print this usage message"
-  echo "  --hide-elapsed           Don't print the elapsed time for each test along with slow test list"
-  echo ""
-  echo "Note: with no options specified, the script will try to run the tests in a virtual environment,"
-  echo "      If no virtualenv is found, the script will ask if you would like to create one.  If you "
-  echo "      prefer to run tests NOT in a virtual environment, simply pass the -N option."
-  exit
-}
-
-function process_option {
-  case "$1" in
-    -h|--help) usage;;
-    -V|--virtual-env) always_venv=1; never_venv=0;;
-    -N|--no-virtual-env) always_venv=0; never_venv=1;;
-    -r|--recreate-db) recreate_db=1;;
-    -n|--no-recreate-db) recreate_db=0;;
-    -f|--force) force=1;;
-    -p|--pep8) just_pep8=1;;
-    -P|--no-pep8) no_pep8=1;;
-    -c|--coverage) coverage=1;;
-    -*) noseopts="$noseopts $1";;
-    *) noseargs="$noseargs $1"
-  esac
-}
-
-venv=.venv
-with_venv=tools/with_venv.sh
-always_venv=0
-never_venv=0
-force=0
-noseargs=
-noseopts=
-wrapper=""
-just_pep8=0
-no_pep8=0
-coverage=0
-recreate_db=1
-
-for arg in "$@"; do
-  process_option $arg
-done
-
-# If enabled, tell nose to collect coverage data
-if [ $coverage -eq 1 ]; then
-  noseopts="$noseopts --with-coverage --cover-package=trove"
-fi
-
-function run_tests {
-  # Just run the test suites in current environment
-  ${wrapper} $NOSETESTS 2> run_tests.log
-  # If we get some short import error right away, print the error log directly
-  RESULT=$?
-  if [ "$RESULT" -ne "0" ];
-  then
-    ERRSIZE=`wc -l run_tests.log | awk '{print \$1}'`
-    if [ "$ERRSIZE" -lt "40" ];
-    then
-      cat run_tests.log
-    fi
-  fi
-  return $RESULT
-}
-
-function run_pep8 {
-  echo "Running pep8 ..."
-  # Just run PEP8 in current environment
-  #
-  # NOTE(sirp): W602 (deprecated 3-arg raise) is being ignored for the
-  # following reasons:
-  #
-  #  1. It's needed to preserve traceback information when re-raising
-  #     exceptions; this is needed b/c Eventlet will clear exceptions when
-  #     switching contexts.
-  #
-  #  2. There doesn't appear to be an alternative, "pep8-tool" compatible way of doing this
-  #     in Python 2 (in Python 3 `with_traceback` could be used).
-  #
-  #  3. Can find no corroborating evidence that this is deprecated in Python 2
-  #     other than what the PEP8 tool claims. It is deprecated in Python 3, so,
-  #     perhaps the mistake was thinking that the deprecation applied to Python 2
-  #     as well.
-  ${wrapper} flake8
-}
-
-NOSETESTS="python run_tests.py $noseopts $noseargs"
-
-if [ $never_venv -eq 0 ]
-then
-  # Remove the virtual environment if --force used
-  if [ $force -eq 1 ]; then
-    echo "Cleaning virtualenv..."
-    rm -rf ${venv}
-  fi
-  if [ -e ${venv} ]; then
-    wrapper="${with_venv}"
-  else
-    if [ $always_venv -eq 1 ]; then
-      # Automatically install the virtualenv
-      python tools/install_venv.py
-      wrapper="${with_venv}"
-    else
-      echo -e "No virtual environment found...create one? (Y/n) \c"
-      read use_ve
-      if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then
-        # Install the virtualenv and run the test suite in it
-        python tools/install_venv.py
-        wrapper=${with_venv}
-      fi
-    fi
-  fi
-fi
-
-# Delete old coverage data from previous runs
-if [ $coverage -eq 1 ]; then
-  ${wrapper} coverage erase
-fi
-
-if [ $just_pep8 -eq 1 ]; then
-  run_pep8
-  exit
-fi
-
-if [ $recreate_db -eq 1 ]; then
-  rm -f tests.sqlite
-fi
-
-run_tests
-
-# NOTE(sirp): we only want to run pep8 when we're running the full-test suite,
-# not when we're running tests individually. To handle this, we need to
-# distinguish between options (noseopts), which begin with a '-', and
-# arguments (noseargs).
-if [ -z "$noseargs" ]; then
-  if [ $no_pep8 -eq 0 ]; then
-    run_pep8
-  fi
-fi
-
-if [ $coverage -eq 1 ]; then
-  echo "Generating coverage report in covhtml/"
-  ${wrapper} coverage html -d covhtml -i
-fi
tox.ini (3 lines changed)
@@ -34,9 +34,6 @@ commands =
     flake8
     doc8 {posargs}

-[testenv:fakemodetests]
-commands = {envpython} run_tests.py
-
 [testenv:debug]
 commands = oslo_debug_helper {posargs}
@@ -35,6 +35,7 @@ class ServiceStatus(object):
             ServiceStatuses.SHUTDOWN._code,
             ServiceStatuses.CRASHED._code,
             ServiceStatuses.BLOCKED._code,
+            ServiceStatuses.HEALTHY._code,
         ]
         return self._code in allowed_statuses

@@ -102,6 +103,8 @@ class ServiceStatuses(object):
     INSTANCE_READY = ServiceStatus(0x19, 'instance ready', 'BUILD')
     RESTART_REQUIRED = ServiceStatus(0x20, 'restart required',
                                      'RESTART_REQUIRED')
+    HEALTHY = ServiceStatus(0x21, 'healthy', 'HEALTHY')


 # Dissuade further additions at run-time.
 ServiceStatus.__init__ = None
@@ -31,7 +31,9 @@ def load_and_verify(context, instance_id):
     instance = base_models.Instance.load(context, instance_id)
     if not instance.is_datastore_running:
         raise exception.UnprocessableEntity(
-            "Instance %s is not ready." % instance.id)
+            "Instance %s is not ready, status: %s." %
+            (instance.id, instance.datastore_status.status)
+        )
     else:
         return instance
@@ -241,16 +241,21 @@ class Manager(periodic_task.PeriodicTasks):

         self._guest_log_loaded_context = self.guest_log_context

+    ################
+    # Status related
+    ################
+    def get_service_status(self):
+        return self.status._get_actual_db_status()
+
     @periodic_task.periodic_task
     def update_status(self, context):
-        """Update the status of the trove instance. It is decorated with
-        perodic_task so it is called automatically.
-        """
-        LOG.debug("Update status called.")
-        self.status.update()
+        """Update the status of the trove instance."""
+        if not self.status.is_installed or self.status._is_restarting:
+            LOG.info("Database service is not installed or is in restart "
+                     "mode, skip status check")
+            return
+
+        LOG.debug("Starting to check database service status")
+
+        status = self.get_service_status()
+        self.status.set_status(status)

     def rpc_ping(self, context):
         LOG.debug("Responding to RPC ping.")
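The division of labor above is the point of the change: the periodic `update_status` stays generic, while each datastore overrides only `get_service_status`. A self-contained sketch of the pattern with stand-in names — not the real Trove classes:

    class ServiceStatuses(object):
        RUNNING = 'RUNNING'
        HEALTHY = 'HEALTHY'


    class BaseManager(object):
        """Sketch: the periodic task persists whatever the probe reports."""

        def __init__(self, probe, store):
            self.probe = probe  # process-level check, e.g. "is mysqld alive?"
            self.store = store  # persists the status for the API layer

        def get_service_status(self):
            return self.probe()

        def update_status(self):
            # In Trove this runs as a periodic task; the "not installed" and
            # "restarting" guards are omitted here.
            self.store(self.get_service_status())


    class MySqlLikeManager(BaseManager):
        def __init__(self, probe, store, query):
            super(MySqlLikeManager, self).__init__(probe, store)
            self.query = query  # issues a trivial SQL statement

        def get_service_status(self):
            try:
                self.query("SELECT 1;")  # responsive => HEALTHY
                return ServiceStatuses.HEALTHY
            except Exception:
                # Unresponsive: fall back to the process-level answer.
                return super(MySqlLikeManager, self).get_service_status()


    mgr = MySqlLikeManager(probe=lambda: ServiceStatuses.RUNNING,
                           store=print, query=lambda sql: None)
    mgr.update_status()  # prints: HEALTHY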
@@ -120,6 +120,19 @@ class MySqlManager(manager.Manager):
             },
         }

+    def get_service_status(self):
+        try:
+            app = self.mysql_app(self.status)
+            with service.BaseLocalSqlClient(app.get_engine()) as client:
+                cmd = "SELECT 1;"
+                client.execute(cmd)
+
+            LOG.debug("Database service check: database query is responsive")
+            return rd_instance.ServiceStatuses.HEALTHY
+        except Exception as e:
+            LOG.warning('Failed to query database, error: %s', str(e))
+            return super(MySqlManager, self).get_service_status()
+
     def change_passwords(self, context, users):
         with EndNotification(context):
             self.mysql_admin().change_passwords(users)
@@ -152,37 +152,46 @@ class BaseMySqlAppStatus(service.BaseDbStatus):
         return cls._instance

     def _get_actual_db_status(self):
+        """Check database service status.
+
+        The checks which don't need service app can be put here.
+        """
         try:
             utils.execute_with_timeout(
                 "/usr/bin/mysqladmin",
                 "ping", run_as_root=True, root_helper="sudo",
                 log_output_on_error=True)
-            LOG.debug("MySQL Service Status is RUNNING.")
+
+            LOG.debug("Database service check: mysqld is alive")
             return rd_instance.ServiceStatuses.RUNNING
         except exception.ProcessExecutionError:
-            LOG.warning("Failed to get database status.")
-            try:
-                out, _ = utils.execute_with_timeout(
-                    "/bin/ps", "-C", "mysqld", "h",
-                    log_output_on_error=True
-                )
-                pid = out.split()[0]
-                # TODO(rnirmal): Need to create new statuses for instances
-                # where the mysql service is up, but unresponsive
-                LOG.info('MySQL Service Status %(pid)s is BLOCKED.',
-                         {'pid': pid})
-                return rd_instance.ServiceStatuses.BLOCKED
-            except exception.ProcessExecutionError:
-                LOG.warning("Process execution failed.")
-                mysql_args = load_mysqld_options()
-                pid_file = mysql_args.get('pid_file',
-                                          ['/var/run/mysqld/mysqld.pid'])[0]
-                if os.path.exists(pid_file):
-                    LOG.info("MySQL Service Status is CRASHED.")
-                    return rd_instance.ServiceStatuses.CRASHED
-                else:
-                    LOG.info("MySQL Service Status is SHUTDOWN.")
-                    return rd_instance.ServiceStatuses.SHUTDOWN
+            LOG.warning("Database service check: Failed to get database "
+                        "service status by mysqladmin, fall back to use ps.")
+
+        try:
+            out, _ = utils.execute_with_timeout(
+                "/bin/ps", "-C", "mysqld", "h",
+                log_output_on_error=True
+            )
+            pid = out.split()[0]
+
+            LOG.debug('Database service check: service PID %s exists', pid)
+            return rd_instance.ServiceStatuses.BLOCKED
+        except exception.ProcessExecutionError:
+            LOG.warning("Database service check: Failed to get database "
+                        "service status by ps, fall back to check PID file.")
+
+            mysql_args = load_mysqld_options()
+            pid_file = mysql_args.get('pid_file',
+                                      ['/var/run/mysqld/mysqld.pid'])[0]
+            if os.path.exists(pid_file):
+                LOG.info("Database service check: MySQL Service Status is "
+                         "CRASHED.")
+                return rd_instance.ServiceStatuses.CRASHED
+            else:
+                LOG.info("Database service check: MySQL Service Status is "
+                         "SHUTDOWN.")
+                return rd_instance.ServiceStatuses.SHUTDOWN


 class BaseLocalSqlClient(object):
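Stripped of logging, the probe above is a three-step fallback. A self-contained sketch of the same idea, assuming `mysqladmin` and `ps` are on PATH (and that `mysqladmin ping` can run without extra credentials, which may not hold on a hardened install):

    import os
    import subprocess

    def mysql_process_status(pid_file='/var/run/mysqld/mysqld.pid'):
        """Three-step fallback: mysqladmin ping -> ps -> PID file."""
        # 1. Is mysqld answering? (RUNNING; a SQL probe may upgrade to HEALTHY.)
        if subprocess.call(['mysqladmin', 'ping']) == 0:
            return 'RUNNING'
        # 2. Does the process exist even though it is unresponsive? (BLOCKED)
        if subprocess.call(['ps', '-C', 'mysqld']) == 0:
            return 'BLOCKED'
        # 3. No process: a stale PID file means a crash, else a clean stop.
        return 'CRASHED' if os.path.exists(pid_file) else 'SHUTDOWN'

    if __name__ == '__main__':
        print(mysql_process_status())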
@@ -181,13 +181,8 @@ class BaseDbStatus(object):
         The database is updated and the status is also returned.
         """
-        if self.is_installed and not self._is_restarting:
-            LOG.debug("Determining status of DB server.")
             status = self._get_actual_db_status()
             self.set_status(status)
-        else:
-            LOG.info("DB server is not installed or is in restart mode, so "
-                     "for now we'll skip determining the status of DB on "
-                     "this instance.")

     def restart_db_service(self, service_candidates, timeout):
         """Restart the database.
@@ -97,6 +97,7 @@ def load_server(context, instance_id, server_id, region_name):


 class InstanceStatus(object):
+    HEALTHY = "HEALTHY"
     ACTIVE = "ACTIVE"
     BLOCKED = "BLOCKED"
     BUILD = "BUILD"
@@ -362,7 +363,8 @@ class SimpleInstance(object):

         # Report as Shutdown while deleting, unless there's an error.
         if 'DELETING' == action:
-            if self.db_info.server_status in ["ACTIVE", "SHUTDOWN", "DELETED"]:
+            if self.db_info.server_status in ["ACTIVE", "SHUTDOWN", "DELETED",
+                                              "HEALTHY"]:
                 return InstanceStatus.SHUTDOWN
             else:
                 LOG.error("While shutting down instance (%(instance)s): "
@@ -1415,7 +1417,7 @@ class Instance(BuiltInstance):
         """
         # cases where action cannot be performed
         status_type = 'instance'
-        if self.db_info.server_status != 'ACTIVE':
+        if self.db_info.server_status not in ['ACTIVE', 'HEALTHY']:
             status = self.db_info.server_status
         elif (self.db_info.task_status != InstanceTasks.NONE and
               self.db_info.task_status != InstanceTasks.RESTART_REQUIRED):
@@ -1989,4 +1991,7 @@ def persisted_models():
     }


-MYSQL_RESPONSIVE_STATUSES = [tr_instance.ServiceStatuses.RUNNING]
+MYSQL_RESPONSIVE_STATUSES = [
+    tr_instance.ServiceStatuses.RUNNING,
+    tr_instance.ServiceStatuses.HEALTHY
+]
@@ -95,9 +95,10 @@ class InstanceController(wsgi.Controller):
         selected_action = _actions[key]
         action_name = key
         LOG.info("Performing %(action_name)s action against "
-                 "instance %(instance_id)s for tenant '%(tenant_id)s'",
+                 "instance %(instance_id)s for tenant %(tenant_id)s, "
+                 "body: %(body)s",
                  {'action_name': action_name, 'instance_id': id,
-                  'tenant_id': tenant_id})
+                  'tenant_id': tenant_id, 'body': body})
         needs_server = True
         if action_name in ['reset_status']:
             needs_server = False

@@ -764,9 +764,11 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
         """
         service = InstanceServiceStatus.find_by(instance_id=self.id)
         status = service.get_status()

         if (status == rd_instance.ServiceStatuses.RUNNING or
-                status == rd_instance.ServiceStatuses.INSTANCE_READY):
-            return True
+                status == rd_instance.ServiceStatuses.INSTANCE_READY or
+                status == rd_instance.ServiceStatuses.HEALTHY):
+            return True
         elif status not in [rd_instance.ServiceStatuses.NEW,
                             rd_instance.ServiceStatuses.BUILDING,
                             rd_instance.ServiceStatuses.UNKNOWN,
@@ -1388,6 +1390,7 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
         utils.poll_until(
             server_finished_rebuilding,
             sleep_time=2, time_out=600)

         if not self.server_status_matches(['ACTIVE']):
             raise TroveError(_("Instance %(instance)s failed to "
                                "upgrade to %(datastore_version)s"),
@@ -1752,7 +1755,8 @@ class ResizeVolumeAction(object):
                      'old_volume_size': self.old_size,
                      'new_size': self.new_size})

-        if self.instance.server.status == InstanceStatus.ACTIVE:
+        if self.instance.server.status in [InstanceStatus.ACTIVE,
+                                           InstanceStatus.HEALTHY]:
             self._resize_active_volume()
             self.instance.reset_task_status()
             # send usage event for size reported by cinder
@@ -1768,13 +1772,17 @@ class ResizeVolumeAction(object):
             ).notify()
         else:
             self.instance.reset_task_status()
-            msg = _("Failed to resize instance %(id)s volume for server "
-                    "%(server_id)s. The instance must be in state %(state)s "
-                    "not %(inst_state)s.") % {
-                        'id': self.instance.id,
-                        'server_id': self.instance.server.id,
-                        'state': InstanceStatus.ACTIVE,
-                        'inst_state': self.instance.server.status}
+            msg = (
+                "Failed to resize instance %(id)s volume for server "
+                "%(server_id)s. The instance must be in state %(state)s "
+                "not %(inst_state)s." %
+                {
+                    'id': self.instance.id,
+                    'server_id': self.instance.server.id,
+                    'state': [InstanceStatus.ACTIVE, InstanceStatus.HEALTHY],
+                    'inst_state': self.instance.server.status
+                }
+            )
             raise TroveError(msg)
@@ -82,7 +82,7 @@ class CreateBackups(object):
         assert_equal('NEW', result.status)
         instance = instance_info.dbaas.instances.get(instance_info.id)

-        assert_true(instance.status in ['ACTIVE', 'BACKUP'])
+        assert_true(instance.status in ['ACTIVE', 'BACKUP', 'HEALTHY'])
         assert_equal(instance_info.dbaas_datastore,
                      result.datastore['type'])
         assert_equal(instance_info.dbaas_datastore_version,
@@ -130,7 +130,7 @@ class BackupRestoreMixin(object):
         # This version just checks the REST API status.
         def result_is_active():
             instance = instance_info.dbaas.instances.get(instance_id)
-            if instance.status == "ACTIVE":
+            if instance.status in CONFIG.running_status:
                 return True
             else:
                 # If its not ACTIVE, anything but BUILD must be
@@ -311,7 +311,7 @@ class WaitForRestoreToFinish(object):
         # This version just checks the REST API status.
         def result_is_active():
             instance = instance_info.dbaas.instances.get(instance_id_to_poll)
-            if instance.status == "ACTIVE":
+            if instance.status in CONFIG.running_status:
                 return True
             else:
                 # If its not ACTIVE, anything but BUILD must be
@@ -494,7 +494,7 @@ class ListConfigurations(ConfigurationsTestBase):
         def result_is_not_active():
             instance = instance_info.dbaas.instances.get(
                 instance_info.id)
-            if instance.status == "ACTIVE":
+            if instance.status in CONFIG.running_status:
                 return False
             else:
                 return True
@@ -503,7 +503,6 @@ class ListConfigurations(ConfigurationsTestBase):
         instance = instance_info.dbaas.instances.get(instance_info.id)
         resp, body = instance_info.dbaas.client.last_response
         assert_equal(resp.status, 200)
-        print(instance.status)
         assert_equal('RESTART_REQUIRED', instance.status)

     @test(depends_on=[test_waiting_for_instance_in_restart_required])
@@ -516,7 +515,7 @@ class ListConfigurations(ConfigurationsTestBase):
         def result_is_active():
             instance = instance_info.dbaas.instances.get(
                 instance_info.id)
-            if instance.status == "ACTIVE":
+            if instance.status in CONFIG.running_status:
                 return True
             else:
                 assert_equal("REBOOT", instance.status)
@@ -600,7 +599,7 @@ class WaitForConfigurationInstanceToFinish(ConfigurationsTestBase):
         def result_is_active():
             instance = instance_info.dbaas.instances.get(
                 configuration_instance.id)
-            if instance.status == "ACTIVE":
+            if instance.status in CONFIG.running_status:
                 return True
             else:
                 assert_equal("BUILD", instance.status)
@@ -741,7 +740,7 @@ class DeleteConfigurations(ConfigurationsTestBase):
         def result_is_not_active():
             instance = instance_info.dbaas.instances.get(
                 instance_info.id)
-            if instance.status == "ACTIVE":
+            if instance.status in CONFIG.running_status:
                 return False
             else:
                 return True
@@ -750,10 +749,8 @@ class DeleteConfigurations(ConfigurationsTestBase):
         config = instance_info.dbaas.configurations.list()
         print(config)
         instance = instance_info.dbaas.instances.get(instance_info.id)
-        print(instance.__dict__)
         resp, body = instance_info.dbaas.client.last_response
         assert_equal(resp.status, 200)
-        print(instance.status)
         assert_equal('RESTART_REQUIRED', instance.status)

     @test(depends_on=[test_restart_service_after_unassign_return_active])
@@ -767,7 +764,7 @@ class DeleteConfigurations(ConfigurationsTestBase):
         def result_is_active():
             instance = instance_info.dbaas.instances.get(
                 instance_info.id)
-            if instance.status == "ACTIVE":
+            if instance.status in CONFIG.running_status:
                 return True
             else:
                 assert_equal("REBOOT", instance.status)
@@ -809,7 +806,7 @@ class DeleteConfigurations(ConfigurationsTestBase):
         def result_is_active():
             instance = instance_info.dbaas.instances.get(
                 instance_info.id)
-            if instance.status == "ACTIVE":
+            if instance.status in CONFIG.running_status:
                 return True
             else:
                 assert_equal("REBOOT", instance.status)
@@ -838,11 +835,12 @@ class DeleteConfigurations(ConfigurationsTestBase):
         def result_is_active():
             instance = instance_info.dbaas.instances.get(
                 instance_info.id)
-            if instance.status == "ACTIVE":
+            if instance.status in CONFIG.running_status:
                 return True
             else:
                 assert_equal("REBOOT", instance.status)
                 return False

         poll_until(result_is_active)
         result = instance_info.dbaas.configurations.get(configuration_info.id)
         assert_equal(result.instance_count, 0)
@@ -19,6 +19,7 @@ import time
 import unittest
 import uuid

+from proboscis import asserts
 from proboscis.asserts import assert_equal
 from proboscis.asserts import assert_is_not_none
 from proboscis.asserts import assert_not_equal
@@ -766,7 +767,7 @@ class CreateInstanceFlavors(object):

     def _result_is_active(self):
         instance = dbaas.instances.get(self.result.id)
-        if instance.status == "ACTIVE":
+        if instance.status in CONFIG.running_status:
             return True
         else:
             # If its not ACTIVE, anything but BUILD must be
@@ -878,7 +879,7 @@ class WaitForGuestInstallationToFinish(object):
         # This version just checks the REST API status.
         def result_is_active():
             instance = dbaas.instances.get(instance_info.id)
-            if instance.status == "ACTIVE":
+            if instance.status in CONFIG.running_status:
                 return True
             else:
                 # If its not ACTIVE, anything but BUILD must be
@@ -1038,7 +1039,7 @@ class TestInstanceListing(object):
     def test_get_instance_status(self):
         result = dbaas.instances.get(instance_info.id)
         assert_equal(200, dbaas.last_http_code)
-        assert_equal("ACTIVE", result.status)
+        asserts.assert_true(result.status in CONFIG.running_status)

     @test
     def test_get_legacy_status(self):
@@ -1083,10 +1084,11 @@ class TestInstanceListing(object):
         assert_equal(200, self.other_client.last_http_code)
         admin_ids = [instance.id for instance in dbaas.instances.list()]
         assert_equal(200, dbaas.last_http_code)
         assert_equal(len(daffy_ids), 0)

         assert_not_equal(sorted(admin_ids), sorted(daffy_ids))
         assert_raises(exceptions.NotFound,
                       self.other_client.instances.get, instance_info.id)

         for id in admin_ids:
             assert_equal(daffy_ids.count(id), 0)
@@ -163,7 +163,7 @@ class ActionTestBase(object):
                 "MySQL process can not be found.")

         asserts.assert_is_not_none(self.instance)
-        asserts.assert_equal(self.instance.status, "ACTIVE")
+        asserts.assert_true(self.instance.status in CONFIG.running_status)

     def find_mysql_proc_on_instance(self):
         server = create_server_connection(
@@ -240,9 +240,9 @@ class RebootTestBase(ActionTestBase):
         def is_finished_rebooting():
             instance = self.instance
             asserts.assert_not_equal(instance.status, "ERROR")
-            if instance.status != "ACTIVE":
-                return False
-            return True
+            if instance.status in CONFIG.running_status:
+                return True
+            return False

         poll_until(is_finished_rebooting, time_out=TIME_OUT_TIME)

@@ -263,7 +263,7 @@ class RebootTestBase(ActionTestBase):
         """Wait until status becomes running."""
         def is_finished_rebooting():
             instance = self.instance
-            if instance.status == "REBOOT" or instance.status == "ACTIVE":
+            if instance.status in ['REBOOT', 'ACTIVE', 'HEALTHY']:
                 return False
             # The reason we check for BLOCKED as well as SHUTDOWN is because
             # Upstart might try to bring mysql back up after the borked
@@ -338,7 +338,7 @@ class StopTests(RebootTestBase):
             check.true(isinstance(instance.volume.get('used', None), float))

     @test(depends_on=[test_volume_info_while_mysql_is_down])
-    def test_successful_restart_when_in_shutdown_state(self):
+    def test_successful_restart_from_shutdown(self):
         """Restart MySQL via the REST API successfully when MySQL is down."""
         self.successful_restart()

@@ -391,8 +391,9 @@ class ResizeInstanceTest(ActionTestBase):
             instance = self.instance
             if instance.status == "RESIZE":
                 return False
-            asserts.assert_equal("ACTIVE", instance.status)
+            asserts.assert_true(instance.status in CONFIG.running_status)
             return True

         poll_until(is_finished_resizing, time_out=TIME_OUT_TIME)

     @before_class
@@ -415,9 +416,10 @@ class ResizeInstanceTest(ActionTestBase):
         flavors = self.dbaas.find_flavors_by_name(flavor_name)

         def is_active():
-            return self.instance.status == 'ACTIVE'
+            return self.instance.status in CONFIG.running_status

         poll_until(is_active, time_out=TIME_OUT_TIME)
-        asserts.assert_equal(self.instance.status, 'ACTIVE')
+        asserts.assert_true(self.instance.status in CONFIG.running_status)

         asserts.assert_raises(HTTPNotImplemented,
                               self.dbaas.instances.resize_instance,
@@ -540,7 +542,7 @@ class ResizeInstanceVolume(ActionTestBase):

         def check_resize_status():
             instance = instance_info.dbaas.instances.get(instance_info.id)
-            if instance.status == "ACTIVE":
+            if instance.status in CONFIG.running_status:
                 return True
             elif instance.status == "RESIZE":
                 return False
@@ -54,7 +54,7 @@ class TestBase(object):
             nics=instance_info.nics)
         return result.id

-    def wait_for_instance_status(self, instance_id, status="ACTIVE",
+    def wait_for_instance_status(self, instance_id, status="HEALTHY",
                                  acceptable_states=None):
         if acceptable_states:
             acceptable_states.append(status)
@@ -64,6 +64,7 @@ class TestBase(object):
                 assert_true(instance.status in acceptable_states,
                             "Invalid status: %s" % instance.status)
             return instance

         poll_until(lambda: self.dbaas.instances.get(instance_id),
                    lambda instance: assert_state(instance).status == status,
                    time_out=30, sleep_time=1)
@@ -151,10 +152,10 @@ class ErroredInstanceDelete(TestBase):
     @time_out(30)
     def delete_error_on_delete_instance(self):
         id = self.delete_error
-        self.wait_for_instance_status(id, 'ACTIVE')
+        self.wait_for_instance_status(id, 'HEALTHY')
         self.wait_for_instance_task_status(id, 'No tasks for the instance.')
         instance = self.dbaas.management.show(id)
-        asserts.assert_equal(instance.status, "ACTIVE")
+        asserts.assert_equal(instance.status, "HEALTHY")
         asserts.assert_equal(instance.task_description,
                              'No tasks for the instance.')
         # Try to delete the instance. This fails the first time due to how

@@ -63,7 +63,7 @@ class TestBase(object):

     def _wait_for_active(self):
         poll_until(lambda: self.client.instances.get(self.id),
-                   lambda instance: instance.status == "ACTIVE",
+                   lambda instance: instance.status in CONFIG.running_status,
                    time_out=(60 * 8))

     @test
@@ -142,7 +142,7 @@ class MalformedJson(object):
     def test_bad_resize_instance_data(self):
         def _check_instance_status():
             inst = self.dbaas.instances.get(self.instance)
-            if inst.status == "ACTIVE":
+            if inst.status in CONFIG.running_status:
                 return True
             else:
                 return False
@@ -161,7 +161,7 @@ class MalformedJson(object):
     def test_bad_resize_vol_data(self):
         def _check_instance_status():
             inst = self.dbaas.instances.get(self.instance)
-            if inst.status == "ACTIVE":
+            if inst.status in CONFIG.running_status:
                 return True
             else:
                 return False
@@ -195,7 +195,7 @@ class MalformedJson(object):

         def _check_instance_status():
             inst = self.dbaas.instances.get(self.instance)
-            if inst.status == "ACTIVE":
+            if inst.status in CONFIG.running_status:
                 return True
             else:
                 return False
@@ -227,7 +227,7 @@ class MalformedJson(object):

         def _check_instance_status():
             inst = self.dbaas.instances.get(self.instance)
-            if inst.status == "ACTIVE":
+            if inst.status in CONFIG.running_status:
                 return True
             else:
                 return False
@@ -248,7 +248,7 @@ class MalformedJson(object):

         def _check_instance_status():
             inst = self.dbaas.instances.get(self.instance)
-            if inst.status == "ACTIVE":
+            if inst.status in CONFIG.running_status:
                 return True
             else:
                 return False
@@ -92,7 +92,7 @@ def backup_count_matches(count):

 def instance_is_active(id):
     instance = instance_info.dbaas.instances.get(id)
-    if instance.status == "ACTIVE":
+    if instance.status in CONFIG.running_status:
         return True
     else:
         assert_true(instance.status in ['PROMOTE', 'EJECT', 'BUILD', 'BACKUP'])

@@ -94,7 +94,7 @@ class TestRoot(object):

         def result_is_active():
             instance = self.dbaas.instances.get(self.id)
-            if instance.status == "ACTIVE":
+            if instance.status in CONFIG.running_status:
                 return True
             else:
                 # If its not ACTIVE, anything but BUILD must be
@@ -25,6 +25,7 @@ from trove import tests
 from trove.tests.api.databases import TestMysqlAccess
 from trove.tests.api.instances import instance_info
 from trove.tests.api.users import TestUsers
+from trove.tests.config import CONFIG
 from trove.tests import util

 CONF = cfg.CONF
@@ -59,7 +60,7 @@ class TestRootOnCreate(object):

         def result_is_active():
             instance = self.dbaas.instances.get(new_id)
-            if instance.status == "ACTIVE":
+            if instance.status in CONFIG.running_status:
                 return True
             else:
                 assert_equal("BUILD", instance.status)
@@ -125,6 +125,7 @@ class TestConfig(object):
             "redis": {"volume_support": False},
             "swift_enabled": True,
             "trove_mgmt_network": "trove-mgmt",
+            "running_status": ["ACTIVE", "HEALTHY"],
         }
         self._frozen_values = FrozenDict(self._values)
         self._users = None
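The `running_status` entry above is what lets the test changes in this commit stop hard-coding "ACTIVE": every poll loop now asks whether the status is in that list. A self-contained sketch of the polling-helper pattern, with a plain list standing in for `CONFIG.running_status` and a callable standing in for the test client:

    import time

    RUNNING_STATUS = ["ACTIVE", "HEALTHY"]  # mirrors CONFIG.running_status

    def wait_until_running(get_status, timeout=600, interval=10):
        """Poll a status callable until it reports a 'running' status.

        get_status: callable returning the instance status string; using a
        callable keeps the sketch independent of any particular client.
        """
        deadline = time.time() + timeout
        while time.time() < deadline:
            status = get_status()
            if status in RUNNING_STATUS:
                return status
            if status == "ERROR":
                raise RuntimeError("instance went to ERROR")
            time.sleep(interval)
        raise TimeoutError("instance never became ACTIVE/HEALTHY")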
@@ -17,6 +17,7 @@ import json
 import time

 from oslo_log import log as logging
+from proboscis import asserts
 from proboscis.asserts import assert_equal
 from proboscis.asserts import assert_true
 from proboscis.asserts import Check
@@ -322,7 +323,7 @@ class CreateInstance(Example):
     def an_instance_is_not_active(self):
         for instance in self.instances:
             instance = self.client.instances.get(instance.id)
-            if instance.status != "ACTIVE":
+            if instance.status not in CONFIG.running_status:
                 assert_equal(instance.status, "BUILD")
                 return True
         return False
@@ -521,8 +522,7 @@ class ActiveMixin(Example):
     def _wait_for_active(self, *acceptable_states):
         global json_instance
         json_instance = self.client.instances.get(json_instance.id)
-        print('instance.status=%s' % json_instance.status)
-        while json_instance.status != "ACTIVE":
+        while json_instance.status not in CONFIG.running_status:
             assert_true(
                 json_instance.status in acceptable_states,
                 "Instance status == %s; expected it to be one of: %s"
@@ -533,8 +533,7 @@ class ActiveMixin(Example):
     def _wait_for_restore_active(self, *acceptable_states):
         for instance in (self.json_restore, ):
             instance = self.client.instances.get(instance.id)
-            print('instance.status=%s' % instance.status)
-            while instance.status != "ACTIVE":
+            while instance.status not in CONFIG.running_status:
                 assert_true(
                     instance.status in acceptable_states,
                     "Instance status == %s; expected it to be one of: %s"
@@ -810,7 +809,7 @@ class InstanceList(Example):
         third_instance = self.client.instances.create(
             "The Third Instance", 1, volume={'size': 2})
         third_instance = self.client.instances.get(third_instance.id)
-        while third_instance.status != "ACTIVE":
+        while third_instance.status not in CONFIG.running_status:
             time.sleep(0.1)
             third_instance = self.client.instances.get(third_instance.id)

@@ -909,7 +908,7 @@ class Backups(ActiveMixin):
         self.json_restore = results[JSON_INDEX]
         self._wait_for_restore_active("BUILD")
         self.json_restore = self.client.instances.get(self.json_restore.id)
-        assert_equal(self.json_restore.status, "ACTIVE")
+        asserts.assert_true(self.json_restore.status in CONFIG.running_status)

     @test(depends_on=[restore])
     def delete_restores(self):
@@ -1013,7 +1012,7 @@ class MgmtHosts(Example):
         for host in results:
             check.equal(1, len(host.instances))
             for instance in host.instances:
-                check.equal(instance['status'], 'ACTIVE')
+                check.equal(instance['status'], 'HEALTHY')
                 check.true(isinstance(instance['name'], six.string_types))
                 check.true(isinstance(instance['id'], six.string_types))
                 check.true(isinstance(instance['server_id'],
@@ -238,15 +238,15 @@ class FakeGuest(object):
             if instance_name.endswith('GUEST_ERROR'):
                 status.status = rd_instance.ServiceStatuses.FAILED
             else:
-                status.status = rd_instance.ServiceStatuses.RUNNING
+                status.status = rd_instance.ServiceStatuses.HEALTHY
             status.save()
             AgentHeartBeat.create(instance_id=self.id)
         eventlet.spawn_after(3.5, update_db)

-    def _set_task_status(self, new_status='RUNNING'):
+    def _set_task_status(self, new_status='HEALTHY'):
         from trove.instance.models import InstanceServiceStatus
         print("Setting status to %s" % new_status)
-        states = {'RUNNING': rd_instance.ServiceStatuses.RUNNING,
+        states = {'HEALTHY': rd_instance.ServiceStatuses.HEALTHY,
                   'SHUTDOWN': rd_instance.ServiceStatuses.SHUTDOWN,
                   }
         status = InstanceServiceStatus.find_by(instance_id=self.id)
@@ -259,7 +259,7 @@ class FakeGuest(object):
         # take a nap.
         print("Sleeping for a second.")
         time.sleep(1)
-        self._set_task_status('RUNNING')
+        self._set_task_status('HEALTHY')

     def reset_configuration(self, config):
         # There's nothing to do here, since there is no config to update.
@@ -267,7 +267,7 @@ class FakeGuest(object):

     def start_db_with_conf_changes(self, config_contents):
         time.sleep(2)
-        self._set_task_status('RUNNING')
+        self._set_task_status('HEALTHY')

     def stop_db(self, do_not_start_on_reboot=False):
         self._set_task_status('SHUTDOWN')
@@ -134,7 +134,7 @@ class FakeServer(object):
     def confirm_resize(self):
         if self.status != "VERIFY_RESIZE":
             raise RuntimeError("Not in resize confirm mode.")
-        self._current_status = "ACTIVE"
+        self._current_status = "HEALTHY"

     def revert_resize(self):
         if self.status != "VERIFY_RESIZE":
@@ -143,13 +143,13 @@ class FakeServer(object):
         self.old_host = None
         self.flavor_ref = self.old_flavor_ref
         self.old_flavor_ref = None
-        self._current_status = "ACTIVE"
+        self._current_status = "HEALTHY"

     def reboot(self):
         LOG.debug("Rebooting server %s", self.id)

         def set_to_active():
-            self._current_status = "ACTIVE"
+            self._current_status = "HEALTHY"
             self.parent.schedule_simulate_running_server(self.id, 1.5)

         self._current_status = "REBOOT"
@@ -204,7 +204,7 @@ class FakeServer(object):

         def set_flavor():
             if self.name.endswith("_RESIZE_ERROR"):
-                self._current_status = "ACTIVE"
+                self._current_status = "HEALTHY"
                 return
             if new_flavor_id is None:
                 # Migrations are flavorless flavor resizes.
@@ -282,7 +282,7 @@ class FakeServers(object):
             raise nova_exceptions.ClientException("The requested availability "
                                                   "zone is not available.")

-        server.schedule_status("ACTIVE", 1)
+        server.schedule_status("HEALTHY", 1)
         LOG.info("FAKE_SERVERS_DB : %s", str(FAKE_SERVERS_DB))
         return server
@@ -196,7 +196,7 @@ class BackupRunner(TestRunner):

         poll_until(_result_is_active, time_out=self.TIMEOUT_BACKUP_CREATE)

-    def run_instance_goes_active(self, expected_states=['BACKUP', 'ACTIVE']):
+    def run_instance_goes_active(self, expected_states=['BACKUP', 'HEALTHY']):
         self._assert_instance_states(self.instance_info.id, expected_states)

     def run_backup_list(self):
@@ -334,7 +334,7 @@ class BackupRunner(TestRunner):
             expected_http_code=expected_http_code)

     def run_restore_from_backup_completed(
-            self, expected_states=['BUILD', 'ACTIVE']):
+            self, expected_states=['BUILD', 'HEALTHY']):
         self.assert_restore_from_backup_completed(
             self.restore_instance_id, expected_states)
         self.restore_host = self.get_instance_host(self.restore_instance_id)
@@ -344,7 +344,7 @@ class BackupRunner(TestRunner):
         self._assert_instance_states(instance_id, expected_states)

     def run_restore_from_inc_1_backup_completed(
-            self, expected_states=['BUILD', 'ACTIVE']):
+            self, expected_states=['BUILD', 'HEALTHY']):
         self.assert_restore_from_backup_completed(
             self.restore_inc_1_instance_id, expected_states)
         self.restore_inc_1_host = self.get_instance_host(
@@ -125,7 +125,7 @@ class ClusterRunner(TestRunner):
         return cluster.id

     def run_cluster_create_wait(self,
-                                expected_instance_states=['BUILD', 'ACTIVE']):
+                                expected_instance_states=['BUILD', 'HEALTHY']):

         self.assert_cluster_create_wait(
             self.cluster_id, expected_instance_states=expected_instance_states)
@@ -198,7 +198,7 @@ class ClusterRunner(TestRunner):
         cluster_instances = self._get_cluster_instances(
             client, cluster_id)
         self.assert_all_instance_states(
-            cluster_instances, ['REBOOT', 'ACTIVE'])
+            cluster_instances, ['REBOOT', 'HEALTHY'])

         self._assert_cluster_states(
             client, cluster_id, ['NONE'])
@@ -313,7 +313,7 @@ class ClusterRunner(TestRunner):
     def assert_cluster_grow_wait(self, cluster_id):
         client = self.auth_client
         cluster_instances = self._get_cluster_instances(client, cluster_id)
-        self.assert_all_instance_states(cluster_instances, ['ACTIVE'])
+        self.assert_all_instance_states(cluster_instances, ['HEALTHY'])

         self._assert_cluster_states(client, cluster_id, ['NONE'])
         self._assert_cluster_response(client, cluster_id, 'NONE')
@@ -345,10 +345,12 @@ class ClusterRunner(TestRunner):

     def run_cluster_upgrade_wait(self):
         self.assert_cluster_upgrade_wait(
-            self.cluster_id, expected_last_instance_state='ACTIVE')
+            self.cluster_id,
+            expected_last_instance_states=['HEALTHY']
+        )

     def assert_cluster_upgrade_wait(self, cluster_id,
-                                    expected_last_instance_state):
+                                    expected_last_instance_states):
         client = self.auth_client
         self._assert_cluster_states(client, cluster_id, ['NONE'])
         cluster_instances = self._get_cluster_instances(client, cluster_id)
@@ -357,7 +359,7 @@ class ClusterRunner(TestRunner):
             len(cluster_instances),
             "Unexpected number of instances after upgrade.")
         self.assert_all_instance_states(cluster_instances,
-                                        [expected_last_instance_state])
+                                        expected_last_instance_states)
         self._assert_cluster_response(client, cluster_id, 'NONE')

     def run_add_upgrade_cluster_data(self, data_type=DataType.tiny3):
@@ -411,7 +413,7 @@ class ClusterRunner(TestRunner):
             "Unexpected number of removed nodes.")

         cluster_instances = self._get_cluster_instances(client, cluster_id)
-        self.assert_all_instance_states(cluster_instances, ['ACTIVE'])
+        self.assert_all_instance_states(cluster_instances, ['HEALTHY'])
         self.assert_all_gone(self.cluster_removed_instances,
                              expected_last_instance_state)
         self._assert_cluster_response(client, cluster_id, 'NONE')
@@ -102,7 +102,7 @@ class ConfigurationRunner(TestRunner):
             expected_exception, expected_http_code)

     def run_detach_group_with_none_attached(self,
-                                            expected_states=['ACTIVE'],
+                                            expected_states=['HEALTHY'],
                                             expected_http_code=202):
         self.assert_instance_modify(
             self.instance_info.id, None,
@@ -301,7 +301,7 @@ class ConfigurationRunner(TestRunner):
                     self.instance_info.id in conf_instance_ids)

     def run_attach_dynamic_group(
-            self, expected_states=['ACTIVE'], expected_http_code=202):
+            self, expected_states=['HEALTHY'], expected_http_code=202):
         if self.dynamic_group_id:
             self.assert_instance_modify(
                 self.instance_info.id, self.dynamic_group_id,
@@ -362,8 +362,8 @@ class ConfigurationRunner(TestRunner):
         self.assert_group_delete_failure(
             self.dynamic_group_id, expected_exception, expected_http_code)

-    def run_update_dynamic_group(
-            self, expected_states=['ACTIVE'], expected_http_code=202):
+    def run_update_dynamic_group(self, expected_states=['HEALTHY'],
+                                 expected_http_code=202):
         if self.dynamic_group_id:
             values = json.dumps(self.test_helper.get_dynamic_group())
             self.assert_update_group(
@@ -381,7 +381,7 @@ class ConfigurationRunner(TestRunner):
         self._restart_instance(instance_id)

     def run_detach_dynamic_group(
-            self, expected_states=['ACTIVE'], expected_http_code=202):
+            self, expected_states=['HEALTHY'], expected_http_code=202):
         if self.dynamic_group_id:
             self.assert_instance_modify(
                 self.instance_info.id, None,
@@ -502,7 +502,7 @@ class ConfigurationRunner(TestRunner):
             client, client.configurations.delete, group_id)

     def _restart_instance(
-            self, instance_id, expected_states=['REBOOT', 'ACTIVE'],
+            self, instance_id, expected_states=['REBOOT', 'HEALTHY'],
             expected_http_code=202):
         client = self.auth_client
         client.instances.restart(instance_id)
@@ -538,7 +538,7 @@ class ConfigurationRunner(TestRunner):
         return result.id

     def run_wait_for_conf_instance(
-            self, expected_states=['BUILD', 'ACTIVE']):
+            self, expected_states=['BUILD', 'HEALTHY']):
         if self.config_inst_id:
             self.assert_instance_action(self.config_inst_id, expected_states)
             self.create_test_helper_on_instance(self.config_inst_id)
@@ -500,13 +500,13 @@ class GuestLogRunner(TestRunner):
         instance_id = self.instance_info.id
         # we need to wait until the heartbeat flips the instance
         # back into 'ACTIVE' before we issue the restart command
-        expected_states = ['RESTART_REQUIRED', 'ACTIVE']
+        expected_states = ['RESTART_REQUIRED', 'HEALTHY']
         self.assert_instance_action(instance_id, expected_states)
         client = self.auth_client
         client.instances.restart(instance_id)
         self.assert_client_code(client, expected_http_code)

-    def run_test_wait_for_restart(self, expected_states=['REBOOT', 'ACTIVE']):
+    def run_test_wait_for_restart(self, expected_states=['REBOOT', 'HEALTHY']):
         if self.test_helper.log_enable_requires_restart():
             self.assert_instance_action(self.instance_info.id, expected_states)
@@ -50,7 +50,7 @@ class InstanceActionsRunner(TestRunner):
             self.test_helper.remove_data(DataType.small, host)

     def run_instance_restart(
-            self, expected_states=['REBOOT', 'ACTIVE'],
+            self, expected_states=['REBOOT', 'HEALTHY'],
            expected_http_code=202):
         self.assert_instance_restart(self.instance_info.id, expected_states,
                                      expected_http_code)
@@ -66,7 +66,7 @@ class InstanceActionsRunner(TestRunner):

     def run_instance_resize_volume(
             self, resize_amount=1,
-            expected_states=['RESIZE', 'ACTIVE'],
+            expected_states=['RESIZE', 'HEALTHY'],
             expected_http_code=202):
         if self.VOLUME_SUPPORT:
             self.assert_instance_resize_volume(
@@ -106,7 +106,7 @@ class InstanceActionsRunner(TestRunner):
         self.assert_client_code(client, expected_http_code)

     def run_wait_for_instance_resize_flavor(
-            self, expected_states=['RESIZE', 'ACTIVE']):
+            self, expected_states=['RESIZE', 'HEALTHY']):
         self.report.log("Waiting for resize to '%s' on instance: %s" %
                         (self.resize_flavor_id, self.instance_info.id))
         self._assert_instance_states(self.instance_info.id, expected_states)
@@ -34,8 +34,8 @@ class InstanceCreateRunner(TestRunner):
         self.init_inst_config_group_id = None
         self.config_group_id = None

-    def run_empty_instance_create(
-            self, expected_states=['BUILD', 'ACTIVE'], expected_http_code=200):
+    def run_empty_instance_create(self, expected_states=['BUILD', 'HEALTHY'],
+                                  expected_http_code=200):
         name = self.instance_info.name
         flavor = self.get_instance_flavor()
         volume_size = self.instance_info.volume_size
@@ -68,7 +68,7 @@ class InstanceCreateRunner(TestRunner):

     def run_initialized_instance_create(
             self, with_dbs=True, with_users=True, configuration_id=None,
-            expected_states=['BUILD', 'ACTIVE'], expected_http_code=200,
+            expected_states=['BUILD', 'HEALTHY'], expected_http_code=200,
             create_helper_user=True, name_suffix='_init'):
         if self.is_using_existing_instance:
             # The user requested to run the tests using an existing instance.
@@ -215,16 +215,14 @@ class InstanceCreateRunner(TestRunner):

         return instance_info

-    def run_wait_for_instance(
-            self, expected_states=['BUILD', 'ACTIVE']):
+    def run_wait_for_instance(self, expected_states=['BUILD', 'HEALTHY']):
         instances = [self.instance_info.id]
         self.assert_all_instance_states(instances, expected_states)
         self.instance_info.srv_grp_id = self.assert_server_group_exists(
             self.instance_info.id)
         self.wait_for_test_helpers(self.instance_info)

-    def run_wait_for_init_instance(
-            self, expected_states=['BUILD', 'ACTIVE']):
+    def run_wait_for_init_instance(self, expected_states=['BUILD', 'HEALTHY']):
         if self.init_inst_info:
             instances = [self.init_inst_info.id]
             self.assert_all_instance_states(instances, expected_states)
@@ -35,7 +35,7 @@ class InstanceUpgradeRunner(TestRunner):
        host = self.get_instance_host(self.instance_info.id)
        self.test_helper.remove_data(DataType.small, host)

-    def run_instance_upgrade(self, expected_states=['UPGRADE', 'ACTIVE'],
+    def run_instance_upgrade(self, expected_states=['UPGRADE', 'HEALTHY'],
                             expected_http_code=202):
        instance_id = self.instance_info.id
        self.report.log("Testing upgrade on instance: %s" % instance_id)
@@ -1292,7 +1292,8 @@ class ModuleRunner(TestRunner):
        client.instances.module_remove(instance_id, module_id)
        self.assert_client_code(client, expected_http_code)

-    def run_wait_for_inst_with_mods(self, expected_states=['BUILD', 'ACTIVE']):
+    def run_wait_for_inst_with_mods(self,
+                                    expected_states=['BUILD', 'HEALTHY']):
        self.assert_instance_action(self.mod_inst_id, expected_states)

    def run_module_query_after_inst_create(self):
@@ -99,7 +99,8 @@ class ReplicationRunner(TestRunner):
        self.register_debug_inst_ids(new_replicas)
        return list(new_replicas)

-    def run_wait_for_single_replica(self, expected_states=['BUILD', 'ACTIVE']):
+    def run_wait_for_single_replica(self,
+                                    expected_states=['BUILD', 'HEALTHY']):
        self.assert_instance_action(self.replica_1_id, expected_states)
        self._assert_is_master(self.master_id, [self.replica_1_id])
        self._assert_is_replica(self.replica_1_id, self.master_id)
@@ -143,7 +144,7 @@ class ReplicationRunner(TestRunner):
            replica_id)

    def run_wait_for_non_affinity_master(self,
-                                         expected_states=['BUILD', 'ACTIVE']):
+                                         expected_states=['BUILD', 'HEALTHY']):
        self._assert_instance_states(self.non_affinity_master_id,
                                     expected_states)
        self.non_affinity_srv_grp_id = self.assert_server_group_exists(
@@ -168,7 +169,7 @@ class ReplicationRunner(TestRunner):
            'replica2', 2, expected_http_code)

    def run_wait_for_multiple_replicas(
-            self, expected_states=['BUILD', 'ACTIVE']):
+            self, expected_states=['BUILD', 'HEALTHY']):
        replica_ids = self._get_replica_set(self.master_id)
        self.report.log("Waiting for replicas: %s" % replica_ids)
        self.assert_instance_action(replica_ids, expected_states)
@@ -181,7 +182,7 @@ class ReplicationRunner(TestRunner):
            self, expected_states=['BUILD', 'ERROR']):
        self._assert_instance_states(self.non_affinity_repl_id,
                                     expected_states,
-                                     fast_fail_status=['ACTIVE'])
+                                     fast_fail_status=['HEALTHY'])

    def run_delete_non_affinity_repl(self, expected_http_code=202):
        self.assert_delete_instances(
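Note the inverted hunk above: `fast_fail_status` is the abort list, not the goal. The runner waits for the non-affinity replica to end in 'ERROR' and bails out early if the instance settles instead, so the ACTIVE-to-HEALTHY rename has to reach this list too. A hedged sketch of the short-circuit (illustrative names only, not the runner's actual code):

    import time


    def wait_for_state(get_status, expected, fast_fail_status=(),
                       timeout=600, poll=5):
        # Illustrative only: poll until `expected` is reached, but give
        # up immediately if a fast-fail state (now 'HEALTHY' rather than
        # 'ACTIVE') shows up first.
        deadline = time.time() + timeout
        while time.time() < deadline:
            status = get_status()
            if status == expected:
                return
            if status in fast_fail_status:
                raise AssertionError("got %r while waiting for %r"
                                     % (status, expected))
            time.sleep(poll)
        raise AssertionError("timed out waiting for %r" % expected)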
@@ -267,7 +268,7 @@ class ReplicationRunner(TestRunner):
            self.instance_info.id)

    def run_promote_to_replica_source(self,
-                                      expected_states=['PROMOTE', 'ACTIVE'],
+                                      expected_states=['PROMOTE', 'HEALTHY'],
                                      expected_http_code=202):
        self.assert_promote_to_replica_source(
            self.replica_1_id, self.instance_info.id, expected_states,
@@ -312,7 +313,7 @@ class ReplicationRunner(TestRunner):
        self.assert_verify_replica_data(self.replica_1_id, DataType.tiny2)

    def run_promote_original_source(self,
-                                    expected_states=['PROMOTE', 'ACTIVE'],
+                                    expected_states=['PROMOTE', 'HEALTHY'],
                                    expected_http_code=202):
        self.assert_promote_to_replica_source(
            self.instance_info.id, self.replica_1_id, expected_states,
@@ -339,7 +340,7 @@ class ReplicationRunner(TestRunner):
        self.test_helper.remove_data(data_set, host)

    def run_detach_replica_from_source(self,
-                                       expected_states=['DETACH', 'ACTIVE'],
+                                       expected_states=['DETACH', 'HEALTHY'],
                                       expected_http_code=202):
        self.assert_detach_replica_from_source(
            self.instance_info.id, self.replica_1_id,
@@ -133,9 +133,13 @@ class GuestAgentCassandraDBManagerTest(DatastoreManagerTest):

    def test_update_status(self):
        mock_status = MagicMock()
        self.manager.app.status = mock_status
        mock_status.is_installed = True
        mock_status._is_restarting = False
        self.manager._app.status = mock_status

        self.manager.update_status(self.context)
        mock_status.update.assert_any_call()

        self.assertTrue(mock_status.set_status.called)

    def test_prepare_pkg(self):
        self._prepare_dynamic(['cassandra'])
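The guest agent unit-test hunks from here on share one pattern: before calling `update_status()`, the mocked status object is given `is_installed = True` and `_is_restarting = False`, and afterwards the test asserts that `set_status` was called. This suggests `update_status()` now consults those two flags before reporting a status. A condensed, hypothetical rendering of the pattern (`manager` and `context` stand in for the test fixture's attributes):

    from unittest import mock


    def exercise_update_status(manager, context):
        # Stub the status object so the guarded reporting path runs:
        # the datastore looks installed and not mid-restart.
        mock_status = mock.MagicMock()
        mock_status.is_installed = True
        mock_status._is_restarting = False
        manager._app.status = mock_status  # attribute name varies per datastore

        manager.update_status(context)

        mock_status.update.assert_any_call()   # status was refreshed
        assert mock_status.set_status.called   # and a status was reported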
@@ -55,9 +55,11 @@ class GuestAgentCouchbaseManagerTest(DatastoreManagerTest):

    def test_update_status(self):
        mock_status = MagicMock()
        mock_status.is_installed = True
        mock_status._is_restarting = False
        self.manager.appStatus = mock_status
        self.manager.update_status(self.context)
        mock_status.update.assert_any_call()
        self.assertTrue(mock_status.set_status.called)

    def test_prepare_device_path_true(self):
        self._prepare_dynamic()
@@ -108,9 +108,11 @@ class GuestAgentCouchDBManagerTest(DatastoreManagerTest):

    def test_update_status(self):
        mock_status = MagicMock()
        mock_status.is_installed = True
        mock_status._is_restarting = False
        self.manager.appStatus = mock_status
        self.manager.update_status(self.context)
        mock_status.update.assert_any_call()
        self.assertTrue(mock_status.set_status.called)

    def _prepare_dynamic(self, packages=None, databases=None,
                         config_content=None, device_path='/dev/vdb',
@@ -1,270 +0,0 @@
# Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from mock import DEFAULT
from mock import MagicMock
from mock import patch
from testtools.matchers import Is, Equals, Not

from trove.common.instance import ServiceStatuses
from trove.guestagent import backup
from trove.guestagent.common import configuration
from trove.guestagent.common.configuration import ImportOverrideStrategy
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.db2 import (
    manager as db2_manager)
from trove.guestagent.datastore.experimental.db2 import (
    service as db2_service)
from trove.guestagent import pkg
from trove.guestagent import volume
from trove.tests.unittests.guestagent.test_datastore_manager import \
    DatastoreManagerTest


class GuestAgentDB2ManagerTest(DatastoreManagerTest):

    @patch.object(ImportOverrideStrategy, '_initialize_import_directory')
    @patch.multiple(operating_system, exists=DEFAULT, write_file=DEFAULT,
                    chown=DEFAULT, chmod=DEFAULT)
    @patch.object(db2_service.DB2App, 'process_default_dbm_config')
    def setUp(self, *arg, **kwargs):
        super(GuestAgentDB2ManagerTest, self).setUp('db2')
        self.real_status = db2_service.DB2AppStatus.set_status

        class FakeInstanceServiceStatus(object):
            status = ServiceStatuses.NEW

            def save(self):
                pass

        db2_service.DB2AppStatus.set_status = MagicMock(
            return_value=FakeInstanceServiceStatus())
        self.manager = db2_manager.Manager()
        self.real_db_app_status = db2_service.DB2AppStatus
        self.origin_format = volume.VolumeDevice.format
        self.origin_mount = volume.VolumeDevice.mount
        self.origin_mount_points = volume.VolumeDevice.mount_points
        self.origin_stop_db = db2_service.DB2App.stop_db
        self.origin_start_db = db2_service.DB2App.start_db
        self.orig_change_ownership = (db2_service.DB2App.change_ownership)
        self.orig_create_databases = db2_service.DB2Admin.create_database
        self.orig_list_databases = db2_service.DB2Admin.list_databases
        self.orig_delete_database = db2_service.DB2Admin.delete_database
        self.orig_create_users = db2_service.DB2Admin.create_user
        self.orig_list_users = db2_service.DB2Admin.list_users
        self.orig_delete_user = db2_service.DB2Admin.delete_user
        self.orig_update_hostname = db2_service.DB2App.update_hostname
        self.orig_backup_restore = backup.restore
        self.orig_init_config = db2_service.DB2App.init_config
        self.orig_update_overrides = db2_service.DB2App.update_overrides
        self.orig_remove_overrides = db2_service.DB2App.remove_overrides

    def tearDown(self):
        super(GuestAgentDB2ManagerTest, self).tearDown()
        db2_service.DB2AppStatus.set_status = self.real_db_app_status
        volume.VolumeDevice.format = self.origin_format
        volume.VolumeDevice.mount = self.origin_mount
        volume.VolumeDevice.mount_points = self.origin_mount_points
        db2_service.DB2App.stop_db = self.origin_stop_db
        db2_service.DB2App.start_db = self.origin_start_db
        db2_service.DB2App.change_ownership = self.orig_change_ownership
        db2_service.DB2Admin.create_database = self.orig_create_databases
        db2_service.DB2Admin.create_user = self.orig_create_users
        db2_service.DB2Admin.create_database = self.orig_create_databases
        db2_service.DB2Admin.list_databases = self.orig_list_databases
        db2_service.DB2Admin.delete_database = self.orig_delete_database
        db2_service.DB2Admin.create_user = self.orig_create_users
        db2_service.DB2Admin.list_users = self.orig_list_users
        db2_service.DB2Admin.delete_user = self.orig_delete_user
        db2_service.DB2App.update_hostname = self.orig_update_hostname
        backup.restore = self.orig_backup_restore
        db2_service.DB2App.init_config = self.orig_init_config
        db2_service.DB2App.update_overrides = self.orig_update_overrides
        db2_service.DB2App.remove_overrides = self.orig_remove_overrides

    def test_update_status(self):
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        self.manager.update_status(self.context)
        mock_status.update.assert_any_call()

    def test_prepare_device_path_true(self):
        self._prepare_dynamic()

    def test_prepare_device_path_false(self):
        self._prepare_dynamic(device_path=None)

    def test_prepare_database(self):
        self._prepare_dynamic(databases=['db1'])

    def test_prepare_from_backup(self):
        self._prepare_dynamic(['db2'], backup_id='123backup')

    @patch.object(configuration.ConfigurationManager, 'save_configuration')
    def _prepare_dynamic(self, packages=None, databases=None, users=None,
                         config_content='MockContent', device_path='/dev/vdb',
                         is_db_installed=True, backup_id=None, overrides=None):

        backup_info = {'id': backup_id,
                       'location': 'fake-location',
                       'type': 'DB2Backup',
                       'checksum': 'fake-checksum'} if backup_id else None

        mock_status = MagicMock()
        mock_app = MagicMock()
        self.manager.appStatus = mock_status
        self.manager.app = mock_app

        mock_status.begin_install = MagicMock(return_value=None)
        mock_app.change_ownership = MagicMock(return_value=None)
        mock_app.restart = MagicMock(return_value=None)
        mock_app.start_db = MagicMock(return_value=None)
        mock_app.stop_db = MagicMock(return_value=None)
        volume.VolumeDevice.format = MagicMock(return_value=None)
        volume.VolumeDevice.mount = MagicMock(return_value=None)
        volume.VolumeDevice.mount_points = MagicMock(return_value=[])
        db2_service.DB2Admin.create_user = MagicMock(return_value=None)
        db2_service.DB2Admin.create_database = MagicMock(return_value=None)
        backup.restore = MagicMock(return_value=None)

        with patch.object(pkg.Package, 'pkg_is_installed',
                          return_value=MagicMock(
                              return_value=is_db_installed)):
            self.manager.prepare(context=self.context, packages=packages,
                                 config_contents=config_content,
                                 databases=databases,
                                 memory_mb='2048', users=users,
                                 device_path=device_path,
                                 mount_point="/home/db2inst1/db2inst1",
                                 backup_info=backup_info,
                                 overrides=None,
                                 cluster_config=None)

        mock_status.begin_install.assert_any_call()
        self.assertEqual(1, mock_app.change_ownership.call_count)
        if databases:
            self.assertTrue(db2_service.DB2Admin.create_database.called)
        else:
            self.assertFalse(db2_service.DB2Admin.create_database.called)

        if users:
            self.assertTrue(db2_service.DB2Admin.create_user.called)
        else:
            self.assertFalse(db2_service.DB2Admin.create_user.called)

        if backup_id:
            backup.restore.assert_any_call(self.context,
                                           backup_info,
                                           '/home/db2inst1/db2inst1')
        self.assertTrue(
            self.manager.configuration_manager.save_configuration.called
        )

    def test_restart(self):
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        with patch.object(db2_service.DB2App, 'restart',
                          return_value=None) as restart_mock:
            # invocation
            self.manager.restart(self.context)
            # verification/assertion
            restart_mock.assert_any_call()

    def test_stop_db(self):
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        db2_service.DB2App.stop_db = MagicMock(return_value=None)
        self.manager.stop_db(self.context)
        db2_service.DB2App.stop_db.assert_any_call(
            do_not_start_on_reboot=False)

    def test_start_db_with_conf_changes(self):
        with patch.object(db2_service.DB2App, 'start_db_with_conf_changes'):
            self.manager.start_db_with_conf_changes(self.context, 'something')
            db2_service.DB2App.start_db_with_conf_changes.assert_any_call(
                'something')

    def test_create_database(self):
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        db2_service.DB2Admin.create_database = MagicMock(return_value=None)
        self.manager.create_database(self.context, ['db1'])
        db2_service.DB2Admin.create_database.assert_any_call(['db1'])

    def test_create_user(self):
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        db2_service.DB2Admin.create_user = MagicMock(return_value=None)
        self.manager.create_user(self.context, ['user1'])
        db2_service.DB2Admin.create_user.assert_any_call(['user1'])

    def test_delete_database(self):
        databases = ['db1']
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        db2_service.DB2Admin.delete_database = MagicMock(return_value=None)
        self.manager.delete_database(self.context, databases)
        db2_service.DB2Admin.delete_database.assert_any_call(databases)

    def test_delete_user(self):
        user = ['user1']
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        db2_service.DB2Admin.delete_user = MagicMock(return_value=None)
        self.manager.delete_user(self.context, user)
        db2_service.DB2Admin.delete_user.assert_any_call(user)

    def test_list_databases(self):
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        db2_service.DB2Admin.list_databases = MagicMock(
            return_value=['database1'])
        databases = self.manager.list_databases(self.context)
        self.assertThat(databases, Not(Is(None)))
        self.assertThat(databases, Equals(['database1']))
        db2_service.DB2Admin.list_databases.assert_any_call(None, None, False)

    def test_list_users(self):
        db2_service.DB2Admin.list_users = MagicMock(return_value=['user1'])
        users = self.manager.list_users(self.context)
        self.assertThat(users, Equals(['user1']))
        db2_service.DB2Admin.list_users.assert_any_call(None, None, False)

    @patch.object(db2_service.DB2Admin, 'get_user',
                  return_value=MagicMock(return_value=['user1']))
    def test_get_users(self, get_user_mock):
        username = ['user1']
        hostname = ['host']
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        users = self.manager.get_user(self.context, username, hostname)
        self.assertThat(users, Equals(get_user_mock.return_value))
        get_user_mock.assert_any_call(username, hostname)

    def test_rpc_ping(self):
        output = self.manager.rpc_ping(self.context)
        self.assertTrue(output)

    def test_update_update_overrides(self):
        configuration = {"DIAGSIZE": 50}
        db2_service.DB2App.update_overrides = MagicMock()
        self.manager.update_overrides(self.context, configuration, False)
        db2_service.DB2App.update_overrides.assert_any_call(self.context,
                                                            configuration)

    def test_reset_update_overrides(self):
        configuration = {}
        db2_service.DB2App.remove_overrides = MagicMock()
        self.manager.update_overrides(self.context, configuration, True)
        db2_service.DB2App.remove_overrides.assert_any_call()
@@ -123,8 +123,10 @@ class ManagerTest(trove_testtools.TestCase):
        super(ManagerTest, self).tearDown()

    def test_update_status(self):
        self.manager._status.is_installed = True
        self.manager._status._is_restarting = False
        self.manager.update_status(self.context)
        self.manager.status.update.assert_any_call()
        self.assertTrue(self.manager.status.set_status.called)

    def test_guest_log_list(self):
        log_list = self.manager.guest_log_list(self.context)
@@ -1,222 +0,0 @@
# Copyright 2014 eBay Software Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from oslo_utils import netutils
import pymongo

import trove.common.instance as ds_instance
import trove.common.utils as utils
from trove.guestagent.common.configuration import ImportOverrideStrategy
import trove.guestagent.datastore.experimental.mongodb.manager as manager
import trove.guestagent.datastore.experimental.mongodb.service as service
import trove.guestagent.volume as volume
import trove.tests.unittests.trove_testtools as trove_testtools


class GuestAgentMongoDBClusterManagerTest(trove_testtools.TestCase):

    @mock.patch.object(ImportOverrideStrategy, '_initialize_import_directory')
    def setUp(self, _):
        super(GuestAgentMongoDBClusterManagerTest, self).setUp()
        self.context = trove_testtools.TroveTestContext(self)
        self.manager = manager.Manager()
        self.manager.app.configuration_manager = mock.MagicMock()
        self.manager.app.status.set_status = mock.MagicMock()
        self.manager.app.status.set_host = mock.MagicMock()
        self.conf_mgr = self.manager.app.configuration_manager

        self.pymongo_patch = mock.patch.object(
            pymongo, 'MongoClient'
        )
        self.addCleanup(self.pymongo_patch.stop)
        self.pymongo_patch.start()

    def tearDown(self):
        super(GuestAgentMongoDBClusterManagerTest, self).tearDown()

    @mock.patch.object(service.MongoDBApp, 'add_members',
                       side_effect=RuntimeError("Boom!"))
    def test_add_members_failure(self, mock_add_members):
        members = ["test1", "test2"]
        self.assertRaises(RuntimeError, self.manager.add_members,
                          self.context, members)
        self.manager.app.status.set_status.assert_called_with(
            ds_instance.ServiceStatuses.FAILED)

    @mock.patch.object(utils, 'poll_until')
    @mock.patch.object(service.MongoDBAdmin, 'rs_initiate')
    @mock.patch.object(service.MongoDBAdmin, 'rs_add_members')
    def test_add_member(self, mock_add, mock_initiate, mock_poll):
        members = ["test1", "test2"]
        self.manager.add_members(self.context, members)
        mock_initiate.assert_any_call()
        mock_add.assert_any_call(["test1", "test2"])

    @mock.patch.object(service.MongoDBApp, 'restart')
    @mock.patch.object(service.MongoDBApp, 'create_admin_user')
    @mock.patch.object(utils, 'generate_random_password', return_value='pwd')
    def test_prep_primary(self, mock_pwd, mock_user, mock_restart):
        self.manager.prep_primary(self.context)
        mock_user.assert_called_with('pwd')
        mock_restart.assert_called_with()

    @mock.patch.object(service.MongoDBApp, 'add_shard',
                       side_effect=RuntimeError("Boom!"))
    def test_add_shard_failure(self, mock_add_shard):
        self.assertRaises(RuntimeError, self.manager.add_shard,
                          self.context, "rs", "rs_member")
        self.manager.app.status.set_status.assert_called_with(
            ds_instance.ServiceStatuses.FAILED)

    @mock.patch.object(service.MongoDBAdmin, 'add_shard')
    def test_add_shard(self, mock_add_shard):
        self.manager.add_shard(self.context, "rs", "rs_member")
        mock_add_shard.assert_called_with("rs/rs_member:27017")

    @mock.patch.object(service.MongoDBApp, 'add_config_servers',
                       side_effect=RuntimeError("Boom!"))
    def test_add_config_server_failure(self, mock_add_config):
        self.assertRaises(RuntimeError, self.manager.add_config_servers,
                          self.context,
                          ["cfg_server1", "cfg_server2"])
        self.manager.app.status.set_status.assert_called_with(
            ds_instance.ServiceStatuses.FAILED)

    @mock.patch.object(service.MongoDBApp, 'start_db')
    def test_add_config_servers(self, mock_start_db):
        self.manager.add_config_servers(self.context,
                                        ["cfg_server1",
                                         "cfg_server2"])
        self.conf_mgr.apply_system_override.assert_called_once_with(
            {'sharding.configDB': "cfg_server1:27019,cfg_server2:27019"},
            'clustering')
        mock_start_db.assert_called_with(True)

    @mock.patch.object(service.MongoDBApp, '_initialize_writable_run_dir')
    @mock.patch.object(service.MongoDBApp, '_configure_as_query_router')
    @mock.patch.object(service.MongoDBApp, '_configure_cluster_security')
    def test_prepare_mongos(self, mock_secure, mock_config, mock_run_init):
        self._prepare_method("test-id-1", "query_router", None)
        mock_run_init.assert_called_once_with()
        mock_config.assert_called_once_with()
        mock_secure.assert_called_once_with(None)
        self.manager.app.status.set_status.assert_called_with(
            ds_instance.ServiceStatuses.INSTANCE_READY, force=True)

    @mock.patch.object(service.MongoDBApp, '_initialize_writable_run_dir')
    @mock.patch.object(service.MongoDBApp, '_configure_as_config_server')
    @mock.patch.object(service.MongoDBApp, '_configure_cluster_security')
    def test_prepare_config_server(
            self, mock_secure, mock_config, mock_run_init):
        self._prepare_method("test-id-2", "config_server", None)
        mock_run_init.assert_called_once_with()
        mock_config.assert_called_once_with()
        mock_secure.assert_called_once_with(None)
        self.manager.app.status.set_status.assert_called_with(
            ds_instance.ServiceStatuses.INSTANCE_READY, force=True)

    @mock.patch.object(service.MongoDBApp, '_initialize_writable_run_dir')
    @mock.patch.object(service.MongoDBApp, '_configure_as_cluster_member')
    @mock.patch.object(service.MongoDBApp, '_configure_cluster_security')
    def test_prepare_member(self, mock_secure, mock_config, mock_run_init):
        self._prepare_method("test-id-3", "member", None)
        mock_run_init.assert_called_once_with()
        mock_config.assert_called_once_with('rs1')
        mock_secure.assert_called_once_with(None)
        self.manager.app.status.set_status.assert_called_with(
            ds_instance.ServiceStatuses.INSTANCE_READY, force=True)

    @mock.patch.object(service.MongoDBApp, '_configure_network')
    def test_configure_as_query_router(self, net_conf):
        self.conf_mgr.parse_configuration = mock.Mock(
            return_value={'storage.mmapv1.smallFiles': False,
                          'storage.journal.enabled': True})
        self.manager.app._configure_as_query_router()
        self.conf_mgr.save_configuration.assert_called_once_with({})
        net_conf.assert_called_once_with(service.MONGODB_PORT)
        self.conf_mgr.apply_system_override.assert_called_once_with(
            {'sharding.configDB': ''}, 'clustering')
        self.assertTrue(self.manager.app.is_query_router)

    @mock.patch.object(service.MongoDBApp, '_configure_network')
    def test_configure_as_config_server(self, net_conf):
        self.manager.app._configure_as_config_server()
        net_conf.assert_called_once_with(service.CONFIGSVR_PORT)
        self.conf_mgr.apply_system_override.assert_called_once_with(
            {'sharding.clusterRole': 'configsvr'}, 'clustering')

    @mock.patch.object(service.MongoDBApp, 'start_db')
    @mock.patch.object(service.MongoDBApp, '_configure_network')
    def test_configure_as_cluster_member(self, net_conf, start):
        self.manager.app._configure_as_cluster_member('rs1')
        net_conf.assert_called_once_with(service.MONGODB_PORT)
        self.conf_mgr.apply_system_override.assert_called_once_with(
            {'replication.replSetName': 'rs1'}, 'clustering')

    @mock.patch.object(service.MongoDBApp, 'store_key')
    @mock.patch.object(service.MongoDBApp, 'get_key_file',
                       return_value='/var/keypath')
    def test_configure_cluster_security(self, get_key_mock, store_key_mock):
        self.manager.app._configure_cluster_security('key')
        store_key_mock.assert_called_once_with('key')
        # TODO(mvandijk): enable cluster security once Trove features are in
        # self.conf_mgr.apply_system_override.assert_called_once_with(
        #     {'security.clusterAuthMode': 'keyFile',
        #      'security.keyFile': '/var/keypath'}, 'clustering')

    @mock.patch.object(netutils, 'get_my_ipv4', return_value="10.0.0.2")
    def test_configure_network(self, ip_mock):
        self.manager.app._configure_network()
        self.conf_mgr.apply_system_override.assert_called_once_with(
            {'net.bindIp': '10.0.0.2,127.0.0.1'})
        self.manager.app.status.set_host.assert_called_once_with(
            '10.0.0.2', port=None)

        self.manager.app._configure_network(10000)
        self.conf_mgr.apply_system_override.assert_called_with(
            {'net.bindIp': '10.0.0.2,127.0.0.1',
             'net.port': 10000})
        self.manager.app.status.set_host.assert_called_with(
            '10.0.0.2', port=10000)

    @mock.patch.object(utils, 'poll_until')
    @mock.patch.object(service.MongoDBApp, 'get_key_file',
                       return_value="/test/key/file")
    @mock.patch.object(volume.VolumeDevice, 'mount_points', return_value=[])
    @mock.patch.object(volume.VolumeDevice, 'mount', return_value=None)
    @mock.patch.object(volume.VolumeDevice, 'migrate_data', return_value=None)
    @mock.patch.object(volume.VolumeDevice, 'format', return_value=None)
    @mock.patch.object(service.MongoDBApp, 'clear_storage')
    @mock.patch.object(service.MongoDBApp, 'start_db')
    @mock.patch.object(service.MongoDBApp, 'stop_db')
    @mock.patch.object(service.MongoDBAppStatus,
                       'wait_for_database_service_start')
    @mock.patch.object(service.MongoDBApp, 'install_if_needed')
    @mock.patch.object(service.MongoDBAppStatus, 'begin_install')
    def _prepare_method(self, instance_id, instance_type, key, *args):
        cluster_config = {"id": instance_id,
                          "shard_id": "test_shard_id",
                          "instance_type": instance_type,
                          "replica_set_name": "rs1",
                          "key": key}

        # invocation
        self.manager.prepare(context=self.context, databases=None,
                             packages=['package'],
                             memory_mb='2048', users=None,
                             mount_point='/var/lib/mongodb',
                             overrides=None,
                             cluster_config=cluster_config)
@@ -1,371 +0,0 @@
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
import pymongo

import trove.common.db.mongodb.models as models
import trove.common.utils as utils
import trove.guestagent.backup as backup
from trove.guestagent.common.configuration import ImportOverrideStrategy
import trove.guestagent.datastore.experimental.mongodb.manager as manager
import trove.guestagent.datastore.experimental.mongodb.service as service
import trove.guestagent.volume as volume
from trove.tests.unittests.guestagent.test_datastore_manager import \
    DatastoreManagerTest


class GuestAgentMongoDBManagerTest(DatastoreManagerTest):

    @mock.patch.object(ImportOverrideStrategy, '_initialize_import_directory')
    def setUp(self, _):
        super(GuestAgentMongoDBManagerTest, self).setUp('mongodb')
        self.manager = manager.Manager()

        self.execute_with_timeout_patch = mock.patch.object(
            utils, 'execute_with_timeout', return_value=('0', '')
        )
        self.addCleanup(self.execute_with_timeout_patch.stop)
        self.execute_with_timeout_patch.start()

        self.pymongo_patch = mock.patch.object(
            pymongo, 'MongoClient'
        )
        self.addCleanup(self.pymongo_patch.stop)
        self.pymongo_patch.start()

        self.mount_point = '/var/lib/mongodb'
        self.host_wildcard = '%'  # This is used in the test_*_user tests below
        self.serialized_user = {
            '_name': 'testdb.testuser', '_password': None,
            '_roles': [{'db': 'testdb', 'role': 'testrole'}],
            '_username': 'testuser', '_databases': [],
            '_host': self.host_wildcard,
            '_database': {'_name': 'testdb',
                          '_character_set': None,
                          '_collate': None},
            '_is_root': False
        }

    def tearDown(self):
        super(GuestAgentMongoDBManagerTest, self).tearDown()

    def test_update_status(self):
        self.manager.app.status = mock.MagicMock()
        self.manager.update_status(self.context)
        self.manager.app.status.update.assert_any_call()

    def _prepare_method(self, packages=['packages'], databases=None,
                        memory_mb='2048', users=None, device_path=None,
                        mount_point=None, backup_info=None,
                        config_contents=None, root_password=None,
                        overrides=None, cluster_config=None,):
        """self.manager.app must be correctly mocked before calling."""

        self.manager.app.status = mock.Mock()

        self.manager.prepare(self.context, packages,
                             databases, memory_mb, users,
                             device_path=device_path,
                             mount_point=mount_point,
                             backup_info=backup_info,
                             config_contents=config_contents,
                             root_password=root_password,
                             overrides=overrides,
                             cluster_config=cluster_config)

        self.manager.app.status.begin_install.assert_any_call()
        self.manager.app.install_if_needed.assert_called_with(packages)
        self.manager.app.stop_db.assert_any_call()
        self.manager.app.clear_storage.assert_any_call()

        (self.manager.app.apply_initial_guestagent_configuration.
         assert_called_once_with(cluster_config, self.mount_point))

    @mock.patch.object(volume, 'VolumeDevice')
    @mock.patch('os.path.exists')
    def test_prepare_for_volume(self, exists, mocked_volume):
        device_path = '/dev/vdb'

        self.manager.app = mock.Mock()

        self._prepare_method(device_path=device_path)

        mocked_volume().unmount_device.assert_called_with(device_path)
        mocked_volume().format.assert_any_call()
        mocked_volume().migrate_data.assert_called_with(self.mount_point)
        mocked_volume().mount.assert_called_with(self.mount_point)

    def test_secure(self):
        self.manager.app = mock.Mock()

        mock_secure = mock.Mock()
        self.manager.app.secure = mock_secure

        self._prepare_method()

        mock_secure.assert_called_with()

    @mock.patch.object(backup, 'restore')
    @mock.patch.object(service.MongoDBAdmin, 'is_root_enabled')
    def test_prepare_from_backup(self, mocked_root_check, mocked_restore):
        self.manager.app = mock.Mock()

        backup_info = {'id': 'backup_id_123abc',
                       'location': 'fake-location',
                       'type': 'MongoDBDump',
                       'checksum': 'fake-checksum'}

        self._prepare_method(backup_info=backup_info)

        mocked_restore.assert_called_with(self.context, backup_info,
                                          '/var/lib/mongodb')
        mocked_root_check.assert_any_call()

    def test_prepare_with_databases(self):
        self.manager.app = mock.Mock()

        database = mock.Mock()
        mock_create_databases = mock.Mock()
        self.manager.create_database = mock_create_databases

        self._prepare_method(databases=[database])

        mock_create_databases.assert_called_with(self.context, [database])

    def test_prepare_with_users(self):
        self.manager.app = mock.Mock()

        user = mock.Mock()
        mock_create_users = mock.Mock()
        self.manager.create_user = mock_create_users

        self._prepare_method(users=[user])

        mock_create_users.assert_called_with(self.context, [user])

    @mock.patch.object(service.MongoDBAdmin, 'enable_root')
    def test_provide_root_password(self, mocked_enable_root):
        self.manager.app = mock.Mock()

        self._prepare_method(root_password='test_password')

        mocked_enable_root.assert_called_with('test_password')

    @mock.patch.object(service, 'MongoDBClient')
    @mock.patch.object(service.MongoDBAdmin, '_admin_user')
    @mock.patch.object(service.MongoDBAdmin, '_get_user_record')
    def test_create_user(self, mocked_get_user, mocked_admin_user,
                         mocked_client):
        user = self.serialized_user.copy()
        user['_password'] = 'testpassword'
        users = [user]

        client = mocked_client().__enter__()['testdb']
        mocked_get_user.return_value = None

        self.manager.create_user(self.context, users)

        client.add_user.assert_called_with('testuser', password='testpassword',
                                           roles=[{'db': 'testdb',
                                                   'role': 'testrole'}])

    @mock.patch.object(service, 'MongoDBClient')
    @mock.patch.object(service.MongoDBAdmin, '_admin_user')
    def test_delete_user(self, mocked_admin_user, mocked_client):
        client = mocked_client().__enter__()['testdb']

        self.manager.delete_user(self.context, self.serialized_user)

        client.remove_user.assert_called_with('testuser')

    @mock.patch.object(service, 'MongoDBClient')
    @mock.patch.object(service.MongoDBAdmin, '_admin_user')
    def test_get_user(self, mocked_admin_user, mocked_client):
        mocked_find = mock.MagicMock(return_value={
            '_id': 'testdb.testuser',
            'user': 'testuser', 'db': 'testdb',
            'roles': [{'db': 'testdb', 'role': 'testrole'}]
        })
        client = mocked_client().__enter__().admin
        client.system.users.find_one = mocked_find

        result = self.manager.get_user(self.context, 'testdb.testuser', None)

        mocked_find.assert_called_with({'user': 'testuser', 'db': 'testdb'})
        self.assertEqual(self.serialized_user, result)

    @mock.patch.object(service, 'MongoDBClient')
    @mock.patch.object(service.MongoDBAdmin, '_admin_user')
    def test_list_users(self, mocked_admin_user, mocked_client):
        # roles are NOT returned by list_users
        user1 = self.serialized_user.copy()
        user2 = self.serialized_user.copy()
        user2['_name'] = 'testdb.otheruser'
        user2['_username'] = 'otheruser'
        user2['_roles'] = [{'db': 'testdb2', 'role': 'readWrite'}]
        user2['_databases'] = [{'_name': 'testdb2',
                                '_character_set': None,
                                '_collate': None}]

        mocked_find = mock.MagicMock(return_value=[
            {
                '_id': 'admin.os_admin',
                'user': 'os_admin', 'db': 'admin',
                'roles': [{'db': 'admin', 'role': 'root'}]
            },
            {
                '_id': 'testdb.testuser',
                'user': 'testuser', 'db': 'testdb',
                'roles': [{'db': 'testdb', 'role': 'testrole'}]
            },
            {
                '_id': 'testdb.otheruser',
                'user': 'otheruser', 'db': 'testdb',
                'roles': [{'db': 'testdb2', 'role': 'readWrite'}]
            }
        ])

        client = mocked_client().__enter__().admin
        client.system.users.find = mocked_find

        users, next_marker = self.manager.list_users(self.context)

        self.assertIsNone(next_marker)
        self.assertEqual(sorted([user1, user2], key=lambda x: x['_name']),
                         users)

    @mock.patch.object(service.MongoDBAdmin, 'create_validated_user')
    @mock.patch.object(utils, 'generate_random_password',
                       return_value='password')
    def test_enable_root(self, mock_gen_rand_pwd, mock_create_user):
        root_user = {'_name': 'admin.root',
                     '_username': 'root',
                     '_database': {'_name': 'admin',
                                   '_character_set': None,
                                   '_collate': None},
                     '_password': 'password',
                     '_roles': [{'db': 'admin', 'role': 'root'}],
                     '_databases': [],
                     '_host': self.host_wildcard,
                     '_is_root': True}

        result = self.manager.enable_root(self.context)

        self.assertTrue(mock_create_user.called)
        self.assertEqual(root_user, result)

    @mock.patch.object(service, 'MongoDBClient')
    @mock.patch.object(service.MongoDBAdmin, '_admin_user')
    @mock.patch.object(service.MongoDBAdmin, '_get_user_record',
                       return_value=models.MongoDBUser('testdb.testuser'))
    def test_grant_access(self, mocked_get_user,
                          mocked_admin_user, mocked_client):
        client = mocked_client().__enter__()['testdb']

        self.manager.grant_access(self.context, 'testdb.testuser',
                                  None, ['db1', 'db2', 'db3'])

        client.add_user.assert_called_with('testuser', roles=[
            {'db': 'db1', 'role': 'readWrite'},
            {'db': 'db2', 'role': 'readWrite'},
            {'db': 'db3', 'role': 'readWrite'}
        ])

    @mock.patch.object(service, 'MongoDBClient')
    @mock.patch.object(service.MongoDBAdmin, '_admin_user')
    @mock.patch.object(service.MongoDBAdmin, '_get_user_record',
                       return_value=models.MongoDBUser('testdb.testuser'))
    def test_revoke_access(self, mocked_get_user,
                           mocked_admin_user, mocked_client):
        client = mocked_client().__enter__()['testdb']

        mocked_get_user.return_value.roles = [
            {'db': 'db1', 'role': 'readWrite'},
            {'db': 'db2', 'role': 'readWrite'},
            {'db': 'db3', 'role': 'readWrite'}
        ]

        self.manager.revoke_access(self.context, 'testdb.testuser',
                                   None, 'db2')

        client.add_user.assert_called_with('testuser', roles=[
            {'db': 'db1', 'role': 'readWrite'},
            {'db': 'db3', 'role': 'readWrite'}
        ])

    @mock.patch.object(service, 'MongoDBClient')
    @mock.patch.object(service.MongoDBAdmin, '_admin_user')
    @mock.patch.object(service.MongoDBAdmin, '_get_user_record',
                       return_value=models.MongoDBUser('testdb.testuser'))
    def test_list_access(self, mocked_get_user,
                         mocked_admin_user, mocked_client):
        mocked_get_user.return_value.roles = [
            {'db': 'db1', 'role': 'readWrite'},
            {'db': 'db2', 'role': 'readWrite'},
            {'db': 'db3', 'role': 'readWrite'}
        ]

        accessible_databases = self.manager.list_access(
            self.context, 'testdb.testuser', None
        )

        self.assertEqual(['db1', 'db2', 'db3'],
                         [db['_name'] for db in accessible_databases])

    @mock.patch.object(service, 'MongoDBClient')
    @mock.patch.object(service.MongoDBAdmin, '_admin_user')
    def test_create_databases(self, mocked_admin_user, mocked_client):
        schema = models.MongoDBSchema('testdb').serialize()
        db_client = mocked_client().__enter__()['testdb']

        self.manager.create_database(self.context, [schema])

        # FIXME(songjian):can not create database with null content,
        # so create a collection
        # db_client['dummy'].insert.assert_called_with({'dummy': True})
        # db_client.drop_collection.assert_called_with('dummy')
        db_client.create_collection.assert_called_with('dummy')

    @mock.patch.object(service, 'MongoDBClient')
    @mock.patch.object(service.MongoDBAdmin, '_admin_user')
    def test_list_databases(self,  # mocked_ignored_dbs,
                            mocked_admin_user, mocked_client):
        # This list contains the special 'admin', 'local' and 'config' dbs;
        # the special dbs should be skipped in the output.
        # Pagination is tested by starting at 'db1', so 'db0' should not
        # be in the output. The limit is set to 2, meaning the result
        # should be 'db1' and 'db2'. The next_marker should be 'db3'.
        mocked_list = mock.MagicMock(
            return_value=['admin', 'local', 'config',
                          'db0', 'db1', 'db2', 'db3'])
        mocked_client().__enter__().database_names = mocked_list

        dbs, next_marker = self.manager.list_databases(
            self.context, limit=2, marker='db1', include_marker=True)

        mocked_list.assert_any_call()
        self.assertEqual([models.MongoDBSchema('db1').serialize(),
                          models.MongoDBSchema('db2').serialize()],
                         dbs)
        self.assertEqual('db2', next_marker)

    @mock.patch.object(service, 'MongoDBClient')
    @mock.patch.object(service.MongoDBAdmin, '_admin_user')
    def test_delete_database(self, mocked_admin_user, mocked_client):
        schema = models.MongoDBSchema('testdb').serialize()

        self.manager.delete_database(self.context, schema)

        mocked_client().__enter__().drop_database.assert_called_with('testdb')
@@ -99,10 +99,11 @@ class GuestAgentManagerTest(DatastoreManagerTest):

    def test_update_status(self):
        mock_status = MagicMock()
        mock_status.is_installed = True
        mock_status._is_restarting = False
        dbaas.MySqlAppStatus.get = MagicMock(return_value=mock_status)
        self.manager.update_status(self.context)
        dbaas.MySqlAppStatus.get.assert_any_call()
        mock_status.update.assert_any_call()
        self.assertTrue(mock_status.set_status.called)

    def _empty_user(self):
        return models.MySQLUser(deserializing=True)
@@ -80,9 +80,11 @@ class RedisGuestAgentManagerTest(DatastoreManagerTest):

    def test_update_status(self):
        mock_status = MagicMock()
        mock_status.is_installed = True
        mock_status._is_restarting = False
        self.manager._app.status = mock_status
        self.manager.update_status(self.context)
        mock_status.update.assert_any_call()
        self.assertTrue(mock_status.set_status.called)

    def test_prepare_redis_not_installed(self):
        self._prepare_dynamic(is_redis_installed=False)
@@ -1,133 +0,0 @@
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from eventlet import Timeout
import mock

import trove.common.context as context
from trove.common import exception
from trove.common.rpc.version import RPC_API_VERSION
from trove.common.strategies.cluster.experimental.vertica.guestagent import (
    VerticaGuestAgentAPI)
from trove import rpc
from trove.tests.unittests import trove_testtools


def _mock_call(cmd, timeout, version=None, user=None,
               public_keys=None, members=None):
    # To check get_public_keys, authorize_public_keys,
    # install_cluster, cluster_complete in cmd.
    if cmd in ('get_public_keys', 'authorize_public_keys',
               'install_cluster', 'cluster_complete'):
        return True
    else:
        raise BaseException("Test Failed")


class ApiTest(trove_testtools.TestCase):
    @mock.patch.object(rpc, 'get_client')
    @mock.patch('trove.instance.models.get_instance_encryption_key',
                return_value='2LMDgren5citVxmSYNiRFCyFfVDjJtDaQT9LYV08')
    def setUp(self, mock_get_encryption_key, *args):
        super(ApiTest, self).setUp()
        self.context = context.TroveContext()
        self.guest = VerticaGuestAgentAPI(self.context, 0)

        self.guest._call = _mock_call
        self.api = VerticaGuestAgentAPI(self.context, "instance-id-x23d2d")
        self._mock_rpc_client()
        mock_get_encryption_key.assert_called()

    def test_get_routing_key(self):
        self.assertEqual('guestagent.instance-id-x23d2d',
                         self.api._get_routing_key())

    @mock.patch('trove.guestagent.api.LOG')
    def test_api_cast_exception(self, mock_logging):
        self.call_context.cast.side_effect = IOError('host down')
        self.assertRaises(exception.GuestError, self.api.create_user,
                          'test_user')

    @mock.patch('trove.guestagent.api.LOG')
    def test_api_call_exception(self, mock_logging):
        self.call_context.call.side_effect = IOError('host_down')
        self.assertRaises(exception.GuestError, self.api.list_users)

    def test_api_call_timeout(self):
        self.call_context.call.side_effect = Timeout()
        self.assertRaises(exception.GuestTimeout, self.api.restart)

    def _verify_rpc_prepare_before_call(self):
        self.api.client.prepare.assert_called_once_with(
            version=RPC_API_VERSION, timeout=mock.ANY)

    def _verify_rpc_prepare_before_cast(self):
        self.api.client.prepare.assert_called_once_with(
            version=RPC_API_VERSION)

    def _verify_cast(self, *args, **kwargs):
        self.call_context.cast.assert_called_once_with(self.context, *args,
                                                       **kwargs)

    def _verify_call(self, *args, **kwargs):
        self.call_context.call.assert_called_once_with(self.context, *args,
                                                       **kwargs)

    def _mock_rpc_client(self):
        self.call_context = mock.Mock()
        self.api.client.prepare = mock.Mock(return_value=self.call_context)
        self.call_context.call = mock.Mock()
        self.call_context.cast = mock.Mock()

    def test_get_public_keys(self):
        exp_resp = 'some_key'
        self.call_context.call.return_value = exp_resp

        resp = self.api.get_public_keys(user='dummy')

        self._verify_rpc_prepare_before_call()
        self._verify_call('get_public_keys', user='dummy')
        self.assertEqual(exp_resp, resp)

    def test_authorize_public_keys(self):
        exp_resp = None
        self.call_context.call.return_value = exp_resp

        resp = self.api.authorize_public_keys(user='dummy',
                                              public_keys='some_key')

        self._verify_rpc_prepare_before_call()
        self._verify_call('authorize_public_keys', user='dummy',
                          public_keys='some_key')
        self.assertEqual(exp_resp, resp)

    def test_install_cluster(self):
        exp_resp = None
        self.call_context.call.return_value = exp_resp

        resp = self.api.install_cluster(members=['10.0.0.1', '10.0.0.2'])

        self._verify_rpc_prepare_before_call()
        self._verify_call('install_cluster', members=['10.0.0.1', '10.0.0.2'])
        self.assertEqual(exp_resp, resp)

    def test_cluster_complete(self):
        exp_resp = None
        self.call_context.call.return_value = exp_resp

        resp = self.api.cluster_complete()

        self._verify_rpc_prepare_before_call()
        self._verify_call('cluster_complete')
        self.assertEqual(exp_resp, resp)
@ -1,389 +0,0 @@
|
||||
# Copyright [2015] Hewlett-Packard Development Company, L.P.
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from mock import DEFAULT
|
||||
from mock import MagicMock
|
||||
from mock import patch
|
||||
from os import path
|
||||
from testtools.matchers import Is
|
||||
|
||||
from trove.common.exception import DatastoreOperationNotSupported
|
||||
from trove.common import instance as rd_instance
|
||||
from trove.guestagent.common import configuration
|
||||
from trove.guestagent.common.configuration import ImportOverrideStrategy
|
||||
from trove.guestagent.common import operating_system
|
||||
from trove.guestagent.datastore.experimental.vertica.manager import Manager
|
||||
from trove.guestagent.datastore.experimental.vertica.service import (
|
||||
VerticaAppStatus)
|
||||
from trove.guestagent.datastore.experimental.vertica.service import VerticaApp
|
||||
from trove.guestagent.datastore.experimental.vertica import system
|
||||
from trove.guestagent import dbaas
|
||||
from trove.guestagent import volume
|
||||
from trove.guestagent.volume import VolumeDevice
|
||||
from trove.tests.unittests.guestagent.test_datastore_manager import \
|
||||
DatastoreManagerTest
|
||||
|
||||
|
||||
class GuestAgentManagerTest(DatastoreManagerTest):
|
||||
|
||||
@patch.object(ImportOverrideStrategy, '_initialize_import_directory')
|
||||
@patch.multiple(operating_system, exists=DEFAULT, write_file=DEFAULT,
|
||||
chown=DEFAULT, chmod=DEFAULT)
|
||||
def setUp(self, *args, **kwargs):
|
||||
super(GuestAgentManagerTest, self).setUp('vertica')
|
||||
self.manager = Manager()
|
||||
self.origin_format = volume.VolumeDevice.format
|
||||
self.origin_migrate_data = volume.VolumeDevice.migrate_data
|
||||
self.origin_mount = volume.VolumeDevice.mount
|
||||
self.origin_unmount = volume.VolumeDevice.unmount
|
||||
self.origin_mount_points = volume.VolumeDevice.mount_points
|
||||
self.origin_set_read = volume.VolumeDevice.set_readahead_size
|
||||
self.origin_install_vertica = VerticaApp.install_vertica
|
||||
self.origin_create_db = VerticaApp.create_db
|
||||
self.origin_stop_db = VerticaApp.stop_db
|
||||
self.origin_start_db = VerticaApp.start_db
|
||||
self.origin_restart = VerticaApp.restart
|
||||
self.origin_install_if = VerticaApp.install_if_needed
|
||||
self.origin_enable_root = VerticaApp.enable_root
|
||||
self.origin_is_root_enabled = VerticaApp.is_root_enabled
|
||||
self.origin_prepare_for_install_vertica = (
|
||||
VerticaApp.prepare_for_install_vertica)
|
||||
self.origin_add_udls = VerticaApp.add_udls
|
||||
|
||||
def tearDown(self):
|
||||
super(GuestAgentManagerTest, self).tearDown()
|
||||
volume.VolumeDevice.format = self.origin_format
|
||||
volume.VolumeDevice.migrate_data = self.origin_migrate_data
|
||||
volume.VolumeDevice.mount = self.origin_mount
|
||||
volume.VolumeDevice.unmount = self.origin_unmount
|
||||
volume.VolumeDevice.mount_points = self.origin_mount_points
|
||||
volume.VolumeDevice.set_readahead_size = self.origin_set_read
|
||||
VerticaApp.create_db = self.origin_create_db
|
||||
VerticaApp.install_vertica = self.origin_install_vertica
|
||||
VerticaApp.stop_db = self.origin_stop_db
|
||||
VerticaApp.start_db = self.origin_start_db
|
||||
VerticaApp.restart = self.origin_restart
|
||||
VerticaApp.install_if_needed = self.origin_install_if
|
||||
VerticaApp.enable_root = self.origin_enable_root
|
||||
VerticaApp.is_root_enabled = self.origin_is_root_enabled
|
||||
VerticaApp.prepare_for_install_vertica = (
|
||||
self.origin_prepare_for_install_vertica)
|
||||
VerticaApp.add_udls = self.origin_add_udls
|
||||
|
||||
def test_update_status(self):
|
||||
mock_status = MagicMock()
|
||||
self.manager.appStatus = mock_status
|
||||
self.manager.update_status(self.context)
|
||||
mock_status.update.assert_any_call()
|
||||
|
||||
    @patch.object(path, 'exists', MagicMock())
    @patch.object(configuration.ConfigurationManager, 'save_configuration')
    def _prepare_dynamic(self, packages,
                         mock_save_cfg=None,  # appended by the patch above
                         config_content='MockContent', device_path='/dev/vdb',
                         backup_id=None,
                         overrides=None, is_mounted=False):
        # covering all outcomes is starting to cause trouble here
        expected_vol_count = 1 if device_path else 0

        backup_info = None
        if not backup_id:
            backup_info = {'id': backup_id,
                           'location': 'fake-location',
                           'type': 'InnoBackupEx',
                           'checksum': 'fake-checksum',
                           }

        mock_status = MagicMock()
        self.manager.appStatus = mock_status

        mock_status.begin_install = MagicMock(return_value=None)
        volume.VolumeDevice.format = MagicMock(return_value=None)
        volume.VolumeDevice.migrate_data = MagicMock(return_value=None)
        volume.VolumeDevice.mount = MagicMock(return_value=None)
        mount_points = []
        if is_mounted:
            mount_points = ['/mnt']
        VolumeDevice.mount_points = MagicMock(return_value=mount_points)
        VolumeDevice.unmount = MagicMock(return_value=None)

        VerticaApp.install_if_needed = MagicMock(return_value=None)
        VerticaApp.install_vertica = MagicMock(return_value=None)
        VerticaApp.create_db = MagicMock(return_value=None)
        VerticaApp.prepare_for_install_vertica = MagicMock(return_value=None)
        VerticaApp.add_udls = MagicMock()

        # invocation
        self.manager.prepare(context=self.context, packages=packages,
                             config_contents=config_content,
                             databases=None,
                             memory_mb='2048', users=None,
                             device_path=device_path,
                             mount_point="/var/lib/vertica",
                             backup_info=backup_info,
                             overrides=None,
                             cluster_config=None)

        self.assertEqual(expected_vol_count, VolumeDevice.format.call_count)
        self.assertEqual(expected_vol_count,
                         VolumeDevice.migrate_data.call_count)
        self.assertEqual(expected_vol_count,
                         VolumeDevice.mount_points.call_count)
        if is_mounted:
            self.assertEqual(1, VolumeDevice.unmount.call_count)
        else:
            self.assertEqual(0, VolumeDevice.unmount.call_count)

        VerticaApp.install_if_needed.assert_any_call(packages)
        VerticaApp.prepare_for_install_vertica.assert_any_call()
        VerticaApp.install_vertica.assert_any_call()
        VerticaApp.create_db.assert_any_call()
        VerticaApp.add_udls.assert_any_call()

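A note on the decorator mechanics these tests rely on: when patch or patch.object is used without a new= replacement, the created mock is appended to the positional arguments of each call to the wrapped function, bottom-most decorator first. For an ordinary test method invoked only with self, the mocks therefore land immediately after self in bottom-up order; for helpers like _prepare_dynamic and _prepare_method, which are called with their own arguments, the mocks arrive after those arguments. A minimal sketch (hypothetical test name):

    # Sketch only: the bottom-most decorator's mock is appended first.
    @patch.object(VerticaApp, 'stop_db')     # appended second
    @patch.object(VerticaApp, 'start_db')    # appended first
    def test_mock_injection_order(self, mock_start_db, mock_stop_db):
        # Each patched attribute maps to its mock in bottom-up order.
        self.assertIsNot(mock_start_db, mock_stop_db)
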
    def test_prepare_pkg(self):
        self._prepare_dynamic(['vertica'])

    def test_prepare_no_pkg(self):
        self._prepare_dynamic([])

    def test_restart(self):
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        VerticaApp.restart = MagicMock(return_value=None)
        # invocation
        self.manager.restart(self.context)
        # verification/assertion
        VerticaApp.restart.assert_any_call()

    def test_stop_db(self):
        mock_status = MagicMock()
        self.manager.appStatus = mock_status
        VerticaApp.stop_db = MagicMock(return_value=None)
        # invocation
        self.manager.stop_db(self.context)
        # verification/assertion
        VerticaApp.stop_db.assert_any_call(do_not_start_on_reboot=False)

    @patch.object(VerticaApp, 'install_vertica')
    @patch.object(VerticaApp, '_export_conf_to_members')
    @patch.object(VerticaApp, 'create_db')
    @patch.object(VerticaApp, 'add_udls')
    def test_install_cluster(self, mock_udls, mock_create_db, mock_export,
                             mock_install):
        members = ['test1', 'test2']
        self.manager.install_cluster(self.context, members)
        mock_install.assert_called_with('test1,test2')
        mock_export.assert_called_with(members)
        mock_create_db.assert_called_with('test1,test2')
        mock_udls.assert_any_call()

    @patch.object(VerticaAppStatus, 'set_status')
    @patch.object(VerticaApp, 'install_cluster',
                  side_effect=RuntimeError("Boom!"))
    @patch('trove.guestagent.datastore.experimental.vertica.manager.LOG')
    def test_install_cluster_failure(self, mock_logging,
                                     mock_install, mock_set_status):
        members = ["test1", "test2"]
        self.assertRaises(RuntimeError, self.manager.install_cluster,
                          self.context, members)
        mock_set_status.assert_called_with(rd_instance.ServiceStatuses.FAILED)

    @patch.object(VerticaApp, '_get_database_password')
    @patch.object(path, 'isfile')
    @patch.object(system, 'exec_vsql_command')
    def test_add_udls(self, mock_vsql, mock_isfile, mock_pwd):
        mock_vsql.return_value = (None, None)
        password = 'password'
        mock_pwd.return_value = password
        mock_isfile.return_value = True
        self.manager.app.add_udls()
        mock_vsql.assert_any_call(
            password,
            "CREATE LIBRARY curllib AS "
            "'/opt/vertica/sdk/examples/build/cURLLib.so'"
        )
        mock_vsql.assert_any_call(
            password,
            "CREATE SOURCE curl AS LANGUAGE 'C++' NAME 'CurlSourceFactory' "
            "LIBRARY curllib"
        )

    @patch.object(volume.VolumeDevice, 'mount_points', return_value=[])
    @patch.object(volume.VolumeDevice, 'unmount_device', return_value=None)
    @patch.object(volume.VolumeDevice, 'mount', return_value=None)
    @patch.object(volume.VolumeDevice, 'migrate_data', return_value=None)
    @patch.object(volume.VolumeDevice, 'format', return_value=None)
    @patch.object(VerticaApp, 'prepare_for_install_vertica')
    @patch.object(VerticaApp, 'install_if_needed')
    @patch.object(VerticaApp, 'add_udls')
    @patch.object(VerticaAppStatus, 'begin_install')
    def _prepare_method(self, instance_id, instance_type, *args):
        cluster_config = {"id": instance_id,
                          "instance_type": instance_type}

        # invocation
        self.manager.prepare(context=self.context, databases=None,
                             packages=['vertica'],
                             memory_mb='2048', users=None,
                             mount_point='/var/lib/vertica',
                             overrides=None,
                             cluster_config=cluster_config)

    @patch.object(VerticaAppStatus, 'set_status')
    def test_prepare_member(self, mock_set_status):
        self._prepare_method("test-instance-3", "member")
        mock_set_status.assert_called_with(
            rd_instance.ServiceStatuses.INSTANCE_READY, force=True)

    def test_rpc_ping(self):
        output = self.manager.rpc_ping(self.context)
        self.assertTrue(output)

    @patch.object(VerticaAppStatus, 'set_status')
    @patch('trove.guestagent.datastore.manager.LOG')
    def test_prepare_invalid_cluster_config(self, mock_logging,
                                            mock_set_status):
        self.assertRaises(RuntimeError,
                          self._prepare_method,
                          "test-instance-3", "query_router")
        mock_set_status.assert_called_with(
            rd_instance.ServiceStatuses.FAILED, force=True)

    def test_get_filesystem_stats(self):
        with patch.object(dbaas, 'get_filesystem_volume_stats'):
            self.manager.get_filesystem_stats(self.context,
                                              '/var/lib/vertica')
            dbaas.get_filesystem_volume_stats.assert_any_call(
                '/var/lib/vertica')

    def test_mount_volume(self):
        with patch.object(volume.VolumeDevice, 'mount', return_value=None):
            self.manager.mount_volume(self.context,
                                      device_path='/dev/vdb',
                                      mount_point='/var/lib/vertica')
            test_mount = volume.VolumeDevice.mount.call_args_list[0]
            test_mount.assert_called_with('/var/lib/vertica', False)

    def test_unmount_volume(self):
        with patch.object(volume.VolumeDevice, 'unmount', return_value=None):
            self.manager.unmount_volume(self.context, device_path='/dev/vdb')
            test_unmount = volume.VolumeDevice.unmount.call_args_list[0]
            test_unmount.assert_called_with('/var/lib/vertica')

    def test_resize_fs(self):
        with patch.object(volume.VolumeDevice, 'resize_fs',
                          return_value=None):
            self.manager.resize_fs(self.context, device_path='/dev/vdb')
            test_resize_fs = volume.VolumeDevice.resize_fs.call_args_list[0]
            test_resize_fs.assert_called_with('/var/lib/vertica')

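One caveat about the three volume tests above: indexing call_args_list yields a mock.call object, and attribute access on a call object builds a new child call instead of running an assertion, so test_mount.assert_called_with(...) and its siblings can never fail. A stricter variant asserts on the mock itself; a sketch (hypothetical rewrite, not part of this change):

    # Sketch only: assert on the mock, not on an entry of call_args_list.
    def test_mount_volume_strict(self):
        with patch.object(volume.VolumeDevice, 'mount',
                          return_value=None) as mock_mount:
            self.manager.mount_volume(self.context,
                                      device_path='/dev/vdb',
                                      mount_point='/var/lib/vertica')
            # Fails if mount was not called exactly once with these args.
            mock_mount.assert_called_once_with('/var/lib/vertica', False)
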
    @patch.object(operating_system, 'write_file')
    def test_cluster_complete(self, mock_write_file):
        mock_set_status = MagicMock()
        self.manager.appStatus.set_status = mock_set_status
        self.manager.appStatus._get_actual_db_status = MagicMock(
            return_value=rd_instance.ServiceStatuses.RUNNING)
        self.manager.cluster_complete(self.context)
        mock_set_status.assert_called_with(
            rd_instance.ServiceStatuses.RUNNING, force=True)

    def test_get_public_keys(self):
        with patch.object(VerticaApp, 'get_public_keys',
                          return_value='some_key'):
            test_key = self.manager.get_public_keys(self.context, 'test_user')
            self.assertEqual('some_key', test_key)

    def test_authorize_public_keys(self):
        with patch.object(VerticaApp, 'authorize_public_keys',
                          return_value=None):
            self.manager.authorize_public_keys(self.context,
                                               'test_user',
                                               'some_key')
            VerticaApp.authorize_public_keys.assert_any_call(
                'test_user', 'some_key')

    def test_start_db_with_conf_changes(self):
        with patch.object(VerticaApp, 'start_db_with_conf_changes'):
            self.manager.start_db_with_conf_changes(self.context, 'something')
            VerticaApp.start_db_with_conf_changes.assert_any_call('something')

    def test_change_passwords(self):
        self.assertRaises(DatastoreOperationNotSupported,
                          self.manager.change_passwords,
                          self.context, None)

    def test_update_attributes(self):
        self.assertRaises(DatastoreOperationNotSupported,
                          self.manager.update_attributes,
                          self.context, 'test_user', '%',
                          {'name': 'new_user'})

    def test_create_database(self):
        self.assertRaises(DatastoreOperationNotSupported,
                          self.manager.create_database,
                          self.context, [{'name': 'test_db'}])

    def test_create_user(self):
        self.assertRaises(DatastoreOperationNotSupported,
                          self.manager.create_user,
                          self.context, [{'name': 'test_user'}])

    def test_delete_database(self):
        self.assertRaises(DatastoreOperationNotSupported,
                          self.manager.delete_database,
                          self.context, [{'name': 'test_db'}])

    def test_delete_user(self):
        self.assertRaises(DatastoreOperationNotSupported,
                          self.manager.delete_user,
                          self.context, [{'name': 'test_user'}])

    def test_get_user(self):
        self.assertRaises(DatastoreOperationNotSupported,
                          self.manager.get_user,
                          self.context, 'test_user', '%')

    def test_grant_access(self):
        self.assertRaises(DatastoreOperationNotSupported,
                          self.manager.grant_access,
                          self.context, 'test_user', '%',
                          [{'name': 'test_db'}])

    def test_revoke_access(self):
        self.assertRaises(DatastoreOperationNotSupported,
                          self.manager.revoke_access,
                          self.context, 'test_user', '%',
                          [{'name': 'test_db'}])

    def test_list_access(self):
        self.assertRaises(DatastoreOperationNotSupported,
                          self.manager.list_access,
                          self.context, 'test_user', '%')

    def test_list_databases(self):
        self.assertRaises(DatastoreOperationNotSupported,
                          self.manager.list_databases,
                          self.context)

    def test_list_users(self):
        self.assertRaises(DatastoreOperationNotSupported,
                          self.manager.list_users,
                          self.context)

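Taken together, the battery above pins down the Vertica manager's contract: every user- and database-management entry point refuses to run. For illustration only, a manager method honoring that contract could be sketched as follows; the keyword names are an assumption about how DatastoreOperationNotSupported is constructed, not something shown in this diff:

    # Sketch only (hypothetical): reject an unsupported operation.
    # The operation/datastore keywords are an assumed exception
    # signature, not confirmed by this change.
    def create_user(self, context, users):
        raise DatastoreOperationNotSupported(
            operation='create_user', datastore='vertica')
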
    def test_enable_root(self):
        VerticaApp.enable_root = MagicMock(return_value='user_id_stuff')
        user_id = self.manager.enable_root_with_password(self.context)
        self.assertThat(user_id, Is('user_id_stuff'))

    def test_is_root_enabled(self):
        VerticaApp.is_root_enabled = MagicMock(return_value=True)
        is_enabled = self.manager.is_root_enabled(self.context)
        self.assertThat(is_enabled, Is(True))

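assertThat with Is comes from the testtools matcher API that trove_testtools builds on; Is checks object identity, so the closest plain-unittest spelling is assertIs. A sketch of that form (hypothetical duplicate test, shown only for comparison):

    # Sketch only: testtools' Is() matcher checks identity ("is"),
    # so assertIs is the equivalent plain-unittest assertion.
    def test_is_root_enabled_plain_assert(self):
        VerticaApp.is_root_enabled = MagicMock(return_value=True)
        self.assertIs(True, self.manager.is_root_enabled(self.context))
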
    def test_create_backup(self):
        self.assertRaises(DatastoreOperationNotSupported,
                          self.manager.create_backup,
                          self.context, {})

@@ -37,7 +37,7 @@ class FakeDBInstance(object):
         self.id = str(uuid.uuid4())
         self.deleted = False
         self.datastore_version_id = str(uuid.uuid4())
-        self.server_status = "ACTIVE"
+        self.server_status = "HEALTHY"
         self.task_status = FakeInstanceTask()

@@ -75,7 +75,7 @@ class fake_Server(object):
         self.files = None
         self.userdata = None
         self.block_device_mapping_v2 = None
-        self.status = 'ACTIVE'
+        self.status = 'HEALTHY'
         self.key_name = None

@@ -723,7 +723,7 @@ class BuiltInstanceTasksTest(trove_testtools.TestCase):
             volume_id=VOLUME_ID)

         # this is used during the final check of whether the resize
         # was successful
-        db_instance.server_status = 'ACTIVE'
+        db_instance.server_status = 'HEALTHY'
         self.db_instance = db_instance
         self.dm_dv_load_by_uuid_patch = patch.object(
             datastore_models.DatastoreVersion, 'load_by_uuid', MagicMock(

@@ -750,7 +750,7 @@ class BuiltInstanceTasksTest(trove_testtools.TestCase):
             spec=novaclient.v2.servers.ServerManager)
         self.stub_running_server = MagicMock(
             spec=novaclient.v2.servers.Server)
-        self.stub_running_server.status = 'ACTIVE'
+        self.stub_running_server.status = 'HEALTHY'
         self.stub_running_server.flavor = {'id': 6, 'ram': 512}
         self.stub_verifying_server = MagicMock(
             spec=novaclient.v2.servers.Server)