Add volume migrate action

This patch adds a volume migrate action.

Change-Id: I9f46931d2a7edff4c727d674ec315924b9ae30c2
Implements: blueprint volume-migrate-action
Hidekazu Nakamura 2017-06-25 20:49:31 +09:00
parent 2266e2baa3
commit bff76de6f1
11 changed files with 1001 additions and 1 deletion


@ -0,0 +1,4 @@
---
features:
- |
Added a volume migrate action.


@ -80,6 +80,7 @@ watcher_actions =
change_nova_service_state = watcher.applier.actions.change_nova_service_state:ChangeNovaServiceState
resize = watcher.applier.actions.resize:Resize
change_node_power_state = watcher.applier.actions.change_node_power_state:ChangeNodePowerState
volume_migrate = watcher.applier.actions.volume_migration:VolumeMigrate
watcher_workflow_engines =
taskflow = watcher.applier.workflow_engine.default:DefaultWorkFlowEngine


@ -0,0 +1,252 @@
# Copyright 2017 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jsonschema
from oslo_log import log
from cinderclient import client as cinder_client
from watcher._i18n import _
from watcher.applier.actions import base
from watcher.common import cinder_helper
from watcher.common import exception
from watcher.common import keystone_helper
from watcher.common import nova_helper
from watcher.common import utils
from watcher import conf
CONF = conf.CONF
LOG = log.getLogger(__name__)
class VolumeMigrate(base.BaseAction):
"""Migrates a volume to destination node or type
By using this action, you will be able to migrate cinder volume.
Migration type 'swap' can only be used for migrating attached volume.
Migration type 'cold' can only be used for migrating detached volume.
The action schema is::
schema = Schema({
'resource_id': str, # should be a UUID
'migration_type': str, # choices -> "swap", "cold"
'destination_node': str,
'destination_type': str,
)}
The `resource_id` is the UUID of cinder volume to migrate.
The `destination_node` is the destination block storage pool name.
(list of available pools are returned by this command: ``cinder
get-pools``) which is mandatory for migrating detached volume
to the one with same volume type.
The `destination_type` is the destination block storage type name.
(list of available types are returned by this command: ``cinder
type-list``) which is mandatory for migrating detached volume or
swapping attached volume to the one with different volume type.
"""
MIGRATION_TYPE = 'migration_type'
SWAP = 'swap'
COLD = 'cold'
DESTINATION_NODE = "destination_node"
DESTINATION_TYPE = "destination_type"
def __init__(self, config, osc=None):
super(VolumeMigrate, self).__init__(config)
self.temp_username = utils.random_string(10)
self.temp_password = utils.random_string(10)
self.cinder_util = cinder_helper.CinderHelper(osc=self.osc)
self.nova_util = nova_helper.NovaHelper(osc=self.osc)
@property
def schema(self):
return {
'type': 'object',
'properties': {
'resource_id': {
'type': 'string',
"minlength": 1,
"pattern": ("^([a-fA-F0-9]){8}-([a-fA-F0-9]){4}-"
"([a-fA-F0-9]){4}-([a-fA-F0-9]){4}-"
"([a-fA-F0-9]){12}$")
},
'migration_type': {
'type': 'string',
"enum": ["swap", "cold"]
},
'destination_node': {
"anyof": [
{'type': 'string', "minLength": 1},
{'type': 'None'}
]
},
'destination_type': {
"anyof": [
{'type': 'string', "minLength": 1},
{'type': 'None'}
]
}
},
'required': ['resource_id', 'migration_type'],
'additionalProperties': False,
}
def validate_parameters(self):
try:
jsonschema.validate(self.input_parameters, self.schema)
return True
except jsonschema.ValidationError as e:
raise e
@property
def volume_id(self):
return self.input_parameters.get(self.RESOURCE_ID)
@property
def migration_type(self):
return self.input_parameters.get(self.MIGRATION_TYPE)
@property
def destination_node(self):
return self.input_parameters.get(self.DESTINATION_NODE)
@property
def destination_type(self):
return self.input_parameters.get(self.DESTINATION_TYPE)
def _cold_migrate(self, volume, dest_node, dest_type):
if not self.cinder_util.can_cold(volume, dest_node):
raise exception.Invalid(
message=(_("Invalid state for cold migration")))
if dest_node:
return self.cinder_util.migrate(volume, dest_node)
elif dest_type:
return self.cinder_util.retype(volume, dest_type)
else:
raise exception.Invalid(
message=(_("destination host or destination type is "
"required when migration type is cold")))
def _can_swap(self, volume):
"""Judge volume can be swapped"""
if not volume.attachments:
return False
instance_id = volume.attachments[0]['server_id']
instance_status = self.nova_util.find_instance(instance_id).status
if (volume.status == 'in-use' and
instance_status in ('ACTIVE', 'PAUSED', 'RESIZED')):
return True
return False
def _create_user(self, volume, user):
"""Create user with volume attribute and user information"""
keystone_util = keystone_helper.KeystoneHelper(osc=self.osc)
project_id = getattr(volume, 'os-vol-tenant-attr:tenant_id')
user['project'] = project_id
user['domain'] = keystone_util.get_project(project_id).domain_id
user['roles'] = ['admin']
return keystone_util.create_user(user)
def _get_cinder_client(self, session):
"""Get cinder client by session"""
return cinder_client.Client(
CONF.cinder_client.api_version,
session=session,
endpoint_type=CONF.cinder_client.endpoint_type)
def _swap_volume(self, volume, dest_type):
"""Swap volume to dest_type
Limitation note: only for compute libvirt driver
"""
if not dest_type:
raise exception.Invalid(
message=(_("destination type is required when "
"migration type is swap")))
if not self._can_swap(volume):
raise exception.Invalid(
message=(_("Invalid state for swapping volume")))
user_info = {
'name': self.temp_username,
'password': self.temp_password}
user = self._create_user(volume, user_info)
keystone_util = keystone_helper.KeystoneHelper(osc=self.osc)
try:
session = keystone_util.create_session(
user.id, self.temp_password)
temp_cinder = self._get_cinder_client(session)
# swap volume
new_volume = self.cinder_util.create_volume(
temp_cinder, volume, dest_type)
self.nova_util.swap_volume(volume, new_volume)
# delete old volume
self.cinder_util.delete_volume(volume)
finally:
keystone_util.delete_user(user)
return True
def _migrate(self, volume_id, dest_node, dest_type):
try:
volume = self.cinder_util.get_volume(volume_id)
if self.migration_type == self.COLD:
return self._cold_migrate(volume, dest_node, dest_type)
elif self.migration_type == self.SWAP:
if dest_node:
LOG.warning("dest_node is ignored")
return self._swap_volume(volume, dest_type)
else:
raise exception.Invalid(
message=(_("Migration of type '%(migration_type)s' is not "
"supported.") %
{'migration_type': self.migration_type}))
except exception.Invalid as ei:
LOG.exception(ei)
return False
except Exception as e:
LOG.critical("Unexpected exception occurred.")
LOG.exception(e)
return False
def execute(self):
return self._migrate(self.volume_id,
self.destination_node,
self.destination_type)
def revert(self):
LOG.warning("revert not supported")
def abort(self):
pass
def pre_condition(self):
pass
def post_condition(self):
pass
def get_description(self):
return "Moving a volume to destination_node or destination_type"


@ -12,12 +12,18 @@
# limitations under the License.
#
import time
from oslo_log import log
from cinderclient import exceptions as cinder_exception
from cinderclient.v2.volumes import Volume
from watcher._i18n import _
from watcher.common import clients
from watcher.common import exception
from watcher import conf
CONF = conf.CONF
LOG = log.getLogger(__name__)
@ -77,3 +83,190 @@ class CinderHelper(object):
return volume_type[0].name
else:
return ""
def get_volume(self, volume):
if isinstance(volume, Volume):
volume = volume.id
try:
volume = self.cinder.volumes.get(volume)
return volume
except cinder_exception.NotFound:
return self.cinder.volumes.find(name=volume)
def backendname_from_poolname(self, poolname):
"""Get backendname from poolname"""
# pooolname formatted as host@backend#pool since ocata
# as of ocata, may as only host
backend = poolname.split('#')[0]
backendname = ""
try:
backendname = backend.split('@')[1]
except IndexError:
pass
return backendname
def _has_snapshot(self, volume):
"""Judge volume has a snapshot"""
volume = self.get_volume(volume)
if volume.snapshot_id:
return True
return False
def can_cold(self, volume, host=None):
"""Judge volume can be migrated"""
can_cold = False
status = self.get_volume(volume).status
snapshot = self._has_snapshot(volume)
same_host = False
if host and getattr(volume, 'os-vol-host-attr:host') == host:
same_host = True
if (status == 'available' and
snapshot is False and
same_host is False):
can_cold = True
return can_cold
def get_deleting_volume(self, volume):
volume = self.get_volume(volume)
all_volume = self.get_volume_list()
for _volume in all_volume:
if getattr(_volume, 'os-vol-mig-status-attr:name_id') == volume.id:
return _volume
return False
def _can_get_volume(self, volume_id):
"""Check to get volume with volume_id"""
try:
volume = self.get_volume(volume_id)
if not volume:
raise Exception
except cinder_exception.NotFound:
return False
else:
return True
def check_volume_deleted(self, volume, retry=120, retry_interval=10):
"""Check volume has been deleted"""
volume = self.get_volume(volume)
while self._can_get_volume(volume.id) and retry:
volume = self.get_volume(volume.id)
time.sleep(retry_interval)
retry -= 1
LOG.debug("retry count: %s" % retry)
LOG.debug("Waiting to complete deletion of volume %s" % volume.id)
if self._can_get_volume(volume.id):
LOG.error("Volume deletion error: %s" % volume.id)
return False
LOG.debug("Volume %s was deleted successfully." % volume.id)
return True
def check_migrated(self, volume, retry_interval=10):
volume = self.get_volume(volume)
while getattr(volume, 'migration_status') == 'migrating':
volume = self.get_volume(volume.id)
LOG.debug('Waiting for the migration of {0}'.format(volume))
time.sleep(retry_interval)
if getattr(volume, 'migration_status') == 'error':
host_name = getattr(volume, 'os-vol-host-attr:host')
error_msg = (("Volume migration error : "
"volume %(volume)s is now on host '%(host)s'.") %
{'volume': volume.id, 'host': host_name})
LOG.error(error_msg)
return False
host_name = getattr(volume, 'os-vol-host-attr:host')
if getattr(volume, 'migration_status') == 'success':
# check original volume deleted
deleting_volume = self.get_deleting_volume(volume)
if deleting_volume:
delete_id = getattr(deleting_volume, 'id')
if not self.check_volume_deleted(delete_id):
return False
else:
host_name = getattr(volume, 'os-vol-host-attr:host')
error_msg = (("Volume migration error : "
"volume %(volume)s is now on host '%(host)s'.") %
{'volume': volume.id, 'host': host_name})
LOG.error(error_msg)
return False
LOG.debug(
"Volume migration succeeded : "
"volume %s is now on host '%s'." % (
volume.id, host_name))
return True
def migrate(self, volume, dest_node):
"""Migrate volume to dest_node"""
volume = self.get_volume(volume)
dest_backend = self.backendname_from_poolname(dest_node)
dest_type = self.get_volume_type_by_backendname(dest_backend)
if volume.volume_type != dest_type:
raise exception.Invalid(
message=(_("Volume type must be same for migrating")))
source_node = getattr(volume, 'os-vol-host-attr:host')
LOG.debug("Volume %s found on host '%s'."
% (volume.id, source_node))
self.cinder.volumes.migrate_volume(
volume, dest_node, False, True)
return self.check_migrated(volume)
def retype(self, volume, dest_type):
"""Retype volume to dest_type with on-demand option"""
volume = self.get_volume(volume)
if volume.volume_type == dest_type:
raise exception.Invalid(
message=(_("Volume type must be different for retyping")))
source_node = getattr(volume, 'os-vol-host-attr:host')
LOG.debug(
"Volume %s found on host '%s'." % (
volume.id, source_node))
self.cinder.volumes.retype(
volume, dest_type, "on-demand")
return self.check_migrated(volume)
def create_volume(self, cinder, volume,
dest_type, retry=120, retry_interval=10):
"""Create volume of volume with dest_type using cinder"""
volume = self.get_volume(volume)
LOG.debug("start creating new volume")
new_volume = cinder.volumes.create(
getattr(volume, 'size'),
name=getattr(volume, 'name'),
volume_type=dest_type,
availability_zone=getattr(volume, 'availability_zone'))
while getattr(new_volume, 'status') != 'available' and retry:
new_volume = cinder.volumes.get(new_volume.id)
LOG.debug('Waiting for creation of volume {0}'.format(new_volume))
time.sleep(retry_interval)
retry -= 1
LOG.debug("retry count: %s" % retry)
if getattr(new_volume, 'status') != 'available':
error_msg = (_("Failed to create volume '%(volume)s. ") %
{'volume': new_volume.id})
raise Exception(error_msg)
LOG.debug("Volume %s was created successfully." % new_volume)
return new_volume
def delete_volume(self, volume):
"""Delete volume"""
volume = self.get_volume(volume)
self.cinder.volumes.delete(volume)
result = self.check_volume_deleted(volume)
if not result:
error_msg = (_("Failed to delete volume '%(volume)s. ") %
{'volume': volume.id})
raise Exception(error_msg)
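The host@backend#pool parsing above is the part most likely to surprise reviewers, so here is a standalone sketch of the same split logic used by backendname_from_poolname; the pool names are made-up examples.

def backend_from_pool(poolname):
    """Return the backend portion of a 'host@backend#pool' pool name."""
    backend = poolname.split('#')[0]   # drop the '#pool' suffix -> 'host@backend'
    try:
        return backend.split('@')[1]   # keep the part after '@' -> 'backend'
    except IndexError:
        return ""                      # pool name contained only a host

assert backend_from_pool('compute1@lvmdriver-1#lvmdriver-1') == 'lvmdriver-1'
assert backend_from_pool('compute1') == ''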


@ -0,0 +1,124 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from oslo_log import log
from keystoneauth1.exceptions import http as ks_exceptions
from keystoneauth1 import loading
from keystoneauth1 import session
from watcher._i18n import _
from watcher.common import clients
from watcher.common import exception
from watcher import conf
CONF = conf.CONF
LOG = log.getLogger(__name__)
class KeystoneHelper(object):
def __init__(self, osc=None):
""":param osc: an OpenStackClients instance"""
self.osc = osc if osc else clients.OpenStackClients()
self.keystone = self.osc.keystone()
def get_role(self, name_or_id):
try:
role = self.keystone.roles.get(name_or_id)
return role
except ks_exceptions.NotFound:
roles = self.keystone.roles.list(name=name_or_id)
if len(roles) == 0:
raise exception.Invalid(
message=(_("Role not Found: %s") % name_or_id))
if len(roles) > 1:
raise exception.Invalid(
message=(_("Role name seems ambiguous: %s") % name_or_id))
return roles[0]
def get_user(self, name_or_id):
try:
user = self.keystone.users.get(name_or_id)
return user
except ks_exceptions.NotFound:
users = self.keystone.users.list(name=name_or_id)
if len(users) == 0:
raise exception.Invalid(
message=(_("User not Found: %s") % name_or_id))
if len(users) > 1:
raise exception.Invalid(
message=(_("User name seems ambiguous: %s") % name_or_id))
return users[0]
def get_project(self, name_or_id):
try:
project = self.keystone.projects.get(name_or_id)
return project
except ks_exceptions.NotFound:
projects = self.keystone.projects.list(name=name_or_id)
if len(projects) == 0:
raise exception.Invalid(
message=(_("Project not Found: %s") % name_or_id))
if len(projects) > 1:
raise exception.Invalid(
messsage=(_("Project name seems ambiguous: %s") %
name_or_id))
return projects[0]
def get_domain(self, name_or_id):
try:
domain = self.keystone.domains.get(name_or_id)
return domain
except ks_exceptions.NotFound:
domains = self.keystone.domains.list(name=name_or_id)
if len(domains) == 0:
raise exception.Invalid(
message=(_("Domain not Found: %s") % name_or_id))
if len(domains) > 1:
raise exception.Invalid(
message=(_("Domain name seems ambiguous: %s") %
name_or_id))
return domains[0]
def create_session(self, user_id, password):
user = self.get_user(user_id)
loader = loading.get_plugin_loader('password')
auth = loader.load_from_options(
auth_url=CONF.watcher_clients_auth.auth_url,
password=password,
user_id=user_id,
project_id=user.default_project_id)
return session.Session(auth=auth)
def create_user(self, user):
project = self.get_project(user['project'])
domain = self.get_domain(user['domain'])
_user = self.keystone.users.create(
user['name'],
password=user['password'],
domain=domain,
project=project,
)
for role in user['roles']:
role = self.get_role(role)
self.keystone.roles.grant(
role.id, user=_user.id, project=project.id)
return _user
def delete_user(self, user):
try:
user = self.get_user(user)
self.keystone.users.delete(user)
except exception.Invalid:
pass
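For context, a hedged sketch of how the swap path in VolumeMigrate drives this helper: a short-lived user is created in the volume owner's project, a session is built from its credentials, and the user is deleted again once the swap finishes. The project and domain values below are made-up placeholders.

from watcher.common import keystone_helper
from watcher.common import utils

keystone_util = keystone_helper.KeystoneHelper()
password = utils.random_string(10)
user = keystone_util.create_user({
    'name': utils.random_string(10),
    'password': password,
    'project': 'project-id-of-the-volume-owner',  # placeholder project id
    'domain': 'default',                          # placeholder domain
    'roles': ['admin'],
})
try:
    session = keystone_util.create_session(user.id, password)
    # ... build a temporary cinder client from `session`, create the new
    # volume and swap it in, as VolumeMigrate._swap_volume does ...
finally:
    keystone_util.delete_user(user)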


@ -797,3 +797,27 @@ class NovaHelper(object):
def get_running_migration(self, instance_id):
return self.nova.server_migrations.list(server=instance_id)
def swap_volume(self, old_volume, new_volume,
retry=120, retry_interval=10):
"""Swap old_volume for new_volume"""
attachments = old_volume.attachments
instance_id = attachments[0]['server_id']
# do volume update
self.nova.volumes.update_server_volume(
instance_id, old_volume.id, new_volume.id)
while getattr(new_volume, 'status') != 'in-use' and retry:
new_volume = self.cinder.volumes.get(new_volume.id)
LOG.debug('Waiting for volume update to {0}'.format(new_volume))
time.sleep(retry_interval)
retry -= 1
LOG.debug("retry count: %s" % retry)
if getattr(new_volume, 'status') != "in-use":
LOG.error("Volume update retry timeout or error")
return False
host_name = getattr(new_volume, "os-vol-host-attr:host")
LOG.debug(
"Volume update succeeded : "
"Volume %s is now on host '%s'." % (new_volume.id, host_name))
return True


@ -17,7 +17,9 @@
"""Utilities and helper functions."""
import datetime
import random
import re
import string
from croniter import croniter
@ -158,3 +160,8 @@ StrictDefaultValidatingDraft4Validator = extend_with_default(
extend_with_strict_schema(validators.Draft4Validator))
Draft4Validator = validators.Draft4Validator
def random_string(n):
return ''.join([random.choice(
string.ascii_letters + string.digits) for i in range(n)])


@ -47,7 +47,8 @@ class WeightPlanner(base.BasePlanner):
super(WeightPlanner, self).__init__(config)
action_weights = {
'nop': 60,
'nop': 70,
'volume_migrate': 60,
'change_nova_service_state': 50,
'sleep': 40,
'migrate': 30,
@ -64,6 +65,7 @@ class WeightPlanner(base.BasePlanner):
'change_nova_service_state': 1,
'nop': 1,
'change_node_power_state': 2,
'volume_migrate': 2
}
@classmethod


@ -0,0 +1,249 @@
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import jsonschema
import mock
from watcher.applier.actions import base as baction
from watcher.applier.actions import volume_migration
from watcher.common import cinder_helper
from watcher.common import clients
from watcher.common import keystone_helper
from watcher.common import nova_helper
from watcher.common import utils as w_utils
from watcher.tests import base
class TestMigration(base.TestCase):
VOLUME_UUID = "45a37aeb-95ab-4ddb-a305-7d9f62c2f5ba"
INSTANCE_UUID = "45a37aec-85ab-4dda-a303-7d9f62c2f5bb"
def setUp(self):
super(TestMigration, self).setUp()
self.m_osc_cls = mock.Mock()
self.m_osc = mock.Mock(spec=clients.OpenStackClients)
self.m_osc_cls.return_value = self.m_osc
self.m_n_helper_cls = mock.Mock()
self.m_n_helper = mock.Mock(spec=nova_helper.NovaHelper)
self.m_n_helper_cls.return_value = self.m_n_helper
self.m_c_helper_cls = mock.Mock()
self.m_c_helper = mock.Mock(spec=cinder_helper.CinderHelper)
self.m_c_helper_cls.return_value = self.m_c_helper
self.m_k_helper_cls = mock.Mock()
self.m_k_helper = mock.Mock(spec=keystone_helper.KeystoneHelper)
self.m_k_helper_cls.return_value = self.m_k_helper
m_openstack_clients = mock.patch.object(
clients, "OpenStackClients", self.m_osc_cls)
m_nova_helper = mock.patch.object(
nova_helper, "NovaHelper", self.m_n_helper_cls)
m_cinder_helper = mock.patch.object(
cinder_helper, "CinderHelper", self.m_c_helper_cls)
m_keystone_helper = mock.patch.object(
keystone_helper, "KeystoneHelper", self.m_k_helper_cls)
m_openstack_clients.start()
m_nova_helper.start()
m_cinder_helper.start()
m_keystone_helper.start()
self.addCleanup(m_keystone_helper.stop)
self.addCleanup(m_cinder_helper.stop)
self.addCleanup(m_nova_helper.stop)
self.addCleanup(m_openstack_clients.stop)
self.action = volume_migration.VolumeMigrate(mock.Mock())
self.input_parameters_swap = {
"migration_type": "swap",
"destination_node": "storage1-poolname",
"destination_type": "storage1-typename",
baction.BaseAction.RESOURCE_ID: self.VOLUME_UUID,
}
self.action_swap = volume_migration.VolumeMigrate(mock.Mock())
self.action_swap.input_parameters = self.input_parameters_swap
self.input_parameters_migrate = {
"migration_type": "cold",
"destination_node": "storage1-poolname",
"destination_type": "",
baction.BaseAction.RESOURCE_ID: self.VOLUME_UUID,
}
self.action_migrate = volume_migration.VolumeMigrate(mock.Mock())
self.action_migrate.input_parameters = self.input_parameters_migrate
self.input_parameters_retype = {
"migration_type": "cold",
"destination_node": "",
"destination_type": "storage1-typename",
baction.BaseAction.RESOURCE_ID: self.VOLUME_UUID,
}
self.action_retype = volume_migration.VolumeMigrate(mock.Mock())
self.action_retype.input_parameters = self.input_parameters_retype
@staticmethod
def fake_volume(**kwargs):
volume = mock.MagicMock()
volume.id = kwargs.get('id', TestMigration.VOLUME_UUID)
volume.size = kwargs.get('size', '1')
volume.status = kwargs.get('status', 'available')
volume.snapshot_id = kwargs.get('snapshot_id', None)
volume.availability_zone = kwargs.get('availability_zone', 'nova')
return volume
@staticmethod
def fake_instance(**kwargs):
instance = mock.MagicMock()
instance.id = kwargs.get('id', TestMigration.INSTANCE_UUID)
instance.status = kwargs.get('status', 'ACTIVE')
return instance
def test_parameters_swap(self):
params = {baction.BaseAction.RESOURCE_ID:
self.VOLUME_UUID,
self.action.MIGRATION_TYPE: 'swap',
self.action.DESTINATION_NODE: None,
self.action.DESTINATION_TYPE: 'type-1'}
self.action_swap.input_parameters = params
self.assertTrue(self.action_swap.validate_parameters())
def test_parameters_migrate(self):
params = {baction.BaseAction.RESOURCE_ID:
self.VOLUME_UUID,
self.action.MIGRATION_TYPE: 'cold',
self.action.DESTINATION_NODE: 'node-1',
self.action.DESTINATION_TYPE: None}
self.action_migrate.input_parameters = params
self.assertTrue(self.action_migrate.validate_parameters())
def test_parameters_retype(self):
params = {baction.BaseAction.RESOURCE_ID:
self.VOLUME_UUID,
self.action.MIGRATION_TYPE: 'cold',
self.action.DESTINATION_NODE: None,
self.action.DESTINATION_TYPE: 'type-1'}
self.action_retype.input_parameters = params
self.assertTrue(self.action_retype.validate_parameters())
def test_parameters_exception_resource_id(self):
params = {baction.BaseAction.RESOURCE_ID: "EFEF",
self.action.MIGRATION_TYPE: 'swap',
self.action.DESTINATION_NODE: None,
self.action.DESTINATION_TYPE: 'type-1'}
self.action_swap.input_parameters = params
self.assertRaises(jsonschema.ValidationError,
self.action_swap.validate_parameters)
def test_migrate_success(self):
volume = self.fake_volume()
self.m_c_helper.can_cold.return_value = True
self.m_c_helper.get_volume.return_value = volume
result = self.action_migrate.execute()
self.assertTrue(result)
self.m_c_helper.migrate.assert_called_once_with(
volume,
"storage1-poolname"
)
def test_migrate_fail(self):
self.m_c_helper.can_cold.return_value = False
result = self.action_migrate.execute()
self.assertFalse(result)
self.m_c_helper.migrate.assert_not_called()
def test_retype_success(self):
volume = self.fake_volume()
self.m_c_helper.can_cold.return_value = True
self.m_c_helper.get_volume.return_value = volume
result = self.action_retype.execute()
self.assertTrue(result)
self.m_c_helper.retype.assert_called_once_with(
volume,
"storage1-typename",
)
def test_retype_fail(self):
self.m_c_helper.can_cold.return_value = False
result = self.action_retype.execute()
self.assertFalse(result)
self.m_c_helper.retype.assert_not_called()
def test_swap_success(self):
volume = self.fake_volume(
status='in-use', attachments=[{'server_id': 'server_id'}])
self.m_n_helper.find_instance.return_value = self.fake_instance()
new_volume = self.fake_volume(id=w_utils.generate_uuid())
user = mock.Mock()
session = mock.MagicMock()
self.m_k_helper.create_user.return_value = user
self.m_k_helper.create_session.return_value = session
self.m_c_helper.get_volume.return_value = volume
self.m_c_helper.create_volume.return_value = new_volume
result = self.action_swap.execute()
self.assertTrue(result)
self.m_n_helper.swap_volume.assert_called_once_with(
volume,
new_volume
)
self.m_k_helper.delete_user.assert_called_once_with(user)
def test_swap_fail(self):
# _can_swap fail
instance = self.fake_instance(status='STOPPED')
self.m_n_helper.find_instance.return_value = instance
result = self.action_swap.execute()
self.assertFalse(result)
def test_can_swap_success(self):
volume = self.fake_volume(
status='in-use', attachments=[{'server_id': 'server_id'}])
instance = self.fake_instance()
self.m_n_helper.find_instance.return_value = instance
result = self.action_swap._can_swap(volume)
self.assertTrue(result)
instance = self.fake_instance(status='PAUSED')
self.m_n_helper.find_instance.return_value = instance
result = self.action_swap._can_swap(volume)
self.assertTrue(result)
instance = self.fake_instance(status='RESIZED')
self.m_n_helper.find_instance.return_value = instance
result = self.action_swap._can_swap(volume)
self.assertTrue(result)
def test_can_swap_fail(self):
volume = self.fake_volume(
status='in-use', attachments=[{'server_id': 'server_id'}])
instance = self.fake_instance(status='STOPPED')
self.m_n_helper.find_instance.return_value = instance
result = self.action_swap._can_swap(volume)
self.assertFalse(result)


@ -1,3 +1,4 @@
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@ -13,6 +14,7 @@
#
import mock
import time
from watcher.common import cinder_helper
from watcher.common import clients
@ -124,3 +126,120 @@ class TestCinderHelper(base.TestCase):
'nobackend')
self.assertEqual("", volume_type_name)
@staticmethod
def fake_volume(**kwargs):
volume = mock.MagicMock()
volume.id = kwargs.get('id', '45a37aeb-95ab-4ddb-a305-7d9f62c2f5ba')
volume.name = kwargs.get('name', 'fakename')
volume.size = kwargs.get('size', '1')
volume.status = kwargs.get('status', 'available')
volume.snapshot_id = kwargs.get('snapshot_id', None)
volume.availability_zone = kwargs.get('availability_zone', 'nova')
volume.volume_type = kwargs.get('volume_type', 'fake_type')
return volume
def test_can_cold_success(self, mock_cinder):
cinder_util = cinder_helper.CinderHelper()
volume = self.fake_volume()
cinder_util.cinder.volumes.get.return_value = volume
result = cinder_util.can_cold(volume)
self.assertTrue(result)
def test_can_cold_fail(self, mock_cinder):
cinder_util = cinder_helper.CinderHelper()
volume = self.fake_volume(status='in-use')
cinder_util.cinder.volumes.get.return_value = volume
result = cinder_util.can_cold(volume)
self.assertFalse(result)
volume = self.fake_volume(snapshot_id='snapshot_id')
cinder_util.cinder.volumes.get.return_value = volume
result = cinder_util.can_cold(volume)
self.assertFalse(result)
volume = self.fake_volume()
setattr(volume, 'os-vol-host-attr:host', 'host@backend#pool')
cinder_util.cinder.volumes.get.return_value = volume
result = cinder_util.can_cold(volume, 'host@backend#pool')
self.assertFalse(result)
@mock.patch.object(time, 'sleep', mock.Mock())
def test_migrate_success(self, mock_cinder):
cinder_util = cinder_helper.CinderHelper()
volume = self.fake_volume()
setattr(volume, 'os-vol-host-attr:host', 'source_node')
setattr(volume, 'migration_status', 'success')
cinder_util.cinder.volumes.get.return_value = volume
volume_type = self.fake_volume_type()
cinder_util.cinder.volume_types.list.return_value = [volume_type]
result = cinder_util.migrate(volume, 'host@backend#pool')
self.assertTrue(result)
@mock.patch.object(time, 'sleep', mock.Mock())
def test_migrate_fail(self, mock_cinder):
cinder_util = cinder_helper.CinderHelper()
volume = self.fake_volume()
cinder_util.cinder.volumes.get.return_value = volume
volume_type = self.fake_volume_type()
volume_type.name = 'notbackend'
cinder_util.cinder.volume_types.list.return_value = [volume_type]
self.assertRaisesRegex(
exception.Invalid,
"Volume type must be same for migrating",
cinder_util.migrate, volume, 'host@backend#pool')
volume = self.fake_volume()
setattr(volume, 'os-vol-host-attr:host', 'source_node')
setattr(volume, 'migration_status', 'error')
cinder_util.cinder.volumes.get.return_value = volume
volume_type = self.fake_volume_type()
cinder_util.cinder.volume_types.list.return_value = [volume_type]
result = cinder_util.migrate(volume, 'host@backend#pool')
self.assertFalse(result)
@mock.patch.object(time, 'sleep', mock.Mock())
def test_retype_success(self, mock_cinder):
cinder_util = cinder_helper.CinderHelper()
volume = self.fake_volume()
setattr(volume, 'os-vol-host-attr:host', 'source_node')
setattr(volume, 'migration_status', 'success')
cinder_util.cinder.volumes.get.return_value = volume
result = cinder_util.retype(volume, 'notfake_type')
self.assertTrue(result)
@mock.patch.object(time, 'sleep', mock.Mock())
def test_retype_fail(self, mock_cinder):
cinder_util = cinder_helper.CinderHelper()
volume = self.fake_volume()
setattr(volume, 'os-vol-host-attr:host', 'source_node')
setattr(volume, 'migration_status', 'success')
cinder_util.cinder.volumes.get.return_value = volume
self.assertRaisesRegex(
exception.Invalid,
"Volume type must be different for retyping",
cinder_util.retype, volume, 'fake_type')
volume = self.fake_volume()
setattr(volume, 'os-vol-host-attr:host', 'source_node')
setattr(volume, 'migration_status', 'error')
cinder_util.cinder.volumes.get.return_value = volume
result = cinder_util.retype(volume, 'notfake_type')
self.assertFalse(result)


@ -310,3 +310,28 @@ class TestNovaHelper(base.TestCase):
nova_util.get_flavor_instance(instance, cache)
self.assertEqual(instance.flavor['name'], cache['name'])
@staticmethod
def fake_volume(**kwargs):
volume = mock.MagicMock()
volume.id = kwargs.get('id', '45a37aeb-95ab-4ddb-a305-7d9f62c2f5ba')
volume.size = kwargs.get('size', '1')
volume.status = kwargs.get('status', 'available')
volume.snapshot_id = kwargs.get('snapshot_id', None)
volume.availability_zone = kwargs.get('availability_zone', 'nova')
return volume
@mock.patch.object(time, 'sleep', mock.Mock())
def test_swap_volume(self, mock_glance, mock_cinder,
mock_neutron, mock_nova):
nova_util = nova_helper.NovaHelper()
server = self.fake_server(self.instance_uuid)
self.fake_nova_find_list(nova_util, find=server, list=server)
old_volume = self.fake_volume(
status='in-use', attachments=[{'server_id': self.instance_uuid}])
new_volume = self.fake_volume(
id=utils.generate_uuid(), status='in-use')
result = nova_util.swap_volume(old_volume, new_volume)
self.assertTrue(result)