Add cluster support in migration and manage

This patch adds support for API microversion 3.16, which allows us to
pass the --cluster optional argument to the migration and manage operations.

To support this, a new type of CLI argument is added: mutually exclusive
arguments. They are declared with the utils.exclusive_arg decorator, which is
used in the same way as the existing utils.arg decorator.

Implements: blueprint cinder-volume-active-active-support
Change-Id: If004715b9887d2a0f9fc630b44d6e11a4a8b778d
This commit is contained in:
Gorka Eguileor 2016-10-17 14:39:33 +02:00
parent 6c214eeec0
commit 8fce74056f
9 changed files with 454 additions and 48 deletions

View File

@ -20,6 +20,7 @@ Command-line interface to the OpenStack Cinder API.
from __future__ import print_function from __future__ import print_function
import argparse import argparse
import collections
import getpass import getpass
import logging import logging
import sys import sys
@ -490,6 +491,7 @@ class OpenStackCinderShell(object):
action_help = desc.strip().split('\n')[0] action_help = desc.strip().split('\n')[0]
action_help += additional_msg action_help += additional_msg
exclusive_args = getattr(callback, 'exclusive_args', {})
arguments = getattr(callback, 'arguments', []) arguments = getattr(callback, 'arguments', [])
subparser = subparsers.add_parser( subparser = subparsers.add_parser(
@ -504,16 +506,24 @@ class OpenStackCinderShell(object):
help=argparse.SUPPRESS,) help=argparse.SUPPRESS,)
self.subcommands[command] = subparser self.subcommands[command] = subparser
self._add_subparser_args(subparser, arguments, version, do_help,
input_args, command)
self._add_subparser_exclusive_args(subparser, exclusive_args,
version, do_help, input_args,
command)
subparser.set_defaults(func=callback)
def _add_subparser_args(self, subparser, arguments, version, do_help,
input_args, command):
# NOTE(ntpttr): We get a counter for each argument in this # NOTE(ntpttr): We get a counter for each argument in this
# command here because during the microversion check we only # command here because during the microversion check we only
# want to raise an exception if no version of the argument # want to raise an exception if no version of the argument
# matches the current microversion. The exception will only # matches the current microversion. The exception will only
# be raised after the last instance of a particular argument # be raised after the last instance of a particular argument
# fails the check. # fails the check.
arg_counter = dict() arg_counter = collections.defaultdict(int)
for (args, kwargs) in arguments: for (args, kwargs) in arguments:
arg_counter[args[0]] = arg_counter.get(args[0], 0) + 1 arg_counter[args[0]] += 1
for (args, kwargs) in arguments: for (args, kwargs) in arguments:
start_version = kwargs.get("start_version", None) start_version = kwargs.get("start_version", None)
@ -537,7 +547,17 @@ class OpenStackCinderShell(object):
kw.pop("start_version", None) kw.pop("start_version", None)
kw.pop("end_version", None) kw.pop("end_version", None)
subparser.add_argument(*args, **kw) subparser.add_argument(*args, **kw)
subparser.set_defaults(func=callback)
def _add_subparser_exclusive_args(self, subparser, exclusive_args,
                                  version, do_help, input_args, command):
    """Add mutually exclusive argument groups to a command subparser.

    ``exclusive_args`` maps each group name to its list of
    (args, kwargs) tuples, plus a special '__required__' key that maps
    each group name to whether argparse must enforce that one member of
    the group is supplied.
    """
    for group_name, arguments in exclusive_args.items():
        # '__required__' is bookkeeping, not a real argument group.
        if group_name == '__required__':
            continue
        required = exclusive_args['__required__'][group_name]
        exclusive_group = subparser.add_mutually_exclusive_group(
            required=required)
        # Reuse the normal argument machinery so per-argument
        # microversion checks still apply inside the group.
        self._add_subparser_args(exclusive_group, arguments,
                                 version, do_help, input_args, command)
def setup_debugging(self, debug): def setup_debugging(self, debug):
if not debug: if not debug:

View File

@ -543,6 +543,15 @@ class FakeHTTPClient(base_client.HTTPClient):
raise AssertionError("Unexpected action: %s" % action) raise AssertionError("Unexpected action: %s" % action)
return (resp, {}, _body) return (resp, {}, _body)
def get_volumes_fake(self, **kw):
    # Fake GET /volumes/fake: return the first volume from the detail
    # listing, generated with id='fake'.
    r = {'volume': self.get_volumes_detail(id='fake')[2]['volumes'][0]}
    return (200, {}, r)
def post_volumes_fake_action(self, body, **kw):
    """Fake POST /volumes/fake/action: accept any action with a 202."""
    return (202, {}, None)
def post_volumes_5678_action(self, body, **kw): def post_volumes_5678_action(self, body, **kw):
return self.post_volumes_1234_action(body, **kw) return self.post_volumes_1234_action(body, **kw)

View File

@ -15,11 +15,13 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
from cinderclient import api_versions
from cinderclient.tests.unit import utils from cinderclient.tests.unit import utils
from cinderclient.tests.unit.v2 import fakes from cinderclient.tests.unit.v2 import fakes
from cinderclient.v2.volumes import Volume from cinderclient.v2.volumes import Volume
cs = fakes.FakeClient() cs = fakes.FakeClient()
cs3 = fakes.FakeClient(api_versions.APIVersion('3.15'))
class VolumesTest(utils.TestCase): class VolumesTest(utils.TestCase):
@ -211,20 +213,20 @@ class VolumesTest(utils.TestCase):
self._assert_request_id(vol) self._assert_request_id(vol)
def test_migrate(self): def test_migrate(self):
v = cs.volumes.get('1234') v = cs3.volumes.get('1234')
self._assert_request_id(v) self._assert_request_id(v)
vol = cs.volumes.migrate_volume(v, 'dest', False, False) vol = cs3.volumes.migrate_volume(v, 'dest', False, False)
cs.assert_called('POST', '/volumes/1234/action', cs3.assert_called('POST', '/volumes/1234/action',
{'os-migrate_volume': {'host': 'dest', {'os-migrate_volume': {'host': 'dest',
'force_host_copy': False, 'force_host_copy': False,
'lock_volume': False}}) 'lock_volume': False}})
self._assert_request_id(vol) self._assert_request_id(vol)
def test_migrate_with_lock_volume(self): def test_migrate_with_lock_volume(self):
v = cs.volumes.get('1234') v = cs3.volumes.get('1234')
self._assert_request_id(v) self._assert_request_id(v)
vol = cs.volumes.migrate_volume(v, 'dest', False, True) vol = cs3.volumes.migrate_volume(v, 'dest', False, True)
cs.assert_called('POST', '/volumes/1234/action', cs3.assert_called('POST', '/volumes/1234/action',
{'os-migrate_volume': {'host': 'dest', {'os-migrate_volume': {'host': 'dest',
'force_host_copy': False, 'force_host_copy': False,
'lock_volume': True}}) 'lock_volume': True}})
@ -260,19 +262,19 @@ class VolumesTest(utils.TestCase):
self._assert_request_id(vol) self._assert_request_id(vol)
def test_volume_manage(self): def test_volume_manage(self):
vol = cs.volumes.manage('host1', {'k': 'v'}) vol = cs3.volumes.manage('host1', {'k': 'v'})
expected = {'host': 'host1', 'name': None, 'availability_zone': None, expected = {'host': 'host1', 'name': None, 'availability_zone': None,
'description': None, 'metadata': None, 'ref': {'k': 'v'}, 'description': None, 'metadata': None, 'ref': {'k': 'v'},
'volume_type': None, 'bootable': False} 'volume_type': None, 'bootable': False}
cs.assert_called('POST', '/os-volume-manage', {'volume': expected}) cs3.assert_called('POST', '/os-volume-manage', {'volume': expected})
self._assert_request_id(vol) self._assert_request_id(vol)
def test_volume_manage_bootable(self): def test_volume_manage_bootable(self):
vol = cs.volumes.manage('host1', {'k': 'v'}, bootable=True) vol = cs3.volumes.manage('host1', {'k': 'v'}, bootable=True)
expected = {'host': 'host1', 'name': None, 'availability_zone': None, expected = {'host': 'host1', 'name': None, 'availability_zone': None,
'description': None, 'metadata': None, 'ref': {'k': 'v'}, 'description': None, 'metadata': None, 'ref': {'k': 'v'},
'volume_type': None, 'bootable': True} 'volume_type': None, 'bootable': True}
cs.assert_called('POST', '/os-volume-manage', {'volume': expected}) cs3.assert_called('POST', '/os-volume-manage', {'volume': expected})
self._assert_request_id(vol) self._assert_request_id(vol)
def test_volume_list_manageable(self): def test_volume_list_manageable(self):

View File

@ -1007,3 +1007,129 @@ class ShellTest(utils.TestCase):
columns = ['ID', 'Volume ID', 'Status', 'Name', 'Size', 'User ID'] columns = ['ID', 'Volume ID', 'Status', 'Name', 'Size', 'User ID']
mock_print_list.assert_called_once_with(mock.ANY, columns, mock_print_list.assert_called_once_with(mock.ANY, columns,
sortby_index=0) sortby_index=0)
@mock.patch('cinderclient.v3.volumes.Volume.migrate_volume')
def test_migrate_volume_before_3_16(self, v3_migrate_mock):
    # Pre-3.16 there is no --cluster argument; the shell still passes a
    # None cluster as the fourth positional argument.
    self.run_command('--os-volume-api-version 3.15 '
                     'migrate 1234 fakehost')
    v3_migrate_mock.assert_called_once_with(
        'fakehost', False, False, None)
@mock.patch('cinderclient.v3.volumes.Volume.migrate_volume')
def test_migrate_volume_3_16(self, v3_migrate_mock):
    # On 3.16 the call still carries four positional arguments
    # (host, force_host_copy, lock_volume, cluster).
    self.run_command('--os-volume-api-version 3.16 '
                     'migrate 1234 fakehost')
    self.assertEqual(4, len(v3_migrate_mock.call_args[0]))
def test_migrate_volume_with_cluster_before_3_16(self):
    # --cluster is declared with start_version='3.16', so using it on
    # 3.15 must raise UnsupportedAttribute.
    self.assertRaises(exceptions.UnsupportedAttribute,
                      self.run_command,
                      '--os-volume-api-version 3.15 '
                      'migrate 1234 fakehost --cluster fakecluster')
@mock.patch('cinderclient.shell.CinderClientArgumentParser.error')
def test_migrate_volume_mutual_exclusion(self, error_mock):
    # <host> and --cluster belong to the same mutually exclusive group,
    # so argparse must reject supplying both.
    error_mock.side_effect = SystemExit
    self.assertRaises(SystemExit,
                      self.run_command,
                      '--os-volume-api-version 3.16 '
                      'migrate 1234 fakehost --cluster fakecluster')
    msg = 'argument --cluster: not allowed with argument <host>'
    error_mock.assert_called_once_with(msg)
@mock.patch('cinderclient.shell.CinderClientArgumentParser.error')
def test_migrate_volume_missing_required(self, error_mock):
    # The 'destination' exclusive group is required, so omitting both
    # <host> and --cluster is a parser error.
    error_mock.side_effect = SystemExit
    self.assertRaises(SystemExit,
                      self.run_command,
                      '--os-volume-api-version 3.16 '
                      'migrate 1234')
    msg = 'one of the arguments <host> --cluster is required'
    error_mock.assert_called_once_with(msg)
def test_migrate_volume_host(self):
    # Host destination: the action body carries 'host' and no 'cluster'.
    self.run_command('--os-volume-api-version 3.16 '
                     'migrate 1234 fakehost')
    expected = {'os-migrate_volume': {'force_host_copy': False,
                                      'lock_volume': False,
                                      'host': 'fakehost'}}
    self.assert_called('POST', '/volumes/1234/action', body=expected)
def test_migrate_volume_cluster(self):
    # Cluster destination: 'cluster' replaces 'host' in the action body.
    self.run_command('--os-volume-api-version 3.16 '
                     'migrate 1234 --cluster mycluster')
    expected = {'os-migrate_volume': {'force_host_copy': False,
                                      'lock_volume': False,
                                      'cluster': 'mycluster'}}
    self.assert_called('POST', '/volumes/1234/action', body=expected)
def test_migrate_volume_bool_force(self):
    # Bare --force-host-copy / --lock-volume act as switches
    # (nargs='?' with const=True) and send boolean True.
    self.run_command('--os-volume-api-version 3.16 '
                     'migrate 1234 fakehost --force-host-copy '
                     '--lock-volume')
    expected = {'os-migrate_volume': {'force_host_copy': True,
                                      'lock_volume': True,
                                      'host': 'fakehost'}}
    self.assert_called('POST', '/volumes/1234/action', body=expected)
def test_migrate_volume_bool_force_false(self):
    # Set both --force-host-copy and --lock-volume to False.
    # NOTE(review): an explicit '=False' sends the *string* 'False', not
    # a boolean, because the flags use choices=['True', 'False'] — this
    # test pins that behavior; confirm the API accepts string booleans.
    self.run_command('--os-volume-api-version 3.16 '
                     'migrate 1234 fakehost --force-host-copy=False '
                     '--lock-volume=False')
    expected = {'os-migrate_volume': {'force_host_copy': 'False',
                                      'lock_volume': 'False',
                                      'host': 'fakehost'}}
    self.assert_called('POST', '/volumes/1234/action', body=expected)

    # Do not set the values to --force-host-copy and --lock-volume:
    # the boolean default (False) is sent instead.
    self.run_command('--os-volume-api-version 3.16 '
                     'migrate 1234 fakehost')
    expected = {'os-migrate_volume': {'force_host_copy': False,
                                      'lock_volume': False,
                                      'host': 'fakehost'}}
    self.assert_called('POST', '/volumes/1234/action',
                       body=expected)
@ddt.data({'bootable': False, 'by_id': False, 'cluster': None},
          {'bootable': True, 'by_id': False, 'cluster': None},
          {'bootable': False, 'by_id': True, 'cluster': None},
          {'bootable': True, 'by_id': True, 'cluster': None},
          {'bootable': True, 'by_id': True, 'cluster': 'clustername'})
@ddt.unpack
def test_volume_manage(self, bootable, by_id, cluster):
    # Exercise manage across id-type/bootable/cluster combinations.
    # When a cluster is given, it is expected at the top level of the
    # request body, alongside the 'volume' dict.
    cmd = ('--os-volume-api-version 3.16 '
           'manage host1 some_fake_name --name foo --description bar '
           '--volume-type baz --availability-zone az '
           '--metadata k1=v1 k2=v2')
    if by_id:
        cmd += ' --id-type source-id'
    if bootable:
        cmd += ' --bootable'
    if cluster:
        cmd += ' --cluster ' + cluster
    self.run_command(cmd)
    ref = 'source-id' if by_id else 'source-name'
    expected = {'volume': {'host': 'host1',
                           'ref': {ref: 'some_fake_name'},
                           'name': 'foo',
                           'description': 'bar',
                           'volume_type': 'baz',
                           'availability_zone': 'az',
                           'metadata': {'k1': 'v1', 'k2': 'v2'},
                           'bootable': bootable}}
    if cluster:
        expected['cluster'] = cluster
    self.assert_called_anytime('POST', '/os-volume-manage', body=expected)
def test_volume_manage_before_3_16(self):
    """Cluster optional argument was not acceptable."""
    # --cluster has start_version='3.16'; with no explicit microversion
    # the default (< 3.16) applies, so run_command must raise.
    # Bug fix: the original adjacent literals concatenated to the
    # malformed token 'clustername--name'; a separating space is needed
    # so --cluster and --name are parsed as distinct arguments.
    self.assertRaises(exceptions.UnsupportedAttribute,
                      self.run_command,
                      'manage host1 some_fake_name '
                      '--cluster clustername '
                      '--name foo --description bar --bootable '
                      '--volume-type baz --availability-zone az '
                      '--metadata k1=v1 k2=v2')

View File

@ -27,6 +27,7 @@ from cinderclient.v3 import volumes
from six.moves.urllib import parse from six.moves.urllib import parse
cs = fakes.FakeClient() cs = fakes.FakeClient()
cs3 = fakes.FakeClient(api_versions.APIVersion('3.16'))
@ddt.ddt @ddt.ddt
@ -145,3 +146,34 @@ class VolumesTest(utils.TestCase):
request_url = '/scheduler-stats/get_pools?detail=True&name=pool1' request_url = '/scheduler-stats/get_pools?detail=True&name=pool1'
cs.assert_called('GET', request_url) cs.assert_called('GET', request_url)
self._assert_request_id(vol) self._assert_request_id(vol)
def test_migrate_host(self):
    # Host-based migration on 3.16 keeps 'host' in the action body.
    v = cs3.volumes.get('1234')
    self._assert_request_id(v)
    vol = cs3.volumes.migrate_volume(v, 'host_dest', False, False)
    cs3.assert_called('POST', '/volumes/1234/action',
                      {'os-migrate_volume': {'host': 'host_dest',
                                             'force_host_copy': False,
                                             'lock_volume': False}})
    self._assert_request_id(vol)
def test_migrate_with_lock_volume(self):
    # lock_volume=True must be forwarded unchanged in the action body.
    v = cs3.volumes.get('1234')
    self._assert_request_id(v)
    vol = cs3.volumes.migrate_volume(v, 'dest', False, True)
    cs3.assert_called('POST', '/volumes/1234/action',
                      {'os-migrate_volume': {'host': 'dest',
                                             'force_host_copy': False,
                                             'lock_volume': True}})
    self._assert_request_id(vol)
def test_migrate_cluster(self):
    # On 3.16, a cluster destination replaces 'host' in the action body.
    v = cs3.volumes.get('fake')
    self._assert_request_id(v)
    vol = cs3.volumes.migrate_volume(v, 'host_dest', False, False,
                                     'cluster_dest')
    cs3.assert_called('POST', '/volumes/fake/action',
                      {'os-migrate_volume': {'cluster': 'cluster_dest',
                                             'force_host_copy': False,
                                             'lock_volume': False}})
    self._assert_request_id(vol)

View File

@ -14,6 +14,7 @@
# under the License. # under the License.
from __future__ import print_function from __future__ import print_function
import collections
import os import os
import pkg_resources import pkg_resources
@ -36,6 +37,15 @@ def arg(*args, **kwargs):
return _decorator return _decorator
def exclusive_arg(group_name, *args, **kwargs):
    """Decorator registering a CLI argument in a mutually exclusive group.

    Works like the ``arg`` decorator, but places the argument in the
    named group; a ``required`` keyword marks the whole group as
    mandatory for argparse.
    """
    def _decorator(func):
        is_required = kwargs.pop('required', None)
        add_exclusive_arg(func, group_name, is_required, *args, **kwargs)
        return func
    return _decorator
def env(*vars, **kwargs): def env(*vars, **kwargs):
""" """
returns the first environment variable set returns the first environment variable set
@ -62,6 +72,24 @@ def add_arg(f, *args, **kwargs):
f.arguments.insert(0, (args, kwargs)) f.arguments.insert(0, (args, kwargs))
def add_exclusive_arg(f, group_name, required, *args, **kwargs):
    """Bind a CLI mutually exclusive argument to a shell.py `do_foo` function.

    Arguments are stored on ``f.exclusive_args`` keyed by group name; the
    special '__required__' key records, per group, whether one member of
    the group must be supplied (defaults to False).
    """
    try:
        groups = f.exclusive_args
    except AttributeError:
        groups = f.exclusive_args = collections.defaultdict(list)
        # Default required to False for every group.
        groups['__required__'] = collections.defaultdict(bool)

    entry = (args, kwargs)
    # NOTE(sirp): avoid dups that can occur when the module is shared
    # across tests.
    if entry not in groups[group_name]:
        # Decorators compose bottom-up, so prepend to keep positional
        # options in the order they were written.
        groups[group_name].insert(0, entry)
    if required is not None:
        groups['__required__'][group_name] = required
def unauthenticated(f): def unauthenticated(f):
""" """
Adds 'unauthenticated' attribute to decorated function. Adds 'unauthenticated' attribute to decorated function.

View File

@ -858,6 +858,54 @@ def do_upload_to_image(cs, args):
args.disk_format)) args.disk_format))
@utils.arg('volume', metavar='<volume>', help='ID of volume to migrate.')
# NOTE(geguileo): host is positional but optional in order to maintain backward
# compatibility even with mutually exclusive arguments. If version is < 3.16
# then only host positional argument will be possible, and since the
# exclusive_arg group has required=True it will be required even if it's
# optional.
@utils.exclusive_arg('destination', 'host', required=True, nargs='?',
                     metavar='<host>', help='Destination host. Takes the '
                     'form: host@backend-name#pool')
@utils.exclusive_arg('destination', '--cluster', required=True,
                     help='Destination cluster. Takes the form: '
                     'cluster@backend-name#pool',
                     start_version='3.16')
@utils.arg('--force-host-copy', metavar='<True|False>',
           choices=['True', 'False'],
           required=False,
           const=True,
           nargs='?',
           default=False,
           help='Enables or disables generic host-based '
           'force-migration, which bypasses driver '
           'optimizations. Default=False.')
@utils.arg('--lock-volume', metavar='<True|False>',
           choices=['True', 'False'],
           required=False,
           const=True,
           nargs='?',
           default=False,
           help='Enables or disables the termination of volume migration '
           'caused by other commands. This option applies to the '
           'available volume. True means it locks the volume '
           'state and does not allow the migration to be aborted. The '
           'volume status will be in maintenance during the '
           'migration. False means it allows the volume migration '
           'to be aborted. The volume status is still in the original '
           'status. Default=False.')
def do_migrate(cs, args):
    """Migrates volume to a new host."""
    volume = utils.find_volume(cs, args.volume)
    try:
        # 'cluster' only exists on the parsed args for mv >= 3.16, hence
        # the getattr with a None default.
        volume.migrate_volume(args.host, args.force_host_copy,
                              args.lock_volume, getattr(args, 'cluster', None))
        print("Request to migrate volume %s has been accepted." % (volume.id))
    except Exception as e:
        print("Migration for volume %s failed: %s." % (volume.id,
                                                       six.text_type(e)))
@api_versions.wraps('3.9', '3.43') @api_versions.wraps('3.9', '3.43')
@utils.arg('backup', metavar='<backup>', @utils.arg('backup', metavar='<backup>',
help='Name or ID of backup to rename.') help='Name or ID of backup to rename.')
@ -963,6 +1011,84 @@ def do_cluster_disable(cs, args):
utils.print_dict(cluster.to_dict()) utils.print_dict(cluster.to_dict())
@utils.arg('host',
           metavar='<host>',
           help='Cinder host on which the existing volume resides; '
           'takes the form: host@backend-name#pool')
@utils.arg('--cluster',
           help='Cinder cluster on which the existing volume resides; '
           'takes the form: cluster@backend-name#pool',
           start_version='3.16')
@utils.arg('identifier',
           metavar='<identifier>',
           help='Name or other Identifier for existing volume')
@utils.arg('--id-type',
           metavar='<id-type>',
           default='source-name',
           help='Type of backend device identifier provided, '
           'typically source-name or source-id (Default=source-name)')
@utils.arg('--name',
           metavar='<name>',
           help='Volume name (Default=None)')
@utils.arg('--description',
           metavar='<description>',
           help='Volume description (Default=None)')
@utils.arg('--volume-type',
           metavar='<volume-type>',
           help='Volume type (Default=None)')
@utils.arg('--availability-zone',
           metavar='<availability-zone>',
           help='Availability zone for volume (Default=None)')
@utils.arg('--metadata',
           type=str,
           nargs='*',
           metavar='<key=value>',
           help='Metadata key=value pairs (Default=None)')
@utils.arg('--bootable',
           action='store_true',
           help='Specifies that the newly created volume should be'
           ' marked as bootable')
def do_manage(cs, args):
    """Manage an existing volume."""
    volume_metadata = None
    if args.metadata is not None:
        volume_metadata = shell_utils.extract_metadata(args)

    # Build a dictionary of key/value pairs to pass to the API.
    ref_dict = {args.id_type: args.identifier}

    # The recommended way to specify an existing volume is by ID or name, and
    # have the Cinder driver look for 'source-name' or 'source-id' elements in
    # the ref structure. To make things easier for the user, we have special
    # --source-name and --source-id CLI options that add the appropriate
    # element to the ref structure.
    #
    # Note how argparse converts hyphens to underscores. We use hyphens in the
    # dictionary so that it is consistent with what the user specified on the
    # CLI.
    if hasattr(args, 'source_name') and args.source_name is not None:
        ref_dict['source-name'] = args.source_name
    if hasattr(args, 'source_id') and args.source_id is not None:
        ref_dict['source-id'] = args.source_id

    # 'cluster' is only present on the parsed args for mv >= 3.16.
    volume = cs.volumes.manage(host=args.host,
                               ref=ref_dict,
                               name=args.name,
                               description=args.description,
                               volume_type=args.volume_type,
                               availability_zone=args.availability_zone,
                               metadata=volume_metadata,
                               bootable=args.bootable,
                               cluster=getattr(args, 'cluster', None))
    info = {}
    # Re-fetch so the printed info reflects the server-side state.
    volume = cs.volumes.get(volume.id)
    info.update(volume._info)
    info.pop('links', None)
    utils.print_dict(info)
@api_versions.wraps('3.8') @api_versions.wraps('3.8')
@utils.arg('host', @utils.arg('host',
metavar='<host>', metavar='<host>',

View File

@ -49,6 +49,22 @@ class Volume(volumes.Volume):
"""Revert a volume to a snapshot.""" """Revert a volume to a snapshot."""
self.manager.revert_to_snapshot(self, snapshot) self.manager.revert_to_snapshot(self, snapshot)
def migrate_volume(self, host, force_host_copy, lock_volume, cluster=None):
    """Migrate the volume to a new host.

    Pure delegation to the manager; ``cluster`` is the alternative
    destination honored by the manager on API >= 3.16.
    """
    return self.manager.migrate_volume(self, host, force_host_copy,
                                       lock_volume, cluster)
def manage(self, host, ref, name=None, description=None,
           volume_type=None, availability_zone=None, metadata=None,
           bootable=False, cluster=None):
    """Manage an existing volume.

    Pure delegation to the manager; ``cluster`` is only sent by the
    manager when the negotiated API version allows it (>= 3.16).
    """
    return self.manager.manage(host=host, ref=ref, name=name,
                               description=description,
                               volume_type=volume_type,
                               availability_zone=availability_zone,
                               metadata=metadata, bootable=bootable,
                               cluster=cluster)
class VolumeManager(volumes.VolumeManager): class VolumeManager(volumes.VolumeManager):
resource_class = Volume resource_class = Volume
@ -194,6 +210,46 @@ class VolumeManager(volumes.VolumeManager):
'visibility': visibility, 'visibility': visibility,
'protected': protected}) 'protected': protected})
def migrate_volume(self, volume, host, force_host_copy, lock_volume,
                   cluster=None):
    """Migrate volume to a new backend.

    The new backend is defined by the host or the cluster (not both).

    :param volume: The :class:`Volume` to migrate
    :param host: The destination host
    :param force_host_copy: Skip driver optimizations
    :param lock_volume: Lock the volume and guarantee the migration
                        to finish
    :param cluster: The destination cluster (honored on API >= 3.16)
    """
    request = {
        'host': host,
        'force_host_copy': force_host_copy,
        'lock_volume': lock_volume,
    }
    # From 3.16 on, a cluster destination supersedes the host one.
    if self.api_version.matches('3.16') and cluster:
        request['cluster'] = cluster
        request.pop('host')
    return self._action('os-migrate_volume', volume, request)
def manage(self, host, ref, name=None, description=None,
           volume_type=None, availability_zone=None, metadata=None,
           bootable=False, cluster=None):
    """Manage an existing volume.

    :param host: Cinder host on which the existing volume resides
    :param ref: backend-specific reference to the existing volume,
        e.g. {'source-name': ...} or {'source-id': ...}
    :param cluster: Cinder cluster holding the volume; only sent when
        the negotiated API version is at least 3.16
    """
    body = {'volume': {'host': host,
                       'ref': ref,
                       'name': name,
                       'description': description,
                       'volume_type': volume_type,
                       'availability_zone': availability_zone,
                       'metadata': metadata,
                       'bootable': bootable
                       }}
    if self.api_version.matches('3.16') and cluster:
        # NOTE(review): 'cluster' is sent at the top level of the body,
        # next to 'volume', not inside it — the unit test pins the same
        # shape, but confirm against the os-volume-manage 3.16 schema.
        body['cluster'] = cluster
    return self._create('/os-volume-manage', body, 'volume')
@api_versions.wraps("3.8") @api_versions.wraps("3.8")
def list_manageable(self, host, detailed=True, marker=None, limit=None, def list_manageable(self, host, detailed=True, marker=None, limit=None,
offset=None, sort=None): offset=None, sort=None):

View File

@ -0,0 +1,7 @@
---
features:
- |
Cinder migrate and manage commands now accept ``--cluster`` argument to
define the destination for Active-Active deployments on microversion 3.16
and higher. This argument and the ``host`` positional argument are
mutually exclusive for the migrate command.