Merge "Add service cleanup command"
@@ -606,6 +606,17 @@ class FakeHTTPClient(fake_v2.FakeHTTPClient):
            }
        }

    def post_workers_cleanup(self, **kw):
        response = {
            'cleaning': [{'id': '1', 'cluster_name': 'cluster1',
                          'host': 'host1', 'binary': 'binary'},
                         {'id': '3', 'cluster_name': 'cluster1',
                          'host': 'host3', 'binary': 'binary'}],
            'unavailable': [{'id': '2', 'cluster_name': 'cluster2',
                             'host': 'host2', 'binary': 'binary'}],
        }
        return 200, {}, response

    #
    # resource filters
    #
@@ -1241,3 +1241,23 @@ class ShellTest(utils.TestCase):
                         '--name foo --description bar --bootable '
                         '--volume-type baz --availability-zone az '
                         '--metadata k1=v1 k2=v2')

    def test_worker_cleanup_before_3_24(self):
        self.assertRaises(SystemExit,
                          self.run_command,
                          'work-cleanup fakehost')

    def test_worker_cleanup(self):
        self.run_command('--os-volume-api-version 3.24 '
                         'work-cleanup --cluster clustername --host hostname '
                         '--binary binaryname --is-up false --disabled true '
                         '--resource-id uuid --resource-type Volume')
        expected = {'cluster_name': 'clustername',
                    'host': 'hostname',
                    'binary': 'binaryname',
                    'is_up': 'false',
                    'disabled': 'true',
                    'resource_id': 'uuid',
                    'resource_type': 'Volume'}

        self.assert_called('POST', '/workers/cleanup', body=expected)
@@ -42,6 +42,7 @@ from cinderclient.v3 import volume_transfers
from cinderclient.v3 import volume_type_access
from cinderclient.v3 import volume_types
from cinderclient.v3 import volumes
from cinderclient.v3 import workers


class Client(object):
@@ -91,6 +92,7 @@ class Client(object):
        self.transfers = volume_transfers.VolumeTransferManager(self)
        self.services = services.ServiceManager(self)
        self.clusters = clusters.ClusterManager(self)
        self.workers = workers.WorkerManager(self)
        self.consistencygroups = consistencygroups.\
            ConsistencygroupManager(self)
        self.groups = groups.GroupManager(self)
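With the workers manager registered on the v3 Client above, cleanup becomes reachable from any client instance as cinder.workers. A minimal sketch; the credentials and endpoint below are purely illustrative and not part of this change:

# Illustrative values only; substitute real credentials and Keystone endpoint.
from cinderclient.v3 import client

cinder = client.Client(username='admin', api_key='secret', project_id='demo',
                       auth_url='http://keystone:5000/v3')

workers = cinder.workers  # the WorkerManager instance added by this commit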
@@ -1060,6 +1060,52 @@ def do_cluster_disable(cs, args):
    utils.print_dict(cluster.to_dict())


@api_versions.wraps('3.24')
@utils.arg('--cluster', metavar='<cluster-name>', default=None,
           help='Cluster name. Default=None.')
@utils.arg('--host', metavar='<hostname>', default=None,
           help='Service host name. Default=None.')
@utils.arg('--binary', metavar='<binary>', default=None,
           help='Service binary. Default=None.')
@utils.arg('--is-up', metavar='<True|true|False|false>', dest='is_up',
           default=None, choices=('True', 'true', 'False', 'false'),
           help='Filter by up/down status, if set to true services need to be'
                ' up, if set to false services need to be down. Default is '
                'None, which means up/down status is ignored.')
@utils.arg('--disabled', metavar='<True|true|False|false>', default=None,
           choices=('True', 'true', 'False', 'false'),
           help='Filter by disabled status. Default=None.')
@utils.arg('--resource-id', metavar='<resource-id>', default=None,
           help='UUID of a resource to cleanup. Default=None.')
@utils.arg('--resource-type', metavar='<Volume|Snapshot>', default=None,
           choices=('Volume', 'Snapshot'),
           help='Type of resource to cleanup.')
def do_work_cleanup(cs, args):
    """Request cleanup of services with optional filtering."""
    filters = dict(cluster_name=args.cluster, host=args.host,
                   binary=args.binary, is_up=args.is_up,
                   disabled=args.disabled, resource_id=args.resource_id,
                   resource_type=args.resource_type)

    filters = {k: v for k, v in filters.items() if v is not None}

    cleaning, unavailable = cs.workers.clean(**filters)

    columns = ('ID', 'Cluster Name', 'Host', 'Binary')

    if cleaning:
        print('Following services will be cleaned:')
        utils.print_list(cleaning, columns)

    if unavailable:
        print('There are no alternative nodes to do cleanup for the following '
              'services:')
        utils.print_list(unavailable, columns)

    if not (cleaning or unavailable):
        print('No cleanable services matched cleanup criteria.')


@utils.arg('host',
           metavar='<host>',
           help='Cinder host on which the existing volume resides; '
cinderclient/v3/workers.py (new file)
@@ -0,0 +1,44 @@
# Copyright (c) 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Interface to workers API
"""
from cinderclient.apiclient import base as common_base
from cinderclient import base


class Service(base.Resource):
    def __repr__(self):
        return "<Service (%s): %s in cluster %s>" % (self.id, self.host,
                                                     self.cluster_name or '-')

    @classmethod
    def list_factory(cls, mngr, elements):
        return [cls(mngr, element, loaded=True) for element in elements]


class WorkerManager(base.Manager):
    base_url = '/workers'

    def clean(self, **filters):
        url = self.base_url + '/cleanup'
        resp, body = self.api.client.post(url, body=filters)

        cleaning = Service.list_factory(self, body['cleaning'])
        unavailable = Service.list_factory(self, body['unavailable'])

        result = common_base.TupleWithMeta((cleaning, unavailable), resp)
        return result
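For illustration, a minimal sketch of calling the new manager directly, assuming cinder is a v3 Client as in the earlier sketch; the filter values are placeholders, and the server only honours this call at microversion 3.24 or higher:

# Placeholder filters; any of cluster_name, host, binary, is_up, disabled,
# resource_id and resource_type may be passed, matching the shell command.
cleaning, unavailable = cinder.workers.clean(cluster_name='cluster1',
                                             is_up='false')

# clean() returns a TupleWithMeta that unpacks into two lists of Service
# resources: services scheduled for cleanup and services with no alternative
# node available to perform it.
for svc in cleaning:
    print(svc.id, svc.host, svc.cluster_name)
for svc in unavailable:
    print('no alternative node for', svc.host)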
@@ -0,0 +1,6 @@
---
features:
  - |
    New ``work-cleanup`` command to trigger server cleanups by other nodes
    within a cluster on Active-Active deployments on microversion 3.24 and
    higher.
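From the command line, an invocation mirroring the unit test earlier in this change would look as follows (cluster name and filter values are placeholders):

cinder --os-volume-api-version 3.24 work-cleanup --cluster cluster1 --is-up false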