Add missing APIv2 features to client, OSC
Now in the basic client:
- Boot from volume enhancements
- Update keypair

Now in OSC:
- Force delete cluster
- Update keypair
- Boot from volume enhancements
- Decommission specific node (only via --json)

Change-Id: I031fdb6f7754f6cf242bfae6f10ed05249c07dac
Story: 2003092
Task: 23183
Task: 29740
parent 9ebd05065d
commit 25043cbdf1
@@ -0,0 +1,12 @@
+---
+features:
+  - |
+    The basic saharaclient and the OSC plugin now include support for
+    the enhanced boot from volume mechanism introduced in the Stein
+    release of Sahara, and support for the keypair replacement
+    mechanism introduced in the Rocky release of Sahara. The OSC plugin
+    also now includes support for the force deletion of clusters
+    feature introduced in the Queens release of Sahara, and support
+    for the decommission of a specific instance feature (albeit only via
+    the --json flag) introduced in the Queens release of Sahara. (All
+    of these features are exclusive to Sahara's APIv2.)
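For orientation, a minimal sketch of how the two new cluster-level calls described in the note above are driven from the Python client. The wrapper function and its name are illustrative; `client` is assumed to be an already-authenticated saharaclient APIv2 client (construction omitted):

def refresh_keypair_then_force_delete(client, cluster_id):
    """Illustrative only: exercises the two new APIv2 cluster operations.

    `client` is an authenticated saharaclient APIv2 client and
    `cluster_id` is the ID of an existing cluster.
    """
    # Tell Sahara to pick up the cluster's replaced keypair (Rocky+).
    client.clusters.update_keypair(cluster_id)
    # Force the deletion of the cluster (Queens+); exposed in OSC as
    # `dataprocessing cluster delete --force`.
    client.clusters.force_delete(cluster_id)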
@@ -213,6 +213,11 @@ class ClusterManagerV2(ClusterManagerV1):
         data = {'force': True}
         return self._delete('/clusters/%s' % cluster_id, data)
 
+    def update_keypair(self, cluster_id):
+        """Reflect an updated keypair on the cluster."""
+        data = {'update_keypair': True}
+        return self._patch("/clusters/%s" % cluster_id, data)
+
 
 # NOTE(jfreud): keep this around for backwards compatibility
 ClusterManager = ClusterManagerV1
@@ -58,7 +58,8 @@ class NodeGroupTemplateManagerV1(base.ResourceManager):
                       volumes_availability_zone, volume_type, image_id,
                       is_proxy_gateway, volume_local_to_instance, use_autoconfig,
                       shares, is_public, is_protected, volume_mount_prefix,
-                      boot_from_volume=None):
+                      boot_from_volume=None, boot_volume_type=None,
+                      boot_volume_az=None, boot_volume_local=None):
 
         self._copy_if_defined(data,
                               description=description,
@@ -73,7 +74,10 @@ class NodeGroupTemplateManagerV1(base.ResourceManager):
                               shares=shares,
                               is_public=is_public,
                               is_protected=is_protected,
-                              boot_from_volume=boot_from_volume
+                              boot_from_volume=boot_from_volume,
+                              boot_volume_type=boot_volume_type,
+                              boot_volume_availability_zone=boot_volume_az,
+                              boot_volume_local_to_instance=boot_volume_local
                               )
 
         if volumes_per_node:
@@ -162,7 +166,9 @@ class NodeGroupTemplateManagerV2(NodeGroupTemplateManagerV1):
               volume_type=None, image_id=None, is_proxy_gateway=None,
               volume_local_to_instance=None, use_autoconfig=None,
               shares=None, is_public=None, is_protected=None,
-              volume_mount_prefix=None, boot_from_volume=None):
+              volume_mount_prefix=None, boot_from_volume=None,
+              boot_volume_type=None, boot_volume_availability_zone=None,
+              boot_volume_local_to_instance=None):
         """Create a Node Group Template."""
 
         data = {
@@ -180,7 +186,10 @@ class NodeGroupTemplateManagerV2(NodeGroupTemplateManagerV1):
                             volume_type, image_id, is_proxy_gateway,
                             volume_local_to_instance, use_autoconfig,
                             shares, is_public, is_protected,
-                            volume_mount_prefix, boot_from_volume)
+                            volume_mount_prefix, boot_from_volume,
+                            boot_volume_type,
+                            boot_volume_availability_zone,
+                            boot_volume_local_to_instance)
 
     def update(self, ng_template_id, name=NotUpdated, plugin_name=NotUpdated,
                plugin_version=NotUpdated, flavor_id=NotUpdated,
@@ -194,7 +203,10 @@ class NodeGroupTemplateManagerV2(NodeGroupTemplateManagerV1):
               volume_local_to_instance=NotUpdated, use_autoconfig=NotUpdated,
               shares=NotUpdated, is_public=NotUpdated,
               is_protected=NotUpdated, volume_mount_prefix=NotUpdated,
-              boot_from_volume=NotUpdated):
+              boot_from_volume=NotUpdated,
+              boot_volume_type=NotUpdated,
+              boot_volume_availability_zone=NotUpdated,
+              boot_volume_local_to_instance=NotUpdated):
         """Update a Node Group Template."""
 
         data = {}
@@ -214,7 +226,10 @@ class NodeGroupTemplateManagerV2(NodeGroupTemplateManagerV1):
             use_autoconfig=use_autoconfig, shares=shares,
             is_public=is_public, is_protected=is_protected,
             volume_mount_prefix=volume_mount_prefix,
-            boot_from_volume=boot_from_volume
+            boot_from_volume=boot_from_volume,
+            boot_volume_type=boot_volume_type,
+            boot_volume_availability_zone=boot_volume_availability_zone,
+            boot_volume_local_to_instance=boot_volume_local_to_instance
             )
 
         return self._patch('/node-group-templates/%s' % ng_template_id, data,
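A matching sketch for the node group template side, using the new boot-volume keyword arguments accepted by create() and update() above. The helper is illustrative and the values are placeholders; `client` is again an already-authenticated APIv2 client:

def enable_boot_from_volume(client, ngt_id):
    """Illustrative only: switch an existing node group template to boot
    from volume, using the keyword arguments added in this change."""
    return client.node_group_templates.update(
        ngt_id,
        boot_from_volume=True,
        boot_volume_type='ssd',                # placeholder volume type
        boot_volume_availability_zone='nova',  # placeholder availability zone
        boot_volume_local_to_instance=True)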
@@ -367,7 +367,13 @@ def create_node_group_templates(client, app, parsed_args, flavor_id, configs,
                 volumes_availability_zone=(
                     parsed_args.volumes_availability_zone),
                 volume_mount_prefix=parsed_args.volumes_mount_prefix,
-                boot_from_volume=parsed_args.boot_from_volume).to_dict()
+                boot_from_volume=parsed_args.boot_from_volume,
+                boot_volume_type=parsed_args.boot_volume_type,
+                boot_volume_availability_zone=(
+                    parsed_args.boot_volume_availability_zone),
+                boot_volume_local_to_instance=(
+                    parsed_args.boot_volume_local_to_instance)
+            ).to_dict()
         else:
             data = client.node_group_templates.create(
                 name=parsed_args.name,
@@ -564,6 +570,15 @@ class NodeGroupTemplatesUtils(object):
         if parsed_args.boot_from_volume is not None:
             update_dict['boot_from_volume'] = (
                 parsed_args.boot_from_volume)
+        if parsed_args.boot_volume_type is not None:
+            update_dict['boot_volume_type'] = (
+                parsed_args.boot_volume_type)
+        if parsed_args.boot_volume_availability_zone is not None:
+            update_dict['boot_volume_availability_zone'] = (
+                parsed_args.boot_volume_availability_zone)
+        if parsed_args.boot_volume_local_to_instance is not None:
+            update_dict['boot_volume_local_to_instance'] = (
+                parsed_args.boot_volume_local_to_instance)
         data = client.node_group_templates.update(
             ngt_id, **update_dict).to_dict()
@@ -393,14 +393,20 @@ class DeleteCluster(command.Command):
 
         return parser
 
+    def _choose_delete_mode(self, parsed_args):
+        return "delete"
+
     def take_action(self, parsed_args):
         self.log.debug("take_action(%s)", parsed_args)
         client = self.app.client_manager.data_processing
 
+        delete_function_attr = self._choose_delete_mode(parsed_args)
+
         clusters = []
         for cluster in parsed_args.cluster:
             cluster_id = utils.get_resource_id(
                 client.clusters, cluster)
-            client.clusters.delete(cluster_id)
+            getattr(client.clusters, delete_function_attr)(cluster_id)
             clusters.append((cluster_id, cluster))
             sys.stdout.write(
                 'Cluster "{cluster}" deletion has been started.\n'.format(
@@ -518,6 +524,13 @@ class ScaleCluster(command.ShowOne):
 
     log = logging.getLogger(__name__ + ".ScaleCluster")
 
+    def _get_json_arg_helptext(self):
+        return '''
+               JSON representation of the cluster scale object. Other
+               arguments (except for --wait) will not be taken into
+               account if this one is provided
+        '''
+
     def get_parser(self, prog_name):
         parser = super(ScaleCluster, self).get_parser(prog_name)
@@ -536,9 +549,7 @@ class ScaleCluster(command.ShowOne):
         parser.add_argument(
             '--json',
             metavar='<filename>',
-            help='JSON representation of the cluster scale object. Other '
-                 'arguments (except for --wait) will not be taken into '
-                 'account if this one is provided'
+            help=self._get_json_arg_helptext()
         )
         parser.add_argument(
             '--wait',
@@ -13,6 +13,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import sys
+
+from osc_lib.command import command
 from osc_lib import utils as osc_utils
 from oslo_log import log as logging
@@ -131,6 +134,22 @@ class DeleteCluster(c_v1.DeleteCluster):
 
     log = logging.getLogger(__name__ + ".DeleteCluster")
 
+    def get_parser(self, prog_name):
+        parser = super(DeleteCluster, self).get_parser(prog_name)
+        parser.add_argument(
+            '--force',
+            action='store_true',
+            default=False,
+            help='Force the deletion of the cluster',
+        )
+        return parser
+
+    def _choose_delete_mode(self, parsed_args):
+        if parsed_args.force:
+            return "force_delete"
+        else:
+            return "delete"
+
 
 class UpdateCluster(c_v1.UpdateCluster):
     """Updates cluster"""
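The _choose_delete_mode() hook added above is what lets the APIv2 command reuse the v1 take_action() unchanged: the base class looks the manager method up by name, and the subclass only overrides which name is returned. A small self-contained illustration of that dispatch, using a dummy manager rather than the real client:

class FakeClusterManager(object):
    """Stand-in for client.clusters, used only to show the dispatch."""

    def delete(self, cluster_id):
        print('normal delete of %s' % cluster_id)

    def force_delete(self, cluster_id):
        print('forced delete of %s' % cluster_id)


def delete_cluster(manager, cluster_id, force=False):
    # Same getattr-based dispatch as take_action() / _choose_delete_mode().
    delete_function_attr = "force_delete" if force else "delete"
    getattr(manager, delete_function_attr)(cluster_id)


delete_cluster(FakeClusterManager(), 'abc-123', force=True)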
@@ -154,6 +173,15 @@ class ScaleCluster(c_v1.ScaleCluster):
 
     log = logging.getLogger(__name__ + ".ScaleCluster")
 
+    def _get_json_arg_helptext(self):
+        return '''
+               JSON representation of the cluster scale object. Other
+               arguments (except for --wait) will not be taken into
+               account if this one is provided. Specifying a JSON
+               object is also the only way to indicate specific
+               instances to decommission.
+        '''
+
     def take_action(self, parsed_args):
         self.log.debug("take_action(%s)", parsed_args)
         client = self.app.client_manager.data_processing
@@ -170,3 +198,31 @@ class VerificationUpdateCluster(c_v1.VerificationUpdateCluster):
     """Updates cluster verifications"""
 
     log = logging.getLogger(__name__ + ".VerificationUpdateCluster")
+
+
+class UpdateKeypairCluster(command.ShowOne):
+    """Reflects an updated keypair on the cluster"""
+
+    log = logging.getLogger(__name__ + ".UpdateKeypairCluster")
+
+    def get_parser(self, prog_name):
+        parser = super(UpdateKeypairCluster, self).get_parser(prog_name)
+
+        parser.add_argument(
+            'cluster',
+            metavar="<cluster>",
+            help="Name or ID of the cluster",
+        )
+        return parser
+
+    def take_action(self, parsed_args):
+        self.log.debug("take_action(%s)", parsed_args)
+        client = self.app.client_manager.data_processing
+
+        cluster_id = utils.get_resource_id(
+            client.clusters, parsed_args.cluster)
+        client.clusters.update_keypair(cluster_id)
+        sys.stdout.write(
+            'Cluster "{cluster}" keypair has been updated.\n'
+            .format(cluster=parsed_args.cluster))
+        return {}, {}
@@ -27,7 +27,8 @@ NGT_FIELDS = ['id', 'name', 'plugin_name', 'plugin_version', 'node_processes',
               'volume_type', 'volume_local_to_instance', 'volume_mount_prefix',
               'volumes_availability_zone', 'use_autoconfig',
               'is_proxy_gateway', 'is_default', 'is_protected', 'is_public',
-              'boot_from_volume']
+              'boot_from_volume', 'boot_volume_type',
+              'boot_volume_availability_zone', 'boot_volume_local_to_instance']
 
 
 def _format_ngt_output(data):
@@ -38,6 +39,10 @@ def _format_ngt_output(data):
         del data['volume_type'],
         del data['volumes_availability_zone']
         del data['volumes_size']
+    if not data['boot_from_volume']:
+        del data['boot_volume_type']
+        del data['boot_volume_availability_zone']
+        del data['boot_volume_local_to_instance']
 
 
 class CreateNodeGroupTemplate(ngt_v1.CreateNodeGroupTemplate,
@@ -53,6 +58,28 @@ class CreateNodeGroupTemplate(ngt_v1.CreateNodeGroupTemplate,
             default=False,
             help="Make the node group bootable from volume",
         )
+        parser.add_argument(
+            '--boot-volume-type',
+            metavar="<boot-volume-type>",
+            help='Type of the boot volume. '
+                 'This parameter will be taken into account only '
+                 'if booting from volume.'
+        )
+        parser.add_argument(
+            '--boot-volume-availability-zone',
+            metavar="<boot-volume-availability-zone>",
+            help='Name of the availability zone to create boot volume in.'
+                 ' This parameter will be taken into account only '
+                 'if booting from volume.'
+        )
+        parser.add_argument(
+            '--boot-volume-local-to-instance',
+            action='store_true',
+            default=False,
+            help='Instance and volume guaranteed on the same host. '
+                 'This parameter will be taken into account only '
+                 'if booting from volume.'
+        )
         return parser
 
     def take_action(self, parsed_args):
@@ -132,10 +159,39 @@ class UpdateNodeGroupTemplate(ngt_v1.UpdateNodeGroupTemplate,
             help='Makes node group not bootable from volume.',
             dest='boot_from_volume'
         )
+        parser.add_argument(
+            '--boot-volume-type',
+            metavar="<boot-volume-type>",
+            help='Type of the boot volume. '
+                 'This parameter will be taken into account only '
+                 'if booting from volume.'
+        )
+        parser.add_argument(
+            '--boot-volume-availability-zone',
+            metavar="<boot-volume-availability-zone>",
+            help='Name of the availability zone to create boot volume in.'
+                 ' This parameter will be taken into account only '
+                 'if booting from volume.'
+        )
+        bfv_locality = parser.add_mutually_exclusive_group()
+        bfv_locality.add_argument(
+            '--boot-volume-local-to-instance-enable',
+            action='store_true',
+            help='Makes boot volume explicitly local to instance.',
+            dest='boot_volume_local_to_instance'
+        )
+        bfv_locality.add_argument(
+            '--boot-volume-local-to-instance-disable',
+            action='store_false',
+            help='Removes explicit instruction of boot volume locality.',
+            dest='boot_volume_local_to_instance'
+        )
         parser.set_defaults(is_public=None, is_protected=None,
                             is_proxy_gateway=None, volume_locality=None,
                             use_auto_security_group=None, use_autoconfig=None,
-                            boot_from_volume=None)
+                            boot_from_volume=None, boot_volume_type=None,
+                            boot_volume_availability_zone=None,
+                            boot_volume_local_to_instance=None)
         return parser
 
     def take_action(self, parsed_args):
@@ -50,7 +50,10 @@ NGT_INFO = {
     "volumes_availability_zone": None,
     "volumes_per_node": 2,
     "volume_local_to_instance": False,
-    "boot_from_volume": False
+    "boot_from_volume": False,
+    "boot_volume_type": None,
+    "boot_volume_availability_zone": None,
+    "boot_volume_local_to_instance": False
 }
@@ -101,7 +104,10 @@ class TestCreateNodeGroupTemplate(TestNodeGroupTemplates):
             volume_type=None, volumes_availability_zone=None,
             volumes_per_node=None, volumes_size=None, shares=None,
             node_configs=None, volume_mount_prefix=None,
-            boot_from_volume=False)
+            boot_from_volume=False,
+            boot_volume_type=None,
+            boot_volume_availability_zone=None,
+            boot_volume_local_to_instance=False)
 
     def test_ngt_create_all_options(self):
         arglist = ['--name', 'template', '--plugin', 'fake',
@@ -115,7 +121,10 @@ class TestCreateNodeGroupTemplate(TestNodeGroupTemplates):
                    '--volumes-mount-prefix', '/volume/asd',
                    '--volumes-locality', '--description', 'descr',
                    '--autoconfig', '--proxy-gateway', '--public',
-                   '--protected', '--boot-from-volume']
+                   '--protected', '--boot-from-volume',
+                   '--boot-volume-type', 'volume2',
+                   '--boot-volume-availability-zone', 'ceph',
+                   '--boot-volume-local-to-instance']
 
         verifylist = [('name', 'template'), ('plugin', 'fake'),
                       ('plugin_version', '0.1'),
@@ -149,7 +158,10 @@ class TestCreateNodeGroupTemplate(TestNodeGroupTemplates):
             volume_local_to_instance=True, volume_type='type',
             volumes_availability_zone='vavzone', volumes_per_node=2,
             volumes_size=2, shares=None, node_configs=None,
-            volume_mount_prefix='/volume/asd', boot_from_volume=True)
+            volume_mount_prefix='/volume/asd', boot_from_volume=True,
+            boot_volume_type='volume2',
+            boot_volume_availability_zone='ceph',
+            boot_volume_local_to_instance=True)
 
         # Check that columns are correct
         expected_columns = (
@@ -340,7 +352,10 @@ class TestUpdateNodeGroupTemplate(TestNodeGroupTemplates):
                    '--volumes-mount-prefix', '/volume/asd',
                    '--volumes-locality-enable', '--description', 'descr',
                    '--autoconfig-enable', '--proxy-gateway-enable', '--public',
-                   '--protected', '--boot-from-volume-enable']
+                   '--protected', '--boot-from-volume-enable',
+                   '--boot-volume-type', 'volume2',
+                   '--boot-volume-availability-zone', 'ceph',
+                   '--boot-volume-local-to-instance-enable']
 
         verifylist = [('node_group_template', 'template'),
                       ('name', 'template'), ('plugin', 'fake'),
@@ -377,7 +392,10 @@ class TestUpdateNodeGroupTemplate(TestNodeGroupTemplates):
             volume_local_to_instance=True, volume_type='type',
             volumes_availability_zone='vavzone', volumes_per_node=2,
             volumes_size=2, volume_mount_prefix='/volume/asd',
-            boot_from_volume=True)
+            boot_from_volume=True,
+            boot_volume_type='volume2',
+            boot_volume_availability_zone='ceph',
+            boot_volume_local_to_instance=True)
 
         # Check that columns are correct
         expected_columns = (
@@ -140,6 +140,7 @@ openstack.data_processing.v2 =
     dataprocessing_cluster_delete = saharaclient.osc.v2.clusters:DeleteCluster
     dataprocessing_cluster_scale = saharaclient.osc.v2.clusters:ScaleCluster
     dataprocessing_cluster_verification = saharaclient.osc.v2.clusters:VerificationUpdateCluster
+    dataprocessing_cluster_update_keypair = saharaclient.osc.v2.clusters:UpdateKeypairCluster
 
     dataprocessing_job_template_create = saharaclient.osc.v2.job_templates:CreateJobTemplate
     dataprocessing_job_template_list = saharaclient.osc.v2.job_templates:ListJobTemplates