APIv2 support in client

* Support for all APIv2 features carried over from APIv1 ("feature parity")
* Minimum amount of docs to pass the gate
* Endpoint manipulation and version discovery handled by keystoneauth
* APIv2 feature: decommission of specific instances (doc change only)
* APIv2 feature: force delete (new method)

Unit tests will arrive in a future patch.
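
As an illustrative sketch only (the auth URL, credentials, and IDs below are
placeholders, not part of this change), the new client can be exercised
roughly like this:

from keystoneauth1.identity import v3
from keystoneauth1 import session
from saharaclient.api import client as sahara_client

# keystoneauth takes care of catalog lookup and APIv2 version discovery.
auth = v3.Password(auth_url='http://controller:5000/v3',
                   username='demo', password='secret',
                   project_name='demo',
                   user_domain_id='default',
                   project_domain_id='default')
sess = session.Session(auth=auth)

sahara = sahara_client.ClientV2(session=sess)

# New in APIv2: force delete of a cluster.
sahara.clusters.force_delete('cluster-uuid')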

bp v2-api-experimental-impl

Co-Authored-By: Monty Taylor <mordred@inaugust.com>
Change-Id: I32178439fe85cc6d5faf4ac2e33ae80c08619de5
Jeremy Freudberg 2018-01-12 04:22:06 +00:00
parent 44ecbf55c7
commit 45088c61f0
19 changed files with 584 additions and 64 deletions


@ -21,6 +21,7 @@ Contents:
:maxdepth: 2
reference/index
v2_reference/index
cli/index
contributor/index


@ -103,37 +103,37 @@ Supported operations
Plugin ops
~~~~~~~~~~
.. autoclass:: saharaclient.api.plugins.PluginManager
.. autoclass:: saharaclient.api.plugins.PluginManagerV1
:members:
Image Registry ops
~~~~~~~~~~~~~~~~~~
.. autoclass:: saharaclient.api.images.ImageManager
.. autoclass:: saharaclient.api.images.ImageManagerV1
:members:
Node Group Template ops
~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: saharaclient.api.node_group_templates.NodeGroupTemplateManager
.. autoclass:: saharaclient.api.node_group_templates.NodeGroupTemplateManagerV1
:members:
Cluster Template ops
~~~~~~~~~~~~~~~~~~~~
.. autoclass:: saharaclient.api.cluster_templates.ClusterTemplateManager
.. autoclass:: saharaclient.api.cluster_templates.ClusterTemplateManagerV1
:members:
Cluster ops
~~~~~~~~~~~
.. autoclass:: saharaclient.api.clusters.ClusterManager
.. autoclass:: saharaclient.api.clusters.ClusterManagerV1
:members:
Data Source ops
~~~~~~~~~~~~~~~
.. autoclass:: saharaclient.api.data_sources.DataSourceManager
.. autoclass:: saharaclient.api.data_sources.DataSourceManagerV1
:members:
Job Binary Internal ops
@ -145,13 +145,13 @@ Job Binary Internal ops
Job Binary ops
~~~~~~~~~~~~~~
.. autoclass:: saharaclient.api.job_binaries.JobBinariesManager
.. autoclass:: saharaclient.api.job_binaries.JobBinariesManagerV1
:members:
Job ops
~~~~~~~
.. autoclass:: saharaclient.api.jobs.JobsManager
.. autoclass:: saharaclient.api.jobs.JobsManagerV1
:members:
Job Execution ops


@ -0,0 +1,8 @@
===============
Reference guide
===============
.. toctree::
:maxdepth: 2
pythonclient_v2


@ -0,0 +1,72 @@
Python Sahara client for APIv2
==============================
Overview
--------
The client also supports Sahara's experimental APIv2.
Supported operations
--------------------
Plugin ops
~~~~~~~~~~
.. autoclass:: saharaclient.api.plugins.PluginManagerV2
:members:
Image Registry ops
~~~~~~~~~~~~~~~~~~
.. autoclass:: saharaclient.api.images.ImageManagerV2
:members:
Node Group Template ops
~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: saharaclient.api.node_group_templates.NodeGroupTemplateManagerV2
:members:
Cluster Template ops
~~~~~~~~~~~~~~~~~~~~
.. autoclass:: saharaclient.api.cluster_templates.ClusterTemplateManagerV2
:members:
Cluster ops
~~~~~~~~~~~
.. autoclass:: saharaclient.api.clusters.ClusterManagerV2
:members:
Data Source ops
~~~~~~~~~~~~~~~
.. autoclass:: saharaclient.api.data_sources.DataSourceManagerV2
:members:
Job Binary ops
~~~~~~~~~~~~~~
.. autoclass:: saharaclient.api.job_binaries.JobBinariesManagerV2
:members:
Job Template ops
~~~~~~~~~~~~~~~~
.. autoclass:: saharaclient.api.v2.job_templates.JobTemplatesManagerV2
:members:
Job ops
~~~~~~~
.. autoclass:: saharaclient.api.v2.jobs.JobsManagerV2
:members:
Job Types ops
~~~~~~~~~~~~~
.. autoclass:: saharaclient.api.job_types.JobTypesManager
:members:
:noindex:


@ -0,0 +1,3 @@
---
features:
- Initial support for Sahara's experimental APIv2 has been added.


@ -120,7 +120,7 @@ class ResourceManager(object):
resp = self.api.put(url, **kwargs)
if resp.status_code != 202:
if resp.status_code not in [200, 202]:
self._raise_api_exception(resp)
if response_key is not None:
data = get_json(resp)[response_key]
@ -206,8 +206,12 @@ class ResourceManager(object):
else:
self._raise_api_exception(resp)
def _delete(self, url):
resp = self.api.delete(url)
def _delete(self, url, data=None):
if data is not None:
kwargs = {'json': data}
resp = self.api.delete(url, **kwargs)
else:
resp = self.api.delete(url)
if resp.status_code != 204:
self._raise_api_exception(resp)


@ -16,7 +16,6 @@
import warnings
from keystoneauth1 import adapter
from keystoneauth1 import exceptions
from keystoneauth1.identity import v2
from keystoneauth1.identity import v3
from keystoneauth1 import session as keystone_session
@ -33,6 +32,8 @@ from saharaclient.api import job_types
from saharaclient.api import jobs
from saharaclient.api import node_group_templates
from saharaclient.api import plugins
from saharaclient.api.v2 import job_templates
from saharaclient.api.v2 import jobs as jobs_v2
USER_AGENT = 'python-saharaclient'
@ -42,11 +43,15 @@ class HTTPClient(adapter.Adapter):
def request(self, *args, **kwargs):
kwargs.setdefault('raise_exc', False)
kwargs.setdefault('allow', {'allow_experimental': True})
return super(HTTPClient, self).request(*args, **kwargs)
class Client(object):
"""Client for the OpenStack Data Processing v1 API.
_api_version = '1.1'
"""Client for the OpenStack Data Processing API.
:param str username: Username for Keystone authentication.
:param str api_key: Password for Keystone authentication.
@ -101,16 +106,6 @@ class Client(object):
if not auth:
auth = session.auth
# NOTE(Toan): bug #1512801. If sahara_url is provided, it does not
# matter if service_type is orthographically correct or not.
# Only find Sahara service_type and endpoint in Keystone catalog
# if sahara_url is not provided.
if not sahara_url:
service_type = self._determine_service_type(session,
auth,
service_type,
endpoint_type)
kwargs['user_agent'] = USER_AGENT
kwargs.setdefault('interface', endpoint_type)
kwargs.setdefault('endpoint_override', sahara_url)
@ -119,22 +114,25 @@ class Client(object):
auth=auth,
service_type=service_type,
region_name=region_name,
version=self._api_version,
**kwargs)
self.clusters = clusters.ClusterManager(client)
self._register_managers(client)
def _register_managers(self, client):
self.clusters = clusters.ClusterManagerV1(client)
self.cluster_templates = (
cluster_templates.ClusterTemplateManager(client)
cluster_templates.ClusterTemplateManagerV1(client)
)
self.node_group_templates = (
node_group_templates.NodeGroupTemplateManager(client)
node_group_templates.NodeGroupTemplateManagerV1(client)
)
self.plugins = plugins.PluginManager(client)
self.images = images.ImageManager(client)
self.data_sources = data_sources.DataSourceManager(client)
self.jobs = jobs.JobsManager(client)
self.plugins = plugins.PluginManagerV1(client)
self.images = images.ImageManagerV1(client)
self.data_sources = data_sources.DataSourceManagerV1(client)
self.jobs = jobs.JobsManagerV1(client)
self.job_executions = job_executions.JobExecutionsManager(client)
self.job_binaries = job_binaries.JobBinariesManager(client)
self.job_binaries = job_binaries.JobBinariesManagerV1(client)
self.job_binary_internals = (
job_binary_internals.JobBinaryInternalsManager(client)
)
@ -162,27 +160,23 @@ class Client(object):
project_name=project_name,
project_domain_id='default')
@staticmethod
def _determine_service_type(session, auth, service_type, interface):
"""Check a catalog for data-processing or data_processing"""
# NOTE(jamielennox): calling get_endpoint forces an auth on
# initialization which is required for backwards compatibility. It
# also allows us to reset the service type if not in the catalog.
for st in (service_type, service_type.replace('-', '_')):
try:
url = auth.get_endpoint(session,
service_type=st,
interface=interface)
except exceptions.Unauthorized:
raise RuntimeError("Not Authorized")
except exceptions.EndpointNotFound:
# NOTE(jamielennox): bug #1428447. This should not be
# raised, instead None should be returned. Handle in case
# it changes in the future
url = None
class ClientV2(Client):
if url:
return st
_api_version = '2'
raise RuntimeError("Could not find Sahara endpoint in catalog")
def _register_managers(self, client):
self.clusters = clusters.ClusterManagerV2(client)
self.cluster_templates = (
cluster_templates.ClusterTemplateManagerV2(client)
)
self.node_group_templates = (
node_group_templates.NodeGroupTemplateManagerV2(client)
)
self.plugins = plugins.PluginManagerV2(client)
self.images = images.ImageManagerV2(client)
self.data_sources = data_sources.DataSourceManagerV2(client)
self.job_templates = job_templates.JobTemplatesManagerV2(client)
self.jobs = jobs_v2.JobsManagerV2(client)
self.job_binaries = job_binaries.JobBinariesManagerV2(client)
self.job_types = job_types.JobTypesManager(client)
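
As a hedged sketch of the manager surface this registration exposes (the
keystoneauth session object sess is assumed to already exist):

from saharaclient.api.client import Client, ClientV2

v1 = Client(session=sess)      # APIv1.1 managers, including job_executions
v2 = ClientV2(session=sess)    # APIv2 managers: job_templates and jobs

templates = v2.job_templates.list()
jobs = v2.jobs.list()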


@ -20,7 +20,7 @@ class ClusterTemplate(base.Resource):
resource_name = 'Cluster Template'
class ClusterTemplateManager(base.ResourceManager):
class ClusterTemplateManagerV1(base.ResourceManager):
resource_class = ClusterTemplate
NotUpdated = base.NotUpdated()
@ -37,6 +37,15 @@ class ClusterTemplateManager(base.ResourceManager):
'hadoop_version': hadoop_version,
}
return self._do_create(data, description, cluster_configs,
node_groups, anti_affinity, net_id,
default_image_id, use_autoconfig, shares,
is_public, is_protected, domain_name)
def _do_create(self, data, description, cluster_configs, node_groups,
anti_affinity, net_id, default_image_id, use_autoconfig,
shares, is_public, is_protected, domain_name):
self._copy_if_defined(data,
description=description,
cluster_configs=cluster_configs,
@ -101,3 +110,58 @@ class ClusterTemplateManager(base.ResourceManager):
def export(self, cluster_template_id):
"""Export a Cluster Template."""
return self._get('/cluster-templates/%s/export' % cluster_template_id)
class ClusterTemplateManagerV2(ClusterTemplateManagerV1):
NotUpdated = base.NotUpdated()
def create(self, name, plugin_name, plugin_version, description=None,
cluster_configs=None, node_groups=None, anti_affinity=None,
net_id=None, default_image_id=None, use_autoconfig=None,
shares=None, is_public=None, is_protected=None,
domain_name=None):
"""Create a Cluster Template."""
data = {
'name': name,
'plugin_name': plugin_name,
'plugin_version': plugin_version
}
return self._do_create(data, description, cluster_configs,
node_groups, anti_affinity, net_id,
default_image_id, use_autoconfig, shares,
is_public, is_protected, domain_name)
def update(self, cluster_template_id, name=NotUpdated,
plugin_name=NotUpdated, plugin_version=NotUpdated,
description=NotUpdated, cluster_configs=NotUpdated,
node_groups=NotUpdated, anti_affinity=NotUpdated,
net_id=NotUpdated, default_image_id=NotUpdated,
use_autoconfig=NotUpdated, shares=NotUpdated,
is_public=NotUpdated, is_protected=NotUpdated,
domain_name=NotUpdated):
"""Update a Cluster Template."""
data = {}
self._copy_if_updated(data, name=name,
plugin_name=plugin_name,
plugin_version=plugin_version,
description=description,
cluster_configs=cluster_configs,
node_groups=node_groups,
anti_affinity=anti_affinity,
neutron_management_network=net_id,
default_image_id=default_image_id,
use_autoconfig=use_autoconfig,
shares=shares,
is_public=is_public,
is_protected=is_protected,
domain_name=domain_name)
return self._patch('/cluster-templates/%s' % cluster_template_id,
data, 'cluster_template')
# NOTE(jfreud): keep this around for backwards compatibility
ClusterTemplateManager = ClusterTemplateManagerV1
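
A hedged usage sketch of the v2 manager above, which takes plugin_version
where APIv1 took hadoop_version (v2 is an already-built ClientV2; all ids and
values are placeholders):

ct = v2.cluster_templates.create(
    name='vanilla-ct',
    plugin_name='vanilla',
    plugin_version='2.8.2',
    node_groups=[{'name': 'worker',
                  'count': 2,
                  'node_group_template_id': 'ngt-uuid'}],
    description='created through APIv2')

# Partial updates go through PATCH in APIv2.
v2.cluster_templates.update(ct.id, description='updated description')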


@ -22,7 +22,7 @@ class Cluster(base.Resource):
resource_name = 'Cluster'
class ClusterManager(base.ResourceManager):
class ClusterManagerV1(base.ResourceManager):
resource_class = Cluster
NotUpdated = base.NotUpdated()
@ -41,6 +41,17 @@ class ClusterManager(base.ResourceManager):
'hadoop_version': hadoop_version,
}
return self._do_create(data, cluster_template_id, default_image_id,
is_transient, description, cluster_configs,
node_groups, user_keypair_id, anti_affinity,
net_id, count, use_autoconfig, shares,
is_public, is_protected)
def _do_create(self, data, cluster_template_id, default_image_id,
is_transient, description, cluster_configs, node_groups,
user_keypair_id, anti_affinity, net_id, count,
use_autoconfig, shares, is_public, is_protected):
# Checking if count is greater than 1, otherwise we set it to None
# so the created dict in the _copy_if_defined method does not contain
# the count parameter.
@ -136,3 +147,69 @@ class ClusterManager(base.ResourceManager):
"""Start a verification for a Cluster."""
data = {'verification': {'status': status}}
return self._patch("/clusters/%s" % cluster_id, data)
class ClusterManagerV2(ClusterManagerV1):
def create(self, name, plugin_name, plugin_version,
cluster_template_id=None, default_image_id=None,
is_transient=None, description=None, cluster_configs=None,
node_groups=None, user_keypair_id=None,
anti_affinity=None, net_id=None, count=None,
use_autoconfig=None, shares=None,
is_public=None, is_protected=None):
"""Launch a Cluster."""
data = {
'name': name,
'plugin_name': plugin_name,
'plugin_version': plugin_version,
}
return self._do_create(data, cluster_template_id, default_image_id,
is_transient, description, cluster_configs,
node_groups, user_keypair_id, anti_affinity,
net_id, count, use_autoconfig, shares,
is_public, is_protected)
def scale(self, cluster_id, scale_object):
"""Scale an existing Cluster.
:param scale_object: dict that describes scaling operation
:Example:
The following `scale_object` can be used to change the number of
instances in the node group (optionally specifying which instances to
delete) or add instances of a new node group to an existing cluster:
.. sourcecode:: json
{
"add_node_groups": [
{
"count": 3,
"name": "new_ng",
"node_group_template_id": "ngt_id"
}
],
"resize_node_groups": [
{
"count": 2,
"name": "old_ng",
"instances": ["instance_id1", "instance_id2"]
}
]
}
"""
return self._update('/clusters/%s' % cluster_id, scale_object)
def force_delete(self, cluster_id):
"""Force Delete a Cluster."""
data = {'force': True}
self._delete('/clusters/%s' % cluster_id, data)
# NOTE(jfreud): keep this around for backwards compatibility
ClusterManager = ClusterManagerV1
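
A sketch of the scale call above, including the APIv2-only ability to name
which instances to decommission when shrinking a node group (v2 and all ids
are placeholders):

scale_object = {
    "resize_node_groups": [
        {
            "name": "worker",
            "count": 2,
            # Decommission these specific instances while scaling down.
            "instances": ["instance-uuid-1", "instance-uuid-2"]
        }
    ]
}
v2.clusters.scale('cluster-uuid', scale_object)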


@ -20,8 +20,9 @@ class DataSources(base.Resource):
resource_name = 'Data Source'
class DataSourceManager(base.ResourceManager):
class DataSourceManagerV1(base.ResourceManager):
resource_class = DataSources
version = 1.1
def create(self, name, description, data_source_type,
url, credential_user=None, credential_pass=None,
@ -76,5 +77,18 @@ class DataSourceManager(base.ResourceManager):
* is_protected
* credentials - dict with `user` and `password` keyword arguments
"""
return self._update('/data-sources/%s' % data_source_id,
update_data)
if self.version >= 2:
UPDATE_FUNC = self._patch
else:
UPDATE_FUNC = self._update
return UPDATE_FUNC('/data-sources/%s' % data_source_id,
update_data)
class DataSourceManagerV2(DataSourceManagerV1):
version = 2
# NOTE(jfreud): keep this around for backwards compatibility
DataSourceManager = DataSourceManagerV1
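
Sketch of the version-dependent update path above: the same call issues a PUT
under APIv1 and a PATCH under APIv2 (v1, v2, and the id are assumed):

update_data = {'description': 'nightly input data'}

v1.data_sources.update('ds-uuid', update_data)   # PUT /data-sources/<id>
v2.data_sources.update('ds-uuid', update_data)   # PATCH /data-sources/<id>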


@ -21,7 +21,7 @@ class Image(base.Resource):
defaults = {'description': ''}
class ImageManager(base.ResourceManager):
class _ImageManager(base.ResourceManager):
resource_class = Image
def list(self, search_opts=None):
@ -45,6 +45,8 @@ class ImageManager(base.ResourceManager):
return self._post('/images/%s' % image_id, data)
class ImageManagerV1(_ImageManager):
def update_tags(self, image_id, new_tags):
"""Update an Image tags.
@ -72,3 +74,18 @@ class ImageManager(base.ResourceManager):
{'tags': to_remove}, 'image')
return remove_response or add_response or self.get(image_id)
class ImageManagerV2(_ImageManager):
def get_tags(self, image_id):
return self._get('/images/%s/tags' % image_id)
def update_tags(self, image_id, new_tags):
return self._update('/images/%s/tags' % image_id,
{'tags': new_tags})
def delete_tags(self, image_id):
return self._delete('/images/%s/tags' % image_id)
# NOTE(jfreud): keep this around for backwards compatibility
ImageManager = ImageManagerV1
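
Sketch of the new tag endpoints (a ClientV2 instance v2 and an image id are
assumed):

v2.images.update_tags('image-uuid', ['vanilla', '2.8.2'])
tags = v2.images.get_tags('image-uuid')
v2.images.delete_tags('image-uuid')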


@ -20,8 +20,9 @@ class JobBinaries(base.Resource):
resource_name = 'Job Binary'
class JobBinariesManager(base.ResourceManager):
class JobBinariesManagerV1(base.ResourceManager):
resource_class = JobBinaries
version = 1.1
def create(self, name, url, description=None, extra=None, is_public=None,
is_protected=None):
@ -84,5 +85,19 @@ class JobBinariesManager(base.ResourceManager):
in Swift, or with the keys `accesskey`, `secretkey`, and `endpoint`
for job binary in S3
"""
return self._update(
if self.version >= 2:
UPDATE_FUNC = self._patch
else:
UPDATE_FUNC = self._update
return UPDATE_FUNC(
'/job-binaries/%s' % job_binary_id, data, 'job_binary')
class JobBinariesManagerV2(JobBinariesManagerV1):
version = 2
# NOTE(jfreud): keep this around for backwards compatibility
JobBinariesManager = JobBinariesManagerV1
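
Sketch of creating and updating a binary through the v2 manager; the Swift
URL and the credential keys in extra follow the v1 client's conventions and
all values are placeholders:

jb = v2.job_binaries.create(
    name='wordcount.jar',
    url='swift://samples/wordcount.jar',
    extra={'user': 'demo', 'password': 'secret'})

# PATCH under APIv2, PUT under APIv1.
v2.job_binaries.update(jb.id, {'description': 'example job binary'})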


@ -20,7 +20,7 @@ class Job(base.Resource):
resource_name = 'Job'
class JobsManager(base.ResourceManager):
class JobsManagerV1(base.ResourceManager):
resource_class = Job
NotUpdated = base.NotUpdated()
@ -67,3 +67,7 @@ class JobsManager(base.ResourceManager):
is_public=is_public, is_protected=is_protected)
return self._patch('/jobs/%s' % job_id, data)
# NOTE(jfreud): keep this around for backwards compatibility
JobsManager = JobsManagerV1


@ -20,7 +20,7 @@ class NodeGroupTemplate(base.Resource):
resource_name = 'Node Group Template'
class NodeGroupTemplateManager(base.ResourceManager):
class NodeGroupTemplateManagerV1(base.ResourceManager):
resource_class = NodeGroupTemplate
NotUpdated = base.NotUpdated()
@ -43,6 +43,22 @@ class NodeGroupTemplateManager(base.ResourceManager):
'node_processes': node_processes
}
return self._do_create(data, description, volumes_per_node,
volumes_size, node_configs, floating_ip_pool,
security_groups, auto_security_group,
availability_zone, volumes_availability_zone,
volume_type, image_id, is_proxy_gateway,
volume_local_to_instance, use_autoconfig,
shares, is_public, is_protected,
volume_mount_prefix)
def _do_create(self, data, description, volumes_per_node, volumes_size,
node_configs, floating_ip_pool, security_groups,
auto_security_group, availability_zone,
volumes_availability_zone, volume_type, image_id,
is_proxy_gateway, volume_local_to_instance, use_autoconfig,
shares, is_public, is_protected, volume_mount_prefix):
self._copy_if_defined(data,
description=description,
node_configs=node_configs,
@ -131,3 +147,75 @@ class NodeGroupTemplateManager(base.ResourceManager):
def export(self, ng_template_id):
"""Export a Node Group Template."""
return self._get('/node-group-templates/%s/export' % ng_template_id)
class NodeGroupTemplateManagerV2(NodeGroupTemplateManagerV1):
NotUpdated = base.NotUpdated()
def create(self, name, plugin_name, plugin_version, flavor_id,
description=None, volumes_per_node=None, volumes_size=None,
node_processes=None, node_configs=None, floating_ip_pool=None,
security_groups=None, auto_security_group=None,
availability_zone=None, volumes_availability_zone=None,
volume_type=None, image_id=None, is_proxy_gateway=None,
volume_local_to_instance=None, use_autoconfig=None,
shares=None, is_public=None, is_protected=None,
volume_mount_prefix=None):
"""Create a Node Group Template."""
data = {
'name': name,
'plugin_name': plugin_name,
'plugin_version': plugin_version,
'flavor_id': flavor_id,
'node_processes': node_processes
}
return self._do_create(data, description, volumes_per_node,
volumes_size, node_configs, floating_ip_pool,
security_groups, auto_security_group,
availability_zone, volumes_availability_zone,
volume_type, image_id, is_proxy_gateway,
volume_local_to_instance, use_autoconfig,
shares, is_public, is_protected,
volume_mount_prefix)
def update(self, ng_template_id, name=NotUpdated, plugin_name=NotUpdated,
plugin_version=NotUpdated, flavor_id=NotUpdated,
description=NotUpdated, volumes_per_node=NotUpdated,
volumes_size=NotUpdated, node_processes=NotUpdated,
node_configs=NotUpdated, floating_ip_pool=NotUpdated,
security_groups=NotUpdated, auto_security_group=NotUpdated,
availability_zone=NotUpdated,
volumes_availability_zone=NotUpdated, volume_type=NotUpdated,
image_id=NotUpdated, is_proxy_gateway=NotUpdated,
volume_local_to_instance=NotUpdated, use_autoconfig=NotUpdated,
shares=NotUpdated, is_public=NotUpdated,
is_protected=NotUpdated, volume_mount_prefix=NotUpdated):
"""Update a Node Group Template."""
data = {}
self._copy_if_updated(
data, name=name, plugin_name=plugin_name,
plugin_version=plugin_version, flavor_id=flavor_id,
description=description, volumes_per_node=volumes_per_node,
volumes_size=volumes_size, node_processes=node_processes,
node_configs=node_configs, floating_ip_pool=floating_ip_pool,
security_groups=security_groups,
auto_security_group=auto_security_group,
availability_zone=availability_zone,
volumes_availability_zone=volumes_availability_zone,
volume_type=volume_type, image_id=image_id,
is_proxy_gateway=is_proxy_gateway,
volume_local_to_instance=volume_local_to_instance,
use_autoconfig=use_autoconfig, shares=shares,
is_public=is_public, is_protected=is_protected,
volume_mount_prefix=volume_mount_prefix
)
return self._patch('/node-group-templates/%s' % ng_template_id, data,
'node_group_template')
# NOTE(jfreud): keep this around for backwards compatibility
NodeGroupTemplateManager = NodeGroupTemplateManagerV1
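
Sketch of the v2 create call above, which takes plugin_version rather than
hadoop_version (v2, the flavor id, and the network id are placeholders):

ngt = v2.node_group_templates.create(
    name='worker',
    plugin_name='vanilla',
    plugin_version='2.8.2',
    flavor_id='flavor-uuid',
    node_processes=['datanode', 'nodemanager'],
    floating_ip_pool='public-net-uuid',
    auto_security_group=True)

v2.node_group_templates.update(ngt.id, description='worker nodes')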

View File

@ -28,7 +28,7 @@ class Plugin(base.Resource):
self.id = self.name
class PluginManager(base.ResourceManager):
class _PluginManager(base.ResourceManager):
resource_class = Plugin
def list(self, search_opts=None):
@ -55,6 +55,8 @@ class PluginManager(base.ResourceManager):
"""
return self._patch("/plugins/%s" % plugin_name, values, 'plugin')
class PluginManagerV1(_PluginManager):
def convert_to_cluster_template(self, plugin_name, hadoop_version,
template_name, filecontent):
"""Convert to cluster template
@ -73,3 +75,17 @@ class PluginManager(base.ResourceManager):
(plugin_name, hadoop_version))
else:
return base.get_json(resp)['cluster_template']
class PluginManagerV2(_PluginManager):
def get_version_details(self, plugin_name, plugin_version):
"""Get version details
Get the list of Services and Service Parameters for a specified
Plugin and Plugin Version.
"""
return self._get('/plugins/%s/%s' % (plugin_name, plugin_version),
'plugin')
# NOTE(jfreud): keep this around for backwards compatibility
PluginManager = PluginManagerV1
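
Sketch of the v2-only version-details call (plugin name and version are
placeholders):

# Returns the services and service parameters for one plugin version.
vanilla = v2.plugins.get_version_details('vanilla', '2.8.2')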



@ -0,0 +1,70 @@
# Copyright (c) 2018 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from saharaclient.api import base
class JobTemplate(base.Resource):
resource_name = 'Job Template'
class JobTemplatesManagerV2(base.ResourceManager):
resource_class = JobTemplate
NotUpdated = base.NotUpdated()
def create(self, name, type, mains=None, libs=None, description=None,
interface=None, is_public=None, is_protected=None):
"""Create a Job Template."""
data = {
'name': name,
'type': type
}
self._copy_if_defined(data, description=description, mains=mains,
libs=libs, interface=interface,
is_public=is_public, is_protected=is_protected)
return self._create('/%s' % 'job-templates', data, 'job')
def list(self, search_opts=None, limit=None,
marker=None, sort_by=None, reverse=None):
"""Get a list of Job Templates."""
query = base.get_query_string(search_opts, limit=limit, marker=marker,
sort_by=sort_by, reverse=reverse)
url = "/%s%s" % ('job-templates', query)
return self._page(url, 'job_templates', limit)
def get(self, job_id):
"""Get information about a Job Template."""
return self._get('/%s/%s' % ('job-templates', job_id), 'job')
def get_configs(self, job_type):
"""Get config hints for a specified Job Template type."""
return self._get('/%s/config-hints/%s' % ('job-templates', job_type))
def delete(self, job_id):
"""Delete a Job Template."""
self._delete('/%s/%s' % ('job-templates', job_id))
def update(self, job_id, name=NotUpdated, description=NotUpdated,
is_public=NotUpdated, is_protected=NotUpdated):
"""Update a Job Template."""
data = {}
self._copy_if_updated(data, name=name, description=description,
is_public=is_public, is_protected=is_protected)
return self._patch('/%s/%s' % ('job-templates', job_id), data)
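
Sketch of working with job templates under APIv2 (v2 and the job binary id
are placeholders):

jt = v2.job_templates.create(
    name='wordcount',
    type='MapReduce',
    libs=['job-binary-uuid'],
    description='classic wordcount')

hints = v2.job_templates.get_configs('MapReduce')
v2.job_templates.update(jt.id, description='wordcount example')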


@ -0,0 +1,72 @@
# Copyright (c) 2018 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from saharaclient.api import base
class Job(base.Resource):
resource_name = 'Job'
class JobsManagerV2(base.ResourceManager):
resource_class = Job
NotUpdated = base.NotUpdated()
def list(self, search_opts=None, marker=None, limit=None,
sort_by=None, reverse=None):
"""Get a list of Jobs."""
query = base.get_query_string(search_opts, limit=limit, marker=marker,
sort_by=sort_by, reverse=reverse)
url = "/jobs%s" % query
return self._page(url, 'jobs', limit)
def get(self, obj_id):
"""Get information about a Job."""
return self._get('/jobs/%s' % obj_id, 'job_execution')
def delete(self, obj_id):
"""Delete a Job."""
self._delete('/jobs/%s' % obj_id)
def create(self, job_template_id, cluster_id, input_id=None,
output_id=None, configs=None, interface=None, is_public=None,
is_protected=None):
"""Launch a Job."""
data = {
"cluster_id": cluster_id,
"job_template_id": job_template_id
}
self._copy_if_defined(data, input_id=input_id, output_id=output_id,
job_configs=configs, interface=interface,
is_public=is_public, is_protected=is_protected)
return self._create('/jobs', data, 'job_execution')
def refresh_status(self, obj_id):
"""Refresh Job Status."""
return self._get(
'/jobs/%s?refresh_status=True' % obj_id,
'job_execution'
)
def update(self, obj_id, is_public=NotUpdated, is_protected=NotUpdated):
"""Update a Job."""
data = {}
self._copy_if_updated(data, is_public=is_public,
is_protected=is_protected)
return self._patch('/jobs/%s' % obj_id, data)
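
Sketch of launching and polling a job under APIv2 (v2 and all ids are
placeholders; the configs layout mirrors the job_configs structure used by
APIv1):

job = v2.jobs.create(
    job_template_id='job-template-uuid',
    cluster_id='cluster-uuid',
    configs={'args': ['input-path', 'output-path']})

# Ask Sahara to refresh the job status before reading it back.
job = v2.jobs.refresh_status(job.id)
v2.jobs.update(job.id, is_public=True)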


@ -29,6 +29,7 @@ def get_client_class(version):
version_map = {
'1.0': 'saharaclient.api.client.Client',
'1.1': 'saharaclient.api.client.Client',
'2': 'saharaclient.api.client.ClientV2',
}
try:
client_path = version_map[str(version)]
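
Sketch of resolving the v2 client through the version map above (a
keystoneauth session sess is assumed):

from saharaclient.client import get_client_class

client_cls = get_client_class('2')     # -> saharaclient.api.client.ClientV2
sahara = client_cls(session=sess)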