APIv2 in dashboard

Everything except decommission of a specific node.

Change-Id: Iaa02b91d75b51b812c7e5eaf8612a1d75e17748e
This commit is contained in:
Jeremy Freudberg 2018-07-11 16:15:50 -04:00 committed by Telles Nobrega
parent 16dd27b486
commit 95aca19615
19 changed files with 232 additions and 50 deletions

View File

@ -0,0 +1,6 @@
---
features:
- There is now support for APIv2 in sahara-dashboard. Enable it by setting
the "data-processing" API version to "2" in the `OPENSTACK_API_VERSIONS`
dictionary found in Horizon's local_settings.py. Some Sahara features are
only exposed through APIv2.

View File

@ -51,6 +51,8 @@ VERSIONS = base.APIVersionManager(
{}).get(SAHARA_SERVICE, 1.1))
VERSIONS.load_supported_version(1.1, {"client": api_client,
"version": 1.1})
VERSIONS.load_supported_version(2, {"client": api_client,
"version": 2})
SAHARA_PAGE_SIZE = 15
@ -149,8 +151,7 @@ def plugin_get(request, plugin_name):
def plugin_get_version_details(request, plugin_name, hadoop_version):
return client(request).plugins.get_version_details(
plugin_name=plugin_name,
hadoop_version=hadoop_version)
plugin_name, hadoop_version)
def nodegroup_template_create(request, name, plugin_name, hadoop_version,
@ -170,10 +171,9 @@ def nodegroup_template_create(request, name, plugin_name, hadoop_version,
is_public=None,
is_protected=None,
volume_mount_prefix=None):
return client(request).node_group_templates.create(
payload = dict(
name=name,
plugin_name=plugin_name,
hadoop_version=hadoop_version,
flavor_id=flavor_id,
description=description,
volumes_per_node=volumes_per_node,
@ -194,6 +194,11 @@ def nodegroup_template_create(request, name, plugin_name, hadoop_version,
is_public=is_public,
is_protected=is_protected,
volume_mount_prefix=volume_mount_prefix)
if VERSIONS.active == '2':
payload['plugin_version'] = hadoop_version
else:
payload['hadoop_version'] = hadoop_version
return client(request).node_group_templates.create(**payload)
def nodegroup_template_list(request, search_opts=None,
@ -208,6 +213,8 @@ def nodegroup_template_get(request, ngt_id):
def nodegroup_template_find(request, **kwargs):
    """Search for node group templates matching the given filters.

    APIv2 renamed the ``hadoop_version`` filter key to
    ``plugin_version``, so translate it when the v2 API is active.
    """
    if VERSIONS.active == '2' and "hadoop_version" in kwargs:
        kwargs["plugin_version"] = kwargs.pop("hadoop_version")
    return client(request).node_group_templates.find(**kwargs)
@ -231,11 +238,10 @@ def nodegroup_template_update(request, ngt_id, name, plugin_name,
is_protected=None,
is_public=None,
image_id=None):
return client(request).node_group_templates.update(
payload = dict(
ng_template_id=ngt_id,
name=name,
plugin_name=plugin_name,
hadoop_version=hadoop_version,
flavor_id=flavor_id,
description=description,
volumes_per_node=volumes_per_node,
@ -255,6 +261,11 @@ def nodegroup_template_update(request, ngt_id, name, plugin_name,
is_public=is_public,
is_protected=is_protected,
image_id=image_id)
if VERSIONS.active == '2':
payload['plugin_version'] = hadoop_version
else:
payload['hadoop_version'] = hadoop_version
return client(request).node_group_templates.update(**payload)
def nodegroup_update_acl_rules(request, nid,
@ -273,10 +284,9 @@ def cluster_template_create(request, name, plugin_name, hadoop_version,
net_id=None, use_autoconfig=None, shares=None,
is_public=None, is_protected=None,
domain_name=None):
return client(request).cluster_templates.create(
payload = dict(
name=name,
plugin_name=plugin_name,
hadoop_version=hadoop_version,
description=description,
cluster_configs=cluster_configs,
node_groups=node_groups,
@ -288,6 +298,11 @@ def cluster_template_create(request, name, plugin_name, hadoop_version,
is_protected=is_protected,
domain_name=domain_name
)
if VERSIONS.active == '2':
payload['plugin_version'] = hadoop_version
else:
payload['hadoop_version'] = hadoop_version
return client(request).cluster_templates.create(**payload)
def cluster_template_list(request, search_opts=None, marker=None, limit=None):
@ -314,11 +329,10 @@ def cluster_template_update(request, ct_id, name, plugin_name,
is_public=None, is_protected=None,
domain_name=None):
try:
template = client(request).cluster_templates.update(
payload = dict(
cluster_template_id=ct_id,
name=name,
plugin_name=plugin_name,
hadoop_version=hadoop_version,
description=description,
cluster_configs=cluster_configs,
node_groups=node_groups,
@ -330,6 +344,11 @@ def cluster_template_update(request, ct_id, name, plugin_name,
is_protected=is_protected,
domain_name=domain_name
)
if VERSIONS.active == '2':
payload['plugin_version'] = hadoop_version
else:
payload['hadoop_version'] = hadoop_version
template = client(request).cluster_templates.update(**payload)
except APIException as e:
raise exceptions.Conflict(e)
@ -352,10 +371,9 @@ def cluster_create(request, name, plugin_name, hadoop_version,
node_groups=None, user_keypair_id=None, anti_affinity=None,
net_id=None, count=None, use_autoconfig=None,
is_public=None, is_protected=None):
return client(request).clusters.create(
payload = dict(
name=name,
plugin_name=plugin_name,
hadoop_version=hadoop_version,
cluster_template_id=cluster_template_id,
default_image_id=default_image_id,
is_transient=is_transient,
@ -369,6 +387,11 @@ def cluster_create(request, name, plugin_name, hadoop_version,
use_autoconfig=use_autoconfig,
is_public=is_public,
is_protected=is_protected)
if VERSIONS.active == '2':
payload['plugin_version'] = hadoop_version
else:
payload['hadoop_version'] = hadoop_version
return client(request).clusters.create(**payload)
def cluster_scale(request, cluster_id, scale_object):
@ -397,6 +420,10 @@ def cluster_delete(request, cluster_id):
client(request).clusters.delete(cluster_id=cluster_id)
def cluster_force_delete(request, cluster_id):
    """Force-delete the given cluster via the Sahara client.

    NOTE(review): the dashboard exposes this action only when APIv2 is
    active -- confirm the client supports ``force_delete`` before reuse.
    """
    sahara = client(request)
    sahara.clusters.force_delete(cluster_id=cluster_id)
def cluster_update(request, cluster_id, name=None, description=None,
is_public=None, is_protected=None, shares=None):
return client(request).clusters.update(cluster_id,
@ -516,7 +543,12 @@ def job_binary_internal_delete(request, jbi_id):
def job_create(request, name, j_type, mains, libs, description, interface,
is_public=None, is_protected=None):
return client(request).jobs.create(
sahara = client(request)
if VERSIONS.active == '2':
manager = 'job_templates'
else:
manager = 'jobs'
return getattr(sahara, manager).create(
name=name,
type=j_type,
mains=mains,
@ -528,32 +560,62 @@ def job_create(request, name, j_type, mains, libs, description, interface,
def job_update(request, job_id, is_public=None, is_protected=None):
    """Update the public/protected ACL flags of a job (template).

    APIv2 renamed "jobs" to "job templates"; pick the client manager
    that matches the active API version.
    """
    # Defect fixed: a leftover pre-change statement
    # (`return client(request).jobs.update(`) was interleaved with the
    # new body, leaving the function syntactically invalid.
    sahara = client(request)
    if VERSIONS.active == '2':
        manager = 'job_templates'
    else:
        manager = 'jobs'
    return getattr(sahara, manager).update(
        job_id=job_id, **prepare_acl_update_dict(is_public, is_protected))
def job_list(request, search_opts=None, marker=None, limit=None):
    """List jobs (APIv1.1) or job templates (APIv2) with pagination.

    :param search_opts: optional server-side filter dict
    :param marker: pagination marker (last item of previous page)
    :param limit: page size
    """
    # Defect fixed: stray pre-change line
    # (`return client(request).jobs.list(`) removed from the body.
    marker, limit = _update_pagination_params(marker, limit, request)
    sahara = client(request)
    if VERSIONS.active == '2':
        manager = 'job_templates'
    else:
        manager = 'jobs'
    return getattr(sahara, manager).list(
        search_opts=search_opts,
        limit=limit,
        marker=marker)
def _job_list(request):
    """Return every job (v1.1) / job template (v2) without pagination."""
    # Defect fixed: stray pre-change line
    # (`return client(request).jobs.list()`) removed from the body.
    sahara = client(request)
    if VERSIONS.active == '2':
        manager = 'job_templates'
    else:
        manager = 'jobs'
    return getattr(sahara, manager).list()
def job_get(request, job_id):
    """Fetch a single job (v1.1) / job template (v2) by id."""
    # Defect fixed: stray pre-change line
    # (`return client(request).jobs.get(job_id=job_id)`) removed.
    sahara = client(request)
    if VERSIONS.active == '2':
        manager = 'job_templates'
    else:
        manager = 'jobs'
    return getattr(sahara, manager).get(job_id=job_id)
def job_delete(request, job_id):
    """Delete a job (v1.1) / job template (v2) by id."""
    # Defect fixed: stray pre-change line
    # (`client(request).jobs.delete(job_id=job_id)`) removed.
    sahara = client(request)
    if VERSIONS.active == '2':
        manager = 'job_templates'
    else:
        manager = 'jobs'
    getattr(sahara, manager).delete(job_id=job_id)
def job_get_configs(request, job_type):
    """Return the supported configs for the given job type.

    Uses the ``job_templates`` manager under APIv2 and ``jobs`` under
    APIv1.1 -- the call signature is the same on both.
    """
    # Defect fixed: stray pre-change line
    # (`return client(request).jobs.get_configs(job_type=job_type)`) removed.
    sahara = client(request)
    if VERSIONS.active == '2':
        manager = 'job_templates'
    else:
        manager = 'jobs'
    return getattr(sahara, manager).get_configs(job_type=job_type)
def job_execution_create(request, job_id, cluster_id,
@ -563,8 +625,13 @@ def job_execution_create(request, job_id, cluster_id,
input_id = None
if output_id in [None, "", "None"]:
output_id = None
return client(request).job_executions.create(
job_id=job_id,
sahara = client(request)
if VERSIONS.active == '2':
manager = 'jobs'
else:
manager = 'job_executions'
return getattr(sahara, manager).create(
job_id,
cluster_id=cluster_id,
input_id=input_id,
output_id=output_id,
@ -576,9 +643,14 @@ def job_execution_create(request, job_id, cluster_id,
def job_execution_update(request, jbe_id, is_public=None, is_protected=None):
    """Update public/protected flags on a job execution (v1.1) / job (v2).

    APIv2 renamed "job executions" to "jobs"; select the matching
    client manager and pass the id positionally (v2's update signature).
    """
    # Defect fixed: the pre-change three-line statement
    # (`return client(request).job_executions.update(job_execution_id=...`)
    # was left interleaved with the new body, breaking the function.
    sahara = client(request)
    if VERSIONS.active == '2':
        manager = 'jobs'
    else:
        manager = 'job_executions'
    return getattr(sahara, manager).update(
        jbe_id, **prepare_acl_update_dict(is_public, is_protected))
def _resolve_job_execution_names(job_execution, cluster=None,
@ -597,7 +669,12 @@ def _resolve_job_execution_names(job_execution, cluster=None,
def job_execution_list(request, search_opts=None, marker=None, limit=None):
marker, limit = _update_pagination_params(marker, limit, request)
job_execution_list = client(request).job_executions.list(
sahara = client(request)
if VERSIONS.active == '2':
manager = 'jobs'
else:
manager = 'job_executions'
job_execution_list = getattr(sahara, manager).list(
search_opts=search_opts, limit=limit,
marker=marker)
@ -620,15 +697,28 @@ def job_execution_list(request, search_opts=None, marker=None, limit=None):
def job_execution_get(request, jex_id):
    """Fetch a job execution and resolve its cluster and job names.

    Under APIv2 "job executions" became "jobs" and "jobs" became
    "job templates", so both manager names depend on the version.
    """
    # Defects fixed: stray pre-change line
    # (`jex = client(request).job_executions.get(obj_id=jex_id)`) removed,
    # and the cluster lookup now reuses the already-bound `sahara` client
    # instead of calling client(request) a second time.
    sahara = client(request)
    if VERSIONS.active == '2':
        je_manager = 'jobs'
        jt_manager = 'job_templates'
    else:
        je_manager = 'job_executions'
        jt_manager = 'jobs'
    jex = getattr(sahara, je_manager).get(obj_id=jex_id)
    cluster = safe_call(sahara.clusters.get, jex.cluster_id)
    job = safe_call(getattr(sahara, jt_manager).get, jex.job_id)
    return _resolve_job_execution_names(jex, cluster, job)
def job_execution_delete(request, jex_id):
    """Delete a job execution (v1.1) / job (v2) by id."""
    # Defect fixed: stray pre-change line
    # (`client(request).job_executions.delete(obj_id=jex_id)`) removed.
    sahara = client(request)
    if VERSIONS.active == '2':
        manager = 'jobs'
    else:
        manager = 'job_executions'
    getattr(sahara, manager).delete(obj_id=jex_id)
def job_types_list(request):

View File

@ -132,7 +132,8 @@ class ImportClusterTemplateNodegroupsForm(forms.SelfHandlingForm):
req.update(request.POST)
plugin = template_json["plugin_name"]
version = template_json["hadoop_version"]
version = (template_json.get("hadoop_version", None) or
template_json["plugin_version"])
if not plugin or not version:
self.templates = saharaclient.nodegroup_template_find(request)

View File

@ -47,7 +47,12 @@ class CreateCluster(tables.LinkAction):
def get_link_url(self, datum):
base_url = urls.reverse(self.url)
params = http.urlencode({"hadoop_version": datum.hadoop_version,
if saharaclient.VERSIONS.active == '2':
version_attr = "plugin_version"
else:
version_attr = "hadoop_version"
params = http.urlencode({"hadoop_version":
getattr(datum, version_attr),
"plugin_name": datum.plugin_name,
"cluster_template_id": datum.id})
return "?".join([base_url, params])
@ -172,7 +177,11 @@ class ClusterTemplatesTable(sahara_table.SaharaPaginateTabbedTable):
"clusters:ct-details"))
plugin_name = tables.Column("plugin_name",
verbose_name=_("Plugin"))
hadoop_version = tables.Column("hadoop_version",
if saharaclient.VERSIONS.active == '2':
version_attr = "plugin_version"
else:
version_attr = "hadoop_version"
hadoop_version = tables.Column(version_attr,
verbose_name=_("Version"))
node_groups = tables.Column(render_node_groups,
verbose_name=_("Node Groups"),

View File

@ -65,6 +65,8 @@ class GeneralTab(tabs.Tab):
except Exception as e:
template = {}
LOG.error("Unable to fetch cluster template details: %s" % str(e))
if saharaclient.VERSIONS.active == '2':
template.hadoop_version = template.plugin_version
return {"template": template}

View File

@ -37,9 +37,15 @@ class CopyClusterTemplate(create_flow.ConfigureClusterTemplate):
self.cluster_template_id)
self._set_configs_to_copy(self.template.cluster_configs)
if saharaclient.VERSIONS.active == '2':
version_attr = 'plugin_version'
else:
version_attr = 'hadoop_version'
hadoop_version = getattr(self.template, version_attr)
request.GET = request.GET.copy()
request.GET.update({"plugin_name": self.template.plugin_name,
"hadoop_version": self.template.hadoop_version,
version_attr: hadoop_version,
"aa_groups": self.template.anti_affinity})
super(CopyClusterTemplate, self).__init__(request, context_seed,

View File

@ -171,7 +171,7 @@ class ConfigureNodegroupsAction(workflows.Action):
req = request.GET.copy()
req.update(request.POST)
plugin = req.get("plugin_name")
version = req.get("hadoop_version")
version = req.get("hadoop_version", None) or req["plugin_version"]
if plugin and not version:
version_name = plugin + "_version"
version = req.get(version_name)

View File

@ -106,6 +106,29 @@ class CheckClusterAction(tables.BatchAction):
saharaclient.verification_update(request, datum_id, status='START')
class ForceDeleteCluster(tables.DeleteAction):
    """Table action that force-deletes the selected clusters.

    NOTE(review): this action is added to the table only when the
    "data-processing" API version is 2 -- confirm before reusing it
    in a v1.1-only deployment.
    """
    # Action slug used by Horizon to identify the button/form action.
    name = "force_delete"

    @staticmethod
    def action_present(count):
        # Button label; singular/plural chosen by selection count.
        return ungettext_lazy(
            u"Force Delete Cluster",
            u"Force Delete Clusters",
            count
        )

    @staticmethod
    def action_past(count):
        # Past-tense message shown after the action completes.
        return ungettext_lazy(
            u"Force Deleted Cluster",
            u"Force Deleted Clusters",
            count
        )

    def delete(self, request, obj_id):
        # Delegate the actual API call to the saharaclient wrapper.
        saharaclient.cluster_force_delete(request, obj_id)
class UpdateClusterShares(tables.LinkAction):
name = "update_shares"
verbose_name = _("Update Shares")
@ -237,7 +260,11 @@ class ClustersTable(sahara_table.SaharaPaginateTabbedTable):
plugin = tables.Column("plugin_name",
verbose_name=_("Plugin"))
version = tables.Column("hadoop_version",
if saharaclient.VERSIONS.active == '2':
version_attr = "plugin_version"
else:
version_attr = "hadoop_version"
version = tables.Column(version_attr,
verbose_name=_("Version"))
# Status field need the whole cluster object to build the rich status.
@ -268,6 +295,8 @@ class ClustersTable(sahara_table.SaharaPaginateTabbedTable):
ConfigureCluster,
DeleteCluster,
ClustersFilterAction)
if saharaclient.VERSIONS.active == '2':
table_actions = table_actions + (ForceDeleteCluster,)
table_actions_menu = (MakePublic, MakePrivate,
MakeProtected, MakeUnProtected)
if SAHARA_VERIFICATION_DISABLED:
@ -281,3 +310,5 @@ class ClustersTable(sahara_table.SaharaPaginateTabbedTable):
DeleteCluster, MakePublic, MakePrivate,
MakeProtected, MakeUnProtected,
CheckClusterAction)
if saharaclient.VERSIONS.active == '2':
row_actions = row_actions + (ForceDeleteCluster,)

View File

@ -100,6 +100,11 @@ class GeneralTab(tabs.Tab):
"base_image": base_image,
"cluster_template": cluster_template,
"network": net_name})
if saharaclient.VERSIONS.active == '2':
cluster_info["cluster"].hadoop_version = (
cluster_info["cluster"].plugin_version
)
except Exception as e:
LOG.error("Unable to fetch cluster details: %s" % str(e))

View File

@ -48,7 +48,11 @@ class ScaleCluster(cl_create_flow.ConfigureCluster,
try:
cluster = saharaclient.cluster_get(request, cluster_id)
plugin = cluster.plugin_name
hadoop_version = cluster.hadoop_version
if saharaclient.VERSIONS.active == '2':
version_attr = 'plugin_version'
else:
version_attr = 'hadoop_version'
hadoop_version = getattr(cluster, version_attr)
# Initialize deletable node groups.
deletable = dict()
@ -58,7 +62,7 @@ class ScaleCluster(cl_create_flow.ConfigureCluster,
request.GET.update({
"cluster_id": cluster_id,
"plugin_name": plugin,
"hadoop_version": hadoop_version,
version_attr: hadoop_version,
"deletable": deletable
})

View File

@ -125,10 +125,12 @@ class ImportNodegroupTemplateDetailsForm(forms.SelfHandlingForm):
else:
self.fields["flavor"].choices = []
version = (template_json.get("hadoop_version", None) or
template_json["plugin_version"])
self.fields["image_id"].choices = \
self._populate_image_choices(request,
template_json["plugin_name"],
template_json["hadoop_version"])
version)
except (ValueError, KeyError):
raise exceptions.BadRequest(_("Could not parse template"))
except Exception:

View File

@ -147,7 +147,11 @@ class NodegroupTemplatesTable(sahara_table.SaharaPaginateTabbedTable):
link="horizon:project:data_processing.clusters:details")
plugin_name = tables.Column("plugin_name",
verbose_name=_("Plugin"))
hadoop_version = tables.Column("hadoop_version",
if saharaclient.VERSIONS.active == '2':
version_attr = "plugin_version"
else:
version_attr = "hadoop_version"
hadoop_version = tables.Column(version_attr,
verbose_name=_("Version"))
node_processes = tables.Column("node_processes",
verbose_name=_("Node Processes"),

View File

@ -30,11 +30,15 @@ class CopyNodegroupTemplate(create_flow.ConfigureNodegroupTemplate):
self._set_configs_to_copy(self.template.node_configs)
plugin = self.template.plugin_name
hadoop_version = self.template.hadoop_version
if saharaclient.VERSIONS.active == '2':
version_attr = 'plugin_version'
else:
version_attr = 'hadoop_version'
hadoop_version = getattr(self.template, version_attr)
request.GET = request.GET.copy()
request.GET.update(
{"plugin_name": plugin, "hadoop_version": hadoop_version})
{"plugin_name": plugin, version_attr: hadoop_version})
super(CopyNodegroupTemplate, self).__init__(request, context_seed,
entry_point, *args,

View File

@ -13,7 +13,7 @@
</dl>
<dl class="dl-horizontal">
<dt>{% trans "Plugin" %}</dt>
<dd><a href="{% url 'horizon:project:data_processing.data_plugins:plugin-details' template.plugin_name %}">{{ cluster.plugin_name }}</a></dd>
<dd><a href="{% url 'horizon:project:data_processing.data_plugins:plugin-details' template.plugin_name %}">{{ template.plugin_name }}</a></dd>
<dt>{% trans "Version" %}</dt>
<dd>{{ template.hadoop_version }}</dd>
<dt>{% trans "Use auto-configuration" %}</dt>

View File

@ -215,12 +215,15 @@ class JobBinaryCreateForm(forms.SelfHandlingForm):
_("job binary"))
self.fields["job_binary_type"].choices =\
[("internal-db", "Internal database"),
("swift", "Swift"),
[("swift", "Swift"),
("s3", "S3")]
self.fields["job_binary_internal"].choices =\
self.populate_job_binary_internal_choices(request)
if saharaclient.VERSIONS.active == '1.1':
self.fields["job_binary_type"].choices.append(
("internal-db", "Internal database")
)
self.fields["job_binary_internal"].choices =\
self.populate_job_binary_internal_choices(request)
if saharaclient.base.is_service_enabled(request, 'share'):
self.fields["job_binary_type"].choices.append(("manila", "Manila"))

View File

@ -57,7 +57,7 @@ class DeleteJobBinary(tables.DeleteAction):
jb_type = url_parts[0]
jb_internal_id = url_parts[len(url_parts) - 1]
if jb_type == "internal-db":
if jb_type == "internal-db" and saharaclient.VERSIONS.active == '1.1':
try:
saharaclient.job_binary_internal_delete(request,
jb_internal_id)

View File

@ -85,8 +85,21 @@ class GeneralTab(tabs.Tab):
LOG.error("Unable to fetch output url: %s", e)
object_names["output_url"] = "None"
if saharaclient.VERSIONS.active == '2':
job_execution_engine_job_attr = "engine_job_id"
job_execution_engine_job_attr_pretty = _("Engine Job ID")
else:
job_execution_engine_job_attr = "oozie_job_id"
job_execution_engine_job_attr_pretty = _("Oozie Job ID")
job_execution_engine_job_id = getattr(job_execution,
job_execution_engine_job_attr)
return {"job_execution": job_execution,
"object_names": object_names}
"object_names": object_names,
"job_execution_engine_job_id": job_execution_engine_job_id,
"job_execution_engine_job_attr_pretty":
job_execution_engine_job_attr_pretty}
def get_object_names(self, job_ex, request):
object_names = {}

View File

@ -32,8 +32,8 @@
<dd>{{ job_execution.end_time|parse_isotime }}</dd>
<dt>{% trans "Return Code" %}</dt>
<dd>{{ job_execution.return_code }}</dd>
<dt>{% trans "Oozie Job ID" %}</dt>
<dd>{{ job_execution.oozie_job_id }}</dd>
<dt>{{ job_execution_engine_job_attr_pretty }}</dt>
<dd>{{ job_execution_engine_job_id }}</dd>
<dt>{% trans "Created" context "Created time" %}</dt>
<dd>{{ job_execution.created_at|parse_isotime }}</dd>
<dt>{% trans "Job Configuration" %}</dt>

View File

@ -208,7 +208,9 @@ def get_plugin_and_hadoop_version(request):
req.update(request.POST)
if req.get("plugin_name"):
plugin_name = req["plugin_name"]
hadoop_version = req["hadoop_version"]
hadoop_version = (
req.get("plugin_version", None) or req["hadoop_version"]
)
return plugin_name, hadoop_version