Add Sahara client API code

This is the code that interfaces with the Sahara client
library. It is part of merging the Sahara dashboard
into Horizon.
This code was originally from:
https://github.com/openstack/sahara-dashboard

Change-Id: Icdec326e276e1cfeedae6d2fd6047051725f9566
Partial-Implements: blueprint merge-sahara-dashboard
Co-Authored-By: Nikita Konovalov <nkonovalov@mirantis.com>
Co-Authored-By: Dmitry Mescheryakov <dmescheryakov@mirantis.com>
Author: Chad Roberts, 2014-04-10 12:28:28 -04:00
Parent: 9da62c463d
Commit: 799d24e13f
4 changed files with 314 additions and 1 deletion


@@ -491,6 +491,15 @@ Default: ``"1800"``
Specifies the timespan of inactivity, in seconds, after which a user is
considered logged out.

``SAHARA_AUTO_IP_ALLOCATION_ENABLED``
-------------------------------------

Default: ``False``

This setting tells the Data Processing (Sahara) service whether automatic
IP allocation is enabled. Set it to ``True`` if you are running Nova
Networking with ``auto_assign_floating_ip = True``.
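For example, a deployment relying on nova-network auto-assignment would
enable it in ``local_settings.py`` (a minimal sketch)::

    SAHARA_AUTO_IP_ALLOCATION_ENABLED = True
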
Django Settings (Partial)
=========================


@@ -0,0 +1,293 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from django.conf import settings

from openstack_dashboard.api import base
from saharaclient import client as api_client


LOG = logging.getLogger(__name__)

# "type" of Sahara service registered in keystone
SAHARA_SERVICE = 'data_processing'

SAHARA_AUTO_IP_ALLOCATION_ENABLED = getattr(settings,
                                            'SAHARA_AUTO_IP_ALLOCATION_ENABLED',
                                            False)
VERSIONS = base.APIVersionManager(SAHARA_SERVICE,
                                  preferred_version=getattr(settings,
                                                            'OPENSTACK_API_VERSIONS',
                                                            {}).get(SAHARA_SERVICE, 1.1))
VERSIONS.load_supported_version(1.1, {"client": api_client,
                                      "version": 1.1})


def client(request):
    # A fresh client is constructed per call, authenticated with the token
    # of the user making the request.
    return api_client.Client(VERSIONS.get_active_version()["version"],
                             sahara_url=base.url_for(request, SAHARA_SERVICE),
                             service_type=SAHARA_SERVICE,
                             project_id=request.user.project_id,
                             input_auth_token=request.user.token.id)


def image_list(request):
    return client(request).images.list()


def image_get(request, image_id):
    return client(request).images.get(image_id)


def image_unregister(request, image_id):
    client(request).images.unregister_image(image_id)


def image_update(request, image_id, user_name, desc):
    client(request).images.update_image(image_id, user_name, desc)


def image_tags_update(request, image_id, image_tags):
    client(request).images.update_tags(image_id, image_tags)


def plugin_list(request):
    return client(request).plugins.list()


def plugin_get(request, plugin_name):
    return client(request).plugins.get(plugin_name)


def plugin_get_version_details(request, plugin_name, hadoop_version):
    return client(request).plugins.get_version_details(plugin_name,
                                                       hadoop_version)


def plugin_convert_to_template(request, plugin_name, hadoop_version,
                               template_name, file_content):
    return client(request).plugins.convert_to_cluster_template(plugin_name,
                                                               hadoop_version,
                                                               template_name,
                                                               file_content)


def nodegroup_template_create(request, name, plugin_name, hadoop_version,
                              flavor_id, description=None,
                              volumes_per_node=None, volumes_size=None,
                              node_processes=None, node_configs=None,
                              floating_ip_pool=None):
    return client(request).node_group_templates.create(name, plugin_name,
                                                       hadoop_version,
                                                       flavor_id, description,
                                                       volumes_per_node,
                                                       volumes_size,
                                                       node_processes,
                                                       node_configs,
                                                       floating_ip_pool)


def nodegroup_template_list(request):
    return client(request).node_group_templates.list()


def nodegroup_template_get(request, ngt_id):
    return client(request).node_group_templates.get(ngt_id)


def nodegroup_template_delete(request, ngt_id):
    client(request).node_group_templates.delete(ngt_id)


def nodegroup_template_update(request, ngt_id, name, plugin_name,
                              hadoop_version, flavor_id,
                              description=None, volumes_per_node=None,
                              volumes_size=None, node_processes=None,
                              node_configs=None, floating_ip_pool=None):
    return client(request).node_group_templates.update(ngt_id, name,
                                                       plugin_name,
                                                       hadoop_version,
                                                       flavor_id,
                                                       description,
                                                       volumes_per_node,
                                                       volumes_size,
                                                       node_processes,
                                                       node_configs,
                                                       floating_ip_pool)


def cluster_template_create(request, name, plugin_name, hadoop_version,
                            description=None, cluster_configs=None,
                            node_groups=None, anti_affinity=None,
                            net_id=None):
    return client(request).cluster_templates.create(name, plugin_name,
                                                    hadoop_version,
                                                    description,
                                                    cluster_configs,
                                                    node_groups,
                                                    anti_affinity,
                                                    net_id)


def cluster_template_list(request):
    return client(request).cluster_templates.list()


def cluster_template_get(request, ct_id):
    return client(request).cluster_templates.get(ct_id)


def cluster_template_delete(request, ct_id):
    client(request).cluster_templates.delete(ct_id)


def cluster_template_update(request, ct_id, name, plugin_name,
                            hadoop_version, description=None,
                            cluster_configs=None, node_groups=None,
                            anti_affinity=None, net_id=None):
    return client(request).cluster_templates.update(ct_id, name,
                                                    plugin_name,
                                                    hadoop_version,
                                                    description,
                                                    cluster_configs,
                                                    node_groups,
                                                    anti_affinity,
                                                    net_id)


def cluster_create(request, name, plugin_name, hadoop_version,
                   cluster_template_id=None, default_image_id=None,
                   is_transient=None, description=None, cluster_configs=None,
                   node_groups=None, user_keypair_id=None,
                   anti_affinity=None, net_id=None):
    return client(request).clusters.create(name, plugin_name, hadoop_version,
                                           cluster_template_id,
                                           default_image_id,
                                           is_transient, description,
                                           cluster_configs, node_groups,
                                           user_keypair_id, anti_affinity,
                                           net_id)


def cluster_scale(request, cluster_id, scale_object):
    return client(request).clusters.scale(cluster_id, scale_object)


def cluster_list(request):
    return client(request).clusters.list()


def cluster_get(request, cluster_id):
    return client(request).clusters.get(cluster_id)


def cluster_delete(request, cluster_id):
    client(request).clusters.delete(cluster_id)


def data_source_create(request, name, description, ds_type, url,
                       credential_user=None, credential_pass=None):
    return client(request).data_sources.create(name, description, ds_type,
                                               url, credential_user,
                                               credential_pass)


def data_source_list(request):
    return client(request).data_sources.list()


def data_source_get(request, ds_id):
    return client(request).data_sources.get(ds_id)


def data_source_delete(request, ds_id):
    client(request).data_sources.delete(ds_id)


def job_binary_create(request, name, url, description, extra):
    return client(request).job_binaries.create(name, url, description, extra)


def job_binary_list(request):
    return client(request).job_binaries.list()


def job_binary_get(request, jb_id):
    return client(request).job_binaries.get(jb_id)


def job_binary_delete(request, jb_id):
    client(request).job_binaries.delete(jb_id)


def job_binary_get_file(request, jb_id):
    return client(request).job_binaries.get_file(jb_id)


def job_binary_internal_create(request, name, data):
    return client(request).job_binary_internals.create(name, data)


def job_binary_internal_list(request):
    return client(request).job_binary_internals.list()


def job_binary_internal_get(request, jbi_id):
    return client(request).job_binary_internals.get(jbi_id)


def job_binary_internal_delete(request, jbi_id):
    client(request).job_binary_internals.delete(jbi_id)


def job_create(request, name, j_type, mains, libs, description):
    return client(request).jobs.create(name, j_type, mains, libs, description)


def job_list(request):
    return client(request).jobs.list()


def job_get(request, job_id):
    return client(request).jobs.get(job_id)


def job_delete(request, job_id):
    client(request).jobs.delete(job_id)


def job_get_configs(request, job_type):
    return client(request).jobs.get_configs(job_type)


def job_execution_create(request, job_id, cluster_id,
                         input_id, output_id, configs):
    return client(request).job_executions.create(job_id, cluster_id,
                                                 input_id, output_id,
                                                 configs)


def job_execution_list(request):
    return client(request).job_executions.list()


def job_execution_get(request, jex_id):
    return client(request).job_executions.get(jex_id)


def job_execution_delete(request, jex_id):
    client(request).job_executions.delete(jex_id)
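
The wrappers above are meant to be called from Horizon views, which simply
pass the incoming ``request`` through. A minimal usage sketch (hypothetical
view helper, not part of this commit):

# Hypothetical Horizon view helper, for illustration only.
from openstack_dashboard.api import sahara


def list_plugin_names(request):
    # Every wrapper goes through client(), which authenticates with the
    # current user's token, so callers just pass the request object.
    return [plugin.name for plugin in sahara.plugin_list(request)]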


@@ -22,6 +22,7 @@ from heatclient import exc as heatclient
from keystoneclient import exceptions as keystoneclient
from neutronclient.common import exceptions as neutronclient
from novaclient import exceptions as novaclient
from saharaclient.api import base as saharaclient
from swiftclient import client as swiftclient
from troveclient import exceptions as troveclient
@@ -73,5 +74,6 @@ RECOVERABLE = (
    swiftclient.ClientException,
    heatclient.HTTPForbidden,
    heatclient.HTTPException,
    troveclient.ClientException,
    saharaclient.APIException,
)
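
Adding ``saharaclient.APIException`` to ``RECOVERABLE`` means
``horizon.exceptions.handle()`` reports Sahara API failures to the user
instead of letting them propagate. A minimal sketch of the effect
(hypothetical view code, not part of this commit):

from horizon import exceptions

from openstack_dashboard.api import sahara


def get_clusters(request):
    try:
        return sahara.cluster_list(request)
    except Exception:
        # APIException is now in RECOVERABLE, so handle() renders an
        # error message for the user instead of re-raising.
        exceptions.handle(request, "Unable to retrieve clusters.")
        return []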


@@ -32,6 +32,7 @@ TEMPLATE_DEBUG = DEBUG
# service API. For example, The identity service APIs have inconsistent
# use of the decimal point, so valid options would be "2.0" or "3".
# OPENSTACK_API_VERSIONS = {
# "data_processing": 1.1,
# "identity": 3,
# "volume": 2
# }
@@ -492,3 +493,11 @@ FLAVOR_EXTRA_KEYS = {
('quota:outbound_average', _('Quota: Outbound average')),
]
}
# Indicate to the Sahara data processing service whether or not
# automatic floating IP allocation is in effect. If it is not
# in effect, the user will be prompted to choose a floating IP
# pool for use in their cluster. False by default. Set this to
# True if you are running Nova Networking with
# auto_assign_floating_ip = True.
# SAHARA_AUTO_IP_ALLOCATION_ENABLED = False