Support updates and ACL for objects

It implements PATCH updates for clients, updates
support for all objects and is_public/is_protected
support. It's a combination of parts of the
following patches (squashed to make review and merge
gating easier and faster):

I3abc57b43d825d69129f9c0b2058d1e2a004c832
I0d1d892ce11c9a3b46c8583ed58f9131a0f915a3

Co-Authored-By: luhuichun <huichun.lu@intel.com>
Co-Authored-By: Andrey Pavlov <apavlov@mirantis.com>
Co-Authored-By: Vitaly Gridnev <vgridnev@mirantis.com>

Change-Id: I5a0156b7effc59ef3d183b0caacb6980c27f425c
Partially-Implements: blueprint api-for-objects-update
Partially-Implements: blueprint shared-protected-resources
This commit is contained in:
Sergey Lukjanov
2015-08-28 11:45:54 +03:00
committed by Vitaly Gridnev
parent 6caf479b3c
commit 3b2bfbb8b9
13 changed files with 164 additions and 17 deletions

View File

@@ -119,6 +119,24 @@ class ResourceManager(object):
data = get_json(resp)[response_key]
else:
data = get_json(resp)
return self.resource_class(self, data)
def _patch(self, url, data, response_key=None, dump_json=True):
    """Issue a PATCH request and wrap the server reply in a resource.

    :param url: resource URL to patch
    :param data: request payload; serialized as JSON when ``dump_json``
                 is True, otherwise sent as raw body data
    :param response_key: optional key under which the server nests the
                         resource in its JSON reply
    :param dump_json: whether to send ``data`` as JSON
    :returns: an instance of ``self.resource_class`` built from the reply
    """
    kwargs = {'json': data} if dump_json else {'data': data}
    resp = self.api.patch(url, **kwargs)
    # Sahara answers PATCH requests with 202 Accepted; anything else
    # is reported as an API error.
    if resp.status_code != 202:
        self._raise_api_exception(resp)
    body = get_json(resp)
    if response_key is not None:
        body = body[response_key]
    return self.resource_class(self, body)
def _list(self, url, response_key):

View File

@@ -26,7 +26,8 @@ class ClusterTemplateManager(base.ResourceManager):
def _assign_field(self, name, plugin_name, hadoop_version,
description=None, cluster_configs=None, node_groups=None,
anti_affinity=None, net_id=None, default_image_id=None,
use_autoconfig=None, shares=None):
use_autoconfig=None, shares=None, is_public=None,
is_protected=None):
data = {
'name': name,
'plugin_name': plugin_name,
@@ -41,28 +42,33 @@ class ClusterTemplateManager(base.ResourceManager):
neutron_management_network=net_id,
default_image_id=default_image_id,
use_autoconfig=use_autoconfig,
shares=shares)
shares=shares,
is_public=is_public,
is_protected=is_protected)
return data
def create(self, name, plugin_name, hadoop_version, description=None,
cluster_configs=None, node_groups=None, anti_affinity=None,
net_id=None, default_image_id=None, use_autoconfig=None,
shares=None):
shares=None, is_public=None, is_protected=None):
data = self._assign_field(name, plugin_name, hadoop_version,
description, cluster_configs, node_groups,
anti_affinity, net_id, default_image_id,
use_autoconfig, shares)
use_autoconfig, shares, is_public,
is_protected)
return self._create('/cluster-templates', data, 'cluster_template')
def update(self, cluster_template_id, name, plugin_name, hadoop_version,
description=None, cluster_configs=None, node_groups=None,
anti_affinity=None, net_id=None, default_image_id=None,
use_autoconfig=None, shares=None):
use_autoconfig=None, shares=None, is_public=None,
is_protected=None):
data = self._assign_field(name, plugin_name, hadoop_version,
description, cluster_configs, node_groups,
anti_affinity, net_id, default_image_id,
use_autoconfig, shares)
use_autoconfig, shares, is_public,
is_protected)
return self._update('/cluster-templates/%s' % cluster_template_id,
data, 'cluster_template')

View File

@@ -30,7 +30,8 @@ class ClusterManager(base.ResourceManager):
is_transient=None, description=None, cluster_configs=None,
node_groups=None, user_keypair_id=None,
anti_affinity=None, net_id=None, count=None,
use_autoconfig=None, shares=None):
use_autoconfig=None, shares=None,
is_public=None, is_protected=None):
data = {
'name': name,
@@ -56,7 +57,9 @@ class ClusterManager(base.ResourceManager):
neutron_management_network=net_id,
count=count,
use_autoconfig=use_autoconfig,
shares=shares)
shares=shares,
is_public=is_public,
is_protected=is_protected)
if count:
return self._create('/clusters/multiple', data)
@@ -79,3 +82,12 @@ class ClusterManager(base.ResourceManager):
def delete(self, cluster_id):
self._delete('/clusters/%s' % cluster_id)
def update(self, cluster_id, name=None, description=None, is_public=None,
           is_protected=None):
    """Partially update a cluster via PATCH.

    Only the arguments that are not None are included in the request
    body, so unspecified fields are left untouched on the server.
    """
    body = {}
    self._copy_if_defined(body, name=name, description=description,
                          is_public=is_public, is_protected=is_protected)
    return self._patch('/clusters/%s' % cluster_id, body)

View File

@@ -24,7 +24,8 @@ class DataSourceManager(base.ResourceManager):
resource_class = DataSources
def create(self, name, description, data_source_type,
url, credential_user=None, credential_pass=None):
url, credential_user=None, credential_pass=None,
is_public=None, is_protected=None):
data = {
'name': name,
'description': description,
@@ -35,6 +36,10 @@ class DataSourceManager(base.ResourceManager):
self._copy_if_defined(data['credentials'],
user=credential_user,
password=credential_pass)
self._copy_if_defined(data, is_public=is_public,
is_protected=is_protected)
return self._create('/data-sources', data, 'data_source')
def list(self, search_opts=None):

View File

@@ -23,7 +23,8 @@ class JobBinaries(base.Resource):
class JobBinariesManager(base.ResourceManager):
resource_class = JobBinaries
def create(self, name, url, description, extra):
def create(self, name, url, description, extra, is_public=None,
is_protected=None):
data = {
"name": name,
"url": url,
@@ -31,6 +32,9 @@ class JobBinariesManager(base.ResourceManager):
"extra": extra
}
self._copy_if_defined(data, is_public=is_public,
is_protected=is_protected)
return self._create('/job-binaries', data, 'job_binary')
def list(self, search_opts=None):

View File

@@ -40,3 +40,12 @@ class JobBinaryInternalsManager(base.ResourceManager):
def delete(self, job_binary_id):
self._delete('/job-binary-internals/%s' % job_binary_id)
def update(self, job_binary_id, name=None, is_public=None,
           is_protected=None):
    """Partially update a job binary internal via PATCH.

    Fields left as None are omitted from the request body.
    """
    body = {}
    self._copy_if_defined(body, name=name, is_public=is_public,
                          is_protected=is_protected)
    return self._patch('/job-binary-internals/%s' % job_binary_id, body)

View File

@@ -34,7 +34,9 @@ class JobExecutionsManager(base.ResourceManager):
self._delete('/job-executions/%s' % obj_id)
def create(self, job_id, cluster_id, input_id,
output_id, configs, interface=None):
output_id, configs, interface=None, is_public=None,
is_protected=None):
url = "/jobs/%s/execute" % job_id
data = {
"cluster_id": cluster_id,
@@ -52,4 +54,14 @@ class JobExecutionsManager(base.ResourceManager):
if value is not None:
data.update({key: value})
self._copy_if_defined(data, is_public=is_public,
is_protected=is_protected)
return self._create(url, data, 'job_execution')
def update(self, obj_id, is_public=None, is_protected=None):
    """Partially update a job execution's ACL flags via PATCH.

    Flags left as None are not sent and keep their server-side value.
    """
    body = {}
    self._copy_if_defined(body, is_public=is_public,
                          is_protected=is_protected)
    return self._patch('/job-executions/%s' % obj_id, body)

View File

@@ -49,3 +49,12 @@ class JobsManager(base.ResourceManager):
def delete(self, job_id):
self._delete('/jobs/%s' % job_id)
def update(self, job_id, name=None, description=None, is_public=None,
           is_protected=None):
    """Partially update a job via PATCH.

    Only non-None arguments are included in the request body.
    """
    body = {}
    self._copy_if_defined(body, name=name, description=description,
                          is_public=is_public, is_protected=is_protected)
    return self._patch('/jobs/%s' % job_id, body)

View File

@@ -31,7 +31,7 @@ class NodeGroupTemplateManager(base.ResourceManager):
availability_zone=None, volumes_availability_zone=None,
volume_type=None, image_id=None, is_proxy_gateway=None,
volume_local_to_instance=None, use_autoconfig=None,
shares=None):
shares=None, is_public=None, is_protected=None):
data = {
'name': name,
@@ -51,7 +51,10 @@ class NodeGroupTemplateManager(base.ResourceManager):
image_id=image_id,
is_proxy_gateway=is_proxy_gateway,
use_autoconfig=use_autoconfig,
shares=shares)
shares=shares,
is_public=is_public,
is_protected=is_protected
)
if volumes_per_node:
data.update({"volumes_per_node": volumes_per_node,
@@ -74,7 +77,7 @@ class NodeGroupTemplateManager(base.ResourceManager):
availability_zone=None, volumes_availability_zone=None,
volume_type=None, image_id=None, is_proxy_gateway=None,
volume_local_to_instance=None, use_autoconfig=None,
shares=None):
shares=None, is_public=None, is_protected=None):
data = self._assign_field(name, plugin_name, hadoop_version, flavor_id,
description, volumes_per_node, volumes_size,
@@ -84,7 +87,7 @@ class NodeGroupTemplateManager(base.ResourceManager):
volumes_availability_zone, volume_type,
image_id, is_proxy_gateway,
volume_local_to_instance, use_autoconfig,
shares)
shares, is_public, is_protected)
return self._create('/node-group-templates', data,
'node_group_template')
@@ -97,7 +100,7 @@ class NodeGroupTemplateManager(base.ResourceManager):
volumes_availability_zone=None, volume_type=None,
image_id=None, is_proxy_gateway=None,
volume_local_to_instance=None, use_autoconfig=None,
shares=None):
shares=None, is_public=None, is_protected=None):
data = self._assign_field(name, plugin_name, hadoop_version, flavor_id,
description, volumes_per_node, volumes_size,
@@ -107,7 +110,7 @@ class NodeGroupTemplateManager(base.ResourceManager):
volumes_availability_zone, volume_type,
image_id, is_proxy_gateway,
volume_local_to_instance, use_autoconfig,
shares)
shares, is_public, is_protected)
return self._update('/node-group-templates/%s' % ng_template_id, data,
'node_group_template')

View File

@@ -148,3 +148,21 @@ class ClusterTest(base.BaseTestCase):
self.client.clusters.delete('id')
self.assertEqual(url, self.responses.last_request.url)
def test_clusters_update(self):
    """PATCH /clusters/<id> carries only the given fields and wraps the reply."""
    url = self.URL + '/clusters/id'
    expected = {
        'name': 'new_name',
        'description': 'descr'
    }
    self.responses.patch(url, status_code=202, json=expected)

    resp = self.client.clusters.update('id', name='new_name',
                                       description='descr')

    self.assertEqual(url, self.responses.last_request.url)
    self.assertIsInstance(resp, cl.Cluster)
    self.assertEqual(expected,
                     json.loads(self.responses.last_request.body))

View File

@@ -12,6 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import json
from saharaclient.api import job_binary_internals as jbi
from saharaclient.tests.unit import base
@@ -62,3 +64,19 @@ class JobBinaryInternalTest(base.BaseTestCase):
self.client.job_binary_internals.delete('id')
self.assertEqual(url, self.responses.last_request.url)
def test_job_binary_update(self):
    """PATCH /job-binary-internals/<id> sends the new name and wraps the reply."""
    url = self.URL + '/job-binary-internals/id'
    expected = {
        'name': 'new_name'
    }
    self.responses.patch(url, status_code=202, json=expected)

    resp = self.client.job_binary_internals.update('id', name='new_name')

    self.assertEqual(url, self.responses.last_request.url)
    self.assertIsInstance(resp, jbi.JobBinaryInternal)
    self.assertEqual(expected,
                     json.loads(self.responses.last_request.body))

View File

@@ -32,6 +32,11 @@ class JobExecutionTest(base.BaseTestCase):
'job_configs': {}
}
update_json = {
'is_public': True,
'is_protected': True,
}
def test_create_job_execution_with_io(self):
url = self.URL + '/jobs/job_id/execute'
@@ -92,3 +97,13 @@ class JobExecutionTest(base.BaseTestCase):
self.client.job_executions.delete('id')
self.assertEqual(url, self.responses.last_request.url)
def test_job_executions_update(self):
    """PATCH /job-executions/<id> forwards the ACL flags and wraps the reply."""
    url = self.URL + '/job-executions/id'
    self.responses.patch(url, status_code=202, json=self.update_json)

    resp = self.client.job_executions.update("id", **self.update_json)

    self.assertEqual(url, self.responses.last_request.url)
    self.assertIsInstance(resp, je.JobExecution)
    self.assertEqual(self.update_json,
                     json.loads(self.responses.last_request.body))

View File

@@ -83,3 +83,21 @@ class JobTest(base.BaseTestCase):
self.client.jobs.delete('id')
self.assertEqual(url, self.responses.last_request.url)
def test_jobs_update(self):
    """PATCH /jobs/<id> carries only the given fields and wraps the reply."""
    url = self.URL + '/jobs/id'
    expected = {
        'name': 'new_name',
        'description': 'description'
    }
    self.responses.patch(url, status_code=202, json=expected)

    resp = self.client.jobs.update('id', name='new_name',
                                   description='description')

    self.assertEqual(url, self.responses.last_request.url)
    self.assertIsInstance(resp, jobs.Job)
    self.assertEqual(expected,
                     json.loads(self.responses.last_request.body))