Renamed InvalidException to InvalidReferenceException

Also replaced InvalidException with other, more specific exceptions
(InvalidDataException, NotFoundException, DecommissionError) where
appropriate.

Change-Id: I4e07598afc69d90398ca3451e9b04864e55310ed
Closes-Bug: #1404995
parent 3afd0a1ddb
commit 1ad5ffb791
@@ -74,14 +74,14 @@ class InvalidCredentials(SaharaException):
         super(InvalidCredentials, self).__init__()
 
 
-class InvalidException(SaharaException):
+class InvalidReferenceException(SaharaException):
     message = _("Invalid object reference")
 
     def __init__(self, message=None):
         self.code = "INVALID_REFERENCE"
         if message:
             self.message = message
-        super(InvalidException, self).__init__()
+        super(InvalidReferenceException, self).__init__()
 
 
 class RemoteCommandException(SaharaException):
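Note: the more specific classes that replace InvalidException throughout this
change (InvalidDataException, NotFoundException, DecommissionError) are not
shown in this hunk. A minimal sketch of the two used most often, inferred from
their call sites and the NOT_FOUND/INVALID_DATA codes asserted in the tests
below; the exact bodies are an assumption, not part of this commit:

    class InvalidDataException(SaharaException):
        # Assumed shape: raised when the content of a request is malformed.
        message = _("Data is invalid")

        def __init__(self, message=None):
            self.code = "INVALID_DATA"
            if message:
                self.message = message
            super(InvalidDataException, self).__init__()


    class NotFoundException(SaharaException):
        # Assumed shape: call sites pass (value, message_template) and the
        # template is interpolated with the missing value.
        message = _("Object '%s' is not found")

        def __init__(self, value, message=None):
            self.code = "NOT_FOUND"
            self.value = value
            if message:
                self.message = message % value
            super(NotFoundException, self).__init__()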
@@ -626,7 +626,7 @@ class AmbariClient(object):
 
     def decommission_cluster_instances(self, cluster, clusterspec, instances,
                                        ambari_info):
-        raise exc.InvalidException(_('The HDP plugin does not support '
+        raise exc.InvalidDataException(_('The HDP plugin does not support '
                                      'the decommissioning of nodes '
                                      'for HDP version 1.3.2'))
 
@@ -658,7 +658,7 @@ class AmbariClient(object):
         if result.status_code != 202:
             LOG.error(_LE('AmbariClient: error while making decommission post '
                           'request. Error is = %s'), result.text)
-            raise exc.InvalidException(
+            raise ex.DecommissionError(
                 _('An error occurred while trying to '
                   'decommission the DataNode instances that are '
                   'being shut down. '
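The two Ambari hunks above now distinguish "operation not supported"
(InvalidDataException) from "remote call failed" (DecommissionError). A hedged
caller sketch; the client variable and LOG call are illustrative only, and
DecommissionError is assumed to subclass SaharaException:

    try:
        client.decommission_cluster_instances(cluster, clusterspec,
                                              instances, ambari_info)
    except ex.DecommissionError as e:
        # Ambari rejected the decommission POST (the non-202 response above).
        LOG.warning("Decommission failed: %s", e)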
@@ -53,20 +53,20 @@ def _get_plugin_configs(plugin_name, hadoop_version, scope=None):
 
 def check_plugin_name_exists(name):
     if name not in [p.name for p in api.get_plugins()]:
-        raise ex.InvalidException(
+        raise ex.InvalidReferenceException(
             _("Sahara doesn't contain plugin with name '%s'") % name)
 
 
 def check_plugin_supports_version(p_name, version):
     if version not in plugin_base.PLUGINS.get_plugin(p_name).get_versions():
-        raise ex.InvalidException(
+        raise ex.InvalidReferenceException(
             _("Requested plugin '%(name)s' doesn't support version "
               "'%(version)s'") % {'name': p_name, 'version': version})
 
 
 def check_image_registered(image_id):
     if image_id not in [i.id for i in nova.client().images.list_registered()]:
-        raise ex.InvalidException(
+        raise ex.InvalidReferenceException(
             _("Requested image '%s' is not registered") % image_id)
 
 
@@ -77,12 +77,12 @@ def check_node_group_configs(plugin_name, hadoop_version, ng_configs,
                                      hadoop_version)
     for app_target, configs in ng_configs.items():
         if app_target not in pl_confs:
-            raise ex.InvalidException(
+            raise ex.InvalidReferenceException(
                 _("Plugin doesn't contain applicable target '%s'")
                 % app_target)
         for name, values in configs.items():
             if name not in pl_confs[app_target]:
-                raise ex.InvalidException(
+                raise ex.InvalidReferenceException(
                     _("Plugin's applicable target '%(target)s' doesn't "
                       "contain config with name '%(name)s'") %
                     {'target': app_target, 'name': name})
@@ -151,8 +151,8 @@ def check_node_group_basic_fields(plugin_name, hadoop_version, ng,
 def check_flavor_exists(flavor_id):
     flavor_list = nova.client().flavors.list()
     if flavor_id not in [flavor.id for flavor in flavor_list]:
-        raise ex.InvalidException(
-            _("Requested flavor '%s' not found") % flavor_id)
+        raise ex.NotFoundException(
+            flavor_id, _("Requested flavor '%s' not found"))
 
 
 def check_security_groups_exist(security_groups):
@@ -162,7 +162,8 @@ def check_security_groups_exist(security_groups):
                               for sg in security_group_list], []))
     for sg in security_groups:
         if sg not in allowed_groups:
-            raise ex.InvalidException(_("Security group '%s' not found") % sg)
+            raise ex.NotFoundException(
+                sg, _("Security group '%s' not found"))
 
 
 def check_floatingip_pool_exists(ng_name, pool_id):
@@ -176,14 +177,12 @@ def check_floatingip_pool_exists(ng_name, pool_id):
             break
 
     if not network:
-        raise ex.InvalidException(
-            _("Floating IP pool %(pool)s for node group '%(group)s' "
-              "not found") % {'pool': pool_id, 'group': ng_name})
+        raise ex.NotFoundException(pool_id, _("Floating IP pool %s not found"))
 
 
 def check_node_processes(plugin_name, version, node_processes):
     if len(set(node_processes)) != len(node_processes):
-        raise ex.InvalidException(
+        raise ex.InvalidDataException(
             _("Duplicates in node processes have been detected"))
     plugin_processes = []
     for process in plugin_base.PLUGINS.get_plugin(
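Note the changed calling convention in the lookup checks above: the missing
value is now passed first and a message template second, instead of formatting
the message at the call site. A usage sketch with a hypothetical flavor id,
assuming the usual sahara.i18n import:

    from sahara import exceptions as ex
    from sahara.i18n import _

    # Before: raise ex.InvalidException(
    #             _("Requested flavor '%s' not found") % flavor_id)
    # After: NotFoundException interpolates the template itself and sets its
    # error code to NOT_FOUND, which the API layer reports to clients.
    raise ex.NotFoundException('m1.small', _("Requested flavor '%s' not found"))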
@@ -191,7 +190,7 @@ def check_node_processes(plugin_name, version, node_processes):
         plugin_processes += process
 
     if not set(node_processes).issubset(set(plugin_processes)):
-        raise ex.InvalidException(
+        raise ex.InvalidReferenceException(
             _("Plugin supports the following node procesess: %s")
             % sorted(plugin_processes))
 
@@ -199,7 +198,7 @@ def check_node_processes(plugin_name, version, node_processes):
 def check_duplicates_node_groups_names(node_groups):
     ng_names = [ng['name'] for ng in node_groups]
     if len(set(ng_names)) < len(node_groups):
-        raise ex.InvalidException(
+        raise ex.InvalidDataException(
             _("Duplicates in node group names are detected"))
 
 
@@ -207,16 +206,16 @@ def check_availability_zone_exist(az):
     az_list = nova.client().availability_zones.list(False)
     az_names = [a.zoneName for a in az_list]
     if az not in az_names:
-        raise ex.InvalidException(_("Nova availability zone '%s' not found")
-                                  % az)
+        raise ex.NotFoundException(
+            az, _("Nova availability zone '%s' not found"))
 
 
 def check_volume_availability_zone_exist(az):
     az_list = cinder.client().availability_zones.list()
     az_names = [a.zoneName for a in az_list]
     if az not in az_names:
-        raise ex.InvalidException(_("Cinder availability zone '%s' not found")
-                                  % az)
+        raise ex.NotFoundException(
+            az, _("Cinder availability zone '%s' not found"))
 
 
 def check_volume_type_exists(volume_type):
@@ -224,7 +223,7 @@ def check_volume_type_exists(volume_type):
                                                  volume_type})
     if len(volume_types) == 1 and volume_types[0] == volume_type:
         return
-    raise ex.NotFoundException(_("Volume type '%s' not found") % volume_type)
+    raise ex.NotFoundException(volume_type, _("Volume type '%s' not found"))
 
 
 # Cluster creation related checks
@@ -252,7 +251,7 @@ def check_cluster_hostnames_lengths(cluster_name, node_groups):
             longest_hostname += '.'
         longest_hostname += CONF.node_domain
         if len(longest_hostname) > MAX_HOSTNAME_LENGTH:
-            raise ex.InvalidException(
+            raise ex.InvalidDataException(
                 _("Composite hostname %(host)s in provisioned cluster exceeds"
                   " maximum limit %(limit)s characters") %
                 {'host': longest_hostname,
@@ -263,13 +262,13 @@ def check_keypair_exists(keypair):
     try:
         nova.client().keypairs.get(keypair)
     except nova_ex.NotFound:
-        raise ex.InvalidException(
-            _("Requested keypair '%s' not found") % keypair)
+        raise ex.NotFoundException(
+            keypair, _("Requested keypair '%s' not found"))
 
 
 def check_network_exists(net_id):
     if not nova.get_network(id=net_id):
-        raise ex.InvalidException(_("Network %s not found") % net_id)
+        raise ex.NotFoundException(net_id, _("Network %s not found"))
 
 
 # Cluster templates related checks
@@ -282,9 +281,9 @@ def check_cluster_template_unique_name(name):
 
 def check_cluster_template_exists(cluster_template_id):
     if not api.get_cluster_template(id=cluster_template_id):
-        raise ex.InvalidException(
-            _("Cluster template with id '%s' doesn't exist")
-            % cluster_template_id)
+        raise ex.NotFoundException(
+            cluster_template_id,
+            _("Cluster template with id '%s' not found"))
 
 
 def check_node_groups_in_cluster_templates(cluster_name, plugin_name,
@@ -308,8 +307,8 @@ def check_node_group_template_unique_name(name):
 
 def check_node_group_template_exists(ng_tmpl_id):
     if not api.get_node_group_template(id=ng_tmpl_id):
-        raise ex.InvalidException(
-            _("NodeGroup template with id '%s' doesn't exist") % ng_tmpl_id)
+        raise ex.NotFoundException(
+            ng_tmpl_id, _("NodeGroup template with id '%s' not found"))
 
 
 def check_network_config(node_groups):
@@ -343,7 +342,7 @@ def check_resize(cluster, r_node_groups):
 
     for ng in r_node_groups:
         if ng['name'] not in cluster_ng_names:
-            raise ex.InvalidException(
+            raise ex.InvalidReferenceException(
                 _("Cluster doesn't contain node group with name '%s'")
                 % ng['name'])
 
@@ -357,7 +356,7 @@ def check_add_node_groups(cluster, add_node_groups):
 
     for ng in add_node_groups:
         if ng['name'] in cluster_ng_names:
-            raise ex.InvalidException(
+            raise ex.InvalidReferenceException(
                 _("Can't add new nodegroup. Cluster already has nodegroup with"
                   " name '%s'") % ng['name'])
 
@@ -371,7 +370,7 @@ def check_cinder_exists():
     services = [service.name for service in
                 keystone.client_for_admin().services.list()]
     if 'cinder' not in services:
-        raise ex.InvalidException(_("Cinder is not supported"))
+        raise ex.InvalidReferenceException(_("Cinder is not supported"))
 
 
 # Tags
@@ -382,7 +381,7 @@ def check_required_image_tags(plugin_name, hadoop_version, image_id):
     plugin = plugin_base.PLUGINS.get_plugin(plugin_name)
     req_tags = set(plugin.get_required_image_tags(hadoop_version))
     if not req_tags.issubset(set(image.tags)):
-        raise ex.InvalidException(
+        raise ex.InvalidReferenceException(
             _("Tags of requested image '%(image)s' don't contain required"
               " tags ['%(name)s', '%(version)s']")
             % {'image': image_id, 'name': plugin_name,
@@ -133,7 +133,7 @@ def check_cluster_template_usage(cluster_template_id, **kwargs):
             users.append(cluster.name)
 
     if users:
-        raise ex.InvalidException(
+        raise ex.InvalidReferenceException(
             _("Cluster template %(id)s in use by %(clusters)s") %
             {'id': cluster_template_id,
              'clusters': ', '.join(users)})
@@ -87,7 +87,7 @@ def check_cluster_create(data, **kwargs):
     neutron_net_id = _get_cluster_field(data, 'neutron_management_network')
     if neutron_net_id:
         if not CONF.use_neutron:
-            raise ex.InvalidException(
+            raise ex.InvalidReferenceException(
                 _("'neutron_management_network' field can't be used "
                   "with 'use_neutron=False'"))
         b.check_network_exists(neutron_net_id)
@@ -76,14 +76,14 @@ def check_cluster_scaling(data, cluster_id, **kwargs):
     engine_type_and_version = api.OPS.get_engine_type_and_version()
     if (not cluster_engine and
             not engine_type_and_version.startswith('direct')):
-        raise ex.InvalidException(
+        raise ex.InvalidReferenceException(
             _("Cluster created before Juno release "
               "can't be scaled with %(engine)s engine") %
             {"engine": engine_type_and_version})
 
     if (cluster.sahara_info and
             cluster_engine != engine_type_and_version):
-        raise ex.InvalidException(
+        raise ex.InvalidReferenceException(
             _("Cluster created with %(old_engine)s infrastructure engine "
               "can't be scaled with %(new_engine)s engine") %
             {"old_engine": cluster.sahara_info.get('infrastructure_engine'),
@@ -93,12 +93,12 @@ def check_cluster_scaling(data, cluster_id, **kwargs):
             'scale_cluster') and (
                 plugin_base.PLUGINS.is_plugin_implements(cluster.plugin_name,
                                                          'decommission_nodes'))):
-        raise ex.InvalidException(
+        raise ex.InvalidReferenceException(
             _("Requested plugin '%s' doesn't support cluster scaling feature")
             % cluster.plugin_name)
 
     if cluster.status != 'Active':
-        raise ex.InvalidException(
+        raise ex.InvalidReferenceException(
             _("Cluster cannot be scaled not in 'Active' status. "
               "Cluster status: %s") % cluster.status)
 
@@ -56,8 +56,8 @@ def check_data_source_unique_name(name):
 
 def check_data_source_exists(data_source_id):
     if not conductor.data_source_get(context.ctx(), data_source_id):
-        raise ex.InvalidException(_("DataSource with id '%s'"
-                                    " doesn't exist") % data_source_id)
+        raise ex.InvalidReferenceException(
+            _("DataSource with id '%s' doesn't exist") % data_source_id)
 
 
 def check_job_unique_name(name):
@@ -68,8 +68,8 @@ def check_job_unique_name(name):
 
 def check_job_binary_internal_exists(jbi_id):
     if not conductor.job_binary_internal_get(context.ctx(), jbi_id):
-        raise ex.InvalidException(_("JobBinaryInternal with id '%s'"
-                                    " doesn't exist") % jbi_id)
+        raise ex.InvalidReferenceException(
+            _("JobBinaryInternal with id '%s' doesn't exist") % jbi_id)
 
 
 def check_data_sources_are_different(data_source_1_id, data_source_2_id):
@@ -67,10 +67,10 @@ def check_data_source_create(data, **kwargs):
 
 def _check_swift_data_source_create(data):
     if len(data['url']) == 0:
-        raise ex.InvalidException(_("Swift url must not be empty"))
+        raise ex.InvalidDataException(_("Swift url must not be empty"))
     url = urlparse.urlparse(data['url'])
     if url.scheme != "swift":
-        raise ex.InvalidException(_("URL scheme must be 'swift'"))
+        raise ex.InvalidDataException(_("URL scheme must be 'swift'"))
 
     # The swift url suffix does not have to be included in the netloc.
     # However, if the swift suffix indicator is part of the netloc then
@@ -78,7 +78,7 @@ def _check_swift_data_source_create(data):
     # Additionally, the path must be more than '/'
     if (su.SWIFT_URL_SUFFIX_START in url.netloc and not url.netloc.endswith(
             su.SWIFT_URL_SUFFIX)) or len(url.path) <= 1:
-        raise ex.InvalidException(
+        raise ex.InvalidDataException(
             _("URL must be of the form swift://container%s/object")
             % su.SWIFT_URL_SUFFIX)
 
@@ -96,20 +96,20 @@ def _check_swift_data_source_create(data):
 
 def _check_hdfs_data_source_create(data):
     if len(data['url']) == 0:
-        raise ex.InvalidException(_("HDFS url must not be empty"))
+        raise ex.InvalidDataException(_("HDFS url must not be empty"))
     url = urlparse.urlparse(data['url'])
     if url.scheme:
         if url.scheme != "hdfs":
-            raise ex.InvalidException(_("URL scheme must be 'hdfs'"))
+            raise ex.InvalidDataException(_("URL scheme must be 'hdfs'"))
         if not url.hostname:
-            raise ex.InvalidException(_("HDFS url is incorrect, "
-                                        "cannot determine a hostname"))
+            raise ex.InvalidDataException(
+                _("HDFS url is incorrect, cannot determine a hostname"))
 
 
 def _check_maprfs_data_source_create(data):
     if len(data['url']) == 0:
-        raise ex.InvalidException(_("MapR FS url must not be empty"))
+        raise ex.InvalidDataException(_("MapR FS url must not be empty"))
     url = urlparse.urlparse(data['url'])
     if url.scheme:
         if url.scheme != "maprfs":
-            raise ex.InvalidException(_("URL scheme must be 'maprfs'"))
+            raise ex.InvalidDataException(_("URL scheme must be 'maprfs'"))
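To illustrate the InvalidDataException path above, a hedged example of a
payload the Swift check now rejects; the dict is hypothetical test data, not
part of this change:

    data = {
        'url': 'hdfs://namenode/path',  # wrong scheme for a swift source
        'type': 'swift',
        'description': 'example',
    }
    # _check_swift_data_source_create(data) raises
    # InvalidDataException("URL scheme must be 'swift'") where it previously
    # raised InvalidException.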
@@ -79,7 +79,7 @@ def check_job_execution(data, job_id):
 
     cluster = conductor.cluster_get(ctx, data['cluster_id'])
     if not cluster:
-        raise ex.InvalidException(
+        raise ex.InvalidReferenceException(
             _("Cluster with id '%s' doesn't exist") % data['cluster_id'])
 
     job = conductor.job_get(ctx, job_id)
@@ -87,7 +87,7 @@ def check_job_execution(data, job_id):
     plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
     edp_engine = plugin.get_edp_engine(cluster, job.type)
     if not edp_engine:
-        raise ex.InvalidException(
+        raise ex.InvalidReferenceException(
             _("Cluster with id '%(cluster_id)s' doesn't support job type "
               "'%(job_type)s'") % {"cluster_id": cluster.id,
                                    "job_type": job.type})
@@ -125,7 +125,7 @@ def check_node_group_template_usage(node_group_template_id, **kwargs):
             template_users += [cluster_template.name]
 
     if cluster_users or template_users:
-        raise ex.InvalidException(
+        raise ex.InvalidReferenceException(
             _("Node group template %(template)s is in use by "
               "cluster templates: %(users)s; and clusters: %(clusters)s") %
             {'template': node_group_template_id,
@@ -23,6 +23,6 @@ CONVERT_TO_TEMPLATE_SCHEMA = None
 
 def check_convert_to_template(plugin_name, version, **kwargs):
     if not plugin_base.PLUGINS.is_plugin_implements(plugin_name, 'convert'):
-        raise ex.InvalidException(
+        raise ex.InvalidReferenceException(
             _("Requested plugin '%s' doesn't support converting config files "
               "to cluster templates") % plugin_name)
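The test updates that follow all apply one rule: each ExpectedException and
each bad_req_i code is switched to what the validation now raises
(INVALID_DATA for malformed input, NOT_FOUND for missing objects,
INVALID_REFERENCE otherwise). A condensed sketch of the pattern, with a
hypothetical payload:

    with testtools.ExpectedException(ex.InvalidDataException):
        ds.check_data_source_create(bad_swift_data)  # hypothetical data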
@@ -119,7 +119,7 @@ class TestDataSourceValidation(u.ValidationTestCase):
            "type": "swift",
            "description": "incorrect url schema"
        }
-        with testtools.ExpectedException(ex.InvalidException):
+        with testtools.ExpectedException(ex.InvalidDataException):
            ds.check_data_source_create(data)
 
    @mock.patch("sahara.service.validations."
@@ -152,7 +152,7 @@ class TestDataSourceValidation(u.ValidationTestCase):
            "type": "swift",
            "description": "incorrect url schema"
        }
-        with testtools.ExpectedException(ex.InvalidException):
+        with testtools.ExpectedException(ex.InvalidDataException):
            ds.check_data_source_create(data)
 
    @mock.patch("sahara.service.validations."
@@ -167,7 +167,7 @@ class TestDataSourceValidation(u.ValidationTestCase):
            "type": "swift",
            "description": "incorrect url schema"
        }
-        with testtools.ExpectedException(ex.InvalidException):
+        with testtools.ExpectedException(ex.InvalidDataException):
            ds.check_data_source_create(data)
 
    @mock.patch("sahara.service.validations."
@@ -181,7 +181,7 @@ class TestDataSourceValidation(u.ValidationTestCase):
            "type": "hdfs",
            "description": "incorrect url schema"
        }
-        with testtools.ExpectedException(ex.InvalidException):
+        with testtools.ExpectedException(ex.InvalidDataException):
            ds.check_data_source_create(data)
 
    @mock.patch("sahara.service.validations."
@@ -232,7 +232,7 @@ class TestDataSourceValidation(u.ValidationTestCase):
            "type": "maprfs",
            "description": "incorrect url schema"
        }
-        with testtools.ExpectedException(ex.InvalidException):
+        with testtools.ExpectedException(ex.InvalidDataException):
            ds.check_data_source_create(data)
 
    @mock.patch("sahara.service.validations."
@@ -117,7 +117,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
                'hadoop_version': "1.2.1",
                'user_keypair_id': 'wrong_keypair'
            },
-            bad_req_i=(1, 'INVALID_REFERENCE',
+            bad_req_i=(1, 'NOT_FOUND',
                       "Requested keypair 'wrong_keypair' not found")
        )
 
@@ -178,7 +178,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
                'neutron_management_network': '53a36917-ab9f-4589-'
                                              '94ce-b6df85a68332'
            },
-            bad_req_i=(1, 'INVALID_REFERENCE', "Network 53a36917-ab9f-4589-"
+            bad_req_i=(1, 'NOT_FOUND', "Network 53a36917-ab9f-4589-"
                          "94ce-b6df85a68332 not found")
        )
 
@@ -228,7 +228,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
                    }
                ]
            },
-            bad_req_i=(1, 'INVALID_REFERENCE',
+            bad_req_i=(1, 'INVALID_DATA',
                       "Composite hostname long-long-cluster-name-long-long-"
                       "long-very-long-node-group-name-100.novalocal "
                       "in provisioned cluster exceeds maximum limit 64 "
@@ -333,7 +333,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
                    }
                ]
            },
-            bad_req_i=(1, 'INVALID_REFERENCE',
+            bad_req_i=(1, 'NOT_FOUND',
                       "Security group 'group3' not found")
        )
 
@@ -389,7 +389,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
                    }
                ]
            },
-            bad_req_i=(1, 'INVALID_REFERENCE',
+            bad_req_i=(1, 'NOT_FOUND',
                       "Nova availability zone 'nonexistent' not found")
        )
 
@@ -418,7 +418,7 @@ class TestClusterCreateValidation(u.ValidationTestCase):
                    }
                ]
            },
-            bad_req_i=(1, 'INVALID_REFERENCE',
+            bad_req_i=(1, 'NOT_FOUND',
                       "Cinder availability zone 'nonexistent' not found")
        )
 
@@ -551,11 +551,12 @@ class TestClusterCreateFlavorValidation(base.SaharaWithDbTestCase):
            'default_image_id': '550e8400-e29b-41d4-a716-446655440000'
        }
        for values in [data, data1]:
-            with testtools.ExpectedException(exceptions.InvalidException):
+            with testtools.ExpectedException(
+                    exceptions.NotFoundException):
                patchers = u.start_patch(False)
                try:
                    c.check_cluster_create(values)
-                except exceptions.InvalidException as e:
+                except exceptions.NotFoundException as e:
                    message = six.text_type(e).split('\n')[0]
                    self.assertEqual("Requested flavor '10' not found",
                                     message)
@@ -613,12 +614,12 @@ class TestClusterCreateFlavorValidation(base.SaharaWithDbTestCase):
            ],
            'default_image_id': '550e8400-e29b-41d4-a716-446655440000'
        }
-        with testtools.ExpectedException(exceptions.InvalidException):
+        with testtools.ExpectedException(exceptions.NotFoundException):
            try:
                patchers = u.start_patch(False)
                c.check_cluster_create(data)
                u.stop_patch(patchers)
-            except exceptions.InvalidException as e:
+            except exceptions.NotFoundException as e:
                message = six.text_type(e).split('\n')[0]
                self.assertEqual("Requested flavor '23' not found",
                                 message)
@@ -44,15 +44,16 @@ class TestScalingValidation(u.ValidationTestCase):
                               get_plugin_p=None,
                               get_cluster_p=None,
                               data=None, cluster=None,
-                               expected_message=None):
+                               expected_message=None,
+                               expected_exception=ex.InvalidReferenceException):
 
        get_cluster_p.return_value = cluster
        get_plugin_p.side_effect = _get_plugin
 
-        with testtools.ExpectedException(ex.InvalidException):
+        with testtools.ExpectedException(expected_exception):
            try:
                c_s.check_cluster_scaling(data, cluster.id)
-            except ex.InvalidException as e:
+            except expected_exception as e:
                message = six.text_type(e).split('\n')[0]
                self.assertEqual(expected_message, message)
                raise e
@@ -101,7 +102,8 @@ class TestScalingValidation(u.ValidationTestCase):
        self._assert_check_scaling(
            data=data, cluster=cluster,
            expected_message='Duplicates in node '
-                             'group names are detected')
+                             'group names are detected',
+            expected_exception=ex.InvalidDataException)
 
    @mock.patch("sahara.service.api.OPS")
    def test_check_cluster_scaling_add_ng(self, ops):
@@ -127,7 +129,8 @@ class TestScalingValidation(u.ValidationTestCase):
        self._assert_check_scaling(
            data=data, cluster=cluster,
            expected_message='Duplicates in node '
-                             'group names are detected')
+                             'group names are detected',
+            expected_exception=ex.InvalidDataException)
        data = {
            'add_node_groups': [
                {
@@ -159,7 +162,8 @@ class TestScalingValidation(u.ValidationTestCase):
            expected_message="Composite hostname test-cluster-very-"
                             "very-very-very-very-very-long-ng-name-"
                             "010.novalocal in provisioned cluster exceeds "
-                             "maximum limit 64 characters")
+                             "maximum limit 64 characters",
+            expected_exception=ex.InvalidDataException)
        u.stop_patch(patchers)
 
    @mock.patch("sahara.utils.api.request_data")
@@ -94,7 +94,7 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
                    }
                ]
            },
-            bad_req_i=(1, "INVALID_REFERENCE",
+            bad_req_i=(1, "INVALID_DATA",
                       "Duplicates in node group names are detected")
        )
 
@@ -235,7 +235,7 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
        }
        self._assert_create_object_validation(
            data=data,
-            bad_req_i=(1, 'INVALID_REFERENCE', "Network 53a36917-ab9f-4589-"
+            bad_req_i=(1, 'NOT_FOUND', "Network 53a36917-ab9f-4589-"
                          "94ce-b6df85a68332 not found")
        )
 
@@ -89,7 +89,7 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                'hadoop_version': '1.2.1',
                'node_processes': ["namenode", "namenode"]
            },
-            bad_req_i=(1, 'INVALID_REFERENCE',
+            bad_req_i=(1, 'INVALID_DATA',
                       'Duplicates in node processes have been detected')
        )
        self._assert_create_object_validation(
@@ -188,7 +188,7 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                'hadoop_version': '1.2.1',
                'node_processes': ['namenode']
            },
-            bad_req_i=(1, 'INVALID_REFERENCE',
+            bad_req_i=(1, 'NOT_FOUND',
                       "Requested flavor '1' not found")
        )
 
@@ -289,6 +289,6 @@ class TestNGTemplateCreateValidation(u.ValidationTestCase):
                'node_processes': ['datanode', 'tasktracker'],
                'floating_ip_pool': 'network_bad'
            },
-            bad_req_i=(1, 'INVALID_REFERENCE', "Floating IP pool network_bad "
-                          "for node group 'a' not found")
+            bad_req_i=(1, 'NOT_FOUND', "Floating IP pool network_bad "
+                          "not found")
        )