Remove vanilla 2.6.0 code
Since the vanilla 2.6.0 plugin is deprecated, remove its code.

Change-Id: Ib8756d5083a5630d6f746d4ab79251d105cf4116
Partially-Implements: bp deprecate-plugin-vanilla2.6.0
parent 84f727e084
commit 73fb2ca4f6
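For context: before this removal, the 2.6.0 version handler had already been gated behind a deprecation error, so every cluster operation on a 2.6.0 cluster failed validation. A minimal sketch of that gate, condensed from the removed version handler near the end of this diff (not the full class):

    # Condensed from the removed version handler below; simplified sketch.
    from sahara import exceptions as ex
    from sahara.i18n import _

    class VersionHandler(object):
        def validate(self, cluster):
            # Any attempt to use the deprecated version fails validation.
            raise ex.DeprecatedException(
                _("The vanilla 2.6.0 plugin is now deprecated and will be"
                  " removed in M release. The vanilla 2.7.1 plugin remains and "
                  " continues to be supported."))

With that deprecation cycle complete, this commit deletes the 2.6.0 default templates, config helper, EDP engine, XML resources, and the version handler itself, and repoints the affected tests at 2.7.1.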
@@ -0,0 +1,3 @@
+---
+deprecations:
+  - Removed support of Vanilla 2.6.0 plugin.
@@ -1,25 +0,0 @@
-{
-    "plugin_name": "vanilla",
-    "hadoop_version": "2.6.0",
-    "node_groups": [
-        {
-            "name": "worker",
-            "count": 3,
-            "node_group_template_id": "{vanilla-260-default-worker}"
-        },
-        {
-            "name": "secondary-master",
-            "count": 1,
-            "node_group_template_id": "{vanilla-260-default-secondary-master}"
-        },
-        {
-            "name": "master",
-            "count": 1,
-            "node_group_template_id": "{vanilla-260-default-master}"
-        }
-    ],
-    "name": "vanilla-260-default-cluster",
-    "neutron_management_network": "{neutron_management_network}",
-    "cluster_configs": {},
-    "is_protected": true
-}
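The brace-wrapped tokens in this template ({vanilla-260-default-worker}, {neutron_management_network}, and so on) are placeholders that get substituted with real IDs before the JSON is sent to the API. A purely illustrative sketch of that substitution step (fill_template and the sample value are hypothetical, not Sahara code):

    import json

    # Hypothetical helper: replace each {key} token, then parse the JSON.
    def fill_template(text, values):
        for key, value in values.items():
            text = text.replace('{%s}' % key, value)
        return json.loads(text)

    snippet = '{"node_group_template_id": "{vanilla-260-default-worker}"}'
    print(fill_template(snippet, {'vanilla-260-default-worker': 'some-uuid'}))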
@@ -1,15 +0,0 @@
-{
-    "plugin_name": "vanilla",
-    "hadoop_version": "2.6.0",
-    "node_processes": [
-        "namenode",
-        "resourcemanager",
-        "hiveserver"
-    ],
-    "name": "vanilla-260-default-master",
-    "floating_ip_pool": "{floating_ip_pool}",
-    "flavor_id": "{flavor_id}",
-    "auto_security_group": "{auto_security_group}",
-    "security_groups": "{security_groups}",
-    "is_protected": true
-}
@@ -1,15 +0,0 @@
-{
-    "plugin_name": "vanilla",
-    "hadoop_version": "2.6.0",
-    "node_processes": [
-        "secondarynamenode",
-        "oozie",
-        "historyserver"
-    ],
-    "name": "vanilla-260-default-secondary-master",
-    "floating_ip_pool": "{floating_ip_pool}",
-    "flavor_id": "{flavor_id}",
-    "auto_security_group": "{auto_security_group}",
-    "security_groups": "{security_groups}",
-    "is_protected": true
-}
@@ -1,14 +0,0 @@
-{
-    "plugin_name": "vanilla",
-    "hadoop_version": "2.6.0",
-    "node_processes": [
-        "nodemanager",
-        "datanode"
-    ],
-    "name": "vanilla-260-default-worker",
-    "floating_ip_pool": "{floating_ip_pool}",
-    "flavor_id": "{flavor_id}",
-    "auto_security_group": "{auto_security_group}",
-    "security_groups": "{security_groups}",
-    "is_protected": true
-}
@@ -1,98 +0,0 @@
-# Copyright (c) 2014 Mirantis Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from oslo_config import cfg
-from oslo_log import log as logging
-
-from sahara.plugins.vanilla.hadoop2 import config_helper as c_helper
-from sahara.utils import xmlutils as x
-
-CONF = cfg.CONF
-CONF.import_opt("enable_data_locality", "sahara.topology.topology_helper")
-
-LOG = logging.getLogger(__name__)
-
-CORE_DEFAULT = x.load_hadoop_xml_defaults(
-    'plugins/vanilla/v2_6_0/resources/core-default.xml')
-
-HDFS_DEFAULT = x.load_hadoop_xml_defaults(
-    'plugins/vanilla/v2_6_0/resources/hdfs-default.xml')
-
-MAPRED_DEFAULT = x.load_hadoop_xml_defaults(
-    'plugins/vanilla/v2_6_0/resources/mapred-default.xml')
-
-YARN_DEFAULT = x.load_hadoop_xml_defaults(
-    'plugins/vanilla/v2_6_0/resources/yarn-default.xml')
-
-OOZIE_DEFAULT = x.load_hadoop_xml_defaults(
-    'plugins/vanilla/v2_6_0/resources/oozie-default.xml')
-
-HIVE_DEFAULT = x.load_hadoop_xml_defaults(
-    'plugins/vanilla/v2_6_0/resources/hive-default.xml')
-
-XML_CONFS = {
-    "Hadoop": [CORE_DEFAULT],
-    "HDFS": [HDFS_DEFAULT],
-    "YARN": [YARN_DEFAULT],
-    "MapReduce": [MAPRED_DEFAULT],
-    "JobFlow": [OOZIE_DEFAULT],
-    "Hive": [HIVE_DEFAULT]
-}
-
-ENV_CONFS = {
-    "YARN": {
-        'ResourceManager Heap Size': 1024,
-        'NodeManager Heap Size': 1024
-    },
-    "HDFS": {
-        'NameNode Heap Size': 1024,
-        'SecondaryNameNode Heap Size': 1024,
-        'DataNode Heap Size': 1024
-    },
-    "MapReduce": {
-        'JobHistoryServer Heap Size': 1024
-    },
-    "JobFlow": {
-        'Oozie Heap Size': 1024
-    }
-}
-
-
-# Initialise plugin Hadoop configurations
-PLUGIN_XML_CONFIGS = c_helper.init_xml_configs(XML_CONFS)
-PLUGIN_ENV_CONFIGS = c_helper.init_env_configs(ENV_CONFS)
-
-
-def _init_all_configs():
-    configs = []
-    configs.extend(PLUGIN_XML_CONFIGS)
-    configs.extend(PLUGIN_ENV_CONFIGS)
-    configs.extend(c_helper.PLUGIN_GENERAL_CONFIGS)
-    return configs
-
-
-PLUGIN_CONFIGS = _init_all_configs()
-
-
-def get_plugin_configs():
-    return PLUGIN_CONFIGS
-
-
-def get_xml_configs():
-    return PLUGIN_XML_CONFIGS
-
-
-def get_env_configs():
-    return ENV_CONFS
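load_hadoop_xml_defaults above parses a stock Hadoop *-default.xml and turns each <property> element into a config entry the plugin can expose. A rough re-implementation of the idea using only the standard library (illustrative; the real code lives in sahara.utils.xmlutils):

    import xml.etree.ElementTree as ET

    # Illustrative: Hadoop default files are <configuration> documents whose
    # <property> children carry <name>/<value> pairs.
    def load_xml_defaults(path):
        configs = []
        for prop in ET.parse(path).getroot().findall('property'):
            name = prop.findtext('name')
            if name:
                configs.append({'name': name, 'value': prop.findtext('value')})
        return configs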
@@ -1,35 +0,0 @@
-# Copyright (c) 2015 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from sahara.plugins.vanilla import confighints_helper as ch_helper
-from sahara.plugins.vanilla.hadoop2 import edp_engine
-from sahara.utils import edp
-
-
-class EdpOozieEngine(edp_engine.EdpOozieEngine):
-    @staticmethod
-    def get_possible_job_config(job_type):
-        if edp.compare_job_type(job_type, edp.JOB_TYPE_HIVE):
-            return {'job_config': ch_helper.get_possible_hive_config_from(
-                'plugins/vanilla/v2_6_0/resources/hive-default.xml')}
-        if edp.compare_job_type(job_type,
-                                edp.JOB_TYPE_MAPREDUCE,
-                                edp.JOB_TYPE_MAPREDUCE_STREAMING):
-            return {'job_config': ch_helper.get_possible_mapreduce_config_from(
-                'plugins/vanilla/v2_6_0/resources/mapred-default.xml')}
-        if edp.compare_job_type(job_type, edp.JOB_TYPE_PIG):
-            return {'job_config': ch_helper.get_possible_pig_config_from(
-                'plugins/vanilla/v2_6_0/resources/mapred-default.xml')}
-        return edp_engine.EdpOozieEngine.get_possible_job_config(job_type)
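The engine above is how EDP (job execution) gets per-job-type configuration hints. An illustrative call, assuming the job-type constants in sahara.utils.edp resolve to plain strings such as 'Hive':

    # Illustrative usage; the returned dict carries parameters parsed from
    # the bundled hive-default.xml resource.
    hints = EdpOozieEngine.get_possible_job_config('Hive')
    print(hints['job_config'])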
@@ -1,28 +0,0 @@
-Apache Hadoop Configurations for Sahara
-========================================
-
-This directory contains default XML configuration files:
-
-* core-default.xml
-* hdfs-default.xml
-* mapred-default.xml
-* yarn-default.xml
-* oozie-default.xml
-* hive-default.xml
-
-These files are applied for Sahara's plugin of Apache Hadoop version 2.6.0
-
-
-Files were taken from here:
-
-* `core-default.xml <https://github.com/apache/hadoop/blob/release-2.6.0/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml>`_
-* `hdfs-default.xml <https://github.com/apache/hadoop/blob/release-2.6.0/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml>`_
-* `yarn-default.xml <https://github.com/apache/hadoop/blob/release-2.6.0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml>`_
-* `mapred-default.xml <https://github.com/apache/hadoop/blob/release-2.6.0/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml>`_
-* `oozie-default.xml <https://github.com/apache/oozie/blob/release-4.0.1/core/src/main/resources/oozie-default.xml>`_
-* `hive-default.xml <https://github.com/apache/hive/blob/release-0.11.0/conf/hive-default.xml.template>`_
-
-XML configs are used to expose default Hadoop configurations to the users
-through Sahara's REST API. It allows users to override some config values which
-will be pushed to the provisioned VMs running Hadoop services as part of
-appropriate xml config.
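As the README says, the exposed defaults exist so a user can override individual values at cluster-creation time. A hedged sketch of what such an override could look like in a request body, mirroring the "cluster_configs": {} field of the default templates above (the section and parameter names here are examples, not a fixed schema):

    # Sketch of a cluster-create body overriding one exposed HDFS default.
    cluster_request = {
        "plugin_name": "vanilla",
        "hadoop_version": "2.7.1",
        "name": "demo-cluster",
        "cluster_configs": {
            "HDFS": {
                "dfs.replication": 2,  # pushed into hdfs-site.xml on the VMs
            },
        },
    }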
[Six file diffs suppressed because they are too large: the default XML resource files (core, hdfs, mapred, yarn, oozie, hive) removed along with the plugin.]
@@ -1,149 +0,0 @@
-# Copyright (c) 2014 Mirantis Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from oslo_config import cfg
-from oslo_log import log as logging
-
-from sahara import conductor
-from sahara import context
-from sahara import exceptions as ex
-from sahara.i18n import _
-from sahara.plugins import utils
-from sahara.plugins.vanilla import abstractversionhandler as avm
-from sahara.plugins.vanilla.hadoop2 import config as c
-from sahara.plugins.vanilla.hadoop2 import recommendations_utils as ru
-from sahara.plugins.vanilla.hadoop2 import run_scripts as run
-from sahara.plugins.vanilla.hadoop2 import scaling as sc
-from sahara.plugins.vanilla.hadoop2 import starting_scripts as s_scripts
-from sahara.plugins.vanilla.hadoop2 import validation as vl
-from sahara.plugins.vanilla import utils as vu
-from sahara.plugins.vanilla.v2_6_0 import config_helper as c_helper
-from sahara.plugins.vanilla.v2_6_0 import edp_engine
-from sahara.swift import swift_helper
-from sahara.utils import cluster as cluster_utils
-
-
-conductor = conductor.API
-LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-
-
-class VersionHandler(avm.AbstractVersionHandler):
-    def __init__(self):
-        self.pctx = {
-            'env_confs': c_helper.get_env_configs(),
-            'all_confs': c_helper.get_plugin_configs()
-        }
-
-    def get_plugin_configs(self):
-        return self.pctx['all_confs']
-
-    def get_node_processes(self):
-        return {
-            "Hadoop": [],
-            "MapReduce": ["historyserver"],
-            "HDFS": ["namenode", "datanode", "secondarynamenode"],
-            "YARN": ["resourcemanager", "nodemanager"],
-            "JobFlow": ["oozie"],
-            "Hive": ["hiveserver"]
-        }
-
-    def validate(self, cluster):
-        raise ex.DeprecatedException(
-            _("The vanilla 2.6.0 plugin is now deprecated and will be removed"
-              " in M release. The vanilla 2.7.1 plugin remains and "
-              " continues to be supported."))
-
-    def update_infra(self, cluster):
-        pass
-
-    def configure_cluster(self, cluster):
-        c.configure_cluster(self.pctx, cluster)
-
-    def start_cluster(self, cluster):
-        s_scripts.start_namenode(cluster)
-        s_scripts.start_secondarynamenode(cluster)
-        s_scripts.start_resourcemanager(cluster)
-
-        run.start_dn_nm_processes(utils.get_instances(cluster))
-        run.await_datanodes(cluster)
-
-        s_scripts.start_historyserver(cluster)
-        s_scripts.start_oozie(self.pctx, cluster)
-        s_scripts.start_hiveserver(self.pctx, cluster)
-
-        swift_helper.install_ssl_certs(cluster_utils.get_instances(cluster))
-
-        self._set_cluster_info(cluster)
-
-    def decommission_nodes(self, cluster, instances):
-        sc.decommission_nodes(self.pctx, cluster, instances)
-
-    def validate_scaling(self, cluster, existing, additional):
-        vl.validate_additional_ng_scaling(cluster, additional)
-        vl.validate_existing_ng_scaling(self.pctx, cluster, existing)
-
-    def scale_cluster(self, cluster, instances):
-        sc.scale_cluster(self.pctx, cluster, instances)
-
-    def _set_cluster_info(self, cluster):
-        nn = vu.get_namenode(cluster)
-        rm = vu.get_resourcemanager(cluster)
-        hs = vu.get_historyserver(cluster)
-        oo = vu.get_oozie(cluster)
-
-        info = {}
-
-        if rm:
-            info['YARN'] = {
-                'Web UI': 'http://%s:%s' % (rm.management_ip, '8088'),
-                'ResourceManager': 'http://%s:%s' % (rm.management_ip, '8032')
-            }
-
-        if nn:
-            info['HDFS'] = {
-                'Web UI': 'http://%s:%s' % (nn.management_ip, '50070'),
-                'NameNode': 'hdfs://%s:%s' % (nn.hostname(), '9000')
-            }
-
-        if oo:
-            info['JobFlow'] = {
-                'Oozie': 'http://%s:%s' % (oo.management_ip, '11000')
-            }
-
-        if hs:
-            info['MapReduce JobHistory Server'] = {
-                'Web UI': 'http://%s:%s' % (hs.management_ip, '19888')
-            }
-
-        ctx = context.ctx()
-        conductor.cluster_update(ctx, cluster, {'info': info})
-
-    def get_edp_engine(self, cluster, job_type):
-        if job_type in edp_engine.EdpOozieEngine.get_supported_job_types():
-            return edp_engine.EdpOozieEngine(cluster)
-        return None
-
-    def get_edp_job_types(self):
-        return edp_engine.EdpOozieEngine.get_supported_job_types()
-
-    def get_edp_config_hints(self, job_type):
-        return edp_engine.EdpOozieEngine.get_possible_job_config(job_type)
-
-    def get_open_ports(self, node_group):
-        return c.get_open_ports(node_group)
-
-    def recommend_configs(self, cluster, scaling):
-        ru.recommend_configs(cluster, self.get_plugin_configs(), scaling)
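Deleting this VersionHandler is what actually drops "2.6.0" from the plugin's advertised versions: the vanilla plugin resolves one handler per version package, so removing the v2_6_0 package removes the version. A schematic of that mapping (hypothetical names, not Sahara's real version factory):

    # Hypothetical version -> handler registry; the real plugin discovers
    # handlers from its version packages (v2_7_1, ...).
    HANDLERS = {
        '2.7.1': 'sahara.plugins.vanilla.v2_7_1.versionhandler.VersionHandler',
    }

    def get_version_handler(version):
        try:
            return HANDLERS[version]
        except KeyError:
            raise ValueError('Unsupported Hadoop version: %s' % version)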
@@ -53,34 +53,34 @@ class TestUtils(base.SaharaWithDbTestCase):
             [tu.make_inst_dict('snn1', 'secondarynamenode')])
 
     def test_get_namenode(self):
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
+        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.7.1',
                                [self.ng_manager, self.ng_namenode])
         self.assertEqual('nn1', u.get_namenode(cl).instance_id)
 
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
+        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.7.1',
                                [self.ng_manager])
         self.assertIsNone(u.get_namenode(cl))
 
     def test_get_oozie(self):
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
+        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.7.1',
                                [self.ng_manager, self.ng_oozie])
         self.assertEqual('ooz1', u.get_oozie(cl).instance_id)
 
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
+        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.7.1',
                                [self.ng_manager])
         self.assertIsNone(u.get_oozie(cl))
 
     def test_get_hiveserver(self):
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
+        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.7.1',
                                [self.ng_manager, self.ng_hiveserver])
         self.assertEqual('hs1', u.get_hiveserver(cl).instance_id)
 
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
+        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.7.1',
                                [self.ng_manager])
         self.assertIsNone(u.get_hiveserver(cl))
 
     def test_get_datanodes(self):
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
+        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.7.1',
                                [self.ng_manager, self.ng_namenode,
                                 self.ng_datanode])
         datanodes = u.get_datanodes(cl)
@@ -89,16 +89,16 @@ class TestUtils(base.SaharaWithDbTestCase):
                          set([datanodes[0].instance_id,
                               datanodes[1].instance_id]))
 
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
+        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.7.1',
                                [self.ng_manager])
         self.assertEqual([], u.get_datanodes(cl))
 
     def test_get_secondarynamenodes(self):
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
+        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.7.1',
                                [self.ng_manager, self.ng_namenode,
                                 self.ng_secondarynamenode])
         self.assertEqual('snn1', u.get_secondarynamenode(cl).instance_id)
 
-        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
+        cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.7.1',
                                [self.ng_manager])
         self.assertIsNone(u.get_secondarynamenode(cl))
@@ -64,7 +64,7 @@ def create_job_binary(id, type):
     return binary
 
 
-def create_cluster(plugin_name='vanilla', hadoop_version='2.6.0'):
+def create_cluster(plugin_name='vanilla', hadoop_version='2.7.1'):
     cluster = mock.Mock()
     cluster.plugin_name = plugin_name
     cluster.hadoop_version = hadoop_version
@@ -67,7 +67,7 @@ class TestJobExecCreateValidation(u.ValidationTestCase):
         ng = tu.make_ng_dict('master', 42, ['oozie'], 1,
                              instances=[tu.make_inst_dict('id', 'name')])
         get_cluster.return_value = tu.create_cluster("cluster", "tenant1",
-                                                     "vanilla", "2.6.0", [ng])
+                                                     "vanilla", "2.7.1", [ng])
 
         self._assert_create_object_validation(
             data={
@@ -118,7 +118,7 @@ class TestJobExecCreateValidation(u.ValidationTestCase):
         ng = tu.make_ng_dict('master', 42, ['oozie'], 1,
                              instances=[tu.make_inst_dict('id', 'name')])
         get_cluster.return_value = tu.create_cluster("cluster", "tenant1",
-                                                     "vanilla", "2.6.0", [ng])
+                                                     "vanilla", "2.7.1", [ng])
 
         self._assert_create_object_validation(
            data={
@@ -163,7 +163,7 @@ class TestJobExecCreateValidation(u.ValidationTestCase):
         ng = tu.make_ng_dict('master', 42, ['namenode'], 1,
                              instances=[tu.make_inst_dict('id', 'name')])
         get_cluster.return_value = tu.create_cluster("cluster", "tenant1",
-                                                     "vanilla", "2.6.0", [ng])
+                                                     "vanilla", "2.7.1", [ng])
 
         self._assert_create_object_validation(
             data={
@@ -206,7 +206,7 @@ class TestJobExecCreateValidation(u.ValidationTestCase):
         ng = tu.make_ng_dict('master', 42, ['namenode', 'oozie'], 1,
                              instances=[tu.make_inst_dict('id', 'name')])
         cluster_get.return_value = tu.create_cluster("cluster", "tenant1",
-                                                     "vanilla", "2.6.0", [ng])
+                                                     "vanilla", "2.7.1", [ng])
 
         self._assert_create_object_validation(
             data={