Corrected a number of pep8 errors

 * E128 continuation line under-indented for visual indent
 * F402 import 'u' from line 23 shadowed by loop variable
 * H202 assertRaises Exception too broad
 * H305 imports not grouped correctly (re: stdlib, six: third-party)
 * H307 like imports should be grouped together (six and oslo.config.cfg from third-party are separated by whitespace)
 * H405 multi line docstring summary not separated with an empty line
 * H904 Wrap long lines in parentheses instead of a backslash

Change-Id: If2eb58b9a54198e7b9e81e213742f5759515ef8f
Vitaly Gridnev 2014-06-23 13:45:13 +04:00
parent 6d7450eb8c
commit 221168c4b0
35 changed files with 324 additions and 283 deletions
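
Most of the hunks below are mechanical H904 fixes: long lines that were previously continued with a trailing backslash are rewrapped in parentheses instead. For illustration, a minimal standalone sketch of the before/after pattern (the function and argument names are made up, not taken from the sahara tree):

# Before (H904): a trailing backslash is fragile -- whitespace typed
# after it silently breaks the continuation.
def build_url_old(host, cluster):
    return 'http://{0}/api/v1/clusters/{1}'\
        .format(host, cluster)


# After: implicit continuation inside parentheses, as H904 prefers.
def build_url_new(host, cluster):
    return ('http://{0}/api/v1/clusters/{1}'
            .format(host, cluster))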

View File

@ -155,8 +155,8 @@ class ConductorManager(db_base.Base):
c_tmpl.get('cluster_configs'),
values.get('cluster_configs'))
merged_values['node_groups'] = \
self._populate_node_groups(context, merged_values)
merged_values['node_groups'] = self._populate_node_groups(
context, merged_values)
return self.db.cluster_create(context, merged_values)

View File

@ -83,8 +83,8 @@ class Context(object):
}
def is_auth_capable(self):
return self.service_catalog and self.token and self.tenant_id and \
self.user_id
return (self.service_catalog and self.token and self.tenant_id and
self.user_id)
def get_admin_context():

View File

@ -492,8 +492,8 @@ def data_source_destroy(context, data_source_id):
"Data Source id '%s' not found!")
session.delete(data_source)
except db_exc.DBError as e:
msg = "foreign key constraint" in six.text_type(e) and\
" on foreign key constraint" or ""
msg = ("foreign key constraint" in six.text_type(e) and
" on foreign key constraint" or "")
raise ex.DeletionFailed("Data Source deletion failed%s" % msg)
# JobExecution ops
@ -629,8 +629,8 @@ def job_destroy(context, job_id):
"Job id '%s' not found!")
session.delete(job)
except db_exc.DBError as e:
msg = "foreign key constraint" in six.text_type(e) and\
" on foreign key constraint" or ""
msg = ("foreign key constraint" in six.text_type(e) and
" on foreign key constraint" or "")
raise ex.DeletionFailed("Job deletion failed%s" % msg)

View File

@ -111,8 +111,8 @@ class InvalidDataException(SaharaException):
class BadJobBinaryInternalException(SaharaException):
message = "Job binary internal data must be a string of length " \
"greater than zero"
message = ("Job binary internal data must be a string of length "
"greater than zero")
def __init__(self, message=None):
if message:
@ -121,8 +121,8 @@ class BadJobBinaryInternalException(SaharaException):
class BadJobBinaryException(SaharaException):
message = "To work with JobBinary located in internal swift add 'user'" \
" and 'password' to extra"
message = ("To work with JobBinary located in internal swift add 'user'"
" and 'password' to extra")
def __init__(self, message=None):
if message:

View File

@ -43,11 +43,11 @@ class RequiredServiceMissingException(e.SaharaException):
"""Exception indicating that a required service has not been deployed."""
def __init__(self, service_name, required_by=None):
self.message = 'Cluster is missing a service: %s'\
% service_name
self.message = ('Cluster is missing a service: %s'
% service_name)
if required_by:
self.message = '%s, required by service: %s'\
% (self.message, required_by)
self.message = ('%s, required by service: %s'
% (self.message, required_by))
self.code = 'MISSING_SERVICE'

View File

@ -206,18 +206,18 @@ class AmbariPlugin(p.ProvisioningPluginBase):
is_admin_provided = False
admin_user = ambari_info.user
admin_password = ambari_info.password
for u in service.users:
if u.name == 'admin':
for user in service.users:
if user.name == 'admin':
ambari_client.update_ambari_admin_user(
u.password, ambari_info)
user.password, ambari_info)
is_admin_provided = True
ambari_info.user = 'admin'
ambari_info.password = u.password
ambari_info.password = user.password
else:
ambari_client.add_ambari_user(u, ambari_info)
if 'admin' in u.groups:
admin_user = u.name
admin_password = u.password
ambari_client.add_ambari_user(user, ambari_info)
if 'admin' in user.groups:
admin_user = user.name
admin_password = user.password
if not is_admin_provided:
if admin_user is None:
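
The rename of the loop variable from u to user above is the F402 fix: this file imports a module under the short alias u (that is what the warning "import 'u' from line 23 shadowed by loop variable" points at), so a loop variable also named u hides that import for the rest of the function. A self-contained sketch of the same failure mode, using a standard-library module in place of sahara's (all names here are illustrative):

import string as s  # stands in for an "import ... as u" style alias


def shadowed(words):
    # F402: the loop variable 's' shadows the module imported as 's',
    # so the attribute lookup below hits a plain str, not the module.
    for s in words:
        pass
    return s.ascii_uppercase  # AttributeError for any non-empty 'words'


def fixed(words):
    # Renaming the loop variable (as the hunk renames 'u' to 'user')
    # keeps the module alias usable.
    for word in words:
        pass
    return s.ascii_uppercase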

View File

@ -270,8 +270,8 @@ class NormalizedClusterConfig():
self.hadoop_version = cluster_spec.version
self.cluster_configs = []
self.node_groups = []
self.handler = vhf.VersionHandlerFactory.get_instance().\
get_version_handler(self.hadoop_version)
self.handler = (vhf.VersionHandlerFactory.get_instance().
get_version_handler(self.hadoop_version))
self._parse_configurations(cluster_spec.configurations)
self._parse_node_groups(cluster_spec.node_groups)

View File

@ -63,6 +63,6 @@ class ConfigurationProvider:
raise exceptions.InvalidDataException(
'Internal Error. Duplicate property '
'name detected: %s' % property_name)
self.config_mapper[service_property['name']] = \
self.config_mapper[service_property['name']] = (
self._get_target(
service_property['applicable_target'])
service_property['applicable_target']))

View File

@ -21,16 +21,16 @@ from sahara.plugins.hdp import saharautils
from sahara.utils import files as f
AMBARI_RPM = 'http://s3.amazonaws.com/public-repo-1.hortonworks.com/' \
'ambari/centos6/1.x/updates/1.6.0/ambari.repo'
AMBARI_RPM = ('http://s3.amazonaws.com/public-repo-1.hortonworks.com/'
'ambari/centos6/1.x/updates/1.6.0/ambari.repo')
EPEL_RELEASE_PACKAGE_NAME = 'epel-release'
HADOOP_SWIFT_RPM = 'https://s3.amazonaws.com/public-repo-1.hortonworks.com/' \
'sahara/swift/hadoop-swift-1.0-1.x86_64.rpm'
HADOOP_SWIFT_RPM = ('https://s3.amazonaws.com/public-repo-1.hortonworks.com/'
'sahara/swift/hadoop-swift-1.0-1.x86_64.rpm')
HADOOP_SWIFT_LOCAL_RPM = '/opt/hdp-local-repos/hadoop-swift/' \
'hadoop-swift-1.0-1.x86_64.rpm'
HADOOP_SWIFT_LOCAL_RPM = ('/opt/hdp-local-repos/hadoop-swift/'
'hadoop-swift-1.0-1.x86_64.rpm')
LOG = logging.getLogger(__name__)
@ -67,8 +67,8 @@ class HadoopServer:
"{0}: Installing rpm's ...".format(self.instance.hostname()))
# TODO(jspeidel): based on image type, use correct command
curl_cmd = 'curl -f -s -o /etc/yum.repos.d/ambari.repo %s' % \
self.ambari_rpm
curl_cmd = ('curl -f -s -o /etc/yum.repos.d/ambari.repo %s' %
self.ambari_rpm)
ret_code, stdout = r.execute_command(curl_cmd,
run_as_root=True,
raise_when_error=False)

View File

@ -260,8 +260,8 @@ class MapReduceService(Service):
# add HISTORYSERVER, since HDP 1.3.2 stack was
# modified in Ambari 1.5.1/1.6.0 to include this component
# in the MAPREDUCE service
ambari_server_ngs = \
cluster_spec.get_node_groups_containing_component('JOBTRACKER')
ambari_server_ngs = (
cluster_spec.get_node_groups_containing_component('JOBTRACKER'))
for ng in ambari_server_ngs:
if 'HISTORYSERVER' not in ng.components:
ng.components.append('HISTORYSERVER')
@ -321,22 +321,22 @@ class HiveService(Service):
{'global': ['hive_jdbc_connection_url']})
def register_user_input_handlers(self, ui_handlers):
ui_handlers['hive-site/javax.jdo.option.ConnectionUserName'] =\
self._handle_user_property_metastore_user
ui_handlers['hive-site/javax.jdo.option.ConnectionPassword'] = \
self._handle_user_property_metastore_pwd
ui_handlers['hive-site/javax.jdo.option.ConnectionUserName'] = (
self._handle_user_property_metastore_user)
ui_handlers['hive-site/javax.jdo.option.ConnectionPassword'] = (
self._handle_user_property_metastore_pwd)
def _handle_user_property_metastore_user(self, user_input, configurations):
hive_site_config_map = configurations['hive-site']
hive_site_config_map['javax.jdo.option.ConnectionUserName'] = \
user_input.value
hive_site_config_map['javax.jdo.option.ConnectionUserName'] = (
user_input.value)
global_config_map = configurations['global']
global_config_map['hive_metastore_user_name'] = user_input.value
def _handle_user_property_metastore_pwd(self, user_input, configurations):
hive_site_config_map = configurations['hive-site']
hive_site_config_map['javax.jdo.option.ConnectionPassword'] = \
user_input.value
hive_site_config_map['javax.jdo.option.ConnectionPassword'] = (
user_input.value)
global_config_map = configurations['global']
global_config_map['hive_metastore_user_passwd'] = user_input.value
@ -550,11 +550,11 @@ class HBaseService(Service):
def register_user_input_handlers(self, ui_handlers):
for prop_name in self.property_map:
ui_handlers[prop_name] = \
self._handle_config_property_update
ui_handlers[prop_name] = (
self._handle_config_property_update)
ui_handlers['hbase-site/hbase.rootdir'] = \
self._handle_user_property_root_dir
ui_handlers['hbase-site/hbase.rootdir'] = (
self._handle_user_property_root_dir)
def _handle_config_property_update(self, user_input, configurations):
self._update_config_values(configurations, user_input.value,
@ -675,22 +675,22 @@ class OozieService(Service):
return url_info
def register_user_input_handlers(self, ui_handlers):
ui_handlers['oozie-site/oozie.service.JPAService.jdbc.username'] = \
self._handle_user_property_db_user
ui_handlers['oozie.service.JPAService.jdbc.password'] = \
self._handle_user_property_db_pwd
ui_handlers['oozie-site/oozie.service.JPAService.jdbc.username'] = (
self._handle_user_property_db_user)
ui_handlers['oozie.service.JPAService.jdbc.password'] = (
self._handle_user_property_db_pwd)
def _handle_user_property_db_user(self, user_input, configurations):
oozie_site_config_map = configurations['oozie-site']
oozie_site_config_map['oozie.service.JPAService.jdbc.username'] = \
user_input.value
oozie_site_config_map['oozie.service.JPAService.jdbc.username'] = (
user_input.value)
global_config_map = configurations['global']
global_config_map['oozie_metastore_user_name'] = user_input.value
def _handle_user_property_db_pwd(self, user_input, configurations):
oozie_site_config_map = configurations['oozie-site']
oozie_site_config_map['oozie.service.JPAService.jdbc.password'] = \
user_input.value
oozie_site_config_map['oozie.service.JPAService.jdbc.password'] = (
user_input.value)
global_config_map = configurations['global']
global_config_map['oozie_metastore_user_passwd'] = user_input.value
@ -749,10 +749,10 @@ class AmbariService(Service):
return component.name != 'AMBARI_AGENT'
def register_user_input_handlers(self, ui_handlers):
ui_handlers['ambari-stack/ambari.admin.user'] =\
self._handle_user_property_admin_user
ui_handlers['ambari-stack/ambari.admin.password'] =\
self._handle_user_property_admin_password
ui_handlers['ambari-stack/ambari.admin.user'] = (
self._handle_user_property_admin_user)
ui_handlers['ambari-stack/ambari.admin.password'] = (
self._handle_user_property_admin_password)
def is_mandatory(self):
return True

View File

@ -45,7 +45,8 @@ class VersionHandler(avm.AbstractVersionHandler):
def _get_config_provider(self):
if self.config_provider is None:
self.config_provider = cfgprov.ConfigurationProvider(
json.load(pkg.resource_stream(version.version_info.package,
json.load(pkg.resource_stream(
version.version_info.package,
'plugins/hdp/versions/version_1_3_2/resources/'
'ambari-config-resource.json')))
@ -171,9 +172,9 @@ class AmbariClient():
def _add_configurations_to_cluster(
self, cluster_spec, ambari_info, name):
existing_config_url = 'http://{0}/api/v1/clusters/{1}?fields=' \
'Clusters/desired_configs'.format(
ambari_info.get_address(), name)
existing_config_url = ('http://{0}/api/v1/clusters/{1}?fields='
'Clusters/desired_configs'.format(
ambari_info.get_address(), name))
result = self._get(existing_config_url, ambari_info)
@ -197,8 +198,8 @@ class AmbariClient():
for config_name in configs:
if config_name in existing_configs:
if config_name == 'core-site' or config_name == 'global':
existing_version = existing_configs[config_name]['tag']\
.lstrip('v')
existing_version = (
existing_configs[config_name]['tag'].lstrip('v'))
version = int(existing_version) + 1
else:
continue
@ -207,8 +208,8 @@ class AmbariClient():
clusters['desired_config'] = config_body
config_body['type'] = config_name
config_body['tag'] = 'v%s' % version
config_body['properties'] = \
cluster_spec.configurations[config_name]
config_body['properties'] = (
cluster_spec.configurations[config_name])
result = self._put(config_url, ambari_info, data=json.dumps(body))
if result.status_code != 200:
LOG.error(
@ -234,8 +235,8 @@ class AmbariClient():
'Failed to add services to cluster: %s' % result.text)
def _add_components_to_services(self, cluster_spec, ambari_info, name):
add_component_url = 'http://{0}/api/v1/clusters/{1}/services/{'\
'2}/components/{3}'
add_component_url = ('http://{0}/api/v1/clusters/{1}/services/{'
'2}/components/{3}')
for service in cluster_spec.services:
if service.deployed and service.name != 'AMBARI':
for component in service.components:
@ -255,8 +256,8 @@ class AmbariClient():
self, cluster_spec, servers, ambari_info, name):
add_host_url = 'http://{0}/api/v1/clusters/{1}/hosts/{2}'
add_host_component_url = 'http://{0}/api/v1/clusters/{1}' \
'/hosts/{2}/host_components/{3}'
add_host_component_url = ('http://{0}/api/v1/clusters/{1}'
'/hosts/{2}/host_components/{3}')
for host in servers:
hostname = host.instance.fqdn().lower()
result = self._post(
@ -288,11 +289,11 @@ class AmbariClient():
LOG.info('Installing required Hadoop services ...')
ambari_address = ambari_info.get_address()
install_url = 'http://{0}/api/v1/clusters/{' \
'1}/services?ServiceInfo/state=INIT'.format(
ambari_address, cluster_name)
body = '{"RequestInfo" : { "context" : "Install all services" },'\
'"Body" : {"ServiceInfo": {"state" : "INSTALLED"}}}'
install_url = ('http://{0}/api/v1/clusters/{'
'1}/services?ServiceInfo/state=INIT'.format(
ambari_address, cluster_name))
body = ('{"RequestInfo" : { "context" : "Install all services" },'
'"Body" : {"ServiceInfo": {"state" : "INSTALLED"}}}')
result = self._put(install_url, ambari_info, data=body)
@ -316,10 +317,10 @@ class AmbariClient():
'Installation of Hadoop stack failed.')
def _get_async_request_uri(self, ambari_info, cluster_name, request_id):
return 'http://{0}/api/v1/clusters/{1}/requests/{' \
'2}/tasks?fields=Tasks/status'.format(
ambari_info.get_address(), cluster_name,
request_id)
return ('http://{0}/api/v1/clusters/{1}/requests/{'
'2}/tasks?fields=Tasks/status'.format(
ambari_info.get_address(), cluster_name,
request_id))
def _wait_for_async_request(self, request_url, ambari_info):
started = False
@ -347,8 +348,8 @@ class AmbariClient():
ambari_info.get_address())
# this post data has non-standard format because persist
# resource doesn't comply with Ambari API standards
persist_data = '{ "CLUSTER_CURRENT_STATUS":' \
'"{\\"clusterState\\":\\"CLUSTER_STARTED_5\\"}" }'
persist_data = ('{ "CLUSTER_CURRENT_STATUS":'
'"{\\"clusterState\\":\\"CLUSTER_STARTED_5\\"}" }')
result = self._post(persist_state_uri, ambari_info, data=persist_data)
if result.status_code != 201 and result.status_code != 202:
@ -360,11 +361,11 @@ class AmbariClient():
LOG.info('Starting Hadoop services ...')
LOG.info('Cluster name: {0}, Ambari server address: {1}'
.format(cluster_name, ambari_info.get_address()))
start_url = 'http://{0}/api/v1/clusters/{1}/services?ServiceInfo/' \
'state=INSTALLED'.format(
ambari_info.get_address(), cluster_name)
body = '{"RequestInfo" : { "context" : "Start all services" },'\
'"Body" : {"ServiceInfo": {"state" : "STARTED"}}}'
start_url = ('http://{0}/api/v1/clusters/{1}/services?ServiceInfo/'
'state=INSTALLED'.format(
ambari_info.get_address(), cluster_name))
body = ('{"RequestInfo" : { "context" : "Start all services" },'
'"Body" : {"ServiceInfo": {"state" : "STARTED"}}}')
self._fire_service_start_notifications(
cluster_name, cluster_spec, ambari_info)
@ -437,22 +438,22 @@ class AmbariClient():
# INIT state
# TODO(jspeidel): provide request context
body = '{"HostRoles": {"state" : "INSTALLED"}}'
install_uri = 'http://{0}/api/v1/clusters/{' \
'1}/host_components?HostRoles/state=INIT&' \
'HostRoles/host_name.in({2})'.format(
ambari_info.get_address(), cluster_name,
self._get_host_list(servers))
install_uri = ('http://{0}/api/v1/clusters/{'
'1}/host_components?HostRoles/state=INIT&'
'HostRoles/host_name.in({2})'.format(
ambari_info.get_address(), cluster_name,
self._get_host_list(servers)))
self._exec_ambari_command(ambari_info, body, install_uri)
def _start_components(self, ambari_info, auth, cluster_name, servers,
cluster_spec):
# query for all the host components in the INSTALLED state,
# then get a list of the client services in the list
installed_uri = 'http://{0}/api/v1/clusters/{'\
'1}/host_components?HostRoles/state=INSTALLED&'\
'HostRoles/host_name.in({2})'.format(
ambari_info.get_address(), cluster_name,
self._get_host_list(servers))
installed_uri = ('http://{0}/api/v1/clusters/{'
'1}/host_components?HostRoles/state=INSTALLED&'
'HostRoles/host_name.in({2})'.format(
ambari_info.get_address(), cluster_name,
self._get_host_list(servers)))
result = self._get(installed_uri, ambari_info)
if result.status_code == 200:
LOG.debug(
@ -470,13 +471,13 @@ class AmbariClient():
# hosts
# TODO(jspeidel): Provide request context
body = '{"HostRoles": {"state" : "STARTED"}}'
start_uri = 'http://{0}/api/v1/clusters/{'\
'1}/host_components?HostRoles/state=INSTALLED&'\
'HostRoles/host_name.in({2})'\
'&HostRoles/component_name.in({3})'.format(
ambari_info.get_address(), cluster_name,
self._get_host_list(servers),
",".join(inclusion_list))
start_uri = ('http://{0}/api/v1/clusters/{'
'1}/host_components?HostRoles/state=INSTALLED&'
'HostRoles/host_name.in({2})'
'&HostRoles/component_name.in({3})'.format(
ambari_info.get_address(), cluster_name,
self._get_host_list(servers),
",".join(inclusion_list)))
self._exec_ambari_command(ambari_info, body, start_uri)
else:
raise ex.HadoopProvisionError(
@ -512,8 +513,8 @@ class AmbariClient():
old_pwd = ambari_info.password
user_url = 'http://{0}/api/v1/users/admin'.format(
ambari_info.get_address())
update_body = '{{"Users":{{"roles":"admin","password":"{0}",' \
'"old_password":"{1}"}} }}'.format(password, old_pwd)
update_body = ('{{"Users":{{"roles":"admin","password":"{0}",'
'"old_password":"{1}"}} }}'.format(password, old_pwd))
result = self._put(user_url, ambari_info, data=update_body)
@ -526,8 +527,9 @@ class AmbariClient():
user_url = 'http://{0}/api/v1/users/{1}'.format(
ambari_info.get_address(), user.name)
create_body = '{{"Users":{{"password":"{0}","roles":"{1}"}} }}'. \
format(user.password, '%s' % ','.join(map(str, user.groups)))
create_body = ('{{"Users":{{"password":"{0}","roles":"{1}"}} }}'.
format(user.password, '%s' %
','.join(map(str, user.groups))))
result = self._post(user_url, ambari_info, data=create_body)
@ -586,9 +588,9 @@ class AmbariClient():
ambari_info.host.remote().close_http_sessions()
def _get_services_in_state(self, cluster_name, ambari_info, state):
services_url = 'http://{0}/api/v1/clusters/{1}/services?' \
'ServiceInfo/state.in({2})'.format(
ambari_info.get_address(), cluster_name, state)
services_url = ('http://{0}/api/v1/clusters/{1}/services?'
'ServiceInfo/state.in({2})'.format(
ambari_info.get_address(), cluster_name, state))
result = self._get(services_url, ambari_info)

View File

@ -14,9 +14,9 @@
# limitations under the License.
import re
import six
from oslo.config import cfg
import six
from sahara import exceptions as e
from sahara.openstack.common import log as logging
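
The import reshuffle above is the H305/H307 fix called out in the commit message: standard-library imports (re) come first, the third-party imports (oslo.config and six) form one contiguous group with no blank line splitting them, and project imports (sahara.*) follow. A sketch of the intended layout with the groups annotated; it mirrors the hunk, so it assumes a sahara development environment where these packages are importable:

import re  # standard library group

from oslo.config import cfg  # third-party group, kept together
import six

from sahara import exceptions as e  # project imports come last
from sahara.openstack.common import log as logging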

View File

@ -15,10 +15,10 @@
import json
import logging
import requests
from oslo.config import cfg
import pkg_resources as pkg
import requests
from sahara import context
from sahara import exceptions as exc
@ -44,7 +44,8 @@ class VersionHandler(avm.AbstractVersionHandler):
def _get_config_provider(self):
if self.config_provider is None:
self.config_provider = cfgprov.ConfigurationProvider(
json.load(pkg.resource_stream(version.version_info.package,
json.load(pkg.resource_stream(
version.version_info.package,
'plugins/hdp/versions/version_2_0_6/resources/'
'ambari-config-resource.json')))
@ -262,8 +263,8 @@ class AmbariClient():
# Don't add any AMBARI or HUE components
# TODO(rlevas): Pragmatically determine if component is
# managed by Ambari
if component.find('AMBARI') != 0 \
and component.find('HUE') != 0:
if (component.find('AMBARI') != 0
and component.find('HUE') != 0):
result = self._post(add_host_component_url.format(
ambari_info.get_address(), name, hostname, component),
ambari_info)
@ -523,7 +524,7 @@ class AmbariClient():
create_body = ('{{"Users":{{"password":"{0}","roles":"{1}"}} }}'.
format(user.password, '%s' % ','.
join(map(str, user.groups))))
join(map(str, user.groups))))
result = self._post(user_url, ambari_info, data=create_body)
@ -580,9 +581,9 @@ class AmbariClient():
# template for request body
body_header = ('{"RequestInfo" : { "context": "Decommission DataNode",'
' "command" : "DECOMMISSION", "service_name" : "HDFS",'
' "component_name" : "NAMENODE", '
' "parameters" : { "slave_type" : "DATANODE", ')
' "command" : "DECOMMISSION", "service_name" : "HDFS",'
' "component_name" : "NAMENODE", '
' "parameters" : { "slave_type" : "DATANODE", ')
excluded_hosts_request = '"excluded_hosts" : "{0}"'
@ -592,7 +593,8 @@ class AmbariClient():
LOG.debug('AmbariClient: list_of_hosts = ' + list_of_hosts)
# create the request body
request_body = (body_header +
request_body = (
body_header +
excluded_hosts_request.format(list_of_hosts)
+ '}}'
+ ', "Requests/resource_filters":[{"service_name":"HDFS",'
@ -608,7 +610,8 @@ class AmbariClient():
if result.status_code != 202:
LOG.error('AmbariClient: error while making decommision post ' +
'request. Error is = ' + result.text)
raise exc.InvalidException('An error occurred while trying to ' +
raise exc.InvalidException(
'An error occurred while trying to ' +
'decommission the DataNode instances that are ' +
'being shut down. ' +
'Please consult the Ambari server logs on the ' +
@ -618,7 +621,7 @@ class AmbariClient():
LOG.info('AmbariClient: decommission post request succeeded!')
status_template = ('http://{0}/api/v1/clusters/{1}/hosts/{2}/'
'host_components/{3}')
'host_components/{3}')
# find the host that the NameNode is deployed on
name_node_host = clusterspec.determine_component_hosts(
@ -645,8 +648,8 @@ class AmbariClient():
LOG.info('AmbariClient: decommission status request ok, ' +
'result = ' + result.text)
json_result = json.loads(result.text)
live_nodes = \
json_result['metrics']['dfs']['namenode']['LiveNodes']
live_nodes = (
json_result['metrics']['dfs']['namenode']['LiveNodes'])
# parse out the map of live hosts associated with the NameNode
json_result_nodes = json.loads(live_nodes)
for node in json_result_nodes.keys():
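
Several pairs of seemingly identical lines in this hunk (the body_header and status_template strings, and the create_body join) differ only in leading whitespace, which is not preserved in this view; those are the E128 fixes, where a continuation line inside an open bracket must line up with the visual indent set on the opening line. A small illustrative sketch (all names are made up):

fmt = 'http://{0}/api/v1/clusters/{1}'
addr = '127.0.0.1:8080'
name = 'test-cluster'

# E128: the continuation line is under-indented for the visual indent
# established by the first argument on the opening line.
bad = fmt.format(addr,
    name)

# Aligned with the first argument, so no E128 warning.
ok = fmt.format(addr,
                name)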

View File

@ -31,9 +31,9 @@ class VersionHandlerFactory():
os.path.join(src_dir, name))]
VersionHandlerFactory.modules = {}
for version in VersionHandlerFactory.versions:
module_name = 'sahara.plugins.hdp.versions.version_{0}.'\
'versionhandler'.format(
version.replace('.', '_'))
module_name = ('sahara.plugins.hdp.versions.version_{0}.'
'versionhandler'.format(
version.replace('.', '_')))
module_class = getattr(
__import__(module_name, fromlist=['sahara']),
'VersionHandler')

View File

@ -191,9 +191,9 @@ def get_config_value(service, name, cluster=None):
ng.configuration()[service].get(name)):
return ng.configuration()[service][name]
for c in PLUGIN_CONFIGS:
if c.applicable_target == service and c.name == name:
return c.default_value
for configs in PLUGIN_CONFIGS:
if configs.applicable_target == service and configs.name == name:
return configs.default_value
raise RuntimeError("Unable to get parameter '%s' from service %s",
name, service)
@ -271,8 +271,8 @@ def generate_spark_env_configs(cluster):
configs.append('SPARK_MASTER_PORT=' + str(masterport))
masterwebport = get_config_value("Spark", "Master webui port", cluster)
if masterwebport and \
masterwebport != _get_spark_opt_default("Master webui port"):
if (masterwebport and
masterwebport != _get_spark_opt_default("Master webui port")):
configs.append('SPARK_MASTER_WEBUI_PORT=' + str(masterwebport))
# configuration for workers
@ -281,8 +281,8 @@ def generate_spark_env_configs(cluster):
configs.append('SPARK_WORKER_CORES=' + str(workercores))
workermemory = get_config_value("Spark", "Worker memory", cluster)
if workermemory and \
workermemory != _get_spark_opt_default("Worker memory"):
if (workermemory and
workermemory != _get_spark_opt_default("Worker memory")):
configs.append('SPARK_WORKER_MEMORY=' + str(workermemory))
workerport = get_config_value("Spark", "Worker port", cluster)
@ -290,13 +290,13 @@ def generate_spark_env_configs(cluster):
configs.append('SPARK_WORKER_PORT=' + str(workerport))
workerwebport = get_config_value("Spark", "Worker webui port", cluster)
if workerwebport and \
workerwebport != _get_spark_opt_default("Worker webui port"):
if (workerwebport and
workerwebport != _get_spark_opt_default("Worker webui port")):
configs.append('SPARK_WORKER_WEBUI_PORT=' + str(workerwebport))
workerinstances = get_config_value("Spark", "Worker instances", cluster)
if workerinstances and \
workerinstances != _get_spark_opt_default("Worker instances"):
if (workerinstances and
workerinstances != _get_spark_opt_default("Worker instances")):
configs.append('SPARK_WORKER_INSTANCES=' + str(workerinstances))
return '\n'.join(configs)

View File

@ -109,8 +109,8 @@ class SparkProvider(p.ProvisioningPluginBase):
with remote.get_remote(nn_instance) as r:
r.execute_command("sudo -u hdfs hdfs dfs -mkdir -p /user/$USER/")
r.execute_command("sudo -u hdfs hdfs dfs -chown $USER \
/user/$USER/")
r.execute_command(("sudo -u hdfs hdfs dfs -chown $USER "
"/user/$USER/"))
# start spark nodes
if sm_instance:
@ -217,21 +217,21 @@ class SparkProvider(p.ProvisioningPluginBase):
# pietro: This is required because the (secret) key is not stored in
# .ssh which hinders password-less ssh required by spark scripts
key_cmd = 'sudo cp $HOME/id_rsa $HOME/.ssh/; '\
'sudo chown $USER $HOME/.ssh/id_rsa; '\
'sudo chmod 600 $HOME/.ssh/id_rsa'
key_cmd = ('sudo cp $HOME/id_rsa $HOME/.ssh/; '
'sudo chown $USER $HOME/.ssh/id_rsa; '
'sudo chmod 600 $HOME/.ssh/id_rsa')
for ng in cluster.node_groups:
dn_path = c_helper.extract_hadoop_path(ng.storage_paths(),
'/dfs/dn')
nn_path = c_helper.extract_hadoop_path(ng.storage_paths(),
'/dfs/nn')
hdfs_dir_cmd = 'sudo mkdir -p %s %s;'\
'sudo chown -R hdfs:hadoop %s %s;'\
'sudo chmod 755 %s %s;'\
% (nn_path, dn_path,
nn_path, dn_path,
nn_path, dn_path)
hdfs_dir_cmd = (('sudo mkdir -p %s %s;'
'sudo chown -R hdfs:hadoop %s %s;'
'sudo chmod 755 %s %s;')
% (nn_path, dn_path,
nn_path, dn_path,
nn_path, dn_path))
with remote.get_remote(instance) as r:
r.execute_command(

View File

@ -39,10 +39,10 @@ def format_namenode(nn_remote):
def clean_port_hadoop(nn_remote):
nn_remote.execute_command("sudo netstat -tlnp \
| awk '/:8020 */ \
{split($NF,a,\"/\"); print a[1]}' \
| xargs sudo kill -9")
nn_remote.execute_command(("sudo netstat -tlnp"
"| awk '/:8020 */"
"{split($NF,a,\"/\"); print a[1]}'"
"| xargs sudo kill -9"))
def start_spark_master(nn_remote):

View File

@ -203,9 +203,9 @@ def get_config_value(service, name, cluster=None):
ng.configuration()[service].get(name)):
return ng.configuration()[service][name]
for c in PLUGIN_CONFIGS:
if c.applicable_target == service and c.name == name:
return c.default_value
for configs in PLUGIN_CONFIGS:
if configs.applicable_target == service and configs.name == name:
return configs.default_value
raise ex.ConfigurationError("Unable get parameter '%s' from service %s" %
(name, service))

View File

@ -288,10 +288,10 @@ class VersionHandler(avm.AbstractVersionHandler):
'authorized_keys': public_key
}
key_cmd = 'sudo mkdir -p /home/hadoop/.ssh/ && ' \
'sudo mv id_rsa authorized_keys /home/hadoop/.ssh && ' \
'sudo chown -R hadoop:hadoop /home/hadoop/.ssh && ' \
'sudo chmod 600 /home/hadoop/.ssh/{id_rsa,authorized_keys}'
key_cmd = ('sudo mkdir -p /home/hadoop/.ssh/ && '
'sudo mv id_rsa authorized_keys /home/hadoop/.ssh && '
'sudo chown -R hadoop:hadoop /home/hadoop/.ssh && '
'sudo chmod 600 /home/hadoop/.ssh/{id_rsa,authorized_keys}')
with remote.get_remote(instance) as r:
# TODO(aignatov): sudo chown is wrong solution. But it works.
@ -465,8 +465,8 @@ class VersionHandler(avm.AbstractVersionHandler):
dn_to_delete = 0
for ng in cluster.node_groups:
if ng.id in existing:
if ng.count > existing[ng.id] and "datanode" in \
ng.node_processes:
if (ng.count > existing[ng.id] and "datanode" in
ng.node_processes):
dn_to_delete += ng.count - existing[ng.id]
if not set(ng.node_processes).issubset(scalable_processes):
raise ex.NodeGroupCannotBeScaled(

View File

@ -71,8 +71,8 @@ def create_dir(r, dir_name, hdfs_user):
def _get_cluster_hosts_information(host, cluster):
for c in conductor.cluster_get_all(context.ctx()):
if c.id == cluster.id:
for clust in conductor.cluster_get_all(context.ctx()):
if clust.id == cluster.id:
continue
for i in u.get_instances(c):

View File

@ -44,11 +44,11 @@ class HDP2GatingTest(swift.SwiftTest, scaling.ScalingTest,
self.internal_neutron_net = None
if self.common_config.NEUTRON_ENABLED:
self.internal_neutron_net = self.get_internal_neutron_net_id()
self.floating_ip_pool = \
self.get_floating_ip_pool_id_for_neutron_net()
self.floating_ip_pool = (
self.get_floating_ip_pool_id_for_neutron_net())
self.hdp2_config.IMAGE_ID, self.hdp2_config.SSH_USERNAME\
= (self.get_image_id_and_ssh_username(self.hdp2_config))
self.hdp2_config.IMAGE_ID, self.hdp2_config.SSH_USERNAME = (
self.get_image_id_and_ssh_username(self.hdp2_config))
@b.errormsg("Failure while 'rm-nn' node group template creation: ")
def _create_rm_nn_ng_template(self):

View File

@ -79,8 +79,8 @@ class HDPGatingTest(cinder.CinderVolumeTest, edp.EDPTest,
except Exception as e:
with excutils.save_and_reraise_exception():
message = 'Failure while \'tt-dn\' node group ' \
'template creation: '
message = ('Failure while \'tt-dn\' node group '
'template creation: ')
self.print_error_log(message, e)
# --------------------------Cluster template creation--------------------------
@ -291,8 +291,8 @@ class HDPGatingTest(cinder.CinderVolumeTest, edp.EDPTest,
new_cluster_info['cluster_id'], cluster_template_id,
node_group_template_id_list
)
message = 'Failure while Cinder testing after cluster ' \
'scaling: '
message = ('Failure while Cinder testing after cluster '
'scaling: ')
self.print_error_log(message, e)
# ----------------------MAP REDUCE TESTING AFTER SCALING-----------------------
@ -306,8 +306,8 @@ class HDPGatingTest(cinder.CinderVolumeTest, edp.EDPTest,
new_cluster_info['cluster_id'], cluster_template_id,
node_group_template_id_list
)
message = 'Failure while Map Reduce testing after ' \
'cluster scaling: '
message = ('Failure while Map Reduce testing after '
'cluster scaling: ')
self.print_error_log(message, e)
# -------------------CHECK SWIFT AVAILABILITY AFTER SCALING--------------------
@ -321,8 +321,8 @@ class HDPGatingTest(cinder.CinderVolumeTest, edp.EDPTest,
new_cluster_info['cluster_id'], cluster_template_id,
node_group_template_id_list
)
message = 'Failure during check of Swift availability ' \
'after cluster scaling: '
message = ('Failure during check of Swift availability '
'after cluster scaling: ')
self.print_error_log(message, e)
# ---------------------------DELETE CREATED OBJECTS----------------------------

View File

@ -91,8 +91,8 @@ class VanillaGatingTest(cinder.CinderVolumeTest,
except Exception as e:
with excutils.save_and_reraise_exception():
message = 'Failure while \'tt-dn\' node group ' \
'template creation: '
message = ('Failure while \'tt-dn\' node group '
'template creation: ')
self.print_error_log(message, e)
# ----------------------"tt" node group template creation----------------------
@ -406,8 +406,8 @@ class VanillaGatingTest(cinder.CinderVolumeTest,
new_cluster_info['cluster_id'], cluster_template_id,
node_group_template_id_list
)
message = 'Failure while Cinder testing after cluster ' \
'scaling: '
message = ('Failure while Cinder testing after cluster '
'scaling: ')
self.print_error_log(message, e)
# --------------------CLUSTER CONFIG TESTING AFTER SCALING---------------------
@ -421,8 +421,8 @@ class VanillaGatingTest(cinder.CinderVolumeTest,
new_cluster_info['cluster_id'], cluster_template_id,
node_group_template_id_list
)
message = 'Failure while cluster config testing after ' \
'cluster scaling: '
message = ('Failure while cluster config testing after '
'cluster scaling: ')
self.print_error_log(message, e)
# ----------------------MAP REDUCE TESTING AFTER SCALING-----------------------
@ -436,8 +436,8 @@ class VanillaGatingTest(cinder.CinderVolumeTest,
new_cluster_info['cluster_id'], cluster_template_id,
node_group_template_id_list
)
message = 'Failure while Map Reduce testing after ' \
'cluster scaling: '
message = ('Failure while Map Reduce testing after '
'cluster scaling: ')
self.print_error_log(message, e)
# -------------------CHECK SWIFT AVAILABILITY AFTER SCALING--------------------
@ -451,8 +451,8 @@ class VanillaGatingTest(cinder.CinderVolumeTest,
new_cluster_info['cluster_id'], cluster_template_id,
node_group_template_id_list
)
message = 'Failure during check of Swift availability ' \
'after cluster scaling: '
message = ('Failure during check of Swift availability '
'after cluster scaling: ')
self.print_error_log(message, e)
# ---------------------------DELETE CREATED OBJECTS----------------------------

View File

@ -51,11 +51,12 @@ class VanillaTwoGatingTest(cluster_configs.ClusterConfigTest,
self.internal_neutron_net = None
if self.common_config.NEUTRON_ENABLED:
self.internal_neutron_net = self.get_internal_neutron_net_id()
self.floating_ip_pool = \
self.get_floating_ip_pool_id_for_neutron_net()
self.floating_ip_pool = (
self.get_floating_ip_pool_id_for_neutron_net())
self.vanilla_two_config.IMAGE_ID, self.vanilla_two_config.SSH_USERNAME\
= (self.get_image_id_and_ssh_username(self.vanilla_two_config))
(self.vanilla_two_config.IMAGE_ID,
self.vanilla_two_config.SSH_USERNAME) = (
self.get_image_id_and_ssh_username(self.vanilla_two_config))
self.volumes_per_node = 0
self.volume_size = 0

View File

@ -247,8 +247,8 @@ class BaseMigrationTestCase(testtools.TestCase):
@synchronized('pgadmin', external=True, lock_path='/tmp')
def _reset_pg(self, conn_pieces):
(user, password, database, host) = \
get_pgsql_connection_info(conn_pieces)
(user, password, database, host) = (
get_pgsql_connection_info(conn_pieces))
os.environ['PGPASSWORD'] = password
os.environ['PGUSER'] = user
# note(boris-42): We must create and drop database, we can't
@ -274,8 +274,8 @@ class BaseMigrationTestCase(testtools.TestCase):
# We can execute the MySQL client to destroy and re-create
# the MYSQL database, which is easier and less error-prone
# than using SQLAlchemy to do this via MetaData...trust me.
(user, password, database, host) = \
get_mysql_connection_info(conn_pieces)
(user, password, database, host) = (
get_mysql_connection_info(conn_pieces))
sql = ("drop database if exists %(database)s; "
"create database %(database)s;" % {'database': database})
cmd = ("mysql -u \"%(user)s\" %(password)s -h %(host)s -e \"%(sql)s\""
@ -304,16 +304,16 @@ class BaseMigrationTestCase(testtools.TestCase):
conn_pieces = urlparse.urlparse(conn_string)
if conn_string.startswith('mysql'):
(user, password, database, host) = \
get_mysql_connection_info(conn_pieces)
(user, password, database, host) = (
get_mysql_connection_info(conn_pieces))
sql = "create database if not exists %s;" % database
cmd = ("mysql -u \"%(user)s\" %(password)s -h %(host)s "
"-e \"%(sql)s\"" % {'user': user, 'password': password,
'host': host, 'sql': sql})
self.execute_cmd(cmd)
elif conn_string.startswith('postgresql'):
(user, password, database, host) = \
get_pgsql_connection_info(conn_pieces)
(user, password, database, host) = (
get_pgsql_connection_info(conn_pieces))
os.environ['PGPASSWORD'] = password
os.environ['PGUSER'] = user
@ -410,8 +410,8 @@ class BaseWalkMigrationTestCase(BaseMigrationTestCase):
# automatically in tearDown so no need to clean it up here.
connect_string = _get_connect_string(
"mysql", self.USER, self.PASSWD, self.DATABASE)
(user, password, database, host) = \
get_mysql_connection_info(urlparse.urlparse(connect_string))
(user, password, database, host) = (
get_mysql_connection_info(urlparse.urlparse(connect_string)))
engine = sqlalchemy.create_engine(connect_string)
self.engines[database] = engine
self.test_databases[database] = connect_string
@ -442,8 +442,8 @@ class BaseWalkMigrationTestCase(BaseMigrationTestCase):
connect_string = _get_connect_string(
"postgres", self.USER, self.PASSWD, self.DATABASE)
engine = sqlalchemy.create_engine(connect_string)
(user, password, database, host) = \
get_mysql_connection_info(urlparse.urlparse(connect_string))
(user, password, database, host) = (
get_mysql_connection_info(urlparse.urlparse(connect_string)))
self.engines[database] = engine
self.test_databases[database] = connect_string

View File

@ -26,8 +26,8 @@ import sahara.tests.unit.plugins.hdp.hdp_test_base as base
from sahara import version
GET_REST_REQ = "sahara.plugins.hdp.versions.version_1_3_2.versionhandler." \
"AmbariClient._get_http_session"
GET_REST_REQ = ("sahara.plugins.hdp.versions.version_1_3_2.versionhandler."
"AmbariClient._get_http_session")
def create_cluster_template(ctx, dct):

View File

@ -344,9 +344,10 @@ class ClusterSpecTest(testtools.TestCase):
'222.22.6666', '333.22.6666')
master_ng = TestNodeGroup(
'master', [master_host], ['GANGLIA_SERVER',
'GANGLIA_MONITOR', 'NAGIOS_SERVER',
'AMBARI_SERVER', 'AMBARI_AGENT'])
'master', [master_host],
['GANGLIA_SERVER',
'GANGLIA_MONITOR', 'NAGIOS_SERVER',
'AMBARI_SERVER', 'AMBARI_AGENT'])
jt_ng = TestNodeGroup('jt', [jt_host], ["JOBTRACKER",
"GANGLIA_MONITOR", "AMBARI_AGENT"])
nn_ng = TestNodeGroup('nn', [nn_host], ["NAMENODE",
@ -354,9 +355,10 @@ class ClusterSpecTest(testtools.TestCase):
snn_ng = TestNodeGroup('snn', [snn_host], ["SECONDARY_NAMENODE",
"GANGLIA_MONITOR", "AMBARI_AGENT"])
slave_ng = TestNodeGroup(
'slave', [slave_host], ["DATANODE", "TASKTRACKER",
"GANGLIA_MONITOR", "HDFS_CLIENT", "MAPREDUCE_CLIENT",
"AMBARI_AGENT"])
'slave', [slave_host],
["DATANODE", "TASKTRACKER",
"GANGLIA_MONITOR", "HDFS_CLIENT", "MAPREDUCE_CLIENT",
"AMBARI_AGENT"])
cluster = base.TestCluster([master_ng, jt_ng, nn_ng,
snn_ng, slave_ng])
@ -565,12 +567,14 @@ class ClusterSpecTest(testtools.TestCase):
'222.22.2222', '333.22.2222')
node_group1 = TestNodeGroup(
'master', [server1], ["NAMENODE", "JOBTRACKER",
"SECONDARY_NAMENODE", "GANGLIA_SERVER", "GANGLIA_MONITOR",
"NAGIOS_SERVER", "AMBARI_SERVER", "AMBARI_AGENT"])
'master', [server1],
["NAMENODE", "JOBTRACKER",
"SECONDARY_NAMENODE", "GANGLIA_SERVER", "GANGLIA_MONITOR",
"NAGIOS_SERVER", "AMBARI_SERVER", "AMBARI_AGENT"])
node_group2 = TestNodeGroup(
'slave', [server2], ["TASKTRACKER", "DATANODE", "AMBARI_AGENT",
"GANGLIA_MONITOR"])
'slave', [server2],
["TASKTRACKER", "DATANODE", "AMBARI_AGENT",
"GANGLIA_MONITOR"])
cluster = base.TestCluster([node_group1, node_group2])
cluster_config = cs.ClusterSpec(cluster_config_file)
@ -700,12 +704,14 @@ class ClusterSpecTest(testtools.TestCase):
'222.22.2222', '333.22.2222')
node_group1 = TestNodeGroup(
'master', [server1], ["NAMENODE", "JOBTRACKER",
"SECONDARY_NAMENODE", "GANGLIA_SERVER", "GANGLIA_MONITOR",
"NAGIOS_SERVER", "AMBARI_SERVER", "AMBARI_AGENT"])
'master', [server1],
["NAMENODE", "JOBTRACKER",
"SECONDARY_NAMENODE", "GANGLIA_SERVER", "GANGLIA_MONITOR",
"NAGIOS_SERVER", "AMBARI_SERVER", "AMBARI_AGENT"])
node_group2 = TestNodeGroup(
'slave', [server2], ["TASKTRACKER", "DATANODE",
"AMBARI_AGENT", "GANGLIA_MONITOR"])
'slave', [server2],
["TASKTRACKER", "DATANODE",
"AMBARI_AGENT", "GANGLIA_MONITOR"])
cluster = base.TestCluster([node_group1, node_group2])
cluster_config = cs.ClusterSpec(cluster_config_file)
@ -732,12 +738,14 @@ class ClusterSpecTest(testtools.TestCase):
'222.22.2222', '333.22.2222')
node_group1 = TestNodeGroup(
'master', [server1], ["NAMENODE", "JOBTRACKER",
"SECONDARY_NAMENODE", "GANGLIA_SERVER", "GANGLIA_MONITOR",
"NAGIOS_SERVER", "AMBARI_SERVER", "AMBARI_AGENT"])
'master', [server1],
["NAMENODE", "JOBTRACKER",
"SECONDARY_NAMENODE", "GANGLIA_SERVER", "GANGLIA_MONITOR",
"NAGIOS_SERVER", "AMBARI_SERVER", "AMBARI_AGENT"])
node_group2 = TestNodeGroup(
'slave', [server2], ["TASKTRACKER", "DATANODE",
"AMBARI_AGENT", "GANGLIA_MONITOR"])
'slave', [server2],
["TASKTRACKER", "DATANODE",
"AMBARI_AGENT", "GANGLIA_MONITOR"])
cluster = base.TestCluster([node_group1, node_group2])
cluster_config = cs.ClusterSpec(cluster_config_file)

View File

@ -525,9 +525,10 @@ class ClusterSpecTestForHDP2(testtools.TestCase):
'222.22.6666', '333.22.6666')
master_ng = TestNodeGroup(
'master', [master_host], ['GANGLIA_SERVER',
'GANGLIA_MONITOR', 'NAGIOS_SERVER',
'AMBARI_SERVER', 'AMBARI_AGENT', 'ZOOKEEPER_SERVER'])
'master', [master_host],
['GANGLIA_SERVER',
'GANGLIA_MONITOR', 'NAGIOS_SERVER',
'AMBARI_SERVER', 'AMBARI_AGENT', 'ZOOKEEPER_SERVER'])
jt_ng = TestNodeGroup('jt', [jt_host], ["RESOURCEMANAGER",
"HISTORYSERVER",
"GANGLIA_MONITOR",
@ -537,9 +538,10 @@ class ClusterSpecTestForHDP2(testtools.TestCase):
snn_ng = TestNodeGroup('snn', [snn_host], ["SECONDARY_NAMENODE",
"GANGLIA_MONITOR", "AMBARI_AGENT"])
slave_ng = TestNodeGroup(
'slave', [slave_host], ["DATANODE", "NODEMANAGER",
"GANGLIA_MONITOR", "HDFS_CLIENT", "MAPREDUCE2_CLIENT",
"AMBARI_AGENT"])
'slave', [slave_host],
["DATANODE", "NODEMANAGER",
"GANGLIA_MONITOR", "HDFS_CLIENT", "MAPREDUCE2_CLIENT",
"AMBARI_AGENT"])
cluster = base.TestCluster([master_ng, jt_ng, nn_ng,
snn_ng, slave_ng])
@ -784,13 +786,14 @@ class ClusterSpecTestForHDP2(testtools.TestCase):
'222.22.2222', '333.22.2222')
node_group1 = TestNodeGroup(
'master', [server1], ["NAMENODE", "RESOURCEMANAGER",
"HISTORYSERVER", "SECONDARY_NAMENODE", "GANGLIA_SERVER",
"GANGLIA_MONITOR", "NAGIOS_SERVER", "AMBARI_SERVER",
"ZOOKEEPER_SERVER", "AMBARI_AGENT"])
'master', [server1],
["NAMENODE", "RESOURCEMANAGER",
"HISTORYSERVER", "SECONDARY_NAMENODE", "GANGLIA_SERVER",
"GANGLIA_MONITOR", "NAGIOS_SERVER", "AMBARI_SERVER",
"ZOOKEEPER_SERVER", "AMBARI_AGENT"])
node_group2 = TestNodeGroup(
'slave', [server2], ["NODEMANAGER", "DATANODE", "AMBARI_AGENT",
"GANGLIA_MONITOR"])
"GANGLIA_MONITOR"])
cluster = base.TestCluster([node_group1, node_group2])
cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6')
@ -924,13 +927,25 @@ class ClusterSpecTestForHDP2(testtools.TestCase):
'222.22.2222', '333.22.2222')
node_group1 = TestNodeGroup(
'master', [server1], ["NAMENODE", "RESOURCEMANAGER",
"HISTORYSERVER", "SECONDARY_NAMENODE", "GANGLIA_SERVER",
"GANGLIA_MONITOR", "NAGIOS_SERVER", "AMBARI_SERVER",
"ZOOKEEPER_SERVER", "AMBARI_AGENT"])
'master',
[server1],
["NAMENODE",
"RESOURCEMANAGER",
"HISTORYSERVER",
"SECONDARY_NAMENODE",
"GANGLIA_SERVER",
"GANGLIA_MONITOR",
"NAGIOS_SERVER",
"AMBARI_SERVER",
"ZOOKEEPER_SERVER",
"AMBARI_AGENT"])
node_group2 = TestNodeGroup(
'slave', [server2], ["NODEMANAGER", "DATANODE",
"AMBARI_AGENT", "GANGLIA_MONITOR"])
'slave',
[server2],
["NODEMANAGER",
"DATANODE",
"AMBARI_AGENT",
"GANGLIA_MONITOR"])
cluster = base.TestCluster([node_group1, node_group2])
cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6')
@ -957,13 +972,25 @@ class ClusterSpecTestForHDP2(testtools.TestCase):
'222.22.2222', '333.22.2222')
node_group1 = TestNodeGroup(
'master', [server1], ["NAMENODE", "RESOURCEMANAGER",
"HISTORYSERVER", "SECONDARY_NAMENODE", "GANGLIA_SERVER",
"GANGLIA_MONITOR", "NAGIOS_SERVER", "AMBARI_SERVER",
"ZOOKEEPER_SERVER", "AMBARI_AGENT"])
'master',
[server1],
["NAMENODE",
"RESOURCEMANAGER",
"HISTORYSERVER",
"SECONDARY_NAMENODE",
"GANGLIA_SERVER",
"GANGLIA_MONITOR",
"NAGIOS_SERVER",
"AMBARI_SERVER",
"ZOOKEEPER_SERVER",
"AMBARI_AGENT"])
node_group2 = TestNodeGroup(
'slave', [server2], ["NODEMANAGER", "DATANODE",
"AMBARI_AGENT", "GANGLIA_MONITOR"])
'slave',
[server2],
["NODEMANAGER",
"DATANODE",
"AMBARI_AGENT",
"GANGLIA_MONITOR"])
cluster = base.TestCluster([node_group1, node_group2])
cluster_config = cs.ClusterSpec(cluster_config_file, version='2.0.6')

View File

@ -254,8 +254,14 @@ class ServicesTest(testtools.TestCase):
'master.novalocal', 'master', '11111', 3,
'111.11.1111', '222.11.1111')
master_ng = hdp_test_base.TestNodeGroup(
'master', [master_host], ["NAMENODE", "JOBTRACKER",
"SECONDARY_NAMENODE", "TASKTRACKER", "DATANODE", "AMBARI_SERVER"])
'master',
[master_host],
["NAMENODE",
"JOBTRACKER",
"SECONDARY_NAMENODE",
"TASKTRACKER",
"DATANODE",
"AMBARI_SERVER"])
sqoop_host = hdp_test_base.TestServer(
'sqoop.novalocal', 'sqoop', '11111', 3,
'111.11.1111', '222.11.1111')

View File

@ -95,18 +95,18 @@ def start_patch(patch_templates=True):
get_clusters_p = mock.patch("sahara.service.api.get_clusters")
get_cluster_p = mock.patch("sahara.service.api.get_cluster")
if patch_templates:
get_ng_templates_p = \
mock.patch("sahara.service.api.get_node_group_templates")
get_ng_template_p = \
mock.patch("sahara.service.api.get_node_group_template")
get_ng_templates_p = mock.patch(
"sahara.service.api.get_node_group_templates")
get_ng_template_p = mock.patch(
"sahara.service.api.get_node_group_template")
get_plugins_p = mock.patch("sahara.service.api.get_plugins")
get_plugin_p = \
mock.patch("sahara.plugins.base.PluginManager.get_plugin")
get_plugin_p = mock.patch(
"sahara.plugins.base.PluginManager.get_plugin")
if patch_templates:
get_cl_templates_p = \
mock.patch("sahara.service.api.get_cluster_templates")
get_cl_template_p = \
mock.patch("sahara.service.api.get_cluster_template")
get_cl_templates_p = mock.patch(
"sahara.service.api.get_cluster_templates")
get_cl_template_p = mock.patch(
"sahara.service.api.get_cluster_template")
nova_p = mock.patch("sahara.utils.openstack.nova.client")
keystone_p = mock.patch("sahara.utils.openstack.keystone.client")
heat_p = mock.patch("sahara.utils.openstack.heat.client")

View File

@ -40,8 +40,8 @@ class TopologyTestCase(base.SaharaTestCase):
self.assertIn({'name': "net.topology.nodegroup.aware",
'value': 'true'},
result)
className = 'org.apache.hadoop.hdfs.server.namenode.' \
'BlockPlacementPolicyWithNodeGroup'
className = ('org.apache.hadoop.hdfs.server.namenode.'
'BlockPlacementPolicyWithNodeGroup')
self.assertIn({'name': "dfs.block.replicator.classname",
'value': className},
result)

View File

@ -47,8 +47,8 @@ class TestHeat(testtools.TestCase):
def test_get_anti_affinity_scheduler_hints(self):
inst_names = ['i1', 'i2']
expected = '"scheduler_hints" : {"different_host": ' \
'[{"Ref": "i1"}, {"Ref": "i2"}]},'
expected = ('"scheduler_hints" : {"different_host": '
'[{"Ref": "i1"}, {"Ref": "i2"}]},')
actual = h._get_anti_affinity_scheduler_hints(inst_names)
self.assertEqual(expected, actual)

View File

@ -101,8 +101,8 @@ class NeutronClientRemoteWrapper():
LOG.debug('Creating neutron adapter for {0}:{1}'
.format(host, port))
qrouter = self.get_router()
adapter = \
NeutronHttpAdapter(qrouter, host, port)
adapter = (
NeutronHttpAdapter(qrouter, host, port))
self.adapters[(host, port)] = adapter
adapters = [adapter]
@ -123,8 +123,8 @@ class NeutronHttpAdapter(adapters.HTTPAdapter):
self.host = host
def get_connection(self, url, proxies=None):
pool_conn = \
super(NeutronHttpAdapter, self).get_connection(url, proxies)
pool_conn = (
super(NeutronHttpAdapter, self).get_connection(url, proxies))
if hasattr(pool_conn, '_get_conn'):
http_conn = pool_conn._get_conn()
if http_conn.sock is None:

View File

@ -295,8 +295,8 @@ class InstanceInteropHelper(remote.Remote):
def get_neutron_info(self):
neutron_info = h.HashableDict()
neutron_info['network'] = \
self.instance.node_group.cluster.neutron_management_network
neutron_info['network'] = (
self.instance.node_group.cluster.neutron_management_network)
ctx = context.current()
neutron_info['uri'] = base.url_for(ctx.service_catalog, 'network')
neutron_info['token'] = ctx.token
@ -344,8 +344,9 @@ class InstanceInteropHelper(remote.Remote):
_release_remote_semaphore()
def get_http_client(self, port, info=None, *args, **kwargs):
self._log_command('Retrieving http session for {0}:{1}'
.format(self.instance.management_ip, port))
self._log_command('Retrieving http session for {0}:{1}'.format(
self.instance.management_ip,
port))
if CONF.use_namespaces and not CONF.use_floating_ips:
# need neutron info
if not info:

View File

@ -48,13 +48,6 @@ setenv = VIRTUAL_ENV={envdir}
commands = bash tools/lintstack.sh
[flake8]
# E128 continuation line under-indented for visual indent
# F402 import 'u' from line 23 shadowed by loop variable
# H202 assertRaises Exception too broad
# H305 imports not grouped correctly (re: stdlib, six: third-party)
# H307 like imports should be grouped together (six and oslo.config.cfg from third-party are separated by whitespace)
# H904 Wrap long lines in parentheses instead of a backslash
ignore = E128,F402,H202,H305,H307,H904
show-source = true
builtins = _
exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,tools