Rewrite log levels and messages

This is an initial commit to bring the Sahara logs in line with the new
logging spec. It changes some log levels to match the spec more closely
and rewords some log messages (e.g. for the INFO level).

Partially implements: bp new-style-logging

Change-Id: I8e7b7c4c3f375648d2999b1d0e19b68390cc22a8
Nikolay Starodubtsev 2015-02-09 16:32:52 +03:00
parent 3814c2796e
commit ce62f364cd
45 changed files with 602 additions and 523 deletions
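
As a quick orientation before the per-file hunks: deprecated LOG.warn calls become LOG.warning, %-interpolation in messages is replaced by named str.format() placeholders, the translation marker (_LI, _LW, _LE) is kept in step with the new level, and LOG.debug messages use plain, untranslated strings. The sketch below is illustrative only and is not part of the commit; the check_path wrapper and its debug message are invented, while the warning text mirrors the first hunk.

from oslo_log import log as logging

from sahara.i18n import _LW

LOG = logging.getLogger(__name__)


def check_path(path):
    # Before: LOG.info(_LI("Incorrect path: %s"), path)
    # After: the level is raised to WARNING, the marker switches from
    # _LI to _LW, and %-interpolation becomes a named str.format()
    # placeholder. DEBUG messages stay untranslated.
    LOG.warning(_LW("Incorrect path: {path}").format(path=path))
    LOG.debug("Checked path {path}".format(path=path))

Note that .format() is called on the message object returned by _LW, which is the same pattern the diff uses throughout.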

View File

@ -17,7 +17,6 @@ from oslo_log import log as logging
import webob.exc as ex
from sahara.i18n import _
from sahara.i18n import _LI
from sahara.i18n import _LW
import sahara.openstack.commons as commons
@ -43,7 +42,7 @@ class AuthValidator(object):
"""
token_tenant = env['HTTP_X_TENANT_ID']
if not token_tenant:
LOG.warn(_LW("Can't get tenant_id from env"))
LOG.warning(_LW("Can't get tenant_id from env"))
resp = ex.HTTPServiceUnavailable()
return resp(env, start_response)
@ -51,7 +50,7 @@ class AuthValidator(object):
if path != '/':
version, url_tenant, rest = commons.split_path(path, 3, 3, True)
if not version or not url_tenant or not rest:
LOG.info(_LI("Incorrect path: %s"), path)
LOG.warning(_LW("Incorrect path: {path}").format(path=path))
resp = ex.HTTPNotFound(_("Incorrect path"))
return resp(env, start_response)

View File

@ -48,8 +48,8 @@ class Context(context.RequestContext):
overwrite=True,
**kwargs):
if kwargs:
LOG.warn(_LW('Arguments dropped when creating context: %s'),
kwargs)
LOG.warning(_LW('Arguments dropped when creating context: '
'{args}').format(args=kwargs))
super(Context, self).__init__(auth_token=auth_token,
user=user_id,
@ -189,8 +189,8 @@ def _wrapper(ctx, thread_description, thread_group, func, *args, **kwargs):
func(*args, **kwargs)
except BaseException as e:
LOG.debug(
"Thread '%(thread)s' failed with exception: '%(exception)s'",
{'thread': thread_description, 'exception': e})
"Thread {thread} failed with exception: {exception}".format(
thread=thread_description, exception=e))
if thread_group and not thread_group.exc:
thread_group.exc = e
thread_group.failed_thread = thread_description

View File

@ -28,7 +28,7 @@ import sqlalchemy as sa
from sahara.db.sqlalchemy import models as m
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.i18n import _LE
from sahara.i18n import _LW
LOG = logging.getLogger(__name__)
@ -174,7 +174,8 @@ def setup_db():
engine = get_engine()
m.Cluster.metadata.create_all(engine)
except sa.exc.OperationalError as e:
LOG.exception(_LE("Database registration exception: %s"), e)
LOG.warning(_LW("Database registration exception: {exc}")
.format(exc=e))
return False
return True
@ -184,7 +185,7 @@ def drop_db():
engine = get_engine()
m.Cluster.metadata.drop_all(engine)
except Exception as e:
LOG.exception(_LE("Database shutdown exception: %s"), e)
LOG.warning(_LW("Database shutdown exception: {exc}").format(exc=e))
return False
return True

View File

@ -78,8 +78,6 @@ def setup_common(possible_topdir, service_name):
config.parse_configs(config_files)
log.setup(CONF, "sahara")
LOG.info(_LI('Starting Sahara %s'), service_name)
# Validate other configurations (that may produce logs) here
cinder.validate_config()
@ -88,6 +86,8 @@ def setup_common(possible_topdir, service_name):
plugins_base.setup_plugins()
LOG.info(_LI('Sahara {service} started').format(service=service_name))
def setup_sahara_api(mode):
ops = _get_ops_driver(mode)
@ -162,6 +162,7 @@ def make_app():
def _load_driver(namespace, name):
# TODO(starodubcevna): add LI here in the future for logging improvement
extension_manager = stevedore.DriverManager(
namespace=namespace,
name=name,
@ -174,21 +175,21 @@ def _load_driver(namespace, name):
def _get_infrastructure_engine():
"""Import and return one of sahara.service.*_engine.py modules."""
LOG.info(_LI("Loading '%s' infrastructure engine"),
CONF.infrastructure_engine)
LOG.debug("Infrastructure engine {engine} is loading".format(
engine=CONF.infrastructure_engine))
return _load_driver('sahara.infrastructure.engine',
CONF.infrastructure_engine)
def _get_remote_driver():
LOG.info(_LI("Loading '%s' remote"), CONF.remote)
LOG.debug("Remote {remote} is loading".format(remote=CONF.remote))
return _load_driver('sahara.remote', CONF.remote)
def _get_ops_driver(driver_name):
LOG.info(_LI("Loading '%s' ops"), driver_name)
LOG.debug("Ops {driver} is loading".format(driver=driver_name))
return _load_driver('sahara.run.mode', driver_name)

View File

@ -103,9 +103,9 @@ class PluginManager(object):
_("Plugin with name '%s' already exists.") % ext.name)
ext.obj.name = ext.name
self.plugins[ext.name] = ext.obj
LOG.info(_LI("Plugin '%(plugin_name)s' loaded %(entry_point)s"),
{'plugin_name': ext.name,
'entry_point': ext.entry_point_target})
LOG.info(_LI("Plugin {plugin_name} loaded {entry_point}").format(
plugin_name=ext.name,
entry_point=ext.entry_point_target))
if len(self.plugins) < len(config_plugins):
loaded_plugins = set(six.iterkeys(self.plugins))

View File

@ -109,9 +109,9 @@ class HttpClient(object):
url = self._make_url(path, params)
if http_method in ("GET", "DELETE"):
if data is not None:
LOG.warn(_LW("%(method)s method does not pass any data."
" Path '%(path)s'"),
{'method': http_method, 'path': path})
LOG.warning(_LW("{method} method does not pass any data. "
"Path {path}").format(method=http_method,
path=path))
data = None
# Setup the request
@ -124,7 +124,8 @@ class HttpClient(object):
request.add_header(k, v)
# Call it
LOG.debug("Method: %s, URL: %s" % (http_method, url))
LOG.debug("Method: {method}, URL: {url}".format(method=http_method,
url=url))
try:
return self._opener.open(request)
except urllib2.HTTPError as ex:

View File

@ -84,9 +84,8 @@ class Resource(object):
_("Command %(method)s %(path)s failed: %(msg)s")
% {'method': method, 'path': path, 'msg': six.text_type(ex)})
LOG.debug("%s Got response: %s%s"
% (method, body[:32], "..." if len(body) > 32 else ""))
LOG.debug("{method} got response: {body}".format(method=method,
body=body[:32]))
# Is the response application/json?
if (len(body) != 0 and resp.info().getmaintype() == "application"
and resp.info().getsubtype() == "json"):
@ -94,7 +93,7 @@ class Resource(object):
json_dict = json.loads(body)
return json_dict
except Exception as ex:
LOG.exception(_LE('JSON decode error: %s'), body)
LOG.error(_LE('JSON decode error: {body}').format(body=body))
raise ex
else:
return body
@ -114,13 +113,14 @@ class Resource(object):
return self.invoke("GET", relpath, params)
except (socket.error, urllib2.URLError) as e:
if "timed out" in six.text_type(e).lower():
LOG.warn(
_LW("Timeout issuing GET request for"
" %(path)s. %(post_msg)s")
% {'path': self._join_uri(relpath),
'post_msg':
_LW("Will retry") if retry < self.retries
else _LW("No retries left.")})
if retry < self.retries:
LOG.warning(_LW("Timeout issuing GET request for "
"{path}. Will retry").format(
path=self._join_uri(relpath)))
else:
LOG.warning(_LW("Timeout issuing GET request for "
"{path}. No retries left").format(
path=self._join_uri(relpath)))
else:
raise e
else:

View File

@ -208,8 +208,8 @@ class ClouderaUtils(object):
def await_agents(self, cluster, instances):
api = self.get_api_client(instances[0].cluster)
timeout = 300
LOG.debug("Waiting %(timeout)s seconds for agent connected to manager"
% {'timeout': timeout})
LOG.debug("Waiting {timeout} seconds for agent connected to manager"
.format(timeout=timeout))
s_time = timeutils.utcnow()
while timeutils.delta_seconds(s_time, timeutils.utcnow()) < timeout:
hostnames = [i.fqdn() for i in instances]

View File

@ -271,8 +271,8 @@ class AbstractPluginUtils(object):
cmd.start_manager(r)
timeout = 300
LOG.debug("Waiting %(timeout)s seconds for Manager to start : " % {
'timeout': timeout})
LOG.debug("Waiting {timeout} seconds for Manager to start: "
.format(timeout=timeout))
s_time = timeutils.utcnow()
while timeutils.delta_seconds(s_time, timeutils.utcnow()) < timeout:
try:
@ -303,8 +303,8 @@ class AbstractPluginUtils(object):
@cpo.event_wrapper(True)
def _configure_repo_from_inst(self, instance):
LOG.debug("Configure repos from instance '%(instance)s'" % {
'instance': instance.instance_name})
LOG.debug("Configure repos from instance {instance}".format(
instance=instance.instance_name))
cluster = instance.cluster
cdh5_key = self.c_helper.get_cdh5_key_url(cluster)

View File

@ -153,9 +153,9 @@ class AmbariPlugin(p.ProvisioningPluginBase):
service_dict[ci.name] = entry.value
cluster_configs[target] = service_dict
else:
LOG.debug('Template based input "{0}" is being filtered out as'
' it is not considered a user input'
.format(entry.config.name))
LOG.debug('Template based input "{entry_name}" is being'
' filtered out as it is not considered a user input'
.format(entry_name=entry.config.name))
ctx = context.ctx()
return cluster_template_create(ctx,
@ -183,9 +183,6 @@ class AmbariPlugin(p.ProvisioningPluginBase):
servers, version):
# TODO(jspeidel): encapsulate in another class
LOG.info(_LI('Provisioning Cluster via Ambari Server: {0} ...')
.format(ambari_info.get_address()))
for server in servers:
self._spawn(
"hdp-provision-instance-%s" % server.instance.hostname(),
@ -200,6 +197,9 @@ class AmbariPlugin(p.ProvisioningPluginBase):
ambari_client.provision_cluster(
cluster_spec, servers, ambari_info, name)
LOG.info(_LI('Cluster provisioned via Ambari Server: {server_ip}')
.format(server_ip=ambari_info.get_address()))
# TODO(jspeidel): invoke during scale cluster. Will need to handle dups
def _set_cluster_info(self, cluster, cluster_spec):
info = {}
@ -252,8 +252,8 @@ class AmbariPlugin(p.ProvisioningPluginBase):
ambari_info.user = admin_user.name
ambari_info.password = admin_user.password
LOG.info(_LI('Using "{0}" as admin user for scaling of cluster')
.format(ambari_info.user))
LOG.info(_LI('Using "{username}" as admin user for scaling of cluster')
.format(username=ambari_info.user))
# PLUGIN SPI METHODS:
def get_versions(self):
@ -335,7 +335,8 @@ class AmbariPlugin(p.ProvisioningPluginBase):
def decommission_nodes(self, cluster, instances):
LOG.info(_LI('AmbariPlugin: decommission_nodes called for '
'HDP version = %s'), cluster.hadoop_version)
'HDP version = {version}')
.format(version=cluster.hadoop_version))
handler = self.version_factory.get_version_handler(
cluster.hadoop_version)

View File

@ -66,8 +66,9 @@ class HadoopServer(object):
@saharautils.inject_remote('r')
def install_rpms(self, r):
LOG.info(
_LI("{0}: Installing rpm's ...").format(self.instance.hostname()))
LOG.debug(
"{hostname}: Installing rpm's"
.format(hostname=self.instance.hostname()))
# TODO(jspeidel): based on image type, use correct command
curl_cmd = ('curl -f -s -o /etc/yum.repos.d/ambari.repo %s' %
@ -79,27 +80,27 @@ class HadoopServer(object):
yum_cmd = 'yum -y install %s' % EPEL_RELEASE_PACKAGE_NAME
r.execute_command(yum_cmd, run_as_root=True)
else:
LOG.info(_LI("{0}: Unable to install rpm's from repo, "
"checking for local install.")
.format(self.instance.hostname()))
LOG.debug("{hostname}: Unable to install rpm's from repo, "
"checking for local install.".format(
hostname=self.instance.hostname()))
if not self.rpms_installed():
raise ex.HadoopProvisionError(
_('Failed to install Hortonworks Ambari'))
@saharautils.inject_remote('r')
def install_swift_integration(self, r):
LOG.info(
_LI("{0}: Installing swift integration ...")
.format(self.instance.hostname()))
LOG.debug(
"{hostname}: Installing swift integration"
.format(hostname=self.instance.hostname()))
base_rpm_cmd = 'rpm -U --quiet '
rpm_cmd = base_rpm_cmd + HADOOP_SWIFT_RPM
ret_code, stdout = r.execute_command(rpm_cmd,
run_as_root=True,
raise_when_error=False)
if ret_code != 0:
LOG.info(_LI("{0}: Unable to install swift integration from "
"source, checking for local rpm.")
.format(self.instance.hostname()))
LOG.debug("{hostname}: Unable to install swift integration from "
"source, checking for local rpm.".format(
hostname=self.instance.hostname()))
ret_code, stdout = r.execute_command(
'ls ' + HADOOP_SWIFT_LOCAL_RPM,
run_as_root=True,
@ -124,11 +125,11 @@ class HadoopServer(object):
@saharautils.inject_remote('r')
def _setup_and_start_ambari_server(self, port, jdk_path, r):
LOG.info(_LI('{0}: Installing ambari-server ...').format(
self.instance.hostname()))
LOG.debug('{hostname}: Installing ambari-server'.format(
hostname=self.instance.hostname()))
r.execute_command('yum -y install ambari-server', run_as_root=True)
LOG.info(_LI('Running Ambari Server setup ...'))
LOG.debug('Running Ambari Server setup')
# remove postgres data directory as a precaution since its existence
# has prevented successful postgres installation
r.execute_command('rm -rf /var/lib/pgsql/data', run_as_root=True)
@ -138,8 +139,8 @@ class HadoopServer(object):
return_code, stdout = r.execute_command('ls -l {jdk_location}'.format(
jdk_location=jdk_path), raise_when_error=False)
LOG.debug('Queried for JDK location on VM instance, return code = ' +
str(return_code))
LOG.debug('Queried for JDK location on VM instance, return code = '
'{code}'.format(code=str(return_code)))
# do silent setup since we only use default responses now
# only add -j command if the JDK is configured for the template,
@ -154,12 +155,12 @@ class HadoopServer(object):
self._configure_ambari_server_api_port(port)
LOG.info(_LI('Starting Ambari ...'))
# NOTE(dmitryme): Reading stdout from 'ambari-server start'
# hangs ssh. Redirecting output to /dev/null fixes that
r.execute_command(
'ambari-server start > /dev/null 2>&1', run_as_root=True
)
LOG.info(_LI('Ambari started'))
@saharautils.inject_remote('r')
def _configure_ambari_server_api_port(self, port, r):
@ -168,7 +169,8 @@ class HadoopServer(object):
return
ambari_config_file = '/etc/ambari-server/conf/ambari.properties'
LOG.debug('Configuring Ambari Server API port: {0}'.format(port))
LOG.debug('Configuring Ambari Server API port: {port}'.format(
port=port))
# read the current contents
data = r.read_file_from(ambari_config_file)
data = '{0}\nclient.api.port={1}\n'.format(data, port)
@ -178,26 +180,26 @@ class HadoopServer(object):
@saharautils.inject_remote('r')
def _setup_and_start_ambari_agent(self, ambari_server_ip, r):
LOG.info(_LI('{0}: Installing Ambari Agent ...').format(
self.instance.hostname()))
LOG.debug('{hostname}: Installing Ambari agent'.format(
hostname=self.instance.hostname()))
r.execute_command('yum -y install ambari-agent', run_as_root=True)
LOG.debug(
'{0}: setting master-ip: {1} in ambari-agent.ini'.format(
self.instance.hostname(), ambari_server_ip))
'{hostname}: setting master-ip: {ip} in ambari-agent.ini'.format(
hostname=self.instance.hostname(), ip=ambari_server_ip))
r.replace_remote_string(
'/etc/ambari-agent/conf/ambari-agent.ini', 'localhost',
ambari_server_ip)
LOG.info(
_LI('{0}: Starting Ambari Agent ...').format(
self.instance.hostname()))
# If the HDP 2 ambari agent is pre-installed on an image, the agent
# will start up during instance launch and therefore the agent
# registration will fail. It is therefore more appropriate to call
# restart since it will either start (if stopped) or restart (if
# running)
r.execute_command('ambari-agent restart', run_as_root=True)
LOG.info(
_LI('{hostname}: Ambari Agent started').format(
hostname=self.instance.hostname()))
@saharautils.inject_remote('r')
def set_namenode_safemode(self, jh, r):
@ -243,6 +245,7 @@ class HadoopServer(object):
run_as_root=True)
def _log(self, buf):
# FIXME(Kezar): I don't know what's this. Will check later.
LOG.debug(buf)
def _is_component_available(self, component):

View File

@ -22,7 +22,6 @@ import pkg_resources as pkg
from sahara import context
from sahara import exceptions as exc
from sahara.i18n import _
from sahara.i18n import _LC
from sahara.i18n import _LE
from sahara.i18n import _LI
from sahara.i18n import _LW
@ -177,10 +176,10 @@ class AmbariClient(object):
def _post(self, url, ambari_info, data=None):
if data:
LOG.debug('AmbariClient:_post call, url = ' + url +
' data = ' + str(data))
LOG.debug('AmbariClient:_post call, url = {url} data = {data}'
.format(url=url, data=str(data)))
else:
LOG.debug('AmbariClient:_post call, url = ' + url)
LOG.debug('AmbariClient:_post call, url = {url}'.format(url=url))
session = self._get_http_session(ambari_info.host, ambari_info.port)
return session.post(url, data=data,
@ -188,7 +187,7 @@ class AmbariClient(object):
headers=self._get_standard_headers())
def _delete(self, url, ambari_info):
LOG.debug('AmbariClient:_delete call, url = ' + url)
LOG.debug('AmbariClient:_delete call, url = {url}'.format(url=url))
session = self._get_http_session(ambari_info.host, ambari_info.port)
return session.delete(url,
auth=(ambari_info.user, ambari_info.password),
@ -196,10 +195,10 @@ class AmbariClient(object):
def _put(self, url, ambari_info, data=None):
if data:
LOG.debug('AmbariClient:_put call, url = ' + url +
' data = ' + str(data))
LOG.debug('AmbariClient:_put call, url = {url} data = {data}'
.format(url=url, data=str(data)))
else:
LOG.debug('AmbariClient:_put call, url = ' + url)
LOG.debug('AmbariClient:_put call, url = {url}'.format(url=url))
session = self._get_http_session(ambari_info.host, ambari_info.port)
auth = (ambari_info.user, ambari_info.password)
@ -207,7 +206,7 @@ class AmbariClient(object):
headers=self._get_standard_headers())
def _get(self, url, ambari_info):
LOG.debug('AmbariClient:_get call, url = ' + url)
LOG.debug('AmbariClient:_get call, url = {url}'.format(url=url))
session = self._get_http_session(ambari_info.host, ambari_info.port)
return session.get(url, auth=(ambari_info.user, ambari_info.password),
headers=self._get_standard_headers())
@ -220,7 +219,8 @@ class AmbariClient(object):
self.handler.get_version() + '"}}')
if result.status_code != 201:
LOG.error(_LE('Create cluster command failed. %s') % result.text)
LOG.error(_LE('Create cluster command failed. {result}').format(
result=result.text))
raise ex.HadoopProvisionError(
_('Failed to add cluster: %s') % result.text)
@ -268,8 +268,8 @@ class AmbariClient(object):
result = self._put(config_url, ambari_info, data=json.dumps(body))
if result.status_code != 200:
LOG.error(
_LE('Set configuration command failed. {0}').format(
result.text))
_LE('Set configuration command failed. {result}').format(
result=result.text))
raise ex.HadoopProvisionError(
_('Failed to set configurations on cluster: %s')
% result.text)
@ -284,8 +284,8 @@ class AmbariClient(object):
ambari_info)
if result.status_code not in [201, 409]:
LOG.error(
_LE('Create service command failed. {0}').format(
result.text))
_LE('Create service command failed. {result}').format(
result=result.text))
raise ex.HadoopProvisionError(
_('Failed to add services to cluster: %s')
% result.text)
@ -302,8 +302,8 @@ class AmbariClient(object):
ambari_info)
if result.status_code not in [201, 409]:
LOG.error(
_LE('Create component command failed. {0}').format(
result.text))
_LE('Create component command failed. {result}')
.format(result=result.text))
raise ex.HadoopProvisionError(
_('Failed to add components to services: %s')
% result.text)
@ -321,7 +321,8 @@ class AmbariClient(object):
ambari_info)
if result.status_code != 201:
LOG.error(
_LE('Create host command failed. {0}').format(result.text))
_LE('Create host command failed. {result}').format(
result=result.text))
raise ex.HadoopProvisionError(
_('Failed to add host: %s') % result.text)
@ -336,14 +337,13 @@ class AmbariClient(object):
ambari_info)
if result.status_code != 201:
LOG.error(
_LE('Create host_component command failed. %s'),
result.text)
_LE('Create host_component command failed. '
'{result}').format(result=result.text))
raise ex.HadoopProvisionError(
_('Failed to add host component: %s')
% result.text)
def _install_services(self, cluster_name, ambari_info):
LOG.info(_LI('Installing required Hadoop services ...'))
ambari_address = ambari_info.get_address()
install_url = ('http://{0}/api/v1/clusters/{'
@ -361,15 +361,16 @@ class AmbariClient(object):
ambari_info, cluster_name, request_id),
ambari_info)
if success:
LOG.info(_LI("Install of Hadoop stack successful."))
LOG.info(_LI("Hadoop stack installed successfully."))
self._finalize_ambari_state(ambari_info)
else:
LOG.critical(_LC('Install command failed.'))
LOG.error(_LE('Install command failed.'))
raise ex.HadoopProvisionError(
_('Installation of Hadoop stack failed.'))
elif result.status_code != 200:
LOG.error(
_LE('Install command failed. {0}').format(result.text))
_LE('Install command failed. {result}').format(
result=result.text))
raise ex.HadoopProvisionError(
_('Installation of Hadoop stack failed.'))
@ -384,7 +385,8 @@ class AmbariClient(object):
while not started:
result = self._get(request_url, ambari_info)
LOG.debug(
'async request ' + request_url + ' response:\n' + result.text)
'async request {url} response: {response}'.format(
url=request_url, response=result.text))
json_result = json.loads(result.text)
started = True
for items in json_result['items']:
@ -399,7 +401,6 @@ class AmbariClient(object):
return started
def _finalize_ambari_state(self, ambari_info):
LOG.info(_LI('Finalizing Ambari cluster state.'))
persist_state_uri = 'http://{0}/api/v1/persist'.format(
ambari_info.get_address())
@ -410,17 +411,13 @@ class AmbariClient(object):
result = self._post(persist_state_uri, ambari_info, data=persist_data)
if result.status_code != 201 and result.status_code != 202:
LOG.warning(_LW('Finalizing of Ambari cluster state failed. {0}').
format(result.text))
LOG.warning(_LW('Finalizing of Ambari cluster state failed. '
'{result}').format(result.text))
raise ex.HadoopProvisionError(_('Unable to finalize Ambari '
'state.'))
LOG.info(_LI('Ambari cluster state finalized.'))
def start_services(self, cluster_name, cluster_spec, ambari_info):
LOG.info(_LI('Starting Hadoop services ...'))
LOG.info(_LI('Cluster name: %(cluster_name)s, Ambari server address: '
'%(server_address)s'),
{'cluster_name': cluster_name,
'server_address': ambari_info.get_address()})
start_url = ('http://{0}/api/v1/clusters/{1}/services?ServiceInfo/'
'state=INSTALLED'.format(
ambari_info.get_address(), cluster_name))
@ -438,28 +435,32 @@ class AmbariClient(object):
request_id), ambari_info)
if success:
LOG.info(
_LI("Successfully started Hadoop cluster '{0}'.").format(
cluster_name))
_LI("Successfully started Hadoop cluster '{name}'.")
.format(name=cluster_name))
LOG.info(_LI('Cluster name: {cluster_name}, '
'Ambari server address: {server_address}')
.format(cluster_name=cluster_name,
server_address=ambari_info.get_address()))
else:
LOG.critical(_LC('Failed to start Hadoop cluster.'))
LOG.error(_LE('Failed to start Hadoop cluster.'))
raise ex.HadoopProvisionError(
_('Start of Hadoop services failed.'))
elif result.status_code != 200:
LOG.error(
_LE('Start command failed. Status: %(status)s, '
'response: %(response)s'),
{'status': result.status_code, 'response': result.text})
_LE('Start command failed. Status: {status}, '
'response: {response}').format(status=result.status_code,
response=result.text))
raise ex.HadoopProvisionError(
_('Start of Hadoop services failed.'))
def _exec_ambari_command(self, ambari_info, body, cmd_uri):
LOG.debug('PUT URI: {0}'.format(cmd_uri))
LOG.debug('PUT URI: {uri}'.format(uri=cmd_uri))
result = self._put(cmd_uri, ambari_info, data=body)
if result.status_code == 202:
LOG.debug(
'PUT response: {0}'.format(result.text))
'PUT response: {result}'.format(result=result.text))
json_result = json.loads(result.text)
href = json_result['href'] + '/tasks?fields=Tasks/status'
success = self._wait_for_async_request(href, ambari_info)
@ -467,16 +468,15 @@ class AmbariClient(object):
LOG.info(
_LI("Successfully changed state of Hadoop components "))
else:
LOG.critical(_LC('Failed to change state of Hadoop '
'components'))
LOG.error(_LE('Failed to change state of Hadoop components'))
raise ex.HadoopProvisionError(
_('Failed to change state of Hadoop components'))
else:
LOG.error(
_LE('Command failed. Status: %(status)s, response: '
'%(response)s'),
{'status': result.status_code, 'response': result.text})
_LE('Command failed. Status: {status}, response: '
'{response}').format(status=result.status_code,
response=result.text))
raise ex.HadoopProvisionError(_('Hadoop/Ambari command failed.'))
def _get_host_list(self, servers):
@ -493,10 +493,6 @@ class AmbariClient(object):
servers, cluster_spec)
def _install_components(self, ambari_info, auth, cluster_name, servers):
LOG.info(_LI('Starting Hadoop components while scaling up'))
LOG.info(_LI('Cluster name %(cluster_name)s, Ambari server ip %(ip)s'),
{'cluster_name': cluster_name,
'ip': ambari_info.get_address()})
# query for the host components on the given hosts that are in the
# INIT state
# TODO(jspeidel): provide request context
@ -507,6 +503,10 @@ class AmbariClient(object):
ambari_info.get_address(), cluster_name,
self._get_host_list(servers)))
self._exec_ambari_command(ambari_info, body, install_uri)
LOG.info(_LI('Started Hadoop components while scaling up'))
LOG.info(_LI('Cluster name {cluster_name}, Ambari server ip {ip}')
.format(cluster_name=cluster_name,
ip=ambari_info.get_address()))
def _start_components(self, ambari_info, auth, cluster_name, servers,
cluster_spec):
@ -520,7 +520,7 @@ class AmbariClient(object):
result = self._get(installed_uri, ambari_info)
if result.status_code == 200:
LOG.debug(
'GET response: {0}'.format(result.text))
'GET response: {result}'.format(result=result.text))
json_result = json.loads(result.text)
items = json_result['items']
@ -556,16 +556,16 @@ class AmbariClient(object):
result = self._get(url, ambari_info)
json_result = json.loads(result.text)
LOG.info(_LI('Registered Hosts: %(current_number)s '
'of %(final_number)s'),
{'current_number': len(json_result['items']),
'final_number': num_hosts})
LOG.debug('Registered Hosts: {current_number} '
'of {final_number}'.format(
current_number=len(json_result['items']),
final_number=num_hosts))
for hosts in json_result['items']:
LOG.debug('Registered Host: {0}'.format(
hosts['Hosts']['host_name']))
LOG.debug('Registered Host: {host}'.format(
host=hosts['Hosts']['host_name']))
return result and len(json_result['items']) >= num_hosts
except Exception:
LOG.info(_LI('Waiting to connect to ambari server ...'))
LOG.debug('Waiting to connect to ambari server')
return False
def update_ambari_admin_user(self, password, ambari_info):
@ -648,7 +648,7 @@ class AmbariClient(object):
try:
ambari_info.host.remote().close_http_session(ambari_info.port)
except exc.NotFoundException:
LOG.info(_LI("HTTP session is not cached"))
LOG.warning(_LW("HTTP session is not cached"))
def _get_services_in_state(self, cluster_name, ambari_info, state):
services_url = ('http://{0}/api/v1/clusters/{1}/services?'

View File

@ -951,16 +951,14 @@ class HueService(Service):
def _create_hue_property_tree(cluster_spec):
config_name = 'hue-ini'
LOG.info(_LI('Creating Hue ini property tree from configuration named '
'{0}').format(config_name))
hue_ini_property_tree = {'sections': {}, 'properties': {}}
config = cluster_spec.configurations[config_name]
if config is None:
LOG.warning(_LW('Missing configuration named {0}, aborting Hue ini'
' file creation').format(config_name))
LOG.warning(_LW('Missing configuration named {config_name}, '
'aborting Hue ini file creation').format(
config_name=config_name))
else:
# replace values in hue-ini configuration
subs = {}
@ -1020,8 +1018,11 @@ class HueService(Service):
if prop_value.find(placeholder) >= 0:
value = prop_value.replace(placeholder, sub)
LOG.debug('Converting placeholder in property '
'{0}:\n\t\t{1}\n\tto\n\t\t{2}\n'.
format(prop_name, prop_value, value))
'{p_name}:\n\t\t{p_value}\n\tto\n\t'
'\t{value}\n'.
format(p_name=prop_name,
p_value=prop_value,
value=value))
prop_value = value
# If the property value still is a value, add it and it's
@ -1047,61 +1048,69 @@ class HueService(Service):
# TODO(rlevas) : handle collisions
node['properties'][name] = prop_value
LOG.info(_LI('Created Hue ini property tree from configuration named '
'{config_name}').format(config_name=config_name))
return hue_ini_property_tree
@staticmethod
def _merge_configurations(cluster_spec, src_config_name, dst_config_name):
LOG.info(_LI('Merging configuration properties: %(source)s -> '
'%(destination)s'),
{'source': src_config_name, 'destination': dst_config_name})
src_config = cluster_spec.configurations[src_config_name]
dst_config = cluster_spec.configurations[dst_config_name]
if src_config is None:
LOG.warning(_LW('Missing source configuration property set, '
'aborting merge: {0}').format(src_config_name))
'aborting merge: {config_name}').
format(config_name=src_config_name))
elif dst_config is None:
LOG.warning(_LW('Missing destination configuration property set, '
'aborting merge: {0}').format(dst_config_name))
'aborting merge: {config_name}').
format(config_name=dst_config_name))
else:
for property_name, property_value in six.iteritems(src_config):
if property_name in dst_config:
if dst_config[property_name] == src_config[property_name]:
LOG.debug('Skipping unchanged configuration property '
'in {0} and {1}: {2}'.format(dst_config_name,
src_config_name,
property_name))
'in {d_config_name} and {s_config_name}: '
'{property_name}'
.format(d_config_name=dst_config_name,
s_config_name=src_config_name,
property_name=property_name))
else:
LOG.warning(_LW('Overwriting existing configuration '
'property in %(dst_config_name)s from '
'%(src_config_name)s for Hue: '
'%(property_name)s '
'[%(dst_config)s -> %(src_config)s]'),
{'dst_config_name': dst_config_name,
'src_config_name': src_config_name,
'property_name': property_name,
'dst_config': dst_config[property_name],
'src_config': src_config[property_name]})
'property in {dst_config_name} from '
'{src_config_name} for Hue: '
'{property_name} '
'[{dst_config} -> {src_config}]').
format(dst_config_name=dst_config_name,
src_config_name=src_config_name,
property_name=property_name,
dst_config=dst_config[
property_name],
src_config=src_config[
property_name]))
else:
LOG.debug('Adding Hue configuration property to {0} from '
'{1}: {2}'.format(dst_config_name,
src_config_name,
property_name))
LOG.debug('Adding Hue configuration property to {d_config}'
' from {s_config}: {p_name}'.format(
d_config=dst_config_name,
s_config=src_config_name,
p_name=property_name))
dst_config[property_name] = property_value
LOG.info(_LI('Merged configuration properties: {source} -> '
'{destination}')
.format(source=src_config_name,
destination=dst_config_name))
@staticmethod
def _handle_pre_service_start(instance, cluster_spec, hue_ini,
create_user):
with instance.remote() as r:
LOG.info(_LI('Installing Hue on {0}')
.format(instance.fqdn()))
r.execute_command('yum -y install hue',
run_as_root=True)
LOG.info(_LI('Installed Hue on {fqdn}')
.format(fqdn=instance.fqdn()))
LOG.info(_LI('Setting Hue configuration on {0}')
.format(instance.fqdn()))
r.write_file_to('/etc/hue/conf/hue.ini',
hue_ini,
True)
@ -1116,23 +1125,23 @@ class HueService(Service):
'"s/http.*.\\/webhdfs\\/v1\\//http:\\/\\'
'/localhost:14000\\/webhdfs\\/v1\\//g" '
'/etc/hue/conf/hue.ini', run_as_root=True)
LOG.info(_LI('Set Hue configuration on {fqdn}')
.format(fqdn=instance.fqdn()))
LOG.info(_LI('Uninstalling Shell, if it is installed '
'on {0}').format(instance.fqdn()))
r.execute_command(
'/usr/lib/hue/build/env/bin/python '
'/usr/lib/hue/tools/app_reg/app_reg.py '
'--remove shell',
run_as_root=True)
LOG.info(_LI('Shell uninstalled, if it was installed '
'on {fqdn}').format(fqdn=instance.fqdn()))
if create_user:
LOG.info(_LI('Creating initial Hue user on {0}')
.format(instance.fqdn()))
r.execute_command('/usr/lib/hue/build/env/bin/hue '
'create_sandbox_user', run_as_root=True)
LOG.info(_LI('Initial Hue user created on {fqdn}')
.format(fqdn=instance.fqdn()))
LOG.info(_LI('(Re)starting Hue on {0}')
.format(instance.fqdn()))
java_home = HueService._get_java_home(cluster_spec)
if java_home:
@ -1148,6 +1157,8 @@ class HueService(Service):
r.execute_command('[ ! -f /tmp/hueini-hdfsha ] || '
'service hadoop-httpfs start',
run_as_root=True)
LOG.info(_LI('Hue (re)started on {fqdn}')
.format(fqdn=instance.fqdn()))
def finalize_configuration(self, cluster_spec):
# add Hue-specific properties to the core-site file ideally only on
@ -1227,16 +1238,16 @@ class HueService(Service):
components = hue_ng.components
if 'HDFS_CLIENT' not in components:
LOG.info(_LI('Missing HDFS client from Hue node... adding '
'it since it is required for Hue'))
components.append('HDFS_CLIENT')
LOG.info(_LI('HDFS client was missed from Hue node. '
'Added it since it is required for Hue'))
if cluster_spec.get_deployed_node_group_count('HIVE_SERVER'):
if 'HIVE_CLIENT' not in components:
LOG.info(_LI('Missing HIVE client from Hue node... '
'adding it since it is required for '
'Beeswax and HCatalog'))
components.append('HIVE_CLIENT')
LOG.info(_LI('HIVE client was missed from Hue node. '
'Added it since it is required for '
'Beeswax and HCatalog'))
def pre_service_start(self, cluster_spec, ambari_info, started_services):

View File

@ -23,7 +23,6 @@ import six
from sahara import context
from sahara import exceptions as exc
from sahara.i18n import _
from sahara.i18n import _LC
from sahara.i18n import _LE
from sahara.i18n import _LI
from sahara.i18n import _LW
@ -196,7 +195,8 @@ class AmbariClient(object):
self.handler.get_version() + '"}}')
if result.status_code != 201:
LOG.error(_LE('Create cluster command failed. %s'), result.text)
LOG.error(_LE('Create cluster command failed. {result}').format(
result=result.text))
raise ex.HadoopProvisionError(
_('Failed to add cluster: %s') % result.text)
@ -244,8 +244,8 @@ class AmbariClient(object):
result = self._put(config_url, ambari_info, data=json.dumps(body))
if result.status_code != 200:
LOG.error(
_LE('Set configuration command failed. {0}').format(
result.text))
_LE('Set configuration command failed. {result}').format(
result=result.text))
raise ex.HadoopProvisionError(
_('Failed to set configurations on cluster: %s')
% result.text)
@ -261,8 +261,8 @@ class AmbariClient(object):
ambari_info)
if result.status_code not in [201, 409]:
LOG.error(
_LE('Create service command failed. {0}').format(
result.text))
_LE('Create service command failed. {result}').format(
result=result.text))
raise ex.HadoopProvisionError(
_('Failed to add services to cluster: %s')
% result.text)
@ -280,8 +280,8 @@ class AmbariClient(object):
ambari_info)
if result.status_code not in [201, 409]:
LOG.error(
_LE('Create component command failed. {0}').format(
result.text))
_LE('Create component command failed. {result}')
.format(result=result.text))
raise ex.HadoopProvisionError(
_('Failed to add components to services: %s')
% result.text)
@ -299,7 +299,8 @@ class AmbariClient(object):
ambari_info)
if result.status_code != 201:
LOG.error(
_LE('Create host command failed. {0}').format(result.text))
_LE('Create host command failed. {result}').format(
result=result.text))
raise ex.HadoopProvisionError(
_('Failed to add host: %s') % result.text)
@ -317,15 +318,13 @@ class AmbariClient(object):
ambari_info)
if result.status_code != 201:
LOG.error(
_LE('Create host_component command failed. %s'),
result.text)
_LE('Create host_component command failed. '
'{result}').format(result=result.text))
raise ex.HadoopProvisionError(
_('Failed to add host component: %s')
% result.text)
def _install_services(self, cluster_name, ambari_info):
LOG.info(_LI('Installing required Hadoop services ...'))
ambari_address = ambari_info.get_address()
install_url = ('http://{0}/api/v1/clusters/{'
'1}/services?ServiceInfo/state=INIT'.format(
@ -345,12 +344,13 @@ class AmbariClient(object):
LOG.info(_LI("Install of Hadoop stack successful."))
self._finalize_ambari_state(ambari_info)
else:
LOG.critical(_LC('Install command failed.'))
LOG.error(_LE('Install command failed.'))
raise ex.HadoopProvisionError(
_('Installation of Hadoop stack failed.'))
elif result.status_code != 200:
LOG.error(
_LE('Install command failed. {0}').format(result.text))
_LE('Install command failed. {result}').format(
result=result.text))
raise ex.HadoopProvisionError(
_('Installation of Hadoop stack failed.'))
@ -369,8 +369,8 @@ class AmbariClient(object):
started = False
while not started:
result = self._get(request_url, ambari_info)
LOG.debug(
'async request ' + request_url + ' response:\n' + result.text)
LOG.debug('Async request url: {url} response:\n{response}'.format(
url=request_url, response=result.text))
json_result = json.loads(result.text)
started = True
for items in json_result['items']:
@ -386,8 +386,6 @@ class AmbariClient(object):
return started
def _finalize_ambari_state(self, ambari_info):
LOG.info(_LI('Finalizing Ambari cluster state.'))
persist_state_uri = 'http://{0}/api/v1/persist'.format(
ambari_info.get_address())
# this post data has non-standard format because persist
@ -397,17 +395,13 @@ class AmbariClient(object):
result = self._post(persist_state_uri, ambari_info, data=persist_data)
if result.status_code != 201 and result.status_code != 202:
LOG.warning(_LW('Finalizing of Ambari cluster state failed. {0}').
format(result.text))
LOG.warning(_LW('Ambari cluster state not finalized. {result}').
format(result=result.text))
raise ex.HadoopProvisionError(
_('Unable to finalize Ambari state.'))
LOG.info(_LI('Ambari cluster state finalized.'))
def start_services(self, cluster_name, cluster_spec, ambari_info):
LOG.info(_LI('Starting Hadoop services ...'))
LOG.info(_LI('Cluster name: %(cluster_name)s, Ambari server address:'
' %(server_address)s'),
{'cluster_name': cluster_name,
'server_address': ambari_info.get_address()})
start_url = ('http://{0}/api/v1/clusters/{1}/services?ServiceInfo/'
'state=INSTALLED'.format(
ambari_info.get_address(), cluster_name))
@ -425,28 +419,32 @@ class AmbariClient(object):
request_id), ambari_info)
if success:
LOG.info(
_LI("Successfully started Hadoop cluster '{0}'.").format(
cluster_name))
_LI("Successfully started Hadoop cluster '{name}'.")
.format(name=cluster_name))
LOG.info(_LI('Cluster name: {cluster_name}, Ambari server '
'address: {server_address}').format(
cluster_name=cluster_name,
server_address=ambari_info.get_address()))
else:
LOG.critical(_LC('Failed to start Hadoop cluster.'))
LOG.error(_LE('Failed to start Hadoop cluster.'))
raise ex.HadoopProvisionError(
_('Start of Hadoop services failed.'))
elif result.status_code != 200:
LOG.error(
_LE('Start command failed. Status: %(status)s, response: '
'%(response)s'),
{'status': result.status_code, 'result': result.text})
_LE('Start command failed. Status: {status}, response: '
'{response}').format(status=result.status_code,
response=result.text))
raise ex.HadoopProvisionError(
_('Start of Hadoop services failed.'))
def _exec_ambari_command(self, ambari_info, body, cmd_uri):
LOG.debug('PUT URI: {0}'.format(cmd_uri))
LOG.debug('PUT URI: {uri}'.format(uri=cmd_uri))
result = self._put(cmd_uri, ambari_info, data=body)
if result.status_code == 202:
LOG.debug(
'PUT response: {0}'.format(result.text))
'PUT response: {result}'.format(result=result.text))
json_result = json.loads(result.text)
href = json_result['href'] + '/tasks?fields=Tasks/status'
success = self._wait_for_async_request(href, ambari_info)
@ -454,16 +452,15 @@ class AmbariClient(object):
LOG.info(
_LI("Successfully changed state of Hadoop components "))
else:
LOG.critical(_LC('Failed to change state of Hadoop '
'components'))
LOG.error(_LE('Failed to change state of Hadoop components'))
raise ex.HadoopProvisionError(
_('Failed to change state of Hadoop components'))
else:
LOG.error(
_LE('Command failed. Status: %(status)s, response: '
'%(response)s'),
{'status': result.status_code, 'result': result.text})
_LE('Command failed. Status: {status}, response: '
'{response}').format(status=result.status_code,
response=result.text))
raise ex.HadoopProvisionError(_('Hadoop/Ambari command failed.'))
def _get_host_list(self, servers):
@ -480,10 +477,6 @@ class AmbariClient(object):
servers, cluster_spec)
def _install_components(self, ambari_info, auth, cluster_name, servers):
LOG.info(_LI('Starting Hadoop components while scaling up'))
LOG.info(_LI('Cluster name %(cluster_name)s, Ambari server ip %(ip)s'),
{'cluster_name': cluster_name,
'ip': ambari_info.get_address()})
# query for the host components on the given hosts that are in the
# INIT state
# TODO(jspeidel): provide request context
@ -494,6 +487,10 @@ class AmbariClient(object):
ambari_info.get_address(), cluster_name,
self._get_host_list(servers)))
self._exec_ambari_command(ambari_info, body, install_uri)
LOG.info(_LI('Started Hadoop components while scaling up'))
LOG.info(_LI('Cluster name {cluster_name}, Ambari server ip {ip}')
.format(cluster_name=cluster_name,
ip=ambari_info.get_address()))
def _start_components(self, ambari_info, auth, cluster_name, servers,
cluster_spec):
@ -507,7 +504,7 @@ class AmbariClient(object):
result = self._get(installed_uri, ambari_info)
if result.status_code == 200:
LOG.debug(
'GET response: {0}'.format(result.text))
'GET response: {result}'.format(result=result.text))
json_result = json.loads(result.text)
items = json_result['items']
@ -543,16 +540,16 @@ class AmbariClient(object):
result = self._get(url, ambari_info)
json_result = json.loads(result.text)
LOG.info(_LI('Registered Hosts: %(current_number)s '
'of %(final_number)s'),
{'current_number': len(json_result['items']),
'final_number': num_hosts})
LOG.info(_LI('Registered Hosts: {current_number} '
'of {final_number}').format(
current_number=len(json_result['items']),
final_number=num_hosts))
for hosts in json_result['items']:
LOG.debug('Registered Host: {0}'.format(
hosts['Hosts']['host_name']))
LOG.debug('Registered Host: {host}'.format(
host=hosts['Hosts']['host_name']))
return result and len(json_result['items']) >= num_hosts
except Exception:
LOG.info(_LI('Waiting to connect to ambari server ...'))
LOG.debug('Waiting to connect to ambari server')
return False
def update_ambari_admin_user(self, password, ambari_info):
@ -628,8 +625,8 @@ class AmbariClient(object):
# determine the instances that include HDFS support
hosts_to_decommission.append(instance.fqdn())
LOG.debug('AmbariClient: hosts_to_decommission = '
+ str(hosts_to_decommission))
LOG.debug('AmbariClient: hosts_to_decommission = {hosts}'.format(
hosts=str(hosts_to_decommission)))
# template for request body
body_header = ('{"RequestInfo" : { "context": "Decommission DataNode",'
@ -642,7 +639,8 @@ class AmbariClient(object):
# generate comma-separated list of hosts to de-commission
list_of_hosts = ",".join(hosts_to_decommission)
LOG.debug('AmbariClient: list_of_hosts = ' + list_of_hosts)
LOG.debug('AmbariClient: list_of_hosts = {hosts}'.format(
hosts=list_of_hosts))
# create the request body
request_body = (
@ -652,16 +650,17 @@ class AmbariClient(object):
+ ', "Requests/resource_filters":[{"service_name":"HDFS",'
'"component_name":"NAMENODE"}]}')
LOG.debug('AmbariClient: about to make decommission request, uri = ' +
request_uri)
LOG.debug('AmbariClient: about to make decommission request, ' +
'request body = ' + request_body)
LOG.debug('AmbariClient: about to make decommission request, uri = '
'{uri}'.format(uri=request_uri))
LOG.debug('AmbariClient: about to make decommission request, '
'request body = {body}'.format(body=request_body))
# ask Ambari to decommission the datanodes
result = self._post(request_uri, ambari_info, request_body)
if result.status_code != 202:
LOG.error(_LE('AmbariClient: error while making decommission post '
'request. Error is = %s'), result.text)
'request. Error is = {result}').format(
result=result.text))
raise ex.DecommissionError(
_('An error occurred while trying to '
'decommission the DataNode instances that are '
@ -683,22 +682,23 @@ class AmbariClient(object):
cluster.name, name_node_host.fqdn(),
'NAMENODE')
LOG.debug('AmbariClient: about to make decommission status request,' +
'uri = ' + status_request)
LOG.debug('AmbariClient: about to make decommission status request,'
'uri = {uri}'.format(uri=status_request))
count = 0
while count < 100 and len(hosts_to_decommission) > 0:
LOG.info(_LI('AmbariClient: number of hosts waiting for '
'decommissioning to complete = %s'),
str(len(hosts_to_decommission)))
LOG.debug('AmbariClient: number of hosts waiting for '
'decommissioning to complete = {count}'.format(
count=str(len(hosts_to_decommission))))
result = self._get(status_request, ambari_info)
if result.status_code != 200:
LOG.error(_LE('AmbariClient: error in making decommission '
'status request, error = %s'), result.text)
'status request, error = {result}').format(
result=result.text))
else:
LOG.info(_LI('AmbariClient: decommission status request ok, '
'result = %s'), result.text)
'result = {result}').format(result=result.text))
json_result = json.loads(result.text)
live_nodes = (
json_result['metrics']['dfs']['namenode']['LiveNodes'])
@ -707,15 +707,14 @@ class AmbariClient(object):
for node, val in six.iteritems(json_result_nodes):
admin_state = val['adminState']
if admin_state == 'Decommissioned':
LOG.info(_LI('AmbariClient: node = %(node)s is '
'now in adminState = %(admin_state)s'),
{'node': node,
'admin_state': admin_state})
LOG.debug('AmbariClient: node = {node} is '
'now in adminState = {admin_state}'.format(
node=node, admin_state=admin_state))
# remove from list, to track which nodes
# are now in Decommissioned state
hosts_to_decommission.remove(node)
LOG.info(_LI('AmbariClient: sleeping for 5 seconds'))
LOG.debug('AmbariClient: sleeping for 5 seconds')
context.sleep(5)
# increment loop counter
@ -741,7 +740,7 @@ class AmbariClient(object):
try:
ambari_info.host.remote().close_http_session(ambari_info.port)
except exc.NotFoundException:
LOG.info(_LI("HTTP session is not cached"))
LOG.debug("HTTP session is not cached")
def _get_services_in_state(self, cluster_name, ambari_info, state):
services_url = ('http://{0}/api/v1/clusters/{1}/services?'
@ -959,7 +958,8 @@ class AmbariClient(object):
result = self._delete(delete_service_component_url, hac['ambari_info'])
if result.status_code != 200:
LOG.error(_LE('Configuring HDFS HA failed. %s'), result.text)
LOG.error(_LE('Configuring HDFS HA failed. {result}').format(
result=result.text))
raise ex.NameNodeHAConfigurationError(
'Configuring HDFS HA failed. %s' % result.text)
@ -971,7 +971,8 @@ class AmbariClient(object):
result = self._post(add_host_component_url, hac['ambari_info'])
if result.status_code != 201:
LOG.error(_LE('Configuring HDFS HA failed. %s'), result.text)
LOG.error(_LE('Configuring HDFS HA failed. {result}').format(
result=result.text))
raise ex.NameNodeHAConfigurationError(
'Configuring HDFS HA failed. %s' % result.text)
@ -995,15 +996,18 @@ class AmbariClient(object):
hac['ambari_info'])
if success:
LOG.info(_LI("HDFS-HA: Host component updated successfully: "
"{0} {1}").format(host, component))
"{host} {component}").format(host=host,
component=component))
else:
LOG.critical(_LC("HDFS-HA: Host component update failed: "
"{0} {1}").format(host, component))
LOG.error(_LE("HDFS-HA: Host component update failed: "
"{host} {component}").format(
host=host, component=component))
raise ex.NameNodeHAConfigurationError(
'Configuring HDFS HA failed. %s' % result.text)
elif result.status_code != 200:
LOG.error(
_LE('Configuring HDFS HA failed. {0}').format(result.text))
_LE('Configuring HDFS HA failed. {result}').format(
result=result.text))
raise ex.NameNodeHAConfigurationError(
'Configuring HDFS HA failed. %s' % result.text)
@ -1021,7 +1025,8 @@ class AmbariClient(object):
return items[0]['tag']
else:
LOG.error(
_LE('Configuring HDFS HA failed. {0}').format(result.text))
_LE('Configuring HDFS HA failed. {result}').format(
result=result.text))
raise ex.NameNodeHAConfigurationError(
'Configuring HDFS HA failed. %s' % result.text)
@ -1039,7 +1044,8 @@ class AmbariClient(object):
return items[0]['properties']
else:
LOG.error(
_LE('Configuring HDFS HA failed. {0}').format(result.text))
_LE('Configuring HDFS HA failed. {result}').format(
result=result.text))
raise ex.NameNodeHAConfigurationError(
'Configuring HDFS HA failed. %s' % result.text)
@ -1056,13 +1062,14 @@ class AmbariClient(object):
body['Clusters']['desired_config']['tag'] = tag
body['Clusters']['desired_config']['properties'] = properties
LOG.debug(("body: %s") % (body))
LOG.debug("body: {body}".format(body=body))
result = self._put(config_url, hac['ambari_info'],
data=json.dumps(body))
if result.status_code != 200:
LOG.error(
_LE('Configuring HDFS HA failed. {0}').format(result.text))
_LE('Configuring HDFS HA failed. {result}').format(
result=result.text))
raise ex.NameNodeHAConfigurationError(
'Configuring HDFS HA failed. %s' % result.text)

View File

@ -21,6 +21,8 @@ import six
from sahara import conductor
from sahara import context
from sahara.i18n import _LI
from sahara.i18n import _LW
import sahara.plugins.mapr.abstract.configurer as ac
import sahara.plugins.mapr.services.management.management as mng
import sahara.plugins.mapr.services.mapreduce.mapreduce as mr
@ -83,7 +85,7 @@ class BaseConfigurer(ac.AbstractConfigurer):
self._write_config_files(cluster_context, existing)
self._update_services(cluster_context, existing)
self._restart_services(cluster_context)
LOG.debug('Existing instances successfully configured')
LOG.info(_LI('Existing instances successfully configured'))
def _configure_services(self, cluster_context, instances):
for service in cluster_context.cluster_services:
@ -132,7 +134,7 @@ class BaseConfigurer(ac.AbstractConfigurer):
instances, util.run_script, _TOPO_SCRIPT, 'root', data_path)
else:
LOG.debug('Data locality is disabled.')
LOG.debug('Cluster topology successfully configured')
LOG.info(_LI('Cluster topology successfully configured'))
def _write_config_files(self, cluster_context, instances):
LOG.debug('Writing config files')
@ -154,7 +156,8 @@ class BaseConfigurer(ac.AbstractConfigurer):
configs=ng_configs[service.ui_name],
instance=ng.instances[0]
)
LOG.debug('Rendering %s config files', service.ui_name)
LOG.debug('Rendering {ui_name} config files'.format(
ui_name=service.ui_name))
for conf_file in service_conf_files:
ng_config_files.update({
conf_file.remote_path: conf_file.render()
@ -162,7 +165,7 @@ class BaseConfigurer(ac.AbstractConfigurer):
ng_instances = filter(lambda i: i in instances, ng.instances)
self._write_ng_config_files(ng_instances, ng_config_files)
LOG.debug('Config files successfully written')
LOG.debug('Config files successfully wrote')
def _write_ng_config_files(self, instances, conf_files):
with context.ThreadGroup() as tg:
@ -195,7 +198,7 @@ class BaseConfigurer(ac.AbstractConfigurer):
LOG.debug('Executing service post install hooks')
for s in cluster_context.cluster_services:
s.post_install(cluster_context, instances)
LOG.debug('Post install hooks execution successfully executed')
LOG.info(_LI('Post install hooks execution successfully executed'))
def _update_cluster_info(self, cluster_context):
LOG.debug('Updating UI information.')
@ -226,7 +229,7 @@ class BaseConfigurer(ac.AbstractConfigurer):
'echo "%s:%s"|chpasswd' % ('mapr', 'mapr'),
run_as_root=True)
else:
LOG.debug('user "mapr" does not exists')
LOG.warning(_LW('User "mapr" does not exists'))
def create_home_mapr(instance):
target_path = '/home/mapr'
@ -237,7 +240,7 @@ class BaseConfigurer(ac.AbstractConfigurer):
with instance.remote() as r:
r.execute_command(cmd, run_as_root=True)
else:
LOG.debug('user "mapr" does not exists')
LOG.warning(_LW('User "mapr" does not exists'))
util.execute_on_instances(instances, set_user_password)
util.execute_on_instances(instances, create_home_mapr)

View File

@ -21,6 +21,7 @@ from oslo_utils import timeutils
from sahara import context
from sahara.i18n import _
from sahara.i18n import _LI
import sahara.plugins.exceptions as ex
import sahara.plugins.mapr.abstract.node_manager as s
import sahara.plugins.mapr.services.management.management as mng
@ -56,7 +57,7 @@ class BaseNodeManager(s.AbstractNodeManager):
ec, out = r.execute_command(command, run_as_root=True)
command = MOVE_NODE_CMD % out.strip()
cldb_remote.execute_command(command, run_as_root=True)
LOG.debug("Nodes successfully moved")
LOG.info(_LI("Nodes successfully moved"))
def remove_nodes(self, c_context, instances):
LOG.debug("Removing nodes from cluster")
@ -70,7 +71,7 @@ class BaseNodeManager(s.AbstractNodeManager):
}
command = REMOVE_NODE_CMD % args
cldb_remote.execute_command(command, run_as_root=True)
LOG.debug("Nodes successfully removed")
LOG.info(_LI("Nodes successfully removed"))
def start(self, cluster_context, instances=None):
instances = instances or cluster_context.get_instances()
@ -96,7 +97,8 @@ class BaseNodeManager(s.AbstractNodeManager):
start_time = timeutils.utcnow()
retry_count = 0
with cldb_node.remote() as r:
LOG.debug("Waiting %s seconds for CLDB initialization", timeout)
LOG.debug("Waiting {count} seconds for CLDB initialization".format(
count=timeout))
while timeutils.delta_seconds(start_time,
timeutils.utcnow()) < timeout:
ec, out = r.execute_command(NODE_LIST_CMD,
@ -133,17 +135,17 @@ class BaseNodeManager(s.AbstractNodeManager):
def _start_zk_nodes(self, instances):
LOG.debug('Starting ZooKeeper nodes')
self._start_nodes(instances, mng.ZOOKEEPER.ui_name)
LOG.debug('ZooKeeper nodes successfully started')
LOG.info(_LI('ZooKeeper nodes successfully started'))
def _start_cldb_nodes(self, instances):
LOG.debug('Starting CLDB nodes')
self._start_nodes(instances, WARDEN_SERVICE)
LOG.debug('CLDB nodes successfully started')
LOG.info(_LI('CLDB nodes successfully started'))
def _start_non_cldb_nodes(self, instances):
LOG.debug('Starting non-control nodes')
self._start_nodes(instances, WARDEN_SERVICE)
LOG.debug('Non-control nodes successfully started')
LOG.info(_LI('Non-control nodes successfully started'))
def _stop_zk_nodes(self, instances):
self._stop_nodes(instances, mng.ZOOKEEPER.ui_name)
@ -158,9 +160,8 @@ class BaseNodeManager(s.AbstractNodeManager):
args = {'service': service.lower(), 'action': action}
cmd = cmd % args
LOG.debug(
'Executing "%(command)s" on node=%(ip)s',
{'command': cmd, 'ip': instance.management_ip}
)
'Executing "{command}" on node={ip}'.format(
command=cmd, ip=instance.management_ip))
r.execute_command(cmd, run_as_root=True)
def _start_service(self, instance, service):

View File

@ -18,6 +18,7 @@ import six
from sahara import context
from sahara.i18n import _
from sahara.i18n import _LI
import sahara.plugins.mapr.domain.configuration_file as bcf
import sahara.plugins.mapr.domain.node_process as np
import sahara.plugins.mapr.domain.service as s
@ -96,7 +97,7 @@ class MapRFS(s.Service):
for instance in file_servers:
tg.spawn('init-mfs-%s' % instance.id,
self._init_mfs_instance, instance)
LOG.debug('MapR FS successfully initialized')
LOG.info(_LI('MapR FS successfully initialized'))
def _init_mfs_instance(self, instance):
self._generate_disk_list_file(instance, self._CREATE_DISK_LIST)

View File

@ -68,7 +68,7 @@ class MySQL(s.Service):
with instance.remote() as r:
if script_text:
r.write_file_to(script_path, script_text, run_as_root=True)
LOG.debug('Executing SQL script %s', script_path)
LOG.debug('Executing SQL script {path}'.format(path=script_path))
r.execute_command(("mysql %s %s < %s" %
('-u' + user if user else '',
'-p' + password if password else '',

View File

@ -19,7 +19,6 @@ import six
from sahara import conductor as c
from sahara.i18n import _
from sahara.i18n import _LI
from sahara.plugins import provisioning as p
from sahara.plugins import utils
from sahara.swift import swift_helper as swift
@ -263,7 +262,7 @@ def generate_cfg_from_general(cfg, configs, general_config,
for name, value in configs['general'].items():
if value:
cfg = _set_config(cfg, general_config, name)
LOG.info(_LI("Applying config: %s"), name)
LOG.debug("Applying config: {name}".format(name=name))
else:
cfg = _set_config(cfg, general_config)
return cfg

View File

@ -123,8 +123,8 @@ class SparkProvider(p.ProvisioningPluginBase):
def _start_spark(self, cluster, sm_instance):
with remote.get_remote(sm_instance) as r:
run.start_spark_master(r, self._spark_home(cluster))
LOG.info(_LI("Spark service at '%s' has been started"),
sm_instance.hostname())
LOG.info(_LI("Spark service at {host} has been started").format(
host=sm_instance.hostname()))
def start_cluster(self, cluster):
nn_instance = utils.get_instance(cluster, "namenode")
@ -136,8 +136,8 @@ class SparkProvider(p.ProvisioningPluginBase):
# start the data nodes
self._start_datanode_processes(dn_instances)
LOG.info(_LI("Hadoop services in cluster %s have been started"),
cluster.name)
LOG.info(_LI("Hadoop services in cluster {cluster} have been started")
.format(cluster=cluster.name))
with remote.get_remote(nn_instance) as r:
r.execute_command("sudo -u hdfs hdfs dfs -mkdir -p /user/$USER/")
@ -147,8 +147,8 @@ class SparkProvider(p.ProvisioningPluginBase):
# start spark nodes
self.start_spark(cluster)
LOG.info(_LI('Cluster %s has been started successfully'),
cluster.name)
LOG.info(_LI('Cluster {cluster} has been started successfully').format(
cluster=cluster.name))
self._set_cluster_info(cluster)
def _spark_home(self, cluster):
@ -441,8 +441,8 @@ class SparkProvider(p.ProvisioningPluginBase):
self._start_datanode_processes(dn_instances)
run.start_spark_master(r_master, self._spark_home(cluster))
LOG.info(_LI("Spark master service at '%s' has been restarted"),
master.hostname())
LOG.info(_LI("Spark master service at {host} has been restarted")
.format(host=master.hostname()))
def _get_scalable_processes(self):
return ["datanode", "slave"]

View File

@ -96,8 +96,8 @@ class StormProvider(p.ProvisioningPluginBase):
# start storm slaves
self._start_slave_processes(sl_instances)
LOG.info(_LI('Cluster %s has been started successfully'),
cluster.name)
LOG.info(_LI('Cluster {cluster} has been started successfully').format(
cluster=cluster.name))
self._set_cluster_info(cluster)
def _extract_configs_to_extra(self, cluster):
@ -137,8 +137,8 @@ class StormProvider(p.ProvisioningPluginBase):
def _start_storm_master(self, sm_instance):
with remote.get_remote(sm_instance) as r:
run.start_storm_nimbus_and_ui(r)
LOG.info(_LI("Storm master at '%s' has been started"),
sm_instance.hostname())
LOG.info(_LI("Storm master at {host} has been started").format(
host=sm_instance.hostname()))
def _start_slave_processes(self, sl_instances):
if len(sl_instances) == 0:

View File

@ -18,7 +18,7 @@ from oslo_log import log as logging
import six
from sahara.i18n import _
from sahara.i18n import _LI
from sahara.i18n import _LW
from sahara.plugins.vanilla.hadoop2 import config_helper as c_helper
from sahara.plugins.vanilla.hadoop2 import oozie_helper as o_helper
from sahara.plugins.vanilla import utils as vu
@ -40,7 +40,7 @@ HADOOP_GROUP = 'hadoop'
def configure_cluster(pctx, cluster):
LOG.debug("Configuring cluster \"%s\"", cluster.name)
LOG.debug("Configuring cluster {cluster}".format(cluster=cluster.name))
if (CONF.use_identity_api_v3 and CONF.use_domain_for_proxy_users and
vu.get_hiveserver(cluster) and
c_helper.is_swift_enabled(pctx, cluster)):
@ -274,7 +274,8 @@ def _push_xml_configs(instance, configs):
def _push_configs_to_instance(instance, configs):
LOG.debug("Push configs to instance \"%s\"", instance.instance_name)
LOG.debug("Push configs to instance {instance}".format(
instance=instance.instance_name))
with instance.remote() as r:
for fl, data in six.iteritems(configs):
r.write_file_to(fl, data, run_as_root=True)
@ -352,8 +353,9 @@ def _merge_configs(a, b):
True, step=_("Configure topology data"), param=('cluster', 1))
def configure_topology_data(pctx, cluster):
if c_helper.is_data_locality_enabled(pctx, cluster):
LOG.info(_LI("Node group awareness is not implemented in YARN yet "
"so enable_hypervisor_awareness set to False explicitly"))
LOG.warning(_LW("Node group awareness is not implemented in YARN yet "
"so enable_hypervisor_awareness set to False "
"explicitly"))
tpl_map = th.generate_topology_map(cluster, is_node_awareness=False)
topology_data = "\n".join(
[k + " " + v for k, v in tpl_map.items()]) + "\n"
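The hunk above promotes the YARN node-group-awareness notice from INFO to WARNING, which fits its content: it reports a handled capability gap rather than a normal progress step. As a rough reading of the level policy this commit applies (my paraphrase, not an official table): DEBUG for developer detail, INFO for expected lifecycle events, WARNING for unexpected-but-handled conditions, ERROR for failures. A tiny illustration with hypothetical flags:

import logging

LOG = logging.getLogger(__name__)


def configure_topology(data_locality_enabled, hypervisor_awareness_supported):
    LOG.debug("Configuring topology data")          # developer detail
    if data_locality_enabled and not hypervisor_awareness_supported:
        # Handled, but worth an operator's attention.
        LOG.warning("Node group awareness is not implemented; "
                    "falling back to node-only awareness")
    LOG.info("Topology data has been configured")   # normal lifecycle event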

View File

@ -91,7 +91,7 @@ def start_oozie_process(pctx, instance):
with instance.remote() as r:
if c_helper.is_mysql_enabled(pctx, instance.cluster):
_start_mysql(r)
LOG.debug("Creating Oozie DB Schema...")
LOG.debug("Creating Oozie DB Schema")
sql_script = files.get_file_text(
'plugins/vanilla/hadoop2/resources/create_oozie_db.sql')
script_location = "create_oozie_db.sql"
@ -166,21 +166,22 @@ def await_datanodes(cluster):
if datanodes_count < 1:
return
LOG.info(_LI("Waiting %s datanodes to start up"), datanodes_count)
LOG.debug("Waiting for {count} datanodes to start up".format(
count=datanodes_count))
with vu.get_namenode(cluster).remote() as r:
while True:
if _check_datanodes_count(r, datanodes_count):
LOG.info(
_LI('Datanodes on cluster %s have been started'),
cluster.name)
_LI('Datanodes on cluster {cluster} have been started')
.format(cluster=cluster.name))
return
context.sleep(1)
if not g.check_cluster_exists(cluster):
LOG.info(
_LI('Stop waiting datanodes on cluster %s since it has '
'been deleted'), cluster.name)
_LI('Stop waiting for datanodes on cluster {cluster} since'
' it has been deleted').format(cluster=cluster.name))
return
@ -193,7 +194,7 @@ def _check_datanodes_count(remote, count):
'sudo su -lc "hdfs dfsadmin -report" hadoop | '
'grep \'Live datanodes\|Datanodes available:\' | '
'grep -o \'[0-9]\+\' | head -n 1')
LOG.debug("Datanode count='%s'" % stdout.rstrip())
LOG.debug("Datanode count='{count}'".format(count=stdout.rstrip()))
return exit_code == 0 and stdout and int(stdout) == count
@ -214,12 +215,12 @@ def _hive_copy_shared_conf(remote, dest):
def _hive_create_db(remote):
LOG.debug("Creating Hive metastore db...")
LOG.debug("Creating Hive metastore db")
remote.execute_command("mysql -u root < /tmp/create_hive_db.sql")
def _hive_metastore_start(remote):
LOG.debug("Starting Hive Metastore Server...")
LOG.debug("Starting Hive Metastore Server")
remote.execute_command("sudo su - -c 'nohup /opt/hive/bin/hive"
" --service metastore > /dev/null &' hadoop")
@ -243,6 +244,5 @@ def start_hiveserver_process(pctx, instance):
r.write_file_to('/tmp/create_hive_db.sql', sql_script)
_hive_create_db(r)
_hive_metastore_start(r)
LOG.info(_LI("Hive Metastore server at %s has been "
"started"),
instance.hostname())
LOG.info(_LI("Hive Metastore server at {host} has been "
"started").format(host=instance.hostname()))

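The await_datanodes hunk above keeps the completion and cancellation messages at INFO but demotes the "Waiting ... datanodes" progress message to DEBUG. A self-contained sketch of that polling shape; the check and cluster_exists callables, the interval, and the use of time.sleep instead of context.sleep are placeholders, not Sahara APIs:

import logging
import time

LOG = logging.getLogger(__name__)


def await_datanodes(expected, check, cluster_exists, cluster_name, interval=1):
    # Poll until the expected datanode count is reported or the cluster
    # disappears; progress goes to DEBUG, outcomes to INFO.
    LOG.debug("Waiting for {count} datanodes to start up".format(
        count=expected))
    while True:
        if check(expected):
            LOG.info("Datanodes on cluster {cluster} have been started"
                     .format(cluster=cluster_name))
            return
        time.sleep(interval)
        if not cluster_exists():
            LOG.info("Stop waiting for datanodes on cluster {cluster} since "
                     "it has been deleted".format(cluster=cluster_name))
            return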
View File

@ -20,7 +20,6 @@ from sahara import conductor as c
from sahara import context
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.i18n import _LI
from sahara.i18n import _LW
from sahara.plugins import provisioning as p
from sahara.plugins import utils
@ -224,7 +223,7 @@ def generate_cfg_from_general(cfg, configs, general_config,
for name, value in configs['general'].items():
if value:
cfg = _set_config(cfg, general_config, name)
LOG.info(_LI("Applying config: %s"), name)
LOG.debug("Applying config: {config}".format(config=name))
else:
cfg = _set_config(cfg, general_config)
return cfg
@ -343,12 +342,14 @@ def generate_xml_configs(cluster, node_group, hive_mysql_passwd):
proxy_configs)
xml_configs.update({'hive-site':
x.create_hadoop_xml(cfg, cfg_filter)})
LOG.debug('Generated hive-site.xml for hive % s', hive_hostname)
LOG.debug('Generated hive-site.xml for hive {host}'.format(
host=hive_hostname))
if oozie_hostname:
xml_configs.update({'oozie-site':
x.create_hadoop_xml(all_cfg, o_h.OOZIE_DEFAULT)})
LOG.debug('Generated oozie-site.xml for oozie % s', oozie_hostname)
LOG.debug('Generated oozie-site.xml for oozie {host}'.format(
host=oozie_hostname))
return xml_configs
@ -387,8 +388,9 @@ def extract_environment_confs(configs):
if param_name == cfg_name and param_value is not None:
lst.append(cfg_format_str % param_value)
else:
LOG.warn(_LW("Plugin received wrong applicable target '%s' in "
"environmental configs"), service)
LOG.warning(_LW("Plugin received wrong applicable target {service}"
" in environmental configs").format(
service=service))
return sorted(lst)
@ -408,8 +410,8 @@ def extract_xml_confs(configs):
if param_name in names and param_value is not None:
lst.append((param_name, param_value))
else:
LOG.warn(_LW("Plugin received wrong applicable target '%s' for "
"xml configs"), service)
LOG.warning(_LW("Plugin received wrong applicable target {service}"
" for xml configs").format(service=service))
return sorted(lst)

View File

@ -50,7 +50,8 @@ def hive_copy_shared_conf(remote, dest):
def oozie_share_lib(remote, nn_hostname):
LOG.debug("Sharing Oozie libs to hdfs://%s:8020" % nn_hostname)
LOG.debug("Sharing Oozie libs to hdfs://{host}:8020".format(
host=nn_hostname))
# remote.execute_command('sudo su - -c "/opt/oozie/bin/oozie-setup.sh '
# 'sharelib create -fs hdfs://%s:8020" hadoop'
# % nn_hostname)
@ -78,18 +79,19 @@ def check_datanodes_count(remote, count):
'sudo su -c "hadoop dfsadmin -report | '
'grep \'Datanodes available:\' | '
'awk \'{print \\$3}\'" hadoop')
LOG.debug("Datanode count='%s'" % stdout.rstrip())
LOG.debug("Datanode count={count}".format(count=stdout.rstrip()))
return exit_code == 0 and stdout and int(stdout) == count
def mysql_start(remote, mysql_instance):
LOG.debug("Starting mysql at %s" % mysql_instance.hostname())
LOG.debug("Starting mysql at {host}".format(
host=mysql_instance.hostname()))
remote.execute_command("/opt/start-mysql.sh")
def oozie_create_db(remote):
LOG.debug("Creating Oozie DB Schema...")
LOG.debug("Creating Oozie DB Schema")
sql_script = files.get_file_text(
'plugins/vanilla/v1_2_1/resources/create_oozie_db.sql')
script_location = "create_oozie_db.sql"
@ -105,7 +107,7 @@ def start_oozie(remote):
def hive_create_db(remote, hive_mysql_passwd):
LOG.debug("Creating Hive metastore db...")
LOG.debug("Creating Hive metastore db")
sql_script = files.get_file_text(
'plugins/vanilla/v1_2_1/resources/create_hive_db.sql')
sql_script = sql_script.replace('pass', hive_mysql_passwd)
@ -117,6 +119,6 @@ def hive_create_db(remote, hive_mysql_passwd):
def hive_metastore_start(remote):
LOG.debug("Starting Hive Metastore Server...")
LOG.debug("Starting Hive Metastore Server")
remote.execute_command("sudo su - -c 'nohup /opt/hive/bin/hive"
" --service metastore > /dev/null &' hadoop")

View File

@ -156,8 +156,8 @@ class VersionHandler(avm.AbstractVersionHandler):
run.oozie_create_db(r)
run.oozie_share_lib(r, nn_instance.hostname())
run.start_oozie(r)
LOG.info(_LI("Oozie service at '%s' has been started"),
nn_instance.hostname())
LOG.info(_LI("Oozie service at {host} has been started").format(
host=nn_instance.hostname()))
def start_hiveserver(self, cluster):
hs = vu.get_hiveserver(cluster)
@ -179,9 +179,9 @@ class VersionHandler(avm.AbstractVersionHandler):
run.mysql_start(r, hive_server)
run.hive_create_db(r, cluster.extra['hive_mysql_passwd'])
run.hive_metastore_start(r)
LOG.info(_LI("Hive Metastore server at %s has been "
"started"),
hive_server.hostname())
LOG.info(_LI("Hive Metastore server at {host} has been "
"started").format(
host=hive_server.hostname()))
def start_cluster(self, cluster):
self.start_namenode(cluster)
@ -194,14 +194,15 @@ class VersionHandler(avm.AbstractVersionHandler):
self._await_datanodes(cluster)
LOG.info(_LI("Hadoop services in cluster %s have been started"),
cluster.name)
LOG.info(_LI("Hadoop services in cluster {cluster} have been started")
.format(cluster=cluster.name))
self.start_oozie(cluster)
self.start_hiveserver(cluster)
LOG.info(_LI('Cluster %s has been started successfully'), cluster.name)
LOG.info(_LI('Cluster {cluster} has been started successfully')
.format(cluster=cluster.name))
self._set_cluster_info(cluster)
@cpo.event_wrapper(
@ -211,21 +212,22 @@ class VersionHandler(avm.AbstractVersionHandler):
if datanodes_count < 1:
return
LOG.info(_LI("Waiting %s datanodes to start up"), datanodes_count)
LOG.debug("Waiting for {count} datanodes to start up".format(
count=datanodes_count))
with remote.get_remote(vu.get_namenode(cluster)) as r:
while True:
if run.check_datanodes_count(r, datanodes_count):
LOG.info(
_LI('Datanodes on cluster %s have been started'),
cluster.name)
_LI('Datanodes on cluster {cluster} have been started')
.format(cluster=cluster.name))
return
context.sleep(1)
if not g.check_cluster_exists(cluster):
LOG.info(
_LI('Stop waiting datanodes on cluster %s since it has'
' been deleted'), cluster.name)
LOG.debug('Stop waiting for datanodes on cluster {cluster}'
' since it has been deleted'.format(
cluster=cluster.name))
return
def _generate_hive_mysql_password(self, cluster):

View File

@ -22,7 +22,6 @@ from sahara import conductor as c
from sahara import context
from sahara import exceptions as exc
from sahara.i18n import _
from sahara.i18n import _LE
from sahara.i18n import _LI
from sahara.i18n import _LW
from sahara.service import engine as e
@ -439,7 +438,8 @@ class DirectEngine(e.Engine):
context.sleep(1)
LOG.info(_LI("Cluster '%s': all instances are active"), cluster.id)
LOG.info(_LI("Cluster {cluster_id}: all instances are active").format(
cluster_id=cluster.id))
def _await_deleted(self, cluster, instances):
"""Await all instances are deleted."""
@ -455,8 +455,8 @@ class DirectEngine(e.Engine):
for instance in instances:
if instance.id not in deleted_ids:
if self._check_if_deleted(instance):
LOG.debug("Instance '%s' is deleted" %
instance.instance_name)
LOG.debug("Instance {instance} is deleted".format(
instance=instance.instance_name))
deleted_ids.add(instance.id)
cpo.add_successful_event(instance)
@ -481,17 +481,19 @@ class DirectEngine(e.Engine):
def _rollback_cluster_creation(self, cluster, ex):
"""Shutdown all instances and update cluster status."""
LOG.info(_LI("Cluster '%(name)s' creation rollback "
"(reason: %(reason)s)"),
{'name': cluster.name, 'reason': ex})
# TODO(starodubcevna): Need to add LOG.warning to upper level in next
# commits
LOG.debug("Cluster {name} creation rollback "
"(reason: {reason})".format(name=cluster.name, reason=ex))
self.shutdown_cluster(cluster)
def _rollback_cluster_scaling(self, cluster, instances, ex):
# TODO(starodubcevna): Need to add LOG.warning to upper level in next
# commits
"""Attempt to rollback cluster scaling."""
LOG.info(_LI("Cluster '%(name)s' scaling rollback "
"(reason: %(reason)s)"),
{'name': cluster.name, 'reason': ex})
LOG.debug("Cluster {name} scaling rollback "
"(reason: {reason})".format(name=cluster.name, reason=ex))
for i in instances:
self._shutdown_instance(i)
@ -523,12 +525,13 @@ class DirectEngine(e.Engine):
security_group = client.get(name)
if (security_group.name !=
g.generate_auto_security_group_name(node_group)):
LOG.warn(_LW("Auto security group for node group %s is not "
"found"), node_group.name)
LOG.warning(_LW("Auto security group for node group {name} is "
"not found").format(name=node_group.name))
return
client.delete(name)
except Exception:
LOG.exception(_LE("Failed to delete security group %s"), name)
LOG.warning(_LW("Failed to delete security group {name}").format(
name=name))
def _shutdown_instance(self, instance):
ctx = context.ctx()
@ -537,22 +540,22 @@ class DirectEngine(e.Engine):
try:
networks.delete_floating_ip(instance.instance_id)
except nova_exceptions.NotFound:
LOG.warn(_LW("Attempted to delete non-existent floating IP in "
"pool %(pool)s from instance %(instance)s"),
{'pool': instance.node_group.floating_ip_pool,
'instance': instance.instance_id})
LOG.warning(_LW("Attempted to delete non-existent floating IP "
"in pool {pool} from instance {instance}")
.format(pool=instance.node_group.floating_ip_pool,
instance=instance.instance_id))
try:
volumes.detach_from_instance(instance)
except Exception:
LOG.warn(_LW("Detaching volumes from instance %s failed"),
instance.instance_id)
LOG.warning(_LW("Detaching volumes from instance {id} failed")
.format(id=instance.instance_id))
try:
nova.client().servers.delete(instance.instance_id)
except nova_exceptions.NotFound:
LOG.warn(_LW("Attempted to delete non-existent instance %s"),
instance.instance_id)
LOG.warning(_LW("Attempted to delete non-existent instance {id}")
.format(id=instance.instance_id))
conductor.instance_remove(ctx, instance)
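Much of the direct engine hunk above is a rename of LOG.warn to LOG.warning: warn() is an obsolete, deprecated alias for warning() in the standard logging module, so the new spelling is the safe long-term choice. A quick illustration; the delete_call parameter and the use of LookupError in place of the Nova NotFound exception are hypothetical stand-ins:

import logging

LOG = logging.getLogger(__name__)


def delete_instance(instance_id, delete_call):
    try:
        delete_call(instance_id)
    except LookupError:
        # Preferred spelling; Logger.warn is an obsolete alias of warning().
        LOG.warning("Attempted to delete non-existent instance {id}".format(
            id=instance_id))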

View File

@ -66,8 +66,9 @@ def execute_job(job_id, data):
try:
p.create_proxy_user_for_job_execution(job_execution)
except ex.SaharaException as e:
LOG.exception(_LE("Can't run job execution '{0}' "
"(reasons: {1})").format(job_execution.id, e))
LOG.error(_LE("Can't run job execution {job} "
"(reason: {reason})").format(job=job_execution.id,
reason=e))
conductor.job_execution_destroy(context.ctx(), job_execution)
raise e

View File

@ -25,6 +25,7 @@ from sahara import exceptions as e
from sahara.i18n import _
from sahara.i18n import _LE
from sahara.i18n import _LI
from sahara.i18n import _LW
from sahara.service.edp import job_utils
from sahara.service.edp.oozie import engine as oozie_engine
from sahara.service.edp.spark import engine as spark_engine
@ -120,9 +121,9 @@ def run_job(job_execution_id):
try:
_run_job(job_execution_id)
except Exception as ex:
LOG.exception(
_LE("Can't run job execution '%(job)s' (reason: %(reason)s)"),
{'job': job_execution_id, 'reason': ex})
LOG.warning(
_LW("Can't run job execution {job} (reason: {reason})").format(
job=job_execution_id, reason=ex))
conductor.job_execution_update(
context.ctx(), job_execution_id,
@ -153,27 +154,28 @@ def cancel_job(job_execution_id):
job_info = engine.cancel_job(job_execution)
except Exception as ex:
job_info = None
LOG.exception(
_LE("Error during cancel of job execution %(job)s: "
"%(error)s"), {'job': job_execution.id,
'error': ex})
LOG.warning(
_LW("Error during cancel of job execution {job}: "
"{error}").format(job=job_execution.id,
error=ex))
if job_info is not None:
job_execution = _write_job_status(job_execution, job_info)
LOG.info(_LI("Job execution %s was canceled successfully"),
job_execution.id)
LOG.info(_LI("Job execution {job_id} was canceled "
"successfully").format(
job_id=job_execution.id))
return job_execution
context.sleep(3)
job_execution = conductor.job_execution_get(
ctx, job_execution_id)
if not job_execution:
LOG.info(_LI("Job execution %(job_exec_id)s was deleted. "
"Canceling current operation."),
{'job_exec_id': job_execution_id})
LOG.info(_LI("Job execution {job_exec_id} was deleted. "
"Canceling current operation.").format(
job_exec_id=job_execution_id))
return job_execution
else:
LOG.info(_LI("Job execution status %(job)s: %(status)s"),
{'job': job_execution.id,
'status': job_execution.info['status']})
LOG.info(_LI("Job execution status {job}: {status}").format(
job=job_execution.id,
status=job_execution.info['status']))
return job_execution
else:
raise e.CancelingFailed(_('Job execution %s was not canceled')
@ -198,9 +200,8 @@ def update_job_statuses():
try:
get_job_status(je.id)
except Exception as e:
LOG.exception(
_LE("Error during update job execution %(job)s: %(error)s"),
{'job': je.id, 'error': e})
LOG.error(_LE("Error during update job execution {job}: {error}")
.format(job=je.id, error=e))
def get_job_config_hints(job_type):

View File

@ -84,7 +84,8 @@ class Engine(object):
context.sleep(1)
LOG.info(
_LI("Cluster '%s': all instances have IPs assigned"), cluster.id)
_LI("Cluster {cluster_id}: all instances have IPs assigned")
.format(cluster_id=cluster.id))
cluster = conductor.cluster_get(context.ctx(), cluster)
instances = g.get_instances(cluster, ips_assigned)
@ -97,7 +98,8 @@ class Engine(object):
tg.spawn("wait-for-ssh-%s" % instance.instance_name,
self._wait_until_accessible, instance)
LOG.info(_LI("Cluster '%s': all instances are accessible"), cluster.id)
LOG.info(_LI("Cluster {cluster_id}: all instances are accessible")
.format(cluster_id=cluster.id))
@cpo.event_wrapper(mark_successful_on_exit=True)
def _wait_until_accessible(self, instance):
@ -110,11 +112,14 @@ class Engine(object):
if exit_code == 0:
LOG.debug(
'Instance %s is accessible' % instance.instance_name)
'Instance {instance_name} is accessible'.format(
instance_name=instance.instance_name))
return
except Exception as ex:
LOG.debug("Can't login to node %s (%s), reason %s",
instance.instance_name, instance.management_ip, ex)
LOG.debug("Can't login to node {instance_name} {mgmt_ip}, "
"reason {reason}".format(
instance_name=instance.instance_name,
mgmt_ip=instance.management_ip, reason=ex))
context.sleep(5)
@ -140,7 +145,8 @@ class Engine(object):
@cpo.event_wrapper(mark_successful_on_exit=True)
def _configure_instance(self, instance, hosts_file):
LOG.debug('Configuring instance %s' % instance.instance_name)
LOG.debug('Configuring instance {instance_name}'.format(
instance_name=instance.instance_name))
with instance.remote() as r:
r.write_file_to('etc-hosts', hosts_file)

View File

@ -20,7 +20,6 @@ from oslo_log import log as logging
from sahara import conductor as c
from sahara import context
from sahara.i18n import _
from sahara.i18n import _LI
from sahara.i18n import _LW
from sahara.service import engine as e
from sahara.service.heat import templates as ht
@ -149,9 +148,12 @@ class HeatEngine(e.Engine):
def _rollback_cluster_creation(self, cluster, ex):
"""Shutdown all instances and update cluster status."""
LOG.info(_LI("Cluster '%(name)s' creation rollback "
"(reason: %(reason)s)"),
{'name': cluster.name, 'reason': ex})
# TODO(starodubcevna): Need to add LOG.warning to upper level in next
# commits
LOG.debug("Cluster {name} creation rollback "
"(reason: {reason})".format(name=cluster.name,
reason=ex))
self.shutdown_cluster(cluster)
@ -166,9 +168,11 @@ class HeatEngine(e.Engine):
maximize the chance of rollback success.
"""
LOG.info(_LI("Cluster '%(name)s' scaling rollback "
"(reason: %(reason)s)"),
{'name': cluster.name, 'reason': ex})
# TODO(starodubcevna): Need to add LOG.warning to upper level in next
# commits
LOG.debug("Cluster {name} scaling rollback "
"(reason: {reason})".format(name=cluster.name,
reason=ex))
for ng in rollback_count:
if rollback_count[ng] > target_count[ng]:
@ -184,7 +188,8 @@ class HeatEngine(e.Engine):
stack = heat.get_stack(cluster.name)
heat.wait_stack_completion(stack)
except heat_exc.HTTPNotFound:
LOG.warn(_LW('Did not found stack for cluster %s') % cluster.name)
LOG.warning(_LW('Did not find stack for cluster {cluster_name}')
.format(cluster_name=cluster.name))
self._clean_job_executions(cluster)

View File

@ -57,9 +57,11 @@ def init_instances_ips(instance):
# NOTE(aignatov): Once bug #1262529 is fixed this 'if' block should be
# reviewed and reformatted again, probably removed completely.
if CONF.use_neutron and not (management_ip and internal_ip):
LOG.debug("Instance %s doesn't contain yet Floating IP or Internal IP."
" Floating IP=%s, Internal IP=%s. Trying to get via Neutron."
% (server.name, management_ip, internal_ip))
LOG.debug("Instance {instance_name} doesn't yet contain Floating "
"IP or Internal IP. Floating IP={mgmt_ip}, Internal IP="
"{internal_ip}. Trying to get via Neutron.".format(
instance_name=server.name, mgmt_ip=management_ip,
internal_ip=internal_ip))
neutron_client = neutron.client()
ports = neutron_client.list_ports(device_id=server.id)["ports"]
if ports:
@ -70,15 +72,17 @@ def init_instances_ips(instance):
fl_ip = fl_ips[0]
if not internal_ip:
internal_ip = fl_ip['fixed_ip_address']
LOG.debug('Found fixed IP %s for %s' % (internal_ip,
server.name))
LOG.debug('Found fixed IP {internal_ip} for {server}'
.format(internal_ip=internal_ip,
server=server.name))
# Zeroing management_ip if Sahara in private network
if not CONF.use_floating_ips:
management_ip = internal_ip
elif not management_ip:
management_ip = fl_ip['floating_ip_address']
LOG.debug('Found floating IP %s for %s' % (management_ip,
server.name))
LOG.debug('Found floating IP {mgmt_ip} for {server}'
.format(mgmt_ip=management_ip,
server=server.name))
conductor.instance_update(context.ctx(), instance,
{"management_ip": management_ip,

View File

@ -166,13 +166,13 @@ def ops_error_handler(description):
# check if cluster still exists (it might have been removed)
if cluster is None or cluster.status == 'Deleting':
LOG.debug(
"Cluster id=%(id)s was deleted or marked for "
"deletion. Canceling current operation.",
{"id": cluster_id})
"Cluster id={id} was deleted or marked for "
"deletion. Canceling current operation.".format(
id=cluster_id))
return
msg = six.text_type(ex)
LOG.exception(
LOG.error(
_LE("Error during operating on cluster {name} (reason: "
"{reason})").format(name=cluster.name, reason=msg))
@ -189,12 +189,12 @@ def ops_error_handler(description):
# removed during rollback)
if cluster is None or cluster.status == 'Deleting':
LOG.debug(
"Cluster id=%(id)s was deleted or marked for "
"deletion. Canceling current operation.",
{"id": cluster_id})
"Cluster id={id} was deleted or marked for "
"deletion. Canceling current operation."
.format(id=cluster_id))
return
LOG.exception(
LOG.error(
_LE("Error during rollback of cluster {name} (reason:"
" {reason})").format(name=cluster.name,
reason=six.text_type(rex)))
@ -333,6 +333,6 @@ def _delete_job_execution(job_execution_id):
# job_execution was deleted already, nothing to do
return
except exceptions.CancelingFailed:
LOG.error(_LE("Job execution %s can't be cancelled in time. "
"Deleting it anyway."), job_execution_id)
LOG.error(_LE("Job execution {j_id} can't be cancelled in time. "
"Deleting it anyway.").format(j_id=job_execution_id))
conductor.job_execution_destroy(context.ctx(), job_execution_id)
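The ops error handler above switches LOG.exception to LOG.error. Logger.exception logs at ERROR level and appends the active traceback; since these messages already embed the exception text as the reason, the commit appears to prefer the more compact error() call. A minimal sketch with a placeholder operation:

import logging

LOG = logging.getLogger(__name__)


def run_operation(cluster_name, operation):
    try:
        operation()
    except Exception as ex:
        # The reason is already part of the message, so plain error() is used
        # instead of exception(), which would also dump the full traceback.
        LOG.error("Error during operating on cluster {name} "
                  "(reason: {reason})".format(name=cluster_name, reason=ex))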

View File

@ -82,23 +82,23 @@ def terminate_cluster(ctx, cluster, description):
if CONF.use_identity_api_v3:
trusts.use_os_admin_auth_token(cluster)
LOG.debug('Terminating %(description)s cluster %(cluster)s '
'in "%(status)s" state with id %(id)s',
{'cluster': cluster.name, 'id': cluster.id,
'status': cluster.status,
'description': description})
LOG.debug('Terminating {description} cluster {cluster} '
'in "{status}" state with id {id}'
.format(cluster=cluster.name,
id=cluster.id,
status=cluster.status,
description=description))
try:
ops.terminate_cluster(cluster.id)
except Exception as e:
LOG.warn(_LW('Failed to terminate %(description)s cluster '
'%(cluster)s in "%(status)s" state with id %(id)s: '
'%(error)s.'),
{'cluster': cluster.name,
'id': cluster.id,
'error': six.text_type(e),
'status': cluster.status,
'description': description})
LOG.warning(_LW('Failed to terminate {description} cluster '
'{cluster} in "{status}" state with id {id}: '
'{error}.').format(cluster=cluster.name,
id=cluster.id,
error=six.text_type(e),
status=cluster.status,
description=description))
else:
if cluster.status != 'AwaitingTermination':
@ -159,8 +159,8 @@ def _make_periodic_tasks():
je = conductor.job_execution_get(ctx, je_id)
if je is None or (je.info['status'] in
edp.JOB_STATUSES_TERMINATED):
LOG.debug('Found zombie proxy user {0}'.format(
user.name))
LOG.debug('Found zombie proxy user {username}'.format(
username=user.name))
p.proxy_user_delete(user_id=user.id)
context.set_ctx(None)
@ -195,8 +195,8 @@ def setup():
if CONF.periodic_enable:
if CONF.periodic_fuzzy_delay:
initial_delay = random.randint(0, CONF.periodic_fuzzy_delay)
LOG.debug("Starting periodic tasks with initial delay '%s' "
"seconds", initial_delay)
LOG.debug("Starting periodic tasks with initial delay {seconds} "
"seconds".format(seconds=initial_delay))
else:
initial_delay = None

View File

@ -58,10 +58,12 @@ def create_trust(trustor,
impersonation=impersonation,
role_names=role_names,
project=project_id)
LOG.debug('Created trust {0}'.format(six.text_type(trust.id)))
LOG.debug('Created trust {trust_id}'.format(
trust_id=six.text_type(trust.id)))
return trust.id
except Exception as e:
LOG.exception(_LE('Unable to create trust (reason: %s)'), e)
LOG.error(_LE('Unable to create trust (reason: {reason})').format(
reason=e))
raise ex.CreationFailed(_('Failed to create trust'))
@ -96,9 +98,11 @@ def delete_trust(trustee, trust_id):
'''
try:
trustee.trusts.delete(trust_id)
LOG.debug('Deleted trust {0}'.format(six.text_type(trust_id)))
LOG.debug('Deleted trust {trust_id}'.format(
trust_id=six.text_type(trust_id)))
except Exception as e:
LOG.exception(_LE('Unable to delete trust (reason: %s)'), e)
LOG.error(_LE('Unable to delete trust (reason: {reason})').format(
reason=e))
raise ex.DeletionFailed(
_('Failed to delete trust {0}').format(trust_id))
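The trusts hunk above also replaces positional '{0}' placeholders with named ones. For marked, translatable strings, named fields let translators reorder arguments safely; for DEBUG lines like these the gain is mostly readability. A small side-by-side comparison with an illustrative value:

import logging

LOG = logging.getLogger(__name__)


def log_trust(trust_id):
    # Positional placeholder: terse, but the reader has to count arguments.
    LOG.debug('Created trust {0}'.format(trust_id))
    # Named placeholder, as preferred throughout this commit.
    LOG.debug('Created trust {trust_id}'.format(trust_id=trust_id))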

View File

@ -99,8 +99,8 @@ def _attach_volumes_to_node(node_group, instance):
ctx, instance, size, volume_type, display_name,
node_group.volumes_availability_zone)
devices.append(device)
LOG.debug("Attached volume %s to instance %s" %
(device, instance.instance_id))
LOG.debug("Attached volume {device} to instance {uuid}".format(
device=device, uuid=instance.instance_id))
_await_attach_volumes(instance, devices)
@ -176,11 +176,12 @@ def _find_instance_volume_devices(instance):
@cpo.event_wrapper(mark_successful_on_exit=True)
def _mount_volume_to_node(instance, idx, device):
LOG.debug("Mounting volume %s to instance %s" %
(device, instance.instance_name))
LOG.debug("Mounting volume {device} to instance {id}".format(
device=device, id=instance.instance_id))
mount_point = instance.node_group.storage_paths()[idx]
_mount_volume(instance, device, mount_point)
LOG.debug("Mounted volume to instance %s" % instance.instance_id)
LOG.debug("Mounted volume to instance {id}".format(
id=instance.instance_id))
def _mount_volume(instance, device_path, mount_point):
@ -201,8 +202,8 @@ def _mount_volume(instance, device_path, mount_point):
r.execute_command('sudo mount %s %s %s' %
(mount_opts, device_path, mount_point))
except Exception:
LOG.error(_LE("Error mounting volume to instance %s"),
instance.instance_id)
LOG.error(_LE("Error mounting volume to instance {id}")
.format(id=instance.instance_id))
raise
@ -215,34 +216,35 @@ def detach_from_instance(instance):
def _detach_volume(instance, volume_id):
volume = cinder.get_volume(volume_id)
try:
LOG.debug("Detaching volume %s from instance %s" % (
volume_id, instance.instance_name))
LOG.debug("Detaching volume {id} from instance {instance}".format(
id=volume_id, instance=instance.instance_name))
nova.client().volumes.delete_server_volume(instance.instance_id,
volume_id)
except Exception:
LOG.exception(_LE("Can't detach volume %s"), volume.id)
LOG.error(_LE("Can't detach volume {id}").format(id=volume.id))
detach_timeout = CONF.detach_volume_timeout
LOG.debug("Waiting %d seconds to detach %s volume" % (detach_timeout,
volume_id))
LOG.debug("Waiting {timeout} seconds to detach {id} volume".format(
timeout=detach_timeout, id=volume_id))
s_time = tu.utcnow()
while tu.delta_seconds(s_time, tu.utcnow()) < detach_timeout:
volume = cinder.get_volume(volume_id)
if volume.status not in ['available', 'error']:
context.sleep(2)
else:
LOG.debug("Volume %s has been detached" % volume_id)
LOG.debug("Volume {id} has been detached".format(id=volume_id))
return
else:
LOG.warn(_LW("Can't detach volume %(volume)s. "
"Current status of volume: %(status)s"),
{'volume': volume_id, 'status': volume.status})
LOG.warning(_LW("Can't detach volume {volume}. "
"Current status of volume: {status}").format(
volume=volume_id, status=volume.status))
def _delete_volume(volume_id):
LOG.debug("Deleting volume %s" % volume_id)
LOG.debug("Deleting volume {volume}".format(volume=volume_id))
volume = cinder.get_volume(volume_id)
try:
volume.delete()
except Exception:
LOG.exception(_LE("Can't delete volume %s"), volume.id)
LOG.error(_LE("Can't delete volume {volume}").format(
volume=volume.id))

View File

@ -49,7 +49,7 @@ def get_swift_configs():
result = [cfg for cfg in configs if cfg['value']]
LOG.info(_LI("Swift would be integrated with the following "
"params: %s"), result)
"params: {result}").format(result=result))
return result

View File

@ -73,7 +73,8 @@ class BaseWalkMigrationTestCase(object):
sa.cleanup()
getattr(command, alembic_command)(*args, **kwargs)
res = buf.getvalue().strip()
LOG.debug('Alembic command `%s` returns: %s' % (alembic_command, res))
LOG.debug('Alembic command {command} returns: {result}'.format(
command=alembic_command, result=res))
sa.cleanup()
return res
@ -190,8 +191,8 @@ class BaseWalkMigrationTestCase(object):
if check:
check(engine, data)
except Exception:
LOG.error("Failed to migrate to version %s on engine %s" %
(version, engine))
LOG.error("Failed to migrate to version {version} on engine "
"{engine}".format(version=version, engine=engine))
raise

View File

@ -21,7 +21,7 @@ from oslo_log import log
from sahara import context
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.i18n import _LI
from sahara.i18n import _LW
from sahara.utils.openstack import nova
from sahara.utils import xmlutils as x
@ -64,7 +64,8 @@ CONF.register_opts(opts)
def _read_swift_topology():
LOG.debug("Reading Swift nodes topology from %s", CONF.swift_topology_file)
LOG.debug("Reading Swift nodes topology from {config}".format(
config=CONF.swift_topology_file))
topology = {}
try:
with open(CONF.swift_topology_file) as f:
@ -75,16 +76,16 @@ def _read_swift_topology():
(host, path) = line.split()
topology[host] = path
except IOError:
LOG.debug("Unable to read Swift nodes topology from %s",
CONF.swift_topology_file)
LOG.warning(_LW("Unable to read Swift nodes topology from {config}")
.format(config=CONF.swift_topology_file))
return {}
return topology
def _read_compute_topology():
LOG.debug("Reading compute nodes topology from %s",
CONF.compute_topology_file)
LOG.debug("Reading compute nodes topology from {config}".format(
config=CONF.compute_topology_file))
ctx = context.ctx()
tenant_id = str(ctx.tenant_id)
topology = {}
@ -145,16 +146,16 @@ def vm_awareness_core_config():
if param:
param['value'] = 'org.apache.hadoop.net.NetworkTopology'
LOG.info(_LI("Vm awareness will add following configs in core-site "
"params: %s"), result)
LOG.debug("Vm awareness will add the following configs in core-site "
"params: {result}".format(result=result))
return result
def vm_awareness_mapred_config():
c = x.load_hadoop_xml_defaults('topology/resources/mapred-template.xml')
result = [cfg for cfg in c if cfg['value']]
LOG.info(_LI("Vm awareness will add following configs in map-red "
"params: %s"), result)
LOG.debug("Vm awareness will add the following configs in map-red "
"params: {result}".format(result=result))
return result

View File

@ -63,7 +63,8 @@ class Rest(flask.Blueprint):
def handler(**kwargs):
context.set_ctx(None)
LOG.debug("Rest.route.decorator.handler, kwargs=%s", kwargs)
LOG.debug("Rest.route.decorator.handler, kwargs={kwargs}"
.format(kwargs=kwargs))
_init_resp_type(file_upload)
@ -201,9 +202,9 @@ def get_request_args():
def abort_and_log(status_code, descr, exc=None):
LOG.error(_LE("Request aborted with status code %(code)s and "
"message '%(message)s'"),
{'code': status_code, 'message': descr})
LOG.error(_LE("Request aborted with status code {code} and "
"message '{message}'").format(code=status_code,
message=descr))
if exc is not None:
LOG.error(traceback.format_exc())
@ -225,9 +226,9 @@ def render_error_message(error_code, error_message, error_name):
def internal_error(status_code, descr, exc=None):
LOG.error(_LE("Request aborted with status code %(code)s and "
"message '%(message)s'"),
{'code': status_code, 'message': descr})
LOG.error(_LE("Request aborted with status code {code} and "
"message '{message}'").format(code=status_code,
message=descr))
if exc is not None:
LOG.error(traceback.format_exc())
@ -242,9 +243,11 @@ def internal_error(status_code, descr, exc=None):
def bad_request(error):
error_code = 400
LOG.debug("Validation Error occurred: "
"error_code=%s, error_message=%s, error_name=%s",
error_code, error.message, error.code)
LOG.error(_LE("Validation Error occurred: "
"error_code={code}, error_message={message}, "
"error_name={name}").format(code=error_code,
message=error.message,
name=error.code))
return render_error_message(error_code, error.message, error.code)
@ -252,9 +255,11 @@ def bad_request(error):
def access_denied(error):
error_code = 403
LOG.debug("Access Denied: "
"error_code=%s, error_message=%s, error_name=%s",
error_code, error.message, error.code)
LOG.error(_LE("Access Denied: "
"error_code={code}, error_message={message}, "
"error_name={name}").format(code=error_code,
message=error.message,
name=error.code))
return render_error_message(error_code, error.message, error.code)
@ -262,8 +267,10 @@ def access_denied(error):
def not_found(error):
error_code = 404
LOG.debug("Not Found exception occurred: "
"error_code=%s, error_message=%s, error_name=%s",
error_code, error.message, error.code)
LOG.error(_LE("Not Found exception occurred: "
"error_code={code}, error_message={message}, "
"error_name={name}").format(code=error_code,
message=error.message,
name=error.code))
return render_error_message(error_code, error.message, error.code)

View File

@ -103,8 +103,8 @@ def change_cluster_status(cluster, status, status_description=None):
cluster = conductor.cluster_update(ctx, cluster, update_dict)
LOG.info(_LI("Cluster status has been changed: id=%(id)s, New status="
"%(status)s"), {'id': cluster.id, 'status': cluster.status})
LOG.info(_LI("Cluster status has been changed: id={id}, New status="
"{status}").format(id=cluster.id, status=cluster.status))
sender.notify(ctx, cluster.id, cluster.name, cluster.status,
"update")
@ -195,9 +195,9 @@ def await_process(timeout, sleeping_time, op_name, check_object):
consumed = _get_consumed(start_time)
if func(*args, **kwargs):
LOG.info(
_LI("Operation %(op_name)s was successfully executed "
"in seconds: %(sec)s"), {'op_name': op_name,
'sec': consumed})
_LI("Operation {op_name} was successfully executed "
"in {sec} seconds").format(op_name=op_name,
sec=consumed))
return
if not check_cluster_exists(cluster):

View File

@ -73,10 +73,10 @@ def notify(context, cluster_id, cluster_name, cluster_status, ev_type):
if not cfg.CONF.enable_notifications:
return
LOG.debug("Notification about cluster (id=%(id)s, name=%(name)s, "
"type=%(type)s, status = %(status)s) is going to be sent",
{'id': cluster_id, 'name': cluster_name, 'type': ev_type,
'status': cluster_status})
LOG.debug("Notification about cluster (id={id}, name={name}, "
"type={type}, status = {status}) is going to be sent"
.format(id=cluster_id, name=cluster_name, type=ev_type,
status=cluster_status))
level = CONF.notification_level

View File

@ -50,15 +50,16 @@ CONF.register_opts(opts, group=cinder_group)
def validate_config():
if CONF.cinder.api_version == 1:
LOG.warn(_LW('The Cinder v1 API is deprecated and will be removed '
'after the Juno release. You should set '
'cinder.api_version=2 in your sahara.conf file.'))
LOG.warning(_LW('The Cinder v1 API is deprecated and will be removed '
'after the Juno release. You should set '
'cinder.api_version=2 in your sahara.conf file.'))
elif CONF.cinder.api_version != 2:
LOG.warn(_LW('Unsupported Cinder API version: %(bad)s. Please set a '
'correct value for cinder.api_version in your sahara.conf'
' file (currently supported versions are: %(supported)s).'
' Falling back to Cinder API version 2.'),
{'bad': CONF.cinder.api_version, 'supported': [1, 2]})
LOG.warning(_LW('Unsupported Cinder API version: {bad}. Please set a '
'correct value for cinder.api_version in your '
'sahara.conf file (currently supported versions are: '
'{supported}). Falling back to Cinder API version 2.')
.format(bad=CONF.cinder.api_version,
supported=[1, 2]))
CONF.set_override('api_version', 2, group='cinder')

View File

@ -263,7 +263,7 @@ def proxy_user_create(username):
domain = domain_for_proxy()
password = six.text_type(uuid.uuid4())
admin.users.create(name=username, password=password, domain=domain.id)
LOG.debug('created proxy user {0}'.format(username))
LOG.debug('Created proxy user {username}'.format(username=username))
return password
@ -290,4 +290,4 @@ def proxy_user_delete(username=None, user_id=None):
'when searching for user %s'))
user_id = user_list[0].id
admin.users.delete(user_id)
LOG.debug('deleted proxy user id {0}'.format(user_id))
LOG.debug('Deleted proxy user id {user_id}'.format(user_id=user_id))

View File

@ -90,14 +90,16 @@ def _connect(host, username, private_key, proxy_command=None,
proxy = None
if proxy_command:
LOG.debug('creating proxy using command: %s', proxy_command)
LOG.debug('Creating proxy using command: {command}'.format(
command=proxy_command))
proxy = paramiko.ProxyCommand(proxy_command)
if gateway_host:
_proxy_ssh = paramiko.SSHClient()
_proxy_ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
LOG.debug('connecting to proxy gateway at: %s', gateway_host)
LOG.debug('Connecting to proxy gateway at: {gateway}'.format(
gateway=gateway_host))
_proxy_ssh.connect(gateway_host, username=gateway_image_username,
pkey=private_key, sock=proxy)
@ -197,38 +199,38 @@ def _get_http_client(host, port, proxy_command=None, gateway_host=None,
global _sessions
_http_session = _sessions.get((host, port), None)
LOG.debug('cached HTTP session for {0}:{1} is {2}'.format(host, port,
_http_session))
LOG.debug('Cached HTTP session for {host}:{port} is {session}'.format(
host=host, port=port, session=_http_session))
if not _http_session:
if gateway_host:
_http_session = _get_proxy_gateway_http_session(
gateway_host, gateway_username,
gateway_private_key, host, port, proxy_command)
LOG.debug('created ssh proxied HTTP session for {0}:{1}'
.format(host, port))
LOG.debug('Created ssh proxied HTTP session for {host}:{port}'
.format(host=host, port=port))
elif proxy_command:
# can return a new session here because it actually uses
# the same adapter (and same connection pools) for a given
# host and port tuple
_http_session = _get_proxied_http_session(
proxy_command, host, port=port)
LOG.debug('created proxied HTTP session for {0}:{1}'
.format(host, port))
LOG.debug('Created proxied HTTP session for {host}:{port}'
.format(host=host, port=port))
else:
# need to cache the sessions that are not proxied through
# HTTPRemoteWrapper so that a new session with a new HTTPAdapter
# and associated pools is not recreated for each HTTP invocation
_http_session = requests.Session()
LOG.debug('created standard HTTP session for {0}:{1}'
.format(host, port))
LOG.debug('Created standard HTTP session for {host}:{port}'
.format(host=host, port=port))
adapter = requests.adapters.HTTPAdapter()
for prefix in ['http://', 'https://']:
_http_session.mount(prefix + '%s:%s' % (host, port),
adapter)
LOG.debug('caching session {0} for {1}:{2}'
.format(_http_session, host, port))
LOG.debug('Caching session {session} for {host}:{port}'
.format(session=_http_session, host=host, port=port))
_sessions[(host, port)] = _http_session
return _http_session
@ -315,7 +317,8 @@ def _read_file_from(remote_file, run_as_root=False):
try:
return _read_file(_ssh.open_sftp(), fl)
except IOError:
LOG.error(_LE('Can\'t read file "%s"') % remote_file)
LOG.error(_LE("Can't read file {filename}").format(
filename=remote_file))
raise
finally:
if run_as_root:
@ -336,9 +339,9 @@ def _execute_on_vm_interactive(cmd, matcher):
buf = ''
channel = _ssh.invoke_shell()
LOG.debug('channel is {0}'.format(channel))
LOG.debug('Channel is {channel}'.format(channel=channel))
try:
LOG.debug('sending cmd {0}'.format(cmd))
LOG.debug('Sending cmd {command}'.format(command=cmd))
channel.send(cmd + '\n')
while not matcher.is_eof(buf):
buf += channel.recv(4096)
@ -347,7 +350,7 @@ def _execute_on_vm_interactive(cmd, matcher):
channel.send(response + '\n')
buf = ''
finally:
LOG.debug('closing channel')
LOG.debug('Closing channel')
channel.close()
@ -419,7 +422,8 @@ def _proxy_gateway_func(gateway_host, gateway_username,
class ProxiedHTTPAdapter(adapters.HTTPAdapter):
def __init__(self, create_process_func, host, port):
super(ProxiedHTTPAdapter, self).__init__()
LOG.debug('HTTP adapter created for {0}:{1}'.format(host, port))
LOG.debug('HTTP adapter created for {host}:{port}'.format(host=host,
port=port))
self.create_process_func = create_process_func
self.port = port
self.host = host
@ -432,13 +436,14 @@ class ProxiedHTTPAdapter(adapters.HTTPAdapter):
if http_conn.sock is None:
if hasattr(http_conn, 'connect'):
sock = self._connect()
LOG.debug('HTTP connection {0} getting new '
'netcat socket {1}'.format(http_conn, sock))
LOG.debug('HTTP connection {connection} getting new '
'netcat socket {socket}'.format(
connection=http_conn, socket=sock))
http_conn.sock = sock
else:
if hasattr(http_conn.sock, 'is_netcat_socket'):
LOG.debug('pooled http connection has existing '
'netcat socket. resetting pipe...')
LOG.debug('Pooled http connection has existing '
'netcat socket. Resetting pipe')
http_conn.sock.reset()
pool_conn._put_conn(http_conn)
@ -446,13 +451,13 @@ class ProxiedHTTPAdapter(adapters.HTTPAdapter):
return pool_conn
def close(self):
LOG.debug('Closing HTTP adapter for {0}:{1}'
.format(self.host, self.port))
LOG.debug('Closing HTTP adapter for {host}:{port}'
.format(host=self.host, port=self.port))
super(ProxiedHTTPAdapter, self).close()
def _connect(self):
LOG.debug('Returning netcat socket for {0}:{1}'
.format(self.host, self.port))
LOG.debug('Returning netcat socket for {host}:{port}'
.format(host=self.host, port=self.port))
rootwrap_command = CONF.rootwrap_command if CONF.use_rootwrap else ''
return NetcatSocket(self.create_process_func, rootwrap_command)
@ -546,7 +551,7 @@ class InstanceInteropHelper(remote.Remote):
neutron_info['tenant'] = ctx.tenant_name
neutron_info['host'] = instance.management_ip
LOG.debug('Returning neutron info: {0}'.format(neutron_info))
LOG.debug('Returning neutron info: {info}'.format(info=neutron_info))
return neutron_info
def _build_proxy_command(self, command, instance=None, port=None,
@ -572,7 +577,8 @@ class InstanceInteropHelper(remote.Remote):
try:
command = command.format(**keywords)
except KeyError as e:
LOG.error(_LE('Invalid keyword in proxy_command: %s'), str(e))
LOG.error(_LE('Invalid keyword in proxy_command: {result}').format(
result=e))
# Do not give more details to the end-user
raise ex.SystemError('Misconfiguration')
if rootwrap_command:
@ -757,7 +763,8 @@ class InstanceInteropHelper(remote.Remote):
self._run_s(_execute_on_vm_interactive, timeout, cmd, matcher)
def _log_command(self, str):
LOG.debug('[%s] %s' % (self.instance.instance_name, str))
LOG.debug('[{instance}] {command}'.format(
instance=self.instance.instance_name, command=str))
class BulkInstanceInteropHelper(InstanceInteropHelper):