Merge "Capture metalsmith python logging"

Zuul 2020-07-20 19:05:31 +00:00 committed by Gerrit Code Review
commit 77bd1a910b
2 changed files with 99 additions and 35 deletions

Changed file 1 of 2: the metalsmith-based baremetal provisioning module (Python)

@@ -16,6 +16,8 @@
__metaclass__ = type

from concurrent import futures
import io
import logging

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_cloud_from_module
@@ -182,9 +184,34 @@ options:
    description:
      - Maximum number of instances to provision at once. Set to 0 to have no
        concurrency limit
    type: int
  log_level:
    description:
      - Set the logging level for the log which is available in the
        returned 'logging' result.
    default: info
    choices:
    - debug
    - info
    - warning
    - error
'''

METALSMITH_LOG_MAP = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR
}

BASE_LOG_MAP = {
    'debug': logging.INFO,
    'info': logging.WARNING,
    'warning': logging.WARNING,
    'error': logging.ERROR
}


def _get_source(instance):
    image = instance.get('image')
    return sources.detect(image=image.get('href'),
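
For the verbose settings ('debug' and 'info'), BASE_LOG_MAP keeps the rest of the process one notch quieter than the metalsmith logger, so the captured log stays focused on provisioning output. A tiny standalone snippet, shown only for illustration and not part of the module, makes the asymmetry visible:

    import logging

    # Same maps as in the module, repeated here purely for illustration.
    METALSMITH_LOG_MAP = {'debug': logging.DEBUG, 'info': logging.INFO,
                          'warning': logging.WARNING, 'error': logging.ERROR}
    BASE_LOG_MAP = {'debug': logging.INFO, 'info': logging.WARNING,
                    'warning': logging.WARNING, 'error': logging.ERROR}

    for requested in ('debug', 'info', 'warning', 'error'):
        # e.g. 'debug' -> metalsmith logs at DEBUG while other loggers stay at INFO
        print(requested,
              logging.getLevelName(METALSMITH_LOG_MAP[requested]),
              logging.getLevelName(BASE_LOG_MAP[requested]))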
@@ -310,6 +337,22 @@ def unprovision(provisioner, instances):
    return True


def _configure_logging(log_level):
    log_fmt = ('%(asctime)s %(levelname)s %(name)s: %(message)s')
    urllib_level = logging.CRITICAL
    base_level = BASE_LOG_MAP[log_level]
    metalsmith_level = METALSMITH_LOG_MAP[log_level]

    logging.basicConfig(level=base_level, format=log_fmt)
    logging.getLogger('urllib3.connectionpool').setLevel(urllib_level)

    logger = logging.getLogger('metalsmith')
    logger.setLevel(metalsmith_level)
    log_stream = io.StringIO()
    logger.addHandler(logging.StreamHandler(log_stream))
    return log_stream


def main():
    argument_spec = openstack_full_argument_spec(
        **yaml.safe_load(DOCUMENTATION)['options']
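
The new _configure_logging helper is the heart of the change: it silences urllib3, configures the root logger via basicConfig, and attaches a StreamHandler backed by an in-memory io.StringIO to the 'metalsmith' logger, so everything metalsmith emits can later be read back with getvalue() and returned to Ansible. A minimal standalone sketch of the same pattern (capture_logger is a hypothetical name, not part of the module):

    import io
    import logging

    def capture_logger(name, level=logging.INFO):
        # Attach an in-memory stream handler to the named logger and hand the
        # buffer back to the caller so it can read the text once work is done.
        stream = io.StringIO()
        handler = logging.StreamHandler(stream)
        handler.setFormatter(
            logging.Formatter('%(asctime)s %(levelname)s %(name)s: %(message)s'))
        logger = logging.getLogger(name)
        logger.setLevel(level)
        logger.addHandler(handler)
        return stream

    buf = capture_logger('metalsmith', logging.DEBUG)
    logging.getLogger('metalsmith').debug('provisioning started')
    print(buf.getvalue())  # captured text, ready to hand back to the caller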
@@ -321,44 +364,55 @@ def main():
        **module_kwargs
    )

    log_stream = _configure_logging(module.params['log_level'])

    try:
        sdk, cloud = openstack_cloud_from_module(module)
        provisioner = metalsmith.Provisioner(cloud_region=cloud.config)
        instances = module.params['instances']
        state = module.params['state']
        concurrency = module.params['concurrency']
        timeout = module.params['timeout']
        wait = module.params['wait']
        clean_up = module.params['clean_up']

        if state == 'present':
            changed, nodes = provision(provisioner, instances,
                                       timeout, concurrency, clean_up,
                                       wait)
            instances = [{
                'name': i.node.name or i.uuid,
                'hostname': i.hostname,
                'id': i.uuid,
            } for i in nodes]
            module.exit_json(
                changed=changed,
                msg="{} instances provisioned".format(len(nodes)),
                instances=instances,
                logging=log_stream.getvalue()
            )

        if state == 'reserved':
            changed, nodes = reserve(provisioner, instances, clean_up)
            module.exit_json(
                changed=changed,
                msg="{} instances reserved".format(len(nodes)),
                ids=[node.id for node in nodes],
                instances=instances,
                logging=log_stream.getvalue()
            )

        if state == 'absent':
            changed = unprovision(provisioner, instances)
            module.exit_json(
                changed=changed,
                msg="{} nodes unprovisioned".format(len(instances)),
                logging=log_stream.getvalue()
            )
    except Exception as e:
        module.fail_json(
            msg=str(e),
            logging=log_stream.getvalue()
        )
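
Wrapping the body in try/except is what makes the capture useful on failure: every exit path, including fail_json, returns logging=log_stream.getvalue(), so the metalsmith output reaches the playbook precisely when provisioning breaks and the log matters most.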

Changed file 2 of 2: the node provisioning playbook (YAML)

@@ -100,8 +100,13 @@
    instances: "{{ baremetal_existing.not_found }}"
    state: reserved
    clean_up: true
    log_level: info
  register: baremetal_reserved

- name: Metalsmith log for reserve instances
  debug:
    var: baremetal_reserved.logging

# NOTE(cloudnull): This limits the concurrency so that we're not adding
# more threads than needed.
- name: Set concurrency fact
@@ -119,8 +124,13 @@
    clean_up: false
    timeout: "{{ node_timeout }}"
    concurrency: "{{ runtime_concurrency }}"
    log_level: info
  register: baremetal_provisioned

- name: Metalsmith log for provision instances
  debug:
    var: baremetal_provisioned.logging

- name: Populate environment
  tripleo_baremetal_populate_environment:
    environment: "{{ baremetal_instances.environment }}"