Merge branch 'defect/configure_hw_subtask_issue'

Scott Hussey
2017-07-04 17:11:24 -05:00
7 changed files with 221 additions and 30 deletions

View File

@@ -0,0 +1,13 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@@ -0,0 +1,49 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os

import click


@click.group()
@click.option('--debug/--no-debug', default=False)
@click.option('--token')
@click.option('--url')
@click.pass_context
def drydock(ctx, debug, token, url):
    ctx.obj['DEBUG'] = debug

    # Fall back to the DD_TOKEN / DD_URL environment variables when the
    # options are not supplied on the command line.
    if not token:
        ctx.obj['TOKEN'] = os.environ['DD_TOKEN']
    else:
        ctx.obj['TOKEN'] = token

    if not url:
        ctx.obj['URL'] = os.environ['DD_URL']
    else:
        ctx.obj['URL'] = url


@drydock.group()
def create():
    pass


@drydock.group()
def list():
    pass


@drydock.group()
def show():
    pass


@create.command()
def design
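For context, the group above pulls its token and endpoint either from --token/--url or from the DD_TOKEN/DD_URL environment variables and stashes them on ctx.obj for the create/list/show sub-groups. A minimal sketch of exercising it with click's test runner, assuming a hypothetical module path and dummy option values (nothing below is taken from this diff):

from click.testing import CliRunner

from drydock_client.cli import drydock  # hypothetical module path for the group above

runner = CliRunner()
# ctx.obj starts as an empty dict so the group callback can store DEBUG/TOKEN/URL on it;
# --token/--url are passed explicitly so DD_TOKEN/DD_URL are not required.
result = runner.invoke(
    drydock,
    ['--token', 'dummy-token', '--url', 'http://localhost:9000', 'list'],
    obj={})
print(result.exit_code)
print(result.output)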

View File

@@ -220,21 +220,24 @@ class MaasNodeDriver(NodeDriver):
runner.start()
subtasks.append(subtask.get_id())
-running_subtasks = len(subtasks)
+cleaned_subtasks = []
attempts = 0
max_attempts = config.conf.timeouts.identify_node * (60 // config.conf.poll_interval)
worked = failed = False
self.logger.debug("Polling for subtask completion every %d seconds, a max of %d polls." %
(config.conf.poll_interval, max_attempts))
-while running_subtasks > 0 and attempts < max_attempts:
+while len(cleaned_subtasks) < len(subtasks) and attempts < max_attempts:
for t in subtasks:
+if t in cleaned_subtasks:
+continue
subtask = self.state_manager.get_task(t)
if subtask.status == hd_fields.TaskStatus.Complete:
-self.logger.info("Task %s to identify node %s complete - status %s" %
-(subtask.get_id(), n, subtask.get_result()))
-running_subtasks = running_subtasks - 1
+self.logger.info("Task %s to identify node complete - status %s" %
+(subtask.get_id(), subtask.get_result()))
+cleaned_subtasks.append(t)
if subtask.result == hd_fields.ActionResult.Success:
result_detail['successful_nodes'].extend(subtask.node_list)
@@ -248,7 +251,7 @@ class MaasNodeDriver(NodeDriver):
time.sleep(config.conf.maasdriver.poll_interval)
attempts = attempts + 1
-if running_subtasks > 0:
+if len(cleaned_subtasks) < len(subtasks):
self.logger.warning("Time out for task %s before all subtask threads complete" % (task.get_id()))
result = hd_fields.ActionResult.DependentFailure
result_detail['detail'].append('Some subtasks did not complete before the timeout threshold')
@@ -292,7 +295,7 @@ class MaasNodeDriver(NodeDriver):
runner.start()
subtasks.append(subtask.get_id())
-running_subtasks = len(subtasks)
+cleaned_subtasks = []
attempts = 0
max_attempts = config.conf.timeouts.configure_hardware * (60 // config.conf.poll_interval)
worked = failed = False
@@ -300,14 +303,17 @@ class MaasNodeDriver(NodeDriver):
self.logger.debug("Polling for subtask completion every %d seconds, a max of %d polls." %
(config.conf.poll_interval, max_attempts))
#TODO Add timeout to config
-while running_subtasks > 0 and attempts < max_attempts:
+while len(cleaned_subtasks) < len(subtasks) and attempts < max_attempts:
for t in subtasks:
+if t in cleaned_subtasks:
+continue
subtask = self.state_manager.get_task(t)
if subtask.status == hd_fields.TaskStatus.Complete:
-self.logger.info("Task %s to commission node %s complete - status %s" %
-(subtask.get_id(), n, subtask.get_result()))
-running_subtasks = running_subtasks - 1
+self.logger.info("Task %s to commission node complete - status %s" %
+(subtask.get_id(), subtask.get_result()))
+cleaned_subtasks.append(t)
if subtask.result == hd_fields.ActionResult.Success:
result_detail['successful_nodes'].extend(subtask.node_list)
@@ -321,7 +327,7 @@ class MaasNodeDriver(NodeDriver):
time.sleep(config.conf.maasdriver.poll_interval)
attempts = attempts + 1
-if running_subtasks > 0:
+if len(cleaned_subtasks) < len(subtasks):
self.logger.warning("Time out for task %s before all subtask threads complete" % (task.get_id()))
result = hd_fields.ActionResult.DependentFailure
result_detail['detail'].append('Some subtasks did not complete before the timeout threshold')
@@ -365,21 +371,24 @@ class MaasNodeDriver(NodeDriver):
runner.start()
subtasks.append(subtask.get_id())
-running_subtasks = len(subtasks)
+cleaned_subtasks = []
attempts = 0
max_attempts = config.conf.timeouts.apply_node_networking * (60 // config.conf.poll_interval)
worked = failed = False
self.logger.debug("Polling for subtask completion every %d seconds, a max of %d polls." %
(config.conf.poll_interval, max_attempts))
-while running_subtasks > 0 and attempts < max_attempts:
+while len(cleaned_subtasks) < len(subtasks) and attempts < max_attempts:
for t in subtasks:
+if t in cleaned_subtasks:
+continue
subtask = self.state_manager.get_task(t)
if subtask.status == hd_fields.TaskStatus.Complete:
-self.logger.info("Task %s to apply networking on node %s complete - status %s" %
-(subtask.get_id(), n, subtask.get_result()))
-running_subtasks = running_subtasks - 1
+self.logger.info("Task %s to apply networking complete - status %s" %
+(subtask.get_id(), subtask.get_result()))
+cleaned_subtasks.append(t)
if subtask.result == hd_fields.ActionResult.Success:
result_detail['successful_nodes'].extend(subtask.node_list)
@@ -393,7 +402,7 @@ class MaasNodeDriver(NodeDriver):
time.sleep(config.conf.poll_interval)
attempts = attempts + 1
-if running_subtasks > 0:
+if len(cleaned_subtasks) < len(subtasks):
self.logger.warning("Time out for task %s before all subtask threads complete" % (task.get_id()))
result = hd_fields.ActionResult.DependentFailure
result_detail['detail'].append('Some subtasks did not complete before the timeout threshold')
@@ -437,7 +446,7 @@ class MaasNodeDriver(NodeDriver):
runner.start()
subtasks.append(subtask.get_id())
-running_subtasks = len(subtasks)
+cleaned_subtasks = []
attempts = 0
max_attempts = config.conf.timeouts.apply_node_platform * (60 // config.conf.poll_interval)
worked = failed = False
@@ -445,14 +454,17 @@ class MaasNodeDriver(NodeDriver):
self.logger.debug("Polling for subtask completion every %d seconds, a max of %d polls." %
(config.conf.poll_interval, max_attempts))
-while running_subtasks > 0 and attempts < max_attempts:
+while len(cleaned_subtasks) < len(subtasks) and attempts < max_attempts:
for t in subtasks:
+if t in cleaned_subtasks:
+continue
subtask = self.state_manager.get_task(t)
if subtask.status == hd_fields.TaskStatus.Complete:
-self.logger.info("Task %s to configure node %s platform complete - status %s" %
-(subtask.get_id(), n, subtask.get_result()))
-running_subtasks = running_subtasks - 1
+self.logger.info("Task %s to configure node platform complete - status %s" %
+(subtask.get_id(), subtask.get_result()))
+cleaned_subtasks.append(t)
if subtask.result == hd_fields.ActionResult.Success:
result_detail['successful_nodes'].extend(subtask.node_list)
@@ -466,7 +478,7 @@ class MaasNodeDriver(NodeDriver):
time.sleep(config.conf.poll_interval)
attempts = attempts + 1
-if running_subtasks > 0:
+if len(cleaned_subtasks) < len(subtasks):
self.logger.warning("Time out for task %s before all subtask threads complete" % (task.get_id()))
result = hd_fields.ActionResult.DependentFailure
result_detail['detail'].append('Some subtasks did not complete before the timeout threshold')
@@ -510,7 +522,7 @@ class MaasNodeDriver(NodeDriver):
runner.start()
subtasks.append(subtask.get_id())
-running_subtasks = len(subtasks)
+cleaned_subtasks = []
attempts = 0
max_attempts = config.conf.timeouts.deploy_node * (60 // config.conf.poll_interval)
worked = failed = False
@@ -518,14 +530,17 @@ class MaasNodeDriver(NodeDriver):
self.logger.debug("Polling for subtask completion every %d seconds, a max of %d polls." %
(config.conf.poll_interval, max_attempts))
-while running_subtasks > 0 and attempts < max_attempts:
+while len(cleaned_subtasks) < len(subtasks) and attempts < max_attempts:
for t in subtasks:
+if t in cleaned_subtasks:
+continue
subtask = self.state_manager.get_task(t)
if subtask.status == hd_fields.TaskStatus.Complete:
self.logger.info("Task %s to deploy node %s complete - status %s" %
(subtask.get_id(), n, subtask.get_result()))
running_subtasks = running_subtasks - 1
self.logger.info("Task %s to deploy node complete - status %s" %
(subtask.get_id(), subtask.get_result()))
cleaned_subtasks.append(t)
if subtask.result == hd_fields.ActionResult.Success:
result_detail['successful_nodes'].extend(subtask.node_list)
@@ -539,7 +554,7 @@ class MaasNodeDriver(NodeDriver):
time.sleep(max_attempts)
attempts = attempts + 1
-if running_subtasks > 0:
+if len(cleaned_subtasks) < len(subtasks):
self.logger.warning("Time out for task %s before all subtask threads complete" % (task.get_id()))
result = hd_fields.ActionResult.DependentFailure
result_detail['detail'].append('Some subtasks did not complete before the timeout threshold')
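The same change repeats for each node action above (identify, commission, apply networking, configure platform, deploy): the old code decremented a running_subtasks counter and logged the per-node loop variable n, which is stale by the time the polling loop runs; the new code records finished subtask ids in cleaned_subtasks and exits once every subtask has been seen or the poll budget is exhausted. Distilled to a standalone sketch that assumes only what the hunks above show (the function name, the plain 'complete' status string, and the constants are simplifications, not the driver's real identifiers):

import time

POLL_INTERVAL = 10   # seconds between polls (config.conf.poll_interval in the driver)
MAX_MINUTES = 5      # per-action timeout in minutes (config.conf.timeouts.* in the driver)


def wait_for_subtasks(state_manager, subtasks):
    """Poll until every subtask id in `subtasks` is complete or the poll budget runs out.

    Returns True if all subtasks completed before the timeout threshold.
    """
    cleaned_subtasks = []
    attempts = 0
    max_attempts = MAX_MINUTES * (60 // POLL_INTERVAL)

    while len(cleaned_subtasks) < len(subtasks) and attempts < max_attempts:
        for t in subtasks:
            if t in cleaned_subtasks:
                continue
            subtask = state_manager.get_task(t)
            if subtask.status == 'complete':
                cleaned_subtasks.append(t)
        time.sleep(POLL_INTERVAL)
        attempts = attempts + 1

    return len(cleaned_subtasks) == len(subtasks)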

Binary file not shown.

View File

@@ -0,0 +1,88 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json

import requests

from .session import DrydockSession


class DrydockClient(object):
    """
    A client for the Drydock API

    :param string host: Hostname or IP address of Drydock API
    :param string port: Port number of Drydock API
    :param string version: API version to access
    :param string token: Authentication token to use
    :param string marker: (optional) External marker to include with requests
    """

    def __init__(self, host=None, port=9000, version='1.0', token=None, marker=None):
        self.version = version
        self.session = DrydockSession(token=token, ext_marker=marker)
        self.base_url = "http://%s:%d/api/%s/" % (host, port, version)

    def send_get(self, api_url, query=None):
        """
        Send a GET request to Drydock.

        :param string api_url: The URL string following the hostname and API prefix
        :param dict query: A dict of k, v pairs to add to the query string

        :return: A requests.Response object
        """
        resp = requests.get(self.base_url + api_url, params=query)

        return resp

    def send_post(self, api_url, query=None, body=None, data=None):
        """
        Send a POST request to Drydock. If both body and data are specified,
        body will be used.

        :param string api_url: The URL string following the hostname and API prefix
        :param dict query: A dict of k, v parameters to add to the query string
        :param string body: A string to use as the request body. Will be treated as raw
        :param data: Something json.dumps() can serialize. Result will be used as the request body

        :return: A requests.Response object
        """
        if body is not None:
            resp = requests.post(self.base_url + api_url, params=query, data=body)
        else:
            resp = requests.post(self.base_url + api_url, params=query, json=data)

        return resp

    def get_designs(self):
        """
        Get list of Drydock design_ids

        :return: A list of string design_ids
        """

    def get_design(self, design_id):
    def create_design(self):
    def get_parts(self, design_id):
    def get_part(self, design_id, kind, key):
    def load_parts(self, design_id, yaml_string=None):
    def get_tasks(self):
    def get_task(self, task_id):
    def create_task(self, design_id, task_action, node_filter=None):
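As a usage sketch of the client defined above: the host, marker, and the 'designs' resource path below are illustrative values (not taken from this diff), and the import path is likewise a guess.

from drydock_client.client import DrydockClient  # hypothetical import path

client = DrydockClient(host='drydock.example.com', port=9000,
                       token='dummy-token', marker='external-req-123')

# GET http://drydock.example.com:9000/api/1.0/designs
resp = client.send_get('designs')
print(resp.status_code)

# POST with a dict body; send_post() hands it to the json= parameter of requests
resp = client.send_post('designs', data={})
print(resp.status_code)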

View File

@@ -0,0 +1,26 @@
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class DrydockSession(object):
    """
    A session to the Drydock API maintaining credentials and API options

    :param string token: Auth token
    :param string marker: (optional) external context marker
    """

    def __init__(self, token=None, marker=None):
        self.token = token
        self.marker = marker
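DrydockClient above instantiates this session in its constructor; built directly it simply stores the credentials, as in this minimal sketch (the import path and values are illustrative):

from drydock_client.session import DrydockSession  # hypothetical import path

session = DrydockSession(token='dummy-token', marker='external-req-123')
print(session.token, session.marker)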