Improve Stack Status Check Loop

Improved the loop that polls Heat for the status of resource actions taken.

Change-Id: I1655b3143121fada985b44b798002ffc96ee9b42
parent e8b268c21b
commit cab06af183
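The improved loop polls Heat at a fixed interval until the stack reaches a terminal state or a per-resource-type wait budget runs out. Below is a minimal sketch of that pattern using the option and status names visible in the diff (heat_poll_interval, resource_creation_timeout_min/max, STATUS_COMPLETE, STATUS_FAIL). The StatusChecker class and wait_for_stack helper are simplified, hypothetical stand-ins for illustration, not the project's HEATIntermediateStatusChecker or _wait_for_heat implementation.

    import time


    class StatusChecker(object):
        # Simplified stand-in for HEATIntermediateStatusChecker in the diff below.
        STATUS_COMPLETE = 'COMPLETE'
        STATUS_FAIL = 'FAILED'
        STATUS_IN_PROGRESS = 'IN_PROGRESS'

        def __call__(self, stack):
            # Assumption for the sketch: Heat stack_status values end in
            # _COMPLETE, _FAILED, or _IN_PROGRESS.
            if stack.stack_status.endswith('_COMPLETE'):
                return self.STATUS_COMPLETE
            if stack.stack_status.endswith('_FAILED'):
                return self.STATUS_FAIL
            return self.STATUS_IN_PROGRESS


    def wait_for_stack(heat_client, stack, poll_interval=10, max_wait_time=100):
        """Poll Heat until the stack completes, fails, or the wait budget is spent.

        poll_interval and max_wait_time correspond to the heat_poll_interval and
        resource_creation_timeout_min/max options changed in this commit.
        """
        status_check = StatusChecker()
        start_time = time.time()
        waiting_time = 0
        status = ''
        while status != StatusChecker.STATUS_COMPLETE \
                and status != StatusChecker.STATUS_FAIL \
                and waiting_time <= max_wait_time:
            time.sleep(poll_interval)
            waiting_time = time.time() - start_time
            # Re-read the stack from Heat on every cycle, then classify it.
            stack = heat_client.get_stack(stack.id)
            status = status_check(stack)
        return stack, status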
AUTHORS (1 change)
@@ -1,3 +1,4 @@
Andrii Ostapenko <andrii.ostapenko@att.com>
Chi Lo <cl566n@att.com>
Ian Wienand <iwienand@redhat.com>
Michael Glaser <mg6596@att.com>
@@ -1,7 +1,13 @@
CHANGES
=======

* Improve Stack Status Check Loop
* Change Heat and Glance client to use internal endpoint
* Restrict dnspython to not use 2.0.0 version
* Use certificate when creating keystone session
* Ensure pip is installed on zuul executor
* Create editable Ranger-Agent Configuration
* Fix glance client initialization Glance get image call is failing in engine
* create new tagging method in ranger-agent makefile
* Dockerfile fix modular variable
* Update Image build process
bindep.txt (1 change, new file)
@@ -0,0 +1 @@
python3-dev [platform:dpkg test]
@@ -169,11 +169,11 @@ class NotifierController(object):
utils.STATUS_SUBMITTED:
error_code = ErrorCode.ORD_002.value
error_msg = ErrorCode.tostring(error_code)
elif kwargs.get('resource-template-name') == \
template_target.get('resource_template_name') and \
(template_target.get('status') == utils.STATUS_SUBMITTED or
template_target.get('resource_operation') ==
kwargs.get('resource-operation')):
elif kwargs.get('resource-template-name') \
== template_target.get('resource_template_name') \
and (template_target.get('status') == utils.STATUS_SUBMITTED
or template_target.get('resource_operation')
== kwargs.get('resource-operation')):
error_code = ErrorCode.ORD_001.value
error_msg = ErrorCode.tostring(error_code)
@@ -95,8 +95,8 @@ class ParsableErrorMiddleware(object):

user_locale = self.best_match_language(req.accept_language)
if (req.accept.best_match(
['application/json', 'application/xml']) ==
'application/xml'):
['application/json', 'application/xml'])
== 'application/xml'):
try:
# simple check xml is valid
fault = etree.fromstring('\n'.join(app_iter))
@@ -105,12 +105,12 @@ class ParsableErrorMiddleware(object):
for fault_string in fault.findall('faultstring'):
fault_string.text = i18n.translate(error,
user_locale)
body = ['<error_message>' + etree.tostring(fault) +
'</error_message>']
body = ['<error_message>' + etree.tostring(fault)
+ '</error_message>']
except etree.XMLSyntaxError as err:
LOG.error(_('Error parsing HTTP response: %s') % err)
body = ['<error_message>%s' % state['status_code'] +
'</error_message>']
body = ['<error_message>%s' % state['status_code']
+ '</error_message>']
state['headers'].append(('Content-Type', 'application/xml'))
else:
try:
@@ -42,16 +42,17 @@ SERVICE_OPTS = [
"used by ranger agent to invoke keystone apis"),
cfg.StrOpt("https_cacert", default=None,
help="Path to CA server certificate for SSL"),
cfg.StrOpt('glance_api_url', default=None,
help="glance api internal url")
]

cfg.CONF.register_opts(SERVICE_OPTS, OPT_GROUP)
cfg.CONF.register_opts([
cfg.StrOpt('glance_api_url', default=None, help="glance api internal url")
])

CONF = cfg.CONF

LOG = logging.getLogger(__name__)
logger = logging.logging.getLogger('keystoneauth')
logger.setLevel(logging.logging.INFO)
body_logger = logging.logging.getLogger('keystoneauth.session.body')
body_logger.setLevel(logging.logging.WARN)


def cached(func):
@@ -79,9 +80,12 @@ def create_keystone_client(args):
project_domain_name=args['project_domain_name'])

if args['https_cacert']:
session = ksc_session.Session(auth=auth, verify=args['https_cacert'])
session = ksc_session.Session(
auth=auth,
verify=args['https_cacert'],
split_loggers=True)
else:
session = ksc_session.Session(auth=auth)
session = ksc_session.Session(auth=auth, split_loggers=True)

return keystone_v3.Client(session=session,
auth_url=args['auth_url'],
@@ -14,6 +14,7 @@
# under the License.

from heatclient import exc as heat_exc
import json

from ord.client.client import Clients
from ord.common import exceptions as exc
@@ -25,8 +26,6 @@ class HeatClient(object):
_kc = None

def __init__(self):
# FIXME: we must not cache any clients because it done(must
# be done) by "Clients"
try:
if HeatClient._kc is None:
HeatClient._kc = Clients().keystone()
@@ -46,13 +45,11 @@ class HeatClient(object):
client, self._kc = Clients().heat(self._kc)
try:
payload = client.stacks.get(stack_id)
# TODO: check behaviour in case it object not exist
except heat_exc.BaseException as e:
raise exc.HEATIntegrationError(
action='stacks.get', details=e.message)
return payload

# TODO: check real heatclient capabilities to lookup objects
def get_stack_by_name(self, name):
for stack in self.get_stacks():
if stack.stack_name != name:
@@ -68,7 +65,9 @@ class HeatClient(object):
response = client.stacks.create(
stack_name=name, template=template)
except heat_exc.BaseException as e:
raise exc.HEATStackCreateError(details=e.message)
heat_err_msg = json.loads(e.message)['error']['message'] \
or e.message
raise exc.HEATStackCreateError(details=heat_err_msg)
return response

def update_stack(self, stack_id, template):
@@ -77,7 +76,9 @@ class HeatClient(object):
try:
response = client.stacks.update(stack_id, template=template)
except heat_exc.BaseException as e:
raise exc.HEATStackUpdateError(details=e.message)
heat_err_msg = json.loads(e.message)['error']['message'] \
or e.message
raise exc.HEATStackUpdateError(details=heat_err_msg)
return response

def delete_stack(self, stack_id):
@@ -85,7 +86,9 @@ class HeatClient(object):
try:
client.stacks.delete(stack_id)
except heat_exc.BaseException as e:
raise exc.HEATStackDeleteError(details=e.message)
heat_err_msg = json.loads(e.message)['error']['message'] \
or e.message
raise exc.HEATStackDeleteError(details=heat_err_msg)

def get_image_data_by_stackid(self, stack_id):
client, self._kc = Clients().heat(self._kc)
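The create/update/delete handlers above now unwrap the error body returned by Heat instead of passing the raw exception text through. A minimal sketch of that extraction, assuming the exception message is a JSON document shaped like {"error": {"message": ...}}; the try/except guard is an addition for the sketch, while the diff itself relies only on the "or e.message" fallback. extract_heat_error is a hypothetical helper name.

    import json


    def extract_heat_error(raw_message):
        """Return the nested Heat error message, falling back to the raw text."""
        try:
            return json.loads(raw_message)['error']['message'] or raw_message
        except (ValueError, KeyError, TypeError):
            # Not JSON, or not the expected shape: keep the original message.
            return raw_message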
@@ -225,47 +225,6 @@ class ClientInitializationException(ORDException):
message_template = 'Failed to initialize Heat'


class RepoTimeoutException(ORDException):
error_code = ERROR_REPO_TIMEOUT
message_template = '[{label}] '\
'Timeout occurred while trying to connect to GIT repo'


class RepoIncorrectURL(ORDException):
error_code = ERROR_REPO_URL
message_template = '[{label}] An error occurred with the GIT repo url. ' \
'Check conf file to confirm URL'


class RepoNotExist(ORDException):
error_code = ERROR_REPO_NOT_EXIST
message_template = '[{label}] '\
'Git repo is incorrect or does not exist'


class FileNotInRepo(ORDException):
error_code = ERROR_FILE_NOT_IN_REPO
message_template = '[{label}] '\
'File does not exist in this Git repo'


class RepoNoPermission(ORDException):
error_code = ERROR_REPO_PERMISSION
message_template = '[{label}] '\
'Permission denied to repo. Check SSH keys'


class RepoUnknownException(ORDException):
error_code = ERROR_REPO_UNKNOWN
message_template = '[{label}] '\
'An unknown repo exception occurred - {unknown}'


class RepoInitializationException(ORDException):
error_code = ERROR_REPO_INIT
message_template = 'Failed to connect and download repo'


class RPCInitializationException(ORDException):
error_code = ERROR_RPC_INIT
message_template = 'Failed to initialize RPC'
@@ -31,7 +31,7 @@ from ord.openstack.common import log as logging
CONF = cfg.CONF

CONF.register_opts([
cfg.IntOpt('heat_poll_interval', default=5,
cfg.IntOpt('heat_poll_interval', default=10,
help='delay in seconds between two consecutive call to '
'heat.stacks.status'),
cfg.IntOpt('resource_status_check_wait', default=10,
@@ -40,9 +40,9 @@ CONF.register_opts([
cfg.Opt('retry_limits',
default='3',
help='number of retry'),
cfg.IntOpt('resource_creation_timeout_min', default=1200,
cfg.IntOpt('resource_creation_timeout_min', default=100,
help='max wait time for flavor and customer stacks'),
cfg.IntOpt('resource_creation_timeout_max', default=14400,
cfg.IntOpt('resource_creation_timeout_max', default=300,
help='max wait time for image stacks'),
cfg.BoolOpt('enable_rds_callback_check',
default=True,
@@ -90,7 +90,7 @@ class WorkerFactory(object):
except Exception as exception:
WorkerThread._init_error = utils.ErrorCode.ORD_017.value
LOG.critical(
"Unexpected error while initializing clients %s" % exception)
"Unexpected error while initializing clients: %s" % exception)
finally:
WorkerThread._threadPool = {}
if WorkerThread._init_error is None:
@@ -98,13 +98,13 @@ class WorkerFactory(object):

@classmethod
def removeWorker(cls, idnr):
LOG.info("Deleting thread : " + str(idnr))
LOG.info("Deleting thread: " + str(idnr))
try:
del WorkerThread._threadPool[idnr]
except KeyError:
LOG.info("Thread was not found for deletion")
raise exc.WorkerThreadError(thread_id=idnr)
LOG.info("Thread was deleted : " + str(idnr))
LOG.info("Thread was deleted: " + str(idnr))

def __init__(self):
LOG.info("initializing WorkerFactory._init_")
@@ -117,8 +117,6 @@ class WorkerFactory(object):
template_type, heat_template):
template_type = template_type.lower()

# FIXME: this code has a none zero to fail in very unexpected
# way
randCrypt = SystemRandom()
threadID = randCrypt.randint(1, 99999999)
if template_type == "hot":
@@ -130,7 +128,6 @@ class WorkerFactory(object):
elif template_type == "ansible":
threadID = -1
else:
# FIXME: too late for such check
raise exc.UnsupportedTemplateTypeError(template=template_type)
return threadID
@@ -185,7 +182,7 @@ class WorkerThread(threading.Thread):

def run(self):
LOG.debug("Thread Starting :: %s", self.threadID)
LOG.debug("operation %s for stack_name %s ",
LOG.debug("operation %s for stack_name %s",
self.operation, self.stack_name)
try:
if self._is_engine_initialized():
@@ -351,7 +348,7 @@ class WorkerThread(threading.Thread):
return self._heat_client.get_stack(stack['id'])

def _update_stack(self, template):
LOG.debug("Update template for stack %s ", self.stack_name)
LOG.debug("Updating template for stack %s ", self.stack_name)

stack = self._heat_client.get_stack_by_name(self.stack_name)
self._heat_client.update_stack(stack.id, template)
@@ -365,39 +362,36 @@ class WorkerThread(threading.Thread):
return stack

def _wait_for_heat(self, stack, operation):
LOG.debug('Wait while HEAT do his job: stack=%s', self.stack_name)
LOG.debug('Stack %s: waiting for Heat to complete', self.stack_name)

poll_interval = CONF.heat_poll_interval
LOG.debug("HEAT poll interval: %s", poll_interval)
max_wait_time = 0
if self.resource_type == 'image':
max_wait_time = CONF.resource_creation_timeout_max
else:
max_wait_time = CONF.resource_creation_timeout_min
LOG.debug("max_wait_time: %s", max_wait_time)

stack_status_transitions = StatusTransitions(stack.stack_status)

start_time = time.time()
waiting_time = 0
status_check = HEATIntermediateStatusChecker(stack, operation)
while status_check(stack) \
status = ''
while status != HEATIntermediateStatusChecker.STATUS_COMPLETE \
and status != HEATIntermediateStatusChecker.STATUS_FAIL \
and (waiting_time <= max_wait_time):
time.sleep(poll_interval)
waiting_time = time.time() - start_time

LOG.debug('%s waiting %s for %s',
self.threadID, waiting_time,
stack.stack_name)
LOG.debug('%s stack status transition: %s',
self.threadID, stack_status_transitions)
LOG.debug('Thread %s: Waiting for %s to finish;'
' time elapsed: %s', self.threadID,
stack.stack_name, int(waiting_time))

stack = self._heat_client.get_stack(stack.id)
stack_status_transitions.add(stack.stack_status)
status = status_check(stack)

LOG.debug('%s done with waiting for stack %s: action=%s, status=%s',
self.threadID, stack.stack_name, status_check.action,
status_check.status)
LOG.debug('Thread %s: Finished waiting'
' for stack %s: action=%s, status=%s',
self.threadID, stack.stack_name,
status_check.action, status_check.status)

if status_check.is_fail:
if operation == utils.OPERATION_CREATE:
@@ -416,27 +410,6 @@ class WorkerThread(threading.Thread):
raise exc.StackTimeoutError(operation=operation, stack=stack)


class StatusTransitions(object):

def __init__(self, status):
self.transitions = [status]
self.hits = [1]

def add(self, status):
if self.transitions[-1] != status:
self.transitions.append(status)
self.hits.append(0)
self.hits[-1] += 1

def __str__(self):
chunks = []
for status, hits in zip(self.transitions, self.hits):
if 1 < hits:
status = '{}({})'.format(status, hits)
chunks.append(status)
return ' ~> '.join(chunks)


class HEATIntermediateStatusChecker(object):
ACTION_CREATE = 'CREATE'
ACTION_UPDATE = 'UPDATE'
@@ -24,6 +24,8 @@ for admins to find messages related to a specific instance.
It also allows setting of formatting information through conf.

"""
from ord.openstack.common._i18n import _
from ord.openstack.common import local

import copy
import inspect
@@ -45,9 +47,6 @@ from six import moves

_PY26 = sys.version_info[0:2] == (2, 6)

from ord.openstack.common._i18n import _
from ord.openstack.common import local


_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"

@@ -317,8 +316,8 @@ class ContextAdapter(BaseLoggerAdapter):
extra.update(_dictify_context(context))

instance = kwargs.pop('instance', None)
instance_uuid = (extra.get('instance_uuid') or
kwargs.pop('instance_uuid', None))
instance_uuid = (extra.get('instance_uuid')
or kwargs.pop('instance_uuid', None))
instance_extra = ''
if instance:
instance_extra = CONF.instance_format % instance
@@ -653,8 +652,8 @@ class ContextFormatter(logging.Formatter):
else:
fmt = CONF.logging_default_format_string

if (record.levelno == logging.DEBUG and
CONF.logging_debug_format_suffix):
if (record.levelno == logging.DEBUG
and CONF.logging_debug_format_suffix):
fmt += " " + CONF.logging_debug_format_suffix

if sys.version_info < (3, 2):
@@ -144,8 +144,8 @@ def get_workers(name):

def prepare_service(argv=None):
i18n.enable_lazy()
log_levels = (cfg.CONF.default_log_levels +
['stevedore=INFO'])
log_levels = (cfg.CONF.default_log_levels
+ ['stevedore=INFO'])
cfg.set_defaults(log.log_opts,
default_log_levels=log_levels)
if argv is None:
@@ -18,6 +18,7 @@ from ord.client import heat as ord_heat
from ord.common import exceptions as exc
from ord.tests import base

import json
from unittest import mock


@@ -100,7 +101,8 @@ class TestHeatClient(base.BaseTestCase):
self.heat_client.stacks.delete.assert_called_with(stack_idnr)

def test_error_masquerading(self):
error = heat_exc.CommunicationError('ord-heat-stack-create-test')
error = heat_exc.CommunicationError(
json.dumps({'error': {'message': 'ord-heat-stack-create-test'}}))

stack_idnr = '0'
stack_name = "test_stack"
@@ -120,27 +120,6 @@ class TestWorkerThread(base.BaseTestCase):
stack.stack_name)
self.heat_client.delete_stack.assert_called_once_with(stack.id)

def test_wait_for_heat(self):
time_time = self.patch('time.time', side_effect=itertools.count(1))
time_sleep = self.patch('time.sleep')

stack_wait = base.Dummy(
id='1', stack_name=self.stack_name,
stack_status='CREATE_IN_PROGRESS')
stack_ready = base.Dummy(
id='1', stack_name=self.stack_name, stack_status='CREATE_COMPLETE')
status_responses = [stack_wait] * 4 + [stack_ready]

self.heat_client.get_stack.side_effect = status_responses

# raise exception in case of failure
self.workerThread._wait_for_heat(stack_wait, utils.OPERATION_CREATE)

self.assertEqual(
[mock.call(CONF.heat_poll_interval)] * 5,
time_sleep.mock_calls)
self.assertEqual(6, time_time.call_count)

def test_wait_for_heat_fail(self):
self.patch('time.time', side_effect=itertools.count(1))
self.patch('time.sleep')
@@ -148,47 +127,16 @@ class TestWorkerThread(base.BaseTestCase):
stack_wait = base.Dummy(
id='1', stack_name=self.stack_name,
stack_status='CREATE_IN_PROGRESS')
stack_ready = base.Dummy(
stack_failed = base.Dummy(
id='1', stack_name=self.stack_name, stack_status='CREATE_FAILED',
stack_status_reason='Stack fail due to resource creation')

status_responses = [stack_wait] * 4 + [stack_ready]

self.heat_client.get_stack.side_effect = status_responses
self.heat_client.get_stack.return_value = stack_failed

self.assertRaises(
exc.HEATStackCreateError, self.workerThread._wait_for_heat,
stack_wait, utils.OPERATION_CREATE)
def test_wait_for_heat_race(self):
self.patch('time.time', side_effect=itertools.count(1))
self.patch('time.sleep')

stack_initial = base.Dummy(
id='1', stack_name=self.stack_name, stack_status='UPDATE_COMPLETE',
updated_time='2016-06-02T16:30:48Z')
stack_wait = base.Dummy(
id='1', stack_name=self.stack_name,
stack_status='UPDATE_IN_PROGRESS',
updated_time='2016-06-02T16:30:48Z')
stack_ready = base.Dummy(
id='1', stack_name=self.stack_name, stack_status='UPDATE_COMPLETE',
updated_time='2016-06-02T16:30:50Z')
status_responses = [stack_initial]
status_responses += [stack_wait] * 2
status_responses += [stack_ready]

status_transition = workerfactory.StatusTransitions('_unittest_')
self.patch(
'ord.engine.workerfactory.StatusTransitions',
return_value=status_transition)

self.heat_client.get_stack.side_effect = status_responses

self.workerThread._wait_for_heat(stack_initial, utils.OPERATION_MODIFY)

self.assertEqual('UPDATE_COMPLETE', status_transition.transitions[-1])

def test_run(self):
self.workerThread._execute_operation = execute = mock.Mock()
execute.return_value = 'OPERATION_STATUS'
@@ -268,19 +216,6 @@ class TestWorkerThread(base.BaseTestCase):
db_api.update_target_data.reset_mock()


class TestStatusTransitions(base.BaseTestCase):
def test(self):
for data, expect in [
('A', 'A'),
('AA', 'A(2)'),
('ABC', 'A ~> B ~> C'),
('AABBCC', 'A(2) ~> B(2) ~> C(2)')]:
subject = workerfactory.StatusTransitions(data[0])
for entity in data[1:]:
subject.add(entity)
self.assertEqual(expect, str(subject))


class TestHEATIntermediateStatusChecker(base.BaseTestCase):
def test_scenario(self):
cls = workerfactory.HEATIntermediateStatusChecker
@@ -3,7 +3,8 @@
# process, which may cause wedges in the gate later.

amqp>=2.5.2
hacking>=0.12.0,!=0.13.0,<0.14 # Apache-2.0
hacking>=3.0.1,<3.1.0 # Apache-2.0
pep257==0.7.0 # MIT License
bandit>=1.5.1
coverage>=4.0
python-subunit>=0.0.18
@@ -130,9 +130,9 @@ class _Win32Colorizer(object):
import pywintypes
try:
screenBuffer.SetConsoleTextAttribute(
win32console.FOREGROUND_RED |
win32console.FOREGROUND_GREEN |
win32console.FOREGROUND_BLUE)
win32console.FOREGROUND_RED
| win32console.FOREGROUND_GREEN
| win32console.FOREGROUND_BLUE)
except pywintypes.error:
return False
else:
@@ -67,5 +67,6 @@ def main(argv):
install.install_dependencies()
print_help(venv, root)


if __name__ == '__main__':
main(sys.argv)
@@ -70,8 +70,8 @@ class InstallVenv(object):
check_exit_code)[0]

def get_distro(self):
if (os.path.exists('/etc/fedora-release') or
os.path.exists('/etc/redhat-release')):
if (os.path.exists('/etc/fedora-release')
or os.path.exists('/etc/redhat-release')):
return Fedora(
self.root, self.venv, self.requirements,
self.test_requirements, self.py_version, self.project)
@@ -1,50 +1,10 @@
- hosts: all
roles:
- ensure-pip
- ensure-docker
tasks:
- include_vars: vars.yaml

- name: Install Docker (Debian)
when: ansible_os_family == 'Debian'
block:
- file:
path: "{{ item }}"
state: directory
with_items:
- /etc/docker/
- /etc/systemd/system/docker.service.d/
- /var/lib/docker/
- mount:
path: /var/lib/docker/
src: tmpfs
fstype: tmpfs
opts: size=25g
state: mounted
- copy: "{{ item }}"
with_items:
- content: "{{ docker_daemon | to_json }}"
dest: /etc/docker/daemon.json
- src: files/docker-systemd.conf
dest: /etc/systemd/system/docker.service.d/
- apt_key:
url: https://download.docker.com/linux/ubuntu/gpg
- apt_repository:
repo: deb http://{{ zuul_site_mirror_fqdn }}/deb-docker bionic stable
- apt:
name: "{{ item }}"
allow_unauthenticated: True
with_items:
- docker-ce
- pip:
name: docker
version: 2.7.0
- iptables:
action: insert
chain: INPUT
in_interface: docker0
jump: ACCEPT
become: True

- name: Make images
when: not publish
block:
tox.ini (8 changes)
@@ -5,14 +5,15 @@ envlist = py36,pep8
skipsdist = True

[testenv]
basepython = python3.6
basepython = python3
usedevelop = True
setenv =
VIRTUAL_ENV={envdir}
PYTHONWARNINGS=default::DeprecationWarning
install_command = pip install {opts} {packages}
commands = stestr run
deps = -r {toxinidir}/requirements.txt
-r {toxinidir}/test-requirements.txt
commands = stestr run

[testenv:bandit]
deps = .[bandit]
@@ -21,7 +22,6 @@ commands = bandit -r ord -n5 -c bandit.yaml
[testenv:pep8]
commands =
flake8 {posargs}
{[testenv:bandit]commands}

[testenv:venv]
commands = {posargs}
@@ -41,6 +41,6 @@ commands = oslo_debug_helper {posargs}

[flake8]
show-source = True
ignore = F841,H101,H405
ignore = F841,H101,H405,W503,E902
builtins = _
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build