handlers: Remove dry-run functionality

Armada's dry-run option is incomplete, no longer maintained, and offers
little value for the complexity required to maintain it.

This commit is the final change in a series to remove the dry-run
feature. Specifically, it removes the functionality associated with
the dry-run feature.

Story: 2005121

Change-Id: I7dfe5ab27511debe2b8ac01f8e0a696c6126a9f7
Signed-off-by: Drew Walters <andrew.walters@att.com>
This commit is contained in:
Drew Walters 2019-08-21 20:49:42 +00:00
parent 14be421c86
commit fe63df01b2
4 changed files with 10 additions and 68 deletions

View File

@ -48,7 +48,6 @@ class Armada(object):
disable_update_pre=False,
disable_update_post=False,
enable_chart_cleanup=False,
dry_run=False,
set_ovr=None,
force_wait=False,
timeout=None,
@ -65,7 +64,6 @@ class Armada(object):
:param bool disable_update_post: Disable post-update Tiller
operations.
:param bool enable_chart_cleanup: Clean up unmanaged charts.
:param bool dry_run: Run charts without installing them.
:param bool force_wait: Force Tiller to wait until all charts are
deployed, rather than using each chart's specified wait policy.
:param int timeout: Specifies overall time in seconds that Tiller
@ -79,7 +77,6 @@ class Armada(object):
'''
self.enable_chart_cleanup = enable_chart_cleanup
self.dry_run = dry_run
self.force_wait = force_wait
self.tiller = tiller
try:
@ -94,8 +91,7 @@ class Armada(object):
self.chart_cache = {}
self.chart_deploy = ChartDeploy(
self.manifest, disable_update_pre, disable_update_post,
self.dry_run, k8s_wait_attempts, k8s_wait_attempt_sleep, timeout,
self.tiller)
k8s_wait_attempts, k8s_wait_attempt_sleep, timeout, self.tiller)
def pre_flight_ops(self):
"""Perform a series of checks and operations to ensure proper
@ -184,9 +180,6 @@ class Armada(object):
return self._sync()
def _sync(self):
if self.dry_run:
LOG.info('Armada is in DRY RUN mode, no changes being made.')
msg = {
'install': [],
'upgrade': [],

View File

@ -34,12 +34,11 @@ LOG = logging.getLogger(__name__)
class ChartDeploy(object):
def __init__(
self, manifest, disable_update_pre, disable_update_post, dry_run,
self, manifest, disable_update_pre, disable_update_post,
k8s_wait_attempts, k8s_wait_attempt_sleep, timeout, tiller):
self.manifest = manifest
self.disable_update_pre = disable_update_pre
self.disable_update_post = disable_update_post
self.dry_run = dry_run
self.k8s_wait_attempts = k8s_wait_attempts
self.k8s_wait_attempt_sleep = k8s_wait_attempt_sleep
self.timeout = timeout
@ -298,12 +297,6 @@ class ChartDeploy(object):
return result
def _test_chart(self, release_name, test_handler):
if self.dry_run:
LOG.info(
'Skipping test during `dry-run`, would have tested '
'release=%s', release_name)
return True
success = test_handler.test_release_for_success()
if not success:
raise tiller_exceptions.TestFailedException(release_name)

View File

@ -82,13 +82,11 @@ class Tiller(object):
tiller_host=None,
tiller_port=None,
tiller_namespace=None,
bearer_token=None,
dry_run=None):
bearer_token=None):
self.tiller_host = tiller_host or CONF.tiller_host
self.tiller_port = tiller_port or CONF.tiller_port
self.tiller_namespace = tiller_namespace or CONF.tiller_namespace
self.bearer_token = bearer_token
self.dry_run = dry_run or False
# init k8s connectivity
self.k8s = K8s(bearer_token=self.bearer_token)
@ -288,7 +286,6 @@ class Tiller(object):
stub = ReleaseServiceStub(self.channel)
release_request = InstallReleaseRequest(
chart=chart,
dry_run=True,
values=values,
name=name,
namespace=namespace,
@ -392,9 +389,8 @@ class Tiller(object):
timeout = self._check_timeout(wait, timeout)
LOG.info(
'Helm update release%s: wait=%s, timeout=%s, force=%s, '
'recreate_pods=%s', (' (dry run)' if self.dry_run else ''), wait,
timeout, force, recreate_pods)
'Helm update release: wait=%s, timeout=%s, force=%s, '
'recreate_pods=%s', wait, timeout, force, recreate_pods)
if values is None:
values = Config(raw='')
@ -411,7 +407,6 @@ class Tiller(object):
stub = ReleaseServiceStub(self.channel)
release_request = UpdateReleaseRequest(
chart=chart,
dry_run=self.dry_run,
disable_hooks=disable_hooks,
values=values,
name=release,
@ -446,9 +441,7 @@ class Tiller(object):
'''
timeout = self._check_timeout(wait, timeout)
LOG.info(
'Helm install release%s: wait=%s, timeout=%s',
(' (dry run)' if self.dry_run else ''), wait, timeout)
LOG.info('Helm install release: wait=%s, timeout=%s', wait, timeout)
if values is None:
values = Config(raw='')
@ -460,7 +453,6 @@ class Tiller(object):
stub = ReleaseServiceStub(self.channel)
release_request = InstallReleaseRequest(
chart=chart,
dry_run=self.dry_run,
values=values,
name=release,
namespace=namespace,
@ -607,15 +599,6 @@ class Tiller(object):
if timeout is None:
timeout = const.DEFAULT_DELETE_TIMEOUT
# Helm client calls ReleaseContent in Delete dry-run scenario
if self.dry_run:
content = self.get_release_content(release)
LOG.info(
'Skipping delete during `dry-run`, would have deleted '
'release=%s from namespace=%s.', content.release.name,
content.release.namespace)
return
# build release uninstall request
try:
stub = ReleaseServiceStub(self.channel)
@ -665,13 +648,6 @@ class Tiller(object):
for jb in get_jobs.items:
jb_name = jb.metadata.name
if self.dry_run:
LOG.info(
'Skipping delete job during `dry-run`, would '
'have deleted job %s in namespace=%s.', jb_name,
namespace)
continue
LOG.info(
"Deleting job %s in namespace: %s", jb_name, namespace)
self.k8s.delete_job_action(jb_name, namespace, timeout=timeout)
@ -695,13 +671,6 @@ class Tiller(object):
"Deleting cronjobs via `type: job` is "
"deprecated, use `type: cronjob` instead")
if self.dry_run:
LOG.info(
'Skipping delete cronjob during `dry-run`, would '
'have deleted cronjob %s in namespace=%s.', jb_name,
namespace)
continue
LOG.info(
"Deleting cronjob %s in namespace: %s", jb_name, namespace)
self.k8s.delete_cron_job_action(jb_name, namespace)
@ -713,13 +682,6 @@ class Tiller(object):
for pod in release_pods.items:
pod_name = pod.metadata.name
if self.dry_run:
LOG.info(
'Skipping delete pod during `dry-run`, would '
'have deleted pod %s in namespace=%s.', pod_name,
namespace)
continue
LOG.info(
"Deleting pod %s in namespace: %s", pod_name, namespace)
self.k8s.delete_pod_action(pod_name, namespace)
@ -805,15 +767,13 @@ class Tiller(object):
timeout = self._check_timeout(wait, timeout)
LOG.debug(
'Helm rollback%s of release=%s, version=%s, '
'wait=%s, timeout=%s', (' (dry run)' if self.dry_run else ''),
release_name, version, wait, timeout)
'Helm rollback of release=%s, version=%s, '
'wait=%s, timeout=%s', release_name, version, wait, timeout)
try:
stub = ReleaseServiceStub(self.channel)
rollback_request = RollbackReleaseRequest(
name=release_name,
version=version,
dry_run=self.dry_run,
wait=wait,
timeout=timeout,
force=force,

View File

@ -392,9 +392,7 @@ class TillerTestCase(base.ArmadaTestCase):
mock_release_service_stub.return_value.RollbackRelease\
.return_value = {}
dry_run = True
tiller_obj = tiller.Tiller('host', '8080', None, dry_run=dry_run)
tiller_obj = tiller.Tiller('host', '8080', None)
release = 'release'
version = 0
@ -415,7 +413,6 @@ class TillerTestCase(base.ArmadaTestCase):
mock_rollback_release_request.assert_called_once_with(
name=release,
version=version,
dry_run=dry_run,
wait=wait,
timeout=timeout,
force=force,
@ -465,7 +462,7 @@ class TillerTestCase(base.ArmadaTestCase):
})
})
tiller_obj = tiller.Tiller('host', '8080', None, dry_run=False)
tiller_obj = tiller.Tiller('host', '8080', None)
# TODO: Test these methods as well, either by unmocking, or adding
# separate tests for them.
@ -499,7 +496,6 @@ class TillerTestCase(base.ArmadaTestCase):
mock_update_release_request.assert_called_once_with(
chart=chart,
name=release,
dry_run=tiller_obj.dry_run,
disable_hooks=False,
values=values,
wait=wait,