Fix widespread typos in English verbs

Fix typos in comments, log output and local variables

Change-Id: I1fb8eb4ae10be5c2f174336d3a8f124e61da5954
Closes-Bug: #1513777
penguinolog 2015-11-18 15:32:57 +03:00
parent 642b5fba8d
commit 2c7951055b
33 changed files with 90 additions and 90 deletions

View File

@ -682,7 +682,7 @@ def check_stats_private_info(collector_remote, postgres_actions,
return _has_private_data
def _contain_public_ip(data, _used_networks):
_has_puplic_ip = False
_has_public_ip = False
_ip_regex = (r'\b((\d|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5])\.){3}'
r'(\d|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5])\b')
_not_public_regex = [
@ -700,12 +700,12 @@ def check_stats_private_info(collector_remote, postgres_actions,
and not any(IPAddress(_match.group()) in IPNetwork(net) for
net in _used_networks):
continue
logger.debug('Usage statistics with piblic IP(s):\n {0}'.
logger.debug('Usage statistics with public IP(s):\n {0}'.
format(data))
logger.error('Found public IP in usage statistics: "{0}"'.format(
_match.group()))
_has_puplic_ip = True
return _has_puplic_ip
_has_public_ip = True
return _has_public_ip
private_data = {
'hostname': _settings['HOSTNAME'],
@ -1013,7 +1013,7 @@ def is_ntpd_active(remote, ntpd_ip):
def check_repo_managment(remote):
"""Check repo managment
"""Check repo management
run 'yum -y clean all && yum check-update' or
'apt-get clean all && apt-get update' exit code should be 0
@ -1081,10 +1081,10 @@ def check_haproxy_backend(remote,
cmd = 'haproxy-status | egrep -v "BACKEND|FRONTEND" | grep "DOWN"'
positive_filter = (services, nodes)
negativ_filter = (ignore_services, ignore_nodes)
negative_filter = (ignore_services, ignore_nodes)
grep = ['|egrep "{}"'.format('|'.join(n)) for n in positive_filter if n]
grep.extend(
['|egrep -v "{}"'.format('|'.join(n)) for n in negativ_filter if n])
['|egrep -v "{}"'.format('|'.join(n)) for n in negative_filter if n])
return remote.execute("{}{}".format(cmd, ''.join(grep)))

View File

@ -246,7 +246,7 @@ class AdminActions(BaseActions):
@logwrap
def modify_configs(self, router):
# Slave nodes sould use the gateway of 'admin' network as the default
# Slave nodes should use the gateway of 'admin' network as the default
# gateway during provisioning and as an additional DNS server.
# resolv.conf should contains nameserver that resolve intranet URLs.
config = '/etc/fuel/astute.yaml'

View File

@ -48,7 +48,7 @@ def check_interface_status(remote, iname):
cmd = 'ethtools {0}| grep "Link detected"'.format(iname)
result = remote.execute(cmd)
assert_equal(0, result['exit_code'],
"Non-zero exit code sderr {0}, "
"Non-zero exit code stderr {0}, "
"stdout {1}".format(result['stderr'], result['stdout']))
assert_true('yes' in ''.join(result['stdout']),
@ -63,7 +63,7 @@ def ping_remote_net(remote, ip):
assert_equal(
res['exit_code'], 0,
"Ping of {0} ended with non zero exit-code. "
"Stdout is {1}, sderr {2}".format(
"Stdout is {1}, stderr {2}".format(
ip, ''.join(res['stdout']), ''.join(res['stderr'])))
@ -80,7 +80,7 @@ def check_tools_task(remote, tool_name):
assert_equal(
0, output['exit_code'],
"Command {0} failed with non zero exit code, current output is:"
" stdout {1}, sdterr: {2} ".format(
" stdout {1}, stderr: {2} ".format(
cmd_sh, ''.join(output['stdout']), ''.join(output['stderr'])))
@ -94,7 +94,7 @@ def run_check_from_task(remote, path):
path, res['stderr'], res['stdout']))
except AssertionError:
time.sleep(60)
logger.info('remoote is {0}'.format(remote))
logger.info('remote is {0}'.format(remote))
res = remote.execute('{0}'.format(path))
assert_equal(
0, res['exit_code'],

View File

@ -39,7 +39,7 @@ class HTTPClient(object):
self.keystone = keystoneclient(
auth_url=self.keystone_url, **self.creds)
# it depends on keystone version, some versions doing auth
# explicitly some dont, but we are making it explicitly always
# explicitly some don't, but we are making it explicitly always
self.keystone.authenticate()
logger.debug('Authorization token is successfully updated')
except exceptions.AuthorizationFailure:

View File

@ -135,7 +135,7 @@ class Ntp(object):
cls.node_name = node_name
cls.peers = []
# Get IP of a server from which the time will be syncronized.
# Get IP of a server from which the time will be synchronized.
cmd = "awk '/^server/ && $2 !~ /127.*/ {print $2}' /etc/ntp.conf"
cls.server = remote.execute(cmd)['stdout'][0]
@ -187,12 +187,12 @@ class Ntp(object):
if (abs(offset) > 500) or (abs(jitter) > 500):
return self.is_connected
# 2. remote should be marked whith tally '*'
# 2. remote should be marked with tally '*'
if remote[0] != '*':
continue
# 3. reachability bit array should have '1' at least in
# two lower bits as the last two sussesful checks
# two lower bits as the last two successful checks
if reach & 3 == 3:
self.is_connected = True
return self.is_connected

View File

@ -74,7 +74,7 @@ class CustomRepo(object):
Scenario:
1. Temporary set nameserver to local router on admin node
2. Install tools to manage rpm/deb repository
3. Retrive list of packages from custom repository
3. Retrieve list of packages from custom repository
4. Download packages to local rpm/deb repository
5. Update .yaml file with new packages version
6. Re-generate repo using shell scripts on admin node
@ -119,15 +119,16 @@ class CustomRepo(object):
def get_pkgs_list_ubuntu(self):
url = "{0}/{1}/Packages".format(self.custom_pkgs_mirror,
self.custom_pkgs_mirror_path)
logger.info("Retriving additional packages from the custom mirror:"
logger.info("Retrieving additional packages from the custom mirror:"
" {0}".format(url))
try:
pkgs_release = urllib2.urlopen(url).read()
except (urllib2.HTTPError, urllib2.URLError):
logger.error(traceback.format_exc())
url_gz = '{0}.gz'.format(url)
logger.info("Retriving additional packages from the custom mirror:"
" {0}".format(url_gz))
logger.info(
"Retrieving additional packages from the custom mirror:"
" {0}".format(url_gz))
try:
pkgs_release_gz = urllib2.urlopen(url_gz).read()
except (urllib2.HTTPError, urllib2.URLError):
@ -150,13 +151,13 @@ class CustomRepo(object):
assert_equal(True, all(x in upkg for x in upkg_keys),
'Missing one of the statements ["Package:", '
'"Version:", "Filename:"] in {0}'.format(url))
# TODO: add dependences list to upkg
# TODO: add dependencies list to upkg
self.pkgs_list.append(upkg)
# Centos: Creating list of packages from the additional mirror
def get_pkgs_list_centos(self):
logger.info("Retriving additional packages from the custom mirror: {0}"
.format(self.custom_pkgs_mirror))
logger.info("Retrieving additional packages from the custom mirror:"
" {0}".format(self.custom_pkgs_mirror))
url = "{0}/repodata/repomd.xml".format(self.custom_pkgs_mirror)
try:
repomd_data = urllib2.urlopen(url).read()
@ -208,7 +209,7 @@ class CustomRepo(object):
cpkg = {'package:': flist_name,
'version:': flist_ver,
'filename:': flist_file}
# TODO: add dependences list to cpkg
# TODO: add dependencies list to cpkg
self.pkgs_list.append(cpkg)
# Download packages (local_folder)
@ -219,7 +220,7 @@ class CustomRepo(object):
for npkg, pkg in enumerate(self.pkgs_list):
# TODO: Previous versions of the updating packages must be removed
# to avoid unwanted packet manager dependences resolution
# to avoid unwanted packet manager dependencies resolution
# (when some package still depends on other package which
# is not going to be installed)
@ -260,7 +261,7 @@ class CustomRepo(object):
'\n{0}'.format(traceback.format_exc()))
raise
# Update the local repository using prevously uploaded script.
# Update the local repository using previously uploaded script.
script_cmd = '{0}/{1} {2} {3}'.format(self.remote_path_scripts,
regenerate_script,
local_mirror_path,
@ -269,7 +270,7 @@ class CustomRepo(object):
assert_equal(0, script_result['exit_code'],
self.assert_msg(script_cmd, script_result['stderr']))
logger.info('Local repository {0} has been updated successfuly.'
logger.info('Local repository {0} has been updated successfully.'
.format(local_mirror_path))
def assert_msg(self, cmd, err):
@ -277,7 +278,7 @@ class CustomRepo(object):
.format(cmd, err)
def check_puppet_logs(self):
logger.info("Check puppet logs for packages with unmet dependences.")
logger.info("Check puppet logs for packages with unmet dependencies.")
if settings.OPENSTACK_RELEASE_UBUNTU in settings.OPENSTACK_RELEASE:
err_deps = self.check_puppet_logs_ubuntu()
else:

View File

@ -40,7 +40,6 @@ def replace_ubuntu_repo_url(repo_url, upstream_host):
def replace_ubuntu_repos(repos_attr, upstream_host):
# Walk thru repos_attr and replace/add extra Ubuntu mirrors
repos = []
if help_data.MIRROR_UBUNTU:
logger.debug("Adding new mirrors: '{0}'"
.format(help_data.MIRROR_UBUNTU))
@ -96,7 +95,7 @@ def replace_centos_repos(repos_attr, upstream_host):
def report_repos(repos_attr, release=help_data.OPENSTACK_RELEASE):
"""Show list of reposifories for specified cluster"""
"""Show list of repositories for specified cluster"""
if help_data.OPENSTACK_RELEASE_UBUNTU in release:
report_ubuntu_repos(repos_attr['value'])
else:

View File

@ -557,7 +557,7 @@ def get_node_hiera_roles(remote):
"""
cmd = 'hiera roles'
roles = ''.join(run_on_remote(remote, cmd)).strip()
# Contert string with roles like a ["ceph-osd", "controller"] to list
# Content string with roles like a ["ceph-osd", "controller"] to list
roles = map(lambda s: s.strip('" '), roles.strip("[]").split(','))
return roles

View File

@ -1757,9 +1757,9 @@ class FuelWebClient(object):
res = []
passed_count = []
failed_count = []
test_nama_to_ran = test_name or OSTF_TEST_NAME
test_name_to_run = test_name or OSTF_TEST_NAME
retr = test_retries or OSTF_TEST_RETRIES_COUNT
test_path = map_ostf.OSTF_TEST_MAPPING.get(test_nama_to_ran)
test_path = map_ostf.OSTF_TEST_MAPPING.get(test_name_to_run)
logger.info('Test path is {0}'.format(test_path))
for i in range(0, retr):
@ -1818,7 +1818,7 @@ class FuelWebClient(object):
if ceph.is_clock_skew(remote):
skewed = ceph.get_node_fqdns_w_clock_skew(remote)
logger.warning("Time on nodes {0} are to be "
"re-syncronized".format(skewed))
"re-synchronized".format(skewed))
nodes_to_sync = [
n for n in online_ceph_nodes
if n['fqdn'].split('.')[0] in skewed]

View File

@ -403,7 +403,7 @@ FUEL_PLUGIN_BUILDER_REPO = 'https://github.com/openstack/fuel-plugins.git'
# Change various Fuel master node default settings #
###############################################################################
# URL to custom mirror with new OSCI packages wich should be tested,
# URL to custom mirror with new OSCI packages which should be tested,
# for example:
# CentOS: http://osci-obs.vm.mirantis.net:82/centos-fuel-master-20921/centos/
# Ubuntu: http://osci-obs.vm.mirantis.net:82/ubuntu-fuel-master-20921/ubuntu/

View File

@ -211,7 +211,7 @@ class NeutronVlanCephMongo(TestBasic):
remote=remote,
path=self.get_post_test(tasks, 'globals')[0]['cmd'])
# check netcondfig
# check netconfig
if self.get_post_test(tasks, 'netconfig'):
for node in nodes:

View File

@ -103,7 +103,7 @@ class TestElasticsearchPlugin(TestBasic):
assert_is_not_none(es_server_ip,
"Failed to get the IP of Elasticsearch server")
logger.debug("Check that Elasticseach is ready")
logger.debug("Check that Elasticsearch is ready")
r = requests.get("http://{}:9200/".format(es_server_ip))
msg = "Elasticsearch responded with {}".format(r.status_code)

View File

@ -148,7 +148,7 @@ class GlusterfsPlugin(TestBasic):
14. Run ostf
Duration 50m
Snapshot deploy_glasterfs_ha
Snapshot deploy_glusterfs_ha
"""
self.env.revert_snapshot("ready_with_5_slaves")

View File

@ -459,7 +459,7 @@ class ZabbixPlugin(TestBasic):
7. Deploy the cluster
8. Run network verification
9. Run OSTF
10. Check Extreme Swich trigger with test SNMP message
10. Check Extreme Switch trigger with test SNMP message
Duration 70m
Snapshot deploy_zabbix_snmp_extreme_ha

View File

@ -393,7 +393,7 @@ class TestLogrotateBase(TestBasic):
free_inodes, i_suff = self.check_free_inodes(remote)
logger.debug('Free inodes before file '
'creation: {0}{1}'.format(free_inodes, i_suff))
# create 1 week old emty file
# create 1 week old empty file
self.create_old_file(remote, name='/var/log/messages')

View File

@ -283,7 +283,7 @@ class CephHA(TestBasic):
'slave-06': ['ceph-osd']
}
)
# Depoy cluster
# Deploy cluster
self.fuel_web.deploy_cluster_wait(cluster_id)
self.env.make_snapshot("ceph_ha", is_make=True)
@ -457,7 +457,7 @@ class CephRadosGW(TestBasic):
4. Deploy the cluster
5. Check ceph status
6. Run OSTF tests
7. Check the radosqw daemon is started
7. Check the radosgw daemon is started
Duration 90m
Snapshot ceph_rados_gw
@ -526,7 +526,7 @@ class CephRadosGW(TestBasic):
self.fuel_web.run_ostf(cluster_id=cluster_id,
test_sets=['ha', 'smoke', 'sanity'])
# Check the radosqw daemon is started
# Check the radosgw daemon is started
with self.fuel_web.get_ssh_for_node('slave-01') as remote:
assert_true(radosgw_started(remote), 'radosgw daemon started')
@ -684,7 +684,7 @@ class VmBackedWithCephMigrationBasic(TestBasic):
md5after in md5before,
"Md5 checksums don`t match."
"Before migration md5 was equal to: {bef}"
"Now it eqals: {aft}".format(bef=md5before, aft=md5after))
"Now it equals: {aft}".format(bef=md5before, aft=md5after))
self.show_step(9)
@ -781,7 +781,7 @@ class VmBackedWithCephMigrationBasic(TestBasic):
remote,
floating_ip.ip, "sudo ls /mnt", creds)
assert_true("file-on-volume" in out['stdout'],
"File is abscent in /mnt")
"File is absent in /mnt")
self.show_step(17)
logger.info("Check Ceph health is ok after migration")

View File

@ -78,7 +78,7 @@ class CommandLineMinimal(TestBasic):
'fuel task | grep deployment | awk \'{print $9}\'')
['stdout'][0].rstrip()) == 100, timeout=120)
except TimeoutError:
raise TimeoutError("hiera manifest was not applyed")
raise TimeoutError("hiera manifest was not applied")
role = remote.execute('ssh -q node-{0} "hiera role"'
.format(node_id))['stdout'][0].rstrip()
assert_equal(role, 'primary-controller', "node with deployed hiera "
@ -101,7 +101,7 @@ class CommandLineTest(test_cli_base.CommandLine):
3. Provision a controller node using Fuel CLI
4. Provision two compute+cinder nodes using Fuel CLI
5. Deploy the controller node using Fuel CLI
6. Deploy the compute+cinder nodes usin Fuel CLI
6. Deploy the compute+cinder nodes using Fuel CLI
7. Run OSTF
8. Make snapshot "cli_selected_nodes_deploy"
@ -224,9 +224,9 @@ class CommandLineTest(test_cli_base.CommandLine):
format(node_id))
with self.env.d_env.get_admin_remote() as remote:
is_cobler_node_exists = check_cobbler_node_exists(remote, node_id)
is_cobbler_node_exists = check_cobbler_node_exists(remote, node_id)
assert_false(is_cobler_node_exists,
assert_false(is_cobbler_node_exists,
"After deletion node-{0} is found in cobbler list".
format(node_id))

View File

@ -451,7 +451,7 @@ class MultiroleMultipleServices(TestBasic):
3. Run 'fuel-createmirror' to replace default repositories
with local mirrors
4. Create cluster with many components to check as many
packages in local mirrors have correct dependences
packages in local mirrors have correct dependencies
5. Deploy cluster
Duration 50m

View File

@ -36,7 +36,7 @@ class VcenterDeploy(TestBasic):
return self.fuel_web.get_nailgun_node_by_name(name_node)['hostname']
def create_vm(self, os_conn=None, vm_count=None):
# Get list of available images,flavors and hipervisors
# Get list of available images,flavors and hypervisors
images_list = os_conn.nova.images.list()
flavors_list = os_conn.nova.flavors.list()
hypervisors_list = os_conn.get_hypervisors()
@ -850,7 +850,7 @@ class VcenterDeploy(TestBasic):
groups=["vcenter_multiroles_ceilometer"])
@log_snapshot_after_test
def vcenter_multiroles_ceilometer(self):
"""Deploy enviroment with vCenter, Ceilometer and nodes with \
"""Deploy environment with vCenter, Ceilometer and nodes with \
multiroles (combinations with CinderVMDK, Cinder and MongoDB)
Scenario:
@ -910,7 +910,7 @@ class VcenterDeploy(TestBasic):
groups=["vcenter_add_delete_nodes"])
@log_snapshot_after_test
def vcenter_add_delete_nodes(self):
"""Deploy enviroment of vcenter+qemu nova vlan and default backend for
"""Deploy environment of vcenter+qemu nova vlan and default backend for
glance and with addition and deletion of nodes with different roles
Scenario:
@ -951,7 +951,7 @@ class VcenterDeploy(TestBasic):
logger.debug("cluster is {}".format(cluster_id))
# Add role controler for node 1
# Add role controller for node 1
self.fuel_web.update_nodes(
cluster_id,
{'slave-01': ['controller']}
@ -1099,7 +1099,7 @@ class VcenterDeploy(TestBasic):
groups=["vcenter_delete_controler"])
@log_snapshot_after_test
def vcenter_delete_controler(self):
"""Deploy enviroment of vcenter+qemu nova vlan, default backend for
"""Deploy environment of vcenter+qemu nova vlan, default backend for
glance and deletion one node with controller role
Scenario:
@ -1107,8 +1107,8 @@ class VcenterDeploy(TestBasic):
2. Add 4 nodes with Controller roles
3. Add 2 nodes with compute role
4. Add 1 node with cinder role
5. Add 1 node with cinder-vmvare role
6. Add a node with compute-vmvare role
5. Add 1 node with cinder-vmware role
6. Add a node with compute-vmware role
7. Set Nova-Network VlanManager as a network backend.
8. Deploy the cluster
9. Run OSTF.

View File

@ -216,7 +216,7 @@ class SeparateDbFailover(TestBasic):
Scenario:
1. Revert snapshot separate_db_service
2. Add one databse node and re-deploy cluster
2. Add one database node and re-deploy cluster
3. Run network verification
4. Run OSTF
5. Check hiera hosts are the same for

View File

@ -99,7 +99,7 @@ class SeparateHorizon(TestBasic):
'slave-02': ['controller'],
'slave-03': ['controller'],
'slave-04': ['standalone-horizon'],
'slave-05': ['stabdalone-horizon'],
'slave-05': ['standalone-horizon'],
'slave-06': ['standalone-horizon'],
'slave-07': ['compute'],
'slave-08': ['ceph-osd'],

View File

@ -1396,7 +1396,7 @@ class TestHaFailoverBase(TestBasic):
logger.info('Destroy master rabbit node {0} on count {1}'.format(
master_rabbit_2.name, count))
# detroy devops node with rabbit master
# destroy devops node with rabbit master
master_rabbit_2.destroy()
# Wait until Nailgun marked suspended controller as offline

View File

@ -125,7 +125,7 @@ class DeployHAOneControllerMasterNodeFail(base_test_case.TestBasic):
details = common_func.get_instance_detail(server)
assert_equal(details.name, 'test_instance')
# Check if instacne active
# Check if instance active
common_func.verify_instance_status(server, 'ACTIVE')
# delete instance

View File

@ -186,7 +186,7 @@ class TestNeutronFailoverGRE(TestNeutronFailoverBase):
"neutron_l3_migration_after_reset_gre"])
@log_snapshot_after_test
def neutron_l3_migration_after_reset_gre(self):
"""Check l3-agent rescheduling after reset no-nprimary controller (gre)
"""Check l3-agent rescheduling after reset non-primary controller (gre)
Scenario:
1. Revert snapshot with neutron cluster

View File

@ -206,7 +206,7 @@ class TestNeutronFailoverBase(base_test_case.TestBasic):
logger.debug('instance internal ip is {0}'.format(instance_ip))
# Reshedule router for net for created instance to new controller
# Reschedule router for net for created instance to new controller
self.reshedule_router_manually(os_conn, router_id)
# Get remote to the controller with running DHCP agent
@ -271,7 +271,7 @@ class TestNeutronFailoverBase(base_test_case.TestBasic):
label=net_name).addresses[net_name][0]['addr']
logger.debug('instance internal ip is {0}'.format(instance_ip))
# Reshedule router for net for created instance to new controller
# Reschedule router for net for created instance to new controller
self.reshedule_router_manually(os_conn, router_id)
# Get remote to the controller with running DHCP agent
@ -353,7 +353,7 @@ class TestNeutronFailoverBase(base_test_case.TestBasic):
label=net_name).addresses[net_name][0]['addr']
logger.debug('instance internal ip is {0}'.format(instance_ip))
# Reshedule router for net for created instance to new controller
# Reschedule router for net for created instance to new controller
self.reshedule_router_manually(os_conn, router_id)
# Get remote to the controller with running DHCP agent

View File

@ -125,7 +125,7 @@ class CephRestart(TestBasic):
self.fuel_web.run_ostf(cluster_id=cluster_id)
# Destroy osd-node
logger.info("Destory slave-06")
logger.info("Destroy slave-06")
slave_06 = self.env.d_env.get_node(name='slave-06')
slave_06.destroy()
@ -138,7 +138,7 @@ class CephRestart(TestBasic):
self.fuel_web.run_ostf(cluster_id=cluster_id)
# Destroy compute node
logger.info("Destory slave-05")
logger.info("Destroy slave-05")
slave_05 = self.env.d_env.get_node(name='slave-05')
slave_05.destroy()

View File

@ -33,9 +33,9 @@ class PrepareBase(base_actions_factory.BaseActionsFactory):
_action_setup_master - setup master node in environment
_action_config_release - preconfig releases if it needs
_action_make_slaves - boot slaves and snapshop environment with
bootstraped slaves
_action_revert_slaves - revert environment with bootstraped slaves
_action_make_slaves - boot slaves and snapshot environment with
bootstrapped slaves
_action_revert_slaves - revert environment with bootstrapped slaves
"""
@ -116,7 +116,7 @@ class PrepareBase(base_actions_factory.BaseActionsFactory):
@deferred_decorator([make_snapshot_if_step_fail])
@action
def revert_slaves(self):
"""Revert bootstraped nodes
"""Revert bootstrapped nodes
Skip if snapshot with cluster exists
"""
@ -129,7 +129,7 @@ class PrepareBase(base_actions_factory.BaseActionsFactory):
class ActionsBase(PrepareBase):
"""Basic actions for acceptance cases
For chousing action order use actions_order variable, set list of actions
For choosing action order use actions_order variable, set list of actions
order
Actions:
@ -269,7 +269,7 @@ class ActionsBase(PrepareBase):
@deferred_decorator([make_snapshot_if_step_fail])
@action
def save_load_environment(self):
"""Load existen environment from snapshot or save it"""
"""Load existent environment from snapshot or save it"""
env_name = self.env_config['name']
if self.cluster_id is None:
logger.info("Revert Environment from "

View File

@ -40,20 +40,20 @@ class BaseActionsFactory(base_test_case.TestBasic):
@classmethod
def caseclass_factory(cls, case_group):
"""Create new clonned cls class contains only action methods"""
"""Create new cloned cls class contains only action methods"""
test_steps, scenario = {}, []
actions_method = cls.get_actions()
# Generate human readeble class_name, if was method docstring not
# Generate human readable class_name, if was method docstring not
# described, use generated name
class_name = "Case_{}_{}".format(cls.__name__, case_group)
# Make methods for new testcase class, folowing by order
# Make methods for new testcase class, following by order
scenario.append(" Scenario:")
for step, action in enumerate(cls.get_actions_order()):
n_action = action.replace("_action_", "")
# Generate human readeble method name, if was method docstring not
# described, use generated name. Used when metod failed
# Generate human readable method name, if was method docstring not
# described, use generated name. Used when method failed
step_method_name = "{}.Step{:03d}_{}".format(class_name,
step,
n_action)
@ -69,7 +69,7 @@ class BaseActionsFactory(base_test_case.TestBasic):
# Add step to scenario
scenario.append(" {}. {}".format(step, _step_name))
# Add decorator to colonned method
# Add decorator to cloned method
for deco in getattr(method, '_deferred_decorator_', []):
method = deco(method)

View File

@ -68,7 +68,7 @@ class StrenghtDestroyFirstContorller(strength_base.StrenghtBaseActions):
@deferred_decorator([make_snapshot_if_step_fail])
@action
def destory_first_controller(self):
"""Destory first controller"""
"""Destroy first controller"""
self._destory_controller('slave-01')
@ -119,7 +119,7 @@ class StrenghtDestroySecondContorller(strength_base.StrenghtBaseActions):
@deferred_decorator([make_snapshot_if_step_fail])
@action
def destory_second_controller(self):
"""Destory second controller"""
"""Destroy second controller"""
self._destory_controller('slave-02')

View File

@ -41,14 +41,14 @@ class StrenghtBaseActions(actions_base.ActionsBase):
if d_node not in self.destroyed_devops_nodes:
self.destroyed_devops_nodes.append(d_node)
else:
logger.warning("Try destory allready destoryed node")
logger.warning("Try destroy already destroyed node")
@deferred_decorator([make_snapshot_if_step_fail])
@action
def wait_offline_nodes(self):
"""Wait offline status of destroyed nodes"""
assert_true(self.destroyed_devops_nodes,
"No destoryed nodes in Environment")
"No destroyed nodes in Environment")
def wait_offline_nodes():
n_nodes = map(self.fuel_web.get_nailgun_node_by_devops_node,

View File

@ -34,7 +34,7 @@ class DeployCheckRadosGW(actions_base.ActionsBase):
6. Check HAProxy backends
7. Check ceph status
8. Run OSTF
9. Check the radosqw daemon is started
9. Check the radosgw daemon is started
"""
@ -66,7 +66,7 @@ class DeployCheckRadosGW(actions_base.ActionsBase):
@deferred_decorator([make_snapshot_if_step_fail])
@action
def check_rados_daemon(self):
"""Check the radosqw daemon is started"""
"""Check the radosgw daemon is started"""
def radosgw_started(remote):
return len(remote.check_call(
'ps aux | grep "/usr/bin/radosgw -n '

View File

@ -288,7 +288,7 @@ class AbstractLog(object):
def each_record(self):
"""
Abstract record iterator that interates
Abstract record iterator that iterates
through the content lines
:return: iter
"""

View File

@ -55,7 +55,7 @@ if you do need to override them.
-r (yes/no) - Should built ISO file be places with build number tag and
symlinked to the last build or just copied over the last file.
-b (num) - Allows you to override Jenkins' build number if you need to.
-l (dir) - Path to logs directory. Can be set by LOGS_DIR evironment variable.
-l (dir) - Path to logs directory. Can be set by LOGS_DIR environment variable.
Uses WORKSPACE/logs if not set.
-L - Disable fuel_logs tool to extract the useful lines from Astute and Puppet logs
within the Fuel log snapshot or on the live Fuel Master node.
@ -65,7 +65,7 @@ if you do need to override them.
-K - Keep test environment after tests are finished
-h - Show this help page
Most variables uses guesing from Jenkins' job name but can be overriden
Most variables uses guessing from Jenkins' job name but can be overridden
by exported variable before script is run or by one of command line options.
You can override following variables using export VARNAME="value" before running this script
@ -110,7 +110,7 @@ GlobalVariables() {
# full path where iso file should be placed
# make from iso name and path to iso shared directory
# if was not overriden by options or export
# if was not overridden by options or export
if [ -z "${ISO_PATH}" ]; then
ISO_PATH="${ISO_DIR}/${ISO_NAME}"
fi
@ -278,7 +278,7 @@ MakeISO() {
fi
# copy ISO file to storage dir
# if rotation is enabled and build number is aviable
# if rotation is enabled and build number is available
# save iso to tagged file and symlink to the last build
# if rotation is not enabled just copy iso to iso_dir