Fix and simplify reboot stress test. Add new env stuff.
Added stress environment var for max_instances. Added tempest var for
log_level of rest_client.

Change-Id: Ia92536a547cdab4d2496bd9ba9067be3595b79cd
Change-Id: I8c2f499a41f74c2fe6fb08c80ab6fc31f6b93426
parent 3f981df854
commit 180fed1afa
@@ -66,6 +66,9 @@ create_image_enabled = true
 # Cluster: the 'nova' user must have scp access between cluster nodes
 resize_available = true
 
+# Level to log Compute API request/response details.
+log_level = ERROR
+
 [image]
 # This section contains configuration options used when executing tests
 # against the OpenStack Images API
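The new option names a stock Python logging level. The rest_client change later in this commit resolves the string with getattr(logging, ...), so only the standard level names are valid; a minimal sketch of that resolution:

    import logging

    # Each stock level name from tempest.conf resolves to an int constant;
    # getattr is exactly how rest_client looks the name up (see below).
    for name in ('CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'):
        assert isinstance(getattr(logging, name), int)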
@@ -32,6 +32,7 @@ value of --logdir in nova.conf:
 host_admin_user=<name of user for ssh command>
 nova_logdir=<value of --logdir in nova.conf>
 controller=<hostname for calling nova-manage>
+max_instances=<limit on instances that will be created>
 
 The stress test needs the top-level tempest directory to be on PYTHONPATH
 if you are not using nosetests to run.
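For context, a hypothetical stress section combining the documented keys (the values are placeholders, not defaults shipped by this commit):

    host_admin_user=ubuntu
    nova_logdir=/var/log/nova
    controller=nova-controller.example.org
    max_instances=16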
@@ -46,3 +46,8 @@ class StressConfig(object):
     def controller(self):
         """Controller host"""
         return self.get("controller", None)
+
+    @property
+    def max_instances(self):
+        """Maximum number of instances to create during test"""
+        return self.get("max_instances", 16)
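The new property follows the same get-with-default pattern as the others, so existing configs without the key keep working. A usage sketch, with conf standing in for the parsed config the driver passes as manager.config._conf:

    stress_config = StressConfig(conf)
    print(stress_config.max_instances)  # 16 when max_instances is unset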
@@ -114,14 +114,14 @@ def bash_openstack(manager,
                    (default: 32)
     `seed` = random seed (default: None)
     """
+    stress_config = StressConfig(manager.config._conf)
     # get keyword arguments
     duration = kwargs.get('duration', datetime.timedelta(seconds=10))
     seed = kwargs.get('seed', None)
     sleep_time = float(kwargs.get('sleep_time', 3000)) / 1000
-    max_vms = int(kwargs.get('max_vms', 32))
+    max_vms = int(kwargs.get('max_vms', stress_config.max_instances))
     test_name = kwargs.get('test_name', 'unamed test')
 
-    stress_config = StressConfig(manager.config._conf)
     keypath = stress_config.host_private_key_path
     user = stress_config.host_admin_user
     logdir = stress_config.nova_logdir
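The effective limit now resolves in three steps: an explicit max_vms kwarg wins, then the config's max_instances, then that property's built-in default of 16. A self-contained sketch of the chain (effective_max_vms is a hypothetical helper, not part of the commit):

    def effective_max_vms(kwargs, config_value=None):
        # kwarg > stress config > hard default (16, from StressConfig)
        fallback = config_value if config_value is not None else 16
        return int(kwargs.get('max_vms', fallback))

    assert effective_max_vms({'max_vms': 32}) == 32    # explicit kwarg
    assert effective_max_vms({}, config_value=8) == 8  # from max_instances
    assert effective_max_vms({}) == 16                 # nothing configured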
@@ -194,7 +194,8 @@ def bash_openstack(manager,
                     break
                 i += 1
                 if i > 60:
-                    raise
+                    _error_in_logs(keypath, logdir, user, computes)
+                    raise Exception("Cleanup timed out")
                 time.sleep(1)
             logging.info('killed %s' % kill_id)
             state.delete_instance_state(kill_id)
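The bare raise becomes a log scan plus a descriptive exception, with the one-second poll and 60-iteration deadline unchanged. The same pattern as a hypothetical standalone helper:

    import time

    def wait_or_fail(check_done, on_timeout, tries=60):
        # Poll once per second; at the deadline, run diagnostics, then fail loudly.
        for _ in range(tries):
            if check_done():
                return
            time.sleep(1)
        on_timeout()  # e.g. _error_in_logs(keypath, logdir, user, computes)
        raise Exception("Cleanup timed out")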
@@ -25,7 +25,7 @@ import time
 # local imports
 import test_case
 import pending_action
-from tempest.exceptions import TimeoutException
+from tempest.exceptions import TimeoutException, Duplicate
 from utils.util import *
 
 
@@ -52,51 +52,35 @@ class TestRebootVM(test_case.StressTestCase):
             self._logger.info('no ACTIVE instances to reboot')
             return
 
-        _reboot_type = kwargs.get('type', 'SOFT')
+        _reboot_arg = kwargs.get('type', 'SOFT')
 
         # select active vm to reboot and then send request to nova controller
         target = random.choice(active_vms)
         reboot_target = target[0]
+        # It seems that doing a reboot when in reboot is an error.
+        try:
+            response, body = manager.servers_client.reboot(
+                reboot_target['id'],
+                _reboot_arg)
+        except Duplicate:
+            return
 
-        response, body = manager.servers_client.reboot(
-            reboot_target['id'],
-            _reboot_type)
         if (response.status != 202):
             self._logger.error("response: %s" % response)
             raise Exception
 
-        if _reboot_type == 'SOFT':
-            state_name = 'REBOOT'
+        if _reboot_arg == 'SOFT':
+            reboot_state = 'REBOOT'
         else:
-            state_name = 'REBOOT'  # this is a bug, should be HARD_REBOOT
+            reboot_state = 'HARD_REBOOT'
 
-        self._logger.info('waiting for machine %s to change to %s' %
-                          (reboot_target['id'], state_name))
-
-        # check for state transition
-        _resp, body = manager.servers_client.get_server(reboot_target['id'])
-        if body['status'] == state_name:
-            state_string = state_name
-        else:
-            # grab the actual state as we think it is
-            temp_obj = state.get_instances()[self._target['id']]
-            self._logger.debug(
-                "machine %s in state %s" %
-                (reboot_target['id'], temp_obj[1])
-            )
-            state_string = temp_obj[1]
-
-        if state_string == state_name:
-            self._logger.info('machine %s ACTIVE -> %s' %
-                              (reboot_target['id'], state_name))
-            state.set_instance_state(reboot_target['id'],
-                                     (reboot_target, state_name))
+        state.set_instance_state(reboot_target['id'],
+                                 (reboot_target['id'], reboot_state))
 
         return VerifyRebootVM(manager,
                               state,
                               reboot_target,
-                              reboot_type=_reboot_type,
-                              state_name=state_string)
+                              reboot_state=reboot_state)
 
 
 class VerifyRebootVM(pending_action.PendingAction):
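The try/except is the heart of the fix: asking nova to reboot a server that is already rebooting is rejected, and tempest surfaces that rejection as Duplicate, so the test now skips the victim instead of crashing the whole run. Condensed, with server_id standing in for reboot_target['id']:

    try:
        response, body = manager.servers_client.reboot(server_id, _reboot_arg)
    except Duplicate:  # reboot already in flight for this server
        return         # nothing for this action to verify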
@@ -104,22 +88,13 @@ class VerifyRebootVM(pending_action.PendingAction):
     States = enum('REBOOT_CHECK', 'ACTIVE_CHECK')
 
     def __init__(self, manager, state, target_server,
-                 reboot_type=None,
-                 state_name=None,
+                 reboot_state=None,
                  ip_addr=None):
         super(VerifyRebootVM, self).__init__(manager,
                                              state,
                                              target_server)
-        # FIX ME: this is a nova bug
-        if reboot_type == 'SOFT':
-            self._reboot_state = 'REBOOT'
-        else:
-            self._reboot_state = 'REBOOT'  # should be HARD REBOOT
-
-        if state_name == 'ACTIVE':  # was still active, check to see if REBOOT
-            self._retry_state = self.States.REBOOT_CHECK
-        else:  # was REBOOT, so now check for ACTIVE
-            self._retry_state = self.States.ACTIVE_CHECK
+        self._reboot_state = reboot_state
+        self._retry_state = self.States.REBOOT_CHECK
 
     def retry(self):
         """
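States comes from the star import of utils.util; that enum helper is not part of this diff, but a common Python 2 era implementation (assumed here) looks like:

    def enum(*sequential, **named):
        # Build a class whose attributes map names to ordinals, e.g.
        # States.REBOOT_CHECK == 0 and States.ACTIVE_CHECK == 1.
        enums = dict(zip(sequential, range(len(sequential))), **named)
        return type('Enum', (), enums)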
@@ -155,8 +130,9 @@ class VerifyRebootVM(pending_action.PendingAction):
             if not self._check_for_status('ACTIVE'):
                 return False
             target = self._target
-            self._logger.info('machine %s REBOOT -> ACTIVE [%.1f secs elapsed]' %
-                              (target['id'], time.time() - self._start_time))
+            self._logger.info('machine %s %s -> ACTIVE [%.1f secs elapsed]' %
+                              (target['id'], reboot_state,
+                               time.time() - self._start_time))
             self._state.set_instance_state(target['id'],
                                            (target, 'ACTIVE'))
 
@@ -214,7 +214,7 @@ class VerifyKillActiveVM(pending_action.PendingAction):
             target = self._target
             self._logger.info('machine %s: DELETED [%.1f secs elapsed]' %
                               (target['id'], time.time() - self._start_time))
-            self._state.delete_machine_state(target['id'])
+            self._state.delete_instance_state(target['id'])
             return True
 
         return False
@@ -32,4 +32,4 @@ bash_openstack(nova,
                sleep_time=100,  # in milliseconds
                seed=int(time.time()),
                test_name="create and delete",
-               max_vms=32)
+               )
@@ -34,4 +34,4 @@ bash_openstack(nova,
                sleep_time=500,  # in milliseconds
                seed=int(time.time()),
                test_name="hard reboots",
-               max_vms=32)
+               )
@@ -35,4 +35,4 @@ bash_openstack(nova,
                sleep_time=1000,  # in milliseconds
                seed=None,
                test_name="simple create and delete",
-               max_vms=10)
+               max_vms=4)
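The first two samples drop max_vms entirely, so those runs are now sized by max_instances from the stress config, while the third pins a smaller explicit value; an explicit kwarg still overrides the config. Abridged shape of an updated call (earlier arguments elided):

    bash_openstack(nova,
                   ...
                   test_name="create and delete",
                   )  # instance limit now comes from max_instances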
@@ -21,9 +21,10 @@ import tempest.config
 
 # get the environment variables for credentials
 identity = tempest.config.TempestConfig().identity
+compute = tempest.config.TempestConfig().compute
 
-nt = client.Client(identity.username, identity.password,
-                   identity.tenant_name, identity.auth_url)
+nt = client.Client(compute.username, compute.password,
+                   compute.tenant_name, identity.auth_url)
 
 flavor_list = nt.flavors.list()
 server_list = nt.servers.list()
@@ -21,11 +21,12 @@ import tempest.config
 
 # get the environment variables for credentials
 identity = tempest.config.TempestConfig().identity
-print identity.username, identity.password,\
-    identity.tenant_name, identity.auth_url
+compute = tempest.config.TempestConfig().compute
+print compute.username, compute.password,\
+    compute.tenant_name, identity.auth_url
 
-nt = client.Client(identity.username, identity.password,
-                   identity.tenant_name, identity.auth_url)
+nt = client.Client(compute.username, compute.password,
+                   compute.tenant_name, identity.auth_url)
 
 flavor_list = nt.flavors.list()
 server_list = nt.servers.list()
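Both utility scripts get the same treatment: novaclient authenticates as the compute user while auth_url still comes from the identity section. The shared pattern, condensed (the client import is assumed from context; it is not shown in this diff):

    import tempest.config
    from novaclient.v1_1 import client  # assumed import behind "client.Client"

    conf = tempest.config.TempestConfig()
    nt = client.Client(conf.compute.username, conf.compute.password,
                       conf.compute.tenant_name, conf.identity.auth_url)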
@@ -33,7 +33,7 @@ class RestClient(object):
     def __init__(self, config, user, password, auth_url, service,
                  tenant_name=None):
         self.log = logging.getLogger(__name__)
-        self.log.setLevel(logging.ERROR)
+        self.log.setLevel(getattr(logging, config.compute.log_level))
         self.config = config
         if self.config.identity.strategy == 'keystone':
             self.token, self.base_url = self.keystone_auth(user,
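getattr makes the option strict: a valid name yields the numeric level, while a typo in tempest.conf fails fast at client construction. A quick illustration:

    import logging

    getattr(logging, 'ERROR')    # 40
    getattr(logging, 'DEBUG')    # 10
    getattr(logging, 'VERBOSE')  # raises AttributeError: no such level name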
@@ -164,6 +164,11 @@ class ComputeConfig(BaseConfig):
         """Catalog type of the Compute service."""
         return self.get("catalog_type", 'compute')
 
+    @property
+    def log_level(self):
+        """Level for logging compute API calls."""
+        return self.get("log_level", 'ERROR')
+
 
 class ImagesConfig(BaseConfig):
 
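log_level mirrors the other ComputeConfig properties: BaseConfig.get is not shown in this diff, but the properties above assume behavior roughly along these lines (a sketch, not the real implementation):

    from ConfigParser import NoOptionError, NoSectionError  # Python 2 stdlib

    def get(self, item_name, default_value=None):
        # Read item_name from this section of the parsed file, else default.
        try:
            return self.conf.get(self.SECTION_NAME, item_name)
        except (NoSectionError, NoOptionError):
            return default_value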