Merge "Rename config section of benchmark & tempest to openstack"

Jenkins 2017-10-10 10:49:10 +00:00 committed by Gerrit Code Review
commit 783199468c
63 changed files with 802 additions and 604 deletions


@ -15,30 +15,37 @@
from oslo_config import cfg
OPTS = {"benchmark": [
OPTS = {"openstack": [
cfg.FloatOpt("cinder_volume_create_prepoll_delay",
default=2.0,
deprecated_group="benchmark",
help="Time to sleep after creating a resource before"
" polling for it status"),
cfg.FloatOpt("cinder_volume_create_timeout",
default=600.0,
deprecated_group="benchmark",
help="Time to wait for cinder volume to be created."),
cfg.FloatOpt("cinder_volume_create_poll_interval",
default=2.0,
deprecated_group="benchmark",
help="Interval between checks when waiting for volume"
" creation."),
cfg.FloatOpt("cinder_volume_delete_timeout",
default=600.0,
deprecated_group="benchmark",
help="Time to wait for cinder volume to be deleted."),
cfg.FloatOpt("cinder_volume_delete_poll_interval",
default=2.0,
deprecated_group="benchmark",
help="Interval between checks when waiting for volume"
" deletion."),
cfg.FloatOpt("cinder_backup_restore_timeout",
default=600.0,
deprecated_group="benchmark",
help="Time to wait for cinder backup to be restored."),
cfg.FloatOpt("cinder_backup_restore_poll_interval",
default=2.0,
deprecated_group="benchmark",
help="Interval between checks when waiting for backup"
" restoring."),
]}
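This first cinder block shows the pattern that repeats through every option module in the commit: the OPTS dict is now keyed by the new "openstack" group, and each option carries deprecated_group="benchmark" so that values operators still have under the old section keep working. A minimal sketch of how such a dict is registered and read, assuming a plain registration loop (the loop itself is illustrative; the cfg calls are standard oslo.config):

from oslo_config import cfg

CONF = cfg.CONF

OPTS = {"openstack": [
    cfg.FloatOpt("cinder_volume_create_timeout",
                 default=600.0,
                 deprecated_group="benchmark",
                 help="Time to wait for cinder volume to be created."),
]}

# Register every option list under its group name.
for group, opts in OPTS.items():
    CONF.register_opts(opts, group=group)

# Reads go through the new group; a value that a deployment still sets
# under [benchmark] is picked up via deprecated_group, with a
# deprecation warning logged by oslo.config.
timeout = CONF.openstack.cinder_volume_create_timeout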


@ -15,20 +15,23 @@
from oslo_config import cfg
OPTS = {"benchmark": [
OPTS = {"openstack": [
cfg.FloatOpt(
"ec2_server_boot_prepoll_delay",
default=1.0,
deprecated_group="benchmark",
help="Time to sleep after boot before polling for status"
),
cfg.FloatOpt(
"ec2_server_boot_timeout",
default=300.0,
deprecated_group="benchmark",
help="Server boot timeout"
),
cfg.FloatOpt(
"ec2_server_boot_poll_interval",
default=1.0,
deprecated_group="benchmark",
help="Server boot poll interval"
)
]}


@ -15,31 +15,38 @@
from oslo_config import cfg
OPTS = {"benchmark": [
OPTS = {"openstack": [
cfg.FloatOpt("glance_image_delete_timeout",
default=120.0,
deprecated_group="benchmark",
help="Time to wait for glance image to be deleted."),
cfg.FloatOpt("glance_image_delete_poll_interval",
default=1.0,
deprecated_group="benchmark",
help="Interval between checks when waiting for image "
"deletion."),
cfg.FloatOpt("glance_image_create_prepoll_delay",
default=2.0,
deprecated_group="benchmark",
help="Time to sleep after creating a resource before "
"polling for it status"),
cfg.FloatOpt("glance_image_create_timeout",
default=120.0,
deprecated_group="benchmark",
help="Time to wait for glance image to be created."),
cfg.FloatOpt("glance_image_create_poll_interval",
default=1.0,
deprecated_group="benchmark",
help="Interval between checks when waiting for image "
"creation."),
cfg.FloatOpt("glance_image_create_prepoll_delay",
default=2.0,
deprecated_group="benchmark",
help="Time to sleep after creating a resource before "
"polling for it status"),
cfg.FloatOpt("glance_image_create_poll_interval",
default=1.0,
deprecated_group="benchmark",
help="Interval between checks when waiting for image "
"creation.")
]}
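Note that this list defines glance_image_create_prepoll_delay and glance_image_create_poll_interval twice with identical settings. oslo.config tolerates that: re-registering an option whose attributes match the one already registered is a no-op, and DuplicateOptError is raised only when the definitions differ. A short sketch of that behavior, assuming the "openstack" group is already in use:

from oslo_config import cfg

CONF = cfg.CONF

opt = cfg.FloatOpt("glance_image_create_poll_interval", default=1.0)
CONF.register_opt(opt, group="openstack")
CONF.register_opt(opt, group="openstack")  # accepted: identical duplicate

# A second registration with *different* attributes would raise
# cfg.DuplicateOptError:
# CONF.register_opt(cfg.FloatOpt("glance_image_create_poll_interval",
#                                default=5.0), group="openstack")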


@ -15,78 +15,98 @@
from oslo_config import cfg
OPTS = {"benchmark": [
OPTS = {"openstack": [
cfg.FloatOpt("heat_stack_create_prepoll_delay",
default=2.0,
deprecated_group="benchmark",
help="Time(in sec) to sleep after creating a resource before "
"polling for it status."),
cfg.FloatOpt("heat_stack_create_timeout",
default=3600.0,
deprecated_group="benchmark",
help="Time(in sec) to wait for heat stack to be created."),
cfg.FloatOpt("heat_stack_create_poll_interval",
default=1.0,
deprecated_group="benchmark",
help="Time interval(in sec) between checks when waiting for "
"stack creation."),
cfg.FloatOpt("heat_stack_delete_timeout",
default=3600.0,
deprecated_group="benchmark",
help="Time(in sec) to wait for heat stack to be deleted."),
cfg.FloatOpt("heat_stack_delete_poll_interval",
default=1.0,
deprecated_group="benchmark",
help="Time interval(in sec) between checks when waiting for "
"stack deletion."),
cfg.FloatOpt("heat_stack_check_timeout",
default=3600.0,
deprecated_group="benchmark",
help="Time(in sec) to wait for stack to be checked."),
cfg.FloatOpt("heat_stack_check_poll_interval",
default=1.0,
deprecated_group="benchmark",
help="Time interval(in sec) between checks when waiting for "
"stack checking."),
cfg.FloatOpt("heat_stack_update_prepoll_delay",
default=2.0,
deprecated_group="benchmark",
help="Time(in sec) to sleep after updating a resource before "
"polling for it status."),
cfg.FloatOpt("heat_stack_update_timeout",
default=3600.0,
deprecated_group="benchmark",
help="Time(in sec) to wait for stack to be updated."),
cfg.FloatOpt("heat_stack_update_poll_interval",
default=1.0,
deprecated_group="benchmark",
help="Time interval(in sec) between checks when waiting for "
"stack update."),
cfg.FloatOpt("heat_stack_suspend_timeout",
default=3600.0,
deprecated_group="benchmark",
help="Time(in sec) to wait for stack to be suspended."),
cfg.FloatOpt("heat_stack_suspend_poll_interval",
default=1.0,
deprecated_group="benchmark",
help="Time interval(in sec) between checks when waiting for "
"stack suspend."),
cfg.FloatOpt("heat_stack_resume_timeout",
default=3600.0,
deprecated_group="benchmark",
help="Time(in sec) to wait for stack to be resumed."),
cfg.FloatOpt("heat_stack_resume_poll_interval",
default=1.0,
deprecated_group="benchmark",
help="Time interval(in sec) between checks when waiting for "
"stack resume."),
cfg.FloatOpt("heat_stack_snapshot_timeout",
default=3600.0,
deprecated_group="benchmark",
help="Time(in sec) to wait for stack snapshot to "
"be created."),
cfg.FloatOpt("heat_stack_snapshot_poll_interval",
default=1.0,
deprecated_group="benchmark",
help="Time interval(in sec) between checks when waiting for "
"stack snapshot to be created."),
cfg.FloatOpt("heat_stack_restore_timeout",
default=3600.0,
deprecated_group="benchmark",
help="Time(in sec) to wait for stack to be restored from "
"snapshot."),
cfg.FloatOpt("heat_stack_restore_poll_interval",
default=1.0,
deprecated_group="benchmark",
help="Time interval(in sec) between checks when waiting for "
"stack to be restored."),
cfg.FloatOpt("heat_stack_scale_timeout",
default=3600.0,
deprecated_group="benchmark",
help="Time (in sec) to wait for stack to scale up or down."),
cfg.FloatOpt("heat_stack_scale_poll_interval",
default=1.0,
deprecated_group="benchmark",
help="Time interval (in sec) between checks when waiting for "
"a stack to scale up or down.")
]}
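Every heat_* timeout and poll interval above feeds the generic polling helper that the scenario hunks later in this diff call as utils.wait_for / utils.wait_for_status. A simplified sketch of that polling contract (not Rally's actual implementation; the resource is assumed to expose a status attribute):

import time

def wait_for(resource, ready_statuses, update_resource, timeout, check_interval):
    """Poll a resource until it reaches a ready status or the timeout expires."""
    start = time.time()
    while True:
        resource = update_resource(resource)  # re-fetch, e.g. stacks.get(...)
        if resource.status in ready_statuses:
            return resource
        if time.time() - start > timeout:
            raise TimeoutError("not ready after %.0f seconds" % timeout)
        time.sleep(check_interval)

# Typical call, mirroring the scenario code shown further down:
# wait_for(stack,
#          ready_statuses=["CREATE_COMPLETE"],
#          update_resource=refresh_stack,  # hypothetical helper
#          timeout=CONF.openstack.heat_stack_create_timeout,
#          check_interval=CONF.openstack.heat_stack_create_poll_interval)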


@ -15,18 +15,22 @@
from oslo_config import cfg
OPTS = {"benchmark": [
OPTS = {"openstack": [
cfg.FloatOpt("ironic_node_create_poll_interval",
default=1.0,
deprecated_group="benchmark",
help="Interval(in sec) between checks when waiting for node "
"creation."),
cfg.FloatOpt("ironic_node_create_timeout",
default=300,
deprecated_group="benchmark",
help="Ironic node create timeout"),
cfg.FloatOpt("ironic_node_poll_interval",
default=1.0,
deprecated_group="benchmark",
help="Ironic node poll interval"),
cfg.FloatOpt("ironic_node_delete_timeout",
default=300,
deprecated_group="benchmark",
help="Ironic node create timeout")
]}


@ -15,31 +15,38 @@
from oslo_config import cfg
OPTS = {"benchmark": [
OPTS = {"openstack": [
cfg.FloatOpt("magnum_cluster_create_prepoll_delay",
default=5.0,
deprecated_group="benchmark",
help="Time(in sec) to sleep after creating a resource before "
"polling for the status."),
cfg.FloatOpt("magnum_cluster_create_timeout",
default=2400.0,
deprecated_group="benchmark",
help="Time(in sec) to wait for magnum cluster to be "
"created."),
cfg.FloatOpt("magnum_cluster_create_poll_interval",
default=2.0,
deprecated_group="benchmark",
help="Time interval(in sec) between checks when waiting for "
"cluster creation."),
cfg.FloatOpt("k8s_pod_create_timeout",
default=1200.0,
deprecated_group="benchmark",
help="Time(in sec) to wait for k8s pod to be created."),
cfg.FloatOpt("k8s_pod_create_poll_interval",
default=1.0,
deprecated_group="benchmark",
help="Time interval(in sec) between checks when waiting for "
"k8s pod creation."),
cfg.FloatOpt("k8s_rc_create_timeout",
default=1200.0,
deprecated_group="benchmark",
help="Time(in sec) to wait for k8s rc to be created."),
cfg.FloatOpt("k8s_rc_create_poll_interval",
default=1.0,
deprecated_group="benchmark",
help="Time interval(in sec) between checks when waiting for "
"k8s rc creation."),
"k8s rc creation.")
]}


@ -15,46 +15,55 @@
from oslo_config import cfg
OPTS = {"benchmark": [
OPTS = {"openstack": [
cfg.FloatOpt(
"manila_share_create_prepoll_delay",
default=2.0,
deprecated_group="benchmark",
help="Delay between creating Manila share and polling for its "
"status."),
cfg.FloatOpt(
"manila_share_create_timeout",
default=300.0,
deprecated_group="benchmark",
help="Timeout for Manila share creation."),
cfg.FloatOpt(
"manila_share_create_poll_interval",
default=3.0,
deprecated_group="benchmark",
help="Interval between checks when waiting for Manila share "
"creation."),
cfg.FloatOpt(
"manila_share_delete_timeout",
default=180.0,
deprecated_group="benchmark",
help="Timeout for Manila share deletion."),
cfg.FloatOpt(
"manila_share_delete_poll_interval",
default=2.0,
deprecated_group="benchmark",
help="Interval between checks when waiting for Manila share "
"deletion."),
cfg.FloatOpt(
"manila_access_create_timeout",
default=300.0,
deprecated_group="benchmark",
help="Timeout for Manila access creation."),
cfg.FloatOpt(
"manila_access_create_poll_interval",
default=3.0,
deprecated_group="benchmark",
help="Interval between checks when waiting for Manila access "
"creation."),
cfg.FloatOpt(
"manila_access_delete_timeout",
default=180.0,
deprecated_group="benchmark",
help="Timeout for Manila access deletion."),
cfg.FloatOpt(
"manila_access_delete_poll_interval",
default=2.0,
deprecated_group="benchmark",
help="Interval between checks when waiting for Manila access "
"deletion."),
]}


@ -1,9 +1,23 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
OPTS = {"benchmark": [
OPTS = {"openstack": [
cfg.IntOpt(
"mistral_execution_timeout",
default=200,
deprecated_group="benchmark",
help="mistral execution timeout")
]}


@ -1,10 +1,24 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
OPTS = {"benchmark": [
OPTS = {"openstack": [
cfg.FloatOpt(
"monasca_metric_create_prepoll_delay",
default=15.0,
deprecated_group="benchmark",
help="Delay between creating Monasca metrics and polling for "
"its elements.")
]}


@ -15,11 +15,15 @@
from oslo_config import cfg
OPTS = {"benchmark": [
cfg.IntOpt("murano_deploy_environment_timeout", default=1200,
OPTS = {"openstack": [
cfg.IntOpt("murano_deploy_environment_timeout",
default=1200,
deprecated_name="deploy_environment_timeout",
deprecated_group="benchmark",
help="A timeout in seconds for an environment deploy"),
cfg.IntOpt("murano_deploy_environment_check_interval", default=5,
cfg.IntOpt("murano_deploy_environment_check_interval",
default=5,
deprecated_name="deploy_environment_check_interval",
deprecated_group="benchmark",
help="Deploy environment check interval in seconds"),
]}
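The murano options are the one case above that combines deprecated_name with deprecated_group, so the fully legacy spelling still resolves. An illustrative config fragment (not a file from this change), showing the two locations oslo.config will accept for the same value:

# New location, used going forward:
[openstack]
murano_deploy_environment_timeout = 1200

# Legacy location, still honored with a deprecation warning:
[benchmark]
deploy_environment_timeout = 1200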


@ -15,11 +15,13 @@
from oslo_config import cfg
OPTS = {"benchmark": [
OPTS = {"openstack": [
cfg.FloatOpt("neutron_create_loadbalancer_timeout",
default=float(500),
deprecated_group="benchmark",
help="Neutron create loadbalancer timeout"),
cfg.FloatOpt("neutron_create_loadbalancer_poll_interval",
default=float(2),
deprecated_group="benchmark",
help="Neutron create loadbalancer poll interval")
]}


@ -15,244 +15,294 @@
from oslo_config import cfg
OPTS = {"benchmark": [
OPTS = {"openstack": [
# prepoll delay, timeout, poll interval
# "start": (0, 300, 1)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "start",
default=float(0),
help="Time to sleep after %s before polling"
" for status" % "start"),
cfg.FloatOpt("nova_server_%s_timeout" % "start",
default=float(300),
help="Server %s timeout" % "start"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "start",
default=float(1),
help="Server %s poll interval" % "start"),
cfg.FloatOpt("nova_server_start_prepoll_delay",
default=0.0,
deprecated_group="benchmark",
help="Time to sleep after start before polling for status"),
cfg.FloatOpt("nova_server_start_timeout",
default=300.0,
deprecated_group="benchmark",
help="Server start timeout"),
cfg.FloatOpt("nova_server_start_poll_interval",
deprecated_group="benchmark",
default=1.0,
help="Server start poll interval"),
# "stop": (0, 300, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "stop",
default=float(0),
help="Time to sleep after %s before polling"
" for status" % "stop"),
cfg.FloatOpt("nova_server_%s_timeout" % "stop",
default=float(300),
help="Server %s timeout" % "stop"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "stop",
default=float(2),
help="Server %s poll interval" % "stop"),
cfg.FloatOpt("nova_server_stop_prepoll_delay",
default=0.0,
help="Time to sleep after stop before polling for status"),
cfg.FloatOpt("nova_server_stop_timeout",
default=300.0,
deprecated_group="benchmark",
help="Server stop timeout"),
cfg.FloatOpt("nova_server_stop_poll_interval",
default=2.0,
deprecated_group="benchmark",
help="Server stop poll interval"),
# "boot": (1, 300, 1)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "boot",
default=float(1),
help="Time to sleep after %s before polling"
" for status" % "boot"),
cfg.FloatOpt("nova_server_%s_timeout" % "boot",
default=float(300),
help="Server %s timeout" % "boot"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "boot",
default=float(2),
help="Server %s poll interval" % "boot"),
cfg.FloatOpt("nova_server_boot_prepoll_delay",
default=1.0,
deprecated_group="benchmark",
help="Time to sleep after boot before polling for status"),
cfg.FloatOpt("nova_server_boot_timeout",
default=300.0,
deprecated_group="benchmark",
help="Server boot timeout"),
cfg.FloatOpt("nova_server_boot_poll_interval",
default=2.0,
deprecated_group="benchmark",
help="Server boot poll interval"),
# "delete": (2, 300, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "delete",
default=float(2),
help="Time to sleep after %s before polling"
" for status" % "delete"),
cfg.FloatOpt("nova_server_%s_timeout" % "delete",
default=float(300),
help="Server %s timeout" % "delete"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "delete",
default=float(2),
help="Server %s poll interval" % "delete"),
cfg.FloatOpt("nova_server_delete_prepoll_delay",
default=2.0,
deprecated_group="benchmark",
help="Time to sleep after delete before polling for status"),
cfg.FloatOpt("nova_server_delete_timeout",
default=300.0,
deprecated_group="benchmark",
help="Server delete timeout"),
cfg.FloatOpt("nova_server_delete_poll_interval",
default=2.0,
deprecated_group="benchmark",
help="Server delete poll interval"),
# "reboot": (2, 300, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "reboot",
default=float(2),
help="Time to sleep after %s before polling"
" for status" % "reboot"),
cfg.FloatOpt("nova_server_%s_timeout" % "reboot",
default=float(300),
help="Server %s timeout" % "reboot"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "reboot",
default=float(2),
help="Server %s poll interval" % "reboot"),
cfg.FloatOpt("nova_server_reboot_prepoll_delay",
default=2.0,
deprecated_group="benchmark",
help="Time to sleep after reboot before polling for status"),
cfg.FloatOpt("nova_server_reboot_timeout",
default=300.0,
deprecated_group="benchmark",
help="Server reboot timeout"),
cfg.FloatOpt("nova_server_reboot_poll_interval",
default=2.0,
deprecated_group="benchmark",
help="Server reboot poll interval"),
# "rebuild": (1, 300, 1)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "rebuild",
default=float(1),
help="Time to sleep after %s before polling"
" for status" % "rebuild"),
cfg.FloatOpt("nova_server_%s_timeout" % "rebuild",
default=float(300),
help="Server %s timeout" % "rebuild"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "rebuild",
default=float(1),
help="Server %s poll interval" % "rebuild"),
cfg.FloatOpt("nova_server_rebuild_prepoll_delay",
default=1.0,
deprecated_group="benchmark",
help="Time to sleep after rebuild before polling for status"),
cfg.FloatOpt("nova_server_rebuild_timeout",
default=300.0,
deprecated_group="benchmark",
help="Server rebuild timeout"),
cfg.FloatOpt("nova_server_rebuild_poll_interval",
default=1.0,
deprecated_group="benchmark",
help="Server rebuild poll interval"),
# "rescue": (2, 300, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "rescue",
default=float(2),
help="Time to sleep after %s before polling"
" for status" % "rescue"),
cfg.FloatOpt("nova_server_%s_timeout" % "rescue",
default=float(300),
help="Server %s timeout" % "rescue"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "rescue",
default=float(2),
help="Server %s poll interval" % "rescue"),
cfg.FloatOpt("nova_server_rescue_prepoll_delay",
default=2.0,
deprecated_group="benchmark",
help="Time to sleep after rescue before polling for status"),
cfg.FloatOpt("nova_server_rescue_timeout",
default=300.0,
deprecated_group="benchmark",
help="Server rescue timeout"),
cfg.FloatOpt("nova_server_rescue_poll_interval",
default=2.0,
deprecated_group="benchmark",
help="Server rescue poll interval"),
# "unrescue": (2, 300, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "unrescue",
default=float(2),
help="Time to sleep after %s before polling"
" for status" % "unrescue"),
cfg.FloatOpt("nova_server_%s_timeout" % "unrescue",
default=float(300),
help="Server %s timeout" % "unrescue"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "unrescue",
default=float(2),
help="Server %s poll interval" % "unrescue"),
cfg.FloatOpt("nova_server_unrescue_prepoll_delay",
default=2.0,
deprecated_group="benchmark",
help="Time to sleep after unrescue "
"before polling for status"),
cfg.FloatOpt("nova_server_unrescue_timeout",
default=300.0,
deprecated_group="benchmark",
help="Server unrescue timeout"),
cfg.FloatOpt("nova_server_unrescue_poll_interval",
default=2.0,
deprecated_group="benchmark",
help="Server unrescue poll interval"),
# "suspend": (2, 300, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "suspend",
default=float(2),
help="Time to sleep after %s before polling"
" for status" % "suspend"),
cfg.FloatOpt("nova_server_%s_timeout" % "suspend",
default=float(300),
help="Server %s timeout" % "suspend"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "suspend",
default=float(2),
help="Server %s poll interval" % "suspend"),
cfg.FloatOpt("nova_server_suspend_prepoll_delay",
default=2.0,
deprecated_group="benchmark",
help="Time to sleep after suspend before polling for status"),
cfg.FloatOpt("nova_server_suspend_timeout",
default=300.0,
deprecated_group="benchmark",
help="Server suspend timeout"),
cfg.FloatOpt("nova_server_suspend_poll_interval",
default=2.0,
deprecated_group="benchmark",
help="Server suspend poll interval"),
# "resume": (2, 300, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "resume",
default=float(2),
help="Time to sleep after %s before polling"
" for status" % "resume"),
cfg.FloatOpt("nova_server_%s_timeout" % "resume",
default=float(300),
help="Server %s timeout" % "resume"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "resume",
default=float(2),
help="Server %s poll interval" % "resume"),
cfg.FloatOpt("nova_server_resume_prepoll_delay",
default=2.0,
deprecated_group="benchmark",
help="Time to sleep after resume before polling for status"),
cfg.FloatOpt("nova_server_resume_timeout",
default=300.0,
deprecated_group="benchmark",
help="Server resume timeout"),
cfg.FloatOpt("nova_server_resume_poll_interval",
default=2.0,
deprecated_group="benchmark",
help="Server resume poll interval"),
# "pause": (2, 300, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "pause",
default=float(2),
help="Time to sleep after %s before polling"
" for status" % "pause"),
cfg.FloatOpt("nova_server_%s_timeout" % "pause",
default=float(300),
help="Server %s timeout" % "pause"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "pause",
default=float(2),
help="Server %s poll interval" % "pause"),
cfg.FloatOpt("nova_server_pause_prepoll_delay",
default=2.0,
deprecated_group="benchmark",
help="Time to sleep after pause before polling for status"),
cfg.FloatOpt("nova_server_pause_timeout",
default=300.0,
deprecated_group="benchmark",
help="Server pause timeout"),
cfg.FloatOpt("nova_server_pause_poll_interval",
default=2.0,
deprecated_group="benchmark",
help="Server pause poll interval"),
# "unpause": (2, 300, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "unpause",
default=float(2),
help="Time to sleep after %s before polling"
" for status" % "unpause"),
cfg.FloatOpt("nova_server_%s_timeout" % "unpause",
default=float(300),
help="Server %s timeout" % "unpause"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "unpause",
default=float(2),
help="Server %s poll interval" % "unpause"),
cfg.FloatOpt("nova_server_unpause_prepoll_delay",
default=2.0,
deprecated_group="benchmark",
help="Time to sleep after unpause before polling for status"),
cfg.FloatOpt("nova_server_unpause_timeout",
default=300.0,
deprecated_group="benchmark",
help="Server unpause timeout"),
cfg.FloatOpt("nova_server_unpause_poll_interval",
default=2.0,
deprecated_group="benchmark",
help="Server unpause poll interval"),
# "shelve": (2, 300, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "shelve",
default=float(2),
help="Time to sleep after %s before polling"
" for status" % "shelve"),
cfg.FloatOpt("nova_server_%s_timeout" % "shelve",
default=float(300),
help="Server %s timeout" % "shelve"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "shelve",
default=float(2),
help="Server %s poll interval" % "shelve"),
cfg.FloatOpt("nova_server_shelve_prepoll_delay",
default=2.0,
deprecated_group="benchmark",
help="Time to sleep after shelve before polling for status"),
cfg.FloatOpt("nova_server_shelve_timeout",
default=300.0,
deprecated_group="benchmark",
help="Server shelve timeout"),
cfg.FloatOpt("nova_server_shelve_poll_interval",
default=2.0,
deprecated_group="benchmark",
help="Server shelve poll interval"),
# "unshelve": (2, 300, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "unshelve",
default=float(2),
help="Time to sleep after %s before polling"
" for status" % "unshelve"),
cfg.FloatOpt("nova_server_%s_timeout" % "unshelve",
default=float(300),
help="Server %s timeout" % "unshelve"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "unshelve",
default=float(2),
help="Server %s poll interval" % "unshelve"),
cfg.FloatOpt("nova_server_unshelve_prepoll_delay",
default=2.0,
deprecated_group="benchmark",
help="Time to sleep after unshelve before "
"polling for status"),
cfg.FloatOpt("nova_server_unshelve_timeout",
default=300.0,
deprecated_group="benchmark",
help="Server unshelve timeout"),
cfg.FloatOpt("nova_server_unshelve_poll_interval",
default=2.0,
deprecated_group="benchmark",
help="Server unshelve poll interval"),
# "image_create": (0, 300, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "image_create",
default=float(0),
help="Time to sleep after %s before polling"
" for status" % "image_create"),
cfg.FloatOpt("nova_server_%s_timeout" % "image_create",
default=float(300),
help="Server %s timeout" % "image_create"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "image_create",
default=float(2),
help="Server %s poll interval" % "image_create"),
cfg.FloatOpt("nova_server_image_create_prepoll_delay",
default=0.0,
deprecated_group="benchmark",
help="Time to sleep after image_create before polling"
" for status"),
cfg.FloatOpt("nova_server_image_create_timeout",
default=300.0,
deprecated_group="benchmark",
help="Server image_create timeout"),
cfg.FloatOpt("nova_server_image_create_poll_interval",
default=2.0,
deprecated_group="benchmark",
help="Server image_create poll interval"),
# "image_delete": (0, 300, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "image_delete",
default=float(0),
help="Time to sleep after %s before polling"
" for status" % "image_delete"),
cfg.FloatOpt("nova_server_%s_timeout" % "image_delete",
default=float(300),
help="Server %s timeout" % "image_delete"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "image_delete",
default=float(2),
help="Server %s poll interval" % "image_delete"),
cfg.FloatOpt("nova_server_image_delete_prepoll_delay",
default=0.0,
deprecated_group="benchmark",
help="Time to sleep after image_delete before polling"
" for status"),
cfg.FloatOpt("nova_server_image_delete_timeout",
default=300.0,
deprecated_group="benchmark",
help="Server image_delete timeout"),
cfg.FloatOpt("nova_server_image_delete_poll_interval",
default=2.0,
deprecated_group="benchmark",
help="Server image_delete poll interval"),
# "resize": (2, 400, 5)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "resize",
default=float(2),
help="Time to sleep after %s before polling"
" for status" % "resize"),
cfg.FloatOpt("nova_server_%s_timeout" % "resize",
default=float(400),
help="Server %s timeout" % "resize"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "resize",
default=float(5),
help="Server %s poll interval" % "resize"),
cfg.FloatOpt("nova_server_resize_prepoll_delay",
default=2.0,
deprecated_group="benchmark",
help="Time to sleep after resize before polling for status"),
cfg.FloatOpt("nova_server_resize_timeout",
default=400.0,
deprecated_group="benchmark",
help="Server resize timeout"),
cfg.FloatOpt("nova_server_resize_poll_interval",
default=5.0,
deprecated_group="benchmark",
help="Server resize poll interval"),
# "resize_confirm": (0, 200, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "resize_confirm",
default=float(0),
help="Time to sleep after %s before polling"
" for status" % "resize_confirm"),
cfg.FloatOpt("nova_server_%s_timeout" % "resize_confirm",
default=float(200),
help="Server %s timeout" % "resize_confirm"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "resize_confirm",
default=float(2),
help="Server %s poll interval" % "resize_confirm"),
cfg.FloatOpt("nova_server_resize_confirm_prepoll_delay",
default=0.0,
deprecated_group="benchmark",
help="Time to sleep after resize_confirm before polling"
" for status"),
cfg.FloatOpt("nova_server_resize_confirm_timeout",
default=200.0,
deprecated_group="benchmark",
help="Server resize_confirm timeout"),
cfg.FloatOpt("nova_server_resize_confirm_poll_interval",
default=2.0,
deprecated_group="benchmark",
help="Server resize_confirm poll interval"),
# "resize_revert": (0, 200, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "resize_revert",
default=float(0),
help="Time to sleep after %s before polling"
" for status" % "resize_revert"),
cfg.FloatOpt("nova_server_%s_timeout" % "resize_revert",
default=float(200),
help="Server %s timeout" % "resize_revert"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "resize_revert",
default=float(2),
help="Server %s poll interval" % "resize_revert"),
cfg.FloatOpt("nova_server_resize_revert_prepoll_delay",
default=0.0,
deprecated_group="benchmark",
help="Time to sleep after resize_revert before polling"
" for status"),
cfg.FloatOpt("nova_server_resize_revert_timeout",
default=200.0,
deprecated_group="benchmark",
help="Server resize_revert timeout"),
cfg.FloatOpt("nova_server_resize_revert_poll_interval",
default=2.0,
deprecated_group="benchmark",
help="Server resize_revert poll interval"),
# "live_migrate": (1, 400, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "live_migrate",
default=float(1),
help="Time to sleep after %s before polling"
" for status" % "live_migrate"),
cfg.FloatOpt("nova_server_%s_timeout" % "live_migrate",
default=float(400),
help="Server %s timeout" % "live_migrate"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "live_migrate",
default=float(2),
help="Server %s poll interval" % "live_migrate"),
cfg.FloatOpt("nova_server_live_migrate_prepoll_delay",
default=1.0,
deprecated_group="benchmark",
help="Time to sleep after live_migrate before polling"
" for status"),
cfg.FloatOpt("nova_server_live_migrate_timeout",
default=400.0,
deprecated_group="benchmark",
help="Server live_migrate timeout"),
cfg.FloatOpt("nova_server_live_migrate_poll_interval",
default=2.0,
deprecated_group="benchmark",
help="Server live_migrate poll interval"),
# "migrate": (1, 400, 2)
cfg.FloatOpt("nova_server_%s_prepoll_delay" % "migrate",
default=float(1),
help="Time to sleep after %s before polling"
" for status" % "migrate"),
cfg.FloatOpt("nova_server_%s_timeout" % "migrate",
default=float(400),
help="Server %s timeout" % "migrate"),
cfg.FloatOpt("nova_server_%s_poll_interval" % "migrate",
default=float(2),
help="Server %s poll interval" % "migrate"),
cfg.FloatOpt("nova_server_migrate_prepoll_delay",
default=1.0,
deprecated_group="benchmark",
help="Time to sleep after migrate before polling for status"),
cfg.FloatOpt("nova_server_migrate_timeout",
default=400.0,
deprecated_group="benchmark",
help="Server migrate timeout"),
cfg.FloatOpt("nova_server_migrate_poll_interval",
default=2.0,
deprecated_group="benchmark",
help="Server migrate poll interval"),
# "detach":
cfg.FloatOpt("nova_detach_volume_timeout",
default=float(200),
default=200.0,
deprecated_group="benchmark",
help="Nova volume detach timeout"),
cfg.FloatOpt("nova_detach_volume_poll_interval",
default=float(2),
default=2.0,
deprecated_group="benchmark",
help="Nova volume detach poll interval")
]}
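The nova module is also where the commit stops building option names with string interpolation. The old code produced exactly the same names, but only at import time; spelling them out makes each option greppable and visible to static tooling such as config-sample generators. The equivalence, for one action:

# Old style: the name only exists after % formatting runs.
action = "start"
name = "nova_server_%s_timeout" % action
assert name == "nova_server_start_timeout"

# New style: the literal name is in the source itself.
# cfg.FloatOpt("nova_server_start_timeout", default=300.0, ...)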


@ -15,7 +15,9 @@
from oslo_config import cfg
OPTS = {"benchmark": [
cfg.BoolOpt("enable_profiler", default=True,
OPTS = {"openstack": [
cfg.BoolOpt("enable_profiler",
default=True,
deprecated_group="benchmark",
help="Enable or disable osprofiler to trace the scenarios")
]}
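enable_profiler is consumed by OpenStackScenario._init_profiler, shown in a scenario hunk later in this diff. For a one-off run or a unit test it can be flipped without editing the config file, using oslo.config's standard override API (a sketch; assumes the option is already registered under the "openstack" group):

from oslo_config import cfg

CONF = cfg.CONF

CONF.set_override("enable_profiler", False, group="openstack")
try:
    pass  # run scenarios without osprofiler tracing
finally:
    CONF.clear_override("enable_profiler", group="openstack")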


@ -15,22 +15,29 @@
from oslo_config import cfg
OPTS = {"benchmark": [
cfg.IntOpt("sahara_cluster_create_timeout", default=1800,
deprecated_name="cluster_create_timeout",
OPTS = {"openstack": [
cfg.IntOpt("sahara_cluster_create_timeout",
default=1800,
deprecated_group="benchmark",
help="A timeout in seconds for a cluster create operation"),
cfg.IntOpt("sahara_cluster_delete_timeout", default=900,
deprecated_name="cluster_delete_timeout",
cfg.IntOpt("sahara_cluster_delete_timeout",
default=900,
deprecated_group="benchmark",
help="A timeout in seconds for a cluster delete operation"),
cfg.IntOpt("sahara_cluster_check_interval", default=5,
deprecated_name="cluster_check_interval",
cfg.IntOpt("sahara_cluster_check_interval",
default=5,
deprecated_group="benchmark",
help="Cluster status polling interval in seconds"),
cfg.IntOpt("sahara_job_execution_timeout", default=600,
deprecated_name="job_execution_timeout",
cfg.IntOpt("sahara_job_execution_timeout",
default=600,
deprecated_group="benchmark",
help="A timeout in seconds for a Job Execution to complete"),
cfg.IntOpt("sahara_job_check_interval", default=5,
deprecated_name="job_check_interval",
cfg.IntOpt("sahara_job_check_interval",
default=5,
deprecated_group="benchmark",
help="Job Execution status polling interval in seconds"),
cfg.IntOpt("sahara_workers_per_proxy", default=20,
cfg.IntOpt("sahara_workers_per_proxy",
default=20,
deprecated_group="benchmark",
help="Amount of workers one proxy should serve to.")
]}


@ -15,8 +15,9 @@
from oslo_config import cfg
OPTS = {"benchmark": [
OPTS = {"openstack": [
cfg.FloatOpt("senlin_action_timeout",
default=3600,
deprecated_group="benchmark",
help="Time in seconds to wait for senlin action to finish.")
]}


@ -15,19 +15,23 @@
from oslo_config import cfg
OPTS = {"tempest": [
OPTS = {"openstack": [
cfg.StrOpt("img_url",
default="http://download.cirros-cloud.net/"
"0.3.5/cirros-0.3.5-x86_64-disk.img",
deprecated_group="tempest",
help="image URL"),
cfg.StrOpt("img_disk_format",
default="qcow2",
deprecated_group="tempest",
help="Image disk format to use when creating the image"),
cfg.StrOpt("img_container_format",
default="bare",
deprecated_group="tempest",
help="Image container format to use when creating the image"),
cfg.StrOpt("img_name_regex",
default="^.*(cirros|testvm).*$",
deprecated_group="tempest",
help="Regular expression for name of a public image to "
"discover it in the cloud and use it for the tests. "
"Note that when Rally is searching for the image, case "
@ -38,26 +42,33 @@ OPTS = {"tempest": [
"options are not specified in the Tempest config file"),
cfg.StrOpt("swift_operator_role",
default="Member",
deprecated_group="tempest",
help="Role required for users "
"to be able to create Swift containers"),
cfg.StrOpt("swift_reseller_admin_role",
default="ResellerAdmin",
deprecated_group="tempest",
help="User role that has reseller admin"),
cfg.StrOpt("heat_stack_owner_role",
default="heat_stack_owner",
deprecated_group="tempest",
help="Role required for users "
"to be able to manage Heat stacks"),
cfg.StrOpt("heat_stack_user_role",
default="heat_stack_user",
deprecated_group="tempest",
help="Role for Heat template-defined users"),
cfg.IntOpt("flavor_ref_ram",
default="64",
deprecated_group="tempest",
help="Primary flavor RAM size used by most of the test cases"),
cfg.IntOpt("flavor_ref_alt_ram",
default="128",
deprecated_group="tempest",
help="Alternate reference flavor RAM size used by test that"
"need two flavors, like those that resize an instance"),
cfg.IntOpt("heat_instance_type_ram",
default="64",
deprecated_group="tempest",
help="RAM size flavor used for orchestration test cases")
]}
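This module is the one whose options move from [tempest] rather than [benchmark], so existing verifier settings keep working through the same deprecated_group fallback. An illustrative configuration fragment (values are simply the defaults above, shown for shape):

[openstack]
img_url = http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
img_disk_format = qcow2
img_container_format = bare
flavor_ref_ram = 64

# The same keys under a pre-rename [tempest] section are still read,
# with a deprecation warning.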


@ -1,10 +1,27 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
OPTS = {"benchmark": [
cfg.FloatOpt("vm_ping_poll_interval", default=1.0,
OPTS = {"openstack": [
cfg.FloatOpt("vm_ping_poll_interval",
default=1.0,
deprecated_group="benchmark",
help="Interval between checks when waiting for a VM to "
"become pingable"),
cfg.FloatOpt("vm_ping_timeout", default=120.0,
cfg.FloatOpt("vm_ping_timeout",
default=120.0,
deprecated_group="benchmark",
help="Time to wait for a VM to become pingable")
]}


@ -1,9 +1,26 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
OPTS = {"benchmark": [
cfg.FloatOpt("watcher_audit_launch_poll_interval", default=2.0,
OPTS = {"openstack": [
cfg.FloatOpt("watcher_audit_launch_poll_interval",
default=2.0,
deprecated_group="benchmark",
help="Watcher audit launch interval"),
cfg.IntOpt("watcher_audit_launch_timeout", default=300,
cfg.IntOpt("watcher_audit_launch_timeout",
default=300,
deprecated_group="benchmark",
help="Watcher audit launch timeout")
]}


@ -596,8 +596,8 @@ class GlanceImage(base.ResourceManager):
self.raw_resource, ["deleted"],
check_deletion=True,
update_resource=self._client().get_image,
timeout=CONF.benchmark.glance_image_delete_timeout,
check_interval=CONF.benchmark.glance_image_delete_poll_interval)
timeout=CONF.openstack.glance_image_delete_timeout,
check_interval=CONF.openstack.glance_image_delete_poll_interval)
# SAHARA


@ -93,7 +93,7 @@ class MonascaMetricGenerator(context.Context):
scenario._create_metrics(**new_metric)
rutils.interruptable_sleep(0.001)
rutils.interruptable_sleep(
monasca_utils.CONF.benchmark.monasca_metric_create_prepoll_delay,
monasca_utils.CONF.openstack.monasca_metric_create_prepoll_delay,
atomic_delay=1)
def cleanup(self):


@ -151,8 +151,8 @@ class SaharaCluster(context.Context):
resource=wait_dict,
update_resource=self.update_clusters_dict,
is_ready=self.all_clusters_active,
timeout=CONF.benchmark.sahara_cluster_create_timeout,
check_interval=CONF.benchmark.sahara_cluster_check_interval)
timeout=CONF.openstack.sahara_cluster_create_timeout,
check_interval=CONF.openstack.sahara_cluster_check_interval)
def update_clusters_dict(self, dct):
new_dct = {}


@ -116,7 +116,7 @@ class OpenStackScenario(scenario.Scenario):
def _init_profiler(self, context):
"""Inits the profiler."""
if not CONF.benchmark.enable_profiler:
if not CONF.openstack.enable_profiler:
return
if context is not None:
cred = None


@ -161,14 +161,14 @@ class CinderScenario(scenario.OpenStackScenario):
# NOTE(msdubov): It is reasonable to wait 5 secs before starting to
# check whether the volume is ready => less API calls.
self.sleep_between(CONF.benchmark.cinder_volume_create_prepoll_delay)
self.sleep_between(CONF.openstack.cinder_volume_create_prepoll_delay)
volume = bench_utils.wait_for(
volume,
ready_statuses=["available"],
update_resource=bench_utils.get_from_manager(),
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.cinder_volume_create_poll_interval
timeout=CONF.openstack.cinder_volume_create_timeout,
check_interval=CONF.openstack.cinder_volume_create_poll_interval
)
return volume
@ -212,8 +212,8 @@ class CinderScenario(scenario.OpenStackScenario):
ready_statuses=["deleted"],
check_deletion=True,
update_resource=bench_utils.get_from_manager(),
timeout=CONF.benchmark.cinder_volume_delete_timeout,
check_interval=CONF.benchmark.cinder_volume_delete_poll_interval
timeout=CONF.openstack.cinder_volume_delete_timeout,
check_interval=CONF.openstack.cinder_volume_delete_poll_interval
)
@atomic.action_timer("cinder.extend_volume")
@ -238,8 +238,8 @@ class CinderScenario(scenario.OpenStackScenario):
volume,
ready_statuses=["available"],
update_resource=bench_utils.get_from_manager(),
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.cinder_volume_create_poll_interval
timeout=CONF.openstack.cinder_volume_create_timeout,
check_interval=CONF.openstack.cinder_volume_create_poll_interval
)
@atomic.action_timer("cinder.upload_volume_to_image")
@ -266,8 +266,8 @@ class CinderScenario(scenario.OpenStackScenario):
volume,
ready_statuses=["available"],
update_resource=bench_utils.get_from_manager(),
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.cinder_volume_create_poll_interval
timeout=CONF.openstack.cinder_volume_create_timeout,
check_interval=CONF.openstack.cinder_volume_create_poll_interval
)
image_id = img["os-volume_upload_image"]["image_id"]
image = self.clients("glance").images.get(image_id)
@ -276,8 +276,8 @@ class CinderScenario(scenario.OpenStackScenario):
image,
ready_statuses=["active"],
update_resource=wrapper.get_image,
timeout=CONF.benchmark.glance_image_create_timeout,
check_interval=CONF.benchmark.glance_image_create_poll_interval
timeout=CONF.openstack.glance_image_create_timeout,
check_interval=CONF.openstack.glance_image_create_poll_interval
)
return image
@ -300,13 +300,13 @@ class CinderScenario(scenario.OpenStackScenario):
client = cinder_wrapper.wrap(self._clients.cinder, self)
snapshot = client.create_snapshot(volume_id, **kwargs)
self.sleep_between(CONF.benchmark.cinder_volume_create_prepoll_delay)
self.sleep_between(CONF.openstack.cinder_volume_create_prepoll_delay)
snapshot = bench_utils.wait_for(
snapshot,
ready_statuses=["available"],
update_resource=bench_utils.get_from_manager(),
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.cinder_volume_create_poll_interval
timeout=CONF.openstack.cinder_volume_create_timeout,
check_interval=CONF.openstack.cinder_volume_create_poll_interval
)
return snapshot
@ -324,8 +324,8 @@ class CinderScenario(scenario.OpenStackScenario):
ready_statuses=["deleted"],
check_deletion=True,
update_resource=bench_utils.get_from_manager(),
timeout=CONF.benchmark.cinder_volume_delete_timeout,
check_interval=CONF.benchmark.cinder_volume_delete_poll_interval
timeout=CONF.openstack.cinder_volume_delete_timeout,
check_interval=CONF.openstack.cinder_volume_delete_poll_interval
)
@atomic.action_timer("cinder.create_backup")
@ -340,8 +340,8 @@ class CinderScenario(scenario.OpenStackScenario):
backup,
ready_statuses=["available"],
update_resource=bench_utils.get_from_manager(),
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.cinder_volume_create_poll_interval
timeout=CONF.openstack.cinder_volume_create_timeout,
check_interval=CONF.openstack.cinder_volume_create_poll_interval
)
@atomic.action_timer("cinder.delete_backup")
@ -358,8 +358,8 @@ class CinderScenario(scenario.OpenStackScenario):
ready_statuses=["deleted"],
check_deletion=True,
update_resource=bench_utils.get_from_manager(),
timeout=CONF.benchmark.cinder_volume_delete_timeout,
check_interval=CONF.benchmark.cinder_volume_delete_poll_interval
timeout=CONF.openstack.cinder_volume_delete_timeout,
check_interval=CONF.openstack.cinder_volume_delete_poll_interval
)
@atomic.action_timer("cinder.restore_backup")
@ -376,15 +376,15 @@ class CinderScenario(scenario.OpenStackScenario):
backup_for_restore,
ready_statuses=["available"],
update_resource=bench_utils.get_from_manager(),
timeout=CONF.benchmark.cinder_backup_restore_timeout,
check_interval=CONF.benchmark.cinder_backup_restore_poll_interval
timeout=CONF.openstack.cinder_backup_restore_timeout,
check_interval=CONF.openstack.cinder_backup_restore_poll_interval
)
return bench_utils.wait_for(
restored_volume,
ready_statuses=["available"],
update_resource=bench_utils.get_from_manager(),
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.cinder_volume_create_poll_interval
timeout=CONF.openstack.cinder_volume_create_timeout,
check_interval=CONF.openstack.cinder_volume_create_poll_interval
)
@atomic.action_timer("cinder.list_backups")


@ -54,13 +54,13 @@ class EC2Scenario(scenario.OpenStackScenario):
**kwargs)
servers = [instance for instance in reservation.instances]
self.sleep_between(CONF.benchmark.ec2_server_boot_prepoll_delay)
self.sleep_between(CONF.openstack.ec2_server_boot_prepoll_delay)
servers = [utils.wait_for(
server,
ready_statuses=["RUNNING"],
update_resource=self._update_resource,
timeout=CONF.benchmark.ec2_server_boot_timeout,
check_interval=CONF.benchmark.ec2_server_boot_poll_interval
timeout=CONF.openstack.ec2_server_boot_timeout,
check_interval=CONF.openstack.ec2_server_boot_poll_interval
) for server in servers]
return servers


@ -76,5 +76,5 @@ class GlanceScenario(scenario.OpenStackScenario):
image, ["deleted", "pending_delete"],
check_deletion=True,
update_resource=wrapper.get_image,
timeout=CONF.benchmark.glance_image_delete_timeout,
check_interval=CONF.benchmark.glance_image_delete_poll_interval)
timeout=CONF.openstack.glance_image_delete_timeout,
check_interval=CONF.openstack.glance_image_delete_poll_interval)


@ -65,15 +65,15 @@ class HeatScenario(scenario.OpenStackScenario):
stack_id = self.clients("heat").stacks.create(**kw)["stack"]["id"]
stack = self.clients("heat").stacks.get(stack_id)
self.sleep_between(CONF.benchmark.heat_stack_create_prepoll_delay)
self.sleep_between(CONF.openstack.heat_stack_create_prepoll_delay)
stack = utils.wait_for(
stack,
ready_statuses=["CREATE_COMPLETE"],
failure_statuses=["CREATE_FAILED", "ERROR"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.heat_stack_create_timeout,
check_interval=CONF.benchmark.heat_stack_create_poll_interval)
timeout=CONF.openstack.heat_stack_create_timeout,
check_interval=CONF.openstack.heat_stack_create_poll_interval)
return stack
@ -101,15 +101,15 @@ class HeatScenario(scenario.OpenStackScenario):
}
self.clients("heat").stacks.update(stack.id, **kw)
self.sleep_between(CONF.benchmark.heat_stack_update_prepoll_delay)
self.sleep_between(CONF.openstack.heat_stack_update_prepoll_delay)
stack = utils.wait_for(
stack,
ready_statuses=["UPDATE_COMPLETE"],
failure_statuses=["UPDATE_FAILED", "ERROR"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.heat_stack_update_timeout,
check_interval=CONF.benchmark.heat_stack_update_poll_interval)
timeout=CONF.openstack.heat_stack_update_timeout,
check_interval=CONF.openstack.heat_stack_update_poll_interval)
return stack
@atomic.action_timer("heat.check_stack")
@ -126,8 +126,8 @@ class HeatScenario(scenario.OpenStackScenario):
ready_statuses=["CHECK_COMPLETE"],
failure_statuses=["CHECK_FAILED", "ERROR"],
update_resource=utils.get_from_manager(["CHECK_FAILED"]),
timeout=CONF.benchmark.heat_stack_check_timeout,
check_interval=CONF.benchmark.heat_stack_check_poll_interval)
timeout=CONF.openstack.heat_stack_check_timeout,
check_interval=CONF.openstack.heat_stack_check_poll_interval)
@atomic.action_timer("heat.delete_stack")
def _delete_stack(self, stack):
@ -144,8 +144,8 @@ class HeatScenario(scenario.OpenStackScenario):
failure_statuses=["DELETE_FAILED", "ERROR"],
check_deletion=True,
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.heat_stack_delete_timeout,
check_interval=CONF.benchmark.heat_stack_delete_poll_interval)
timeout=CONF.openstack.heat_stack_delete_timeout,
check_interval=CONF.openstack.heat_stack_delete_poll_interval)
@atomic.action_timer("heat.suspend_stack")
def _suspend_stack(self, stack):
@ -160,8 +160,8 @@ class HeatScenario(scenario.OpenStackScenario):
ready_statuses=["SUSPEND_COMPLETE"],
failure_statuses=["SUSPEND_FAILED", "ERROR"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.heat_stack_suspend_timeout,
check_interval=CONF.benchmark.heat_stack_suspend_poll_interval)
timeout=CONF.openstack.heat_stack_suspend_timeout,
check_interval=CONF.openstack.heat_stack_suspend_poll_interval)
@atomic.action_timer("heat.resume_stack")
def _resume_stack(self, stack):
@ -176,8 +176,8 @@ class HeatScenario(scenario.OpenStackScenario):
ready_statuses=["RESUME_COMPLETE"],
failure_statuses=["RESUME_FAILED", "ERROR"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.heat_stack_resume_timeout,
check_interval=CONF.benchmark.heat_stack_resume_poll_interval)
timeout=CONF.openstack.heat_stack_resume_timeout,
check_interval=CONF.openstack.heat_stack_resume_poll_interval)
@atomic.action_timer("heat.snapshot_stack")
def _snapshot_stack(self, stack):
@ -193,8 +193,8 @@ class HeatScenario(scenario.OpenStackScenario):
ready_statuses=["SNAPSHOT_COMPLETE"],
failure_statuses=["SNAPSHOT_FAILED", "ERROR"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.heat_stack_snapshot_timeout,
check_interval=CONF.benchmark.heat_stack_snapshot_poll_interval)
timeout=CONF.openstack.heat_stack_snapshot_timeout,
check_interval=CONF.openstack.heat_stack_snapshot_poll_interval)
return snapshot
@atomic.action_timer("heat.restore_stack")
@ -210,8 +210,8 @@ class HeatScenario(scenario.OpenStackScenario):
ready_statuses=["RESTORE_COMPLETE"],
failure_statuses=["RESTORE_FAILED", "ERROR"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.heat_stack_restore_timeout,
check_interval=CONF.benchmark.heat_stack_restore_poll_interval
timeout=CONF.openstack.heat_stack_restore_timeout,
check_interval=CONF.openstack.heat_stack_restore_poll_interval
)
@atomic.action_timer("heat.show_output")
@ -302,8 +302,8 @@ class HeatScenario(scenario.OpenStackScenario):
self._count_instances(s) == expected_instances),
failure_statuses=["UPDATE_FAILED", "ERROR"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.heat_stack_scale_timeout,
check_interval=CONF.benchmark.heat_stack_scale_poll_interval)
timeout=CONF.openstack.heat_stack_scale_timeout,
check_interval=CONF.openstack.heat_stack_scale_poll_interval)
def _stack_webhook(self, stack, output_key):
"""POST to the URL given in the output value identified by output_key.


@ -57,13 +57,13 @@ class IronicScenario(scenario.OpenStackScenario):
properties=properties,
**kwargs)
self.sleep_between(CONF.benchmark.ironic_node_create_poll_interval)
self.sleep_between(CONF.openstack.ironic_node_create_poll_interval)
node = utils.wait_for_status(
node,
ready_statuses=["AVAILABLE"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.ironic_node_create_timeout,
check_interval=CONF.benchmark.ironic_node_poll_interval,
timeout=CONF.openstack.ironic_node_create_timeout,
check_interval=CONF.openstack.ironic_node_poll_interval,
id_attr="uuid", status_attr="provision_state"
)
@ -106,7 +106,7 @@ class IronicScenario(scenario.OpenStackScenario):
ready_statuses=["deleted"],
check_deletion=True,
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.ironic_node_delete_timeout,
check_interval=CONF.benchmark.ironic_node_poll_interval,
timeout=CONF.openstack.ironic_node_delete_timeout,
check_interval=CONF.openstack.ironic_node_poll_interval,
id_attr="uuid", status_attr="provision_state"
)


@ -109,13 +109,13 @@ class MagnumScenario(scenario.OpenStackScenario):
node_count=node_count, **kwargs)
common_utils.interruptable_sleep(
CONF.benchmark.magnum_cluster_create_prepoll_delay)
CONF.openstack.magnum_cluster_create_prepoll_delay)
cluster = utils.wait_for_status(
cluster,
ready_statuses=["CREATE_COMPLETE"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.magnum_cluster_create_timeout,
check_interval=CONF.benchmark.magnum_cluster_create_poll_interval,
timeout=CONF.openstack.magnum_cluster_create_timeout,
check_interval=CONF.openstack.magnum_cluster_create_poll_interval,
id_attr="uuid"
)
return cluster
@ -210,7 +210,7 @@ class MagnumScenario(scenario.OpenStackScenario):
condition.status.lower() == "true":
return resp
if (time.time() - start > CONF.benchmark.k8s_pod_create_timeout):
if (time.time() - start > CONF.openstack.k8s_pod_create_timeout):
raise exceptions.TimeoutException(
desired_status="Ready",
resource_name=podname,
@ -218,7 +218,7 @@ class MagnumScenario(scenario.OpenStackScenario):
resource_id=resp.metadata.uid,
resource_status=resp.status)
common_utils.interruptable_sleep(
CONF.benchmark.k8s_pod_create_poll_interval)
CONF.openstack.k8s_pod_create_poll_interval)
@atomic.action_timer("magnum.k8s_list_v1rcs")
def _list_v1rcs(self):
@ -254,7 +254,7 @@ class MagnumScenario(scenario.OpenStackScenario):
if status == expectd_status:
return resp
else:
if time.time() - start > CONF.benchmark.k8s_rc_create_timeout:
if time.time() - start > CONF.openstack.k8s_rc_create_timeout:
raise exceptions.TimeoutException(
desired_status=expectd_status,
resource_name=rcname,
@ -262,4 +262,4 @@ class MagnumScenario(scenario.OpenStackScenario):
resource_id=resp.metadata.uid,
resource_status=status)
common_utils.interruptable_sleep(
CONF.benchmark.k8s_rc_create_poll_interval)
CONF.openstack.k8s_rc_create_poll_interval)


@ -60,13 +60,13 @@ class ManilaScenario(scenario.OpenStackScenario):
share = self.clients("manila").shares.create(
share_proto, size, **kwargs)
self.sleep_between(CONF.benchmark.manila_share_create_prepoll_delay)
self.sleep_between(CONF.openstack.manila_share_create_prepoll_delay)
share = utils.wait_for(
share,
ready_statuses=["available"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.manila_share_create_timeout,
check_interval=CONF.benchmark.manila_share_create_poll_interval,
timeout=CONF.openstack.manila_share_create_timeout,
check_interval=CONF.openstack.manila_share_create_poll_interval,
)
return share
@ -83,8 +83,8 @@ class ManilaScenario(scenario.OpenStackScenario):
ready_statuses=["deleted"],
check_deletion=True,
update_resource=utils.get_from_manager(error_statuses),
timeout=CONF.benchmark.manila_share_delete_timeout,
check_interval=CONF.benchmark.manila_share_delete_poll_interval)
timeout=CONF.openstack.manila_share_delete_timeout,
check_interval=CONF.openstack.manila_share_delete_poll_interval)
def _get_access_from_share(self, share, access_id):
"""Get access from share
@ -135,8 +135,8 @@ class ManilaScenario(scenario.OpenStackScenario):
access,
ready_statuses=["active"],
update_resource=fn,
check_interval=CONF.benchmark.manila_access_create_poll_interval,
timeout=CONF.benchmark.manila_access_create_timeout)
check_interval=CONF.openstack.manila_access_create_poll_interval,
timeout=CONF.openstack.manila_access_create_timeout)
return access_result
@ -173,8 +173,8 @@ class ManilaScenario(scenario.OpenStackScenario):
ready_statuses=["deleted"],
update_resource=fn,
check_deletion=True,
check_interval=CONF.benchmark.manila_access_delete_poll_interval,
timeout=CONF.benchmark.manila_access_delete_timeout)
check_interval=CONF.openstack.manila_access_delete_poll_interval,
timeout=CONF.openstack.manila_access_delete_timeout)
@atomic.action_timer("manila.list_shares")
def _list_shares(self, detailed=True, search_opts=None):
@ -200,8 +200,8 @@ class ManilaScenario(scenario.OpenStackScenario):
share,
ready_statuses=["available"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.manila_share_create_timeout,
check_interval=CONF.benchmark.manila_share_create_poll_interval)
timeout=CONF.openstack.manila_share_create_timeout,
check_interval=CONF.openstack.manila_share_create_poll_interval)
@atomic.action_timer("manila.shrink_share")
def _shrink_share(self, share, new_size):
@ -215,8 +215,8 @@ class ManilaScenario(scenario.OpenStackScenario):
share,
ready_statuses=["available"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.manila_share_create_timeout,
check_interval=CONF.benchmark.manila_share_create_poll_interval)
timeout=CONF.openstack.manila_share_create_timeout,
check_interval=CONF.openstack.manila_share_create_poll_interval)
@atomic.action_timer("manila.create_share_network")
def _create_share_network(self, neutron_net_id=None,
@ -250,8 +250,8 @@ class ManilaScenario(scenario.OpenStackScenario):
ready_statuses=["deleted"],
check_deletion=True,
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.manila_share_delete_timeout,
check_interval=CONF.benchmark.manila_share_delete_poll_interval)
timeout=CONF.openstack.manila_share_delete_timeout,
check_interval=CONF.openstack.manila_share_delete_poll_interval)
@atomic.action_timer("manila.list_share_networks")
def _list_share_networks(self, detailed=True, search_opts=None):
@ -322,8 +322,8 @@ class ManilaScenario(scenario.OpenStackScenario):
ready_statuses=["deleted"],
check_deletion=True,
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.manila_share_delete_timeout,
check_interval=CONF.benchmark.manila_share_delete_poll_interval)
timeout=CONF.openstack.manila_share_delete_timeout,
check_interval=CONF.openstack.manila_share_delete_poll_interval)
@atomic.action_timer("manila.add_security_service_to_share_network")
def _add_security_service_to_share_network(self, share_network,


@ -87,7 +87,7 @@ class MistralScenario(scenario.OpenStackScenario):
execution = utils.wait_for_status(
execution, ready_statuses=["SUCCESS"], failure_statuses=["ERROR"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.mistral_execution_timeout)
timeout=CONF.openstack.mistral_execution_timeout)
return execution


@ -101,7 +101,7 @@ class MuranoScenario(scenario.OpenStackScenario):
self.clients("murano").sessions.deploy(environment.id,
session.id)
config = CONF.benchmark
config = CONF.openstack
utils.wait_for(
environment,
ready_statuses=["READY"],


@ -689,9 +689,9 @@ class NeutronScenario(scenario.OpenStackScenario):
ready_statuses=["ACTIVE"],
status_attr="provisioning_status",
update_resource=self.update_loadbalancer_resource,
timeout=CONF.benchmark.neutron_create_loadbalancer_timeout,
timeout=CONF.openstack.neutron_create_loadbalancer_timeout,
check_interval=(
CONF.benchmark.neutron_create_loadbalancer_poll_interval)
CONF.openstack.neutron_create_loadbalancer_poll_interval)
)
return lb

View File

@ -79,25 +79,25 @@ class NovaScenario(scenario.OpenStackScenario):
server = self.clients("nova").servers.create(
server_name, image, flavor, **kwargs)
self.sleep_between(CONF.benchmark.nova_server_boot_prepoll_delay)
self.sleep_between(CONF.openstack.nova_server_boot_prepoll_delay)
server = utils.wait_for_status(
server,
ready_statuses=["ACTIVE"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_boot_timeout,
check_interval=CONF.benchmark.nova_server_boot_poll_interval
timeout=CONF.openstack.nova_server_boot_timeout,
check_interval=CONF.openstack.nova_server_boot_poll_interval
)
return server
def _do_server_reboot(self, server, reboottype):
server.reboot(reboot_type=reboottype)
self.sleep_between(CONF.benchmark.nova_server_pause_prepoll_delay)
self.sleep_between(CONF.openstack.nova_server_pause_prepoll_delay)
utils.wait_for_status(
server,
ready_statuses=["ACTIVE"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_reboot_timeout,
check_interval=CONF.benchmark.nova_server_reboot_poll_interval
timeout=CONF.openstack.nova_server_reboot_timeout,
check_interval=CONF.openstack.nova_server_reboot_poll_interval
)
@atomic.action_timer("nova.soft_reboot_server")
@ -168,13 +168,13 @@ class NovaScenario(scenario.OpenStackScenario):
:param kwargs: Optional additional arguments to pass to the rebuild
"""
server.rebuild(image, **kwargs)
self.sleep_between(CONF.benchmark.nova_server_rebuild_prepoll_delay)
self.sleep_between(CONF.openstack.nova_server_rebuild_prepoll_delay)
utils.wait_for_status(
server,
ready_statuses=["ACTIVE"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_rebuild_timeout,
check_interval=CONF.benchmark.nova_server_rebuild_poll_interval
timeout=CONF.openstack.nova_server_rebuild_timeout,
check_interval=CONF.openstack.nova_server_rebuild_poll_interval
)
@atomic.action_timer("nova.start_server")
@ -191,8 +191,8 @@ class NovaScenario(scenario.OpenStackScenario):
server,
ready_statuses=["ACTIVE"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_start_timeout,
check_interval=CONF.benchmark.nova_server_start_poll_interval
timeout=CONF.openstack.nova_server_start_timeout,
check_interval=CONF.openstack.nova_server_start_poll_interval
)
@atomic.action_timer("nova.stop_server")
@ -209,8 +209,8 @@ class NovaScenario(scenario.OpenStackScenario):
server,
ready_statuses=["SHUTOFF"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_stop_timeout,
check_interval=CONF.benchmark.nova_server_stop_poll_interval
timeout=CONF.openstack.nova_server_stop_timeout,
check_interval=CONF.openstack.nova_server_stop_poll_interval
)
@atomic.action_timer("nova.rescue_server")
@ -223,13 +223,13 @@ class NovaScenario(scenario.OpenStackScenario):
:param server: Server object
"""
server.rescue()
self.sleep_between(CONF.benchmark.nova_server_rescue_prepoll_delay)
self.sleep_between(CONF.openstack.nova_server_rescue_prepoll_delay)
utils.wait_for_status(
server,
ready_statuses=["RESCUE"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_rescue_timeout,
check_interval=CONF.benchmark.nova_server_rescue_poll_interval
timeout=CONF.openstack.nova_server_rescue_timeout,
check_interval=CONF.openstack.nova_server_rescue_poll_interval
)
@atomic.action_timer("nova.unrescue_server")
@ -241,13 +241,13 @@ class NovaScenario(scenario.OpenStackScenario):
:param server: Server object
"""
server.unrescue()
self.sleep_between(CONF.benchmark.nova_server_unrescue_prepoll_delay)
self.sleep_between(CONF.openstack.nova_server_unrescue_prepoll_delay)
utils.wait_for_status(
server,
ready_statuses=["ACTIVE"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_unrescue_timeout,
check_interval=CONF.benchmark.nova_server_unrescue_poll_interval
timeout=CONF.openstack.nova_server_unrescue_timeout,
check_interval=CONF.openstack.nova_server_unrescue_poll_interval
)
@atomic.action_timer("nova.suspend_server")
@ -260,13 +260,13 @@ class NovaScenario(scenario.OpenStackScenario):
:param server: Server object
"""
server.suspend()
self.sleep_between(CONF.benchmark.nova_server_suspend_prepoll_delay)
self.sleep_between(CONF.openstack.nova_server_suspend_prepoll_delay)
utils.wait_for_status(
server,
ready_statuses=["SUSPENDED"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_suspend_timeout,
check_interval=CONF.benchmark.nova_server_suspend_poll_interval
timeout=CONF.openstack.nova_server_suspend_timeout,
check_interval=CONF.openstack.nova_server_suspend_poll_interval
)
@atomic.action_timer("nova.resume_server")
@ -279,13 +279,13 @@ class NovaScenario(scenario.OpenStackScenario):
:param server: Server object
"""
server.resume()
self.sleep_between(CONF.benchmark.nova_server_resume_prepoll_delay)
self.sleep_between(CONF.openstack.nova_server_resume_prepoll_delay)
utils.wait_for_status(
server,
ready_statuses=["ACTIVE"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_resume_timeout,
check_interval=CONF.benchmark.nova_server_resume_poll_interval
timeout=CONF.openstack.nova_server_resume_timeout,
check_interval=CONF.openstack.nova_server_resume_poll_interval
)
@atomic.action_timer("nova.pause_server")
@ -298,13 +298,13 @@ class NovaScenario(scenario.OpenStackScenario):
:param server: Server object
"""
server.pause()
self.sleep_between(CONF.benchmark.nova_server_pause_prepoll_delay)
self.sleep_between(CONF.openstack.nova_server_pause_prepoll_delay)
utils.wait_for_status(
server,
ready_statuses=["PAUSED"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_pause_timeout,
check_interval=CONF.benchmark.nova_server_pause_poll_interval
timeout=CONF.openstack.nova_server_pause_timeout,
check_interval=CONF.openstack.nova_server_pause_poll_interval
)
@atomic.action_timer("nova.unpause_server")
@ -317,13 +317,13 @@ class NovaScenario(scenario.OpenStackScenario):
:param server: Server object
"""
server.unpause()
self.sleep_between(CONF.benchmark.nova_server_pause_prepoll_delay)
self.sleep_between(CONF.openstack.nova_server_pause_prepoll_delay)
utils.wait_for_status(
server,
ready_statuses=["ACTIVE"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_unpause_timeout,
check_interval=CONF.benchmark.nova_server_unpause_poll_interval
timeout=CONF.openstack.nova_server_unpause_timeout,
check_interval=CONF.openstack.nova_server_unpause_poll_interval
)
@atomic.action_timer("nova.shelve_server")
@ -336,13 +336,13 @@ class NovaScenario(scenario.OpenStackScenario):
:param server: Server object
"""
server.shelve()
self.sleep_between(CONF.benchmark.nova_server_pause_prepoll_delay)
self.sleep_between(CONF.openstack.nova_server_pause_prepoll_delay)
utils.wait_for_status(
server,
ready_statuses=["SHELVED_OFFLOADED"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_shelve_timeout,
check_interval=CONF.benchmark.nova_server_shelve_poll_interval
timeout=CONF.openstack.nova_server_shelve_timeout,
check_interval=CONF.openstack.nova_server_shelve_poll_interval
)
@atomic.action_timer("nova.unshelve_server")
@ -355,13 +355,13 @@ class NovaScenario(scenario.OpenStackScenario):
"""
server.unshelve()
self.sleep_between(CONF.benchmark.nova_server_unshelve_prepoll_delay)
self.sleep_between(CONF.openstack.nova_server_unshelve_prepoll_delay)
utils.wait_for_status(
server,
ready_statuses=["ACTIVE"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_unshelve_timeout,
check_interval=CONF.benchmark.nova_server_unshelve_poll_interval
timeout=CONF.openstack.nova_server_unshelve_timeout,
check_interval=CONF.openstack.nova_server_unshelve_poll_interval
)
def _delete_server(self, server, force=False):
@ -384,8 +384,8 @@ class NovaScenario(scenario.OpenStackScenario):
ready_statuses=["deleted"],
check_deletion=True,
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_delete_timeout,
check_interval=CONF.benchmark.nova_server_delete_poll_interval
timeout=CONF.openstack.nova_server_delete_timeout,
check_interval=CONF.openstack.nova_server_delete_poll_interval
)
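Note: the deletion paths above pair ready_statuses=["deleted"] with check_deletion=True. A hedged sketch of what that flag adds (assumed names and behavior, not Rally's implementation):

import time

def wait_for_deletion(fetch_resource, timeout, check_interval):
    # With check_deletion semantics, a failed lookup (NotFound in the real
    # clients) means the resource is gone, which counts as success.
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            resource = fetch_resource()
        except Exception:  # stand-in for the client's NotFound
            return
        if getattr(resource, "status", "") in ("deleted", "DELETED"):
            return
        time.sleep(check_interval)
    raise TimeoutError("resource was not deleted within %.1fs" % timeout)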
def _delete_servers(self, servers, force=False):
@ -408,9 +408,9 @@ class NovaScenario(scenario.OpenStackScenario):
ready_statuses=["deleted"],
check_deletion=True,
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_delete_timeout,
check_interval=CONF.
benchmark.nova_server_delete_poll_interval
timeout=CONF.openstack.nova_server_delete_timeout,
check_interval=(
CONF.openstack.nova_server_delete_poll_interval)
)
@atomic.action_timer("nova.create_server_group")
@ -472,14 +472,14 @@ class NovaScenario(scenario.OpenStackScenario):
glance = image_service.Image(self._clients,
atomic_inst=self.atomic_actions())
glance.delete_image(image.id)
check_interval = CONF.benchmark.nova_server_image_delete_poll_interval
check_interval = CONF.openstack.nova_server_image_delete_poll_interval
with atomic.ActionTimer(self, "glance.wait_for_delete"):
utils.wait_for_status(
image,
ready_statuses=["deleted", "pending_delete"],
check_deletion=True,
update_resource=glance.get_image,
timeout=CONF.benchmark.nova_server_image_delete_timeout,
timeout=CONF.openstack.nova_server_image_delete_timeout,
check_interval=check_interval
)
@ -499,13 +499,13 @@ class NovaScenario(scenario.OpenStackScenario):
glance = image_service.Image(self._clients,
atomic_inst=self.atomic_actions())
image = glance.get_image(image_uuid)
check_interval = CONF.benchmark.nova_server_image_create_poll_interval
check_interval = CONF.openstack.nova_server_image_create_poll_interval
with atomic.ActionTimer(self, "glance.wait_for_image"):
image = utils.wait_for_status(
image,
ready_statuses=["ACTIVE"],
update_resource=glance.get_image,
timeout=CONF.benchmark.nova_server_image_create_timeout,
timeout=CONF.openstack.nova_server_image_create_timeout,
check_interval=check_interval
)
return image
@ -592,14 +592,14 @@ class NovaScenario(scenario.OpenStackScenario):
# created servers manually.
servers = [s for s in self.clients("nova").servers.list()
if s.name.startswith(name_prefix)]
self.sleep_between(CONF.benchmark.nova_server_boot_prepoll_delay)
self.sleep_between(CONF.openstack.nova_server_boot_prepoll_delay)
servers = [utils.wait_for_status(
server,
ready_statuses=["ACTIVE"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_boot_timeout,
check_interval=CONF.benchmark.nova_server_boot_poll_interval
timeout=CONF.openstack.nova_server_boot_timeout,
check_interval=CONF.openstack.nova_server_boot_poll_interval
) for server in servers]
return servers
@ -654,8 +654,8 @@ class NovaScenario(scenario.OpenStackScenario):
server,
ready_statuses=["VERIFY_RESIZE"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_resize_timeout,
check_interval=CONF.benchmark.nova_server_resize_poll_interval
timeout=CONF.openstack.nova_server_resize_timeout,
check_interval=CONF.openstack.nova_server_resize_poll_interval
)
@atomic.action_timer("nova.resize_confirm")
@ -665,9 +665,9 @@ class NovaScenario(scenario.OpenStackScenario):
server,
ready_statuses=[status],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_resize_confirm_timeout,
timeout=CONF.openstack.nova_server_resize_confirm_timeout,
check_interval=(
CONF.benchmark.nova_server_resize_confirm_poll_interval)
CONF.openstack.nova_server_resize_confirm_poll_interval)
)
@atomic.action_timer("nova.resize_revert")
@ -677,9 +677,9 @@ class NovaScenario(scenario.OpenStackScenario):
server,
ready_statuses=[status],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_resize_revert_timeout,
timeout=CONF.openstack.nova_server_resize_revert_timeout,
check_interval=(
CONF.benchmark.nova_server_resize_revert_poll_interval)
CONF.openstack.nova_server_resize_revert_poll_interval)
)
def _update_volume_resource(self, resource):
@ -696,9 +696,9 @@ class NovaScenario(scenario.OpenStackScenario):
volume,
ready_statuses=["in-use"],
update_resource=self._update_volume_resource,
timeout=CONF.benchmark.nova_server_resize_revert_timeout,
timeout=CONF.openstack.nova_server_resize_revert_timeout,
check_interval=(
CONF.benchmark.nova_server_resize_revert_poll_interval)
CONF.openstack.nova_server_resize_revert_poll_interval)
)
return attachment
@ -732,8 +732,8 @@ class NovaScenario(scenario.OpenStackScenario):
volume,
ready_statuses=["available"],
update_resource=self._update_volume_resource,
timeout=CONF.benchmark.nova_detach_volume_timeout,
check_interval=CONF.benchmark.nova_detach_volume_poll_interval
timeout=CONF.openstack.nova_detach_volume_timeout,
check_interval=CONF.openstack.nova_detach_volume_poll_interval
)
@atomic.action_timer("nova.live_migrate")
@ -758,9 +758,9 @@ class NovaScenario(scenario.OpenStackScenario):
server,
ready_statuses=["ACTIVE"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_live_migrate_timeout,
timeout=CONF.openstack.nova_server_live_migrate_timeout,
check_interval=(
CONF.benchmark.nova_server_live_migrate_poll_interval)
CONF.openstack.nova_server_live_migrate_poll_interval)
)
server_admin = self.admin_clients("nova").servers.get(server.id)
if (host_pre_migrate == getattr(server_admin, "OS-EXT-SRV-ATTR:host")
@ -808,9 +808,9 @@ class NovaScenario(scenario.OpenStackScenario):
server,
ready_statuses=["VERIFY_RESIZE"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.nova_server_migrate_timeout,
timeout=CONF.openstack.nova_server_migrate_timeout,
check_interval=(
CONF.benchmark.nova_server_migrate_poll_interval)
CONF.openstack.nova_server_migrate_poll_interval)
)
if not skip_host_check:
server_admin = self.admin_clients("nova").servers.get(server.id)

View File

@ -110,8 +110,8 @@ class SaharaScenario(scenario.OpenStackScenario):
utils.wait_for(
resource=cluster_object, ready_statuses=["active"],
failure_statuses=["error"], update_resource=self._update_cluster,
timeout=CONF.benchmark.sahara_cluster_create_timeout,
check_interval=CONF.benchmark.sahara_cluster_check_interval)
timeout=CONF.openstack.sahara_cluster_create_timeout,
check_interval=CONF.openstack.sahara_cluster_check_interval)
def _setup_neutron_floating_ip_pool(self, name_or_id):
if name_or_id:
@ -299,7 +299,7 @@ class SaharaScenario(scenario.OpenStackScenario):
if enable_proxy:
proxies_count = int(
workers_count / CONF.benchmark.sahara_workers_per_proxy)
workers_count / CONF.openstack.sahara_workers_per_proxy)
else:
proxies_count = 0
@ -458,8 +458,8 @@ class SaharaScenario(scenario.OpenStackScenario):
utils.wait_for(
resource=cluster,
timeout=CONF.benchmark.sahara_cluster_delete_timeout,
check_interval=CONF.benchmark.sahara_cluster_check_interval,
timeout=CONF.openstack.sahara_cluster_delete_timeout,
check_interval=CONF.openstack.sahara_cluster_check_interval,
is_ready=self._is_cluster_deleted)
def _is_cluster_deleted(self, cluster):
@ -524,8 +524,8 @@ class SaharaScenario(scenario.OpenStackScenario):
utils.wait_for(
resource=job_execution.id,
is_ready=self._job_execution_is_finished,
timeout=CONF.benchmark.sahara_job_execution_timeout,
check_interval=CONF.benchmark.sahara_job_check_interval)
timeout=CONF.openstack.sahara_job_execution_timeout,
check_interval=CONF.openstack.sahara_job_check_interval)
run(self)
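Note: the Sahara waits use the predicate form (is_ready=...) instead of a status list. A standalone sketch under assumed names:

import time

def wait_for(resource, is_ready, timeout, check_interval):
    # Defer the readiness decision to a caller-supplied predicate, which
    # suits composite checks such as _is_cluster_deleted above.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if is_ready(resource):
            return resource
        time.sleep(check_interval)
    raise TimeoutError("condition not met within %.1fs" % timeout)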

View File

@ -82,7 +82,7 @@ class SenlinScenario(scenario.OpenStackScenario):
ready_statuses=["ACTIVE"],
failure_statuses=["ERROR"],
update_resource=self._get_cluster,
timeout=CONF.benchmark.senlin_action_timeout)
timeout=CONF.openstack.senlin_action_timeout)
return cluster
@ -115,7 +115,7 @@ class SenlinScenario(scenario.OpenStackScenario):
failure_statuses=["ERROR"],
check_deletion=True,
update_resource=self._get_cluster,
timeout=CONF.benchmark.senlin_action_timeout)
timeout=CONF.openstack.senlin_action_timeout)
@atomic.action_timer("senlin.create_profile")
def _create_profile(self, spec, metadata=None):

View File

@ -201,8 +201,8 @@ class VMScenario(nova_utils.NovaScenario):
server,
ready_statuses=[Host.ICMP_UP_STATUS],
update_resource=Host.update_status,
timeout=CONF.benchmark.vm_ping_timeout,
check_interval=CONF.benchmark.vm_ping_poll_interval
timeout=CONF.openstack.vm_ping_timeout,
check_interval=CONF.openstack.vm_ping_poll_interval
)
def _run_command(self, server_ip, port, username, password, command,

View File

@ -63,8 +63,8 @@ class WatcherScenario(scenario.OpenStackScenario):
failure_statuses=["FAILED"],
status_attr="state",
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.watcher_audit_launch_timeout,
check_interval=CONF.benchmark.watcher_audit_launch_poll_interval,
timeout=CONF.openstack.watcher_audit_launch_timeout,
check_interval=CONF.openstack.watcher_audit_launch_poll_interval,
id_attr="uuid"
)
return audit

View File

@ -51,8 +51,8 @@ class Stack(common_utils.RandomNameGeneratorMixin):
def _wait(self, ready_statuses, failure_statuses):
self.stack = utils.wait_for_status(
self.stack,
check_interval=CONF.benchmark.heat_stack_create_poll_interval,
timeout=CONF.benchmark.heat_stack_create_timeout,
check_interval=CONF.openstack.heat_stack_create_poll_interval,
timeout=CONF.openstack.heat_stack_create_timeout,
ready_statuses=ready_statuses,
failure_statuses=failure_statuses,
update_resource=utils.get_from_manager(),

View File

@ -65,14 +65,14 @@ class GlanceV1Service(service.Service, glance_common.GlanceMixin):
properties=properties,
**kwargs)
rutils.interruptable_sleep(CONF.benchmark.
rutils.interruptable_sleep(CONF.openstack.
glance_image_create_prepoll_delay)
image_obj = utils.wait_for_status(
image_obj, ["active"],
update_resource=self.get_image,
timeout=CONF.benchmark.glance_image_create_timeout,
check_interval=CONF.benchmark.glance_image_create_poll_interval
timeout=CONF.openstack.glance_image_create_timeout,
check_interval=CONF.openstack.glance_image_create_poll_interval
)
finally:

View File

@ -60,15 +60,15 @@ class GlanceV2Service(service.Service, glance_common.GlanceMixin):
**properties)
image_location = os.path.expanduser(image_location)
rutils.interruptable_sleep(CONF.benchmark.
rutils.interruptable_sleep(CONF.openstack.
glance_image_create_prepoll_delay)
start = time.time()
image_obj = utils.wait_for_status(
image_obj.id, ["queued"],
update_resource=self.get_image,
timeout=CONF.benchmark.glance_image_create_timeout,
check_interval=CONF.benchmark.glance_image_create_poll_interval)
timeout=CONF.openstack.glance_image_create_timeout,
check_interval=CONF.openstack.glance_image_create_poll_interval)
timeout = time.time() - start
image_data = None
@ -90,7 +90,7 @@ class GlanceV2Service(service.Service, glance_common.GlanceMixin):
image_obj, ["active"],
update_resource=self.get_image,
timeout=timeout,
check_interval=CONF.benchmark.glance_image_create_poll_interval)
check_interval=CONF.openstack.glance_image_create_poll_interval)
return image_obj
@atomic.action_timer("glance_v2.update_image")

View File

@ -52,8 +52,8 @@ class CinderMixin(object):
volume,
ready_statuses=["available"],
update_resource=self._update_resource,
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.cinder_volume_create_poll_interval
timeout=CONF.openstack.cinder_volume_create_timeout,
check_interval=CONF.openstack.cinder_volume_create_poll_interval
)
def list_volumes(self, detailed=True):
@ -78,8 +78,8 @@ class CinderMixin(object):
ready_statuses=["deleted"],
check_deletion=True,
update_resource=self._update_resource,
timeout=CONF.benchmark.cinder_volume_delete_timeout,
check_interval=(CONF.benchmark
timeout=CONF.openstack.cinder_volume_delete_timeout,
check_interval=(CONF.openstack
.cinder_volume_delete_poll_interval)
)
@ -196,8 +196,8 @@ class CinderMixin(object):
image_inst,
ready_statuses=["active"],
update_resource=glance.get_image,
timeout=CONF.benchmark.glance_image_create_timeout,
check_interval=(CONF.benchmark
timeout=CONF.openstack.glance_image_create_timeout,
check_interval=(CONF.openstack
.glance_image_create_poll_interval)
)
@ -291,8 +291,8 @@ class CinderMixin(object):
ready_statuses=["deleted"],
check_deletion=True,
update_resource=self._update_resource,
timeout=CONF.benchmark.cinder_volume_delete_timeout,
check_interval=(CONF.benchmark
timeout=CONF.openstack.cinder_volume_delete_timeout,
check_interval=(CONF.openstack
.cinder_volume_delete_poll_interval)
)
@ -311,8 +311,8 @@ class CinderMixin(object):
ready_statuses=["deleted"],
check_deletion=True,
update_resource=self._update_resource,
timeout=CONF.benchmark.cinder_volume_delete_timeout,
check_interval=(CONF.benchmark
timeout=CONF.openstack.cinder_volume_delete_timeout,
check_interval=(CONF.openstack
.cinder_volume_delete_poll_interval)
)

View File

@ -67,7 +67,7 @@ class CinderV1Service(service.Service, cinder_common.CinderMixin):
# NOTE(msdubov): It is reasonable to wait 5 secs before starting to
# check whether the volume is ready => fewer API calls.
rutils.interruptable_sleep(
CONF.benchmark.cinder_volume_create_prepoll_delay)
CONF.openstack.cinder_volume_create_prepoll_delay)
return self._wait_available_volume(volume)
@ -119,7 +119,7 @@ class CinderV1Service(service.Service, cinder_common.CinderMixin):
snapshot = self._get_client().volume_snapshots.create(volume_id,
**kwargs)
rutils.interruptable_sleep(
CONF.benchmark.cinder_volume_create_prepoll_delay)
CONF.openstack.cinder_volume_create_prepoll_delay)
snapshot = self._wait_available_volume(snapshot)
return snapshot

View File

@ -78,7 +78,7 @@ class CinderV2Service(service.Service, cinder_common.CinderMixin):
# NOTE(msdubov): It is reasonable to wait 5 secs before starting to
# check whether the volume is ready => fewer API calls.
rutils.interruptable_sleep(
CONF.benchmark.cinder_volume_create_prepoll_delay)
CONF.openstack.cinder_volume_create_prepoll_delay)
return self._wait_available_volume(volume)
@ -131,7 +131,7 @@ class CinderV2Service(service.Service, cinder_common.CinderMixin):
snapshot = self._get_client().volume_snapshots.create(volume_id,
**kwargs)
rutils.interruptable_sleep(
CONF.benchmark.cinder_volume_create_prepoll_delay)
CONF.openstack.cinder_volume_create_prepoll_delay)
snapshot = self._wait_available_volume(snapshot)
return snapshot

View File

@ -167,9 +167,9 @@ class TempestConfigfileManager(object):
def _configure_object_storage(self, section_name="object-storage"):
self.conf.set(section_name, "operator_role",
CONF.tempest.swift_operator_role)
CONF.openstack.swift_operator_role)
self.conf.set(section_name, "reseller_admin_role",
CONF.tempest.swift_reseller_admin_role)
CONF.openstack.swift_reseller_admin_role)
def _configure_service_available(self, section_name="service_available"):
services = ["cinder", "glance", "heat", "ironic", "neutron", "nova",
@ -188,9 +188,9 @@ class TempestConfigfileManager(object):
def _configure_orchestration(self, section_name="orchestration"):
self.conf.set(section_name, "stack_owner_role",
CONF.tempest.heat_stack_owner_role)
CONF.openstack.heat_stack_owner_role)
self.conf.set(section_name, "stack_user_role",
CONF.tempest.heat_stack_user_role)
CONF.openstack.heat_stack_user_role)
def create(self, conf_path, extra_options=None):
self.conf.read(os.path.join(os.path.dirname(__file__), "config.ini"))
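Note: the _configure_* methods funnel CONF.openstack values into tempest.conf through the same set-and-write flow. A minimal stdlib sketch of that flow (the file name, section and value here are illustrative, not Tempest's full schema):

import configparser

conf = configparser.ConfigParser()
conf.read("tempest.conf")  # silently skips a missing file
if not conf.has_section("orchestration"):
    conf.add_section("orchestration")
# In the real manager the value comes from CONF.openstack.heat_stack_owner_role.
conf.set("orchestration", "stack_owner_role", "heat_stack_owner")
with open("tempest.conf", "w") as configfile:
    conf.write(configfile)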

View File

@ -76,10 +76,10 @@ class TempestContext(context.VerifierContext):
helper_method=self._discover_or_create_image)
self._configure_option("compute", "flavor_ref",
helper_method=self._discover_or_create_flavor,
flv_ram=conf.CONF.tempest.flavor_ref_ram)
flv_ram=conf.CONF.openstack.flavor_ref_ram)
self._configure_option("compute", "flavor_ref_alt",
helper_method=self._discover_or_create_flavor,
flv_ram=conf.CONF.tempest.flavor_ref_alt_ram)
flv_ram=conf.CONF.openstack.flavor_ref_alt_ram)
if "neutron" in self.available_services:
neutronclient = self.clients.neutron()
if neutronclient.list_networks(shared=True)["networks"]:
@ -99,7 +99,7 @@ class TempestContext(context.VerifierContext):
self._configure_option(
"orchestration", "instance_type",
helper_method=self._discover_or_create_flavor,
flv_ram=conf.CONF.tempest.heat_instance_type_ram)
flv_ram=conf.CONF.openstack.heat_instance_type_ram)
with open(self.conf_path, "w") as configfile:
self.conf.write(configfile)
@ -121,10 +121,10 @@ class TempestContext(context.VerifierContext):
def _create_tempest_roles(self):
keystoneclient = self.clients.verified_keystone()
roles = [conf.CONF.tempest.swift_operator_role,
conf.CONF.tempest.swift_reseller_admin_role,
conf.CONF.tempest.heat_stack_owner_role,
conf.CONF.tempest.heat_stack_user_role]
roles = [conf.CONF.openstack.swift_operator_role,
conf.CONF.openstack.swift_reseller_admin_role,
conf.CONF.openstack.heat_stack_owner_role,
conf.CONF.openstack.heat_stack_user_role]
existing_roles = set(role.name for role in keystoneclient.roles.list())
for role in roles:
@ -154,19 +154,20 @@ class TempestContext(context.VerifierContext):
def _discover_image(self):
LOG.debug("Trying to discover a public image with name matching "
"regular expression '%s'. Note that case insensitive "
"matching is performed." % conf.CONF.tempest.img_name_regex)
"matching is performed."
% conf.CONF.openstack.img_name_regex)
image_service = image.Image(self.clients)
images = image_service.list_images(status="active",
visibility="public")
for image_obj in images:
if image_obj.name and re.match(conf.CONF.tempest.img_name_regex,
if image_obj.name and re.match(conf.CONF.openstack.img_name_regex,
image_obj.name, re.IGNORECASE):
LOG.debug("The following public image was discovered: '%s'."
% image_obj.name)
return image_obj
LOG.debug("There is no public image with name matching regular "
"expression '%s'." % conf.CONF.tempest.img_name_regex)
"expression '%s'." % conf.CONF.openstack.img_name_regex)
def _download_image_from_source(self, target_path, image=None):
if image:
@ -177,9 +178,10 @@ class TempestContext(context.VerifierContext):
image_file.write(chunk)
else:
LOG.debug("Downloading image from %s to %s."
% (conf.CONF.tempest.img_url, target_path))
% (conf.CONF.openstack.img_url, target_path))
try:
response = requests.get(conf.CONF.tempest.img_url, stream=True)
response = requests.get(conf.CONF.openstack.img_url,
stream=True)
except requests.ConnectionError as err:
msg = ("Failed to download image. Possibly there is no "
"connection to the Internet. Error: %s."
@ -208,7 +210,7 @@ class TempestContext(context.VerifierContext):
LOG.debug("Image is already downloaded to %s." % image_path)
return
if conf.CONF.tempest.img_name_regex:
if conf.CONF.openstack.img_name_regex:
image = self._discover_image()
if image:
return self._download_image_from_source(image_path, image)
@ -216,7 +218,7 @@ class TempestContext(context.VerifierContext):
self._download_image_from_source(image_path)
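Note: _download_image_from_source streams the image either from Glance or from CONF.openstack.img_url. A standalone sketch of the HTTP branch (the function name and chunk size are assumptions):

import requests

def download_image(url, target_path, chunk_size=64 * 1024):
    # Stream to disk instead of buffering the whole image in memory.
    response = requests.get(url, stream=True)
    response.raise_for_status()
    with open(target_path, "wb") as image_file:
        for chunk in response.iter_content(chunk_size=chunk_size):
            image_file.write(chunk)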
def _discover_or_create_image(self):
if conf.CONF.tempest.img_name_regex:
if conf.CONF.openstack.img_name_regex:
image_obj = self._discover_image()
if image_obj:
LOG.debug("Using image '%s' (ID = %s) for the tests."
@ -225,8 +227,8 @@ class TempestContext(context.VerifierContext):
params = {
"image_name": self.generate_random_name(),
"disk_format": conf.CONF.tempest.img_disk_format,
"container_format": conf.CONF.tempest.img_container_format,
"disk_format": conf.CONF.openstack.img_disk_format,
"container_format": conf.CONF.openstack.img_container_format,
"image_location": os.path.join(self.data_dir, self.image_name),
"visibility": "public"
}
@ -297,8 +299,8 @@ class TempestContext(context.VerifierContext):
image_obj, ["deleted", "pending_delete"],
check_deletion=True,
update_resource=image_service.get_image,
timeout=conf.CONF.benchmark.glance_image_delete_timeout,
check_interval=conf.CONF.benchmark.
timeout=conf.CONF.openstack.glance_image_delete_timeout,
check_interval=conf.CONF.openstack.
glance_image_delete_poll_interval)
LOG.debug("Image '%s' has been deleted." % image_obj.name)
self._remove_opt_value_from_config("compute", image_obj.id)

View File

@ -98,14 +98,14 @@ class GlanceV1Wrapper(GlanceWrapper):
image = self.client.images.create(**kw)
rutils.interruptable_sleep(CONF.benchmark.
rutils.interruptable_sleep(CONF.openstack.
glance_image_create_prepoll_delay)
image = utils.wait_for_status(
image, ["active"],
update_resource=self.get_image,
timeout=CONF.benchmark.glance_image_create_timeout,
check_interval=CONF.benchmark.
timeout=CONF.openstack.glance_image_create_timeout,
check_interval=CONF.openstack.
glance_image_create_poll_interval)
finally:
if "data" in kw:
@ -151,15 +151,15 @@ class GlanceV2Wrapper(GlanceWrapper):
image = self.client.images.create(**kw)
rutils.interruptable_sleep(CONF.benchmark.
rutils.interruptable_sleep(CONF.openstack.
glance_image_create_prepoll_delay)
start = time.time()
image = utils.wait_for_status(
image, ["queued"],
update_resource=self.get_image,
timeout=CONF.benchmark.glance_image_create_timeout,
check_interval=CONF.benchmark.
timeout=CONF.openstack.glance_image_create_timeout,
check_interval=CONF.openstack.
glance_image_create_poll_interval)
timeout = time.time() - start
@ -182,7 +182,7 @@ class GlanceV2Wrapper(GlanceWrapper):
image, ["active"],
update_resource=self.get_image,
timeout=timeout,
check_interval=CONF.benchmark.
check_interval=CONF.openstack.
glance_image_create_poll_interval)
def set_visibility(self, image, visibility="public"):

View File

@ -90,7 +90,7 @@ class MonascaMetricGeneratorTestCase(test.TestCase):
"Total number of metrics created should be tenant "
"counts times metrics per tenant")
first_call = mock.call(0.001)
second_call = mock.call(monasca_utils.CONF.benchmark.
second_call = mock.call(monasca_utils.CONF.openstack.
monasca_metric_create_prepoll_delay,
atomic_delay=1)
self.assertEqual([first_call] * metrics_per_tenant * tenants_count +

View File

@ -46,7 +46,7 @@ class SaharaClusterTestCase(test.ScenarioTestCase):
"tenant_id": str(i),
"credential": mock.MagicMock()})
CONF.set_override("sahara_cluster_check_interval", 0, "benchmark")
CONF.set_override("sahara_cluster_check_interval", 0, "openstack")
self.context.update({
"config": {

View File

@ -152,8 +152,8 @@ class CinderScenarioTestCase(test.ScenarioTestCase):
self.mock_wrap.return_value.create_volume.return_value,
ready_statuses=["available"],
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.cinder_volume_create_poll_interval
timeout=CONF.openstack.cinder_volume_create_timeout,
check_interval=CONF.openstack.cinder_volume_create_poll_interval
)
self.mock_get_from_manager.mock.assert_called_once_with()
self.assertEqual(self.mock_wait_for.mock.return_value, return_volume)
@ -175,8 +175,8 @@ class CinderScenarioTestCase(test.ScenarioTestCase):
self.mock_wrap.return_value.create_volume.return_value,
ready_statuses=["available"],
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.cinder_volume_create_poll_interval
timeout=CONF.openstack.cinder_volume_create_timeout,
check_interval=CONF.openstack.cinder_volume_create_poll_interval
)
self.mock_get_from_manager.mock.assert_called_once_with()
self.assertEqual(self.mock_wait_for.mock.return_value, return_volume)
@ -214,8 +214,8 @@ class CinderScenarioTestCase(test.ScenarioTestCase):
ready_statuses=["deleted"],
check_deletion=True,
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=cfg.CONF.benchmark.cinder_volume_create_timeout,
check_interval=cfg.CONF.benchmark
timeout=cfg.CONF.openstack.cinder_volume_create_timeout,
check_interval=cfg.CONF.openstack
.cinder_volume_create_poll_interval)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(self.scenario.atomic_actions(),
@ -234,8 +234,8 @@ class CinderScenarioTestCase(test.ScenarioTestCase):
volume,
ready_statuses=["available"],
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.cinder_volume_create_poll_interval
timeout=CONF.openstack.cinder_volume_create_timeout,
check_interval=CONF.openstack.cinder_volume_create_poll_interval
)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(self.scenario.atomic_actions(),
@ -249,8 +249,8 @@ class CinderScenarioTestCase(test.ScenarioTestCase):
volume,
ready_statuses=["available"],
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.cinder_volume_create_poll_interval
timeout=CONF.openstack.cinder_volume_create_timeout,
check_interval=CONF.openstack.cinder_volume_create_poll_interval
)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(self.scenario.atomic_actions(),
@ -275,15 +275,15 @@ class CinderScenarioTestCase(test.ScenarioTestCase):
volume,
ready_statuses=["available"],
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.
timeout=CONF.openstack.cinder_volume_create_timeout,
check_interval=CONF.openstack.
cinder_volume_create_poll_interval),
mock.call(
self.clients("glance").images.get.return_value,
ready_statuses=["active"],
update_resource=mock_wrap.return_value.get_image,
timeout=CONF.benchmark.glance_image_create_timeout,
check_interval=CONF.benchmark.
timeout=CONF.openstack.glance_image_create_timeout,
check_interval=CONF.openstack.
glance_image_create_poll_interval)
])
self.mock_get_from_manager.mock.assert_called_once_with()
@ -296,8 +296,8 @@ class CinderScenarioTestCase(test.ScenarioTestCase):
self.mock_wrap.return_value.create_snapshot.return_value,
ready_statuses=["available"],
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=cfg.CONF.benchmark.cinder_volume_create_timeout,
check_interval=cfg.CONF.benchmark
timeout=cfg.CONF.openstack.cinder_volume_create_timeout,
check_interval=cfg.CONF.openstack
.cinder_volume_create_poll_interval)
self.mock_get_from_manager.mock.assert_called_once_with()
self.assertEqual(self.mock_wait_for.mock.return_value, return_snapshot)
@ -313,8 +313,8 @@ class CinderScenarioTestCase(test.ScenarioTestCase):
ready_statuses=["deleted"],
check_deletion=True,
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=cfg.CONF.benchmark.cinder_volume_create_timeout,
check_interval=cfg.CONF.benchmark
timeout=cfg.CONF.openstack.cinder_volume_create_timeout,
check_interval=cfg.CONF.openstack
.cinder_volume_create_poll_interval)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(self.scenario.atomic_actions(),
@ -327,8 +327,8 @@ class CinderScenarioTestCase(test.ScenarioTestCase):
self.clients("cinder").backups.create.return_value,
ready_statuses=["available"],
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=cfg.CONF.benchmark.cinder_volume_create_timeout,
check_interval=cfg.CONF.benchmark
timeout=cfg.CONF.openstack.cinder_volume_create_timeout,
check_interval=cfg.CONF.openstack
.cinder_volume_create_poll_interval)
self.mock_get_from_manager.mock.assert_called_once_with()
self.assertEqual(self.mock_wait_for.mock.return_value, return_backup)
@ -344,8 +344,8 @@ class CinderScenarioTestCase(test.ScenarioTestCase):
ready_statuses=["deleted"],
check_deletion=True,
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=cfg.CONF.benchmark.cinder_volume_create_timeout,
check_interval=cfg.CONF.benchmark
timeout=cfg.CONF.openstack.cinder_volume_create_timeout,
check_interval=cfg.CONF.openstack
.cinder_volume_create_poll_interval)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(self.scenario.atomic_actions(),
@ -353,7 +353,7 @@ class CinderScenarioTestCase(test.ScenarioTestCase):
def test__restore_backup(self):
# NOTE(mdovgal): added for pep8 visual indent test passing
bench_cfg = cfg.CONF.benchmark
bench_cfg = cfg.CONF.openstack
backup = mock.Mock()
restore = mock.Mock()

View File

@ -55,15 +55,15 @@ class EC2ScenarioTestCase(test.ScenarioTestCase):
self.server1,
ready_statuses=["RUNNING"],
update_resource=ec2_scenario._update_resource,
check_interval=CONF.benchmark.ec2_server_boot_poll_interval,
timeout=CONF.benchmark.ec2_server_boot_timeout
check_interval=CONF.openstack.ec2_server_boot_poll_interval,
timeout=CONF.openstack.ec2_server_boot_timeout
),
mock.call(
self.server2,
ready_statuses=["RUNNING"],
update_resource=ec2_scenario._update_resource,
check_interval=CONF.benchmark.ec2_server_boot_poll_interval,
timeout=CONF.benchmark.ec2_server_boot_timeout
check_interval=CONF.openstack.ec2_server_boot_poll_interval,
timeout=CONF.openstack.ec2_server_boot_timeout
)
]
self.mock_wait_for.mock.assert_has_calls(expected)

View File

@ -63,8 +63,8 @@ class HeatScenarioTestCase(test.ScenarioTestCase):
update_resource=self.mock_get_from_manager.mock.return_value,
ready_statuses=["CREATE_COMPLETE"],
failure_statuses=["CREATE_FAILED", "ERROR"],
check_interval=CONF.benchmark.heat_stack_create_poll_interval,
timeout=CONF.benchmark.heat_stack_create_timeout)
check_interval=CONF.openstack.heat_stack_create_poll_interval,
timeout=CONF.openstack.heat_stack_create_timeout)
self.mock_get_from_manager.mock.assert_called_once_with()
self.assertEqual(self.mock_wait_for.mock.return_value, return_stack)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
@ -87,8 +87,8 @@ class HeatScenarioTestCase(test.ScenarioTestCase):
update_resource=self.mock_get_from_manager.mock.return_value,
ready_statuses=["UPDATE_COMPLETE"],
failure_statuses=["UPDATE_FAILED", "ERROR"],
check_interval=CONF.benchmark.heat_stack_update_poll_interval,
timeout=CONF.benchmark.heat_stack_update_timeout)
check_interval=CONF.openstack.heat_stack_update_poll_interval,
timeout=CONF.openstack.heat_stack_update_timeout)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(scenario.atomic_actions(),
"heat.update_stack")
@ -103,8 +103,8 @@ class HeatScenarioTestCase(test.ScenarioTestCase):
update_resource=self.mock_get_from_manager.mock.return_value,
ready_statuses=["CHECK_COMPLETE"],
failure_statuses=["CHECK_FAILED", "ERROR"],
check_interval=CONF.benchmark.heat_stack_check_poll_interval,
timeout=CONF.benchmark.heat_stack_check_timeout)
check_interval=CONF.openstack.heat_stack_check_poll_interval,
timeout=CONF.openstack.heat_stack_check_timeout)
self._test_atomic_action_timer(scenario.atomic_actions(),
"heat.check_stack")
@ -118,8 +118,8 @@ class HeatScenarioTestCase(test.ScenarioTestCase):
failure_statuses=["DELETE_FAILED", "ERROR"],
check_deletion=True,
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.heat_stack_delete_poll_interval,
timeout=CONF.benchmark.heat_stack_delete_timeout)
check_interval=CONF.openstack.heat_stack_delete_poll_interval,
timeout=CONF.openstack.heat_stack_delete_timeout)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(scenario.atomic_actions(),
"heat.delete_stack")
@ -134,8 +134,8 @@ class HeatScenarioTestCase(test.ScenarioTestCase):
update_resource=self.mock_get_from_manager.mock.return_value,
ready_statuses=["SUSPEND_COMPLETE"],
failure_statuses=["SUSPEND_FAILED", "ERROR"],
check_interval=CONF.benchmark.heat_stack_suspend_poll_interval,
timeout=CONF.benchmark.heat_stack_suspend_timeout)
check_interval=CONF.openstack.heat_stack_suspend_poll_interval,
timeout=CONF.openstack.heat_stack_suspend_timeout)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(scenario.atomic_actions(),
"heat.suspend_stack")
@ -150,8 +150,8 @@ class HeatScenarioTestCase(test.ScenarioTestCase):
update_resource=self.mock_get_from_manager.mock.return_value,
ready_statuses=["RESUME_COMPLETE"],
failure_statuses=["RESUME_FAILED", "ERROR"],
check_interval=CONF.benchmark.heat_stack_resume_poll_interval,
timeout=CONF.benchmark.heat_stack_resume_timeout)
check_interval=CONF.openstack.heat_stack_resume_poll_interval,
timeout=CONF.openstack.heat_stack_resume_timeout)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(scenario.atomic_actions(),
"heat.resume_stack")
@ -166,8 +166,8 @@ class HeatScenarioTestCase(test.ScenarioTestCase):
update_resource=self.mock_get_from_manager.mock.return_value,
ready_statuses=["SNAPSHOT_COMPLETE"],
failure_statuses=["SNAPSHOT_FAILED", "ERROR"],
check_interval=CONF.benchmark.heat_stack_snapshot_poll_interval,
timeout=CONF.benchmark.heat_stack_snapshot_timeout)
check_interval=CONF.openstack.heat_stack_snapshot_poll_interval,
timeout=CONF.openstack.heat_stack_snapshot_timeout)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(scenario.atomic_actions(),
"heat.snapshot_stack")
@ -182,8 +182,8 @@ class HeatScenarioTestCase(test.ScenarioTestCase):
update_resource=self.mock_get_from_manager.mock.return_value,
ready_statuses=["RESTORE_COMPLETE"],
failure_statuses=["RESTORE_FAILED", "ERROR"],
check_interval=CONF.benchmark.heat_stack_restore_poll_interval,
timeout=CONF.benchmark.heat_stack_restore_timeout)
check_interval=CONF.openstack.heat_stack_restore_poll_interval,
timeout=CONF.openstack.heat_stack_restore_timeout)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(scenario.atomic_actions(),
"heat.restore_stack")
@ -213,8 +213,8 @@ class HeatScenarioTestCase(test.ScenarioTestCase):
is_ready=mock.ANY,
failure_statuses=["UPDATE_FAILED", "ERROR"],
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=CONF.benchmark.heat_stack_scale_timeout,
check_interval=CONF.benchmark.heat_stack_scale_poll_interval)
timeout=CONF.openstack.heat_stack_scale_timeout,
check_interval=CONF.openstack.heat_stack_scale_poll_interval)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(scenario.atomic_actions(),

View File

@ -97,9 +97,9 @@ class MagnumScenarioTestCase(test.ScenarioTestCase):
self.cluster,
ready_statuses=["CREATE_COMPLETE"],
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.
check_interval=CONF.openstack.
magnum_cluster_create_poll_interval,
timeout=CONF.benchmark.magnum_cluster_create_timeout,
timeout=CONF.openstack.magnum_cluster_create_timeout,
id_attr="uuid")
_, kwargs = self.clients("magnum").clusters.create.call_args
self.assertEqual("generated_name", kwargs["name"])

View File

@ -88,7 +88,7 @@ class MuranoScenarioTestCase(test.ScenarioTestCase):
environment.id, session.id
)
config = CONF.benchmark
config = CONF.openstack
self.mock_wait_for.mock.assert_called_once_with(
environment,
update_resource=self.mock_get_from_manager.mock.return_value,

View File

@ -112,8 +112,8 @@ class NovaScenarioTestCase(test.ScenarioTestCase):
self.server,
ready_statuses=["ACTIVE"],
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_boot_poll_interval,
timeout=CONF.benchmark.nova_server_boot_timeout)
check_interval=CONF.openstack.nova_server_boot_poll_interval,
timeout=CONF.openstack.nova_server_boot_timeout)
self.mock_get_from_manager.mock.assert_called_once_with()
self.assertEqual(self.mock_wait_for_status.mock.return_value,
return_server)
@ -155,8 +155,8 @@ class NovaScenarioTestCase(test.ScenarioTestCase):
self.server,
ready_statuses=["SUSPENDED"],
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_suspend_poll_interval,
timeout=CONF.benchmark.nova_server_suspend_timeout)
check_interval=CONF.openstack.nova_server_suspend_poll_interval,
timeout=CONF.openstack.nova_server_suspend_timeout)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.suspend_server")
@ -169,8 +169,8 @@ class NovaScenarioTestCase(test.ScenarioTestCase):
self.server,
ready_statuses=["ACTIVE"],
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_resume_poll_interval,
timeout=CONF.benchmark.nova_server_resume_timeout)
check_interval=CONF.openstack.nova_server_resume_poll_interval,
timeout=CONF.openstack.nova_server_resume_timeout)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.resume_server")
@ -183,8 +183,8 @@ class NovaScenarioTestCase(test.ScenarioTestCase):
self.server,
ready_statuses=["PAUSED"],
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_pause_poll_interval,
timeout=CONF.benchmark.nova_server_pause_timeout)
check_interval=CONF.openstack.nova_server_pause_poll_interval,
timeout=CONF.openstack.nova_server_pause_timeout)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.pause_server")
@ -197,8 +197,8 @@ class NovaScenarioTestCase(test.ScenarioTestCase):
self.server,
ready_statuses=["ACTIVE"],
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_unpause_poll_interval,
timeout=CONF.benchmark.nova_server_unpause_timeout)
check_interval=CONF.openstack.nova_server_unpause_poll_interval,
timeout=CONF.openstack.nova_server_unpause_timeout)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.unpause_server")
@ -211,8 +211,8 @@ class NovaScenarioTestCase(test.ScenarioTestCase):
self.server,
ready_statuses=["SHELVED_OFFLOADED"],
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_shelve_poll_interval,
timeout=CONF.benchmark.nova_server_shelve_timeout)
check_interval=CONF.openstack.nova_server_shelve_poll_interval,
timeout=CONF.openstack.nova_server_shelve_timeout)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.shelve_server")
@ -225,8 +225,8 @@ class NovaScenarioTestCase(test.ScenarioTestCase):
self.server,
ready_statuses=["ACTIVE"],
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_unshelve_poll_interval,
timeout=CONF.benchmark.nova_server_unshelve_timeout)
check_interval=CONF.openstack.nova_server_unshelve_poll_interval,
timeout=CONF.openstack.nova_server_unshelve_timeout)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.unshelve_server")
@ -241,9 +241,9 @@ class NovaScenarioTestCase(test.ScenarioTestCase):
self.image,
ready_statuses=["ACTIVE"],
update_resource=glance.get_image,
check_interval=CONF.benchmark.
check_interval=CONF.openstack.
nova_server_image_create_poll_interval,
timeout=CONF.benchmark.nova_server_image_create_timeout)
timeout=CONF.openstack.nova_server_image_create_timeout)
self.assertEqual(self.mock_wait_for_status.mock.return_value,
return_image)
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
@ -258,8 +258,8 @@ class NovaScenarioTestCase(test.ScenarioTestCase):
ready_statuses=["deleted"],
check_deletion=True,
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_delete_poll_interval,
timeout=CONF.benchmark.nova_server_delete_timeout)
check_interval=CONF.openstack.nova_server_delete_poll_interval,
timeout=CONF.openstack.nova_server_delete_timeout)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.delete_server")
@ -273,8 +273,8 @@ class NovaScenarioTestCase(test.ScenarioTestCase):
ready_statuses=["deleted"],
check_deletion=True,
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_delete_poll_interval,
timeout=CONF.benchmark.nova_server_delete_timeout)
check_interval=CONF.openstack.nova_server_delete_poll_interval,
timeout=CONF.openstack.nova_server_delete_timeout)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.force_delete_server")
@ -287,8 +287,8 @@ class NovaScenarioTestCase(test.ScenarioTestCase):
self.server,
ready_statuses=["ACTIVE"],
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_reboot_poll_interval,
timeout=CONF.benchmark.nova_server_reboot_timeout)
check_interval=CONF.openstack.nova_server_reboot_poll_interval,
timeout=CONF.openstack.nova_server_reboot_timeout)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.reboot_server")
@ -301,8 +301,8 @@ class NovaScenarioTestCase(test.ScenarioTestCase):
self.server,
ready_statuses=["ACTIVE"],
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_reboot_poll_interval,
timeout=CONF.benchmark.nova_server_reboot_timeout)
check_interval=CONF.openstack.nova_server_reboot_poll_interval,
timeout=CONF.openstack.nova_server_reboot_timeout)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.soft_reboot_server")
@ -315,8 +315,8 @@ class NovaScenarioTestCase(test.ScenarioTestCase):
self.server,
ready_statuses=["ACTIVE"],
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_rebuild_poll_interval,
timeout=CONF.benchmark.nova_server_rebuild_timeout)
check_interval=CONF.openstack.nova_server_rebuild_poll_interval,
timeout=CONF.openstack.nova_server_rebuild_timeout)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.rebuild_server")
@ -329,8 +329,8 @@ class NovaScenarioTestCase(test.ScenarioTestCase):
self.server,
ready_statuses=["ACTIVE"],
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_start_poll_interval,
timeout=CONF.benchmark.nova_server_start_timeout)
check_interval=CONF.openstack.nova_server_start_poll_interval,
timeout=CONF.openstack.nova_server_start_timeout)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.start_server")
@ -343,8 +343,8 @@ class NovaScenarioTestCase(test.ScenarioTestCase):
self.server,
ready_statuses=["SHUTOFF"],
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_stop_poll_interval,
timeout=CONF.benchmark.nova_server_stop_timeout)
check_interval=CONF.openstack.nova_server_stop_poll_interval,
timeout=CONF.openstack.nova_server_stop_timeout)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.stop_server")
@ -357,8 +357,8 @@ class NovaScenarioTestCase(test.ScenarioTestCase):
self.server,
ready_statuses=["RESCUE"],
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_rescue_poll_interval,
timeout=CONF.benchmark.nova_server_rescue_timeout)
check_interval=CONF.openstack.nova_server_rescue_poll_interval,
timeout=CONF.openstack.nova_server_rescue_timeout)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.rescue_server")
@ -371,8 +371,8 @@ class NovaScenarioTestCase(test.ScenarioTestCase):
self.server,
ready_statuses=["ACTIVE"],
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_unrescue_poll_interval,
timeout=CONF.benchmark.nova_server_unrescue_timeout)
check_interval=CONF.openstack.nova_server_unrescue_poll_interval,
timeout=CONF.openstack.nova_server_unrescue_timeout)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.unrescue_server")
@ -381,7 +381,7 @@ class NovaScenarioTestCase(test.ScenarioTestCase):
servers = [self.server, self.server1]
nova_scenario = utils.NovaScenario(context=self.context)
nova_scenario._delete_servers(servers, force=force)
check_interval = CONF.benchmark.nova_server_delete_poll_interval
check_interval = CONF.openstack.nova_server_delete_poll_interval
expected = []
for server in servers:
expected.append(mock.call(
@ -390,7 +390,7 @@ class NovaScenarioTestCase(test.ScenarioTestCase):
check_deletion=True,
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=check_interval,
timeout=CONF.benchmark.nova_server_delete_timeout))
timeout=CONF.openstack.nova_server_delete_timeout))
if force:
server.force_delete.assert_called_once_with()
self.assertFalse(server.delete.called)
@ -421,9 +421,9 @@ class NovaScenarioTestCase(test.ScenarioTestCase):
ready_statuses=["deleted", "pending_delete"],
check_deletion=True,
update_resource=glance.get_image,
check_interval=CONF.benchmark.
check_interval=CONF.openstack.
nova_server_image_delete_poll_interval,
timeout=CONF.benchmark.nova_server_image_delete_timeout)
timeout=CONF.openstack.nova_server_image_delete_timeout)
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.delete_image")
@ -467,8 +467,8 @@ class NovaScenarioTestCase(test.ScenarioTestCase):
servers[i],
ready_statuses=["ACTIVE"],
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_boot_poll_interval,
timeout=CONF.benchmark.nova_server_boot_timeout)
check_interval=CONF.openstack.nova_server_boot_poll_interval,
timeout=CONF.openstack.nova_server_boot_timeout)
for i in range(instances_amount)]
self.mock_wait_for_status.mock.assert_has_calls(wait_for_status_calls)
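Note: these tests build the expected mock.call list from the renamed options and compare it with the recorded calls. A self-contained sketch of that assertion style (values are illustrative, not the real defaults):

from unittest import mock

wait_for_status = mock.Mock()
for server in ["srv-1", "srv-2"]:
    wait_for_status(server, ready_statuses=["ACTIVE"],
                    check_interval=1.0, timeout=300.0)

# One expected call per booted server, mirroring assert_has_calls above.
wait_for_status.assert_has_calls(
    [mock.call(s, ready_statuses=["ACTIVE"],
               check_interval=1.0, timeout=300.0)
     for s in ["srv-1", "srv-2"]])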
@ -589,9 +589,9 @@ class NovaScenarioTestCase(test.ScenarioTestCase):
self.server,
ready_statuses=[status],
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.
check_interval=CONF.openstack.
nova_server_resize_revert_poll_interval,
timeout=CONF.benchmark.nova_server_resize_revert_timeout)
timeout=CONF.openstack.nova_server_resize_revert_timeout)
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.resize_revert")
@ -662,9 +662,9 @@ class NovaScenarioTestCase(test.ScenarioTestCase):
self.server,
ready_statuses=["ACTIVE"],
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.
check_interval=CONF.openstack.
nova_server_live_migrate_poll_interval,
timeout=CONF.benchmark.nova_server_live_migrate_timeout)
timeout=CONF.openstack.nova_server_live_migrate_timeout)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.live_migrate")
@ -705,8 +705,8 @@ class NovaScenarioTestCase(test.ScenarioTestCase):
fake_server,
ready_statuses=["VERIFY_RESIZE"],
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.nova_server_migrate_poll_interval,
timeout=CONF.benchmark.nova_server_migrate_timeout)
check_interval=CONF.openstack.nova_server_migrate_poll_interval,
timeout=CONF.openstack.nova_server_migrate_timeout)
self.mock_get_from_manager.mock.assert_called_once_with()
self._test_atomic_action_timer(nova_scenario.atomic_actions(),
"nova.migrate")

View File

@ -30,8 +30,8 @@ class SaharaJobTestCase(test.ScenarioTestCase):
super(SaharaJobTestCase, self).setUp()
self.context = test.get_test_context()
CONF.set_override("sahara_cluster_check_interval", 0, "benchmark")
CONF.set_override("sahara_job_check_interval", 0, "benchmark")
CONF.set_override("sahara_cluster_check_interval", 0, "openstack")
CONF.set_override("sahara_job_check_interval", 0, "openstack")
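Note: the setUp overrides now target the renamed group. A standalone sketch of the mechanism (the register_opts call is a stand-in so the snippet runs outside Rally, where the option is already registered under [openstack]):

from oslo_config import cfg

CONF = cfg.CONF
CONF.register_opts(
    [cfg.FloatOpt("sahara_job_check_interval", default=5.0)],
    group="openstack")

# The third positional argument is the group, matching the calls above.
CONF.set_override("sahara_job_check_interval", 0, "openstack")
assert CONF.openstack.sahara_job_check_interval == 0
CONF.clear_override("sahara_job_check_interval", "openstack")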
@mock.patch("%s.CreateLaunchJob._run_job_execution" % BASE)
def test_create_launch_job_java(self, mock_run_job):

View File

@ -41,8 +41,8 @@ class SaharaScenarioTestCase(test.ScenarioTestCase):
def setUp(self):
super(SaharaScenarioTestCase, self).setUp()
CONF.set_override("sahara_cluster_check_interval", 0, "benchmark")
CONF.set_override("sahara_job_check_interval", 0, "benchmark")
CONF.set_override("sahara_cluster_check_interval", 0, "openstack")
CONF.set_override("sahara_job_check_interval", 0, "openstack")
def test_list_node_group_templates(self):
ngts = []

View File

@ -66,7 +66,7 @@ class SenlinScenarioTestCase(test.ScenarioTestCase):
fake_cluster, ready_statuses=["ACTIVE"],
failure_statuses=["ERROR"],
update_resource=scenario._get_cluster,
timeout=CONF.benchmark.senlin_action_timeout)
timeout=CONF.openstack.senlin_action_timeout)
mock_generate_random_name.assert_called_once_with()
self._test_atomic_action_timer(scenario.atomic_actions(),
"senlin.create_cluster")
@ -116,7 +116,7 @@ class SenlinScenarioTestCase(test.ScenarioTestCase):
fake_cluster, ready_statuses=["DELETED"],
failure_statuses=["ERROR"], check_deletion=True,
update_resource=scenario._get_cluster,
timeout=CONF.benchmark.senlin_action_timeout)
timeout=CONF.openstack.senlin_action_timeout)
self._test_atomic_action_timer(scenario.atomic_actions(),
"senlin.delete_cluster")

@ -110,8 +110,8 @@ class VMScenarioTestCase(test.ScenarioTestCase):
utils.Host("1.2.3.4"),
ready_statuses=[utils.Host.ICMP_UP_STATUS],
update_resource=utils.Host.update_status,
timeout=CONF.benchmark.vm_ping_timeout,
check_interval=CONF.benchmark.vm_ping_poll_interval)
timeout=CONF.openstack.vm_ping_timeout,
check_interval=CONF.openstack.vm_ping_poll_interval)
@mock.patch(VMTASKS_UTILS + ".VMScenario._run_command_over_ssh")
@mock.patch("rally.common.sshutils.SSH")

@ -65,8 +65,8 @@ class WatcherScenarioTestCase(test.ScenarioTestCase):
failure_statuses=["FAILED"],
status_attr="state",
update_resource=self.mock_get_from_manager.mock.return_value,
check_interval=CONF.benchmark.watcher_audit_launch_poll_interval,
timeout=CONF.benchmark.watcher_audit_launch_timeout,
check_interval=CONF.openstack.watcher_audit_launch_poll_interval,
timeout=CONF.openstack.watcher_audit_launch_timeout,
id_attr="uuid")
self.mock_get_from_manager.mock.assert_called_once_with()
self.admin_clients("watcher").audit.create.assert_called_once_with(

@ -107,8 +107,8 @@ class CinderMixinTestCase(test.ScenarioTestCase):
volume,
ready_statuses=["available"],
update_resource=self.service._update_resource,
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.cinder_volume_create_poll_interval
timeout=CONF.openstack.cinder_volume_create_timeout,
check_interval=CONF.openstack.cinder_volume_create_poll_interval
)
def test_list_volumes(self):
@ -132,8 +132,8 @@ class CinderMixinTestCase(test.ScenarioTestCase):
ready_statuses=["deleted"],
check_deletion=True,
update_resource=self.service._update_resource,
timeout=CONF.benchmark.cinder_volume_delete_timeout,
check_interval=CONF.benchmark.cinder_volume_delete_poll_interval
timeout=CONF.openstack.cinder_volume_delete_timeout,
check_interval=CONF.openstack.cinder_volume_delete_poll_interval
)
@mock.patch("%s.block.BlockStorage.create_volume" % BASE_PATH)
@ -214,15 +214,15 @@ class CinderMixinTestCase(test.ScenarioTestCase):
volume,
ready_statuses=["available"],
update_resource=self.service._update_resource,
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.
timeout=CONF.openstack.cinder_volume_create_timeout,
check_interval=CONF.openstack.
cinder_volume_create_poll_interval),
mock.call(
glance.get_image.return_value,
ready_statuses=["active"],
update_resource=glance.get_image,
timeout=CONF.benchmark.glance_image_create_timeout,
check_interval=CONF.benchmark.
timeout=CONF.openstack.glance_image_create_timeout,
check_interval=CONF.openstack.
glance_image_create_poll_interval)
])
glance.get_image.assert_called_once_with(1)
@ -286,8 +286,8 @@ class CinderMixinTestCase(test.ScenarioTestCase):
ready_statuses=["deleted"],
check_deletion=True,
update_resource=self.service._update_resource,
timeout=cfg.CONF.benchmark.cinder_volume_create_timeout,
check_interval=cfg.CONF.benchmark
timeout=cfg.CONF.openstack.cinder_volume_create_timeout,
check_interval=cfg.CONF.openstack
.cinder_volume_create_poll_interval)
def test_delete_backup(self):
@ -299,8 +299,8 @@ class CinderMixinTestCase(test.ScenarioTestCase):
ready_statuses=["deleted"],
check_deletion=True,
update_resource=self.service._update_resource,
timeout=cfg.CONF.benchmark.cinder_volume_create_timeout,
check_interval=cfg.CONF.benchmark
timeout=cfg.CONF.openstack.cinder_volume_create_timeout,
check_interval=cfg.CONF.openstack
.cinder_volume_create_poll_interval)
def test_restore_backup(self):
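These Cinder assertions can switch wholesale to CONF.openstack.* because the renamed options keep their old group as a deprecated alias. A minimal, self-contained sketch of that oslo.config mechanism (the default value here is illustrative):

from oslo_config import cfg

CONF = cfg.ConfigOpts()
CONF.register_opts(
    [cfg.FloatOpt("cinder_volume_create_timeout",
                  default=600.0,  # illustrative default
                  deprecated_group="benchmark")],
    group="openstack")

# The option resolves under its new group ...
print(CONF.openstack.cinder_volume_create_timeout)
# ... while a value an operator still sets in the old [benchmark] section
# of a config file is honoured, with a deprecation warning, because of
# deprecated_group.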

@ -210,8 +210,8 @@ class TempestConfigfileManagerTestCase(test.TestCase):
self.tempest._configure_object_storage()
expected = (
("operator_role", CONF.tempest.swift_operator_role),
("reseller_admin_role", CONF.tempest.swift_reseller_admin_role))
("operator_role", CONF.openstack.swift_operator_role),
("reseller_admin_role", CONF.openstack.swift_reseller_admin_role))
result = self.tempest.conf.items("object-storage")
for item in expected:
self.assertIn(item, result)
@ -221,8 +221,8 @@ class TempestConfigfileManagerTestCase(test.TestCase):
self.tempest._configure_orchestration()
expected = (
("stack_owner_role", CONF.tempest.heat_stack_owner_role),
("stack_user_role", CONF.tempest.heat_stack_user_role))
("stack_owner_role", CONF.openstack.heat_stack_owner_role),
("stack_user_role", CONF.openstack.heat_stack_user_role))
result = self.tempest.conf.items("orchestration")
for item in expected:
self.assertIn(item, result)
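The two role checks above only pin down that the configure steps copy the renamed CONF.openstack role options into the matching tempest.conf sections; self.tempest.conf behaves like a standard ConfigParser, whose items(section) returns (name, value) pairs. A hedged sketch with stdlib configparser (the role values are assumed defaults, not taken from this change):

import configparser

conf = configparser.ConfigParser()
conf.add_section("orchestration")
conf.set("orchestration", "stack_owner_role", "heat_stack_owner")
conf.set("orchestration", "stack_user_role", "heat_stack_user")

assert (("stack_owner_role", "heat_stack_owner")
        in conf.items("orchestration"))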

@ -92,7 +92,7 @@ class TempestContextTestCase(test.TestCase):
mock_get.return_value.iter_content.return_value = "data"
self.context._download_image_from_source(img_path)
mock_get.assert_called_once_with(CONF.tempest.img_url, stream=True)
mock_get.assert_called_once_with(CONF.openstack.img_url, stream=True)
mock_open.assert_called_once_with(img_path, "wb")
mock_open().write.assert_has_calls([mock.call("d"),
mock.call("a"),
@ -141,10 +141,10 @@ class TempestContextTestCase(test.TestCase):
self.assertEqual(0, mock_neutron_wrapper_create_network.call_count)
def test__create_tempest_roles(self):
role1 = CONF.tempest.swift_operator_role
role2 = CONF.tempest.swift_reseller_admin_role
role3 = CONF.tempest.heat_stack_owner_role
role4 = CONF.tempest.heat_stack_user_role
role1 = CONF.openstack.swift_operator_role
role2 = CONF.openstack.swift_reseller_admin_role
role3 = CONF.openstack.heat_stack_owner_role
role4 = CONF.openstack.heat_stack_user_role
client = self.context.clients.verified_keystone()
client.roles.list.return_value = [fakes.FakeRole(name=role1),
@ -224,9 +224,9 @@ class TempestContextTestCase(test.TestCase):
self.assertEqual(image, mock_image().create_image.return_value)
self.assertEqual(self.context._created_images[0],
client.create_image.return_value)
params = {"container_format": CONF.tempest.img_container_format,
params = {"container_format": CONF.openstack.img_container_format,
"image_location": mock.ANY,
"disk_format": CONF.tempest.img_disk_format,
"disk_format": CONF.openstack.img_disk_format,
"image_name": mock.ANY,
"visibility": "public"}
client.create_image.assert_called_once_with(**params)
@ -363,10 +363,10 @@ class TempestContextTestCase(test.TestCase):
helper_method=ctx._discover_or_create_image),
mock.call("compute", "flavor_ref",
helper_method=ctx._discover_or_create_flavor,
flv_ram=config.CONF.tempest.flavor_ref_ram),
flv_ram=config.CONF.openstack.flavor_ref_ram),
mock.call("compute", "flavor_ref_alt",
helper_method=ctx._discover_or_create_flavor,
flv_ram=config.CONF.tempest.flavor_ref_alt_ram)],
flv_ram=config.CONF.openstack.flavor_ref_alt_ram)],
mock__configure_option.call_args_list)
mock_create_dir.reset_mock()
@ -401,13 +401,13 @@ class TempestContextTestCase(test.TestCase):
helper_method=ctx._discover_or_create_image),
mock.call("compute", "flavor_ref",
helper_method=ctx._discover_or_create_flavor,
flv_ram=config.CONF.tempest.flavor_ref_ram),
flv_ram=config.CONF.openstack.flavor_ref_ram),
mock.call("compute", "flavor_ref_alt",
helper_method=ctx._discover_or_create_flavor,
flv_ram=config.CONF.tempest.flavor_ref_alt_ram),
flv_ram=config.CONF.openstack.flavor_ref_alt_ram),
mock.call("compute", "fixed_network_name",
helper_method=ctx._create_network_resources),
mock.call("orchestration", "instance_type",
helper_method=ctx._discover_or_create_flavor,
flv_ram=config.CONF.tempest.heat_instance_type_ram)],
flv_ram=config.CONF.openstack.heat_instance_type_ram)],
mock__configure_option.call_args_list)

@ -110,8 +110,8 @@ class GlanceV1WrapperTestCase(test.ScenarioTestCase):
self.mock_wait_for_status.mock.assert_called_once_with(
self.client().images.create.return_value, ["active"],
update_resource=self.wrapped_client.get_image,
check_interval=CONF.benchmark.glance_image_create_poll_interval,
timeout=CONF.benchmark.glance_image_create_timeout)
check_interval=CONF.openstack.glance_image_create_poll_interval,
timeout=CONF.openstack.glance_image_create_timeout)
self.assertEqual(self.mock_wait_for_status.mock.return_value,
return_image)
@ -237,13 +237,13 @@ class GlanceV2WrapperTestCase(test.ScenarioTestCase):
mock.call(
self.client().images.create.return_value, ["queued"],
update_resource=self.wrapped_client.get_image,
check_interval=CONF.benchmark.
check_interval=CONF.openstack.
glance_image_create_poll_interval,
timeout=CONF.benchmark.glance_image_create_timeout),
timeout=CONF.openstack.glance_image_create_timeout),
mock.call(
created_image, ["active"],
update_resource=self.wrapped_client.get_image,
check_interval=CONF.benchmark.
check_interval=CONF.openstack.
glance_image_create_poll_interval,
timeout=mock.ANY)])
self.assertEqual(uploaded_image, return_image)
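The V2 wrapper test expects two waits that share one poll interval: the image must reach "queued" after creation, then "active" after upload, with the second timeout shrunk by the time already spent (which is why the test matches it with mock.ANY). A rough sketch of that control flow, under the assumption that it mirrors the wrapper's behaviour rather than reproducing its code:

import time


def create_and_upload(client, get_image, wait_for_status, image_file,
                      poll_interval, timeout):
    # Phase 1: the image record is created and must become "queued".
    start = time.time()
    image = client.images.create()
    image = wait_for_status(image, ["queued"], update_resource=get_image,
                            check_interval=poll_interval, timeout=timeout)
    # Phase 2: upload the bits, then wait for "active" with whatever time
    # budget remains.
    client.images.upload(image.id, image_file)
    remaining = timeout - (time.time() - start)
    return wait_for_status(image, ["active"], update_resource=get_image,
                           check_interval=poll_interval, timeout=remaining)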