Reword "benchmark" word in code base
Change-Id: I6eee0480ce03e7b9d82faf3c712fb06ec9dd2281
parent bb735b175d
commit ba26968ac8
@@ -261,7 +261,7 @@ def cleanup(names=None, admin_required=None, admin=None, users=None,
         If False -> return only non admin plugins
     :param admin: rally.deployment.credential.Credential that corresponds to
         OpenStack admin.
-    :param users: List of OpenStack users that was used during benchmarking.
+    :param users: List of OpenStack users that was used during testing.
         Every user has next structure:
         {
             "id": <uuid1>,
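Aside, not part of the change: the `users` list described above is conventionally a list of per-user dicts. A hedged sketch of that shape, with placeholder values and keys assumed from Rally's usual users-context output:

# Illustrative only: keys reflect what Rally's users context conventionally
# provides; the values are placeholders, not taken from this diff.
example_users = [
    {
        "id": "<uuid-of-keystone-user>",
        "tenant_id": "<uuid-of-owning-tenant>",
        "credential": None,  # a rally credential object in a real run
    }
]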
@@ -32,7 +32,7 @@ LOG = logging.getLogger(__name__)
 @validation.add("required_platform", platform="openstack", users=True)
 @context.configure(name="ceilometer", platform="openstack", order=450)
 class CeilometerSampleGenerator(context.Context):
-    """Context for creating samples and collecting resources for benchmarks."""
+    """Creates ceilometer samples and resources."""

     CONFIG_SCHEMA = {
         "type": "object",
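For orientation while reading these hunks: every context touched here follows the same plugin shape — subclass `context.Context`, register it with `@context.configure`, declare a jsonschema `CONFIG_SCHEMA`, and implement `setup()`/`cleanup()`. Below is a minimal hedged sketch; the plugin name, order value, and config keys are illustrative, and the `self.config`/`self.context` usage follows common Rally conventions that may differ slightly between Rally versions.

from rally.task import context


@context.configure(name="example_records", order=900)
class ExampleRecordGenerator(context.Context):
    """Creates a fixed number of illustrative records for scenarios."""

    CONFIG_SCHEMA = {
        "type": "object",
        "properties": {
            "records_per_tenant": {"type": "integer", "minimum": 1}
        },
        "additionalProperties": False
    }

    def setup(self):
        # self.config holds this context's validated config; self.context is
        # the shared dict that scenarios of the same task can read.
        count = self.config.get("records_per_tenant", 1)
        self.context["example_records"] = list(range(count))

    def cleanup(self):
        # Drop whatever setup() published into the shared task context.
        self.context.pop("example_records", None)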
@@ -29,7 +29,7 @@ LOG = logging.getLogger(__name__)
 @validation.add("required_platform", platform="openstack", admin=True)
 @context.configure(name="volume_types", platform="openstack", order=410)
 class VolumeTypeGenerator(context.Context):
-    """Context class for adding volumes types for benchmarks."""
+    """Adds cinder volumes types."""

     CONFIG_SCHEMA = {
         "type": "array",
@@ -27,7 +27,7 @@ LOG = logging.getLogger(__name__)

 @context.configure(name="volumes", platform="openstack", order=420)
 class VolumeGenerator(context.Context):
-    """Context class for adding volumes to each user for benchmarks."""
+    """Creates volumes for each tenant."""

     CONFIG_SCHEMA = {
         "type": "object",
@@ -28,10 +28,7 @@ LOG = logging.getLogger(__name__)

 @context.configure(name="ec2_servers", platform="openstack", order=460)
 class EC2ServerGenerator(context.Context):
-    """Context class for adding temporary servers for benchmarks.
-
-    Servers are added for each tenant.
-    """
+    """Creates specified amount of nova servers in each tenant uses ec2 API."""

     CONFIG_SCHEMA = {
         "type": "object",
@@ -38,7 +38,7 @@ LOG = logging.getLogger(__name__)
 @validation.add("required_platform", platform="openstack", users=True)
 @context.configure(name="images", platform="openstack", order=410)
 class ImageGenerator(context.Context):
-    """Context class for adding images to each user for benchmarks."""
+    """Uploads specified Glance images to every tenant."""

     CONFIG_SCHEMA = {
         "type": "object",
@@ -49,7 +49,7 @@ USER_DOMAIN_DESCR = "ID of domain in which users will be created."
 @validation.add("required_platform", platform="openstack", users=True)
 @context.configure(name="users", platform="openstack", order=100)
 class UserGenerator(context.Context):
-    """Context class for generating temporary users/tenants for benchmarks."""
+    """Creates specified amount of keystone users and tenants."""

     CONFIG_SCHEMA = {
         "type": "object",
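As a usage note (not shown in this diff): the `users` context above is normally driven from the task file. Expressed as the equivalent Python dict, a common configuration looks roughly like the sketch below; the numbers are placeholders.

# Illustrative only: two tenants with three temporary keystone users each.
example_users_cfg = {
    "users": {
        "tenants": 2,
        "users_per_tenant": 3
    }
}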
@@ -36,7 +36,7 @@ LOG = logging.getLogger(__name__)
 @validation.add("required_platform", platform="openstack", users=True)
 @context.configure(name="ca_certs", platform="openstack", order=490)
 class CaCertGenerator(context.Context):
-    """Context class for generating temporary ca cert for benchmarks."""
+    """Creates ca certs."""

     CONFIG_SCHEMA = {
         "type": "object",
@@ -28,7 +28,7 @@ LOG = logging.getLogger(__name__)
 @validation.add("required_platform", platform="openstack", users=True)
 @context.configure(name="cluster_templates", platform="openstack", order=470)
 class ClusterTemplateGenerator(context.Context):
-    """Context class for generating temporary cluster model for benchmarks."""
+    """Creates Magnum cluster template."""

     CONFIG_SCHEMA = {
         "type": "object",
@@ -29,7 +29,7 @@ LOG = logging.getLogger(__name__)
 @validation.add("required_platform", platform="openstack", users=True)
 @context.configure(name="clusters", platform="openstack", order=480)
 class ClusterGenerator(context.Context):
-    """Context class for generating temporary cluster for benchmarks."""
+    """Creates specified amount of Magnum clusters."""

     CONFIG_SCHEMA = {
         "type": "object",
@@ -29,7 +29,7 @@ LOG = logging.getLogger(__name__)
 @validation.add("required_platform", platform="openstack", users=True)
 @context.configure(name="monasca_metrics", platform="openstack", order=510)
 class MonascaMetricGenerator(context.Context):
-    """Context for creating metrics for benchmarks."""
+    """Creates Monasca Metrics."""

     CONFIG_SCHEMA = {
         "type": "object",
@@ -40,7 +40,7 @@ def _prepare_open_secgroup(credential, secgroup_name):
     security_groups = neutron.list_security_groups()["security_groups"]
     rally_open = [sg for sg in security_groups if sg["name"] == secgroup_name]
     if not rally_open:
-        descr = "Allow ssh access to VMs created by Rally for benchmarking"
+        descr = "Allow ssh access to VMs created by Rally"
         rally_open = neutron.create_security_group(
             {"security_group": {"name": secgroup_name,
                                 "description": descr}})["security_group"]
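The helper above only finds or creates the security group; the SSH rule itself is added in a separate call. A hedged sketch of that follow-up step with python-neutronclient follows — the helper name is hypothetical, while the rule payload follows the standard Neutron security-group-rule API.

def _allow_ssh(neutron, secgroup_id):
    """Open TCP/22 ingress on the given security group (illustrative)."""
    return neutron.create_security_group_rule({
        "security_group_rule": {
            "security_group_id": secgroup_id,
            "direction": "ingress",
            "protocol": "tcp",
            "port_range_min": 22,
            "port_range_max": 22,
            "remote_ip_prefix": "0.0.0.0/0",
        }
    })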
@@ -29,10 +29,7 @@ LOG = logging.getLogger(__name__)
 @validation.add("required_platform", platform="openstack", users=True)
 @context.configure(name="servers", platform="openstack", order=430)
 class ServerGenerator(context.Context):
-    """Context class for adding temporary servers for benchmarks.
-
-    Servers are added for each tenant.
-    """
+    """Creates specified amount of Nova Servers per each tenant."""

     CONFIG_SCHEMA = {
         "type": "object",
@@ -32,7 +32,7 @@ LOG = logging.getLogger(__name__)
 @validation.add("required_platform", platform="openstack", admin=True)
 @context.configure(name="quotas", platform="openstack", order=300)
 class Quotas(context.Context):
-    """Context class for updating benchmarks' tenants quotas."""
+    """Sets OpenStack Tenants quotas."""

     CONFIG_SCHEMA = {
         "type": "object",
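For orientation, the `quotas` context is fed per-service limits from the task file. A hedged example as the equivalent Python dict; the services, quota names, and numbers are placeholders (-1 conventionally meaning unlimited).

# Illustrative only; not taken from this commit.
example_quotas_cfg = {
    "quotas": {
        "nova": {"instances": 20, "cores": 40, "ram": -1},
        "cinder": {"volumes": 20, "gigabytes": -1}
    }
}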
@@ -33,16 +33,13 @@ LOG = logging.getLogger(__name__)

 @six.add_metaclass(abc.ABCMeta)
 class BaseCustomImageGenerator(context.Context):
-    """Base class for the contexts providing customized image with.
+    """Base plugin for the contexts providing customized image with.

-    Every context class for the specific customization must implement
+    Every context plugin for the specific customization must implement
     the method `_customize_image` that is able to connect to the server
-    using SSH and e.g. install applications inside it.
+    using SSH and install applications inside it.

-    This is used e.g. to install the benchmark application using SSH
-    access.
-
-    This base context class provides a way to prepare an image with
+    This base context plugin provides a way to prepare an image with
     custom preinstalled applications. Basically, this code boots a VM, calls
     the `_customize_image` and then snapshots the VM disk, removing the VM
     afterwards. The image UUID is stored in the user["custom_image"]["id"]
@@ -168,7 +165,7 @@ class BaseCustomImageGenerator(context.Context):
                 **kwargs)

             try:
-                LOG.debug("Installing benchmark on %r %s", server, fip["ip"])
+                LOG.debug("Installing tools on %r %s", server, fip["ip"])
                 self.customize_image(server, fip, user)

                 LOG.debug("Stopping server %r", server)
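The two hunks above describe the customization workflow: boot a VM, run `_customize_image` against it over SSH, snapshot the disk, then delete the VM. A hedged sketch of a concrete subclass follows. The class name and order value are made up, the plain `ssh` subprocess transport is a placeholder for Rally's own SSH helpers, and the `(server, fip, user)` signature is assumed from the `customize_image(server, fip, user)` call shown above.

import subprocess

from rally.plugins.openstack.context.vm.custom_image import (
    BaseCustomImageGenerator)
from rally.task import context


@context.configure(name="example_custom_image", order=501)
class ExampleCustomImageGenerator(BaseCustomImageGenerator):

    def _customize_image(self, server, fip, user):
        # Placeholder customization: drop a marker file inside the VM before
        # the base class snapshots its disk and deletes the server.
        subprocess.check_call([
            "ssh", "-o", "StrictHostKeyChecking=no",
            "cirros@%s" % fip["ip"],
            "sh -c 'echo customized > /tmp/marker'",
        ])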
@@ -33,7 +33,7 @@ LOG = logging.getLogger(__name__)
 @validation.add("required_platform", platform="openstack", admin=True)
 @context.configure(name="audit_templates", platform="openstack", order=550)
 class AuditTemplateGenerator(context.Context):
-    """Context class for adding temporary audit template for benchmarks."""
+    """Creates Watcher audit templates for tenants."""

     CONFIG_SCHEMA = {
         "type": "object",
@@ -87,8 +87,7 @@ class OpenStackScenario(scenario.Scenario):
     def clients(self, client_type, version=None):
         """Returns a python openstack client of the requested type.

-        The client will be that for one of the temporary non-administrator
-        users created before the benchmark launch.
+        Only one non-admin user is used per every run of scenario.

         :param client_type: Client type ("nova"/"glance" etc.)
         :param version: client version ("1"/"2" etc.)
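As context for the reworded docstring, this is how a scenario typically consumes `clients()`. The class below is purely illustrative and omits the registration decorator and validators seen elsewhere in this diff; `flavors.list()` is just a representative novaclient call.

from rally.plugins.openstack import scenario


class ExampleListFlavors(scenario.OpenStackScenario):
    """Illustrative scenario body, not part of this change."""

    def run(self):
        # clients("nova") is bound to one of the temporary non-admin users
        # created for the task; admin_clients("nova") would use the
        # deployment's admin credential instead.
        self.clients("nova").flavors.list()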
@@ -17,8 +17,6 @@ from rally.plugins.openstack import scenario
 from rally.plugins.openstack.scenarios.ceilometer import utils as ceiloutils
 from rally.task import validation

-"""Benchmark scenarios for Ceilometer Alarms API."""
-

 @validation.add("required_services",
                 services=[consts.Service.CEILOMETER])
@@ -99,7 +99,7 @@ class CeilometerScenario(scenario.OpenStackScenario):
                            filter_by_user_id=None,
                            filter_by_resource_id=None,
                            metadata_query=None):
-        """Create a SimpleQuery for the list benchmarks.
+        """Create a SimpleQuery used by samples list API.

        :param filter_by_project_id: add a project id to query
        :param filter_by_user_id: add a user id to query
@@ -13,10 +13,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-"""
-Benchmark scenarios for Keystone.
-"""
-
 from rally.common import logging
 from rally.plugins.openstack import scenario
 from rally.plugins.openstack.services.identity import identity
@@ -44,7 +44,7 @@ class ListHypervisors(utils.NovaScenario):
 @scenario.configure(name="NovaHypervisors.list_and_get_hypervisors",
                     platform="openstack")
 class ListAndGetHypervisors(utils.NovaScenario):
-    """Benchmark scenario for Nova hypervisors."""
+
     def run(self, detailed=True):
         """List and Get hypervisors.

@@ -81,6 +81,7 @@ class StatisticsHypervisors(utils.NovaScenario):
 @scenario.configure(name="NovaHypervisors.list_and_get_uptime_hypervisors",
                     platform="openstack")
 class ListAndGetUptimeHypervisors(utils.NovaScenario):
+
     def run(self, detailed=True):
         """List hypervisors,then display the uptime of it.

@@ -103,6 +104,7 @@ class ListAndGetUptimeHypervisors(utils.NovaScenario):
 @scenario.configure(name="NovaHypervisors.list_and_search_hypervisors",
                     platform="openstack")
 class ListAndSearchHypervisors(utils.NovaScenario):
+
     def run(self, detailed=True):
         """List all servers belonging to specific hypervisor.

@@ -1099,7 +1099,7 @@ class BootServerFromVolumeSnapshot(utils.NovaScenario,
     name="NovaServers.boot_server_associate_and_dissociate_floating_ip",
     platform="openstack")
 class BootServerAssociateAndDissociateFloatingIP(utils.NovaScenario):
-    """"Benchmark scenarios for Nova FloatingIp API."""
+
     def run(self, image, flavor, **kwargs):
         """Boot a server associate and dissociate a floating IP from it.

@@ -1129,6 +1129,7 @@ class BootServerAssociateAndDissociateFloatingIP(utils.NovaScenario):
     name="NovaServers.boot_server_and_list_interfaces",
     platform="openstack")
 class BootServerAndListInterfaces(utils.NovaScenario):
+
     def run(self, image, flavor, **kwargs):
         """Boot a server and list interfaces attached to it.

@@ -22,9 +22,6 @@ from rally.task import validation
 LOG = logging.getLogger(__name__)


-"""Benchmark scenarios for Sahara jobs."""
-
-
 @validation.add("required_services", services=[consts.Service.SAHARA])
 @validation.add("required_contexts", contexts=["users", "sahara_image",
                                                "sahara_job_binaries",
@@ -409,7 +409,7 @@ class SaharaScenario(scenario.OpenStackScenario):
         scale_object required by Sahara API and waits for the scaling to
         complete.

-        NOTE: This method is not meant to be called directly in benchmarks.
+        NOTE: This method is not meant to be called directly in scenarios.
         There two specific scaling methods of up and down scaling which have
         different atomic timers.
        """
@@ -25,9 +25,9 @@ class Stack(common_utils.RandomNameGeneratorMixin):

    Usage:
     >>> stack = Stack(scenario, task, "template.yaml", parameters={"nodes": 3})
-    >>> run_benchmark(stack)
+    >>> do_testing(stack)
     >>> stack.update(nodes=4)
-    >>> run_benchmark(stack)
+    >>> do_testing(stack)
    """

    def __init__(self, scenario, task, template, files, parameters=None):
@@ -12,8 +12,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-"""Run HTTP benchmark by runcommand_heat scenario."""
-
 import json
 import re
 import subprocess
@@ -923,10 +923,7 @@ class TaskTestCase(unittest.TestCase):
         task.join()
         results = json.loads(rally("task results"))
         iterations_completed = len(results[0]["result"])
-        # NOTE(msdubov): check that the task is stopped after first runner
-        # benchmark finished all its iterations
         self.assertEqual(3, iterations_completed)
-        # NOTE(msdubov): check that the next benchmark scenario is not started
         self.assertEqual(1, len(results))
         self.assertIn("aborted", rally("task status"))

@@ -27,7 +27,7 @@ CTX = "rally.plugins.openstack.context.sahara"

 class SaharaClusterTestCase(test.ScenarioTestCase):

-    patch_benchmark_utils = False
+    patch_task_utils = False

     def setUp(self):
         super(SaharaClusterTestCase, self).setUp()
@@ -13,8 +13,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-"""Tests for the Benchmark VM image context."""
-
 import mock

 from rally.plugins.openstack.context.vm import custom_image
@@ -277,7 +277,7 @@ class HeatScenarioTestCase(test.ScenarioTestCase):


 class HeatScenarioNegativeTestCase(test.ScenarioTestCase):
-    patch_benchmark_utils = False
+    patch_task_utils = False

     def test_failed_create_stack(self):
         self.clients("heat").stacks.create.return_value = {
@@ -36,7 +36,7 @@ class SaharaScenarioTestCase(test.ScenarioTestCase):
     # get_from_manager() calls. As a result, the tests below do more
     # integrated/functional testing of wait_for() calls, and we can't
     # just mock out wait_for and friends the way we usually do.
-    patch_benchmark_utils = False
+    patch_task_utils = False

     def setUp(self):
         super(SaharaScenarioTestCase, self).setUp()
@@ -72,8 +72,8 @@ class DBTestCase(TestCase):

 class ScenarioTestCase(TestCase):
     """Base class for Scenario tests using mocked self.clients."""
-    benchmark_utils = "rally.task.utils"
-    patch_benchmark_utils = True
+    task_utils = "rally.task.utils"
+    patch_task_utils = True

     def client_factory(self, client_type, version=None, admin=False):
         """Create a new client object."""
@@ -121,17 +121,17 @@ class ScenarioTestCase(TestCase):

     def setUp(self):
         super(ScenarioTestCase, self).setUp()
-        if self.patch_benchmark_utils:
+        if self.patch_task_utils:
             self.mock_resource_is = fixtures.MockPatch(
-                self.benchmark_utils + ".resource_is")
+                self.task_utils + ".resource_is")
             self.mock_get_from_manager = fixtures.MockPatch(
-                self.benchmark_utils + ".get_from_manager")
+                self.task_utils + ".get_from_manager")
             self.mock_wait_for = fixtures.MockPatch(
-                self.benchmark_utils + ".wait_for")
+                self.task_utils + ".wait_for")
             self.mock_wait_for_delete = fixtures.MockPatch(
-                self.benchmark_utils + ".wait_for_delete")
+                self.task_utils + ".wait_for_delete")
             self.mock_wait_for_status = fixtures.MockPatch(
-                self.benchmark_utils + ".wait_for_status")
+                self.task_utils + ".wait_for_status")
             self.useFixture(self.mock_resource_is)
             self.useFixture(self.mock_get_from_manager)
             self.useFixture(self.mock_wait_for)
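The renamed `task_utils`/`patch_task_utils` attributes above drive a standard `fixtures.MockPatch` pattern. A hedged, self-contained illustration of that pattern follows; the test class is hypothetical and it patches the same `rally.task.utils` path the fixture names point at, so it assumes Rally is importable.

import fixtures
import testtools


class ExamplePatchingTestCase(testtools.TestCase):

    task_utils = "rally.task.utils"
    patch_task_utils = True

    def setUp(self):
        super(ExamplePatchingTestCase, self).setUp()
        if self.patch_task_utils:
            # useFixture() activates the patch and reverts it automatically
            # when the test finishes.
            self.mock_wait_for_status = self.useFixture(
                fixtures.MockPatch(self.task_utils + ".wait_for_status"))

    def test_wait_for_status_is_patched(self):
        # .mock is the mock object standing in for the patched callable.
        self.assertIsNotNone(self.mock_wait_for_status.mock)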