Added Sahara Clusters scenario

The scenario creates and deletes a Hadoop cluster.

Change-Id: I1280f5f3f4cd7415788a3e474dd1852c68a3c35c
This commit is contained in:
parent
24218d9ff8
commit
e2a3e1c941
@ -0,0 +1,31 @@
|
||||
{
    "SaharaClusters.create_and_delete_cluster": [
        {
            "args": {
                "flavor": {
                    "name": "m1.small"
                },
                "node_count": 2,
                "plugin_name": "vanilla",
                "hadoop_version": "2.3.0"
            },
            "runner": {
                "type": "constant",
                "times": 4,
                "concurrency": 2
            },
            "context": {
                "users": {
                    "tenants": 1,
                    "users_per_tenant": 1
                },
                "sahara_image": {
                    "image_url": "http://sahara-files.mirantis.com/sahara-icehouse-vanilla-2.3.0-ubuntu-13.10.qcow2",
                    "username": "ubuntu",
                    "plugin_name": "vanilla",
                    "hadoop_version": "2.3.0"
                }
            }
        }
    ]
}
|
@ -0,0 +1,22 @@
|
||||
---
  SaharaClusters.create_and_delete_cluster:
    -
      args:
        flavor:
          name: "m1.small"
        node_count: 2
        plugin_name: "vanilla"
        hadoop_version: "2.3.0"
      runner:
        type: "constant"
        times: 4
        concurrency: 2
      context:
        users:
          tenants: 1
          users_per_tenant: 1
        sahara_image:
          image_url: "http://sahara-files.mirantis.com/sahara-icehouse-vanilla-2.3.0-ubuntu-13.10.qcow2"
          username: "ubuntu"
          plugin_name: "vanilla"
          hadoop_version: "2.3.0"
|
@ -331,6 +331,17 @@
|
||||
#nova_server_image_delete_poll_interval=2.0


#
# Options defined in rally.benchmark.scenarios.sahara.utils
#

# A timeout in seconds for a cluster create operation
#cluster_create_timeout=600

# Cluster status polling interval in seconds
#cluster_check_interval=5


[database]

#
|
||||
|
62
rally/benchmark/scenarios/sahara/clusters.py
Normal file
62
rally/benchmark/scenarios/sahara/clusters.py
Normal file
@ -0,0 +1,62 @@
|
||||
# Copyright 2014: Mirantis Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from rally.benchmark.scenarios import base
|
||||
from rally.benchmark.scenarios.sahara import utils
|
||||
from rally.benchmark import types
|
||||
from rally.benchmark import validation
|
||||
from rally import consts
|
||||
from rally.openstack.common import log as logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SaharaClusters(utils.SaharaScenario):
    """Benchmark scenarios that exercise the Sahara Clusters API."""

    @types.set(flavor=types.FlavorResourceType)
    @validation.add(validation.flavor_exists('flavor'))
    @validation.required_services(consts.Service.SAHARA)
    @validation.required_contexts("users", "sahara_image")
    @validation.add(validation.number("node_count", minval=2,
                                      integer_only=True))
    @base.scenario(context={"cleanup": ["sahara"]})
    def create_and_delete_cluster(self, flavor, node_count,
                                  plugin_name="vanilla",
                                  hadoop_version="2.3.0"):
        """Test the Sahara Cluster launch and delete commands.

        This scenario launches a Hadoop cluster, waits until it becomes
        'Active' and deletes it.

        :param flavor: The Nova flavor that will be for nodes in the
        created node groups
        :param node_count: The total number of instances in a cluster (>= 2)
        :param plugin_name: The name of a provisioning plugin
        :param hadoop_version: The version of Hadoop distribution supported by
        the specified plugin.
        """

        # The "sahara_image" context registers one image per tenant and
        # stores its id in the scenario context under "sahara_images".
        tenant_id = self.clients("keystone").tenant_id
        image_id = self.context()["sahara_images"][tenant_id]

        LOG.debug("Using Image: %s" % image_id)

        launch_kwargs = {
            "flavor_id": flavor,
            "image_id": image_id,
            "node_count": node_count,
            "plugin_name": plugin_name,
            "hadoop_version": hadoop_version,
        }
        cluster = self._launch_cluster(**launch_kwargs)

        self._delete_cluster(cluster)
|
@ -13,10 +13,28 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from rally.benchmark.scenarios import base as scenario_base
|
||||
from oslo.config import cfg
|
||||
from saharaclient.api import base as sahara_base
|
||||
|
||||
from rally.benchmark.scenarios import base
|
||||
from rally.benchmark import utils as bench_utils
|
||||
from rally.openstack.common import log as logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
CONF = cfg.CONF
|
||||
|
||||
CREATE_CLUSTER_OPTS = [
|
||||
cfg.IntOpt("cluster_create_timeout", default=600,
|
||||
help="A timeout in seconds for a cluster create operation"),
|
||||
cfg.IntOpt("cluster_check_interval", default=5,
|
||||
help="Cluster status polling interval in seconds")
|
||||
]
|
||||
|
||||
benchmark_group = cfg.OptGroup(name='benchmark', title='benchmark options')
|
||||
CONF.register_opts(CREATE_CLUSTER_OPTS, group=benchmark_group)
|
||||
|
||||
|
||||
class SaharaScenario(scenario_base.Scenario):
|
||||
class SaharaScenario(base.Scenario):
|
||||
|
||||
RESOURCE_NAME_LENGTH = 20
|
||||
|
||||
@ -34,14 +52,26 @@ class SaharaScenario(scenario_base.Scenario):
|
||||
}
|
||||
}
|
||||
|
||||
@scenario_base.atomic_action_timer('sahara.list_node_group_templates')
|
||||
REPLICATION_CONFIGS = {
|
||||
"vanilla": {
|
||||
"1.2.1": {
|
||||
"target": "HDFS",
|
||||
"config_name": "dfs.replication"
|
||||
},
|
||||
"2.3.0": {
|
||||
"target": "HDFS",
|
||||
"config_name": "dfs.replication"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@base.atomic_action_timer('sahara.list_node_group_templates')
|
||||
def _list_node_group_templates(self):
|
||||
"""Returns user Node Group Templates list."""
|
||||
|
||||
return self.clients("sahara").node_group_templates.list()
|
||||
|
||||
@scenario_base.atomic_action_timer(
|
||||
'sahara.create_master_node_group_template')
|
||||
@base.atomic_action_timer('sahara.create_master_node_group_template')
|
||||
def _create_master_node_group_template(self, flavor_id, plugin_name,
|
||||
hadoop_version):
|
||||
"""Creates a master Node Group Template with a random name.
|
||||
@ -63,8 +93,7 @@ class SaharaScenario(scenario_base.Scenario):
|
||||
node_processes=self.NODE_PROCESSES[plugin_name][hadoop_version]
|
||||
["master"])
|
||||
|
||||
@scenario_base.atomic_action_timer(
|
||||
'sahara.create_worker_node_group_template')
|
||||
@base.atomic_action_timer('sahara.create_worker_node_group_template')
|
||||
def _create_worker_node_group_template(self, flavor_id, plugin_name,
|
||||
hadoop_version):
|
||||
"""Creates a worker Node Group Template with a random name.
|
||||
@ -86,7 +115,7 @@ class SaharaScenario(scenario_base.Scenario):
|
||||
node_processes=self.NODE_PROCESSES[plugin_name][hadoop_version]
|
||||
["worker"])
|
||||
|
||||
@scenario_base.atomic_action_timer('sahara.delete_node_group_template')
|
||||
@base.atomic_action_timer('sahara.delete_node_group_template')
|
||||
def _delete_node_group_template(self, node_group):
|
||||
"""Deletes a Node Group Template by id.
|
||||
|
||||
@ -95,3 +124,86 @@ class SaharaScenario(scenario_base.Scenario):
|
||||
"""
|
||||
|
||||
self.clients("sahara").node_group_templates.delete(node_group.id)
|
||||
|
||||
@base.atomic_action_timer('sahara.launch_cluster')
def _launch_cluster(self, plugin_name, hadoop_version, flavor_id,
                    image_id, node_count):
    """Creates a cluster and wait until it becomes Active.

    The cluster is created with two node groups. The master Node Group is
    created with one instance. The worker node group contains
    node_count - 1 instances.

    :param plugin_name: The provisioning plugin name
    :param hadoop_version: Hadoop version supported by the plugin
    :param flavor_id: The flavor which will be used to create instances
    :param image_id: The image id that will be used to boot instances
    :param node_count: The total number of instances. 1 master node, others
    for the workers
    :return: The created cluster
    """

    # Hoist the per-plugin process table lookup out of the group literals.
    processes = self.NODE_PROCESSES[plugin_name][hadoop_version]
    node_groups = [
        {
            "name": "master-ng",
            "flavor_id": flavor_id,
            "node_processes": processes["master"],
            "count": 1
        }, {
            "name": "worker-ng",
            "flavor_id": flavor_id,
            "node_processes": processes["worker"],
            "count": node_count - 1
        }
    ]

    name = self._generate_random_name(prefix="sahara-cluster-")

    # 3 is a default Hadoop replication; never request more replicas
    # than there are worker nodes.
    replication_value = min(node_count - 1, 3)

    conf = self.REPLICATION_CONFIGS[plugin_name][hadoop_version]
    LOG.debug("Using replication factor: %s" % replication_value)

    cluster_object = self.clients("sahara").clusters.create(
        name=name,
        plugin_name=plugin_name,
        hadoop_version=hadoop_version,
        node_groups=node_groups,
        default_image_id=image_id,
        cluster_configs={conf["target"]: {
            conf["config_name"]: replication_value}
        }
    )

    def is_active(cluster_id):
        # Poll Sahara until the cluster reports the "Active" status.
        status = self.clients("sahara").clusters.get(cluster_id).status
        return status.lower() == "active"

    bench_utils.wait_for(
        resource=cluster_object.id, is_ready=is_active,
        timeout=CONF.benchmark.cluster_create_timeout,
        check_interval=CONF.benchmark.cluster_check_interval)

    # Re-fetch so the caller gets the cluster in its Active state.
    return self.clients("sahara").clusters.get(cluster_object.id)
|
||||
|
||||
@base.atomic_action_timer('sahara.delete_cluster')
def _delete_cluster(self, cluster):
    """Calls a Cluster delete by id and waits for complete deletion.

    :param cluster: The Cluster to be deleted
    :return:
    """

    self.clients("sahara").clusters.delete(cluster.id)

    def is_deleted(cl_id):
        # Sahara raises APIException on GET once the cluster is gone.
        try:
            self.clients("sahara").clusters.get(cl_id)
        except sahara_base.APIException:
            return True
        return False

    bench_utils.wait_for(resource=cluster.id, is_ready=is_deleted)
|
||||
|
52
tests/benchmark/scenarios/sahara/test_clusters.py
Normal file
52
tests/benchmark/scenarios/sahara/test_clusters.py
Normal file
@ -0,0 +1,52 @@
|
||||
# Copyright 2014: Mirantis Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
|
||||
from rally.benchmark.scenarios.sahara import clusters
|
||||
from tests import test
|
||||
|
||||
SAHARA_CLUSTERS = "rally.benchmark.scenarios.sahara.clusters.SaharaClusters"
|
||||
SAHARA_UTILS = 'rally.benchmark.scenarios.sahara.utils'
|
||||
|
||||
|
||||
class SaharaClustersTestCase(test.TestCase):
    """Unit tests for the SaharaClusters benchmark scenario.

    NOTE: the class was originally named SaharaNodeGroupTemplatesTestCase,
    a copy-paste from the node-group-template tests; renamed to match the
    clusters scenario this module actually tests.
    """

    @mock.patch(SAHARA_CLUSTERS + "._delete_cluster")
    @mock.patch(SAHARA_CLUSTERS + "._launch_cluster",
                return_value=mock.MagicMock(id=42))
    @mock.patch(SAHARA_UTILS + '.SaharaScenario.clients')
    def test_create_and_delete_cluster(self, mock_clients, mock_launch_cluster,
                                       mock_delete_cluster):

        clusters_scenario = clusters.SaharaClusters()

        # The scenario resolves the image id from the context by tenant id.
        clusters_scenario.clients("keystone").tenant_id = "test_tenant"
        clusters_scenario.context = mock.MagicMock(return_value={
            "sahara_images": {"test_tenant": "test_image"}}
        )
        clusters_scenario.create_and_delete_cluster("test_flavor", 5,
                                                    "test_plugin",
                                                    "test_version")

        mock_launch_cluster.assert_called_once_with(
            flavor_id="test_flavor",
            image_id="test_image",
            node_count=5,
            plugin_name="test_plugin",
            hadoop_version="test_version")

        # The cluster returned by launch must be the one deleted.
        mock_delete_cluster.assert_called_once_with(
            mock_launch_cluster.return_value)
|
@ -14,6 +14,7 @@
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
from saharaclient.api import base as sahara_base
|
||||
|
||||
from rally.benchmark.scenarios.sahara import utils
|
||||
from tests.benchmark.scenarios import test_base
|
||||
@ -108,3 +109,91 @@ class SaharaNodeGroupTemplatesScenarioTestCase(test.TestCase):
|
||||
|
||||
self._test_atomic_action_timer(scenario.atomic_actions(),
|
||||
'sahara.delete_node_group_template')
|
||||
|
||||
@mock.patch(SAHARA_UTILS + '.SaharaScenario._generate_random_name',
            return_value="random_name")
@mock.patch(SAHARA_UTILS + '.SaharaScenario.clients')
def test_launch_cluster(self, mock_clients, mock_random_name):
    """_launch_cluster builds 1 master + (n - 1) workers and waits."""

    scenario = utils.SaharaScenario()

    # Stub out the plugin metadata tables the method consults.
    scenario.NODE_PROCESSES = {
        "test_plugin": {
            "test_version": {
                "master": ["p1"],
                "worker": ["p2"]
            }
        }
    }
    scenario.REPLICATION_CONFIGS = {
        "test_plugin": {
            "test_version": {
                "target": "HDFS",
                "config_name": "dfs.replication"
            }
        }
    }

    mock_clients("sahara").clusters.create.return_value = mock.MagicMock(
        id="test_cluster_id")

    mock_clients("sahara").clusters.get.return_value = mock.MagicMock(
        status="active")

    scenario._launch_cluster(
        plugin_name="test_plugin",
        hadoop_version="test_version",
        flavor_id="test_flavor",
        image_id="test_image",
        node_count=42
    )

    expected_node_groups = [
        {
            "name": "master-ng",
            "flavor_id": "test_flavor",
            "node_processes": ["p1"],
            "count": 1
        }, {
            "name": "worker-ng",
            "flavor_id": "test_flavor",
            "node_processes": ["p2"],
            # 42 total instances -> 41 workers beside the single master.
            "count": 41
        }
    ]

    mock_clients("sahara").clusters.create.assert_called_once_with(
        name="random_name",
        plugin_name="test_plugin",
        hadoop_version="test_version",
        node_groups=expected_node_groups,
        default_image_id="test_image",
        # Replication is capped at the Hadoop default of 3.
        cluster_configs={"HDFS": {"dfs.replication": 3}}
    )

    self._test_atomic_action_timer(scenario.atomic_actions(),
                                   'sahara.launch_cluster')
|
||||
|
||||
@mock.patch(SAHARA_UTILS + '.SaharaScenario.clients')
def test_delete_cluster(self, mock_clients):
    """_delete_cluster issues the delete and polls until GET raises."""

    scenario = utils.SaharaScenario()
    cluster = mock.MagicMock(id=42)
    # First poll still finds the cluster; second raises -> deleted.
    mock_clients("sahara").clusters.get.side_effect = [
        cluster, sahara_base.APIException()
    ]

    scenario._delete_cluster(cluster)

    mock_clients("sahara").clusters.delete.assert_called_once_with(42)

    mock_clients("sahara").clusters.get.assert_has_calls(
        [mock.call(42), mock.call(42)])

    self._test_atomic_action_timer(scenario.atomic_actions(),
                                   'sahara.delete_cluster')
|
||||
|
Loading…
Reference in New Issue
Block a user