PYTHON-561 adding test for duplicate metric names

GregBestland
2016-07-25 17:23:25 -05:00
parent 31dd3dcfc0
commit 5a23f1b1cc
4 changed files with 68 additions and 7 deletions

View File

@@ -187,6 +187,9 @@ class Metrics(object):
         Default is 'cassandra-<num>'.
         """
+        if self.stats_name == stats_name:
+            return
         if stats_name in scales._Stats.stats:
             raise ValueError('"{0}" already exists in stats.'.format(stats_name))
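The guard added above makes set_stats_name idempotent for the same Metrics instance while still rejecting a name another cluster has already registered with scales. A minimal sketch of the intended behavior, assuming two metrics-enabled clusters and a reachable Cassandra node; the names "appcluster" and "devops" are illustrative:

from cassandra.cluster import Cluster

cluster_a = Cluster(metrics_enabled=True)
cluster_b = Cluster(metrics_enabled=True)

cluster_a.metrics.set_stats_name("appcluster")
cluster_a.metrics.set_stats_name("appcluster")      # no-op: same name on the same instance returns early

try:
    cluster_b.metrics.set_stats_name("appcluster")  # name already present in scales._Stats.stats
except ValueError as err:
    print(err)                                      # '"appcluster" already exists in stats.'

cluster_b.metrics.set_stats_name("devops")          # a distinct name is accepted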

View File

@@ -538,8 +538,8 @@ class BasicKeyspaceUnitTestCase(unittest.TestCase):
         execute_with_long_wait_retry(cls.session, ddl)

     @classmethod
-    def common_setup(cls, rf, keyspace_creation=True, create_class_table=False):
-        cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
+    def common_setup(cls, rf, keyspace_creation=True, create_class_table=False, metrics=False):
+        cls.cluster = Cluster(protocol_version=PROTOCOL_VERSION, metrics_enabled=metrics)
         cls.session = cls.cluster.connect()
         cls.ks_name = cls.__name__.lower()
         if keyspace_creation:
@@ -592,6 +592,7 @@ class MockLoggingHandler(logging.Handler):
             count+=1
         return count

 class BasicExistingKeyspaceUnitTestCase(BasicKeyspaceUnitTestCase):
     """
     This is basic unit test defines class level teardown and setup methods. It assumes that keyspace is already defined, or created as part of the test.
@@ -646,7 +647,7 @@ class BasicSharedKeyspaceUnitTestCaseWTable(BasicSharedKeyspaceUnitTestCase):
""" """
@classmethod @classmethod
def setUpClass(self): def setUpClass(self):
self.common_setup(2, True) self.common_setup(3, True, True, True)
class BasicSharedKeyspaceUnitTestCaseRF3(BasicSharedKeyspaceUnitTestCase): class BasicSharedKeyspaceUnitTestCaseRF3(BasicSharedKeyspaceUnitTestCase):
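The new metrics flag lets shared test fixtures opt into driver metrics at class setup, and BasicSharedKeyspaceUnitTestCaseWTable now does so with a replication factor of 3. A rough sketch of how a subclass would use the extended common_setup; the class name here is hypothetical:

class MetricsEnabledTestCase(BasicKeyspaceUnitTestCase):
    @classmethod
    def setUpClass(cls):
        # rf=3, create the keyspace, create the class-level table, enable driver metrics
        cls.common_setup(3, keyspace_creation=True, create_class_table=True, metrics=True)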

View File

@@ -27,6 +27,7 @@ from cassandra import ConsistencyLevel, WriteTimeout, Unavailable, ReadTimeout
 from cassandra.cluster import Cluster, NoHostAvailable
 from tests.integration import get_cluster, get_node, use_singledc, PROTOCOL_VERSION, execute_until_pass
 from greplin import scales
+from tests.integration import BasicSharedKeyspaceUnitTestCaseWTable

 def setup_module():
     use_singledc()
@@ -174,16 +175,24 @@ class MetricsTests(unittest.TestCase):
         # # TODO: Look for ways to generate retries
         # pass

+class MetricsNamespaceTest(BasicSharedKeyspaceUnitTestCaseWTable):
     def test_metrics_per_cluster(self):
         """
-        Test that metrics are per cluster.
+        Test to validate that metrics can be scoped to individual clusters.
+
+        @since 3.6.0
+        @jira_ticket PYTHON-561
+        @expected_result metrics should be scoped to a cluster level
+        @test_category metrics
         """
         cluster2 = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION,
                            default_retry_policy=FallthroughRetryPolicy())
-        session2 = cluster2.connect("test3rf", wait_for_all_pools=True)
+        cluster2.connect(self.ks_name, wait_for_all_pools=True)

-        query = SimpleStatement("SELECT * FROM test", consistency_level=ConsistencyLevel.ALL)
+        query = SimpleStatement("SELECT * FROM {0}.{0}".format(self.ks_name), consistency_level=ConsistencyLevel.ALL)
         self.session.execute(query)

         # Pause node so it shows as unreachable to coordinator
@@ -191,7 +200,7 @@ class MetricsTests(unittest.TestCase):
         try:
             # Test write
-            query = SimpleStatement("INSERT INTO test (k, v) VALUES (2, 2)", consistency_level=ConsistencyLevel.ALL)
+            query = SimpleStatement("INSERT INTO {0}.{0} (k, v) VALUES (2, 2)".format(self.ks_name), consistency_level=ConsistencyLevel.ALL)
             with self.assertRaises(WriteTimeout):
                 self.session.execute(query, timeout=None)
         finally:
@@ -217,3 +226,50 @@ class MetricsTests(unittest.TestCase):
         # Test access by stats_name
         self.assertEqual(0.0, scales.getStats()['cluster2-metrics']['request_timer']['mean'])

+        cluster2.shutdown()
+
+    def test_duplicate_metrics_per_cluster(self):
+        """
+        Test to validate that cluster metric names can't overlap.
+
+        @since 3.6.0
+        @jira_ticket PYTHON-561
+        @expected_result metric names should not be allowed to be the same
+        @test_category metrics
+        """
+        cluster2 = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION,
+                           default_retry_policy=FallthroughRetryPolicy())
+        cluster3 = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION,
+                           default_retry_policy=FallthroughRetryPolicy())
+
+        # Ensure duplicate metric names are not allowed
+        cluster2.metrics.set_stats_name("appcluster")
+        # Re-setting the same name on the same cluster is a no-op
+        cluster2.metrics.set_stats_name("appcluster")
+        with self.assertRaises(ValueError):
+            cluster3.metrics.set_stats_name("appcluster")
+        cluster3.metrics.set_stats_name("devops")
+
+        session2 = cluster2.connect(self.ks_name, wait_for_all_pools=True)
+        session3 = cluster3.connect(self.ks_name, wait_for_all_pools=True)
+
+        # Basic validation that naming metrics doesn't impact their segregation or accuracy
+        for i in range(10):
+            query = SimpleStatement("SELECT * FROM {0}.{0}".format(self.ks_name), consistency_level=ConsistencyLevel.ALL)
+            session2.execute(query)
+
+        for i in range(5):
+            query = SimpleStatement("SELECT * FROM {0}.{0}".format(self.ks_name), consistency_level=ConsistencyLevel.ALL)
+            session3.execute(query)
+
+        self.assertEqual(list(cluster2.metrics.get_stats().values())[2]['count'], 10)
+        self.assertEqual(list(cluster3.metrics.get_stats().values())[2]['count'], 5)
+
+        # Check scales to ensure they are appropriately named
+        self.assertTrue("appcluster" in scales._Stats.stats.keys())
+        self.assertTrue("devops" in scales._Stats.stats.keys())

View File

@@ -4,6 +4,7 @@ Helper module to populate a dummy Cassandra tables with data.
 from tests.integration.datatype_utils import PRIMITIVE_DATATYPES, get_sample

 def create_table_with_all_types(table_name, session, N):
     """
     Method that given a table_name and session construct a table that contains