Allowing heartbeat test to run against C* 2.0. Fixing issue with legacy table test.

GregBestland
2016-04-27 12:26:45 -05:00
parent af5a78f524
commit 4e6468574f
2 changed files with 14 additions and 8 deletions


@@ -29,6 +29,7 @@ from cassandra.io.asyncorereactor import AsyncoreConnection
 from cassandra.protocol import QueryMessage
 from cassandra.connection import Connection
 from cassandra.policies import WhiteListRoundRobinPolicy, HostStateListener
+from cassandra.pool import HostConnectionPool
 from tests import is_monkey_patched
 from tests.integration import use_singledc, PROTOCOL_VERSION, get_node
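Note: the added HostConnectionPool import reflects that with protocol versions 1 and 2 (the highest Cassandra 2.0 speaks) the driver keeps a pool of several connections per host, while protocol v3+ uses a single connection holder per host. A minimal sketch of that distinction, reusing the PROTOCOL_VERSION constant this test already imports (the helper name is hypothetical):

from tests.integration import PROTOCOL_VERSION

def expects_connection_pools():
    # Hypothetical helper: protocol v1/v2 (C* 2.0 and earlier) -> pooled
    # connections per host; v3+ -> one multiplexed connection per host.
    return PROTOCOL_VERSION < 3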
@@ -125,6 +126,7 @@ class HeartbeatTest(unittest.TestCase):
             rs.result()
             current_host = str(rs._current_host)
             count += 1
+            time.sleep(.1)
         self.assertLess(count, 100, "Never connected to the first node")
         new_connections = self.wait_for_connections(host, self.cluster)
         self.assertIsNone(test_listener.host_down)
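The new time.sleep(.1) keeps the reconnect-wait loop from spinning; the loop is already bounded at 100 iterations via the assertLess check. A generalized form of that pattern, shown only as a hypothetical sketch (not part of the test):

import time

def poll(check, attempts=100, delay=0.1):
    # Hypothetical helper: retry `check` a bounded number of times, sleeping
    # between attempts instead of busy-waiting; returns True once it passes.
    for _ in range(attempts):
        if check():
            return True
        time.sleep(delay)
    return False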
@@ -138,8 +140,12 @@ class HeartbeatTest(unittest.TestCase):
         holders = cluster.get_connection_holders()
         for conn in holders:
             if host == str(getattr(conn, 'host', '')):
-                if conn._connection is not None:
-                    connections.append(conn._connection)
+                if isinstance(conn, HostConnectionPool):
+                    if conn._connections is not None:
+                        connections.append(conn._connections)
+                else:
+                    if conn._connection is not None:
+                        connections.append(conn._connection)
         return connections

     def wait_for_connections(self, host, cluster):
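On a HostConnectionPool, _connections is the pool's list of connections, so the new branch appends that list wholesale. A flattened variant of the same dispatch, shown purely as a sketch (the commit itself keeps the list intact):

from cassandra.pool import HostConnectionPool

def fetch_connections_flat(host, cluster):
    # Sketch: collect individual Connection objects for `host`, whether the
    # holder is a HostConnectionPool (protocol v1/v2) or a single-connection
    # holder (protocol v3+).
    connections = []
    for holder in cluster.get_connection_holders():
        if host != str(getattr(holder, 'host', '')):
            continue
        if isinstance(holder, HostConnectionPool):
            connections.extend(holder._connections or [])
        elif getattr(holder, '_connection', None) is not None:
            connections.append(holder._connection)
    return connections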
@@ -159,7 +165,7 @@ class HeartbeatTest(unittest.TestCase):
             connections = self.fetch_connections(host, cluster)
             if len(connections) is 0:
                 return
-            time.sleep(.1)
+            time.sleep(.5)
         self.fail("Connections never cleared")


@@ -1134,14 +1134,14 @@ Approximate structure, for reference:
 CREATE TABLE legacy.composite_comp_with_col (
     key blob,
-    t timeuuid,
     b blob,
     s text,
+    t timeuuid,
     "b@6869746d65776974686d75736963" blob,
     "b@6d616d6d616a616d6d61" blob,
-    PRIMARY KEY (key, t, b, s)
+    PRIMARY KEY (key, b, s, t)
 ) WITH COMPACT STORAGE
-    AND CLUSTERING ORDER BY (t ASC, b ASC, s ASC)
+    AND CLUSTERING ORDER BY (b ASC, s ASC, t ASC)
     AND caching = '{"keys":"ALL", "rows_per_partition":"NONE"}'
     AND comment = 'Stores file meta data'
     AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'}
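The reference docstring is updated because, for a COMPACT STORAGE table with a composite comparator, the clustering columns follow the comparator's component order, so the driver reconstructs the schema with clustering order (b, s, t) rather than the order the table was originally written in. A sketch of how one could confirm this against a running node, assuming the 'legacy' keyspace from these fixtures exists (not part of the test):

from cassandra.cluster import Cluster

cluster = Cluster()
cluster.connect()
table_meta = cluster.metadata.keyspaces['legacy'].tables['composite_comp_with_col']
print([c.name for c in table_meta.clustering_key])   # expected: ['b', 's', 't']
print(table_meta.export_as_string())
cluster.shutdown()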
@@ -1262,8 +1262,8 @@ Approximate structure, for reference:
 CREATE TABLE legacy.composite_comp_no_col (
     key blob,
-    column1 'org.apache.cassandra.db.marshal.DynamicCompositeType(org.apache.cassandra.db.marshal.TimeUUIDType, org.apache.cassandra.db.marshal.BytesType, org.apache.cassandra.db.marshal.UTF8Type)',
-    column2 text,
+    column1 'org.apache.cassandra.db.marshal.DynamicCompositeType(org.apache.cassandra.db.marshal.BytesType, org.apache.cassandra.db.marshal.UTF8Type, org.apache.cassandra.db.marshal.TimeUUIDType)',
+    column2 timeuuid,
     value blob,
     PRIMARY KEY (key, column1, column1, column2)
 ) WITH COMPACT STORAGE
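For the no-column variant, the docstring now matches the component order and column2 type the driver actually reports for the dynamic composite comparator. A small sketch for inspecting that from table metadata, assuming the ColumnMetadata.cql_type attribute of the 3.x driver (not part of the test):

from cassandra.cluster import Cluster

cluster = Cluster()
cluster.connect()
no_col = cluster.metadata.keyspaces['legacy'].tables['composite_comp_no_col']
for name, col in no_col.columns.items():
    # cql_type assumed available on ColumnMetadata; column2 should print as timeuuid
    print(name, col.cql_type)
cluster.shutdown()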