Merge branch '292'

Conflicts:
	cassandra/metadata.py
	tests/integration/standard/test_metadata.py
This commit is contained in:
Adam Holmberg
2015-10-20 16:23:00 -05:00
20 changed files with 36 additions and 630 deletions

View File

@@ -1225,64 +1225,6 @@ class Cluster(object):
return SchemaTargetType.KEYSPACE
return None
def refresh_schema(self, keyspace=None, table=None, usertype=None, function=None, aggregate=None, max_schema_agreement_wait=None):
"""
.. deprecated:: 2.6.0
Use refresh_*_metadata instead
Synchronously refresh schema metadata.
{keyspace, table, usertype} are string names of the respective entities.
``function`` is a :class:`cassandra.UserFunctionDescriptor`.
``aggregate`` is a :class:`cassandra.UserAggregateDescriptor`.
If none of ``{keyspace, table, usertype, function, aggregate}`` are specified, the entire schema is refreshed.
If any of ``{keyspace, table, usertype, function, aggregate}`` are specified, ``keyspace`` is required.
If only ``keyspace`` is specified, just the top-level keyspace metadata is refreshed (e.g. replication).
The remaining arguments ``{table, usertype, function, aggregate}``
are mutually exclusive -- only one may be specified.
By default, the timeout for this operation is governed by :attr:`~.Cluster.max_schema_agreement_wait`
and :attr:`~.Cluster.control_connection_timeout`.
Passing max_schema_agreement_wait here overrides :attr:`~.Cluster.max_schema_agreement_wait`.
Setting max_schema_agreement_wait <= 0 will bypass schema agreement and refresh schema immediately.
An Exception is raised if schema refresh fails for any reason.
"""
msg = "refresh_schema is deprecated. Use Cluster.refresh_*_metadata instead."
warnings.warn(msg, DeprecationWarning)
log.warning(msg)
self._validate_refresh_schema(keyspace, table, usertype, function, aggregate)
target_type = self._target_type_from_refresh_args(keyspace, table, usertype, function, aggregate)
if not self.control_connection.refresh_schema(target_type=target_type, keyspace=keyspace, table=table,
type=usertype, function=function, aggregate=aggregate,
schema_agreement_wait=max_schema_agreement_wait):
raise Exception("Schema was not refreshed. See log for details.")
def submit_schema_refresh(self, keyspace=None, table=None, usertype=None, function=None, aggregate=None):
"""
.. deprecated:: 2.6.0
Use refresh_*_metadata instead
Schedule a refresh of the internal representation of the current
schema for this cluster. See :meth:`~.refresh_schema` for description of parameters.
"""
msg = "submit_schema_refresh is deprecated. Use Cluster.refresh_*_metadata instead."
warnings.warn(msg, DeprecationWarning)
log.warning(msg)
self._validate_refresh_schema(keyspace, table, usertype, function, aggregate)
target_type = self._target_type_from_refresh_args(keyspace, table, usertype, function, aggregate)
return self.executor.submit(
self.control_connection.refresh_schema, target_type=target_type, keyspace=keyspace, table=table,
type=usertype, function=function, aggregate=aggregate)
def refresh_schema_metadata(self, max_schema_agreement_wait=None):
"""
Synchronously refresh all schema metadata.
@@ -3130,26 +3072,14 @@ class ResponseFuture(object):
# otherwise, move onto another host
self.send_request()
def result(self, timeout=_NOT_SET):
def result(self):
"""
Return the final result or raise an Exception if errors were
encountered. If the final result or error has not been set
yet, this method will block until that time.
.. versionchanged:: 2.6.0
**`timeout` is deprecated. Use timeout in the Session execute functions instead.
The following description applies to deprecated behavior:**
You may set a timeout (in seconds) with the `timeout` parameter.
By default, the :attr:`~.default_timeout` for the :class:`.Session`
this was created through will be used for the timeout on this
operation.
This timeout applies to the entire request, including any retries
(decided internally by the :class:`.policies.RetryPolicy` used with
the request).
yet, this method will block until it is set, or the timeout
set for the request expires.
Timeout is specified in the Session request execution functions.
If the timeout is exceeded, an :exc:`cassandra.OperationTimedOut` will be raised.
This is a client-side timeout. For more information
about server-side coordinator timeouts, see :class:`.policies.RetryPolicy`.
@@ -3167,18 +3097,7 @@ class ResponseFuture(object):
... log.exception("Operation failed:")
"""
if timeout is not _NOT_SET and not ResponseFuture._warned_timeout:
msg = "ResponseFuture.result timeout argument is deprecated. Specify the request timeout via Session.execute[_async]."
warnings.warn(msg, DeprecationWarning)
log.warning(msg)
ResponseFuture._warned_timeout = True
else:
timeout = None
self._event.wait(timeout)
# TODO: remove this conditional when deprecated timeout parameter is removed
if not self._event.is_set():
self._on_timeout()
self._event.wait()
if self._final_result is not _NOT_SET:
return ResultSet(self, self._final_result)
else:

View File

@@ -120,13 +120,6 @@ class Column(object):
determines the order that the clustering keys are sorted on disk
"""
polymorphic_key = False
"""
*Deprecated*
see :attr:`~.discriminator_column`
"""
discriminator_column = False
"""
boolean, if set to True, this column will be used for discriminating records
@@ -151,7 +144,6 @@ class Column(object):
default=None,
required=False,
clustering_order=None,
polymorphic_key=False,
discriminator_column=False,
static=False):
self.partition_key = partition_key
@@ -161,14 +153,7 @@ class Column(object):
self.default = default
self.required = required
self.clustering_order = clustering_order
if polymorphic_key:
msg = "polymorphic_key is deprecated. Use discriminator_column instead."
warnings.warn(msg, DeprecationWarning)
log.warning(msg)
self.discriminator_column = discriminator_column or polymorphic_key
self.polymorphic_key = self.discriminator_column
self.discriminator_column = discriminator_column
# the column name in the model definition
self.column_name = None
@@ -540,6 +525,7 @@ class UUID(Column):
def to_database(self, value):
return self.validate(value)
from uuid import UUID as pyUUID, getnode
@@ -550,25 +536,6 @@ class TimeUUID(UUID):
db_type = 'timeuuid'
@classmethod
def from_datetime(self, dt):
"""
generates a UUID for a given datetime
:param dt: datetime
:type dt: datetime
:return: uuid1
.. deprecated:: 2.6.0
Use :func:`cassandra.util.uuid_from_time`
"""
msg = "cqlengine.columns.TimeUUID.from_datetime is deprecated. Use cassandra.util.uuid_from_time instead."
warnings.warn(msg, DeprecationWarning)
log.warning(msg)
return util.uuid_from_time(dt)
class Boolean(Column):
"""
@@ -612,17 +579,6 @@ class Float(BaseFloat):
"""
db_type = 'float'
def __init__(self, double_precision=None, **kwargs):
if double_precision is None or bool(double_precision):
msg = "Float(double_precision=True) is deprecated. Use Double() type instead."
double_precision = True
warnings.warn(msg, DeprecationWarning)
log.warning(msg)
self.db_type = 'double' if double_precision else 'float'
super(Float, self).__init__(**kwargs)
class Double(BaseFloat):
"""

View File

@@ -37,58 +37,6 @@ log = logging.getLogger(__name__)
schema_columnfamilies = NamedTable('system', 'schema_columnfamilies')
def create_keyspace(name, strategy_class, replication_factor, durable_writes=True, **replication_values):
"""
*Deprecated - use :func:`create_keyspace_simple` or :func:`create_keyspace_network_topology` instead*
Creates a keyspace
If the keyspace already exists, it will not be modified.
**This function should be used with caution, especially in production environments.
Take care to execute schema modifications in a single context (i.e. not concurrently with other clients).**
*There are plans to guard schema-modifying functions with an environment-driven conditional.*
:param str name: name of keyspace to create
:param str strategy_class: keyspace replication strategy class (:attr:`~.SimpleStrategy` or :attr:`~.NetworkTopologyStrategy`)
:param int replication_factor: keyspace replication factor, used with :attr:`~.SimpleStrategy`
:param bool durable_writes: Write log is bypassed if set to False
:param \*\*replication_values: Additional values to add to the replication options map
"""
if not _allow_schema_modification():
return
msg = "Deprecated. Use create_keyspace_simple or create_keyspace_network_topology instead"
warnings.warn(msg, DeprecationWarning)
log.warning(msg)
cluster = get_cluster()
if name not in cluster.metadata.keyspaces:
# try the 1.2 method
replication_map = {
'class': strategy_class,
'replication_factor': replication_factor
}
replication_map.update(replication_values)
if strategy_class.lower() != 'simplestrategy':
# Although the Cassandra documentation states for `replication_factor`
# that it is "Required if class is SimpleStrategy; otherwise,
# not used." we get an error if it is present.
replication_map.pop('replication_factor', None)
query = """
CREATE KEYSPACE {0}
WITH REPLICATION = {1}
""".format(metadata.protect_name(name), json.dumps(replication_map).replace('"', "'"))
if strategy_class != 'SimpleStrategy':
query += " AND DURABLE_WRITES = {0}".format('true' if durable_writes else 'false')
execute(query)
def create_keyspace_simple(name, replication_factor, durable_writes=True):
"""
Creates a keyspace with SimpleStrategy for replica placement
@@ -140,13 +88,6 @@ def _create_keyspace(name, durable_writes, strategy_class, strategy_options):
log.info("Not creating keyspace %s because it already exists", name)
def delete_keyspace(name):
msg = "Deprecated. Use drop_keyspace instead"
warnings.warn(msg, DeprecationWarning)
log.warning(msg)
drop_keyspace(name)
def drop_keyspace(name):
"""
Drops a keyspace, if it exists.

View File

@@ -310,7 +310,6 @@ class BaseModel(object):
__keyspace__ = None
__polymorphic_key__ = None # DEPRECATED
__discriminator_value__ = None
__options__ = None
@@ -753,14 +752,7 @@ class ModelMetaClass(type):
is_abstract = attrs['__abstract__'] = attrs.get('__abstract__', False)
# short circuit __discriminator_value__ inheritance
# __polymorphic_key__ is deprecated
poly_key = attrs.get('__polymorphic_key__', None)
if poly_key:
msg = '__polymorphic_key__ is deprecated. Use __discriminator_value__ instead'
warnings.warn(msg, DeprecationWarning)
log.warning(msg)
attrs['__discriminator_value__'] = attrs.get('__discriminator_value__', poly_key)
attrs['__polymorphic_key__'] = attrs['__discriminator_value__']
attrs['__discriminator_value__'] = attrs.get('__discriminator_value__')
options = attrs.get('__options__') or {}
attrs['__default_ttl__'] = options.get('default_time_to_live')
@@ -782,7 +774,7 @@ class ModelMetaClass(type):
discriminator_columns = [c for c in column_definitions if c[1].discriminator_column]
is_polymorphic = len(discriminator_columns) > 0
if len(discriminator_columns) > 1:
raise ModelDefinitionException('only one discriminator_column (polymorphic_key (deprecated)) can be defined in a model, {0} found'.format(len(discriminator_columns)))
raise ModelDefinitionException('only one discriminator_column can be defined in a model, {0} found'.format(len(discriminator_columns)))
if attrs['__discriminator_value__'] and not is_polymorphic:
raise ModelDefinitionException('__discriminator_value__ specified, but no base columns defined with discriminator_column=True')
@@ -790,7 +782,7 @@ class ModelMetaClass(type):
discriminator_column_name, discriminator_column = discriminator_columns[0] if discriminator_columns else (None, None)
if isinstance(discriminator_column, (columns.BaseContainerColumn, columns.Counter)):
raise ModelDefinitionException('counter and container columns cannot be used as discriminator columns (polymorphic_key (deprecated)) ')
raise ModelDefinitionException('counter and container columns cannot be used as discriminator columns')
# find polymorphic base class
polymorphic_base = None
@@ -946,13 +938,6 @@ class Model(BaseModel):
(e.g. compaction, default ttl, cache settings, etc.)
"""
__polymorphic_key__ = None
"""
*Deprecated.*
see :attr:`~.__discriminator_value__`
"""
__discriminator_value__ = None
"""
*Optional* Specifies a value for the discriminator column when using model inheritance.

View File

@@ -13,17 +13,13 @@
# limitations under the License.
from datetime import datetime, timedelta
import logging
import time
import six
import warnings
from cassandra.cqlengine import UnicodeMixin
from cassandra.cqlengine.functions import QueryValue
from cassandra.cqlengine.operators import BaseWhereOperator, InOperator
log = logging.getLogger(__name__)
class StatementException(Exception):
pass
@@ -295,11 +291,6 @@ class ListUpdateClause(ContainerUpdateClause):
ctx[str(ctx_id)] = self._to_database(self._assignments)
ctx_id += 1
if self._prepend is not None:
msg = "Previous versions of cqlengine implicitly reversed prepended lists to account for CASSANDRA-8733. " \
"THIS VERSION DOES NOT. This warning will be removed in a future release."
warnings.warn(msg)
log.warning(msg)
ctx[str(ctx_id)] = self._to_database(self._prepend)
ctx_id += 1
if self._append is not None:

View File

@@ -73,20 +73,6 @@ def trim_if_startswith(s, prefix):
return s
def unix_time_from_uuid1(u):
msg = "'cassandra.cqltypes.unix_time_from_uuid1' has moved to 'cassandra.util'. This entry point will be removed in the next major version."
warnings.warn(msg, DeprecationWarning)
log.warning(msg)
return util.unix_time_from_uuid1(u)
def datetime_from_timestamp(timestamp):
msg = "'cassandra.cqltypes.datetime_from_timestamp' has moved to 'cassandra.util'. This entry point will be removed in the next major version."
warnings.warn(msg, DeprecationWarning)
log.warning(msg)
return util.datetime_from_timestamp(timestamp)
_casstypes = {}
@@ -538,7 +524,6 @@ class DateType(_CassandraType):
@staticmethod
def interpret_datestring(val):
# not used internally. deprecate?
if val[-5] in ('+', '-'):
offset = (int(val[-4:-2]) * 3600 + int(val[-2:]) * 60) * int(val[-5] + '1')
val = val[:-5]
@@ -582,7 +567,7 @@ class TimeUUIDType(DateType):
typename = 'timeuuid'
def my_timestamp(self):
return unix_time_from_uuid1(self.val)
return util.unix_time_from_uuid1(self.val)
@staticmethod
def deserialize(byts, protocol_version):

View File

@@ -1,58 +0,0 @@
# Copyright 2013-2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import wraps
import warnings
import cassandra.query
import logging
log = logging.getLogger(__name__)
_have_warned = False
def warn_once(f):
@wraps(f)
def new_f(*args, **kwargs):
global _have_warned
if not _have_warned:
msg = "cassandra.decoder.%s has moved to cassandra.query.%s" % (f.__name__, f.__name__)
warnings.warn(msg, DeprecationWarning)
log.warning(msg)
_have_warned = True
return f(*args, **kwargs)
return new_f
tuple_factory = warn_once(cassandra.query.tuple_factory)
"""
Deprecated: use :meth:`cassandra.query.tuple_factory()`
"""
named_tuple_factory = warn_once(cassandra.query.named_tuple_factory)
"""
Deprecated: use :meth:`cassandra.query.named_tuple_factory()`
"""
dict_factory = warn_once(cassandra.query.dict_factory)
"""
Deprecated: use :meth:`cassandra.query.dict_factory()`
"""
ordered_dict_factory = warn_once(cassandra.query.ordered_dict_factory)
"""
Deprecated: use :meth:`cassandra.query.ordered_dict_factory()`
"""

View File

@@ -185,7 +185,6 @@ class Metadata(object):
# return one or the other based on the query results.
# Here we deal with that.
if isinstance(meta, TableMetadata):
meta.keyspace = keyspace_meta # temporary while TableMetadata.keyspace is deprecated
keyspace_meta._add_table_metadata(meta)
else:
keyspace_meta._add_view_metadata(meta)
@@ -973,14 +972,6 @@ class TableMetadata(object):
A representation of the schema for a single table.
"""
keyspace = None
"""
An instance of :class:`~.KeyspaceMetadata`.
.. deprecated:: 2.7.0
"""
keyspace_name = None
""" String name of this Table's keyspace """
@@ -1564,7 +1555,7 @@ class TriggerMetadata(object):
def as_cql_query(self):
ret = "CREATE TRIGGER %s ON %s.%s USING %s" % (
protect_name(self.name),
protect_name(self.table.keyspace.name),
protect_name(self.table.keyspace_name),
protect_name(self.table.name),
protect_value(self.options['class'])
)
@@ -1655,7 +1646,6 @@ class SchemaParserV22(_SchemaParser):
try:
for table_row in self.keyspace_table_rows.get(keyspace_meta.name, []):
table_meta = self._build_table_metadata(table_row)
table_meta.keyspace = keyspace_meta # temporary while TableMetadata.keyspace is deprecated
keyspace_meta._add_table_metadata(table_meta)
for usertype_row in self.keyspace_type_rows.get(keyspace_meta.name, []):

View File

@@ -87,8 +87,6 @@
.. automethod:: refresh_user_aggregate_metadata
.. automethod:: refresh_schema
.. automethod:: refresh_nodes
.. automethod:: set_meta_refresh_enabled

View File

@@ -29,8 +29,6 @@ Columns
.. autoattribute:: clustering_order
.. autoattribute:: polymorphic_key
.. autoattribute:: discriminator_column
.. autoattribute:: static
@@ -78,8 +76,6 @@ Columns of all types are initialized by passing :class:`.Column` attributes to t
.. autoclass:: TimeUUID(**kwargs)
.. automethod:: from_datetime
.. autoclass:: TinyInt(**kwargs)
.. autoclass:: UserDefinedType

View File

@@ -33,8 +33,6 @@ Model
.. _ttl-change:
.. autoattribute:: __default_ttl__
.. autoattribute:: __polymorphic_key__
.. autoattribute:: __discriminator_value__
See :ref:`model_inheritance` for usage examples.

View File

@@ -175,15 +175,9 @@ class TestTimeUUID(BaseColumnIOTest):
return val if isinstance(val, UUID) else UUID(val)
# until Floats are implicitly single:
class FloatSingle(columns.Float):
def __init__(self, **kwargs):
super(FloatSingle, self).__init__(double_precision=False, **kwargs)
class TestFloatIO(BaseColumnIOTest):
column = FloatSingle
column = columns.Float
pkey_val = 4.75
data_val = -1.5

View File

@@ -37,33 +37,20 @@ class KeyspaceManagementTest(BaseCassEngTestCase):
cluster = get_cluster()
keyspace_ss = 'test_ks_ss'
self.assertFalse(keyspace_ss in cluster.metadata.keyspaces)
self.assertNotIn(keyspace_ss, cluster.metadata.keyspaces)
management.create_keyspace_simple(keyspace_ss, 2)
self.assertTrue(keyspace_ss in cluster.metadata.keyspaces)
self.assertIn(keyspace_ss, cluster.metadata.keyspaces)
management.drop_keyspace(keyspace_ss)
self.assertFalse(keyspace_ss in cluster.metadata.keyspaces)
with warnings.catch_warnings(record=True) as w:
management.create_keyspace(keyspace_ss, strategy_class="SimpleStrategy", replication_factor=1)
self.assertEqual(len(w), 1)
self.assertEqual(w[-1].category, DeprecationWarning)
self.assertTrue(keyspace_ss in cluster.metadata.keyspaces)
management.drop_keyspace(keyspace_ss)
self.assertFalse(keyspace_ss in cluster.metadata.keyspaces)
self.assertNotIn(keyspace_ss, cluster.metadata.keyspaces)
keyspace_nts = 'test_ks_nts'
self.assertFalse(keyspace_nts in cluster.metadata.keyspaces)
management.create_keyspace_simple(keyspace_nts, 2)
self.assertTrue(keyspace_nts in cluster.metadata.keyspaces)
self.assertNotIn(keyspace_nts, cluster.metadata.keyspaces)
management.create_keyspace_network_topology(keyspace_nts, {'dc1': 1})
self.assertIn(keyspace_nts, cluster.metadata.keyspaces)
with warnings.catch_warnings(record=True) as w:
management.delete_keyspace(keyspace_nts)
self.assertEqual(len(w), 1)
self.assertEqual(w[-1].category, DeprecationWarning)
self.assertFalse(keyspace_nts in cluster.metadata.keyspaces)
management.drop_keyspace(keyspace_nts)
self.assertNotIn(keyspace_nts, cluster.metadata.keyspaces)
class DropTableTest(BaseCassEngTestCase):

View File

@@ -156,7 +156,7 @@ class TestModelIO(BaseCassEngTestCase):
e = columns.DateTime()
f = columns.Decimal()
g = columns.Double()
h = columns.Float(double_precision=False)
h = columns.Float()
i = columns.Inet()
j = columns.Integer()
k = columns.Text()
@@ -225,13 +225,8 @@ class TestModelIO(BaseCassEngTestCase):
"""
Test for inserting single-precision and double-precision values into a Float and Double columns
test_can_insert_double_and_float tests a Float can only hold a single-precision value, unless
"double_precision" attribute is specified as True or is unspecified. This test first tests that an AttributeError
is raised when attempting to input a double-precision value into a single-precision Float. It then verifies that
Double, Float(double_precision=True) and Float() can hold double-precision values by default. It also verifies that
columns.Float(double_precision=False) can hold a single-precision value, and a Double can hold a single-precision value.
@since 2.6.0
@changed 3.0.0 removed deprecated Float(double_precision) parameter
@jira_ticket PYTHON-246
@expected_result Each floating point column type is able to hold their respective precision values.
@@ -240,24 +235,19 @@ class TestModelIO(BaseCassEngTestCase):
class FloatingPointModel(Model):
id = columns.Integer(primary_key=True)
a = columns.Float(double_precision=False)
b = columns.Float(double_precision=True)
c = columns.Float()
f = columns.Float()
d = columns.Double()
sync_table(FloatingPointModel)
FloatingPointModel.create(id=0, a=2.39)
FloatingPointModel.create(id=0, f=2.39)
output = FloatingPointModel.objects().first()
self.assertEqual(2.390000104904175, output.a)
self.assertEqual(2.390000104904175, output.f) # float loses precision
FloatingPointModel.create(id=0, a=3.4028234663852886e+38, b=2.39, c=2.39, d=2.39)
FloatingPointModel.create(id=0, f=3.4028234663852886e+38, d=2.39)
output = FloatingPointModel.objects().first()
self.assertEqual(3.4028234663852886e+38, output.a)
self.assertEqual(2.39, output.b)
self.assertEqual(2.39, output.c)
self.assertEqual(2.39, output.d)
self.assertEqual(3.4028234663852886e+38, output.f)
self.assertEqual(2.39, output.d) # double retains precision
FloatingPointModel.create(id=0, d=3.4028234663852886e+38)
output = FloatingPointModel.objects().first()

View File

@@ -22,243 +22,6 @@ from tests.integration.cqlengine.base import BaseCassEngTestCase
from cassandra.cqlengine import management
class TestPolymorphicClassConstruction(BaseCassEngTestCase):
def test_multiple_polymorphic_key_failure(self):
""" Tests that defining a model with more than one polymorphic key fails """
with self.assertRaises(models.ModelDefinitionException):
class M(models.Model):
partition = columns.Integer(primary_key=True)
type1 = columns.Integer(polymorphic_key=True)
type2 = columns.Integer(polymorphic_key=True)
def test_no_polymorphic_key_column_failure(self):
with self.assertRaises(models.ModelDefinitionException):
class M(models.Model):
__polymorphic_key__ = 1
def test_polymorphic_key_inheritance(self):
""" Tests that polymorphic_key attribute is not inherited """
class Base(models.Model):
partition = columns.Integer(primary_key=True)
type1 = columns.Integer(polymorphic_key=True)
class M1(Base):
__polymorphic_key__ = 1
class M2(M1):
pass
assert M2.__polymorphic_key__ is None
def test_polymorphic_metaclass(self):
""" Tests that the model meta class configures polymorphic models properly """
class Base(models.Model):
partition = columns.Integer(primary_key=True)
type1 = columns.Integer(polymorphic_key=True)
class M1(Base):
__polymorphic_key__ = 1
assert Base._is_polymorphic
assert M1._is_polymorphic
assert Base._is_polymorphic_base
assert not M1._is_polymorphic_base
assert Base._discriminator_column is Base._columns['type1']
assert M1._discriminator_column is M1._columns['type1']
assert Base._discriminator_column_name == 'type1'
assert M1._discriminator_column_name == 'type1'
def test_table_names_are_inherited_from_poly_base(self):
class Base(models.Model):
partition = columns.Integer(primary_key=True)
type1 = columns.Integer(polymorphic_key=True)
class M1(Base):
__polymorphic_key__ = 1
assert Base.column_family_name() == M1.column_family_name()
def test_collection_columns_cant_be_polymorphic_keys(self):
with self.assertRaises(models.ModelDefinitionException):
class Base(models.Model):
partition = columns.Integer(primary_key=True)
type1 = columns.Set(columns.Integer, polymorphic_key=True)
class PolyBase(models.Model):
partition = columns.UUID(primary_key=True, default=uuid.uuid4)
row_type = columns.Integer(polymorphic_key=True)
class Poly1(PolyBase):
__polymorphic_key__ = 1
data1 = columns.Text()
class Poly2(PolyBase):
__polymorphic_key__ = 2
data2 = columns.Text()
class TestPolymorphicModel(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(TestPolymorphicModel, cls).setUpClass()
management.sync_table(Poly1)
management.sync_table(Poly2)
@classmethod
def tearDownClass(cls):
super(TestPolymorphicModel, cls).tearDownClass()
management.drop_table(Poly1)
management.drop_table(Poly2)
def test_saving_base_model_fails(self):
with self.assertRaises(models.PolymorphicModelException):
PolyBase.create()
def test_saving_subclass_saves_poly_key(self):
p1 = Poly1.create(data1='pickle')
p2 = Poly2.create(data2='bacon')
assert p1.row_type == Poly1.__polymorphic_key__
assert p2.row_type == Poly2.__polymorphic_key__
def test_query_deserialization(self):
p1 = Poly1.create(data1='pickle')
p2 = Poly2.create(data2='bacon')
p1r = PolyBase.get(partition=p1.partition)
p2r = PolyBase.get(partition=p2.partition)
assert isinstance(p1r, Poly1)
assert isinstance(p2r, Poly2)
def test_delete_on_polymorphic_subclass_does_not_include_polymorphic_key(self):
p1 = Poly1.create()
session = get_session()
with mock.patch.object(session, 'execute') as m:
Poly1.objects(partition=p1.partition).delete()
# make sure our polymorphic key isn't in the CQL
# not sure how we would even get here if it was in there
# since the CQL would fail.
self.assertNotIn("row_type", m.call_args[0][0].query_string)
class UnindexedPolyBase(models.Model):
partition = columns.UUID(primary_key=True, default=uuid.uuid4)
cluster = columns.UUID(primary_key=True, default=uuid.uuid4)
row_type = columns.Integer(polymorphic_key=True)
class UnindexedPoly1(UnindexedPolyBase):
__polymorphic_key__ = 1
data1 = columns.Text()
class UnindexedPoly2(UnindexedPolyBase):
__polymorphic_key__ = 2
data2 = columns.Text()
class UnindexedPoly3(UnindexedPoly2):
__polymorphic_key__ = 3
data3 = columns.Text()
class TestUnindexedPolymorphicQuery(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(TestUnindexedPolymorphicQuery, cls).setUpClass()
management.sync_table(UnindexedPoly1)
management.sync_table(UnindexedPoly2)
management.sync_table(UnindexedPoly3)
cls.p1 = UnindexedPoly1.create(data1='pickle')
cls.p2 = UnindexedPoly2.create(partition=cls.p1.partition, data2='bacon')
cls.p3 = UnindexedPoly3.create(partition=cls.p1.partition, data3='turkey')
@classmethod
def tearDownClass(cls):
super(TestUnindexedPolymorphicQuery, cls).tearDownClass()
management.drop_table(UnindexedPoly1)
management.drop_table(UnindexedPoly2)
management.drop_table(UnindexedPoly3)
def test_non_conflicting_type_results_work(self):
p1, p2, p3 = self.p1, self.p2, self.p3
assert len(list(UnindexedPoly1.objects(partition=p1.partition, cluster=p1.cluster))) == 1
assert len(list(UnindexedPoly2.objects(partition=p1.partition, cluster=p2.cluster))) == 1
assert len(list(UnindexedPoly3.objects(partition=p1.partition, cluster=p3.cluster))) == 1
def test_subclassed_model_results_work_properly(self):
p1, p2, p3 = self.p1, self.p2, self.p3
assert len(list(UnindexedPoly2.objects(partition=p1.partition, cluster__in=[p2.cluster, p3.cluster]))) == 2
def test_conflicting_type_results(self):
with self.assertRaises(models.PolymorphicModelException):
list(UnindexedPoly1.objects(partition=self.p1.partition))
with self.assertRaises(models.PolymorphicModelException):
list(UnindexedPoly2.objects(partition=self.p1.partition))
class IndexedPolyBase(models.Model):
partition = columns.UUID(primary_key=True, default=uuid.uuid4)
cluster = columns.UUID(primary_key=True, default=uuid.uuid4)
row_type = columns.Integer(polymorphic_key=True, index=True)
class IndexedPoly1(IndexedPolyBase):
__polymorphic_key__ = 1
data1 = columns.Text()
class IndexedPoly2(IndexedPolyBase):
__polymorphic_key__ = 2
data2 = columns.Text()
class TestIndexedPolymorphicQuery(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(TestIndexedPolymorphicQuery, cls).setUpClass()
management.sync_table(IndexedPoly1)
management.sync_table(IndexedPoly2)
cls.p1 = IndexedPoly1.create(data1='pickle')
cls.p2 = IndexedPoly2.create(partition=cls.p1.partition, data2='bacon')
@classmethod
def tearDownClass(cls):
super(TestIndexedPolymorphicQuery, cls).tearDownClass()
management.drop_table(IndexedPoly1)
management.drop_table(IndexedPoly2)
def test_success_case(self):
assert len(list(IndexedPoly1.objects(partition=self.p1.partition))) == 1
assert len(list(IndexedPoly2.objects(partition=self.p1.partition))) == 1
#########
# Repeated tests for 'discriminator' properties, following deprecation of polymorphic variants
#########
class TestInheritanceClassConstruction(BaseCassEngTestCase):
def test_multiple_discriminator_value_failure(self):

View File

@@ -214,7 +214,7 @@ class UserDefinedTypeTests(BaseCassEngTestCase):
e = columns.DateTime()
f = columns.Decimal()
g = columns.Double()
h = columns.Float(double_precision=False)
h = columns.Float()
i = columns.Inet()
j = columns.Integer()
k = columns.Text()
@@ -261,7 +261,7 @@ class UserDefinedTypeTests(BaseCassEngTestCase):
e = columns.DateTime()
f = columns.Decimal()
g = columns.Double()
h = columns.Float(double_precision=False)
h = columns.Float()
i = columns.Inet()
j = columns.Integer()
k = columns.Text()

View File

@@ -254,32 +254,6 @@ class ClusterTests(unittest.TestCase):
cluster.set_max_connections_per_host(HostDistance.LOCAL, max_connections_per_host + 1)
self.assertEqual(cluster.get_max_connections_per_host(HostDistance.LOCAL), max_connections_per_host + 1)
def test_submit_schema_refresh(self):
"""
Ensure new schema is refreshed after submit_schema_refresh()
"""
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
cluster.connect()
self.assertNotIn("newkeyspace", cluster.metadata.keyspaces)
other_cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = other_cluster.connect()
execute_until_pass(session,
"""
CREATE KEYSPACE newkeyspace
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}
""")
future = cluster.submit_schema_refresh()
future.result()
self.assertIn("newkeyspace", cluster.metadata.keyspaces)
execute_until_pass(session, "DROP KEYSPACE newkeyspace")
cluster.shutdown()
other_cluster.shutdown()
def test_refresh_schema(self):
cluster = Cluster(protocol_version=PROTOCOL_VERSION)
session = cluster.connect()
@@ -606,10 +580,6 @@ class ClusterTests(unittest.TestCase):
cluster.refresh_schema_metadata()
cluster.refresh_schema_metadata(max_schema_agreement_wait=0)
# submit schema refresh
future = cluster.submit_schema_refresh()
future.result()
assert_quiescent_pool_state(self, cluster)
cluster.shutdown()

View File

@@ -162,6 +162,6 @@ class CustomPayloadTests(unittest.TestCase):
# Submit the statement with our custom payload. Validate the one
# we receive from the server matches
response_future = self.session.execute_async(statement, custom_payload=custom_payload)
response_future.result(timeout=10.0)
response_future.result()
returned_custom_payload = response_future.custom_payload
self.assertEqual(custom_payload, returned_custom_payload)

View File

@@ -113,7 +113,8 @@ class SchemaMetadataTests(BasicSegregatedKeyspaceUnitTestCase):
self.assertTrue(self.function_table_name in ksmeta.tables)
tablemeta = ksmeta.tables[self.function_table_name]
self.assertEqual(tablemeta.keyspace, ksmeta) # tablemeta.keyspace is deprecated
self.assertEqual(tablemeta.keyspace_name, ksmeta.name)
self.assertEqual(tablemeta.name, self.function_table_name)
self.assertEqual(tablemeta.name, self.function_table_name)
self.assertEqual([u'a'], [c.name for c in tablemeta.partition_key])

View File

@@ -148,7 +148,7 @@ class QueryTests(unittest.TestCase):
query = "SELECT * FROM system.local"
statement = SimpleStatement(query)
response_future = session.execute_async(statement, trace=True)
response_future.result(timeout=10.0)
response_future.result()
# Fetch the client_ip from the trace.
trace = response_future.get_query_trace(max_wait=2.0)
@@ -787,4 +787,4 @@ class MaterializedViewQueryTest(BasicSharedKeyspaceUnitTestCase):
self.assertEquals(results[1].month, 1)
self.assertEquals(results[1].day, 25)
self.assertEquals(results[1].score, 3200)
self.assertEquals(results[1].user, "pcmanus")
self.assertEquals(results[1].user, "pcmanus")