
Merge remote branch 'nickmbailey/0.8' into multiversion

2 parents ccb3641 + 43132c0 · commit a6a3aef4b43099bae452193c7d9c22c95a9d073c · committed by @thobbs on Apr 13, 2011
debian/changelog (7 changed lines)
@@ -1,3 +1,10 @@
+python-telephus (0.7.1) unstable; urgency=low
+
+ * Tyler Hobbs <tyler@datastax.com>:
+ + Restore compatibility with Python 2.5
+
+ -- paul cannon <paul@riptano.com> Wed, 30 Mar 2011 12:31:30 -0500
+
python-telephus (0.7) unstable; urgency=low
* paul cannon <paul@riptano.com>:
setup.py (2 changed lines)
@@ -3,7 +3,7 @@
from distutils.core import setup
setup(
name='telephus',
- version='0.7',
+ version='0.7.1',
description='connection pooled, low-level client API for Cassandra in Twisted python',
author='brandon@faltering.com',
url='http://github.com/driftx/Telephus',
telephus/cassandra/Cassandra-remote (21 changed lines)
@@ -31,7 +31,9 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help':
print ' get_range_slices(ColumnParent column_parent, SlicePredicate predicate, KeyRange range, ConsistencyLevel consistency_level)'
print ' get_indexed_slices(ColumnParent column_parent, IndexClause index_clause, SlicePredicate column_predicate, ConsistencyLevel consistency_level)'
print ' void insert(string key, ColumnParent column_parent, Column column, ConsistencyLevel consistency_level)'
+ print ' void add(string key, ColumnParent column_parent, CounterColumn column, ConsistencyLevel consistency_level)'
print ' void remove(string key, ColumnPath column_path, i64 timestamp, ConsistencyLevel consistency_level)'
+ print ' void remove_counter(string key, ColumnPath path, ConsistencyLevel consistency_level)'
print ' void batch_mutate( mutation_map, ConsistencyLevel consistency_level)'
print ' void truncate(string cfname)'
print ' describe_schema_versions()'
@@ -49,6 +51,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help':
print ' string system_drop_keyspace(string keyspace)'
print ' string system_update_keyspace(KsDef ks_def)'
print ' string system_update_column_family(CfDef cf_def)'
+ print ' CqlResult execute_cql_query(string query, Compression compression)'
print ''
sys.exit(0)
@@ -159,12 +162,24 @@ elif cmd == 'insert':
sys.exit(1)
pp.pprint(client.insert(args[0],eval(args[1]),eval(args[2]),eval(args[3]),))
+elif cmd == 'add':
+ if len(args) != 4:
+ print 'add requires 4 args'
+ sys.exit(1)
+ pp.pprint(client.add(args[0],eval(args[1]),eval(args[2]),eval(args[3]),))
+
elif cmd == 'remove':
if len(args) != 4:
print 'remove requires 4 args'
sys.exit(1)
pp.pprint(client.remove(args[0],eval(args[1]),eval(args[2]),eval(args[3]),))
+elif cmd == 'remove_counter':
+ if len(args) != 3:
+ print 'remove_counter requires 3 args'
+ sys.exit(1)
+ pp.pprint(client.remove_counter(args[0],eval(args[1]),eval(args[2]),))
+
elif cmd == 'batch_mutate':
if len(args) != 2:
print 'batch_mutate requires 2 args'
@@ -267,6 +282,12 @@ elif cmd == 'system_update_column_family':
sys.exit(1)
pp.pprint(client.system_update_column_family(eval(args[0]),))
+elif cmd == 'execute_cql_query':
+ if len(args) != 2:
+ print 'execute_cql_query requires 2 args'
+ sys.exit(1)
+ pp.pprint(client.execute_cql_query(args[0],eval(args[1]),))
+
else:
print 'Unrecognized method %s' % cmd
sys.exit(1)
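
Editor's note: the generated script parses each struct argument with eval(), so the new commands take Python constructor expressions on the command line. The sketch below spells out what the 'add' branch above ends up executing; the client and pp objects are built earlier in the script (not shown in this hunk), and the argument values are purely illustrative.

# Illustration only: what the 'add' dispatch above boils down to.
# 'client' and 'pp' come from the script's setup code, elided from this diff.
args = ["page1",
        "ColumnParent(column_family='counters')",
        "CounterColumn(name='hits', value=1)",
        "ConsistencyLevel.ONE"]
pp.pprint(client.add(args[0], eval(args[1]), eval(args[2]), eval(args[3]),))
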
telephus/cassandra/Cassandra.py (1,255 changed lines)
@@ -131,6 +131,18 @@ def insert(key, column_parent, column, consistency_level):
"""
pass
+ def add(key, column_parent, column, consistency_level):
+ """
+ Increment or decrement a counter.
+
+ Parameters:
+ - key
+ - column_parent
+ - column
+ - consistency_level
+ """
+ pass
+
def remove(key, column_path, timestamp, consistency_level):
"""
Remove data from the row specified by key at the granularity specified by column_path, and the given timestamp. Note
@@ -145,6 +157,19 @@ def remove(key, column_path, timestamp, consistency_level):
"""
pass
+ def remove_counter(key, path, consistency_level):
+ """
+ Remove a counter at the specified location.
+ Note that counters have limited support for deletes: if you remove a counter, you must wait to issue any following update
+ until the delete has reached all the nodes and all of them have been fully compacted.
+
+ Parameters:
+ - key
+ - path
+ - consistency_level
+ """
+ pass
+
def batch_mutate(mutation_map, consistency_level):
"""
Mutate many columns or super columns for many row keys. See also: Mutation.
@@ -305,6 +330,17 @@ def system_update_column_family(cf_def):
"""
pass
+ def execute_cql_query(query, compression):
+ """
+ Executes a CQL (Cassandra Query Language) statement and returns a
+ CqlResult containing the results.
+
+ Parameters:
+ - query
+ - compression
+ """
+ pass
+
class Client:
implements(Iface)
@@ -760,6 +796,51 @@ def recv_insert(self, iprot, mtype, rseqid):
return d.errback(result.te)
return d.callback(None)
+ def add(self, key, column_parent, column, consistency_level):
+ """
+ Increment or decrement a counter.
+
+ Parameters:
+ - key
+ - column_parent
+ - column
+ - consistency_level
+ """
+ self._seqid += 1
+ d = self._reqs[self._seqid] = defer.Deferred()
+ self.send_add(key, column_parent, column, consistency_level)
+ return d
+
+ def send_add(self, key, column_parent, column, consistency_level):
+ oprot = self._oprot_factory.getProtocol(self._transport)
+ oprot.writeMessageBegin('add', TMessageType.CALL, self._seqid)
+ args = add_args()
+ args.key = key
+ args.column_parent = column_parent
+ args.column = column
+ args.consistency_level = consistency_level
+ args.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def recv_add(self, iprot, mtype, rseqid):
+ d = self._reqs.pop(rseqid)
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ return d.errback(x)
+ result = add_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.ire != None:
+ return d.errback(result.ire)
+ if result.ue != None:
+ return d.errback(result.ue)
+ if result.te != None:
+ return d.errback(result.te)
+ return d.callback(None)
+
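
Editor's note: the new client methods follow the same pattern as insert(): each call allocates a sequence id, stores a Deferred in self._reqs, sends the request, and the matching recv_* method later fires that Deferred with None on success or with one of the declared exceptions as an errback. A minimal usage sketch, assuming a connected Client instance (connection setup lives outside this file) and the generated ttypes module alongside it; the ColumnParent, CounterColumn and ConsistencyLevel names are assumptions based on the thrift_spec entries further down.

from telephus.cassandra.ttypes import (ColumnParent, CounterColumn,
                                       ConsistencyLevel, TimedOutException)

def increment_hits(client, key):
    # add() fires its Deferred with None once the increment is acknowledged
    # at the requested consistency level.
    d = client.add(key,
                   ColumnParent(column_family='counters'),
                   CounterColumn(name='hits', value=1),
                   ConsistencyLevel.ONE)
    # A timed-out increment may or may not have been applied; swallowing the
    # timeout here is an illustrative policy only.
    d.addErrback(lambda f: f.trap(TimedOutException))
    return d
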
def remove(self, key, column_path, timestamp, consistency_level):
"""
Remove data from the row specified by key at the granularity specified by column_path, and the given timestamp. Note
@@ -807,6 +888,51 @@ def recv_remove(self, iprot, mtype, rseqid):
return d.errback(result.te)
return d.callback(None)
+ def remove_counter(self, key, path, consistency_level):
+ """
+ Remove a counter at the specified location.
+ Note that counters have limited support for deletes: if you remove a counter, you must wait to issue any following update
+ until the delete has reached all the nodes and all of them have been fully compacted.
+
+ Parameters:
+ - key
+ - path
+ - consistency_level
+ """
+ self._seqid += 1
+ d = self._reqs[self._seqid] = defer.Deferred()
+ self.send_remove_counter(key, path, consistency_level)
+ return d
+
+ def send_remove_counter(self, key, path, consistency_level):
+ oprot = self._oprot_factory.getProtocol(self._transport)
+ oprot.writeMessageBegin('remove_counter', TMessageType.CALL, self._seqid)
+ args = remove_counter_args()
+ args.key = key
+ args.path = path
+ args.consistency_level = consistency_level
+ args.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def recv_remove_counter(self, iprot, mtype, rseqid):
+ d = self._reqs.pop(rseqid)
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ return d.errback(x)
+ result = remove_counter_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.ire != None:
+ return d.errback(result.ire)
+ if result.ue != None:
+ return d.errback(result.ue)
+ if result.te != None:
+ return d.errback(result.te)
+ return d.callback(None)
+
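
Editor's note: remove_counter() follows suit, taking a ColumnPath rather than a ColumnParent plus column. Keep the docstring's caveat in mind: re-incrementing a deleted counter too soon can resurrect it. A hedged sketch, with the same assumptions about the connected client and generated ttypes as above.

from telephus.cassandra.ttypes import ColumnPath, ConsistencyLevel

def delete_hits_counter(client, key):
    # Fires with None on success; errbacks carry InvalidRequestException,
    # UnavailableException or TimedOutException, as in recv_remove_counter().
    return client.remove_counter(key,
                                 ColumnPath(column_family='counters',
                                            column='hits'),
                                 ConsistencyLevel.ONE)
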
def batch_mutate(self, mutation_map, consistency_level):
"""
Mutate many columns or super columns for many row keys. See also: Mutation.
@@ -1211,6 +1337,8 @@ def recv_describe_splits(self, iprot, mtype, rseqid):
iprot.readMessageEnd()
if result.success != None:
return d.callback(result.success)
+ if result.ire != None:
+ return d.errback(result.ire)
return d.errback(TApplicationException(TApplicationException.MISSING_RESULT, "describe_splits failed: unknown result"))
def system_add_column_family(self, cf_def):
@@ -1248,6 +1376,8 @@ def recv_system_add_column_family(self, iprot, mtype, rseqid):
return d.callback(result.success)
if result.ire != None:
return d.errback(result.ire)
+ if result.sde != None:
+ return d.errback(result.sde)
return d.errback(TApplicationException(TApplicationException.MISSING_RESULT, "system_add_column_family failed: unknown result"))
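
Editor's note: each of the recv_system_* methods below gains the same two-line check, so schema-modifying calls can now errback with a SchemaDisagreementException instead of falling through to MISSING_RESULT. A sketch of trapping it on the caller side; the retry policy is illustrative and the ttypes import path is assumed.

from telephus.cassandra.ttypes import SchemaDisagreementException

def add_column_family(client, cf_def):
    d = client.system_add_column_family(cf_def)

    def on_disagreement(failure):
        failure.trap(SchemaDisagreementException)
        # Illustrative handling: log and let the caller decide when to retry.
        print 'schema versions disagree; retry after the cluster settles'

    return d.addErrback(on_disagreement)
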
def system_drop_column_family(self, column_family):
@@ -1285,6 +1415,8 @@ def recv_system_drop_column_family(self, iprot, mtype, rseqid):
return d.callback(result.success)
if result.ire != None:
return d.errback(result.ire)
+ if result.sde != None:
+ return d.errback(result.sde)
return d.errback(TApplicationException(TApplicationException.MISSING_RESULT, "system_drop_column_family failed: unknown result"))
def system_add_keyspace(self, ks_def):
@@ -1322,6 +1454,8 @@ def recv_system_add_keyspace(self, iprot, mtype, rseqid):
return d.callback(result.success)
if result.ire != None:
return d.errback(result.ire)
+ if result.sde != None:
+ return d.errback(result.sde)
return d.errback(TApplicationException(TApplicationException.MISSING_RESULT, "system_add_keyspace failed: unknown result"))
def system_drop_keyspace(self, keyspace):
@@ -1359,6 +1493,8 @@ def recv_system_drop_keyspace(self, iprot, mtype, rseqid):
return d.callback(result.success)
if result.ire != None:
return d.errback(result.ire)
+ if result.sde != None:
+ return d.errback(result.sde)
return d.errback(TApplicationException(TApplicationException.MISSING_RESULT, "system_drop_keyspace failed: unknown result"))
def system_update_keyspace(self, ks_def):
@@ -1396,6 +1532,8 @@ def recv_system_update_keyspace(self, iprot, mtype, rseqid):
return d.callback(result.success)
if result.ire != None:
return d.errback(result.ire)
+ if result.sde != None:
+ return d.errback(result.sde)
return d.errback(TApplicationException(TApplicationException.MISSING_RESULT, "system_update_keyspace failed: unknown result"))
def system_update_column_family(self, cf_def):
@@ -1433,8 +1571,56 @@ def recv_system_update_column_family(self, iprot, mtype, rseqid):
return d.callback(result.success)
if result.ire != None:
return d.errback(result.ire)
+ if result.sde != None:
+ return d.errback(result.sde)
return d.errback(TApplicationException(TApplicationException.MISSING_RESULT, "system_update_column_family failed: unknown result"))
+ def execute_cql_query(self, query, compression):
+ """
+ Executes a CQL (Cassandra Query Language) statement and returns a
+ CqlResult containing the results.
+
+ Parameters:
+ - query
+ - compression
+ """
+ self._seqid += 1
+ d = self._reqs[self._seqid] = defer.Deferred()
+ self.send_execute_cql_query(query, compression)
+ return d
+
+ def send_execute_cql_query(self, query, compression):
+ oprot = self._oprot_factory.getProtocol(self._transport)
+ oprot.writeMessageBegin('execute_cql_query', TMessageType.CALL, self._seqid)
+ args = execute_cql_query_args()
+ args.query = query
+ args.compression = compression
+ args.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def recv_execute_cql_query(self, iprot, mtype, rseqid):
+ d = self._reqs.pop(rseqid)
+ if mtype == TMessageType.EXCEPTION:
+ x = TApplicationException()
+ x.read(iprot)
+ iprot.readMessageEnd()
+ return d.errback(x)
+ result = execute_cql_query_result()
+ result.read(iprot)
+ iprot.readMessageEnd()
+ if result.success != None:
+ return d.callback(result.success)
+ if result.ire != None:
+ return d.errback(result.ire)
+ if result.ue != None:
+ return d.errback(result.ue)
+ if result.te != None:
+ return d.errback(result.te)
+ if result.sde != None:
+ return d.errback(result.sde)
+ return d.errback(TApplicationException(TApplicationException.MISSING_RESULT, "execute_cql_query failed: unknown result"))
+
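
Editor's note: execute_cql_query() is the one new call whose Deferred fires with a value: the CqlResult recovered from result.success in recv_execute_cql_query(). A usage sketch; the Compression enum and the CqlResult/CqlRow field names (rows, key, columns) come from the generated ttypes module, which this diff does not show, so treat them as assumptions.

from telephus.cassandra.ttypes import Compression

def run_cql(client, query):
    d = client.execute_cql_query(query, Compression.NONE)

    def show(result):
        # For a ROWS-type result, each row carries a key and a column list.
        for row in result.rows or []:
            print row.key, [(col.name, col.value) for col in row.columns]
        return result

    return d.addCallback(show)
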
class Processor(TProcessor):
implements(Iface)
@@ -1452,7 +1638,9 @@ def __init__(self, handler):
self._processMap["get_range_slices"] = Processor.process_get_range_slices
self._processMap["get_indexed_slices"] = Processor.process_get_indexed_slices
self._processMap["insert"] = Processor.process_insert
+ self._processMap["add"] = Processor.process_add
self._processMap["remove"] = Processor.process_remove
+ self._processMap["remove_counter"] = Processor.process_remove_counter
self._processMap["batch_mutate"] = Processor.process_batch_mutate
self._processMap["truncate"] = Processor.process_truncate
self._processMap["describe_schema_versions"] = Processor.process_describe_schema_versions
@@ -1470,6 +1658,7 @@ def __init__(self, handler):
self._processMap["system_drop_keyspace"] = Processor.process_system_drop_keyspace
self._processMap["system_update_keyspace"] = Processor.process_system_update_keyspace
self._processMap["system_update_column_family"] = Processor.process_system_update_column_family
+ self._processMap["execute_cql_query"] = Processor.process_execute_cql_query
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
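
Editor's note: on the server side, process() reads the message name and looks it up in _processMap, so the three registrations added above are what make 'add', 'remove_counter' and 'execute_cql_query' reachable. A toy handler wiring sketch; handler methods may return plain values or Deferreds, since each process_* method wraps the call in defer.maybeDeferred.

from collections import defaultdict

class InMemoryCounters(object):
    """Illustrative handler implementing only the new counter methods."""
    def __init__(self):
        self.counters = defaultdict(int)

    def add(self, key, column_parent, column, consistency_level):
        # Returning None is fine; process_add() wraps this call in
        # defer.maybeDeferred and replies once the Deferred fires.
        self.counters[(key, column_parent.column_family, column.name)] += column.value

    def remove_counter(self, key, path, consistency_level):
        self.counters.pop((key, path.column_family, path.column), None)

processor = Processor(InMemoryCounters())
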
@@ -1791,6 +1980,37 @@ def write_results_exception_insert(self, error, result, seqid, oprot):
oprot.writeMessageEnd()
oprot.trans.flush()
+ def process_add(self, seqid, iprot, oprot):
+ args = add_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = add_result()
+ d = defer.maybeDeferred(self._handler.add, args.key, args.column_parent, args.column, args.consistency_level)
+ d.addCallback(self.write_results_success_add, result, seqid, oprot)
+ d.addErrback(self.write_results_exception_add, result, seqid, oprot)
+ return d
+
+ def write_results_success_add(self, success, result, seqid, oprot):
+ result.success = success
+ oprot.writeMessageBegin("add", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def write_results_exception_add(self, error, result, seqid, oprot):
+ try:
+ error.raiseException()
+ except InvalidRequestException, ire:
+ result.ire = ire
+ except UnavailableException, ue:
+ result.ue = ue
+ except TimedOutException, te:
+ result.te = te
+ oprot.writeMessageBegin("add", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
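
Editor's note: write_results_exception_add() recovers the original exception with error.raiseException() and files it into the matching result slot, so a handler signals failure simply by raising one of the declared exception types. A sketch; InvalidRequestException's why field is part of the generated exception type, while the validation rule itself is made up.

from twisted.internet import defer
from telephus.cassandra.ttypes import InvalidRequestException

class ValidatingCounters(object):
    def add(self, key, column_parent, column, consistency_level):
        if not key:
            # Caught by write_results_exception_add() and sent back to the
            # caller as result.ire.
            raise InvalidRequestException(why='key must not be empty')
        return defer.succeed(None)
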
def process_remove(self, seqid, iprot, oprot):
args = remove_args()
args.read(iprot)
@@ -1822,6 +2042,37 @@ def write_results_exception_remove(self, error, result, seqid, oprot):
oprot.writeMessageEnd()
oprot.trans.flush()
+ def process_remove_counter(self, seqid, iprot, oprot):
+ args = remove_counter_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = remove_counter_result()
+ d = defer.maybeDeferred(self._handler.remove_counter, args.key, args.path, args.consistency_level)
+ d.addCallback(self.write_results_success_remove_counter, result, seqid, oprot)
+ d.addErrback(self.write_results_exception_remove_counter, result, seqid, oprot)
+ return d
+
+ def write_results_success_remove_counter(self, success, result, seqid, oprot):
+ result.success = success
+ oprot.writeMessageBegin("remove_counter", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def write_results_exception_remove_counter(self, error, result, seqid, oprot):
+ try:
+ error.raiseException()
+ except InvalidRequestException, ire:
+ result.ire = ire
+ except UnavailableException, ue:
+ result.ue = ue
+ except TimedOutException, te:
+ result.te = te
+ oprot.writeMessageBegin("remove_counter", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
def process_batch_mutate(self, seqid, iprot, oprot):
args = batch_mutate_args()
args.read(iprot)
@@ -2063,6 +2314,7 @@ def process_describe_splits(self, seqid, iprot, oprot):
result = describe_splits_result()
d = defer.maybeDeferred(self._handler.describe_splits, args.cfName, args.start_token, args.end_token, args.keys_per_split)
d.addCallback(self.write_results_success_describe_splits, result, seqid, oprot)
+ d.addErrback(self.write_results_exception_describe_splits, result, seqid, oprot)
return d
def write_results_success_describe_splits(self, success, result, seqid, oprot):
@@ -2072,6 +2324,16 @@ def write_results_success_describe_splits(self, success, result, seqid, oprot):
oprot.writeMessageEnd()
oprot.trans.flush()
+ def write_results_exception_describe_splits(self, error, result, seqid, oprot):
+ try:
+ error.raiseException()
+ except InvalidRequestException, ire:
+ result.ire = ire
+ oprot.writeMessageBegin("describe_splits", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
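
Editor's note: process_describe_splits previously had no errback and recv_describe_splits had no ire slot to check; with both added, an InvalidRequestException raised by the handler now travels back to the caller as an errback. A small client-side sketch of trapping it (the fallback value is illustrative).

from telephus.cassandra.ttypes import InvalidRequestException

def splits_or_empty(client, cf_name, start_token, end_token, keys_per_split):
    d = client.describe_splits(cf_name, start_token, end_token, keys_per_split)

    def bad_request(failure):
        failure.trap(InvalidRequestException)
        return []  # illustrative fallback

    return d.addErrback(bad_request)
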
def process_system_add_column_family(self, seqid, iprot, oprot):
args = system_add_column_family_args()
args.read(iprot)
@@ -2094,6 +2356,8 @@ def write_results_exception_system_add_column_family(self, error, result, seqid,
error.raiseException()
except InvalidRequestException, ire:
result.ire = ire
+ except SchemaDisagreementException, sde:
+ result.sde = sde
oprot.writeMessageBegin("system_add_column_family", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
@@ -2121,6 +2385,8 @@ def write_results_exception_system_drop_column_family(self, error, result, seqid
error.raiseException()
except InvalidRequestException, ire:
result.ire = ire
+ except SchemaDisagreementException, sde:
+ result.sde = sde
oprot.writeMessageBegin("system_drop_column_family", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
@@ -2148,6 +2414,8 @@ def write_results_exception_system_add_keyspace(self, error, result, seqid, opro
error.raiseException()
except InvalidRequestException, ire:
result.ire = ire
+ except SchemaDisagreementException, sde:
+ result.sde = sde
oprot.writeMessageBegin("system_add_keyspace", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
@@ -2175,6 +2443,8 @@ def write_results_exception_system_drop_keyspace(self, error, result, seqid, opr
error.raiseException()
except InvalidRequestException, ire:
result.ire = ire
+ except SchemaDisagreementException, sde:
+ result.sde = sde
oprot.writeMessageBegin("system_drop_keyspace", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
@@ -2202,6 +2472,8 @@ def write_results_exception_system_update_keyspace(self, error, result, seqid, o
error.raiseException()
except InvalidRequestException, ire:
result.ire = ire
+ except SchemaDisagreementException, sde:
+ result.sde = sde
oprot.writeMessageBegin("system_update_keyspace", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
@@ -2229,11 +2501,46 @@ def write_results_exception_system_update_column_family(self, error, result, seq
error.raiseException()
except InvalidRequestException, ire:
result.ire = ire
+ except SchemaDisagreementException, sde:
+ result.sde = sde
oprot.writeMessageBegin("system_update_column_family", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
+ def process_execute_cql_query(self, seqid, iprot, oprot):
+ args = execute_cql_query_args()
+ args.read(iprot)
+ iprot.readMessageEnd()
+ result = execute_cql_query_result()
+ d = defer.maybeDeferred(self._handler.execute_cql_query, args.query, args.compression)
+ d.addCallback(self.write_results_success_execute_cql_query, result, seqid, oprot)
+ d.addErrback(self.write_results_exception_execute_cql_query, result, seqid, oprot)
+ return d
+
+ def write_results_success_execute_cql_query(self, success, result, seqid, oprot):
+ result.success = success
+ oprot.writeMessageBegin("execute_cql_query", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
+ def write_results_exception_execute_cql_query(self, error, result, seqid, oprot):
+ try:
+ error.raiseException()
+ except InvalidRequestException, ire:
+ result.ire = ire
+ except UnavailableException, ue:
+ result.ue = ue
+ except TimedOutException, te:
+ result.te = te
+ except SchemaDisagreementException, sde:
+ result.sde = sde
+ oprot.writeMessageBegin("execute_cql_query", TMessageType.REPLY, seqid)
+ result.write(oprot)
+ oprot.writeMessageEnd()
+ oprot.trans.flush()
+
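
Editor's note: the CQL processing path mirrors the others: whatever the handler returns, directly or through a Deferred, becomes result.success and is serialized by write_results_success_execute_cql_query(). A stub handler sketch; CqlResult and CqlResultType are assumed to come from the generated ttypes module, which is not part of this diff.

from twisted.internet import defer
from telephus.cassandra.ttypes import CqlResult, CqlResultType

class CqlStubHandler(object):
    def execute_cql_query(self, query, compression):
        # A real handler would parse and execute the query; this stub just
        # acknowledges it with an empty VOID result.
        return defer.succeed(CqlResult(type=CqlResultType.VOID))
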
# HELPER FUNCTIONS AND STRUCTURES
@@ -2833,11 +3140,11 @@ def read(self, iprot):
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype70, _size67) = iprot.readListBegin()
- for _i71 in xrange(_size67):
- _elem72 = ColumnOrSuperColumn()
- _elem72.read(iprot)
- self.success.append(_elem72)
+ (_etype91, _size88) = iprot.readListBegin()
+ for _i92 in xrange(_size88):
+ _elem93 = ColumnOrSuperColumn()
+ _elem93.read(iprot)
+ self.success.append(_elem93)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -2872,8 +3179,8 @@ def write(self, oprot):
if self.success != None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter73 in self.success:
- iter73.write(oprot)
+ for iter94 in self.success:
+ iter94.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.ire != None:
@@ -3142,10 +3449,10 @@ def read(self, iprot):
if fid == 1:
if ftype == TType.LIST:
self.keys = []
- (_etype77, _size74) = iprot.readListBegin()
- for _i78 in xrange(_size74):
- _elem79 = iprot.readString();
- self.keys.append(_elem79)
+ (_etype98, _size95) = iprot.readListBegin()
+ for _i99 in xrange(_size95):
+ _elem100 = iprot.readString();
+ self.keys.append(_elem100)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -3179,8 +3486,8 @@ def write(self, oprot):
if self.keys != None:
oprot.writeFieldBegin('keys', TType.LIST, 1)
oprot.writeListBegin(TType.STRING, len(self.keys))
- for iter80 in self.keys:
- oprot.writeString(iter80)
+ for iter101 in self.keys:
+ oprot.writeString(iter101)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.column_parent != None:
@@ -3254,17 +3561,17 @@ def read(self, iprot):
if fid == 0:
if ftype == TType.MAP:
self.success = {}
- (_ktype82, _vtype83, _size81 ) = iprot.readMapBegin()
- for _i85 in xrange(_size81):
- _key86 = iprot.readString();
- _val87 = []
- (_etype91, _size88) = iprot.readListBegin()
- for _i92 in xrange(_size88):
- _elem93 = ColumnOrSuperColumn()
- _elem93.read(iprot)
- _val87.append(_elem93)
+ (_ktype103, _vtype104, _size102 ) = iprot.readMapBegin()
+ for _i106 in xrange(_size102):
+ _key107 = iprot.readString();
+ _val108 = []
+ (_etype112, _size109) = iprot.readListBegin()
+ for _i113 in xrange(_size109):
+ _elem114 = ColumnOrSuperColumn()
+ _elem114.read(iprot)
+ _val108.append(_elem114)
iprot.readListEnd()
- self.success[_key86] = _val87
+ self.success[_key107] = _val108
iprot.readMapEnd()
else:
iprot.skip(ftype)
@@ -3299,11 +3606,11 @@ def write(self, oprot):
if self.success != None:
oprot.writeFieldBegin('success', TType.MAP, 0)
oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.success))
- for kiter94,viter95 in self.success.items():
- oprot.writeString(kiter94)
- oprot.writeListBegin(TType.STRUCT, len(viter95))
- for iter96 in viter95:
- iter96.write(oprot)
+ for kiter115,viter116 in self.success.items():
+ oprot.writeString(kiter115)
+ oprot.writeListBegin(TType.STRUCT, len(viter116))
+ for iter117 in viter116:
+ iter117.write(oprot)
oprot.writeListEnd()
oprot.writeMapEnd()
oprot.writeFieldEnd()
@@ -3371,10 +3678,10 @@ def read(self, iprot):
if fid == 1:
if ftype == TType.LIST:
self.keys = []
- (_etype100, _size97) = iprot.readListBegin()
- for _i101 in xrange(_size97):
- _elem102 = iprot.readString();
- self.keys.append(_elem102)
+ (_etype121, _size118) = iprot.readListBegin()
+ for _i122 in xrange(_size118):
+ _elem123 = iprot.readString();
+ self.keys.append(_elem123)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -3408,8 +3715,8 @@ def write(self, oprot):
if self.keys != None:
oprot.writeFieldBegin('keys', TType.LIST, 1)
oprot.writeListBegin(TType.STRING, len(self.keys))
- for iter103 in self.keys:
- oprot.writeString(iter103)
+ for iter124 in self.keys:
+ oprot.writeString(iter124)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.column_parent != None:
@@ -3483,11 +3790,11 @@ def read(self, iprot):
if fid == 0:
if ftype == TType.MAP:
self.success = {}
- (_ktype105, _vtype106, _size104 ) = iprot.readMapBegin()
- for _i108 in xrange(_size104):
- _key109 = iprot.readString();
- _val110 = iprot.readI32();
- self.success[_key109] = _val110
+ (_ktype126, _vtype127, _size125 ) = iprot.readMapBegin()
+ for _i129 in xrange(_size125):
+ _key130 = iprot.readString();
+ _val131 = iprot.readI32();
+ self.success[_key130] = _val131
iprot.readMapEnd()
else:
iprot.skip(ftype)
@@ -3522,9 +3829,9 @@ def write(self, oprot):
if self.success != None:
oprot.writeFieldBegin('success', TType.MAP, 0)
oprot.writeMapBegin(TType.STRING, TType.I32, len(self.success))
- for kiter111,viter112 in self.success.items():
- oprot.writeString(kiter111)
- oprot.writeI32(viter112)
+ for kiter132,viter133 in self.success.items():
+ oprot.writeString(kiter132)
+ oprot.writeI32(viter133)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.ire != None:
@@ -3696,11 +4003,11 @@ def read(self, iprot):
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype116, _size113) = iprot.readListBegin()
- for _i117 in xrange(_size113):
- _elem118 = KeySlice()
- _elem118.read(iprot)
- self.success.append(_elem118)
+ (_etype137, _size134) = iprot.readListBegin()
+ for _i138 in xrange(_size134):
+ _elem139 = KeySlice()
+ _elem139.read(iprot)
+ self.success.append(_elem139)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -3735,8 +4042,8 @@ def write(self, oprot):
if self.success != None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter119 in self.success:
- iter119.write(oprot)
+ for iter140 in self.success:
+ iter140.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.ire != None:
@@ -3908,11 +4215,11 @@ def read(self, iprot):
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype123, _size120) = iprot.readListBegin()
- for _i124 in xrange(_size120):
- _elem125 = KeySlice()
- _elem125.read(iprot)
- self.success.append(_elem125)
+ (_etype144, _size141) = iprot.readListBegin()
+ for _i145 in xrange(_size141):
+ _elem146 = KeySlice()
+ _elem146.read(iprot)
+ self.success.append(_elem146)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -3947,8 +4254,8 @@ def write(self, oprot):
if self.success != None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter126 in self.success:
- iter126.write(oprot)
+ for iter147 in self.success:
+ iter147.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.ire != None:
@@ -4171,27 +4478,27 @@ def __eq__(self, other):
def __ne__(self, other):
return not (self == other)
-class remove_args:
+class add_args:
"""
Attributes:
- key
- - column_path
- - timestamp
+ - column_parent
+ - column
- consistency_level
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'key', None, None, ), # 1
- (2, TType.STRUCT, 'column_path', (ColumnPath, ColumnPath.thrift_spec), None, ), # 2
- (3, TType.I64, 'timestamp', None, None, ), # 3
+ (2, TType.STRUCT, 'column_parent', (ColumnParent, ColumnParent.thrift_spec), None, ), # 2
+ (3, TType.STRUCT, 'column', (CounterColumn, CounterColumn.thrift_spec), None, ), # 3
(4, TType.I32, 'consistency_level', None, 1, ), # 4
)
- def __init__(self, key=None, column_path=None, timestamp=None, consistency_level=thrift_spec[4][4],):
+ def __init__(self, key=None, column_parent=None, column=None, consistency_level=thrift_spec[4][4],):
self.key = key
- self.column_path = column_path
- self.timestamp = timestamp
+ self.column_parent = column_parent
+ self.column = column
self.consistency_level = consistency_level
def read(self, iprot):
@@ -4210,13 +4517,14 @@ def read(self, iprot):
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
- self.column_path = ColumnPath()
- self.column_path.read(iprot)
+ self.column_parent = ColumnParent()
+ self.column_parent.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
- if ftype == TType.I64:
- self.timestamp = iprot.readI64();
+ if ftype == TType.STRUCT:
+ self.column = CounterColumn()
+ self.column.read(iprot)
else:
iprot.skip(ftype)
elif fid == 4:
@@ -4233,18 +4541,18 @@ def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
- oprot.writeStructBegin('remove_args')
+ oprot.writeStructBegin('add_args')
if self.key != None:
oprot.writeFieldBegin('key', TType.STRING, 1)
oprot.writeString(self.key)
oprot.writeFieldEnd()
- if self.column_path != None:
- oprot.writeFieldBegin('column_path', TType.STRUCT, 2)
- self.column_path.write(oprot)
+ if self.column_parent != None:
+ oprot.writeFieldBegin('column_parent', TType.STRUCT, 2)
+ self.column_parent.write(oprot)
oprot.writeFieldEnd()
- if self.timestamp != None:
- oprot.writeFieldBegin('timestamp', TType.I64, 3)
- oprot.writeI64(self.timestamp)
+ if self.column != None:
+ oprot.writeFieldBegin('column', TType.STRUCT, 3)
+ self.column.write(oprot)
oprot.writeFieldEnd()
if self.consistency_level != None:
oprot.writeFieldBegin('consistency_level', TType.I32, 4)
@@ -4255,10 +4563,12 @@ def write(self, oprot):
def validate(self):
if self.key is None:
raise TProtocol.TProtocolException(message='Required field key is unset!')
- if self.column_path is None:
- raise TProtocol.TProtocolException(message='Required field column_path is unset!')
- if self.timestamp is None:
- raise TProtocol.TProtocolException(message='Required field timestamp is unset!')
+ if self.column_parent is None:
+ raise TProtocol.TProtocolException(message='Required field column_parent is unset!')
+ if self.column is None:
+ raise TProtocol.TProtocolException(message='Required field column is unset!')
+ if self.consistency_level is None:
+ raise TProtocol.TProtocolException(message='Required field consistency_level is unset!')
return
@@ -4273,7 +4583,7 @@ def __eq__(self, other):
def __ne__(self, other):
return not (self == other)
-class remove_result:
+class add_result:
"""
Attributes:
- ire
@@ -4329,7 +4639,7 @@ def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
- oprot.writeStructBegin('remove_result')
+ oprot.writeStructBegin('add_result')
if self.ire != None:
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
@@ -4359,20 +4669,384 @@ def __eq__(self, other):
def __ne__(self, other):
return not (self == other)
-class batch_mutate_args:
+class remove_args:
"""
Attributes:
- - mutation_map
+ - key
+ - column_path
+ - timestamp
- consistency_level
"""
thrift_spec = (
None, # 0
- (1, TType.MAP, 'mutation_map', (TType.STRING,None,TType.MAP,(TType.STRING,None,TType.LIST,(TType.STRUCT,(Mutation, Mutation.thrift_spec)))), None, ), # 1
- (2, TType.I32, 'consistency_level', None, 1, ), # 2
- )
-
- def __init__(self, mutation_map=None, consistency_level=thrift_spec[2][4],):
+ (1, TType.STRING, 'key', None, None, ), # 1
+ (2, TType.STRUCT, 'column_path', (ColumnPath, ColumnPath.thrift_spec), None, ), # 2
+ (3, TType.I64, 'timestamp', None, None, ), # 3
+ (4, TType.I32, 'consistency_level', None, 1, ), # 4
+ )
+
+ def __init__(self, key=None, column_path=None, timestamp=None, consistency_level=thrift_spec[4][4],):
+ self.key = key
+ self.column_path = column_path
+ self.timestamp = timestamp
+ self.consistency_level = consistency_level
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRING:
+ self.key = iprot.readString();
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRUCT:
+ self.column_path = ColumnPath()
+ self.column_path.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.I64:
+ self.timestamp = iprot.readI64();
+ else:
+ iprot.skip(ftype)
+ elif fid == 4:
+ if ftype == TType.I32:
+ self.consistency_level = iprot.readI32();
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('remove_args')
+ if self.key != None:
+ oprot.writeFieldBegin('key', TType.STRING, 1)
+ oprot.writeString(self.key)
+ oprot.writeFieldEnd()
+ if self.column_path != None:
+ oprot.writeFieldBegin('column_path', TType.STRUCT, 2)
+ self.column_path.write(oprot)
+ oprot.writeFieldEnd()
+ if self.timestamp != None:
+ oprot.writeFieldBegin('timestamp', TType.I64, 3)
+ oprot.writeI64(self.timestamp)
+ oprot.writeFieldEnd()
+ if self.consistency_level != None:
+ oprot.writeFieldBegin('consistency_level', TType.I32, 4)
+ oprot.writeI32(self.consistency_level)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+ def validate(self):
+ if self.key is None:
+ raise TProtocol.TProtocolException(message='Required field key is unset!')
+ if self.column_path is None:
+ raise TProtocol.TProtocolException(message='Required field column_path is unset!')
+ if self.timestamp is None:
+ raise TProtocol.TProtocolException(message='Required field timestamp is unset!')
+ return
+
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class remove_result:
+ """
+ Attributes:
+ - ire
+ - ue
+ - te
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
+ (2, TType.STRUCT, 'ue', (UnavailableException, UnavailableException.thrift_spec), None, ), # 2
+ (3, TType.STRUCT, 'te', (TimedOutException, TimedOutException.thrift_spec), None, ), # 3
+ )
+
+ def __init__(self, ire=None, ue=None, te=None,):
+ self.ire = ire
+ self.ue = ue
+ self.te = te
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRUCT:
+ self.ire = InvalidRequestException()
+ self.ire.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRUCT:
+ self.ue = UnavailableException()
+ self.ue.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.STRUCT:
+ self.te = TimedOutException()
+ self.te.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('remove_result')
+ if self.ire != None:
+ oprot.writeFieldBegin('ire', TType.STRUCT, 1)
+ self.ire.write(oprot)
+ oprot.writeFieldEnd()
+ if self.ue != None:
+ oprot.writeFieldBegin('ue', TType.STRUCT, 2)
+ self.ue.write(oprot)
+ oprot.writeFieldEnd()
+ if self.te != None:
+ oprot.writeFieldBegin('te', TType.STRUCT, 3)
+ self.te.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+ def validate(self):
+ return
+
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class remove_counter_args:
+ """
+ Attributes:
+ - key
+ - path
+ - consistency_level
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRING, 'key', None, None, ), # 1
+ (2, TType.STRUCT, 'path', (ColumnPath, ColumnPath.thrift_spec), None, ), # 2
+ (3, TType.I32, 'consistency_level', None, 1, ), # 3
+ )
+
+ def __init__(self, key=None, path=None, consistency_level=thrift_spec[3][4],):
+ self.key = key
+ self.path = path
+ self.consistency_level = consistency_level
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRING:
+ self.key = iprot.readString();
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRUCT:
+ self.path = ColumnPath()
+ self.path.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.I32:
+ self.consistency_level = iprot.readI32();
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('remove_counter_args')
+ if self.key != None:
+ oprot.writeFieldBegin('key', TType.STRING, 1)
+ oprot.writeString(self.key)
+ oprot.writeFieldEnd()
+ if self.path != None:
+ oprot.writeFieldBegin('path', TType.STRUCT, 2)
+ self.path.write(oprot)
+ oprot.writeFieldEnd()
+ if self.consistency_level != None:
+ oprot.writeFieldBegin('consistency_level', TType.I32, 3)
+ oprot.writeI32(self.consistency_level)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+ def validate(self):
+ if self.key is None:
+ raise TProtocol.TProtocolException(message='Required field key is unset!')
+ if self.path is None:
+ raise TProtocol.TProtocolException(message='Required field path is unset!')
+ if self.consistency_level is None:
+ raise TProtocol.TProtocolException(message='Required field consistency_level is unset!')
+ return
+
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class remove_counter_result:
+ """
+ Attributes:
+ - ire
+ - ue
+ - te
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
+ (2, TType.STRUCT, 'ue', (UnavailableException, UnavailableException.thrift_spec), None, ), # 2
+ (3, TType.STRUCT, 'te', (TimedOutException, TimedOutException.thrift_spec), None, ), # 3
+ )
+
+ def __init__(self, ire=None, ue=None, te=None,):
+ self.ire = ire
+ self.ue = ue
+ self.te = te
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRUCT:
+ self.ire = InvalidRequestException()
+ self.ire.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRUCT:
+ self.ue = UnavailableException()
+ self.ue.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.STRUCT:
+ self.te = TimedOutException()
+ self.te.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('remove_counter_result')
+ if self.ire != None:
+ oprot.writeFieldBegin('ire', TType.STRUCT, 1)
+ self.ire.write(oprot)
+ oprot.writeFieldEnd()
+ if self.ue != None:
+ oprot.writeFieldBegin('ue', TType.STRUCT, 2)
+ self.ue.write(oprot)
+ oprot.writeFieldEnd()
+ if self.te != None:
+ oprot.writeFieldBegin('te', TType.STRUCT, 3)
+ self.te.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+ def validate(self):
+ return
+
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class batch_mutate_args:
+ """
+ Attributes:
+ - mutation_map
+ - consistency_level
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.MAP, 'mutation_map', (TType.STRING,None,TType.MAP,(TType.STRING,None,TType.LIST,(TType.STRUCT,(Mutation, Mutation.thrift_spec)))), None, ), # 1
+ (2, TType.I32, 'consistency_level', None, 1, ), # 2
+ )
+
+ def __init__(self, mutation_map=None, consistency_level=thrift_spec[2][4],):
self.mutation_map = mutation_map
self.consistency_level = consistency_level
@@ -4388,23 +5062,23 @@ def read(self, iprot):
if fid == 1:
if ftype == TType.MAP:
self.mutation_map = {}
- (_ktype128, _vtype129, _size127 ) = iprot.readMapBegin()
- for _i131 in xrange(_size127):
- _key132 = iprot.readString();
- _val133 = {}
- (_ktype135, _vtype136, _size134 ) = iprot.readMapBegin()
- for _i138 in xrange(_size134):
- _key139 = iprot.readString();
- _val140 = []
- (_etype144, _size141) = iprot.readListBegin()
- for _i145 in xrange(_size141):
- _elem146 = Mutation()
- _elem146.read(iprot)
- _val140.append(_elem146)
+ (_ktype149, _vtype150, _size148 ) = iprot.readMapBegin()
+ for _i152 in xrange(_size148):
+ _key153 = iprot.readString();
+ _val154 = {}
+ (_ktype156, _vtype157, _size155 ) = iprot.readMapBegin()
+ for _i159 in xrange(_size155):
+ _key160 = iprot.readString();
+ _val161 = []
+ (_etype165, _size162) = iprot.readListBegin()
+ for _i166 in xrange(_size162):
+ _elem167 = Mutation()
+ _elem167.read(iprot)
+ _val161.append(_elem167)
iprot.readListEnd()
- _val133[_key139] = _val140
+ _val154[_key160] = _val161
iprot.readMapEnd()
- self.mutation_map[_key132] = _val133
+ self.mutation_map[_key153] = _val154
iprot.readMapEnd()
else:
iprot.skip(ftype)
@@ -4426,14 +5100,14 @@ def write(self, oprot):
if self.mutation_map != None:
oprot.writeFieldBegin('mutation_map', TType.MAP, 1)
oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.mutation_map))
- for kiter147,viter148 in self.mutation_map.items():
- oprot.writeString(kiter147)
- oprot.writeMapBegin(TType.STRING, TType.LIST, len(viter148))
- for kiter149,viter150 in viter148.items():
- oprot.writeString(kiter149)
- oprot.writeListBegin(TType.STRUCT, len(viter150))
- for iter151 in viter150:
- iter151.write(oprot)
+ for kiter168,viter169 in self.mutation_map.items():
+ oprot.writeString(kiter168)
+ oprot.writeMapBegin(TType.STRING, TType.LIST, len(viter169))
+ for kiter170,viter171 in viter169.items():
+ oprot.writeString(kiter170)
+ oprot.writeListBegin(TType.STRUCT, len(viter171))
+ for iter172 in viter171:
+ iter172.write(oprot)
oprot.writeListEnd()
oprot.writeMapEnd()
oprot.writeMapEnd()
@@ -4752,16 +5426,16 @@ def read(self, iprot):
if fid == 0:
if ftype == TType.MAP:
self.success = {}
- (_ktype153, _vtype154, _size152 ) = iprot.readMapBegin()
- for _i156 in xrange(_size152):
- _key157 = iprot.readString();
- _val158 = []
- (_etype162, _size159) = iprot.readListBegin()
- for _i163 in xrange(_size159):
- _elem164 = iprot.readString();
- _val158.append(_elem164)
+ (_ktype174, _vtype175, _size173 ) = iprot.readMapBegin()
+ for _i177 in xrange(_size173):
+ _key178 = iprot.readString();
+ _val179 = []
+ (_etype183, _size180) = iprot.readListBegin()
+ for _i184 in xrange(_size180):
+ _elem185 = iprot.readString();
+ _val179.append(_elem185)
iprot.readListEnd()
- self.success[_key157] = _val158
+ self.success[_key178] = _val179
iprot.readMapEnd()
else:
iprot.skip(ftype)
@@ -4784,11 +5458,11 @@ def write(self, oprot):
if self.success != None:
oprot.writeFieldBegin('success', TType.MAP, 0)
oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.success))
- for kiter165,viter166 in self.success.items():
- oprot.writeString(kiter165)
- oprot.writeListBegin(TType.STRING, len(viter166))
- for iter167 in viter166:
- oprot.writeString(iter167)
+ for kiter186,viter187 in self.success.items():
+ oprot.writeString(kiter186)
+ oprot.writeListBegin(TType.STRING, len(viter187))
+ for iter188 in viter187:
+ oprot.writeString(iter188)
oprot.writeListEnd()
oprot.writeMapEnd()
oprot.writeFieldEnd()
@@ -4882,11 +5556,11 @@ def read(self, iprot):
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype171, _size168) = iprot.readListBegin()
- for _i172 in xrange(_size168):
- _elem173 = KsDef()
- _elem173.read(iprot)
- self.success.append(_elem173)
+ (_etype192, _size189) = iprot.readListBegin()
+ for _i193 in xrange(_size189):
+ _elem194 = KsDef()
+ _elem194.read(iprot)
+ self.success.append(_elem194)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -4909,8 +5583,8 @@ def write(self, oprot):
if self.success != None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter174 in self.success:
- iter174.write(oprot)
+ for iter195 in self.success:
+ iter195.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.ire != None:
@@ -5221,11 +5895,11 @@ def read(self, iprot):
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype178, _size175) = iprot.readListBegin()
- for _i179 in xrange(_size175):
- _elem180 = TokenRange()
- _elem180.read(iprot)
- self.success.append(_elem180)
+ (_etype199, _size196) = iprot.readListBegin()
+ for _i200 in xrange(_size196):
+ _elem201 = TokenRange()
+ _elem201.read(iprot)
+ self.success.append(_elem201)
iprot.readListEnd()
else:
iprot.skip(ftype)
@@ -5248,8 +5922,8 @@ def write(self, oprot):
if self.success != None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
- for iter181 in self.success:
- iter181.write(oprot)
+ for iter202 in self.success:
+ iter202.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.ire != None:
@@ -5724,14 +6398,17 @@ class describe_splits_result:
"""
Attributes:
- success
+ - ire
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0
+ (1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
)
- def __init__(self, success=None,):
+ def __init__(self, success=None, ire=None,):
self.success = success
+ self.ire = ire
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -5745,13 +6422,19 @@ def read(self, iprot):
if fid == 0:
if ftype == TType.LIST:
self.success = []
- (_etype185, _size182) = iprot.readListBegin()
- for _i186 in xrange(_size182):
- _elem187 = iprot.readString();
- self.success.append(_elem187)
+ (_etype206, _size203) = iprot.readListBegin()
+ for _i207 in xrange(_size203):
+ _elem208 = iprot.readString();
+ self.success.append(_elem208)
iprot.readListEnd()
else:
iprot.skip(ftype)
+ elif fid == 1:
+ if ftype == TType.STRUCT:
+ self.ire = InvalidRequestException()
+ self.ire.read(iprot)
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -5765,10 +6448,14 @@ def write(self, oprot):
if self.success != None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRING, len(self.success))
- for iter188 in self.success:
- oprot.writeString(iter188)
+ for iter209 in self.success:
+ oprot.writeString(iter209)
oprot.writeListEnd()
oprot.writeFieldEnd()
+ if self.ire != None:
+ oprot.writeFieldBegin('ire', TType.STRUCT, 1)
+ self.ire.write(oprot)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
@@ -5853,16 +6540,19 @@ class system_add_column_family_result:
Attributes:
- success
- ire
+ - sde
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
+ (2, TType.STRUCT, 'sde', (SchemaDisagreementException, SchemaDisagreementException.thrift_spec), None, ), # 2
)
- def __init__(self, success=None, ire=None,):
+ def __init__(self, success=None, ire=None, sde=None,):
self.success = success
self.ire = ire
+ self.sde = sde
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -5884,6 +6574,12 @@ def read(self, iprot):
self.ire.read(iprot)
else:
iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRUCT:
+ self.sde = SchemaDisagreementException()
+ self.sde.read(iprot)
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -5902,6 +6598,10 @@ def write(self, oprot):
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
+ if self.sde != None:
+ oprot.writeFieldBegin('sde', TType.STRUCT, 2)
+ self.sde.write(oprot)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
@@ -5985,16 +6685,19 @@ class system_drop_column_family_result:
Attributes:
- success
- ire
+ - sde
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
+ (2, TType.STRUCT, 'sde', (SchemaDisagreementException, SchemaDisagreementException.thrift_spec), None, ), # 2
)
- def __init__(self, success=None, ire=None,):
+ def __init__(self, success=None, ire=None, sde=None,):
self.success = success
self.ire = ire
+ self.sde = sde
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -6016,6 +6719,12 @@ def read(self, iprot):
self.ire.read(iprot)
else:
iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRUCT:
+ self.sde = SchemaDisagreementException()
+ self.sde.read(iprot)
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -6034,6 +6743,10 @@ def write(self, oprot):
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
+ if self.sde != None:
+ oprot.writeFieldBegin('sde', TType.STRUCT, 2)
+ self.sde.write(oprot)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
@@ -6118,16 +6831,19 @@ class system_add_keyspace_result:
Attributes:
- success
- ire
+ - sde
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
+ (2, TType.STRUCT, 'sde', (SchemaDisagreementException, SchemaDisagreementException.thrift_spec), None, ), # 2
)
- def __init__(self, success=None, ire=None,):
+ def __init__(self, success=None, ire=None, sde=None,):
self.success = success
self.ire = ire
+ self.sde = sde
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -6149,6 +6865,12 @@ def read(self, iprot):
self.ire.read(iprot)
else:
iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRUCT:
+ self.sde = SchemaDisagreementException()
+ self.sde.read(iprot)
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -6167,6 +6889,10 @@ def write(self, oprot):
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
+ if self.sde != None:
+ oprot.writeFieldBegin('sde', TType.STRUCT, 2)
+ self.sde.write(oprot)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
@@ -6250,16 +6976,19 @@ class system_drop_keyspace_result:
Attributes:
- success
- ire
+ - sde
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
+ (2, TType.STRUCT, 'sde', (SchemaDisagreementException, SchemaDisagreementException.thrift_spec), None, ), # 2
)
- def __init__(self, success=None, ire=None,):
+ def __init__(self, success=None, ire=None, sde=None,):
self.success = success
self.ire = ire
+ self.sde = sde
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -6281,6 +7010,12 @@ def read(self, iprot):
self.ire.read(iprot)
else:
iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRUCT:
+ self.sde = SchemaDisagreementException()
+ self.sde.read(iprot)
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -6299,6 +7034,10 @@ def write(self, oprot):
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
+ if self.sde != None:
+ oprot.writeFieldBegin('sde', TType.STRUCT, 2)
+ self.sde.write(oprot)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
@@ -6383,16 +7122,19 @@ class system_update_keyspace_result:
Attributes:
- success
- ire
+ - sde
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
+ (2, TType.STRUCT, 'sde', (SchemaDisagreementException, SchemaDisagreementException.thrift_spec), None, ), # 2
)
- def __init__(self, success=None, ire=None,):
+ def __init__(self, success=None, ire=None, sde=None,):
self.success = success
self.ire = ire
+ self.sde = sde
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -6414,6 +7156,12 @@ def read(self, iprot):
self.ire.read(iprot)
else:
iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRUCT:
+ self.sde = SchemaDisagreementException()
+ self.sde.read(iprot)
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -6432,6 +7180,10 @@ def write(self, oprot):
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
+ if self.sde != None:
+ oprot.writeFieldBegin('sde', TType.STRUCT, 2)
+ self.sde.write(oprot)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
@@ -6516,16 +7268,19 @@ class system_update_column_family_result:
Attributes:
- success
- ire
+ - sde
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
+ (2, TType.STRUCT, 'sde', (SchemaDisagreementException, SchemaDisagreementException.thrift_spec), None, ), # 2
)
- def __init__(self, success=None, ire=None,):
+ def __init__(self, success=None, ire=None, sde=None,):
self.success = success
self.ire = ire
+ self.sde = sde
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -6547,6 +7302,12 @@ def read(self, iprot):
self.ire.read(iprot)
else:
iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRUCT:
+ self.sde = SchemaDisagreementException()
+ self.sde.read(iprot)
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -6565,6 +7326,196 @@ def write(self, oprot):
oprot.writeFieldBegin('ire', TType.STRUCT, 1)
self.ire.write(oprot)
oprot.writeFieldEnd()
+ if self.sde != None:
+ oprot.writeFieldBegin('sde', TType.STRUCT, 2)
+ self.sde.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+ def validate(self):
+ return
+
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
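The hunks above add a second exception slot ('sde') to the schema-modifying *_result structs, including system_update_keyspace_result and system_update_column_family_result, so those calls can now report a SchemaDisagreementException when the cluster has not yet converged on one schema version. Below is a minimal, hedged sketch of handling it; `client` is assumed to be an already-opened, generated Cassandra.Client and `ks_def` a fully populated KsDef, neither of which comes from this commit.

    from telephus.cassandra.ttypes import SchemaDisagreementException

    # Sketch only: ks_def is a KsDef built elsewhere; client is an open,
    # plain Thrift Cassandra.Client (not telephus's Twisted wrapper).
    try:
        new_version = client.system_update_keyspace(ks_def)
    except SchemaDisagreementException:
        # Replicas do not yet agree on a schema version; a caller would
        # usually back off and retry, or inspect which endpoints lag.
        versions = client.describe_schema_versions()
        print 'schema still settling: %r' % (versions,)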
+class execute_cql_query_args:
+ """
+ Attributes:
+ - query
+ - compression
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRING, 'query', None, None, ), # 1
+ (2, TType.I32, 'compression', None, None, ), # 2
+ )
+
+ def __init__(self, query=None, compression=None,):
+ self.query = query
+ self.compression = compression
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRING:
+ self.query = iprot.readString();
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.I32:
+ self.compression = iprot.readI32();
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('execute_cql_query_args')
+ if self.query != None:
+ oprot.writeFieldBegin('query', TType.STRING, 1)
+ oprot.writeString(self.query)
+ oprot.writeFieldEnd()
+ if self.compression != None:
+ oprot.writeFieldBegin('compression', TType.I32, 2)
+ oprot.writeI32(self.compression)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+ def validate(self):
+ if self.query is None:
+ raise TProtocol.TProtocolException(message='Required field query is unset!')
+ if self.compression is None:
+ raise TProtocol.TProtocolException(message='Required field compression is unset!')
+ return
+
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class execute_cql_query_result:
+ """
+ Attributes:
+ - success
+ - ire
+ - ue
+ - te
+ - sde
+ """
+
+ thrift_spec = (
+ (0, TType.STRUCT, 'success', (CqlResult, CqlResult.thrift_spec), None, ), # 0
+ (1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
+ (2, TType.STRUCT, 'ue', (UnavailableException, UnavailableException.thrift_spec), None, ), # 2
+ (3, TType.STRUCT, 'te', (TimedOutException, TimedOutException.thrift_spec), None, ), # 3
+ (4, TType.STRUCT, 'sde', (SchemaDisagreementException, SchemaDisagreementException.thrift_spec), None, ), # 4
+ )
+
+ def __init__(self, success=None, ire=None, ue=None, te=None, sde=None,):
+ self.success = success
+ self.ire = ire
+ self.ue = ue
+ self.te = te
+ self.sde = sde
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 0:
+ if ftype == TType.STRUCT:
+ self.success = CqlResult()
+ self.success.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 1:
+ if ftype == TType.STRUCT:
+ self.ire = InvalidRequestException()
+ self.ire.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRUCT:
+ self.ue = UnavailableException()
+ self.ue.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.STRUCT:
+ self.te = TimedOutException()
+ self.te.read(iprot)
+ else:
+ iprot.skip(ftype)
+ elif fid == 4:
+ if ftype == TType.STRUCT:
+ self.sde = SchemaDisagreementException()
+ self.sde.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('execute_cql_query_result')
+ if self.success != None:
+ oprot.writeFieldBegin('success', TType.STRUCT, 0)
+ self.success.write(oprot)
+ oprot.writeFieldEnd()
+ if self.ire != None:
+ oprot.writeFieldBegin('ire', TType.STRUCT, 1)
+ self.ire.write(oprot)
+ oprot.writeFieldEnd()
+ if self.ue != None:
+ oprot.writeFieldBegin('ue', TType.STRUCT, 2)
+ self.ue.write(oprot)
+ oprot.writeFieldEnd()
+ if self.te != None:
+ oprot.writeFieldBegin('te', TType.STRUCT, 3)
+ self.te.write(oprot)
+ oprot.writeFieldEnd()
+ if self.sde != None:
+ oprot.writeFieldBegin('sde', TType.STRUCT, 4)
+ self.sde.write(oprot)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
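execute_cql_query_args and execute_cql_query_result above are the wire-level structs behind the new execute_cql_query(query, compression) call. The following is a hedged usage sketch against the plain, blocking Thrift client rather than telephus's Twisted layer; the host, port, keyspace name, and query string are illustrative assumptions, not values taken from this commit.

    from thrift.transport import TSocket, TTransport
    from thrift.protocol import TBinaryProtocol
    from telephus.cassandra import Cassandra
    from telephus.cassandra.ttypes import Compression

    # Cassandra 0.8 is assumed to be listening on the default framed
    # Thrift port (9160) on localhost.
    socket = TSocket.TSocket('localhost', 9160)
    transport = TTransport.TFramedTransport(socket)
    protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
    client = Cassandra.Client(protocol)
    transport.open()

    client.set_keyspace('Keyspace1')    # hypothetical keyspace
    result = client.execute_cql_query(
        "SELECT * FROM Standard1 WHERE KEY = 'key1'",   # hypothetical CQL
        Compression.NONE)
    print result    # a CqlResult struct
    transport.close()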
View
2 telephus/cassandra/constants.py
@@ -7,4 +7,4 @@
from thrift.Thrift import *
from ttypes import *
-VERSION = "19.4.0"
+VERSION = "20.1.0"
View
700 telephus/cassandra/ttypes.py
@@ -16,36 +16,55 @@
class ConsistencyLevel:
"""
- The ConsistencyLevel is an enum that controls both read and write behavior based on <ReplicationFactor> in your
- storage-conf.xml. The different consistency levels have different meanings, depending on if you're doing a write or read
- operation. Note that if W + R > ReplicationFactor, where W is the number of nodes to block for on write, and R
- the number to block for on reads, you will have strongly consistent behavior; that is, readers will always see the most
- recent write. Of these, the most interesting is to do QUORUM reads and writes, which gives you consistency while still
- allowing availability in the face of node failures up to half of <ReplicationFactor>. Of course if latency is more
- important than consistency then you can use lower values for either or both.
+ The ConsistencyLevel is an enum that controls both read and write
+ behavior based on the ReplicationFactor of the keyspace. The
+ different consistency levels have different meanings, depending on
+ if you're doing a write or read operation.
+
+ If W + R > ReplicationFactor, where W is the number of nodes to
+ block for on write, and R the number to block for on reads, you
+ will have strongly consistent behavior; that is, readers will
+ always see the most recent write. Of these, the most interesting is
+ to do QUORUM reads and writes, which gives you consistency while
+ still allowing availability in the face of node failures up to half
+ of <ReplicationFactor>. Of course if latency is more important than
+ consistency then you can use lower values for either or both.
+
+ Some ConsistencyLevels (ONE, TWO, THREE) refer to a specific number
+ of replicas rather than a logical concept that adjusts
+ automatically with the replication factor. Of these, only ONE is
+ commonly used; TWO and (even more rarely) THREE are only useful
+ when you care more about guaranteeing a certain level of
+ durability than consistency.
Write consistency levels make the following guarantees before reporting success to the client:
ANY Ensure that the write has been written once somewhere, including possibly being hinted in a non-target node.
ONE Ensure that the write has been written to at least 1 node's commit log and memory table
+ TWO Ensure that the write has been written to at least 2 nodes' commit logs and memory tables
+ THREE Ensure that the write has been written to at least 3 nodes' commit logs and memory tables
QUORUM Ensure that the write has been written to <ReplicationFactor> / 2 + 1 nodes
LOCAL_QUORUM Ensure that the write has been written to <ReplicationFactor> / 2 + 1 nodes, within the local datacenter (requires NetworkTopologyStrategy)
EACH_QUORUM Ensure that the write has been written to <ReplicationFactor> / 2 + 1 nodes in each datacenter (requires NetworkTopologyStrategy)
ALL Ensure that the write is written to <code>&lt;ReplicationFactor&gt;</code> nodes before responding to the client.
- Read:
+ Read consistency levels make the following guarantees before returning successful results to the client:
ANY Not supported. You probably want ONE instead.
- ONE Will return the record returned by the first node to respond. A consistency check is always done in a background thread to fix any consistency issues when ConsistencyLevel.ONE is used. This means subsequent calls will have correct data even if the initial read gets an older value. (This is called 'read repair'.)
- QUORUM Will query all storage nodes and return the record with the most recent timestamp once it has at least a majority of replicas reported. Again, the remaining replicas will be checked in the background.
+ ONE Returns the record obtained from a single replica.
+ TWO Returns the record with the most recent timestamp once two replicas have replied.
+ THREE Returns the record with the most recent timestamp once three replicas have replied.
+ QUORUM Returns the record with the most recent timestamp once a majority of replicas have replied.
LOCAL_QUORUM Returns the record with the most recent timestamp once a majority of replicas within the local datacenter have replied.
EACH_QUORUM Returns the record with the most recent timestamp once a majority of replicas within each datacenter have replied.
- ALL Queries all storage nodes and returns the record with the most recent timestamp.
+ ALL Returns the record with the most recent timestamp once all replicas have replied (implies no replica may be down).
"""
ONE = 1
QUORUM = 2
LOCAL_QUORUM = 3
EACH_QUORUM = 4
ALL = 5
ANY = 6
+ TWO = 7
+ THREE = 8
_VALUES_TO_NAMES = {
1: "ONE",
@@ -54,6 +73,8 @@ class ConsistencyLevel:
4: "EACH_QUORUM",
5: "ALL",
6: "ANY",
+ 7: "TWO",
+ 8: "THREE",
}
_NAMES_TO_VALUES = {
@@ -63,6 +84,8 @@ class ConsistencyLevel:
"EACH_QUORUM": 4,
"ALL": 5,
"ANY": 6,
+ "TWO": 7,
+ "THREE": 8,
}
class IndexOperator:
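The rewritten docstring's W + R > ReplicationFactor rule, and the difference between the quorum levels and the new absolute-count levels TWO and THREE, can be made concrete with a short illustration; the numbers below are invented for the example and do not come from this commit.

    from telephus.cassandra.ttypes import ConsistencyLevel

    # With RF = 3, a QUORUM write blocks for 2 replicas and a QUORUM read
    # waits for 2 replies, so W + R = 4 > 3: every read overlaps the most
    # recent successful write.
    rf = 3
    w = rf // 2 + 1          # QUORUM write
    r = rf // 2 + 1          # QUORUM read
    assert w + r > rf

    # TWO and THREE name absolute replica counts rather than a fraction of
    # the replication factor; they buy extra durability, not the overlap above.
    cl = ConsistencyLevel.TWO
    print ConsistencyLevel._VALUES_TO_NAMES[cl]    # -> 'TWO'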
@@ -99,6 +122,40 @@ class IndexType:
"KEYS": 0,
}
+class Compression:
+ """
+ CQL query compression
+ """
+ GZIP = 1
+ NONE = 2
+
+ _VALUES_TO_NAMES = {