From 869bb26c8249cf717bf9213e07d9d3181e6bad98 Mon Sep 17 00:00:00 2001
From: mjsax
Date: Wed, 30 Sep 2015 00:37:30 +0200
Subject: [PATCH 1/3] added new parameters to TopologyBuilder

 - batch_size (int): sets the same batch size for all UDF-declared output streams
 - batch_sizes (Map): sets the batch size for individual streams (including system streams)

---
 .../backtype/storm/generated/Assignment.java  | 194 ++---
 .../backtype/storm/generated/BoltStats.java   | 442 +++++-----
 .../storm/generated/ClusterSummary.java       | 110 +--
 .../generated/ClusterWorkerHeartbeat.java     |  54 +-
 .../storm/generated/ComponentCommon.java      | 238 ++++-
 .../backtype/storm/generated/Credentials.java |  46 +-
 .../storm/generated/ExecutorStats.java        | 170 ++--
 .../storm/generated/LSApprovedWorkers.java    |  46 +-
 .../generated/LSSupervisorAssignments.java    |  50 +-
 .../storm/generated/LSWorkerHeartbeat.java    |  38 +-
 .../storm/generated/LocalAssignment.java      |  38 +-
 .../storm/generated/LocalStateData.java       |  50 +-
 .../backtype/storm/generated/NodeInfo.java    |  34 +-
 .../storm/generated/RebalanceOptions.java     |  46 +-
 .../backtype/storm/generated/SpoutStats.java  | 254 +++---
 .../backtype/storm/generated/StormBase.java   |  94 +-
 .../storm/generated/StormTopology.java        | 146 ++--
 .../storm/generated/SupervisorInfo.java       | 110 +--
 .../storm/generated/TopologyInfo.java         | 162 ++--
 .../storm/topology/TopologyBuilder.java       | 216 ++++-
 storm-core/src/py/storm/ttypes.py             | 821 +++++++++---------
 storm-core/src/storm.thrift                   |   3 +-
 22 files changed, 1858 insertions(+), 1504 deletions(-)

diff --git a/storm-core/src/jvm/backtype/storm/generated/Assignment.java b/storm-core/src/jvm/backtype/storm/generated/Assignment.java
index f576c84c7a3..0a8dd77a6ee 100644
--- a/storm-core/src/jvm/backtype/storm/generated/Assignment.java
+++ b/storm-core/src/jvm/backtype/storm/generated/Assignment.java
@@ -51,7 +51,7 @@ import org.slf4j.LoggerFactory;
 @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
-@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-3")
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-29")
 public class Assignment implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Assignment");

@@ -678,15 +678,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Assignment struct)
           case 2: // NODE_HOST
             if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
               {
-                org.apache.thrift.protocol.TMap _map388 = iprot.readMapBegin();
-                struct.node_host = new HashMap(2*_map388.size);
-                String _key389;
-                String _val390;
-                for (int _i391 = 0; _i391 < _map388.size; ++_i391)
+                org.apache.thrift.protocol.TMap _map398 = iprot.readMapBegin();
+                struct.node_host = new HashMap(2*_map398.size);
+                String _key399;
+                String _val400;
+                for (int _i401 = 0; _i401 < _map398.size; ++_i401)
                 {
-                  _key389 = iprot.readString();
-                  _val390 = iprot.readString();
-                  struct.node_host.put(_key389, _val390);
+                  _key399 = iprot.readString();
+                  _val400 = iprot.readString();
+                  struct.node_host.put(_key399, _val400);
                 }
                 iprot.readMapEnd();
               }
@@ -698,26 +698,26 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Assignment struct)
           case 3: // EXECUTOR_NODE_PORT
             if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
              {
-                org.apache.thrift.protocol.TMap _map392 = iprot.readMapBegin();
-                struct.executor_node_port = new HashMap,NodeInfo>(2*_map392.size);
-                List _key393;
- 
NodeInfo _val394; - for (int _i395 = 0; _i395 < _map392.size; ++_i395) + org.apache.thrift.protocol.TMap _map402 = iprot.readMapBegin(); + struct.executor_node_port = new HashMap,NodeInfo>(2*_map402.size); + List _key403; + NodeInfo _val404; + for (int _i405 = 0; _i405 < _map402.size; ++_i405) { { - org.apache.thrift.protocol.TList _list396 = iprot.readListBegin(); - _key393 = new ArrayList(_list396.size); - long _elem397; - for (int _i398 = 0; _i398 < _list396.size; ++_i398) + org.apache.thrift.protocol.TList _list406 = iprot.readListBegin(); + _key403 = new ArrayList(_list406.size); + long _elem407; + for (int _i408 = 0; _i408 < _list406.size; ++_i408) { - _elem397 = iprot.readI64(); - _key393.add(_elem397); + _elem407 = iprot.readI64(); + _key403.add(_elem407); } iprot.readListEnd(); } - _val394 = new NodeInfo(); - _val394.read(iprot); - struct.executor_node_port.put(_key393, _val394); + _val404 = new NodeInfo(); + _val404.read(iprot); + struct.executor_node_port.put(_key403, _val404); } iprot.readMapEnd(); } @@ -729,25 +729,25 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Assignment struct) case 4: // EXECUTOR_START_TIME_SECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map399 = iprot.readMapBegin(); - struct.executor_start_time_secs = new HashMap,Long>(2*_map399.size); - List _key400; - long _val401; - for (int _i402 = 0; _i402 < _map399.size; ++_i402) + org.apache.thrift.protocol.TMap _map409 = iprot.readMapBegin(); + struct.executor_start_time_secs = new HashMap,Long>(2*_map409.size); + List _key410; + long _val411; + for (int _i412 = 0; _i412 < _map409.size; ++_i412) { { - org.apache.thrift.protocol.TList _list403 = iprot.readListBegin(); - _key400 = new ArrayList(_list403.size); - long _elem404; - for (int _i405 = 0; _i405 < _list403.size; ++_i405) + org.apache.thrift.protocol.TList _list413 = iprot.readListBegin(); + _key410 = new ArrayList(_list413.size); + long _elem414; + for (int _i415 = 0; _i415 < _list413.size; ++_i415) { - _elem404 = iprot.readI64(); - _key400.add(_elem404); + _elem414 = iprot.readI64(); + _key410.add(_elem414); } iprot.readListEnd(); } - _val401 = iprot.readI64(); - struct.executor_start_time_secs.put(_key400, _val401); + _val411 = iprot.readI64(); + struct.executor_start_time_secs.put(_key410, _val411); } iprot.readMapEnd(); } @@ -779,10 +779,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Assignment struct) oprot.writeFieldBegin(NODE_HOST_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.node_host.size())); - for (Map.Entry _iter406 : struct.node_host.entrySet()) + for (Map.Entry _iter416 : struct.node_host.entrySet()) { - oprot.writeString(_iter406.getKey()); - oprot.writeString(_iter406.getValue()); + oprot.writeString(_iter416.getKey()); + oprot.writeString(_iter416.getValue()); } oprot.writeMapEnd(); } @@ -794,17 +794,17 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Assignment struct) oprot.writeFieldBegin(EXECUTOR_NODE_PORT_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.LIST, org.apache.thrift.protocol.TType.STRUCT, struct.executor_node_port.size())); - for (Map.Entry, NodeInfo> _iter407 : struct.executor_node_port.entrySet()) + for (Map.Entry, NodeInfo> _iter417 : struct.executor_node_port.entrySet()) { { - oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, _iter407.getKey().size())); - for (long _iter408 : _iter407.getKey()) + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, _iter417.getKey().size())); + for (long _iter418 : _iter417.getKey()) { - oprot.writeI64(_iter408); + oprot.writeI64(_iter418); } oprot.writeListEnd(); } - _iter407.getValue().write(oprot); + _iter417.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -816,17 +816,17 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Assignment struct) oprot.writeFieldBegin(EXECUTOR_START_TIME_SECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.LIST, org.apache.thrift.protocol.TType.I64, struct.executor_start_time_secs.size())); - for (Map.Entry, Long> _iter409 : struct.executor_start_time_secs.entrySet()) + for (Map.Entry, Long> _iter419 : struct.executor_start_time_secs.entrySet()) { { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, _iter409.getKey().size())); - for (long _iter410 : _iter409.getKey()) + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, _iter419.getKey().size())); + for (long _iter420 : _iter419.getKey()) { - oprot.writeI64(_iter410); + oprot.writeI64(_iter420); } oprot.writeListEnd(); } - oprot.writeI64(_iter409.getValue()); + oprot.writeI64(_iter419.getValue()); } oprot.writeMapEnd(); } @@ -865,42 +865,42 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Assignment struct) if (struct.is_set_node_host()) { { oprot.writeI32(struct.node_host.size()); - for (Map.Entry _iter411 : struct.node_host.entrySet()) + for (Map.Entry _iter421 : struct.node_host.entrySet()) { - oprot.writeString(_iter411.getKey()); - oprot.writeString(_iter411.getValue()); + oprot.writeString(_iter421.getKey()); + oprot.writeString(_iter421.getValue()); } } } if (struct.is_set_executor_node_port()) { { oprot.writeI32(struct.executor_node_port.size()); - for (Map.Entry, NodeInfo> _iter412 : struct.executor_node_port.entrySet()) + for (Map.Entry, NodeInfo> _iter422 : struct.executor_node_port.entrySet()) { { - oprot.writeI32(_iter412.getKey().size()); - for (long _iter413 : _iter412.getKey()) + oprot.writeI32(_iter422.getKey().size()); + for (long _iter423 : _iter422.getKey()) { - oprot.writeI64(_iter413); + oprot.writeI64(_iter423); } } - _iter412.getValue().write(oprot); + _iter422.getValue().write(oprot); } } } if (struct.is_set_executor_start_time_secs()) { { oprot.writeI32(struct.executor_start_time_secs.size()); - for (Map.Entry, Long> _iter414 : struct.executor_start_time_secs.entrySet()) + for (Map.Entry, Long> _iter424 : struct.executor_start_time_secs.entrySet()) { { - oprot.writeI32(_iter414.getKey().size()); - for (long _iter415 : _iter414.getKey()) + oprot.writeI32(_iter424.getKey().size()); + for (long _iter425 : _iter424.getKey()) { - oprot.writeI64(_iter415); + oprot.writeI64(_iter425); } } - oprot.writeI64(_iter414.getValue()); + oprot.writeI64(_iter424.getValue()); } } } @@ -914,64 +914,64 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Assignment struct) t BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map416 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.node_host = new HashMap(2*_map416.size); - String _key417; - String 
_val418; - for (int _i419 = 0; _i419 < _map416.size; ++_i419) + org.apache.thrift.protocol.TMap _map426 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.node_host = new HashMap(2*_map426.size); + String _key427; + String _val428; + for (int _i429 = 0; _i429 < _map426.size; ++_i429) { - _key417 = iprot.readString(); - _val418 = iprot.readString(); - struct.node_host.put(_key417, _val418); + _key427 = iprot.readString(); + _val428 = iprot.readString(); + struct.node_host.put(_key427, _val428); } } struct.set_node_host_isSet(true); } if (incoming.get(1)) { { - org.apache.thrift.protocol.TMap _map420 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.LIST, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.executor_node_port = new HashMap,NodeInfo>(2*_map420.size); - List _key421; - NodeInfo _val422; - for (int _i423 = 0; _i423 < _map420.size; ++_i423) + org.apache.thrift.protocol.TMap _map430 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.LIST, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.executor_node_port = new HashMap,NodeInfo>(2*_map430.size); + List _key431; + NodeInfo _val432; + for (int _i433 = 0; _i433 < _map430.size; ++_i433) { { - org.apache.thrift.protocol.TList _list424 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - _key421 = new ArrayList(_list424.size); - long _elem425; - for (int _i426 = 0; _i426 < _list424.size; ++_i426) + org.apache.thrift.protocol.TList _list434 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + _key431 = new ArrayList(_list434.size); + long _elem435; + for (int _i436 = 0; _i436 < _list434.size; ++_i436) { - _elem425 = iprot.readI64(); - _key421.add(_elem425); + _elem435 = iprot.readI64(); + _key431.add(_elem435); } } - _val422 = new NodeInfo(); - _val422.read(iprot); - struct.executor_node_port.put(_key421, _val422); + _val432 = new NodeInfo(); + _val432.read(iprot); + struct.executor_node_port.put(_key431, _val432); } } struct.set_executor_node_port_isSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map427 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.LIST, org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.executor_start_time_secs = new HashMap,Long>(2*_map427.size); - List _key428; - long _val429; - for (int _i430 = 0; _i430 < _map427.size; ++_i430) + org.apache.thrift.protocol.TMap _map437 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.LIST, org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.executor_start_time_secs = new HashMap,Long>(2*_map437.size); + List _key438; + long _val439; + for (int _i440 = 0; _i440 < _map437.size; ++_i440) { { - org.apache.thrift.protocol.TList _list431 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - _key428 = new ArrayList(_list431.size); - long _elem432; - for (int _i433 = 0; _i433 < _list431.size; ++_i433) + org.apache.thrift.protocol.TList _list441 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + _key438 = new ArrayList(_list441.size); + long _elem442; + for (int _i443 = 0; _i443 < _list441.size; ++_i443) { - _elem432 = iprot.readI64(); - _key428.add(_elem432); + _elem442 = iprot.readI64(); + _key438.add(_elem442); } } - _val429 = iprot.readI64(); - 
struct.executor_start_time_secs.put(_key428, _val429); + _val439 = iprot.readI64(); + struct.executor_start_time_secs.put(_key438, _val439); } } struct.set_executor_start_time_secs_isSet(true); diff --git a/storm-core/src/jvm/backtype/storm/generated/BoltStats.java b/storm-core/src/jvm/backtype/storm/generated/BoltStats.java index 26ef5d8c631..3f31d690304 100644 --- a/storm-core/src/jvm/backtype/storm/generated/BoltStats.java +++ b/storm-core/src/jvm/backtype/storm/generated/BoltStats.java @@ -51,7 +51,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-29") public class BoltStats implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("BoltStats"); @@ -881,28 +881,28 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, BoltStats struct) t case 1: // ACKED if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map98 = iprot.readMapBegin(); - struct.acked = new HashMap>(2*_map98.size); - String _key99; - Map _val100; - for (int _i101 = 0; _i101 < _map98.size; ++_i101) + org.apache.thrift.protocol.TMap _map108 = iprot.readMapBegin(); + struct.acked = new HashMap>(2*_map108.size); + String _key109; + Map _val110; + for (int _i111 = 0; _i111 < _map108.size; ++_i111) { - _key99 = iprot.readString(); + _key109 = iprot.readString(); { - org.apache.thrift.protocol.TMap _map102 = iprot.readMapBegin(); - _val100 = new HashMap(2*_map102.size); - GlobalStreamId _key103; - long _val104; - for (int _i105 = 0; _i105 < _map102.size; ++_i105) + org.apache.thrift.protocol.TMap _map112 = iprot.readMapBegin(); + _val110 = new HashMap(2*_map112.size); + GlobalStreamId _key113; + long _val114; + for (int _i115 = 0; _i115 < _map112.size; ++_i115) { - _key103 = new GlobalStreamId(); - _key103.read(iprot); - _val104 = iprot.readI64(); - _val100.put(_key103, _val104); + _key113 = new GlobalStreamId(); + _key113.read(iprot); + _val114 = iprot.readI64(); + _val110.put(_key113, _val114); } iprot.readMapEnd(); } - struct.acked.put(_key99, _val100); + struct.acked.put(_key109, _val110); } iprot.readMapEnd(); } @@ -914,28 +914,28 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, BoltStats struct) t case 2: // FAILED if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map106 = iprot.readMapBegin(); - struct.failed = new HashMap>(2*_map106.size); - String _key107; - Map _val108; - for (int _i109 = 0; _i109 < _map106.size; ++_i109) + org.apache.thrift.protocol.TMap _map116 = iprot.readMapBegin(); + struct.failed = new HashMap>(2*_map116.size); + String _key117; + Map _val118; + for (int _i119 = 0; _i119 < _map116.size; ++_i119) { - _key107 = iprot.readString(); + _key117 = iprot.readString(); { - org.apache.thrift.protocol.TMap _map110 = iprot.readMapBegin(); - _val108 = new HashMap(2*_map110.size); - GlobalStreamId _key111; - long _val112; - for (int _i113 = 0; _i113 < _map110.size; ++_i113) + org.apache.thrift.protocol.TMap _map120 = iprot.readMapBegin(); + _val118 = new HashMap(2*_map120.size); + GlobalStreamId _key121; + long _val122; + for (int _i123 = 0; _i123 < _map120.size; ++_i123) { - _key111 = new GlobalStreamId(); - _key111.read(iprot); - _val112 = 
iprot.readI64(); - _val108.put(_key111, _val112); + _key121 = new GlobalStreamId(); + _key121.read(iprot); + _val122 = iprot.readI64(); + _val118.put(_key121, _val122); } iprot.readMapEnd(); } - struct.failed.put(_key107, _val108); + struct.failed.put(_key117, _val118); } iprot.readMapEnd(); } @@ -947,28 +947,28 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, BoltStats struct) t case 3: // PROCESS_MS_AVG if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map114 = iprot.readMapBegin(); - struct.process_ms_avg = new HashMap>(2*_map114.size); - String _key115; - Map _val116; - for (int _i117 = 0; _i117 < _map114.size; ++_i117) + org.apache.thrift.protocol.TMap _map124 = iprot.readMapBegin(); + struct.process_ms_avg = new HashMap>(2*_map124.size); + String _key125; + Map _val126; + for (int _i127 = 0; _i127 < _map124.size; ++_i127) { - _key115 = iprot.readString(); + _key125 = iprot.readString(); { - org.apache.thrift.protocol.TMap _map118 = iprot.readMapBegin(); - _val116 = new HashMap(2*_map118.size); - GlobalStreamId _key119; - double _val120; - for (int _i121 = 0; _i121 < _map118.size; ++_i121) + org.apache.thrift.protocol.TMap _map128 = iprot.readMapBegin(); + _val126 = new HashMap(2*_map128.size); + GlobalStreamId _key129; + double _val130; + for (int _i131 = 0; _i131 < _map128.size; ++_i131) { - _key119 = new GlobalStreamId(); - _key119.read(iprot); - _val120 = iprot.readDouble(); - _val116.put(_key119, _val120); + _key129 = new GlobalStreamId(); + _key129.read(iprot); + _val130 = iprot.readDouble(); + _val126.put(_key129, _val130); } iprot.readMapEnd(); } - struct.process_ms_avg.put(_key115, _val116); + struct.process_ms_avg.put(_key125, _val126); } iprot.readMapEnd(); } @@ -980,28 +980,28 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, BoltStats struct) t case 4: // EXECUTED if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map122 = iprot.readMapBegin(); - struct.executed = new HashMap>(2*_map122.size); - String _key123; - Map _val124; - for (int _i125 = 0; _i125 < _map122.size; ++_i125) + org.apache.thrift.protocol.TMap _map132 = iprot.readMapBegin(); + struct.executed = new HashMap>(2*_map132.size); + String _key133; + Map _val134; + for (int _i135 = 0; _i135 < _map132.size; ++_i135) { - _key123 = iprot.readString(); + _key133 = iprot.readString(); { - org.apache.thrift.protocol.TMap _map126 = iprot.readMapBegin(); - _val124 = new HashMap(2*_map126.size); - GlobalStreamId _key127; - long _val128; - for (int _i129 = 0; _i129 < _map126.size; ++_i129) + org.apache.thrift.protocol.TMap _map136 = iprot.readMapBegin(); + _val134 = new HashMap(2*_map136.size); + GlobalStreamId _key137; + long _val138; + for (int _i139 = 0; _i139 < _map136.size; ++_i139) { - _key127 = new GlobalStreamId(); - _key127.read(iprot); - _val128 = iprot.readI64(); - _val124.put(_key127, _val128); + _key137 = new GlobalStreamId(); + _key137.read(iprot); + _val138 = iprot.readI64(); + _val134.put(_key137, _val138); } iprot.readMapEnd(); } - struct.executed.put(_key123, _val124); + struct.executed.put(_key133, _val134); } iprot.readMapEnd(); } @@ -1013,28 +1013,28 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, BoltStats struct) t case 5: // EXECUTE_MS_AVG if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map130 = iprot.readMapBegin(); - struct.execute_ms_avg = new HashMap>(2*_map130.size); - String _key131; - Map 
_val132; - for (int _i133 = 0; _i133 < _map130.size; ++_i133) + org.apache.thrift.protocol.TMap _map140 = iprot.readMapBegin(); + struct.execute_ms_avg = new HashMap>(2*_map140.size); + String _key141; + Map _val142; + for (int _i143 = 0; _i143 < _map140.size; ++_i143) { - _key131 = iprot.readString(); + _key141 = iprot.readString(); { - org.apache.thrift.protocol.TMap _map134 = iprot.readMapBegin(); - _val132 = new HashMap(2*_map134.size); - GlobalStreamId _key135; - double _val136; - for (int _i137 = 0; _i137 < _map134.size; ++_i137) + org.apache.thrift.protocol.TMap _map144 = iprot.readMapBegin(); + _val142 = new HashMap(2*_map144.size); + GlobalStreamId _key145; + double _val146; + for (int _i147 = 0; _i147 < _map144.size; ++_i147) { - _key135 = new GlobalStreamId(); - _key135.read(iprot); - _val136 = iprot.readDouble(); - _val132.put(_key135, _val136); + _key145 = new GlobalStreamId(); + _key145.read(iprot); + _val146 = iprot.readDouble(); + _val142.put(_key145, _val146); } iprot.readMapEnd(); } - struct.execute_ms_avg.put(_key131, _val132); + struct.execute_ms_avg.put(_key141, _val142); } iprot.readMapEnd(); } @@ -1060,15 +1060,15 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, BoltStats struct) oprot.writeFieldBegin(ACKED_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, struct.acked.size())); - for (Map.Entry> _iter138 : struct.acked.entrySet()) + for (Map.Entry> _iter148 : struct.acked.entrySet()) { - oprot.writeString(_iter138.getKey()); + oprot.writeString(_iter148.getKey()); { - oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.I64, _iter138.getValue().size())); - for (Map.Entry _iter139 : _iter138.getValue().entrySet()) + oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.I64, _iter148.getValue().size())); + for (Map.Entry _iter149 : _iter148.getValue().entrySet()) { - _iter139.getKey().write(oprot); - oprot.writeI64(_iter139.getValue()); + _iter149.getKey().write(oprot); + oprot.writeI64(_iter149.getValue()); } oprot.writeMapEnd(); } @@ -1081,15 +1081,15 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, BoltStats struct) oprot.writeFieldBegin(FAILED_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, struct.failed.size())); - for (Map.Entry> _iter140 : struct.failed.entrySet()) + for (Map.Entry> _iter150 : struct.failed.entrySet()) { - oprot.writeString(_iter140.getKey()); + oprot.writeString(_iter150.getKey()); { - oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.I64, _iter140.getValue().size())); - for (Map.Entry _iter141 : _iter140.getValue().entrySet()) + oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.I64, _iter150.getValue().size())); + for (Map.Entry _iter151 : _iter150.getValue().entrySet()) { - _iter141.getKey().write(oprot); - oprot.writeI64(_iter141.getValue()); + _iter151.getKey().write(oprot); + oprot.writeI64(_iter151.getValue()); } oprot.writeMapEnd(); } @@ -1102,15 +1102,15 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, BoltStats struct) oprot.writeFieldBegin(PROCESS_MS_AVG_FIELD_DESC); { 
oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, struct.process_ms_avg.size())); - for (Map.Entry> _iter142 : struct.process_ms_avg.entrySet()) + for (Map.Entry> _iter152 : struct.process_ms_avg.entrySet()) { - oprot.writeString(_iter142.getKey()); + oprot.writeString(_iter152.getKey()); { - oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.DOUBLE, _iter142.getValue().size())); - for (Map.Entry _iter143 : _iter142.getValue().entrySet()) + oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.DOUBLE, _iter152.getValue().size())); + for (Map.Entry _iter153 : _iter152.getValue().entrySet()) { - _iter143.getKey().write(oprot); - oprot.writeDouble(_iter143.getValue()); + _iter153.getKey().write(oprot); + oprot.writeDouble(_iter153.getValue()); } oprot.writeMapEnd(); } @@ -1123,15 +1123,15 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, BoltStats struct) oprot.writeFieldBegin(EXECUTED_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, struct.executed.size())); - for (Map.Entry> _iter144 : struct.executed.entrySet()) + for (Map.Entry> _iter154 : struct.executed.entrySet()) { - oprot.writeString(_iter144.getKey()); + oprot.writeString(_iter154.getKey()); { - oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.I64, _iter144.getValue().size())); - for (Map.Entry _iter145 : _iter144.getValue().entrySet()) + oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.I64, _iter154.getValue().size())); + for (Map.Entry _iter155 : _iter154.getValue().entrySet()) { - _iter145.getKey().write(oprot); - oprot.writeI64(_iter145.getValue()); + _iter155.getKey().write(oprot); + oprot.writeI64(_iter155.getValue()); } oprot.writeMapEnd(); } @@ -1144,15 +1144,15 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, BoltStats struct) oprot.writeFieldBegin(EXECUTE_MS_AVG_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, struct.execute_ms_avg.size())); - for (Map.Entry> _iter146 : struct.execute_ms_avg.entrySet()) + for (Map.Entry> _iter156 : struct.execute_ms_avg.entrySet()) { - oprot.writeString(_iter146.getKey()); + oprot.writeString(_iter156.getKey()); { - oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.DOUBLE, _iter146.getValue().size())); - for (Map.Entry _iter147 : _iter146.getValue().entrySet()) + oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.DOUBLE, _iter156.getValue().size())); + for (Map.Entry _iter157 : _iter156.getValue().entrySet()) { - _iter147.getKey().write(oprot); - oprot.writeDouble(_iter147.getValue()); + _iter157.getKey().write(oprot); + oprot.writeDouble(_iter157.getValue()); } oprot.writeMapEnd(); } @@ -1180,75 +1180,75 @@ public void write(org.apache.thrift.protocol.TProtocol prot, BoltStats struct) t TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.acked.size()); - for (Map.Entry> _iter148 : 
struct.acked.entrySet()) + for (Map.Entry> _iter158 : struct.acked.entrySet()) { - oprot.writeString(_iter148.getKey()); + oprot.writeString(_iter158.getKey()); { - oprot.writeI32(_iter148.getValue().size()); - for (Map.Entry _iter149 : _iter148.getValue().entrySet()) + oprot.writeI32(_iter158.getValue().size()); + for (Map.Entry _iter159 : _iter158.getValue().entrySet()) { - _iter149.getKey().write(oprot); - oprot.writeI64(_iter149.getValue()); + _iter159.getKey().write(oprot); + oprot.writeI64(_iter159.getValue()); } } } } { oprot.writeI32(struct.failed.size()); - for (Map.Entry> _iter150 : struct.failed.entrySet()) + for (Map.Entry> _iter160 : struct.failed.entrySet()) { - oprot.writeString(_iter150.getKey()); + oprot.writeString(_iter160.getKey()); { - oprot.writeI32(_iter150.getValue().size()); - for (Map.Entry _iter151 : _iter150.getValue().entrySet()) + oprot.writeI32(_iter160.getValue().size()); + for (Map.Entry _iter161 : _iter160.getValue().entrySet()) { - _iter151.getKey().write(oprot); - oprot.writeI64(_iter151.getValue()); + _iter161.getKey().write(oprot); + oprot.writeI64(_iter161.getValue()); } } } } { oprot.writeI32(struct.process_ms_avg.size()); - for (Map.Entry> _iter152 : struct.process_ms_avg.entrySet()) + for (Map.Entry> _iter162 : struct.process_ms_avg.entrySet()) { - oprot.writeString(_iter152.getKey()); + oprot.writeString(_iter162.getKey()); { - oprot.writeI32(_iter152.getValue().size()); - for (Map.Entry _iter153 : _iter152.getValue().entrySet()) + oprot.writeI32(_iter162.getValue().size()); + for (Map.Entry _iter163 : _iter162.getValue().entrySet()) { - _iter153.getKey().write(oprot); - oprot.writeDouble(_iter153.getValue()); + _iter163.getKey().write(oprot); + oprot.writeDouble(_iter163.getValue()); } } } } { oprot.writeI32(struct.executed.size()); - for (Map.Entry> _iter154 : struct.executed.entrySet()) + for (Map.Entry> _iter164 : struct.executed.entrySet()) { - oprot.writeString(_iter154.getKey()); + oprot.writeString(_iter164.getKey()); { - oprot.writeI32(_iter154.getValue().size()); - for (Map.Entry _iter155 : _iter154.getValue().entrySet()) + oprot.writeI32(_iter164.getValue().size()); + for (Map.Entry _iter165 : _iter164.getValue().entrySet()) { - _iter155.getKey().write(oprot); - oprot.writeI64(_iter155.getValue()); + _iter165.getKey().write(oprot); + oprot.writeI64(_iter165.getValue()); } } } } { oprot.writeI32(struct.execute_ms_avg.size()); - for (Map.Entry> _iter156 : struct.execute_ms_avg.entrySet()) + for (Map.Entry> _iter166 : struct.execute_ms_avg.entrySet()) { - oprot.writeString(_iter156.getKey()); + oprot.writeString(_iter166.getKey()); { - oprot.writeI32(_iter156.getValue().size()); - for (Map.Entry _iter157 : _iter156.getValue().entrySet()) + oprot.writeI32(_iter166.getValue().size()); + for (Map.Entry _iter167 : _iter166.getValue().entrySet()) { - _iter157.getKey().write(oprot); - oprot.writeDouble(_iter157.getValue()); + _iter167.getKey().write(oprot); + oprot.writeDouble(_iter167.getValue()); } } } @@ -1259,127 +1259,127 @@ public void write(org.apache.thrift.protocol.TProtocol prot, BoltStats struct) t public void read(org.apache.thrift.protocol.TProtocol prot, BoltStats struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map158 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, iprot.readI32()); - struct.acked = new HashMap>(2*_map158.size); - String _key159; - Map _val160; - for (int _i161 = 0; 
_i161 < _map158.size; ++_i161) + org.apache.thrift.protocol.TMap _map168 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, iprot.readI32()); + struct.acked = new HashMap>(2*_map168.size); + String _key169; + Map _val170; + for (int _i171 = 0; _i171 < _map168.size; ++_i171) { - _key159 = iprot.readString(); + _key169 = iprot.readString(); { - org.apache.thrift.protocol.TMap _map162 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.I64, iprot.readI32()); - _val160 = new HashMap(2*_map162.size); - GlobalStreamId _key163; - long _val164; - for (int _i165 = 0; _i165 < _map162.size; ++_i165) + org.apache.thrift.protocol.TMap _map172 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.I64, iprot.readI32()); + _val170 = new HashMap(2*_map172.size); + GlobalStreamId _key173; + long _val174; + for (int _i175 = 0; _i175 < _map172.size; ++_i175) { - _key163 = new GlobalStreamId(); - _key163.read(iprot); - _val164 = iprot.readI64(); - _val160.put(_key163, _val164); + _key173 = new GlobalStreamId(); + _key173.read(iprot); + _val174 = iprot.readI64(); + _val170.put(_key173, _val174); } } - struct.acked.put(_key159, _val160); + struct.acked.put(_key169, _val170); } } struct.set_acked_isSet(true); { - org.apache.thrift.protocol.TMap _map166 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, iprot.readI32()); - struct.failed = new HashMap>(2*_map166.size); - String _key167; - Map _val168; - for (int _i169 = 0; _i169 < _map166.size; ++_i169) + org.apache.thrift.protocol.TMap _map176 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, iprot.readI32()); + struct.failed = new HashMap>(2*_map176.size); + String _key177; + Map _val178; + for (int _i179 = 0; _i179 < _map176.size; ++_i179) { - _key167 = iprot.readString(); + _key177 = iprot.readString(); { - org.apache.thrift.protocol.TMap _map170 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.I64, iprot.readI32()); - _val168 = new HashMap(2*_map170.size); - GlobalStreamId _key171; - long _val172; - for (int _i173 = 0; _i173 < _map170.size; ++_i173) + org.apache.thrift.protocol.TMap _map180 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.I64, iprot.readI32()); + _val178 = new HashMap(2*_map180.size); + GlobalStreamId _key181; + long _val182; + for (int _i183 = 0; _i183 < _map180.size; ++_i183) { - _key171 = new GlobalStreamId(); - _key171.read(iprot); - _val172 = iprot.readI64(); - _val168.put(_key171, _val172); + _key181 = new GlobalStreamId(); + _key181.read(iprot); + _val182 = iprot.readI64(); + _val178.put(_key181, _val182); } } - struct.failed.put(_key167, _val168); + struct.failed.put(_key177, _val178); } } struct.set_failed_isSet(true); { - org.apache.thrift.protocol.TMap _map174 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, iprot.readI32()); - struct.process_ms_avg = new HashMap>(2*_map174.size); - String _key175; - Map _val176; - for (int _i177 = 0; _i177 < _map174.size; ++_i177) + org.apache.thrift.protocol.TMap _map184 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, 
iprot.readI32()); + struct.process_ms_avg = new HashMap>(2*_map184.size); + String _key185; + Map _val186; + for (int _i187 = 0; _i187 < _map184.size; ++_i187) { - _key175 = iprot.readString(); + _key185 = iprot.readString(); { - org.apache.thrift.protocol.TMap _map178 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.DOUBLE, iprot.readI32()); - _val176 = new HashMap(2*_map178.size); - GlobalStreamId _key179; - double _val180; - for (int _i181 = 0; _i181 < _map178.size; ++_i181) + org.apache.thrift.protocol.TMap _map188 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.DOUBLE, iprot.readI32()); + _val186 = new HashMap(2*_map188.size); + GlobalStreamId _key189; + double _val190; + for (int _i191 = 0; _i191 < _map188.size; ++_i191) { - _key179 = new GlobalStreamId(); - _key179.read(iprot); - _val180 = iprot.readDouble(); - _val176.put(_key179, _val180); + _key189 = new GlobalStreamId(); + _key189.read(iprot); + _val190 = iprot.readDouble(); + _val186.put(_key189, _val190); } } - struct.process_ms_avg.put(_key175, _val176); + struct.process_ms_avg.put(_key185, _val186); } } struct.set_process_ms_avg_isSet(true); { - org.apache.thrift.protocol.TMap _map182 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, iprot.readI32()); - struct.executed = new HashMap>(2*_map182.size); - String _key183; - Map _val184; - for (int _i185 = 0; _i185 < _map182.size; ++_i185) + org.apache.thrift.protocol.TMap _map192 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, iprot.readI32()); + struct.executed = new HashMap>(2*_map192.size); + String _key193; + Map _val194; + for (int _i195 = 0; _i195 < _map192.size; ++_i195) { - _key183 = iprot.readString(); + _key193 = iprot.readString(); { - org.apache.thrift.protocol.TMap _map186 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.I64, iprot.readI32()); - _val184 = new HashMap(2*_map186.size); - GlobalStreamId _key187; - long _val188; - for (int _i189 = 0; _i189 < _map186.size; ++_i189) + org.apache.thrift.protocol.TMap _map196 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.I64, iprot.readI32()); + _val194 = new HashMap(2*_map196.size); + GlobalStreamId _key197; + long _val198; + for (int _i199 = 0; _i199 < _map196.size; ++_i199) { - _key187 = new GlobalStreamId(); - _key187.read(iprot); - _val188 = iprot.readI64(); - _val184.put(_key187, _val188); + _key197 = new GlobalStreamId(); + _key197.read(iprot); + _val198 = iprot.readI64(); + _val194.put(_key197, _val198); } } - struct.executed.put(_key183, _val184); + struct.executed.put(_key193, _val194); } } struct.set_executed_isSet(true); { - org.apache.thrift.protocol.TMap _map190 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, iprot.readI32()); - struct.execute_ms_avg = new HashMap>(2*_map190.size); - String _key191; - Map _val192; - for (int _i193 = 0; _i193 < _map190.size; ++_i193) + org.apache.thrift.protocol.TMap _map200 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, iprot.readI32()); + struct.execute_ms_avg = new HashMap>(2*_map200.size); + String _key201; + Map _val202; + for (int _i203 = 0; _i203 < 
_map200.size; ++_i203) { - _key191 = iprot.readString(); + _key201 = iprot.readString(); { - org.apache.thrift.protocol.TMap _map194 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.DOUBLE, iprot.readI32()); - _val192 = new HashMap(2*_map194.size); - GlobalStreamId _key195; - double _val196; - for (int _i197 = 0; _i197 < _map194.size; ++_i197) + org.apache.thrift.protocol.TMap _map204 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.DOUBLE, iprot.readI32()); + _val202 = new HashMap(2*_map204.size); + GlobalStreamId _key205; + double _val206; + for (int _i207 = 0; _i207 < _map204.size; ++_i207) { - _key195 = new GlobalStreamId(); - _key195.read(iprot); - _val196 = iprot.readDouble(); - _val192.put(_key195, _val196); + _key205 = new GlobalStreamId(); + _key205.read(iprot); + _val206 = iprot.readDouble(); + _val202.put(_key205, _val206); } } - struct.execute_ms_avg.put(_key191, _val192); + struct.execute_ms_avg.put(_key201, _val202); } } struct.set_execute_ms_avg_isSet(true); diff --git a/storm-core/src/jvm/backtype/storm/generated/ClusterSummary.java b/storm-core/src/jvm/backtype/storm/generated/ClusterSummary.java index e5651df86a7..09a8310a389 100644 --- a/storm-core/src/jvm/backtype/storm/generated/ClusterSummary.java +++ b/storm-core/src/jvm/backtype/storm/generated/ClusterSummary.java @@ -51,7 +51,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-3-2") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-29") public class ClusterSummary implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ClusterSummary"); @@ -581,14 +581,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ClusterSummary stru case 1: // SUPERVISORS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list74 = iprot.readListBegin(); - struct.supervisors = new ArrayList(_list74.size); - SupervisorSummary _elem75; - for (int _i76 = 0; _i76 < _list74.size; ++_i76) + org.apache.thrift.protocol.TList _list84 = iprot.readListBegin(); + struct.supervisors = new ArrayList(_list84.size); + SupervisorSummary _elem85; + for (int _i86 = 0; _i86 < _list84.size; ++_i86) { - _elem75 = new SupervisorSummary(); - _elem75.read(iprot); - struct.supervisors.add(_elem75); + _elem85 = new SupervisorSummary(); + _elem85.read(iprot); + struct.supervisors.add(_elem85); } iprot.readListEnd(); } @@ -600,14 +600,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ClusterSummary stru case 3: // TOPOLOGIES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list77 = iprot.readListBegin(); - struct.topologies = new ArrayList(_list77.size); - TopologySummary _elem78; - for (int _i79 = 0; _i79 < _list77.size; ++_i79) + org.apache.thrift.protocol.TList _list87 = iprot.readListBegin(); + struct.topologies = new ArrayList(_list87.size); + TopologySummary _elem88; + for (int _i89 = 0; _i89 < _list87.size; ++_i89) { - _elem78 = new TopologySummary(); - _elem78.read(iprot); - struct.topologies.add(_elem78); + _elem88 = new TopologySummary(); + _elem88.read(iprot); + struct.topologies.add(_elem88); } iprot.readListEnd(); 
} @@ -619,14 +619,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ClusterSummary stru case 4: // NIMBUSES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list80 = iprot.readListBegin(); - struct.nimbuses = new ArrayList(_list80.size); - NimbusSummary _elem81; - for (int _i82 = 0; _i82 < _list80.size; ++_i82) + org.apache.thrift.protocol.TList _list90 = iprot.readListBegin(); + struct.nimbuses = new ArrayList(_list90.size); + NimbusSummary _elem91; + for (int _i92 = 0; _i92 < _list90.size; ++_i92) { - _elem81 = new NimbusSummary(); - _elem81.read(iprot); - struct.nimbuses.add(_elem81); + _elem91 = new NimbusSummary(); + _elem91.read(iprot); + struct.nimbuses.add(_elem91); } iprot.readListEnd(); } @@ -652,9 +652,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ClusterSummary str oprot.writeFieldBegin(SUPERVISORS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.supervisors.size())); - for (SupervisorSummary _iter83 : struct.supervisors) + for (SupervisorSummary _iter93 : struct.supervisors) { - _iter83.write(oprot); + _iter93.write(oprot); } oprot.writeListEnd(); } @@ -664,9 +664,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ClusterSummary str oprot.writeFieldBegin(TOPOLOGIES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.topologies.size())); - for (TopologySummary _iter84 : struct.topologies) + for (TopologySummary _iter94 : struct.topologies) { - _iter84.write(oprot); + _iter94.write(oprot); } oprot.writeListEnd(); } @@ -676,9 +676,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ClusterSummary str oprot.writeFieldBegin(NIMBUSES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.nimbuses.size())); - for (NimbusSummary _iter85 : struct.nimbuses) + for (NimbusSummary _iter95 : struct.nimbuses) { - _iter85.write(oprot); + _iter95.write(oprot); } oprot.writeListEnd(); } @@ -703,23 +703,23 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClusterSummary stru TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.supervisors.size()); - for (SupervisorSummary _iter86 : struct.supervisors) + for (SupervisorSummary _iter96 : struct.supervisors) { - _iter86.write(oprot); + _iter96.write(oprot); } } { oprot.writeI32(struct.topologies.size()); - for (TopologySummary _iter87 : struct.topologies) + for (TopologySummary _iter97 : struct.topologies) { - _iter87.write(oprot); + _iter97.write(oprot); } } { oprot.writeI32(struct.nimbuses.size()); - for (NimbusSummary _iter88 : struct.nimbuses) + for (NimbusSummary _iter98 : struct.nimbuses) { - _iter88.write(oprot); + _iter98.write(oprot); } } } @@ -728,38 +728,38 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClusterSummary stru public void read(org.apache.thrift.protocol.TProtocol prot, ClusterSummary struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list89 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.supervisors = new ArrayList(_list89.size); - SupervisorSummary _elem90; - for (int _i91 = 0; _i91 < _list89.size; ++_i91) + org.apache.thrift.protocol.TList _list99 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.supervisors = new ArrayList(_list99.size); + SupervisorSummary _elem100; + for (int _i101 = 0; _i101 < _list99.size; ++_i101) { - _elem90 = new SupervisorSummary(); - _elem90.read(iprot); - struct.supervisors.add(_elem90); + _elem100 = new SupervisorSummary(); + _elem100.read(iprot); + struct.supervisors.add(_elem100); } } struct.set_supervisors_isSet(true); { - org.apache.thrift.protocol.TList _list92 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.topologies = new ArrayList(_list92.size); - TopologySummary _elem93; - for (int _i94 = 0; _i94 < _list92.size; ++_i94) + org.apache.thrift.protocol.TList _list102 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.topologies = new ArrayList(_list102.size); + TopologySummary _elem103; + for (int _i104 = 0; _i104 < _list102.size; ++_i104) { - _elem93 = new TopologySummary(); - _elem93.read(iprot); - struct.topologies.add(_elem93); + _elem103 = new TopologySummary(); + _elem103.read(iprot); + struct.topologies.add(_elem103); } } struct.set_topologies_isSet(true); { - org.apache.thrift.protocol.TList _list95 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.nimbuses = new ArrayList(_list95.size); - NimbusSummary _elem96; - for (int _i97 = 0; _i97 < _list95.size; ++_i97) + org.apache.thrift.protocol.TList _list105 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.nimbuses = new ArrayList(_list105.size); + NimbusSummary _elem106; + for (int _i107 = 0; _i107 < _list105.size; ++_i107) { - _elem96 = new NimbusSummary(); - _elem96.read(iprot); - struct.nimbuses.add(_elem96); + _elem106 = new NimbusSummary(); + _elem106.read(iprot); + struct.nimbuses.add(_elem106); } } struct.set_nimbuses_isSet(true); diff --git a/storm-core/src/jvm/backtype/storm/generated/ClusterWorkerHeartbeat.java b/storm-core/src/jvm/backtype/storm/generated/ClusterWorkerHeartbeat.java index ce2f5f43dfd..327ee45f6ee 100644 --- a/storm-core/src/jvm/backtype/storm/generated/ClusterWorkerHeartbeat.java +++ b/storm-core/src/jvm/backtype/storm/generated/ClusterWorkerHeartbeat.java @@ -51,7 +51,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-29") public class ClusterWorkerHeartbeat implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ClusterWorkerHeartbeat"); @@ -635,17 +635,17 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ClusterWorkerHeartb case 2: // EXECUTOR_STATS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map454 = iprot.readMapBegin(); - struct.executor_stats = new HashMap(2*_map454.size); - ExecutorInfo _key455; - ExecutorStats _val456; - for (int _i457 = 0; _i457 < _map454.size; ++_i457) + org.apache.thrift.protocol.TMap _map464 = iprot.readMapBegin(); + struct.executor_stats = new HashMap(2*_map464.size); + ExecutorInfo _key465; + ExecutorStats _val466; + for (int _i467 = 0; _i467 < _map464.size; ++_i467) { - _key455 = new 
ExecutorInfo(); - _key455.read(iprot); - _val456 = new ExecutorStats(); - _val456.read(iprot); - struct.executor_stats.put(_key455, _val456); + _key465 = new ExecutorInfo(); + _key465.read(iprot); + _val466 = new ExecutorStats(); + _val466.read(iprot); + struct.executor_stats.put(_key465, _val466); } iprot.readMapEnd(); } @@ -692,10 +692,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ClusterWorkerHeart oprot.writeFieldBegin(EXECUTOR_STATS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.STRUCT, struct.executor_stats.size())); - for (Map.Entry _iter458 : struct.executor_stats.entrySet()) + for (Map.Entry _iter468 : struct.executor_stats.entrySet()) { - _iter458.getKey().write(oprot); - _iter458.getValue().write(oprot); + _iter468.getKey().write(oprot); + _iter468.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -727,10 +727,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClusterWorkerHeartb oprot.writeString(struct.storm_id); { oprot.writeI32(struct.executor_stats.size()); - for (Map.Entry _iter459 : struct.executor_stats.entrySet()) + for (Map.Entry _iter469 : struct.executor_stats.entrySet()) { - _iter459.getKey().write(oprot); - _iter459.getValue().write(oprot); + _iter469.getKey().write(oprot); + _iter469.getValue().write(oprot); } } oprot.writeI32(struct.time_secs); @@ -743,17 +743,17 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ClusterWorkerHeartbe struct.storm_id = iprot.readString(); struct.set_storm_id_isSet(true); { - org.apache.thrift.protocol.TMap _map460 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.executor_stats = new HashMap(2*_map460.size); - ExecutorInfo _key461; - ExecutorStats _val462; - for (int _i463 = 0; _i463 < _map460.size; ++_i463) + org.apache.thrift.protocol.TMap _map470 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.executor_stats = new HashMap(2*_map470.size); + ExecutorInfo _key471; + ExecutorStats _val472; + for (int _i473 = 0; _i473 < _map470.size; ++_i473) { - _key461 = new ExecutorInfo(); - _key461.read(iprot); - _val462 = new ExecutorStats(); - _val462.read(iprot); - struct.executor_stats.put(_key461, _val462); + _key471 = new ExecutorInfo(); + _key471.read(iprot); + _val472 = new ExecutorStats(); + _val472.read(iprot); + struct.executor_stats.put(_key471, _val472); } } struct.set_executor_stats_isSet(true); diff --git a/storm-core/src/jvm/backtype/storm/generated/ComponentCommon.java b/storm-core/src/jvm/backtype/storm/generated/ComponentCommon.java index 68686035120..f41d4ee80bf 100644 --- a/storm-core/src/jvm/backtype/storm/generated/ComponentCommon.java +++ b/storm-core/src/jvm/backtype/storm/generated/ComponentCommon.java @@ -51,14 +51,15 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-2-6") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-29") public class ComponentCommon implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ComponentCommon"); private static final org.apache.thrift.protocol.TField 
INPUTS_FIELD_DESC = new org.apache.thrift.protocol.TField("inputs", org.apache.thrift.protocol.TType.MAP, (short)1); private static final org.apache.thrift.protocol.TField STREAMS_FIELD_DESC = new org.apache.thrift.protocol.TField("streams", org.apache.thrift.protocol.TType.MAP, (short)2); private static final org.apache.thrift.protocol.TField PARALLELISM_HINT_FIELD_DESC = new org.apache.thrift.protocol.TField("parallelism_hint", org.apache.thrift.protocol.TType.I32, (short)3); - private static final org.apache.thrift.protocol.TField JSON_CONF_FIELD_DESC = new org.apache.thrift.protocol.TField("json_conf", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField BATCH_SIZES_FIELD_DESC = new org.apache.thrift.protocol.TField("batch_sizes", org.apache.thrift.protocol.TType.MAP, (short)4); + private static final org.apache.thrift.protocol.TField JSON_CONF_FIELD_DESC = new org.apache.thrift.protocol.TField("json_conf", org.apache.thrift.protocol.TType.STRING, (short)5); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -69,6 +70,7 @@ public class ComponentCommon implements org.apache.thrift.TBase inputs; // required private Map streams; // required private int parallelism_hint; // optional + private Map batch_sizes; // optional private String json_conf; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ @@ -76,7 +78,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { INPUTS((short)1, "inputs"), STREAMS((short)2, "streams"), PARALLELISM_HINT((short)3, "parallelism_hint"), - JSON_CONF((short)4, "json_conf"); + BATCH_SIZES((short)4, "batch_sizes"), + JSON_CONF((short)5, "json_conf"); private static final Map byName = new HashMap(); @@ -97,7 +100,9 @@ public static _Fields findByThriftId(int fieldId) { return STREAMS; case 3: // PARALLELISM_HINT return PARALLELISM_HINT; - case 4: // JSON_CONF + case 4: // BATCH_SIZES + return BATCH_SIZES; + case 5: // JSON_CONF return JSON_CONF; default: return null; @@ -141,7 +146,7 @@ public String getFieldName() { // isset id assignments private static final int __PARALLELISM_HINT_ISSET_ID = 0; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.PARALLELISM_HINT,_Fields.JSON_CONF}; + private static final _Fields optionals[] = {_Fields.PARALLELISM_HINT,_Fields.BATCH_SIZES,_Fields.JSON_CONF}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -155,6 +160,10 @@ public String getFieldName() { new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, StreamInfo.class)))); tmpMap.put(_Fields.PARALLELISM_HINT, new org.apache.thrift.meta_data.FieldMetaData("parallelism_hint", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); + tmpMap.put(_Fields.BATCH_SIZES, new org.apache.thrift.meta_data.FieldMetaData("batch_sizes", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)))); 
tmpMap.put(_Fields.JSON_CONF, new org.apache.thrift.meta_data.FieldMetaData("json_conf", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); @@ -209,6 +218,10 @@ public ComponentCommon(ComponentCommon other) { this.streams = __this__streams; } this.parallelism_hint = other.parallelism_hint; + if (other.is_set_batch_sizes()) { + Map __this__batch_sizes = new HashMap(other.batch_sizes); + this.batch_sizes = __this__batch_sizes; + } if (other.is_set_json_conf()) { this.json_conf = other.json_conf; } @@ -224,6 +237,7 @@ public void clear() { this.streams = null; set_parallelism_hint_isSet(false); this.parallelism_hint = 0; + this.batch_sizes = null; this.json_conf = null; } @@ -317,6 +331,40 @@ public void set_parallelism_hint_isSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __PARALLELISM_HINT_ISSET_ID, value); } + public int get_batch_sizes_size() { + return (this.batch_sizes == null) ? 0 : this.batch_sizes.size(); + } + + public void put_to_batch_sizes(String key, int val) { + if (this.batch_sizes == null) { + this.batch_sizes = new HashMap(); + } + this.batch_sizes.put(key, val); + } + + public Map get_batch_sizes() { + return this.batch_sizes; + } + + public void set_batch_sizes(Map batch_sizes) { + this.batch_sizes = batch_sizes; + } + + public void unset_batch_sizes() { + this.batch_sizes = null; + } + + /** Returns true if field batch_sizes is set (has been assigned a value) and false otherwise */ + public boolean is_set_batch_sizes() { + return this.batch_sizes != null; + } + + public void set_batch_sizes_isSet(boolean value) { + if (!value) { + this.batch_sizes = null; + } + } + public String get_json_conf() { return this.json_conf; } @@ -366,6 +414,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case BATCH_SIZES: + if (value == null) { + unset_batch_sizes(); + } else { + set_batch_sizes((Map)value); + } + break; + case JSON_CONF: if (value == null) { unset_json_conf(); @@ -388,6 +444,9 @@ public Object getFieldValue(_Fields field) { case PARALLELISM_HINT: return Integer.valueOf(get_parallelism_hint()); + case BATCH_SIZES: + return get_batch_sizes(); + case JSON_CONF: return get_json_conf(); @@ -408,6 +467,8 @@ public boolean isSet(_Fields field) { return is_set_streams(); case PARALLELISM_HINT: return is_set_parallelism_hint(); + case BATCH_SIZES: + return is_set_batch_sizes(); case JSON_CONF: return is_set_json_conf(); } @@ -454,6 +515,15 @@ public boolean equals(ComponentCommon that) { return false; } + boolean this_present_batch_sizes = true && this.is_set_batch_sizes(); + boolean that_present_batch_sizes = true && that.is_set_batch_sizes(); + if (this_present_batch_sizes || that_present_batch_sizes) { + if (!(this_present_batch_sizes && that_present_batch_sizes)) + return false; + if (!this.batch_sizes.equals(that.batch_sizes)) + return false; + } + boolean this_present_json_conf = true && this.is_set_json_conf(); boolean that_present_json_conf = true && that.is_set_json_conf(); if (this_present_json_conf || that_present_json_conf) { @@ -485,6 +555,11 @@ public int hashCode() { if (present_parallelism_hint) list.add(parallelism_hint); + boolean present_batch_sizes = true && (is_set_batch_sizes()); + list.add(present_batch_sizes); + if (present_batch_sizes) + list.add(batch_sizes); + boolean present_json_conf = true && (is_set_json_conf()); list.add(present_json_conf); if 
(present_json_conf) @@ -531,6 +606,16 @@ public int compareTo(ComponentCommon other) { return lastComparison; } } + lastComparison = Boolean.valueOf(is_set_batch_sizes()).compareTo(other.is_set_batch_sizes()); + if (lastComparison != 0) { + return lastComparison; + } + if (is_set_batch_sizes()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.batch_sizes, other.batch_sizes); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(is_set_json_conf()).compareTo(other.is_set_json_conf()); if (lastComparison != 0) { return lastComparison; @@ -582,6 +667,16 @@ public String toString() { sb.append(this.parallelism_hint); first = false; } + if (is_set_batch_sizes()) { + if (!first) sb.append(", "); + sb.append("batch_sizes:"); + if (this.batch_sizes == null) { + sb.append("null"); + } else { + sb.append(this.batch_sizes); + } + first = false; + } if (is_set_json_conf()) { if (!first) sb.append(", "); sb.append("json_conf:"); @@ -696,7 +791,27 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ComponentCommon str org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 4: // JSON_CONF + case 4: // BATCH_SIZES + if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { + { + org.apache.thrift.protocol.TMap _map32 = iprot.readMapBegin(); + struct.batch_sizes = new HashMap(2*_map32.size); + String _key33; + int _val34; + for (int _i35 = 0; _i35 < _map32.size; ++_i35) + { + _key33 = iprot.readString(); + _val34 = iprot.readI32(); + struct.batch_sizes.put(_key33, _val34); + } + iprot.readMapEnd(); + } + struct.set_batch_sizes_isSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 5: // JSON_CONF if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.json_conf = iprot.readString(); struct.set_json_conf_isSet(true); @@ -721,10 +836,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ComponentCommon st oprot.writeFieldBegin(INPUTS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.STRUCT, struct.inputs.size())); - for (Map.Entry _iter32 : struct.inputs.entrySet()) + for (Map.Entry _iter36 : struct.inputs.entrySet()) { - _iter32.getKey().write(oprot); - _iter32.getValue().write(oprot); + _iter36.getKey().write(oprot); + _iter36.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -734,10 +849,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ComponentCommon st oprot.writeFieldBegin(STREAMS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.streams.size())); - for (Map.Entry _iter33 : struct.streams.entrySet()) + for (Map.Entry _iter37 : struct.streams.entrySet()) { - oprot.writeString(_iter33.getKey()); - _iter33.getValue().write(oprot); + oprot.writeString(_iter37.getKey()); + _iter37.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -748,6 +863,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ComponentCommon st oprot.writeI32(struct.parallelism_hint); oprot.writeFieldEnd(); } + if (struct.batch_sizes != null) { + if (struct.is_set_batch_sizes()) { + oprot.writeFieldBegin(BATCH_SIZES_FIELD_DESC); + { + oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, struct.batch_sizes.size())); + 
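// On the StandardScheme path the new field is framed as a field header (id 4, type
// MAP), then a map header (STRING keys, I32 values, entry count), then the key/value
// pairs. The is_set_batch_sizes() guard above means an unset map writes no bytes at
// all, so components that never configure batch sizes emit no extra payload for it.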
for (Map.Entry _iter38 : struct.batch_sizes.entrySet()) + { + oprot.writeString(_iter38.getKey()); + oprot.writeI32(_iter38.getValue()); + } + oprot.writeMapEnd(); + } + oprot.writeFieldEnd(); + } + } if (struct.json_conf != null) { if (struct.is_set_json_conf()) { oprot.writeFieldBegin(JSON_CONF_FIELD_DESC); @@ -774,31 +904,44 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ComponentCommon str TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.inputs.size()); - for (Map.Entry _iter34 : struct.inputs.entrySet()) + for (Map.Entry _iter39 : struct.inputs.entrySet()) { - _iter34.getKey().write(oprot); - _iter34.getValue().write(oprot); + _iter39.getKey().write(oprot); + _iter39.getValue().write(oprot); } } { oprot.writeI32(struct.streams.size()); - for (Map.Entry _iter35 : struct.streams.entrySet()) + for (Map.Entry _iter40 : struct.streams.entrySet()) { - oprot.writeString(_iter35.getKey()); - _iter35.getValue().write(oprot); + oprot.writeString(_iter40.getKey()); + _iter40.getValue().write(oprot); } } BitSet optionals = new BitSet(); if (struct.is_set_parallelism_hint()) { optionals.set(0); } - if (struct.is_set_json_conf()) { + if (struct.is_set_batch_sizes()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.is_set_json_conf()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.is_set_parallelism_hint()) { oprot.writeI32(struct.parallelism_hint); } + if (struct.is_set_batch_sizes()) { + { + oprot.writeI32(struct.batch_sizes.size()); + for (Map.Entry _iter41 : struct.batch_sizes.entrySet()) + { + oprot.writeString(_iter41.getKey()); + oprot.writeI32(_iter41.getValue()); + } + } + } if (struct.is_set_json_conf()) { oprot.writeString(struct.json_conf); } @@ -808,40 +951,55 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ComponentCommon str public void read(org.apache.thrift.protocol.TProtocol prot, ComponentCommon struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map36 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.inputs = new HashMap(2*_map36.size); - GlobalStreamId _key37; - Grouping _val38; - for (int _i39 = 0; _i39 < _map36.size; ++_i39) + org.apache.thrift.protocol.TMap _map42 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRUCT, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.inputs = new HashMap(2*_map42.size); + GlobalStreamId _key43; + Grouping _val44; + for (int _i45 = 0; _i45 < _map42.size; ++_i45) { - _key37 = new GlobalStreamId(); - _key37.read(iprot); - _val38 = new Grouping(); - _val38.read(iprot); - struct.inputs.put(_key37, _val38); + _key43 = new GlobalStreamId(); + _key43.read(iprot); + _val44 = new Grouping(); + _val44.read(iprot); + struct.inputs.put(_key43, _val44); } } struct.set_inputs_isSet(true); { - org.apache.thrift.protocol.TMap _map40 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.streams = new HashMap(2*_map40.size); - String _key41; - StreamInfo _val42; - for (int _i43 = 0; _i43 < _map40.size; ++_i43) + org.apache.thrift.protocol.TMap _map46 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.streams = new HashMap(2*_map46.size); + String _key47; + 
StreamInfo _val48; + for (int _i49 = 0; _i49 < _map46.size; ++_i49) { - _key41 = iprot.readString(); - _val42 = new StreamInfo(); - _val42.read(iprot); - struct.streams.put(_key41, _val42); + _key47 = iprot.readString(); + _val48 = new StreamInfo(); + _val48.read(iprot); + struct.streams.put(_key47, _val48); } } struct.set_streams_isSet(true); - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { struct.parallelism_hint = iprot.readI32(); struct.set_parallelism_hint_isSet(true); } if (incoming.get(1)) { + { + org.apache.thrift.protocol.TMap _map50 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, iprot.readI32()); + struct.batch_sizes = new HashMap(2*_map50.size); + String _key51; + int _val52; + for (int _i53 = 0; _i53 < _map50.size; ++_i53) + { + _key51 = iprot.readString(); + _val52 = iprot.readI32(); + struct.batch_sizes.put(_key51, _val52); + } + } + struct.set_batch_sizes_isSet(true); + } + if (incoming.get(2)) { struct.json_conf = iprot.readString(); struct.set_json_conf_isSet(true); } diff --git a/storm-core/src/jvm/backtype/storm/generated/Credentials.java b/storm-core/src/jvm/backtype/storm/generated/Credentials.java index 8484b18bd94..4f4fb48ad18 100644 --- a/storm-core/src/jvm/backtype/storm/generated/Credentials.java +++ b/storm-core/src/jvm/backtype/storm/generated/Credentials.java @@ -51,7 +51,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-29") public class Credentials implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Credentials"); @@ -365,15 +365,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Credentials struct) case 1: // CREDS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map344 = iprot.readMapBegin(); - struct.creds = new HashMap(2*_map344.size); - String _key345; - String _val346; - for (int _i347 = 0; _i347 < _map344.size; ++_i347) + org.apache.thrift.protocol.TMap _map354 = iprot.readMapBegin(); + struct.creds = new HashMap(2*_map354.size); + String _key355; + String _val356; + for (int _i357 = 0; _i357 < _map354.size; ++_i357) { - _key345 = iprot.readString(); - _val346 = iprot.readString(); - struct.creds.put(_key345, _val346); + _key355 = iprot.readString(); + _val356 = iprot.readString(); + struct.creds.put(_key355, _val356); } iprot.readMapEnd(); } @@ -399,10 +399,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Credentials struct oprot.writeFieldBegin(CREDS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.creds.size())); - for (Map.Entry _iter348 : struct.creds.entrySet()) + for (Map.Entry _iter358 : struct.creds.entrySet()) { - oprot.writeString(_iter348.getKey()); - oprot.writeString(_iter348.getValue()); + oprot.writeString(_iter358.getKey()); + oprot.writeString(_iter358.getValue()); } oprot.writeMapEnd(); } @@ -427,10 +427,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Credentials struct) TTupleProtocol oprot = (TTupleProtocol) prot; { 
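// Everything from here down (Credentials, ExecutorStats, LSApprovedWorkers,
// LSSupervisorAssignments, LSWorkerHeartbeat, LocalAssignment, LocalStateData,
// NodeInfo, RebalanceOptions, SpoutStats) is mechanical renumbering: the ten
// additional compiler temporaries that batch_sizes introduces in ComponentCommon
// shift every later generated counter by exactly ten (e.g. _map344 -> _map354);
// no behavior changes in these structs.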
oprot.writeI32(struct.creds.size()); - for (Map.Entry _iter349 : struct.creds.entrySet()) + for (Map.Entry _iter359 : struct.creds.entrySet()) { - oprot.writeString(_iter349.getKey()); - oprot.writeString(_iter349.getValue()); + oprot.writeString(_iter359.getKey()); + oprot.writeString(_iter359.getValue()); } } } @@ -439,15 +439,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Credentials struct) public void read(org.apache.thrift.protocol.TProtocol prot, Credentials struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map350 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.creds = new HashMap(2*_map350.size); - String _key351; - String _val352; - for (int _i353 = 0; _i353 < _map350.size; ++_i353) + org.apache.thrift.protocol.TMap _map360 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.creds = new HashMap(2*_map360.size); + String _key361; + String _val362; + for (int _i363 = 0; _i363 < _map360.size; ++_i363) { - _key351 = iprot.readString(); - _val352 = iprot.readString(); - struct.creds.put(_key351, _val352); + _key361 = iprot.readString(); + _val362 = iprot.readString(); + struct.creds.put(_key361, _val362); } } struct.set_creds_isSet(true); diff --git a/storm-core/src/jvm/backtype/storm/generated/ExecutorStats.java b/storm-core/src/jvm/backtype/storm/generated/ExecutorStats.java index 58a7936bd9b..8b56ad619c0 100644 --- a/storm-core/src/jvm/backtype/storm/generated/ExecutorStats.java +++ b/storm-core/src/jvm/backtype/storm/generated/ExecutorStats.java @@ -51,7 +51,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-29") public class ExecutorStats implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ExecutorStats"); @@ -660,27 +660,27 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ExecutorStats struc case 1: // EMITTED if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map258 = iprot.readMapBegin(); - struct.emitted = new HashMap>(2*_map258.size); - String _key259; - Map _val260; - for (int _i261 = 0; _i261 < _map258.size; ++_i261) + org.apache.thrift.protocol.TMap _map268 = iprot.readMapBegin(); + struct.emitted = new HashMap>(2*_map268.size); + String _key269; + Map _val270; + for (int _i271 = 0; _i271 < _map268.size; ++_i271) { - _key259 = iprot.readString(); + _key269 = iprot.readString(); { - org.apache.thrift.protocol.TMap _map262 = iprot.readMapBegin(); - _val260 = new HashMap(2*_map262.size); - String _key263; - long _val264; - for (int _i265 = 0; _i265 < _map262.size; ++_i265) + org.apache.thrift.protocol.TMap _map272 = iprot.readMapBegin(); + _val270 = new HashMap(2*_map272.size); + String _key273; + long _val274; + for (int _i275 = 0; _i275 < _map272.size; ++_i275) { - _key263 = iprot.readString(); - _val264 = iprot.readI64(); - _val260.put(_key263, _val264); + _key273 = iprot.readString(); + _val274 = iprot.readI64(); + _val270.put(_key273, _val274); } iprot.readMapEnd(); } - 
struct.emitted.put(_key259, _val260); + struct.emitted.put(_key269, _val270); } iprot.readMapEnd(); } @@ -692,27 +692,27 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ExecutorStats struc case 2: // TRANSFERRED if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map266 = iprot.readMapBegin(); - struct.transferred = new HashMap>(2*_map266.size); - String _key267; - Map _val268; - for (int _i269 = 0; _i269 < _map266.size; ++_i269) + org.apache.thrift.protocol.TMap _map276 = iprot.readMapBegin(); + struct.transferred = new HashMap>(2*_map276.size); + String _key277; + Map _val278; + for (int _i279 = 0; _i279 < _map276.size; ++_i279) { - _key267 = iprot.readString(); + _key277 = iprot.readString(); { - org.apache.thrift.protocol.TMap _map270 = iprot.readMapBegin(); - _val268 = new HashMap(2*_map270.size); - String _key271; - long _val272; - for (int _i273 = 0; _i273 < _map270.size; ++_i273) + org.apache.thrift.protocol.TMap _map280 = iprot.readMapBegin(); + _val278 = new HashMap(2*_map280.size); + String _key281; + long _val282; + for (int _i283 = 0; _i283 < _map280.size; ++_i283) { - _key271 = iprot.readString(); - _val272 = iprot.readI64(); - _val268.put(_key271, _val272); + _key281 = iprot.readString(); + _val282 = iprot.readI64(); + _val278.put(_key281, _val282); } iprot.readMapEnd(); } - struct.transferred.put(_key267, _val268); + struct.transferred.put(_key277, _val278); } iprot.readMapEnd(); } @@ -755,15 +755,15 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ExecutorStats stru oprot.writeFieldBegin(EMITTED_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, struct.emitted.size())); - for (Map.Entry> _iter274 : struct.emitted.entrySet()) + for (Map.Entry> _iter284 : struct.emitted.entrySet()) { - oprot.writeString(_iter274.getKey()); + oprot.writeString(_iter284.getKey()); { - oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I64, _iter274.getValue().size())); - for (Map.Entry _iter275 : _iter274.getValue().entrySet()) + oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I64, _iter284.getValue().size())); + for (Map.Entry _iter285 : _iter284.getValue().entrySet()) { - oprot.writeString(_iter275.getKey()); - oprot.writeI64(_iter275.getValue()); + oprot.writeString(_iter285.getKey()); + oprot.writeI64(_iter285.getValue()); } oprot.writeMapEnd(); } @@ -776,15 +776,15 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ExecutorStats stru oprot.writeFieldBegin(TRANSFERRED_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, struct.transferred.size())); - for (Map.Entry> _iter276 : struct.transferred.entrySet()) + for (Map.Entry> _iter286 : struct.transferred.entrySet()) { - oprot.writeString(_iter276.getKey()); + oprot.writeString(_iter286.getKey()); { - oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I64, _iter276.getValue().size())); - for (Map.Entry _iter277 : _iter276.getValue().entrySet()) + oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I64, _iter286.getValue().size())); + for 
(Map.Entry _iter287 : _iter286.getValue().entrySet()) { - oprot.writeString(_iter277.getKey()); - oprot.writeI64(_iter277.getValue()); + oprot.writeString(_iter287.getKey()); + oprot.writeI64(_iter287.getValue()); } oprot.writeMapEnd(); } @@ -820,30 +820,30 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ExecutorStats struc TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.emitted.size()); - for (Map.Entry> _iter278 : struct.emitted.entrySet()) + for (Map.Entry> _iter288 : struct.emitted.entrySet()) { - oprot.writeString(_iter278.getKey()); + oprot.writeString(_iter288.getKey()); { - oprot.writeI32(_iter278.getValue().size()); - for (Map.Entry _iter279 : _iter278.getValue().entrySet()) + oprot.writeI32(_iter288.getValue().size()); + for (Map.Entry _iter289 : _iter288.getValue().entrySet()) { - oprot.writeString(_iter279.getKey()); - oprot.writeI64(_iter279.getValue()); + oprot.writeString(_iter289.getKey()); + oprot.writeI64(_iter289.getValue()); } } } } { oprot.writeI32(struct.transferred.size()); - for (Map.Entry> _iter280 : struct.transferred.entrySet()) + for (Map.Entry> _iter290 : struct.transferred.entrySet()) { - oprot.writeString(_iter280.getKey()); + oprot.writeString(_iter290.getKey()); { - oprot.writeI32(_iter280.getValue().size()); - for (Map.Entry _iter281 : _iter280.getValue().entrySet()) + oprot.writeI32(_iter290.getValue().size()); + for (Map.Entry _iter291 : _iter290.getValue().entrySet()) { - oprot.writeString(_iter281.getKey()); - oprot.writeI64(_iter281.getValue()); + oprot.writeString(_iter291.getKey()); + oprot.writeI64(_iter291.getValue()); } } } @@ -856,50 +856,50 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ExecutorStats struc public void read(org.apache.thrift.protocol.TProtocol prot, ExecutorStats struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map282 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, iprot.readI32()); - struct.emitted = new HashMap>(2*_map282.size); - String _key283; - Map _val284; - for (int _i285 = 0; _i285 < _map282.size; ++_i285) + org.apache.thrift.protocol.TMap _map292 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, iprot.readI32()); + struct.emitted = new HashMap>(2*_map292.size); + String _key293; + Map _val294; + for (int _i295 = 0; _i295 < _map292.size; ++_i295) { - _key283 = iprot.readString(); + _key293 = iprot.readString(); { - org.apache.thrift.protocol.TMap _map286 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I64, iprot.readI32()); - _val284 = new HashMap(2*_map286.size); - String _key287; - long _val288; - for (int _i289 = 0; _i289 < _map286.size; ++_i289) + org.apache.thrift.protocol.TMap _map296 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I64, iprot.readI32()); + _val294 = new HashMap(2*_map296.size); + String _key297; + long _val298; + for (int _i299 = 0; _i299 < _map296.size; ++_i299) { - _key287 = iprot.readString(); - _val288 = iprot.readI64(); - _val284.put(_key287, _val288); + _key297 = iprot.readString(); + _val298 = iprot.readI64(); + _val294.put(_key297, _val298); } } - struct.emitted.put(_key283, _val284); + struct.emitted.put(_key293, _val294); } } struct.set_emitted_isSet(true); { - org.apache.thrift.protocol.TMap 
_map290 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, iprot.readI32()); - struct.transferred = new HashMap>(2*_map290.size); - String _key291; - Map _val292; - for (int _i293 = 0; _i293 < _map290.size; ++_i293) + org.apache.thrift.protocol.TMap _map300 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, iprot.readI32()); + struct.transferred = new HashMap>(2*_map300.size); + String _key301; + Map _val302; + for (int _i303 = 0; _i303 < _map300.size; ++_i303) { - _key291 = iprot.readString(); + _key301 = iprot.readString(); { - org.apache.thrift.protocol.TMap _map294 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I64, iprot.readI32()); - _val292 = new HashMap(2*_map294.size); - String _key295; - long _val296; - for (int _i297 = 0; _i297 < _map294.size; ++_i297) + org.apache.thrift.protocol.TMap _map304 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I64, iprot.readI32()); + _val302 = new HashMap(2*_map304.size); + String _key305; + long _val306; + for (int _i307 = 0; _i307 < _map304.size; ++_i307) { - _key295 = iprot.readString(); - _val296 = iprot.readI64(); - _val292.put(_key295, _val296); + _key305 = iprot.readString(); + _val306 = iprot.readI64(); + _val302.put(_key305, _val306); } } - struct.transferred.put(_key291, _val292); + struct.transferred.put(_key301, _val302); } } struct.set_transferred_isSet(true); diff --git a/storm-core/src/jvm/backtype/storm/generated/LSApprovedWorkers.java b/storm-core/src/jvm/backtype/storm/generated/LSApprovedWorkers.java index fd9740b4e61..3c35f6fac0d 100644 --- a/storm-core/src/jvm/backtype/storm/generated/LSApprovedWorkers.java +++ b/storm-core/src/jvm/backtype/storm/generated/LSApprovedWorkers.java @@ -51,7 +51,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-29") public class LSApprovedWorkers implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("LSApprovedWorkers"); @@ -365,15 +365,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, LSApprovedWorkers s case 1: // APPROVED_WORKERS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map482 = iprot.readMapBegin(); - struct.approved_workers = new HashMap(2*_map482.size); - String _key483; - int _val484; - for (int _i485 = 0; _i485 < _map482.size; ++_i485) + org.apache.thrift.protocol.TMap _map492 = iprot.readMapBegin(); + struct.approved_workers = new HashMap(2*_map492.size); + String _key493; + int _val494; + for (int _i495 = 0; _i495 < _map492.size; ++_i495) { - _key483 = iprot.readString(); - _val484 = iprot.readI32(); - struct.approved_workers.put(_key483, _val484); + _key493 = iprot.readString(); + _val494 = iprot.readI32(); + struct.approved_workers.put(_key493, _val494); } iprot.readMapEnd(); } @@ -399,10 +399,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, LSApprovedWorkers oprot.writeFieldBegin(APPROVED_WORKERS_FIELD_DESC); { oprot.writeMapBegin(new 
org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, struct.approved_workers.size())); - for (Map.Entry _iter486 : struct.approved_workers.entrySet()) + for (Map.Entry _iter496 : struct.approved_workers.entrySet()) { - oprot.writeString(_iter486.getKey()); - oprot.writeI32(_iter486.getValue()); + oprot.writeString(_iter496.getKey()); + oprot.writeI32(_iter496.getValue()); } oprot.writeMapEnd(); } @@ -427,10 +427,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, LSApprovedWorkers s TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.approved_workers.size()); - for (Map.Entry _iter487 : struct.approved_workers.entrySet()) + for (Map.Entry _iter497 : struct.approved_workers.entrySet()) { - oprot.writeString(_iter487.getKey()); - oprot.writeI32(_iter487.getValue()); + oprot.writeString(_iter497.getKey()); + oprot.writeI32(_iter497.getValue()); } } } @@ -439,15 +439,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, LSApprovedWorkers s public void read(org.apache.thrift.protocol.TProtocol prot, LSApprovedWorkers struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map488 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, iprot.readI32()); - struct.approved_workers = new HashMap(2*_map488.size); - String _key489; - int _val490; - for (int _i491 = 0; _i491 < _map488.size; ++_i491) + org.apache.thrift.protocol.TMap _map498 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, iprot.readI32()); + struct.approved_workers = new HashMap(2*_map498.size); + String _key499; + int _val500; + for (int _i501 = 0; _i501 < _map498.size; ++_i501) { - _key489 = iprot.readString(); - _val490 = iprot.readI32(); - struct.approved_workers.put(_key489, _val490); + _key499 = iprot.readString(); + _val500 = iprot.readI32(); + struct.approved_workers.put(_key499, _val500); } } struct.set_approved_workers_isSet(true); diff --git a/storm-core/src/jvm/backtype/storm/generated/LSSupervisorAssignments.java b/storm-core/src/jvm/backtype/storm/generated/LSSupervisorAssignments.java index 1d85ceca3c7..53a558ed29a 100644 --- a/storm-core/src/jvm/backtype/storm/generated/LSSupervisorAssignments.java +++ b/storm-core/src/jvm/backtype/storm/generated/LSSupervisorAssignments.java @@ -51,7 +51,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-29") public class LSSupervisorAssignments implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("LSSupervisorAssignments"); @@ -376,16 +376,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, LSSupervisorAssignm case 1: // ASSIGNMENTS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map492 = iprot.readMapBegin(); - struct.assignments = new HashMap(2*_map492.size); - int _key493; - LocalAssignment _val494; - for (int _i495 = 0; _i495 < _map492.size; ++_i495) + org.apache.thrift.protocol.TMap _map502 = iprot.readMapBegin(); + struct.assignments = new HashMap(2*_map502.size); + int 
_key503; + LocalAssignment _val504; + for (int _i505 = 0; _i505 < _map502.size; ++_i505) { - _key493 = iprot.readI32(); - _val494 = new LocalAssignment(); - _val494.read(iprot); - struct.assignments.put(_key493, _val494); + _key503 = iprot.readI32(); + _val504 = new LocalAssignment(); + _val504.read(iprot); + struct.assignments.put(_key503, _val504); } iprot.readMapEnd(); } @@ -411,10 +411,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, LSSupervisorAssign oprot.writeFieldBegin(ASSIGNMENTS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I32, org.apache.thrift.protocol.TType.STRUCT, struct.assignments.size())); - for (Map.Entry _iter496 : struct.assignments.entrySet()) + for (Map.Entry _iter506 : struct.assignments.entrySet()) { - oprot.writeI32(_iter496.getKey()); - _iter496.getValue().write(oprot); + oprot.writeI32(_iter506.getKey()); + _iter506.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -439,10 +439,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, LSSupervisorAssignm TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.assignments.size()); - for (Map.Entry _iter497 : struct.assignments.entrySet()) + for (Map.Entry _iter507 : struct.assignments.entrySet()) { - oprot.writeI32(_iter497.getKey()); - _iter497.getValue().write(oprot); + oprot.writeI32(_iter507.getKey()); + _iter507.getValue().write(oprot); } } } @@ -451,16 +451,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, LSSupervisorAssignm public void read(org.apache.thrift.protocol.TProtocol prot, LSSupervisorAssignments struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map498 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I32, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.assignments = new HashMap(2*_map498.size); - int _key499; - LocalAssignment _val500; - for (int _i501 = 0; _i501 < _map498.size; ++_i501) + org.apache.thrift.protocol.TMap _map508 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I32, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.assignments = new HashMap(2*_map508.size); + int _key509; + LocalAssignment _val510; + for (int _i511 = 0; _i511 < _map508.size; ++_i511) { - _key499 = iprot.readI32(); - _val500 = new LocalAssignment(); - _val500.read(iprot); - struct.assignments.put(_key499, _val500); + _key509 = iprot.readI32(); + _val510 = new LocalAssignment(); + _val510.read(iprot); + struct.assignments.put(_key509, _val510); } } struct.set_assignments_isSet(true); diff --git a/storm-core/src/jvm/backtype/storm/generated/LSWorkerHeartbeat.java b/storm-core/src/jvm/backtype/storm/generated/LSWorkerHeartbeat.java index 98726cc660b..14a6bee4d76 100644 --- a/storm-core/src/jvm/backtype/storm/generated/LSWorkerHeartbeat.java +++ b/storm-core/src/jvm/backtype/storm/generated/LSWorkerHeartbeat.java @@ -51,7 +51,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-29") public class LSWorkerHeartbeat implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("LSWorkerHeartbeat"); @@ -638,14 
+638,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, LSWorkerHeartbeat s case 3: // EXECUTORS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list502 = iprot.readListBegin(); - struct.executors = new ArrayList(_list502.size); - ExecutorInfo _elem503; - for (int _i504 = 0; _i504 < _list502.size; ++_i504) + org.apache.thrift.protocol.TList _list512 = iprot.readListBegin(); + struct.executors = new ArrayList(_list512.size); + ExecutorInfo _elem513; + for (int _i514 = 0; _i514 < _list512.size; ++_i514) { - _elem503 = new ExecutorInfo(); - _elem503.read(iprot); - struct.executors.add(_elem503); + _elem513 = new ExecutorInfo(); + _elem513.read(iprot); + struct.executors.add(_elem513); } iprot.readListEnd(); } @@ -687,9 +687,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, LSWorkerHeartbeat oprot.writeFieldBegin(EXECUTORS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.executors.size())); - for (ExecutorInfo _iter505 : struct.executors) + for (ExecutorInfo _iter515 : struct.executors) { - _iter505.write(oprot); + _iter515.write(oprot); } oprot.writeListEnd(); } @@ -719,9 +719,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, LSWorkerHeartbeat s oprot.writeString(struct.topology_id); { oprot.writeI32(struct.executors.size()); - for (ExecutorInfo _iter506 : struct.executors) + for (ExecutorInfo _iter516 : struct.executors) { - _iter506.write(oprot); + _iter516.write(oprot); } } oprot.writeI32(struct.port); @@ -735,14 +735,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, LSWorkerHeartbeat st struct.topology_id = iprot.readString(); struct.set_topology_id_isSet(true); { - org.apache.thrift.protocol.TList _list507 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.executors = new ArrayList(_list507.size); - ExecutorInfo _elem508; - for (int _i509 = 0; _i509 < _list507.size; ++_i509) + org.apache.thrift.protocol.TList _list517 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.executors = new ArrayList(_list517.size); + ExecutorInfo _elem518; + for (int _i519 = 0; _i519 < _list517.size; ++_i519) { - _elem508 = new ExecutorInfo(); - _elem508.read(iprot); - struct.executors.add(_elem508); + _elem518 = new ExecutorInfo(); + _elem518.read(iprot); + struct.executors.add(_elem518); } } struct.set_executors_isSet(true); diff --git a/storm-core/src/jvm/backtype/storm/generated/LocalAssignment.java b/storm-core/src/jvm/backtype/storm/generated/LocalAssignment.java index d985cba51be..ff7ee17bbda 100644 --- a/storm-core/src/jvm/backtype/storm/generated/LocalAssignment.java +++ b/storm-core/src/jvm/backtype/storm/generated/LocalAssignment.java @@ -51,7 +51,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-29") public class LocalAssignment implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("LocalAssignment"); @@ -464,14 +464,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, LocalAssignment str case 2: // EXECUTORS if (schemeField.type == 
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list474 = iprot.readListBegin(); - struct.executors = new ArrayList(_list474.size); - ExecutorInfo _elem475; - for (int _i476 = 0; _i476 < _list474.size; ++_i476) + org.apache.thrift.protocol.TList _list484 = iprot.readListBegin(); + struct.executors = new ArrayList(_list484.size); + ExecutorInfo _elem485; + for (int _i486 = 0; _i486 < _list484.size; ++_i486) { - _elem475 = new ExecutorInfo(); - _elem475.read(iprot); - struct.executors.add(_elem475); + _elem485 = new ExecutorInfo(); + _elem485.read(iprot); + struct.executors.add(_elem485); } iprot.readListEnd(); } @@ -502,9 +502,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, LocalAssignment st oprot.writeFieldBegin(EXECUTORS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.executors.size())); - for (ExecutorInfo _iter477 : struct.executors) + for (ExecutorInfo _iter487 : struct.executors) { - _iter477.write(oprot); + _iter487.write(oprot); } oprot.writeListEnd(); } @@ -530,9 +530,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, LocalAssignment str oprot.writeString(struct.topology_id); { oprot.writeI32(struct.executors.size()); - for (ExecutorInfo _iter478 : struct.executors) + for (ExecutorInfo _iter488 : struct.executors) { - _iter478.write(oprot); + _iter488.write(oprot); } } } @@ -543,14 +543,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, LocalAssignment stru struct.topology_id = iprot.readString(); struct.set_topology_id_isSet(true); { - org.apache.thrift.protocol.TList _list479 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.executors = new ArrayList(_list479.size); - ExecutorInfo _elem480; - for (int _i481 = 0; _i481 < _list479.size; ++_i481) + org.apache.thrift.protocol.TList _list489 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.executors = new ArrayList(_list489.size); + ExecutorInfo _elem490; + for (int _i491 = 0; _i491 < _list489.size; ++_i491) { - _elem480 = new ExecutorInfo(); - _elem480.read(iprot); - struct.executors.add(_elem480); + _elem490 = new ExecutorInfo(); + _elem490.read(iprot); + struct.executors.add(_elem490); } } struct.set_executors_isSet(true); diff --git a/storm-core/src/jvm/backtype/storm/generated/LocalStateData.java b/storm-core/src/jvm/backtype/storm/generated/LocalStateData.java index 0ce2d7a0c20..4684a01c4d2 100644 --- a/storm-core/src/jvm/backtype/storm/generated/LocalStateData.java +++ b/storm-core/src/jvm/backtype/storm/generated/LocalStateData.java @@ -51,7 +51,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-29") public class LocalStateData implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("LocalStateData"); @@ -376,16 +376,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, LocalStateData stru case 1: // SERIALIZED_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map464 = iprot.readMapBegin(); - struct.serialized_parts = new HashMap(2*_map464.size); - String 
_key465; - ThriftSerializedObject _val466; - for (int _i467 = 0; _i467 < _map464.size; ++_i467) + org.apache.thrift.protocol.TMap _map474 = iprot.readMapBegin(); + struct.serialized_parts = new HashMap(2*_map474.size); + String _key475; + ThriftSerializedObject _val476; + for (int _i477 = 0; _i477 < _map474.size; ++_i477) { - _key465 = iprot.readString(); - _val466 = new ThriftSerializedObject(); - _val466.read(iprot); - struct.serialized_parts.put(_key465, _val466); + _key475 = iprot.readString(); + _val476 = new ThriftSerializedObject(); + _val476.read(iprot); + struct.serialized_parts.put(_key475, _val476); } iprot.readMapEnd(); } @@ -411,10 +411,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, LocalStateData str oprot.writeFieldBegin(SERIALIZED_PARTS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.serialized_parts.size())); - for (Map.Entry _iter468 : struct.serialized_parts.entrySet()) + for (Map.Entry _iter478 : struct.serialized_parts.entrySet()) { - oprot.writeString(_iter468.getKey()); - _iter468.getValue().write(oprot); + oprot.writeString(_iter478.getKey()); + _iter478.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -439,10 +439,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, LocalStateData stru TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.serialized_parts.size()); - for (Map.Entry _iter469 : struct.serialized_parts.entrySet()) + for (Map.Entry _iter479 : struct.serialized_parts.entrySet()) { - oprot.writeString(_iter469.getKey()); - _iter469.getValue().write(oprot); + oprot.writeString(_iter479.getKey()); + _iter479.getValue().write(oprot); } } } @@ -451,16 +451,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, LocalStateData stru public void read(org.apache.thrift.protocol.TProtocol prot, LocalStateData struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map470 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.serialized_parts = new HashMap(2*_map470.size); - String _key471; - ThriftSerializedObject _val472; - for (int _i473 = 0; _i473 < _map470.size; ++_i473) + org.apache.thrift.protocol.TMap _map480 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.serialized_parts = new HashMap(2*_map480.size); + String _key481; + ThriftSerializedObject _val482; + for (int _i483 = 0; _i483 < _map480.size; ++_i483) { - _key471 = iprot.readString(); - _val472 = new ThriftSerializedObject(); - _val472.read(iprot); - struct.serialized_parts.put(_key471, _val472); + _key481 = iprot.readString(); + _val482 = new ThriftSerializedObject(); + _val482.read(iprot); + struct.serialized_parts.put(_key481, _val482); } } struct.set_serialized_parts_isSet(true); diff --git a/storm-core/src/jvm/backtype/storm/generated/NodeInfo.java b/storm-core/src/jvm/backtype/storm/generated/NodeInfo.java index f272cd81db8..483c9b26c74 100644 --- a/storm-core/src/jvm/backtype/storm/generated/NodeInfo.java +++ b/storm-core/src/jvm/backtype/storm/generated/NodeInfo.java @@ -51,7 +51,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-3") 
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-29") public class NodeInfo implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NodeInfo"); @@ -461,13 +461,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, NodeInfo struct) th case 2: // PORT if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { - org.apache.thrift.protocol.TSet _set380 = iprot.readSetBegin(); - struct.port = new HashSet(2*_set380.size); - long _elem381; - for (int _i382 = 0; _i382 < _set380.size; ++_i382) + org.apache.thrift.protocol.TSet _set390 = iprot.readSetBegin(); + struct.port = new HashSet(2*_set390.size); + long _elem391; + for (int _i392 = 0; _i392 < _set390.size; ++_i392) { - _elem381 = iprot.readI64(); - struct.port.add(_elem381); + _elem391 = iprot.readI64(); + struct.port.add(_elem391); } iprot.readSetEnd(); } @@ -498,9 +498,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, NodeInfo struct) t oprot.writeFieldBegin(PORT_FIELD_DESC); { oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, struct.port.size())); - for (long _iter383 : struct.port) + for (long _iter393 : struct.port) { - oprot.writeI64(_iter383); + oprot.writeI64(_iter393); } oprot.writeSetEnd(); } @@ -526,9 +526,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NodeInfo struct) th oprot.writeString(struct.node); { oprot.writeI32(struct.port.size()); - for (long _iter384 : struct.port) + for (long _iter394 : struct.port) { - oprot.writeI64(_iter384); + oprot.writeI64(_iter394); } } } @@ -539,13 +539,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, NodeInfo struct) thr struct.node = iprot.readString(); struct.set_node_isSet(true); { - org.apache.thrift.protocol.TSet _set385 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.port = new HashSet(2*_set385.size); - long _elem386; - for (int _i387 = 0; _i387 < _set385.size; ++_i387) + org.apache.thrift.protocol.TSet _set395 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.port = new HashSet(2*_set395.size); + long _elem396; + for (int _i397 = 0; _i397 < _set395.size; ++_i397) { - _elem386 = iprot.readI64(); - struct.port.add(_elem386); + _elem396 = iprot.readI64(); + struct.port.add(_elem396); } } struct.set_port_isSet(true); diff --git a/storm-core/src/jvm/backtype/storm/generated/RebalanceOptions.java b/storm-core/src/jvm/backtype/storm/generated/RebalanceOptions.java index 2cc7762cd84..0c781f00f68 100644 --- a/storm-core/src/jvm/backtype/storm/generated/RebalanceOptions.java +++ b/storm-core/src/jvm/backtype/storm/generated/RebalanceOptions.java @@ -51,7 +51,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-29") public class RebalanceOptions implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("RebalanceOptions"); @@ -529,15 +529,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, RebalanceOptions st case 3: // NUM_EXECUTORS if (schemeField.type == 
org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map334 = iprot.readMapBegin(); - struct.num_executors = new HashMap(2*_map334.size); - String _key335; - int _val336; - for (int _i337 = 0; _i337 < _map334.size; ++_i337) + org.apache.thrift.protocol.TMap _map344 = iprot.readMapBegin(); + struct.num_executors = new HashMap(2*_map344.size); + String _key345; + int _val346; + for (int _i347 = 0; _i347 < _map344.size; ++_i347) { - _key335 = iprot.readString(); - _val336 = iprot.readI32(); - struct.num_executors.put(_key335, _val336); + _key345 = iprot.readString(); + _val346 = iprot.readI32(); + struct.num_executors.put(_key345, _val346); } iprot.readMapEnd(); } @@ -574,10 +574,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, RebalanceOptions s oprot.writeFieldBegin(NUM_EXECUTORS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, struct.num_executors.size())); - for (Map.Entry _iter338 : struct.num_executors.entrySet()) + for (Map.Entry _iter348 : struct.num_executors.entrySet()) { - oprot.writeString(_iter338.getKey()); - oprot.writeI32(_iter338.getValue()); + oprot.writeString(_iter348.getKey()); + oprot.writeI32(_iter348.getValue()); } oprot.writeMapEnd(); } @@ -621,10 +621,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, RebalanceOptions st if (struct.is_set_num_executors()) { { oprot.writeI32(struct.num_executors.size()); - for (Map.Entry _iter339 : struct.num_executors.entrySet()) + for (Map.Entry _iter349 : struct.num_executors.entrySet()) { - oprot.writeString(_iter339.getKey()); - oprot.writeI32(_iter339.getValue()); + oprot.writeString(_iter349.getKey()); + oprot.writeI32(_iter349.getValue()); } } } @@ -644,15 +644,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, RebalanceOptions str } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map340 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, iprot.readI32()); - struct.num_executors = new HashMap(2*_map340.size); - String _key341; - int _val342; - for (int _i343 = 0; _i343 < _map340.size; ++_i343) + org.apache.thrift.protocol.TMap _map350 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, iprot.readI32()); + struct.num_executors = new HashMap(2*_map350.size); + String _key351; + int _val352; + for (int _i353 = 0; _i353 < _map350.size; ++_i353) { - _key341 = iprot.readString(); - _val342 = iprot.readI32(); - struct.num_executors.put(_key341, _val342); + _key351 = iprot.readString(); + _val352 = iprot.readI32(); + struct.num_executors.put(_key351, _val352); } } struct.set_num_executors_isSet(true); diff --git a/storm-core/src/jvm/backtype/storm/generated/SpoutStats.java b/storm-core/src/jvm/backtype/storm/generated/SpoutStats.java index c18a7d00fbe..fb1fbf9596f 100644 --- a/storm-core/src/jvm/backtype/storm/generated/SpoutStats.java +++ b/storm-core/src/jvm/backtype/storm/generated/SpoutStats.java @@ -51,7 +51,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-29") public class SpoutStats implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final 
org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SpoutStats"); @@ -602,27 +602,27 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SpoutStats struct) case 1: // ACKED if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map198 = iprot.readMapBegin(); - struct.acked = new HashMap>(2*_map198.size); - String _key199; - Map _val200; - for (int _i201 = 0; _i201 < _map198.size; ++_i201) + org.apache.thrift.protocol.TMap _map208 = iprot.readMapBegin(); + struct.acked = new HashMap>(2*_map208.size); + String _key209; + Map _val210; + for (int _i211 = 0; _i211 < _map208.size; ++_i211) { - _key199 = iprot.readString(); + _key209 = iprot.readString(); { - org.apache.thrift.protocol.TMap _map202 = iprot.readMapBegin(); - _val200 = new HashMap(2*_map202.size); - String _key203; - long _val204; - for (int _i205 = 0; _i205 < _map202.size; ++_i205) + org.apache.thrift.protocol.TMap _map212 = iprot.readMapBegin(); + _val210 = new HashMap(2*_map212.size); + String _key213; + long _val214; + for (int _i215 = 0; _i215 < _map212.size; ++_i215) { - _key203 = iprot.readString(); - _val204 = iprot.readI64(); - _val200.put(_key203, _val204); + _key213 = iprot.readString(); + _val214 = iprot.readI64(); + _val210.put(_key213, _val214); } iprot.readMapEnd(); } - struct.acked.put(_key199, _val200); + struct.acked.put(_key209, _val210); } iprot.readMapEnd(); } @@ -634,27 +634,27 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SpoutStats struct) case 2: // FAILED if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map206 = iprot.readMapBegin(); - struct.failed = new HashMap>(2*_map206.size); - String _key207; - Map _val208; - for (int _i209 = 0; _i209 < _map206.size; ++_i209) + org.apache.thrift.protocol.TMap _map216 = iprot.readMapBegin(); + struct.failed = new HashMap>(2*_map216.size); + String _key217; + Map _val218; + for (int _i219 = 0; _i219 < _map216.size; ++_i219) { - _key207 = iprot.readString(); + _key217 = iprot.readString(); { - org.apache.thrift.protocol.TMap _map210 = iprot.readMapBegin(); - _val208 = new HashMap(2*_map210.size); - String _key211; - long _val212; - for (int _i213 = 0; _i213 < _map210.size; ++_i213) + org.apache.thrift.protocol.TMap _map220 = iprot.readMapBegin(); + _val218 = new HashMap(2*_map220.size); + String _key221; + long _val222; + for (int _i223 = 0; _i223 < _map220.size; ++_i223) { - _key211 = iprot.readString(); - _val212 = iprot.readI64(); - _val208.put(_key211, _val212); + _key221 = iprot.readString(); + _val222 = iprot.readI64(); + _val218.put(_key221, _val222); } iprot.readMapEnd(); } - struct.failed.put(_key207, _val208); + struct.failed.put(_key217, _val218); } iprot.readMapEnd(); } @@ -666,27 +666,27 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SpoutStats struct) case 3: // COMPLETE_MS_AVG if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map214 = iprot.readMapBegin(); - struct.complete_ms_avg = new HashMap>(2*_map214.size); - String _key215; - Map _val216; - for (int _i217 = 0; _i217 < _map214.size; ++_i217) + org.apache.thrift.protocol.TMap _map224 = iprot.readMapBegin(); + struct.complete_ms_avg = new HashMap>(2*_map224.size); + String _key225; + Map _val226; + for (int _i227 = 0; _i227 < _map224.size; ++_i227) { - _key215 = iprot.readString(); + _key225 = iprot.readString(); { - org.apache.thrift.protocol.TMap _map218 = 
iprot.readMapBegin(); - _val216 = new HashMap(2*_map218.size); - String _key219; - double _val220; - for (int _i221 = 0; _i221 < _map218.size; ++_i221) + org.apache.thrift.protocol.TMap _map228 = iprot.readMapBegin(); + _val226 = new HashMap(2*_map228.size); + String _key229; + double _val230; + for (int _i231 = 0; _i231 < _map228.size; ++_i231) { - _key219 = iprot.readString(); - _val220 = iprot.readDouble(); - _val216.put(_key219, _val220); + _key229 = iprot.readString(); + _val230 = iprot.readDouble(); + _val226.put(_key229, _val230); } iprot.readMapEnd(); } - struct.complete_ms_avg.put(_key215, _val216); + struct.complete_ms_avg.put(_key225, _val226); } iprot.readMapEnd(); } @@ -712,15 +712,15 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SpoutStats struct) oprot.writeFieldBegin(ACKED_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, struct.acked.size())); - for (Map.Entry> _iter222 : struct.acked.entrySet()) + for (Map.Entry> _iter232 : struct.acked.entrySet()) { - oprot.writeString(_iter222.getKey()); + oprot.writeString(_iter232.getKey()); { - oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I64, _iter222.getValue().size())); - for (Map.Entry _iter223 : _iter222.getValue().entrySet()) + oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I64, _iter232.getValue().size())); + for (Map.Entry _iter233 : _iter232.getValue().entrySet()) { - oprot.writeString(_iter223.getKey()); - oprot.writeI64(_iter223.getValue()); + oprot.writeString(_iter233.getKey()); + oprot.writeI64(_iter233.getValue()); } oprot.writeMapEnd(); } @@ -733,15 +733,15 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SpoutStats struct) oprot.writeFieldBegin(FAILED_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, struct.failed.size())); - for (Map.Entry> _iter224 : struct.failed.entrySet()) + for (Map.Entry> _iter234 : struct.failed.entrySet()) { - oprot.writeString(_iter224.getKey()); + oprot.writeString(_iter234.getKey()); { - oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I64, _iter224.getValue().size())); - for (Map.Entry _iter225 : _iter224.getValue().entrySet()) + oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I64, _iter234.getValue().size())); + for (Map.Entry _iter235 : _iter234.getValue().entrySet()) { - oprot.writeString(_iter225.getKey()); - oprot.writeI64(_iter225.getValue()); + oprot.writeString(_iter235.getKey()); + oprot.writeI64(_iter235.getValue()); } oprot.writeMapEnd(); } @@ -754,15 +754,15 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SpoutStats struct) oprot.writeFieldBegin(COMPLETE_MS_AVG_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, struct.complete_ms_avg.size())); - for (Map.Entry> _iter226 : struct.complete_ms_avg.entrySet()) + for (Map.Entry> _iter236 : struct.complete_ms_avg.entrySet()) { - oprot.writeString(_iter226.getKey()); + oprot.writeString(_iter236.getKey()); { - oprot.writeMapBegin(new 
org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.DOUBLE, _iter226.getValue().size())); - for (Map.Entry _iter227 : _iter226.getValue().entrySet()) + oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.DOUBLE, _iter236.getValue().size())); + for (Map.Entry _iter237 : _iter236.getValue().entrySet()) { - oprot.writeString(_iter227.getKey()); - oprot.writeDouble(_iter227.getValue()); + oprot.writeString(_iter237.getKey()); + oprot.writeDouble(_iter237.getValue()); } oprot.writeMapEnd(); } @@ -790,45 +790,45 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SpoutStats struct) TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.acked.size()); - for (Map.Entry> _iter228 : struct.acked.entrySet()) + for (Map.Entry> _iter238 : struct.acked.entrySet()) { - oprot.writeString(_iter228.getKey()); + oprot.writeString(_iter238.getKey()); { - oprot.writeI32(_iter228.getValue().size()); - for (Map.Entry _iter229 : _iter228.getValue().entrySet()) + oprot.writeI32(_iter238.getValue().size()); + for (Map.Entry _iter239 : _iter238.getValue().entrySet()) { - oprot.writeString(_iter229.getKey()); - oprot.writeI64(_iter229.getValue()); + oprot.writeString(_iter239.getKey()); + oprot.writeI64(_iter239.getValue()); } } } } { oprot.writeI32(struct.failed.size()); - for (Map.Entry> _iter230 : struct.failed.entrySet()) + for (Map.Entry> _iter240 : struct.failed.entrySet()) { - oprot.writeString(_iter230.getKey()); + oprot.writeString(_iter240.getKey()); { - oprot.writeI32(_iter230.getValue().size()); - for (Map.Entry _iter231 : _iter230.getValue().entrySet()) + oprot.writeI32(_iter240.getValue().size()); + for (Map.Entry _iter241 : _iter240.getValue().entrySet()) { - oprot.writeString(_iter231.getKey()); - oprot.writeI64(_iter231.getValue()); + oprot.writeString(_iter241.getKey()); + oprot.writeI64(_iter241.getValue()); } } } } { oprot.writeI32(struct.complete_ms_avg.size()); - for (Map.Entry> _iter232 : struct.complete_ms_avg.entrySet()) + for (Map.Entry> _iter242 : struct.complete_ms_avg.entrySet()) { - oprot.writeString(_iter232.getKey()); + oprot.writeString(_iter242.getKey()); { - oprot.writeI32(_iter232.getValue().size()); - for (Map.Entry _iter233 : _iter232.getValue().entrySet()) + oprot.writeI32(_iter242.getValue().size()); + for (Map.Entry _iter243 : _iter242.getValue().entrySet()) { - oprot.writeString(_iter233.getKey()); - oprot.writeDouble(_iter233.getValue()); + oprot.writeString(_iter243.getKey()); + oprot.writeDouble(_iter243.getValue()); } } } @@ -839,74 +839,74 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SpoutStats struct) public void read(org.apache.thrift.protocol.TProtocol prot, SpoutStats struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map234 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, iprot.readI32()); - struct.acked = new HashMap>(2*_map234.size); - String _key235; - Map _val236; - for (int _i237 = 0; _i237 < _map234.size; ++_i237) + org.apache.thrift.protocol.TMap _map244 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, iprot.readI32()); + struct.acked = new HashMap>(2*_map244.size); + String _key245; + Map _val246; + for (int _i247 = 0; _i247 < _map244.size; ++_i247) { - _key235 = 
iprot.readString(); + _key245 = iprot.readString(); { - org.apache.thrift.protocol.TMap _map238 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I64, iprot.readI32()); - _val236 = new HashMap(2*_map238.size); - String _key239; - long _val240; - for (int _i241 = 0; _i241 < _map238.size; ++_i241) + org.apache.thrift.protocol.TMap _map248 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I64, iprot.readI32()); + _val246 = new HashMap(2*_map248.size); + String _key249; + long _val250; + for (int _i251 = 0; _i251 < _map248.size; ++_i251) { - _key239 = iprot.readString(); - _val240 = iprot.readI64(); - _val236.put(_key239, _val240); + _key249 = iprot.readString(); + _val250 = iprot.readI64(); + _val246.put(_key249, _val250); } } - struct.acked.put(_key235, _val236); + struct.acked.put(_key245, _val246); } } struct.set_acked_isSet(true); { - org.apache.thrift.protocol.TMap _map242 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, iprot.readI32()); - struct.failed = new HashMap>(2*_map242.size); - String _key243; - Map _val244; - for (int _i245 = 0; _i245 < _map242.size; ++_i245) + org.apache.thrift.protocol.TMap _map252 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, iprot.readI32()); + struct.failed = new HashMap>(2*_map252.size); + String _key253; + Map _val254; + for (int _i255 = 0; _i255 < _map252.size; ++_i255) { - _key243 = iprot.readString(); + _key253 = iprot.readString(); { - org.apache.thrift.protocol.TMap _map246 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I64, iprot.readI32()); - _val244 = new HashMap(2*_map246.size); - String _key247; - long _val248; - for (int _i249 = 0; _i249 < _map246.size; ++_i249) + org.apache.thrift.protocol.TMap _map256 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I64, iprot.readI32()); + _val254 = new HashMap(2*_map256.size); + String _key257; + long _val258; + for (int _i259 = 0; _i259 < _map256.size; ++_i259) { - _key247 = iprot.readString(); - _val248 = iprot.readI64(); - _val244.put(_key247, _val248); + _key257 = iprot.readString(); + _val258 = iprot.readI64(); + _val254.put(_key257, _val258); } } - struct.failed.put(_key243, _val244); + struct.failed.put(_key253, _val254); } } struct.set_failed_isSet(true); { - org.apache.thrift.protocol.TMap _map250 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, iprot.readI32()); - struct.complete_ms_avg = new HashMap>(2*_map250.size); - String _key251; - Map _val252; - for (int _i253 = 0; _i253 < _map250.size; ++_i253) + org.apache.thrift.protocol.TMap _map260 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.MAP, iprot.readI32()); + struct.complete_ms_avg = new HashMap>(2*_map260.size); + String _key261; + Map _val262; + for (int _i263 = 0; _i263 < _map260.size; ++_i263) { - _key251 = iprot.readString(); + _key261 = iprot.readString(); { - org.apache.thrift.protocol.TMap _map254 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.DOUBLE, iprot.readI32()); - _val252 = new HashMap(2*_map254.size); - String _key255; - double _val256; - for (int 
_i257 = 0; _i257 < _map254.size; ++_i257) + org.apache.thrift.protocol.TMap _map264 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.DOUBLE, iprot.readI32()); + _val262 = new HashMap(2*_map264.size); + String _key265; + double _val266; + for (int _i267 = 0; _i267 < _map264.size; ++_i267) { - _key255 = iprot.readString(); - _val256 = iprot.readDouble(); - _val252.put(_key255, _val256); + _key265 = iprot.readString(); + _val266 = iprot.readDouble(); + _val262.put(_key265, _val266); } } - struct.complete_ms_avg.put(_key251, _val252); + struct.complete_ms_avg.put(_key261, _val262); } } struct.set_complete_ms_avg_isSet(true); diff --git a/storm-core/src/jvm/backtype/storm/generated/StormBase.java b/storm-core/src/jvm/backtype/storm/generated/StormBase.java index 250cc9c1e12..253db46a965 100644 --- a/storm-core/src/jvm/backtype/storm/generated/StormBase.java +++ b/storm-core/src/jvm/backtype/storm/generated/StormBase.java @@ -51,7 +51,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-29") public class StormBase implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("StormBase"); @@ -1090,15 +1090,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, StormBase struct) t case 4: // COMPONENT_EXECUTORS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map434 = iprot.readMapBegin(); - struct.component_executors = new HashMap(2*_map434.size); - String _key435; - int _val436; - for (int _i437 = 0; _i437 < _map434.size; ++_i437) + org.apache.thrift.protocol.TMap _map444 = iprot.readMapBegin(); + struct.component_executors = new HashMap(2*_map444.size); + String _key445; + int _val446; + for (int _i447 = 0; _i447 < _map444.size; ++_i447) { - _key435 = iprot.readString(); - _val436 = iprot.readI32(); - struct.component_executors.put(_key435, _val436); + _key445 = iprot.readString(); + _val446 = iprot.readI32(); + struct.component_executors.put(_key445, _val446); } iprot.readMapEnd(); } @@ -1143,16 +1143,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, StormBase struct) t case 9: // COMPONENT_DEBUG if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map438 = iprot.readMapBegin(); - struct.component_debug = new HashMap(2*_map438.size); - String _key439; - DebugOptions _val440; - for (int _i441 = 0; _i441 < _map438.size; ++_i441) + org.apache.thrift.protocol.TMap _map448 = iprot.readMapBegin(); + struct.component_debug = new HashMap(2*_map448.size); + String _key449; + DebugOptions _val450; + for (int _i451 = 0; _i451 < _map448.size; ++_i451) { - _key439 = iprot.readString(); - _val440 = new DebugOptions(); - _val440.read(iprot); - struct.component_debug.put(_key439, _val440); + _key449 = iprot.readString(); + _val450 = new DebugOptions(); + _val450.read(iprot); + struct.component_debug.put(_key449, _val450); } iprot.readMapEnd(); } @@ -1192,10 +1192,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, StormBase struct) oprot.writeFieldBegin(COMPONENT_EXECUTORS_FIELD_DESC); { oprot.writeMapBegin(new 
org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, struct.component_executors.size())); - for (Map.Entry _iter442 : struct.component_executors.entrySet()) + for (Map.Entry _iter452 : struct.component_executors.entrySet()) { - oprot.writeString(_iter442.getKey()); - oprot.writeI32(_iter442.getValue()); + oprot.writeString(_iter452.getKey()); + oprot.writeI32(_iter452.getValue()); } oprot.writeMapEnd(); } @@ -1233,10 +1233,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, StormBase struct) oprot.writeFieldBegin(COMPONENT_DEBUG_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.component_debug.size())); - for (Map.Entry _iter443 : struct.component_debug.entrySet()) + for (Map.Entry _iter453 : struct.component_debug.entrySet()) { - oprot.writeString(_iter443.getKey()); - _iter443.getValue().write(oprot); + oprot.writeString(_iter453.getKey()); + _iter453.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -1286,10 +1286,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, StormBase struct) t if (struct.is_set_component_executors()) { { oprot.writeI32(struct.component_executors.size()); - for (Map.Entry _iter444 : struct.component_executors.entrySet()) + for (Map.Entry _iter454 : struct.component_executors.entrySet()) { - oprot.writeString(_iter444.getKey()); - oprot.writeI32(_iter444.getValue()); + oprot.writeString(_iter454.getKey()); + oprot.writeI32(_iter454.getValue()); } } } @@ -1308,10 +1308,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, StormBase struct) t if (struct.is_set_component_debug()) { { oprot.writeI32(struct.component_debug.size()); - for (Map.Entry _iter445 : struct.component_debug.entrySet()) + for (Map.Entry _iter455 : struct.component_debug.entrySet()) { - oprot.writeString(_iter445.getKey()); - _iter445.getValue().write(oprot); + oprot.writeString(_iter455.getKey()); + _iter455.getValue().write(oprot); } } } @@ -1329,15 +1329,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, StormBase struct) th BitSet incoming = iprot.readBitSet(6); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map446 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, iprot.readI32()); - struct.component_executors = new HashMap(2*_map446.size); - String _key447; - int _val448; - for (int _i449 = 0; _i449 < _map446.size; ++_i449) + org.apache.thrift.protocol.TMap _map456 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, iprot.readI32()); + struct.component_executors = new HashMap(2*_map456.size); + String _key457; + int _val458; + for (int _i459 = 0; _i459 < _map456.size; ++_i459) { - _key447 = iprot.readString(); - _val448 = iprot.readI32(); - struct.component_executors.put(_key447, _val448); + _key457 = iprot.readString(); + _val458 = iprot.readI32(); + struct.component_executors.put(_key457, _val458); } } struct.set_component_executors_isSet(true); @@ -1361,16 +1361,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, StormBase struct) th } if (incoming.get(5)) { { - org.apache.thrift.protocol.TMap _map450 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.component_debug = new HashMap(2*_map450.size); - String 
_key451; - DebugOptions _val452; - for (int _i453 = 0; _i453 < _map450.size; ++_i453) + org.apache.thrift.protocol.TMap _map460 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.component_debug = new HashMap(2*_map460.size); + String _key461; + DebugOptions _val462; + for (int _i463 = 0; _i463 < _map460.size; ++_i463) { - _key451 = iprot.readString(); - _val452 = new DebugOptions(); - _val452.read(iprot); - struct.component_debug.put(_key451, _val452); + _key461 = iprot.readString(); + _val462 = new DebugOptions(); + _val462.read(iprot); + struct.component_debug.put(_key461, _val462); } } struct.set_component_debug_isSet(true); diff --git a/storm-core/src/jvm/backtype/storm/generated/StormTopology.java b/storm-core/src/jvm/backtype/storm/generated/StormTopology.java index d022e95d2ba..1ab1d5cc8e5 100644 --- a/storm-core/src/jvm/backtype/storm/generated/StormTopology.java +++ b/storm-core/src/jvm/backtype/storm/generated/StormTopology.java @@ -51,7 +51,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-2-6") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-29") public class StormTopology implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("StormTopology"); @@ -596,16 +596,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, StormTopology struc case 1: // SPOUTS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map44 = iprot.readMapBegin(); - struct.spouts = new HashMap(2*_map44.size); - String _key45; - SpoutSpec _val46; - for (int _i47 = 0; _i47 < _map44.size; ++_i47) + org.apache.thrift.protocol.TMap _map54 = iprot.readMapBegin(); + struct.spouts = new HashMap(2*_map54.size); + String _key55; + SpoutSpec _val56; + for (int _i57 = 0; _i57 < _map54.size; ++_i57) { - _key45 = iprot.readString(); - _val46 = new SpoutSpec(); - _val46.read(iprot); - struct.spouts.put(_key45, _val46); + _key55 = iprot.readString(); + _val56 = new SpoutSpec(); + _val56.read(iprot); + struct.spouts.put(_key55, _val56); } iprot.readMapEnd(); } @@ -617,16 +617,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, StormTopology struc case 2: // BOLTS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map48 = iprot.readMapBegin(); - struct.bolts = new HashMap(2*_map48.size); - String _key49; - Bolt _val50; - for (int _i51 = 0; _i51 < _map48.size; ++_i51) + org.apache.thrift.protocol.TMap _map58 = iprot.readMapBegin(); + struct.bolts = new HashMap(2*_map58.size); + String _key59; + Bolt _val60; + for (int _i61 = 0; _i61 < _map58.size; ++_i61) { - _key49 = iprot.readString(); - _val50 = new Bolt(); - _val50.read(iprot); - struct.bolts.put(_key49, _val50); + _key59 = iprot.readString(); + _val60 = new Bolt(); + _val60.read(iprot); + struct.bolts.put(_key59, _val60); } iprot.readMapEnd(); } @@ -638,16 +638,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, StormTopology struc case 3: // STATE_SPOUTS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map52 = iprot.readMapBegin(); - struct.state_spouts = new HashMap(2*_map52.size); - String _key53; 
- StateSpoutSpec _val54; - for (int _i55 = 0; _i55 < _map52.size; ++_i55) + org.apache.thrift.protocol.TMap _map62 = iprot.readMapBegin(); + struct.state_spouts = new HashMap(2*_map62.size); + String _key63; + StateSpoutSpec _val64; + for (int _i65 = 0; _i65 < _map62.size; ++_i65) { - _key53 = iprot.readString(); - _val54 = new StateSpoutSpec(); - _val54.read(iprot); - struct.state_spouts.put(_key53, _val54); + _key63 = iprot.readString(); + _val64 = new StateSpoutSpec(); + _val64.read(iprot); + struct.state_spouts.put(_key63, _val64); } iprot.readMapEnd(); } @@ -673,10 +673,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, StormTopology stru oprot.writeFieldBegin(SPOUTS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.spouts.size())); - for (Map.Entry _iter56 : struct.spouts.entrySet()) + for (Map.Entry _iter66 : struct.spouts.entrySet()) { - oprot.writeString(_iter56.getKey()); - _iter56.getValue().write(oprot); + oprot.writeString(_iter66.getKey()); + _iter66.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -686,10 +686,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, StormTopology stru oprot.writeFieldBegin(BOLTS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.bolts.size())); - for (Map.Entry _iter57 : struct.bolts.entrySet()) + for (Map.Entry _iter67 : struct.bolts.entrySet()) { - oprot.writeString(_iter57.getKey()); - _iter57.getValue().write(oprot); + oprot.writeString(_iter67.getKey()); + _iter67.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -699,10 +699,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, StormTopology stru oprot.writeFieldBegin(STATE_SPOUTS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.state_spouts.size())); - for (Map.Entry _iter58 : struct.state_spouts.entrySet()) + for (Map.Entry _iter68 : struct.state_spouts.entrySet()) { - oprot.writeString(_iter58.getKey()); - _iter58.getValue().write(oprot); + oprot.writeString(_iter68.getKey()); + _iter68.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -727,26 +727,26 @@ public void write(org.apache.thrift.protocol.TProtocol prot, StormTopology struc TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.spouts.size()); - for (Map.Entry _iter59 : struct.spouts.entrySet()) + for (Map.Entry _iter69 : struct.spouts.entrySet()) { - oprot.writeString(_iter59.getKey()); - _iter59.getValue().write(oprot); + oprot.writeString(_iter69.getKey()); + _iter69.getValue().write(oprot); } } { oprot.writeI32(struct.bolts.size()); - for (Map.Entry _iter60 : struct.bolts.entrySet()) + for (Map.Entry _iter70 : struct.bolts.entrySet()) { - oprot.writeString(_iter60.getKey()); - _iter60.getValue().write(oprot); + oprot.writeString(_iter70.getKey()); + _iter70.getValue().write(oprot); } } { oprot.writeI32(struct.state_spouts.size()); - for (Map.Entry _iter61 : struct.state_spouts.entrySet()) + for (Map.Entry _iter71 : struct.state_spouts.entrySet()) { - oprot.writeString(_iter61.getKey()); - _iter61.getValue().write(oprot); + oprot.writeString(_iter71.getKey()); + _iter71.getValue().write(oprot); } } } @@ -755,44 +755,44 @@ public void write(org.apache.thrift.protocol.TProtocol prot, StormTopology struc public void 
read(org.apache.thrift.protocol.TProtocol prot, StormTopology struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map62 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.spouts = new HashMap(2*_map62.size); - String _key63; - SpoutSpec _val64; - for (int _i65 = 0; _i65 < _map62.size; ++_i65) + org.apache.thrift.protocol.TMap _map72 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.spouts = new HashMap(2*_map72.size); + String _key73; + SpoutSpec _val74; + for (int _i75 = 0; _i75 < _map72.size; ++_i75) { - _key63 = iprot.readString(); - _val64 = new SpoutSpec(); - _val64.read(iprot); - struct.spouts.put(_key63, _val64); + _key73 = iprot.readString(); + _val74 = new SpoutSpec(); + _val74.read(iprot); + struct.spouts.put(_key73, _val74); } } struct.set_spouts_isSet(true); { - org.apache.thrift.protocol.TMap _map66 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.bolts = new HashMap(2*_map66.size); - String _key67; - Bolt _val68; - for (int _i69 = 0; _i69 < _map66.size; ++_i69) + org.apache.thrift.protocol.TMap _map76 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.bolts = new HashMap(2*_map76.size); + String _key77; + Bolt _val78; + for (int _i79 = 0; _i79 < _map76.size; ++_i79) { - _key67 = iprot.readString(); - _val68 = new Bolt(); - _val68.read(iprot); - struct.bolts.put(_key67, _val68); + _key77 = iprot.readString(); + _val78 = new Bolt(); + _val78.read(iprot); + struct.bolts.put(_key77, _val78); } } struct.set_bolts_isSet(true); { - org.apache.thrift.protocol.TMap _map70 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.state_spouts = new HashMap(2*_map70.size); - String _key71; - StateSpoutSpec _val72; - for (int _i73 = 0; _i73 < _map70.size; ++_i73) + org.apache.thrift.protocol.TMap _map80 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.state_spouts = new HashMap(2*_map80.size); + String _key81; + StateSpoutSpec _val82; + for (int _i83 = 0; _i83 < _map80.size; ++_i83) { - _key71 = iprot.readString(); - _val72 = new StateSpoutSpec(); - _val72.read(iprot); - struct.state_spouts.put(_key71, _val72); + _key81 = iprot.readString(); + _val82 = new StateSpoutSpec(); + _val82.read(iprot); + struct.state_spouts.put(_key81, _val82); } } struct.set_state_spouts_isSet(true); diff --git a/storm-core/src/jvm/backtype/storm/generated/SupervisorInfo.java b/storm-core/src/jvm/backtype/storm/generated/SupervisorInfo.java index bb4698c66d4..865d4e2e8b8 100644 --- a/storm-core/src/jvm/backtype/storm/generated/SupervisorInfo.java +++ b/storm-core/src/jvm/backtype/storm/generated/SupervisorInfo.java @@ -51,7 +51,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-29") public class SupervisorInfo implements org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SupervisorInfo"); @@ -990,13 +990,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SupervisorInfo stru case 4: // USED_PORTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list354 = iprot.readListBegin(); - struct.used_ports = new ArrayList(_list354.size); - long _elem355; - for (int _i356 = 0; _i356 < _list354.size; ++_i356) + org.apache.thrift.protocol.TList _list364 = iprot.readListBegin(); + struct.used_ports = new ArrayList(_list364.size); + long _elem365; + for (int _i366 = 0; _i366 < _list364.size; ++_i366) { - _elem355 = iprot.readI64(); - struct.used_ports.add(_elem355); + _elem365 = iprot.readI64(); + struct.used_ports.add(_elem365); } iprot.readListEnd(); } @@ -1008,13 +1008,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SupervisorInfo stru case 5: // META if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list357 = iprot.readListBegin(); - struct.meta = new ArrayList(_list357.size); - long _elem358; - for (int _i359 = 0; _i359 < _list357.size; ++_i359) + org.apache.thrift.protocol.TList _list367 = iprot.readListBegin(); + struct.meta = new ArrayList(_list367.size); + long _elem368; + for (int _i369 = 0; _i369 < _list367.size; ++_i369) { - _elem358 = iprot.readI64(); - struct.meta.add(_elem358); + _elem368 = iprot.readI64(); + struct.meta.add(_elem368); } iprot.readListEnd(); } @@ -1026,15 +1026,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SupervisorInfo stru case 6: // SCHEDULER_META if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map360 = iprot.readMapBegin(); - struct.scheduler_meta = new HashMap(2*_map360.size); - String _key361; - String _val362; - for (int _i363 = 0; _i363 < _map360.size; ++_i363) + org.apache.thrift.protocol.TMap _map370 = iprot.readMapBegin(); + struct.scheduler_meta = new HashMap(2*_map370.size); + String _key371; + String _val372; + for (int _i373 = 0; _i373 < _map370.size; ++_i373) { - _key361 = iprot.readString(); - _val362 = iprot.readString(); - struct.scheduler_meta.put(_key361, _val362); + _key371 = iprot.readString(); + _val372 = iprot.readString(); + struct.scheduler_meta.put(_key371, _val372); } iprot.readMapEnd(); } @@ -1092,9 +1092,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SupervisorInfo str oprot.writeFieldBegin(USED_PORTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.used_ports.size())); - for (long _iter364 : struct.used_ports) + for (long _iter374 : struct.used_ports) { - oprot.writeI64(_iter364); + oprot.writeI64(_iter374); } oprot.writeListEnd(); } @@ -1106,9 +1106,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SupervisorInfo str oprot.writeFieldBegin(META_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.meta.size())); - for (long _iter365 : struct.meta) + for (long _iter375 : struct.meta) { - oprot.writeI64(_iter365); + oprot.writeI64(_iter375); } oprot.writeListEnd(); } @@ -1120,10 +1120,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SupervisorInfo str oprot.writeFieldBegin(SCHEDULER_META_FIELD_DESC); { oprot.writeMapBegin(new 
org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.scheduler_meta.size())); - for (Map.Entry _iter366 : struct.scheduler_meta.entrySet()) + for (Map.Entry _iter376 : struct.scheduler_meta.entrySet()) { - oprot.writeString(_iter366.getKey()); - oprot.writeString(_iter366.getValue()); + oprot.writeString(_iter376.getKey()); + oprot.writeString(_iter376.getValue()); } oprot.writeMapEnd(); } @@ -1187,28 +1187,28 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SupervisorInfo stru if (struct.is_set_used_ports()) { { oprot.writeI32(struct.used_ports.size()); - for (long _iter367 : struct.used_ports) + for (long _iter377 : struct.used_ports) { - oprot.writeI64(_iter367); + oprot.writeI64(_iter377); } } } if (struct.is_set_meta()) { { oprot.writeI32(struct.meta.size()); - for (long _iter368 : struct.meta) + for (long _iter378 : struct.meta) { - oprot.writeI64(_iter368); + oprot.writeI64(_iter378); } } } if (struct.is_set_scheduler_meta()) { { oprot.writeI32(struct.scheduler_meta.size()); - for (Map.Entry _iter369 : struct.scheduler_meta.entrySet()) + for (Map.Entry _iter379 : struct.scheduler_meta.entrySet()) { - oprot.writeString(_iter369.getKey()); - oprot.writeString(_iter369.getValue()); + oprot.writeString(_iter379.getKey()); + oprot.writeString(_iter379.getValue()); } } } @@ -1234,41 +1234,41 @@ public void read(org.apache.thrift.protocol.TProtocol prot, SupervisorInfo struc } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list370 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.used_ports = new ArrayList(_list370.size); - long _elem371; - for (int _i372 = 0; _i372 < _list370.size; ++_i372) + org.apache.thrift.protocol.TList _list380 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.used_ports = new ArrayList(_list380.size); + long _elem381; + for (int _i382 = 0; _i382 < _list380.size; ++_i382) { - _elem371 = iprot.readI64(); - struct.used_ports.add(_elem371); + _elem381 = iprot.readI64(); + struct.used_ports.add(_elem381); } } struct.set_used_ports_isSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list373 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.meta = new ArrayList(_list373.size); - long _elem374; - for (int _i375 = 0; _i375 < _list373.size; ++_i375) + org.apache.thrift.protocol.TList _list383 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.meta = new ArrayList(_list383.size); + long _elem384; + for (int _i385 = 0; _i385 < _list383.size; ++_i385) { - _elem374 = iprot.readI64(); - struct.meta.add(_elem374); + _elem384 = iprot.readI64(); + struct.meta.add(_elem384); } } struct.set_meta_isSet(true); } if (incoming.get(3)) { { - org.apache.thrift.protocol.TMap _map376 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.scheduler_meta = new HashMap(2*_map376.size); - String _key377; - String _val378; - for (int _i379 = 0; _i379 < _map376.size; ++_i379) + org.apache.thrift.protocol.TMap _map386 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.scheduler_meta = new HashMap(2*_map386.size); + String _key387; + String _val388; + for (int _i389 = 0; _i389 < 
_map386.size; ++_i389) { - _key377 = iprot.readString(); - _val378 = iprot.readString(); - struct.scheduler_meta.put(_key377, _val378); + _key387 = iprot.readString(); + _val388 = iprot.readString(); + struct.scheduler_meta.put(_key387, _val388); } } struct.set_scheduler_meta_isSet(true); diff --git a/storm-core/src/jvm/backtype/storm/generated/TopologyInfo.java b/storm-core/src/jvm/backtype/storm/generated/TopologyInfo.java index 6c2cc69e12d..06e3a1c5841 100644 --- a/storm-core/src/jvm/backtype/storm/generated/TopologyInfo.java +++ b/storm-core/src/jvm/backtype/storm/generated/TopologyInfo.java @@ -51,7 +51,7 @@ import org.slf4j.LoggerFactory; @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-3") +@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-9-29") public class TopologyInfo implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TopologyInfo"); @@ -1194,14 +1194,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TopologyInfo struct case 4: // EXECUTORS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list298 = iprot.readListBegin(); - struct.executors = new ArrayList(_list298.size); - ExecutorSummary _elem299; - for (int _i300 = 0; _i300 < _list298.size; ++_i300) + org.apache.thrift.protocol.TList _list308 = iprot.readListBegin(); + struct.executors = new ArrayList(_list308.size); + ExecutorSummary _elem309; + for (int _i310 = 0; _i310 < _list308.size; ++_i310) { - _elem299 = new ExecutorSummary(); - _elem299.read(iprot); - struct.executors.add(_elem299); + _elem309 = new ExecutorSummary(); + _elem309.read(iprot); + struct.executors.add(_elem309); } iprot.readListEnd(); } @@ -1221,26 +1221,26 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TopologyInfo struct case 6: // ERRORS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map301 = iprot.readMapBegin(); - struct.errors = new HashMap>(2*_map301.size); - String _key302; - List _val303; - for (int _i304 = 0; _i304 < _map301.size; ++_i304) + org.apache.thrift.protocol.TMap _map311 = iprot.readMapBegin(); + struct.errors = new HashMap>(2*_map311.size); + String _key312; + List _val313; + for (int _i314 = 0; _i314 < _map311.size; ++_i314) { - _key302 = iprot.readString(); + _key312 = iprot.readString(); { - org.apache.thrift.protocol.TList _list305 = iprot.readListBegin(); - _val303 = new ArrayList(_list305.size); - ErrorInfo _elem306; - for (int _i307 = 0; _i307 < _list305.size; ++_i307) + org.apache.thrift.protocol.TList _list315 = iprot.readListBegin(); + _val313 = new ArrayList(_list315.size); + ErrorInfo _elem316; + for (int _i317 = 0; _i317 < _list315.size; ++_i317) { - _elem306 = new ErrorInfo(); - _elem306.read(iprot); - _val303.add(_elem306); + _elem316 = new ErrorInfo(); + _elem316.read(iprot); + _val313.add(_elem316); } iprot.readListEnd(); } - struct.errors.put(_key302, _val303); + struct.errors.put(_key312, _val313); } iprot.readMapEnd(); } @@ -1252,16 +1252,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TopologyInfo struct case 7: // COMPONENT_DEBUG if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map308 = iprot.readMapBegin(); - struct.component_debug = new 
HashMap(2*_map308.size); - String _key309; - DebugOptions _val310; - for (int _i311 = 0; _i311 < _map308.size; ++_i311) + org.apache.thrift.protocol.TMap _map318 = iprot.readMapBegin(); + struct.component_debug = new HashMap(2*_map318.size); + String _key319; + DebugOptions _val320; + for (int _i321 = 0; _i321 < _map318.size; ++_i321) { - _key309 = iprot.readString(); - _val310 = new DebugOptions(); - _val310.read(iprot); - struct.component_debug.put(_key309, _val310); + _key319 = iprot.readString(); + _val320 = new DebugOptions(); + _val320.read(iprot); + struct.component_debug.put(_key319, _val320); } iprot.readMapEnd(); } @@ -1324,9 +1324,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TopologyInfo struc oprot.writeFieldBegin(EXECUTORS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.executors.size())); - for (ExecutorSummary _iter312 : struct.executors) + for (ExecutorSummary _iter322 : struct.executors) { - _iter312.write(oprot); + _iter322.write(oprot); } oprot.writeListEnd(); } @@ -1341,14 +1341,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TopologyInfo struc oprot.writeFieldBegin(ERRORS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, struct.errors.size())); - for (Map.Entry> _iter313 : struct.errors.entrySet()) + for (Map.Entry> _iter323 : struct.errors.entrySet()) { - oprot.writeString(_iter313.getKey()); + oprot.writeString(_iter323.getKey()); { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter313.getValue().size())); - for (ErrorInfo _iter314 : _iter313.getValue()) + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter323.getValue().size())); + for (ErrorInfo _iter324 : _iter323.getValue()) { - _iter314.write(oprot); + _iter324.write(oprot); } oprot.writeListEnd(); } @@ -1362,10 +1362,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TopologyInfo struc oprot.writeFieldBegin(COMPONENT_DEBUG_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.component_debug.size())); - for (Map.Entry _iter315 : struct.component_debug.entrySet()) + for (Map.Entry _iter325 : struct.component_debug.entrySet()) { - oprot.writeString(_iter315.getKey()); - _iter315.getValue().write(oprot); + oprot.writeString(_iter325.getKey()); + _iter325.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -1413,22 +1413,22 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TopologyInfo struct oprot.writeI32(struct.uptime_secs); { oprot.writeI32(struct.executors.size()); - for (ExecutorSummary _iter316 : struct.executors) + for (ExecutorSummary _iter326 : struct.executors) { - _iter316.write(oprot); + _iter326.write(oprot); } } oprot.writeString(struct.status); { oprot.writeI32(struct.errors.size()); - for (Map.Entry> _iter317 : struct.errors.entrySet()) + for (Map.Entry> _iter327 : struct.errors.entrySet()) { - oprot.writeString(_iter317.getKey()); + oprot.writeString(_iter327.getKey()); { - oprot.writeI32(_iter317.getValue().size()); - for (ErrorInfo _iter318 : _iter317.getValue()) + oprot.writeI32(_iter327.getValue().size()); + for (ErrorInfo _iter328 : _iter327.getValue()) { - _iter318.write(oprot); + _iter328.write(oprot); } } } @@ -1450,10 +1450,10 @@ 
public void write(org.apache.thrift.protocol.TProtocol prot, TopologyInfo struct if (struct.is_set_component_debug()) { { oprot.writeI32(struct.component_debug.size()); - for (Map.Entry _iter319 : struct.component_debug.entrySet()) + for (Map.Entry _iter329 : struct.component_debug.entrySet()) { - oprot.writeString(_iter319.getKey()); - _iter319.getValue().write(oprot); + oprot.writeString(_iter329.getKey()); + _iter329.getValue().write(oprot); } } } @@ -1478,55 +1478,55 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TopologyInfo struct) struct.uptime_secs = iprot.readI32(); struct.set_uptime_secs_isSet(true); { - org.apache.thrift.protocol.TList _list320 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.executors = new ArrayList(_list320.size); - ExecutorSummary _elem321; - for (int _i322 = 0; _i322 < _list320.size; ++_i322) + org.apache.thrift.protocol.TList _list330 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.executors = new ArrayList(_list330.size); + ExecutorSummary _elem331; + for (int _i332 = 0; _i332 < _list330.size; ++_i332) { - _elem321 = new ExecutorSummary(); - _elem321.read(iprot); - struct.executors.add(_elem321); + _elem331 = new ExecutorSummary(); + _elem331.read(iprot); + struct.executors.add(_elem331); } } struct.set_executors_isSet(true); struct.status = iprot.readString(); struct.set_status_isSet(true); { - org.apache.thrift.protocol.TMap _map323 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32()); - struct.errors = new HashMap>(2*_map323.size); - String _key324; - List _val325; - for (int _i326 = 0; _i326 < _map323.size; ++_i326) + org.apache.thrift.protocol.TMap _map333 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32()); + struct.errors = new HashMap>(2*_map333.size); + String _key334; + List _val335; + for (int _i336 = 0; _i336 < _map333.size; ++_i336) { - _key324 = iprot.readString(); + _key334 = iprot.readString(); { - org.apache.thrift.protocol.TList _list327 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - _val325 = new ArrayList(_list327.size); - ErrorInfo _elem328; - for (int _i329 = 0; _i329 < _list327.size; ++_i329) + org.apache.thrift.protocol.TList _list337 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + _val335 = new ArrayList(_list337.size); + ErrorInfo _elem338; + for (int _i339 = 0; _i339 < _list337.size; ++_i339) { - _elem328 = new ErrorInfo(); - _elem328.read(iprot); - _val325.add(_elem328); + _elem338 = new ErrorInfo(); + _elem338.read(iprot); + _val335.add(_elem338); } } - struct.errors.put(_key324, _val325); + struct.errors.put(_key334, _val335); } } struct.set_errors_isSet(true); BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map330 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.component_debug = new HashMap(2*_map330.size); - String _key331; - DebugOptions _val332; - for (int _i333 = 0; _i333 < _map330.size; ++_i333) + org.apache.thrift.protocol.TMap _map340 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + 
struct.component_debug = new HashMap(2*_map340.size); + String _key341; + DebugOptions _val342; + for (int _i343 = 0; _i343 < _map340.size; ++_i343) { - _key331 = iprot.readString(); - _val332 = new DebugOptions(); - _val332.read(iprot); - struct.component_debug.put(_key331, _val332); + _key341 = iprot.readString(); + _val342 = new DebugOptions(); + _val342.read(iprot); + struct.component_debug.put(_key341, _val342); } } struct.set_component_debug_isSet(true); diff --git a/storm-core/src/jvm/backtype/storm/topology/TopologyBuilder.java b/storm-core/src/jvm/backtype/storm/topology/TopologyBuilder.java index 38b30d728b4..125b2959269 100644 --- a/storm-core/src/jvm/backtype/storm/topology/TopologyBuilder.java +++ b/storm-core/src/jvm/backtype/storm/topology/TopologyBuilder.java @@ -45,9 +45,9 @@ *
  * TopologyBuilder builder = new TopologyBuilder();
  *
- * builder.setSpout("1", new TestWordSpout(true), 5);
- * builder.setSpout("2", new TestWordSpout(true), 3);
- * builder.setBolt("3", new TestWordCounter(), 3)
+ * builder.setSpout("1", new TestWordSpout(true), 5, 20);
+ * builder.setSpout("2", new TestWordSpout(true), 3, 20);
+ * builder.setBolt("3", new TestWordCounter(), 3, 40)
  *          .fieldsGrouping("1", new Fields("word"))
  *          .fieldsGrouping("2", new Fields("word"));
  * builder.setBolt("4", new TestGlobalCount())
@@ -66,9 +66,9 @@
  * 
  * TopologyBuilder builder = new TopologyBuilder();
  *
- * builder.setSpout("1", new TestWordSpout(true), 5);
- * builder.setSpout("2", new TestWordSpout(true), 3);
- * builder.setBolt("3", new TestWordCounter(), 3)
+ * builder.setSpout("1", new TestWordSpout(true), 5, 20);
+ * builder.setSpout("2", new TestWordSpout(true), 3, 20);
+ * builder.setBolt("3", new TestWordCounter(), 3, 40)
  *          .fieldsGrouping("1", new Fields("word"))
  *          .fieldsGrouping("2", new Fields("word"));
  * builder.setBolt("4", new TestGlobalCount())
@@ -118,7 +118,7 @@ public StormTopology createTopology() {
     }
 
     /**
-     * Define a new bolt in this topology with parallelism of just one thread.
+     * Define a new bolt in this topology with parallelism of just one thread and transport batching disabled.
      *
      * @param id the id of this component. This id is referenced by other components that want to consume this bolt's outputs.
      * @param bolt the bolt
@@ -126,11 +126,11 @@ public StormTopology createTopology() {
      * @throws IllegalArgumentException if {@code parallelism_hint} is not positive
      */
     public BoltDeclarer setBolt(String id, IRichBolt bolt) throws IllegalArgumentException {
-        return setBolt(id, bolt, null);
+        return setBolt(id, bolt, (Number) null);
     }
 
     /**
-     * Define a new bolt in this topology with the specified amount of parallelism.
+     * Define a new bolt in this topology with the specified amount of parallelism and transport batching disabled.
      *
      * @param id the id of this component. This id is referenced by other components that want to consume this bolt's outputs.
      * @param bolt the bolt
@@ -139,8 +139,49 @@ public BoltDeclarer setBolt(String id, IRichBolt bolt) throws IllegalArgumentExc
      * @throws IllegalArgumentException if {@code parallelism_hint} is not positive
      */
     public BoltDeclarer setBolt(String id, IRichBolt bolt, Number parallelism_hint) throws IllegalArgumentException {
+        return setBolt(id, bolt, parallelism_hint, (Number) null);
+    }
+
+    /**
+     * Define a new bolt in this topology with parallelism of just one thread and transport batch sizes for the specified streams.
+     *
+     * @param id the id of this component. This id is referenced by other components that want to consume this bolt's outputs.
+     * @param bolt the bolt
+     * @param batch_sizes the number of output tuples that should be assembled into a batch per output stream. Each batch will be transferred to a consumer instance at once.
+     * @return use the returned object to declare the inputs to this component
+     * @throws IllegalArgumentException if {@code parallelism_hint} is not positive
+     */
+    public <T extends Number> BoltDeclarer setBolt(String id, IRichBolt bolt, Map<String, T> batch_sizes) throws IllegalArgumentException {
+        return setBolt(id, bolt, null, batch_sizes);
+    }
+    
+    /**
+     * Define a new bolt in this topology with the specified amount of parallelism and single transport batch size for all output streams.
+     *
+     * @param id the id of this component. This id is referenced by other components that want to consume this bolt's outputs.
+     * @param bolt the bolt
+     * @param parallelism_hint the number of tasks that should be assigned to execute this bolt. Each task will run on a thread in a process somewhere around the cluster.
+     * @param batch_size the number of output tuples that should be assembled into a batch. Each batch will be transferred to a consumer instance at once.
+     * @return use the returned object to declare the inputs to this component
+     * @throws IllegalArgumentException if {@code parallelism_hint} is not positive
+     */
+    public BoltDeclarer setBolt(String id, IRichBolt bolt, Number parallelism_hint, Number batch_size) throws IllegalArgumentException {
+        return setBolt(id, bolt, parallelism_hint, applyBatchSizeToDeclaredStreams(bolt, batch_size));
+    }
+
+    /**
+     * Define a new bolt in this topology with the specified amount of parallelism and transport batch sizes for the specified streams.
+     *
+     * @param id the id of this component. This id is referenced by other components that want to consume this bolt's outputs.
+     * @param bolt the bolt
+     * @param parallelism_hint the number of tasks that should be assigned to execute this bolt. Each task will run on a thread in a process somewhere around the cluster.
+     * @param batch_sizes the number of output tuples that should be assembled into a batch per output stream. Each batch will be transferred to a consumer instance at once.
+     * @return use the returned object to declare the inputs to this component
+     * @throws IllegalArgumentException if {@code parallelism_hint} is not positive
+     */
+    public <T extends Number> BoltDeclarer setBolt(String id, IRichBolt bolt, Number parallelism_hint, Map<String, T> batch_sizes) throws IllegalArgumentException {
         validateUnusedId(id);
-        initCommon(id, bolt, parallelism_hint);
+        initCommon(id, bolt, parallelism_hint, convertHashMap(batch_sizes));
         _bolts.put(id, bolt);
         return new BoltGetter(id);
     }
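
To make the new setBolt overloads concrete, here is a short usage sketch (hedged: component ids and the "default" stream id are illustrative, TestWordSpout/TestWordCounter are the example components from the class-level Javadoc, and java.util.HashMap/Map plus backtype.storm.tuple.Fields are assumed imported):

    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("1", new TestWordSpout(true), 5, 20);

    // scalar form: batch size 40 for every stream the bolt declares
    builder.setBolt("3", new TestWordCounter(), 3, 40)
           .fieldsGrouping("1", new Fields("word"));

    // map form: one explicit size per output stream
    Map<String, Integer> batchSizes = new HashMap<String, Integer>();
    batchSizes.put("default", 40); // assumes the bolt emits on the default stream
    builder.setBolt("3b", new TestWordCounter(), 3, batchSizes)
           .fieldsGrouping("1", new Fields("word"));
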
@@ -157,7 +198,7 @@ public BoltDeclarer setBolt(String id, IRichBolt bolt, Number parallelism_hint)
      * @throws IllegalArgumentException if {@code parallelism_hint} is not positive
      */
     public BoltDeclarer setBolt(String id, IBasicBolt bolt) throws IllegalArgumentException {
-        return setBolt(id, bolt, null);
+        return setBolt(id, bolt, (Number) null);
     }
 
     /**
@@ -168,47 +209,151 @@ public BoltDeclarer setBolt(String id, IBasicBolt bolt) throws IllegalArgumentEx
      *
      * @param id the id of this component. This id is referenced by other components that want to consume this bolt's outputs.
      * @param bolt the basic bolt
-     * @param parallelism_hint the number of tasks that should be assigned to execute this bolt. Each task will run on a thread in a process somwehere around the cluster.
+     * @param parallelism_hint the number of tasks that should be assigned to execute this bolt. Each task will run on a thread in a process somewhere around the cluster.
      * @return use the returned object to declare the inputs to this component
      * @throws IllegalArgumentException if {@code parallelism_hint} is not positive
      */
     public BoltDeclarer setBolt(String id, IBasicBolt bolt, Number parallelism_hint) throws IllegalArgumentException {
-        return setBolt(id, new BasicBoltExecutor(bolt), parallelism_hint);
+        return setBolt(id, bolt, parallelism_hint, (Number) null);
     }
 
     /**
-     * Define a new spout in this topology.
+     * Define a new bolt in this topology. This defines a basic bolt, which is a
+     * simpler to use but more restricted kind of bolt. Basic bolts are intended
+     * for non-aggregation processing and automate the anchoring/acking process to
+     * achieve proper reliability in the topology.
+     *
+     * @param id the id of this component. This id is referenced by other components that want to consume this bolt's outputs.
+     * @param bolt the basic bolt
+     * @param batch_sizes the number of output tuples that should be assembled into a batch per output stream. Each batch will be transferred to a consumer instance at once.
+     * @return use the returned object to declare the inputs to this component
+     * @throws IllegalArgumentException if {@code parallelism_hint} is not positive
+     */
+    public <T extends Number> BoltDeclarer setBolt(String id, IBasicBolt bolt, Map<String, T> batch_sizes) throws IllegalArgumentException {
+        return setBolt(id, bolt, null, batch_sizes);
+    }
+
+    /**
+     * Define a new bolt in this topology. This defines a basic bolt, which is a
+     * simpler to use but more restricted kind of bolt. Basic bolts are intended
+     * for non-aggregation processing and automate the anchoring/acking process to
+     * achieve proper reliability in the topology.
+     *
+     * @param id the id of this component. This id is referenced by other components that want to consume this bolt's outputs.
+     * @param bolt the basic bolt
+     * @param parallelism_hint the number of tasks that should be assigned to execute this bolt. Each task will run on a thread in a process somewhere around the cluster.
+     * @param batch_size the number of output tuples that should be assembled into a batch. Each batch will be transferred to a consumer instance at once.
+     * @return use the returned object to declare the inputs to this component
+     * @throws IllegalArgumentException if {@code parallelism_hint} is not positive
+     */
+    public BoltDeclarer setBolt(String id, IBasicBolt bolt, Number parallelism_hint, Number batch_size) throws IllegalArgumentException {
+        return setBolt(id, bolt, parallelism_hint, applyBatchSizeToDeclaredStreams(bolt, batch_size));
+    }
+
+    /**
+     * Define a new bolt in this topology. This defines a basic bolt, which is a
+     * simpler to use but more restricted kind of bolt. Basic bolts are intended
+     * for non-aggregation processing and automate the anchoring/acking process to
+     * achieve proper reliability in the topology.
+     *
+     * @param id the id of this component. This id is referenced by other components that want to consume this bolt's outputs.
+     * @param bolt the basic bolt
+     * @param parallelism_hint the number of tasks that should be assigned to execute this bolt. Each task will run on a thread in a process somewhere around the cluster.
+     * @param batch_sizes the number of output tuples that should be assembled into a batch per output stream. Each batch will be transferred to a consumer instance at once.
+     * @return use the returned object to declare the inputs to this component
+     * @throws IllegalArgumentException if {@code parallelism_hint} is not positive
+     */
+    public <T extends Number> BoltDeclarer setBolt(String id, IBasicBolt bolt, Number parallelism_hint, Map<String, T> batch_sizes) throws IllegalArgumentException {
+        return setBolt(id, new BasicBoltExecutor(bolt), parallelism_hint, batch_sizes);
+    }
+
+    /**
+     * Define a new spout in this topology with parallelism of just one thread and transport batching disabled.
      *
      * @param id the id of this component. This id is referenced by other components that want to consume this spout's outputs.
      * @param spout the spout
      * @throws IllegalArgumentException if {@code parallelism_hint} is not positive
      */
     public SpoutDeclarer setSpout(String id, IRichSpout spout) throws IllegalArgumentException {
-        return setSpout(id, spout, null);
+        return setSpout(id, spout, (Number) null);
     }
 
     /**
-     * Define a new spout in this topology with the specified parallelism. If the spout declares
-     * itself as non-distributed, the parallelism_hint will be ignored and only one task
-     * will be allocated to this component.
+     * Define a new spout in this topology with the specified parallelism and transport batching
+     * disabled. If the spout declares itself as non-distributed, the parallelism_hint will be
+     * ignored and only one task will be allocated to this component.
      *
      * @param id the id of this component. This id is referenced by other components that want to consume this spout's outputs.
-     * @param parallelism_hint the number of tasks that should be assigned to execute this spout. Each task will run on a thread in a process somwehere around the cluster.
      * @param spout the spout
+     * @param parallelism_hint the number of tasks that should be assigned to execute this spout. Each task will run on a thread in a process somewhere around the cluster.
      * @throws IllegalArgumentException if {@code parallelism_hint} is not positive
      */
     public SpoutDeclarer setSpout(String id, IRichSpout spout, Number parallelism_hint) throws IllegalArgumentException {
+        return setSpout(id, spout, parallelism_hint, (Number) null);
+    }
+
+    /**
+     * Define a new spout in this topology with parallelism of just one thread and transport batch sizes for the specified streams.
+     *
+     * @param id the id of this component. This id is referenced by other components that want to consume this spout's outputs.
+     * @param spout the spout
+     * @param batch_sizes the number of output tuples that should be assembled into a batch per output stream. Each batch will be transferred to a consumer instance at once.
+     * @throws IllegalArgumentException if {@code parallelism_hint} is not positive
+     */
+    public <T extends Number> SpoutDeclarer setSpout(String id, IRichSpout spout, Map<String, T> batch_sizes) throws IllegalArgumentException {
+        return setSpout(id, spout, null, batch_sizes);
+    }
+
+    /**
+     * Define a new spout in this topology with the specified parallelism and single transport
+     * batch size for all output streams. If the spout declares itself as non-distributed, the
+     * parallelism_hint will be ignored and only one task will be allocated to this component.
+     *
+     * @param id the id of this component. This id is referenced by other components that want to consume this spout's outputs.
+     * @param spout the spout
+     * @param parallelism_hint the number of tasks that should be assigned to execute this spout. Each task will run on a thread in a process somewhere around the cluster.
+     * @param batch_size the number of output tuples that should be assembled into a batch. Each batch will be transferred to a consumer instance at once.
+     * @throws IllegalArgumentException if {@code parallelism_hint} is not positive
+     */
+    public SpoutDeclarer setSpout(String id, IRichSpout spout, Number parallelism_hint, Number batch_size) throws IllegalArgumentException {
+        return setSpout(id, spout, parallelism_hint, applyBatchSizeToDeclaredStreams(spout, batch_size));
+    }
+
+    /**
+     * Define a new spout in this topology with the specified parallelism and transport batch sizes
+     * for the specified streams. If the spout declares itself as non-distributed, the
+     * parallelism_hint will be ignored and only one task will be allocated to this component.
+     *
+     * @param id the id of this component. This id is referenced by other components that want to consume this spout's outputs.
+     * @param spout the spout
+     * @param parallelism_hint the number of tasks that should be assigned to execute this spout. Each task will run on a thread in a process somewhere around the cluster.
+     * @param batch_sizes the number of output tuples that should be assembled into a batch per output stream. Each batch will be transferred to a consumer instance at once.
+     * @throws IllegalArgumentException if {@code parallelism_hint} is not positive
+     */
+    public <T extends Number> SpoutDeclarer setSpout(String id, IRichSpout spout, Number parallelism_hint, Map<String, T> batch_sizes) throws IllegalArgumentException {
         validateUnusedId(id);
-        initCommon(id, spout, parallelism_hint);
+        initCommon(id, spout, parallelism_hint, convertHashMap(batch_sizes));
         _spouts.put(id, spout);
         return new SpoutGetter(id);
     }
 
     public void setStateSpout(String id, IRichStateSpout stateSpout) throws IllegalArgumentException {
-        setStateSpout(id, stateSpout, null);
+        setStateSpout(id, stateSpout, (Number) null);
     }
 
     public void setStateSpout(String id, IRichStateSpout stateSpout, Number parallelism_hint) throws IllegalArgumentException {
+        setStateSpout(id, stateSpout, parallelism_hint, (Number) null);
+    }
+
+    public <T extends Number> void setStateSpout(String id, IRichStateSpout stateSpout, Map<String, T> batch_sizes) throws IllegalArgumentException {
+        setStateSpout(id, stateSpout, null, batch_sizes);
+    }
+
+    public void setStateSpout(String id, IRichStateSpout stateSpout, Number parallelism_hint, Number batch_size) throws IllegalArgumentException {
+        setStateSpout(id, stateSpout, parallelism_hint, applyBatchSizeToDeclaredStreams(stateSpout, batch_size));
+    }
+
+    public <T extends Number> void setStateSpout(String id, IRichStateSpout stateSpout, Number parallelism_hint, Map<String, T> batch_sizes) throws IllegalArgumentException {
         validateUnusedId(id);
         // TODO: finish
     }
@@ -235,7 +380,7 @@ private ComponentCommon getComponentCommon(String id, IComponent component) {
         return ret;        
     }
     
-    private void initCommon(String id, IComponent component, Number parallelism) throws IllegalArgumentException {
+    private void initCommon(String id, IComponent component, Number parallelism, Map<String, Integer> batch_sizes) throws IllegalArgumentException {
         ComponentCommon common = new ComponentCommon();
         common.set_inputs(new HashMap<GlobalStreamId, Grouping>());
         if(parallelism!=null) {
@@ -245,6 +390,7 @@ private void initCommon(String id, IComponent component, Number parallelism) thr
             }
             common.set_parallelism_hint(dop);
         }
+        if(batch_sizes!=null) common.set_batch_sizes(batch_sizes);
         Map conf = component.getComponentConfiguration();
         if(conf!=null) common.set_json_conf(JSONValue.toJSONString(conf));
         _commons.put(id, common);
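
For concreteness, a minimal sketch of what a call like initCommon("words", spout, 2, sizes) ends up storing on the generated struct, using only setters that appear in this patch (the stream id and batch size are made-up illustration values):

    ComponentCommon common = new ComponentCommon();
    common.set_inputs(new HashMap<GlobalStreamId, Grouping>());
    common.set_parallelism_hint(2);               // dop, validated positive above
    Map<String, Integer> sizes = new HashMap<String, Integer>();
    sizes.put("default", 100);                    // stream id -> i32 batch size
    common.set_batch_sizes(sizes);                // the new optional field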
@@ -379,4 +525,30 @@ private static String mergeIntoJson(Map into, Map newMap) {
         if(newMap!=null) res.putAll(newMap);
         return JSONValue.toJSONString(res);
     }
+    
+    private static HashMap<String, Number> applyBatchSizeToDeclaredStreams(IComponent spout_or_bolt, Number batch_size) {
+        HashMap<String, Number> batchSizes = null;
+        if (batch_size != null) {
+            batchSizes = new HashMap<String, Number>();
+
+            OutputFieldsGetter declarer = new OutputFieldsGetter();
+            spout_or_bolt.declareOutputFields(declarer);
+
+            for (String outputStreamId : declarer.getFieldsDeclaration().keySet()) {
+                batchSizes.put(outputStreamId, batch_size);
+            }
+        }
+        return batchSizes;
+    }
+
+    private static <T extends Number> HashMap<String, Integer> convertHashMap(Map<String, T> batch_sizes) {
+        HashMap<String, Integer> batchSizes = null;
+        if(batch_sizes != null) {
+            batchSizes = new HashMap<String, Integer>();
+            for(Map.Entry<String, T> e : batch_sizes.entrySet()) {
+                batchSizes.put(e.getKey(), e.getValue().intValue());
+            }
+        }
+        return batchSizes;
+    }
 }
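
A usage sketch of the two new setSpout overloads; MySpout is a hypothetical spout that declares a "default" and a "metrics" output stream, and all ids and sizes below are illustrative:

    TopologyBuilder builder = new TopologyBuilder();

    // one batch size (100 tuples) for every stream the spout declares,
    // expanded internally by applyBatchSizeToDeclaredStreams()
    builder.setSpout("words", new MySpout(), 2, 100);

    // per-stream batch sizes; any Number value works and is narrowed
    // to int by convertHashMap() via Number.intValue()
    Map<String, Number> sizes = new HashMap<String, Number>();
    sizes.put("default", 100);
    sizes.put("metrics", 10);
    builder.setSpout("words2", new MySpout(), 2, sizes);
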
diff --git a/storm-core/src/py/storm/ttypes.py b/storm-core/src/py/storm/ttypes.py
index 4f484490075..06682835eaf 100644
--- a/storm-core/src/py/storm/ttypes.py
+++ b/storm-core/src/py/storm/ttypes.py
@@ -872,6 +872,7 @@ class ComponentCommon:
    - inputs
    - streams
    - parallelism_hint
+   - batch_sizes
    - json_conf
   """
 
@@ -880,13 +881,15 @@ class ComponentCommon:
     (1, TType.MAP, 'inputs', (TType.STRUCT,(GlobalStreamId, GlobalStreamId.thrift_spec),TType.STRUCT,(Grouping, Grouping.thrift_spec)), None, ), # 1
     (2, TType.MAP, 'streams', (TType.STRING,None,TType.STRUCT,(StreamInfo, StreamInfo.thrift_spec)), None, ), # 2
     (3, TType.I32, 'parallelism_hint', None, None, ), # 3
-    (4, TType.STRING, 'json_conf', None, None, ), # 4
+    (4, TType.MAP, 'batch_sizes', (TType.STRING,None,TType.I32,None), None, ), # 4
+    (5, TType.STRING, 'json_conf', None, None, ), # 5
   )
 
-  def __init__(self, inputs=None, streams=None, parallelism_hint=None, json_conf=None,):
+  def __init__(self, inputs=None, streams=None, parallelism_hint=None, batch_sizes=None, json_conf=None,):
     self.inputs = inputs
     self.streams = streams
     self.parallelism_hint = parallelism_hint
+    self.batch_sizes = batch_sizes
     self.json_conf = json_conf
 
   def read(self, iprot):
@@ -929,6 +932,17 @@ def read(self, iprot):
         else:
           iprot.skip(ftype)
       elif fid == 4:
+        if ftype == TType.MAP:
+          self.batch_sizes = {}
+          (_ktype36, _vtype37, _size35 ) = iprot.readMapBegin()
+          for _i39 in xrange(_size35):
+            _key40 = iprot.readString().decode('utf-8')
+            _val41 = iprot.readI32();
+            self.batch_sizes[_key40] = _val41
+          iprot.readMapEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 5:
         if ftype == TType.STRING:
           self.json_conf = iprot.readString().decode('utf-8')
         else:
@@ -946,25 +960,33 @@ def write(self, oprot):
     if self.inputs is not None:
       oprot.writeFieldBegin('inputs', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRUCT, TType.STRUCT, len(self.inputs))
-      for kiter35,viter36 in self.inputs.items():
-        kiter35.write(oprot)
-        viter36.write(oprot)
+      for kiter42,viter43 in self.inputs.items():
+        kiter42.write(oprot)
+        viter43.write(oprot)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.streams is not None:
       oprot.writeFieldBegin('streams', TType.MAP, 2)
       oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.streams))
-      for kiter37,viter38 in self.streams.items():
-        oprot.writeString(kiter37.encode('utf-8'))
-        viter38.write(oprot)
+      for kiter44,viter45 in self.streams.items():
+        oprot.writeString(kiter44.encode('utf-8'))
+        viter45.write(oprot)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.parallelism_hint is not None:
       oprot.writeFieldBegin('parallelism_hint', TType.I32, 3)
       oprot.writeI32(self.parallelism_hint)
       oprot.writeFieldEnd()
+    if self.batch_sizes is not None:
+      oprot.writeFieldBegin('batch_sizes', TType.MAP, 4)
+      oprot.writeMapBegin(TType.STRING, TType.I32, len(self.batch_sizes))
+      for kiter46,viter47 in self.batch_sizes.items():
+        oprot.writeString(kiter46.encode('utf-8'))
+        oprot.writeI32(viter47)
+      oprot.writeMapEnd()
+      oprot.writeFieldEnd()
     if self.json_conf is not None:
-      oprot.writeFieldBegin('json_conf', TType.STRING, 4)
+      oprot.writeFieldBegin('json_conf', TType.STRING, 5)
       oprot.writeString(self.json_conf.encode('utf-8'))
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -983,6 +1005,7 @@ def __hash__(self):
     value = (value * 31) ^ hash(self.inputs)
     value = (value * 31) ^ hash(self.streams)
     value = (value * 31) ^ hash(self.parallelism_hint)
+    value = (value * 31) ^ hash(self.batch_sizes)
     value = (value * 31) ^ hash(self.json_conf)
     return value
 
@@ -1281,36 +1304,36 @@ def read(self, iprot):
       if fid == 1:
         if ftype == TType.MAP:
           self.spouts = {}
-          (_ktype40, _vtype41, _size39 ) = iprot.readMapBegin()
-          for _i43 in xrange(_size39):
-            _key44 = iprot.readString().decode('utf-8')
-            _val45 = SpoutSpec()
-            _val45.read(iprot)
-            self.spouts[_key44] = _val45
+          (_ktype49, _vtype50, _size48 ) = iprot.readMapBegin()
+          for _i52 in xrange(_size48):
+            _key53 = iprot.readString().decode('utf-8')
+            _val54 = SpoutSpec()
+            _val54.read(iprot)
+            self.spouts[_key53] = _val54
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
       elif fid == 2:
         if ftype == TType.MAP:
           self.bolts = {}
-          (_ktype47, _vtype48, _size46 ) = iprot.readMapBegin()
-          for _i50 in xrange(_size46):
-            _key51 = iprot.readString().decode('utf-8')
-            _val52 = Bolt()
-            _val52.read(iprot)
-            self.bolts[_key51] = _val52
+          (_ktype56, _vtype57, _size55 ) = iprot.readMapBegin()
+          for _i59 in xrange(_size55):
+            _key60 = iprot.readString().decode('utf-8')
+            _val61 = Bolt()
+            _val61.read(iprot)
+            self.bolts[_key60] = _val61
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
       elif fid == 3:
         if ftype == TType.MAP:
           self.state_spouts = {}
-          (_ktype54, _vtype55, _size53 ) = iprot.readMapBegin()
-          for _i57 in xrange(_size53):
-            _key58 = iprot.readString().decode('utf-8')
-            _val59 = StateSpoutSpec()
-            _val59.read(iprot)
-            self.state_spouts[_key58] = _val59
+          (_ktype63, _vtype64, _size62 ) = iprot.readMapBegin()
+          for _i66 in xrange(_size62):
+            _key67 = iprot.readString().decode('utf-8')
+            _val68 = StateSpoutSpec()
+            _val68.read(iprot)
+            self.state_spouts[_key67] = _val68
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -1327,25 +1350,25 @@ def write(self, oprot):
     if self.spouts is not None:
       oprot.writeFieldBegin('spouts', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.spouts))
-      for kiter60,viter61 in self.spouts.items():
-        oprot.writeString(kiter60.encode('utf-8'))
-        viter61.write(oprot)
+      for kiter69,viter70 in self.spouts.items():
+        oprot.writeString(kiter69.encode('utf-8'))
+        viter70.write(oprot)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.bolts is not None:
       oprot.writeFieldBegin('bolts', TType.MAP, 2)
       oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.bolts))
-      for kiter62,viter63 in self.bolts.items():
-        oprot.writeString(kiter62.encode('utf-8'))
-        viter63.write(oprot)
+      for kiter71,viter72 in self.bolts.items():
+        oprot.writeString(kiter71.encode('utf-8'))
+        viter72.write(oprot)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.state_spouts is not None:
       oprot.writeFieldBegin('state_spouts', TType.MAP, 3)
       oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.state_spouts))
-      for kiter64,viter65 in self.state_spouts.items():
-        oprot.writeString(kiter64.encode('utf-8'))
-        viter65.write(oprot)
+      for kiter73,viter74 in self.state_spouts.items():
+        oprot.writeString(kiter73.encode('utf-8'))
+        viter74.write(oprot)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -2660,33 +2683,33 @@ def read(self, iprot):
       if fid == 1:
         if ftype == TType.LIST:
           self.supervisors = []
-          (_etype69, _size66) = iprot.readListBegin()
-          for _i70 in xrange(_size66):
-            _elem71 = SupervisorSummary()
-            _elem71.read(iprot)
-            self.supervisors.append(_elem71)
+          (_etype78, _size75) = iprot.readListBegin()
+          for _i79 in xrange(_size75):
+            _elem80 = SupervisorSummary()
+            _elem80.read(iprot)
+            self.supervisors.append(_elem80)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 3:
         if ftype == TType.LIST:
           self.topologies = []
-          (_etype75, _size72) = iprot.readListBegin()
-          for _i76 in xrange(_size72):
-            _elem77 = TopologySummary()
-            _elem77.read(iprot)
-            self.topologies.append(_elem77)
+          (_etype84, _size81) = iprot.readListBegin()
+          for _i85 in xrange(_size81):
+            _elem86 = TopologySummary()
+            _elem86.read(iprot)
+            self.topologies.append(_elem86)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 4:
         if ftype == TType.LIST:
           self.nimbuses = []
-          (_etype81, _size78) = iprot.readListBegin()
-          for _i82 in xrange(_size78):
-            _elem83 = NimbusSummary()
-            _elem83.read(iprot)
-            self.nimbuses.append(_elem83)
+          (_etype90, _size87) = iprot.readListBegin()
+          for _i91 in xrange(_size87):
+            _elem92 = NimbusSummary()
+            _elem92.read(iprot)
+            self.nimbuses.append(_elem92)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -2703,22 +2726,22 @@ def write(self, oprot):
     if self.supervisors is not None:
       oprot.writeFieldBegin('supervisors', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.supervisors))
-      for iter84 in self.supervisors:
-        iter84.write(oprot)
+      for iter93 in self.supervisors:
+        iter93.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.topologies is not None:
       oprot.writeFieldBegin('topologies', TType.LIST, 3)
       oprot.writeListBegin(TType.STRUCT, len(self.topologies))
-      for iter85 in self.topologies:
-        iter85.write(oprot)
+      for iter94 in self.topologies:
+        iter94.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.nimbuses is not None:
       oprot.writeFieldBegin('nimbuses', TType.LIST, 4)
       oprot.writeListBegin(TType.STRUCT, len(self.nimbuses))
-      for iter86 in self.nimbuses:
-        iter86.write(oprot)
+      for iter95 in self.nimbuses:
+        iter95.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -2898,90 +2921,90 @@ def read(self, iprot):
       if fid == 1:
         if ftype == TType.MAP:
           self.acked = {}
-          (_ktype88, _vtype89, _size87 ) = iprot.readMapBegin()
-          for _i91 in xrange(_size87):
-            _key92 = iprot.readString().decode('utf-8')
-            _val93 = {}
-            (_ktype95, _vtype96, _size94 ) = iprot.readMapBegin()
-            for _i98 in xrange(_size94):
-              _key99 = GlobalStreamId()
-              _key99.read(iprot)
-              _val100 = iprot.readI64();
-              _val93[_key99] = _val100
+          (_ktype97, _vtype98, _size96 ) = iprot.readMapBegin()
+          for _i100 in xrange(_size96):
+            _key101 = iprot.readString().decode('utf-8')
+            _val102 = {}
+            (_ktype104, _vtype105, _size103 ) = iprot.readMapBegin()
+            for _i107 in xrange(_size103):
+              _key108 = GlobalStreamId()
+              _key108.read(iprot)
+              _val109 = iprot.readI64();
+              _val102[_key108] = _val109
             iprot.readMapEnd()
-            self.acked[_key92] = _val93
+            self.acked[_key101] = _val102
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
       elif fid == 2:
         if ftype == TType.MAP:
           self.failed = {}
-          (_ktype102, _vtype103, _size101 ) = iprot.readMapBegin()
-          for _i105 in xrange(_size101):
-            _key106 = iprot.readString().decode('utf-8')
-            _val107 = {}
-            (_ktype109, _vtype110, _size108 ) = iprot.readMapBegin()
-            for _i112 in xrange(_size108):
-              _key113 = GlobalStreamId()
-              _key113.read(iprot)
-              _val114 = iprot.readI64();
-              _val107[_key113] = _val114
+          (_ktype111, _vtype112, _size110 ) = iprot.readMapBegin()
+          for _i114 in xrange(_size110):
+            _key115 = iprot.readString().decode('utf-8')
+            _val116 = {}
+            (_ktype118, _vtype119, _size117 ) = iprot.readMapBegin()
+            for _i121 in xrange(_size117):
+              _key122 = GlobalStreamId()
+              _key122.read(iprot)
+              _val123 = iprot.readI64();
+              _val116[_key122] = _val123
             iprot.readMapEnd()
-            self.failed[_key106] = _val107
+            self.failed[_key115] = _val116
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
       elif fid == 3:
         if ftype == TType.MAP:
           self.process_ms_avg = {}
-          (_ktype116, _vtype117, _size115 ) = iprot.readMapBegin()
-          for _i119 in xrange(_size115):
-            _key120 = iprot.readString().decode('utf-8')
-            _val121 = {}
-            (_ktype123, _vtype124, _size122 ) = iprot.readMapBegin()
-            for _i126 in xrange(_size122):
-              _key127 = GlobalStreamId()
-              _key127.read(iprot)
-              _val128 = iprot.readDouble();
-              _val121[_key127] = _val128
+          (_ktype125, _vtype126, _size124 ) = iprot.readMapBegin()
+          for _i128 in xrange(_size124):
+            _key129 = iprot.readString().decode('utf-8')
+            _val130 = {}
+            (_ktype132, _vtype133, _size131 ) = iprot.readMapBegin()
+            for _i135 in xrange(_size131):
+              _key136 = GlobalStreamId()
+              _key136.read(iprot)
+              _val137 = iprot.readDouble();
+              _val130[_key136] = _val137
             iprot.readMapEnd()
-            self.process_ms_avg[_key120] = _val121
+            self.process_ms_avg[_key129] = _val130
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
       elif fid == 4:
         if ftype == TType.MAP:
           self.executed = {}
-          (_ktype130, _vtype131, _size129 ) = iprot.readMapBegin()
-          for _i133 in xrange(_size129):
-            _key134 = iprot.readString().decode('utf-8')
-            _val135 = {}
-            (_ktype137, _vtype138, _size136 ) = iprot.readMapBegin()
-            for _i140 in xrange(_size136):
-              _key141 = GlobalStreamId()
-              _key141.read(iprot)
-              _val142 = iprot.readI64();
-              _val135[_key141] = _val142
+          (_ktype139, _vtype140, _size138 ) = iprot.readMapBegin()
+          for _i142 in xrange(_size138):
+            _key143 = iprot.readString().decode('utf-8')
+            _val144 = {}
+            (_ktype146, _vtype147, _size145 ) = iprot.readMapBegin()
+            for _i149 in xrange(_size145):
+              _key150 = GlobalStreamId()
+              _key150.read(iprot)
+              _val151 = iprot.readI64();
+              _val144[_key150] = _val151
             iprot.readMapEnd()
-            self.executed[_key134] = _val135
+            self.executed[_key143] = _val144
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
       elif fid == 5:
         if ftype == TType.MAP:
           self.execute_ms_avg = {}
-          (_ktype144, _vtype145, _size143 ) = iprot.readMapBegin()
-          for _i147 in xrange(_size143):
-            _key148 = iprot.readString().decode('utf-8')
-            _val149 = {}
-            (_ktype151, _vtype152, _size150 ) = iprot.readMapBegin()
-            for _i154 in xrange(_size150):
-              _key155 = GlobalStreamId()
-              _key155.read(iprot)
-              _val156 = iprot.readDouble();
-              _val149[_key155] = _val156
+          (_ktype153, _vtype154, _size152 ) = iprot.readMapBegin()
+          for _i156 in xrange(_size152):
+            _key157 = iprot.readString().decode('utf-8')
+            _val158 = {}
+            (_ktype160, _vtype161, _size159 ) = iprot.readMapBegin()
+            for _i163 in xrange(_size159):
+              _key164 = GlobalStreamId()
+              _key164.read(iprot)
+              _val165 = iprot.readDouble();
+              _val158[_key164] = _val165
             iprot.readMapEnd()
-            self.execute_ms_avg[_key148] = _val149
+            self.execute_ms_avg[_key157] = _val158
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -2998,60 +3021,60 @@ def write(self, oprot):
     if self.acked is not None:
       oprot.writeFieldBegin('acked', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.acked))
-      for kiter157,viter158 in self.acked.items():
-        oprot.writeString(kiter157.encode('utf-8'))
-        oprot.writeMapBegin(TType.STRUCT, TType.I64, len(viter158))
-        for kiter159,viter160 in viter158.items():
-          kiter159.write(oprot)
-          oprot.writeI64(viter160)
+      for kiter166,viter167 in self.acked.items():
+        oprot.writeString(kiter166.encode('utf-8'))
+        oprot.writeMapBegin(TType.STRUCT, TType.I64, len(viter167))
+        for kiter168,viter169 in viter167.items():
+          kiter168.write(oprot)
+          oprot.writeI64(viter169)
         oprot.writeMapEnd()
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.failed is not None:
       oprot.writeFieldBegin('failed', TType.MAP, 2)
       oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.failed))
-      for kiter161,viter162 in self.failed.items():
-        oprot.writeString(kiter161.encode('utf-8'))
-        oprot.writeMapBegin(TType.STRUCT, TType.I64, len(viter162))
-        for kiter163,viter164 in viter162.items():
-          kiter163.write(oprot)
-          oprot.writeI64(viter164)
+      for kiter170,viter171 in self.failed.items():
+        oprot.writeString(kiter170.encode('utf-8'))
+        oprot.writeMapBegin(TType.STRUCT, TType.I64, len(viter171))
+        for kiter172,viter173 in viter171.items():
+          kiter172.write(oprot)
+          oprot.writeI64(viter173)
         oprot.writeMapEnd()
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.process_ms_avg is not None:
       oprot.writeFieldBegin('process_ms_avg', TType.MAP, 3)
       oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.process_ms_avg))
-      for kiter165,viter166 in self.process_ms_avg.items():
-        oprot.writeString(kiter165.encode('utf-8'))
-        oprot.writeMapBegin(TType.STRUCT, TType.DOUBLE, len(viter166))
-        for kiter167,viter168 in viter166.items():
-          kiter167.write(oprot)
-          oprot.writeDouble(viter168)
+      for kiter174,viter175 in self.process_ms_avg.items():
+        oprot.writeString(kiter174.encode('utf-8'))
+        oprot.writeMapBegin(TType.STRUCT, TType.DOUBLE, len(viter175))
+        for kiter176,viter177 in viter175.items():
+          kiter176.write(oprot)
+          oprot.writeDouble(viter177)
         oprot.writeMapEnd()
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.executed is not None:
       oprot.writeFieldBegin('executed', TType.MAP, 4)
       oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.executed))
-      for kiter169,viter170 in self.executed.items():
-        oprot.writeString(kiter169.encode('utf-8'))
-        oprot.writeMapBegin(TType.STRUCT, TType.I64, len(viter170))
-        for kiter171,viter172 in viter170.items():
-          kiter171.write(oprot)
-          oprot.writeI64(viter172)
+      for kiter178,viter179 in self.executed.items():
+        oprot.writeString(kiter178.encode('utf-8'))
+        oprot.writeMapBegin(TType.STRUCT, TType.I64, len(viter179))
+        for kiter180,viter181 in viter179.items():
+          kiter180.write(oprot)
+          oprot.writeI64(viter181)
         oprot.writeMapEnd()
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.execute_ms_avg is not None:
       oprot.writeFieldBegin('execute_ms_avg', TType.MAP, 5)
       oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.execute_ms_avg))
-      for kiter173,viter174 in self.execute_ms_avg.items():
-        oprot.writeString(kiter173.encode('utf-8'))
-        oprot.writeMapBegin(TType.STRUCT, TType.DOUBLE, len(viter174))
-        for kiter175,viter176 in viter174.items():
-          kiter175.write(oprot)
-          oprot.writeDouble(viter176)
+      for kiter182,viter183 in self.execute_ms_avg.items():
+        oprot.writeString(kiter182.encode('utf-8'))
+        oprot.writeMapBegin(TType.STRUCT, TType.DOUBLE, len(viter183))
+        for kiter184,viter185 in viter183.items():
+          kiter184.write(oprot)
+          oprot.writeDouble(viter185)
         oprot.writeMapEnd()
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
@@ -3124,51 +3147,51 @@ def read(self, iprot):
       if fid == 1:
         if ftype == TType.MAP:
           self.acked = {}
-          (_ktype178, _vtype179, _size177 ) = iprot.readMapBegin()
-          for _i181 in xrange(_size177):
-            _key182 = iprot.readString().decode('utf-8')
-            _val183 = {}
-            (_ktype185, _vtype186, _size184 ) = iprot.readMapBegin()
-            for _i188 in xrange(_size184):
-              _key189 = iprot.readString().decode('utf-8')
-              _val190 = iprot.readI64();
-              _val183[_key189] = _val190
+          (_ktype187, _vtype188, _size186 ) = iprot.readMapBegin()
+          for _i190 in xrange(_size186):
+            _key191 = iprot.readString().decode('utf-8')
+            _val192 = {}
+            (_ktype194, _vtype195, _size193 ) = iprot.readMapBegin()
+            for _i197 in xrange(_size193):
+              _key198 = iprot.readString().decode('utf-8')
+              _val199 = iprot.readI64();
+              _val192[_key198] = _val199
             iprot.readMapEnd()
-            self.acked[_key182] = _val183
+            self.acked[_key191] = _val192
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
       elif fid == 2:
         if ftype == TType.MAP:
           self.failed = {}
-          (_ktype192, _vtype193, _size191 ) = iprot.readMapBegin()
-          for _i195 in xrange(_size191):
-            _key196 = iprot.readString().decode('utf-8')
-            _val197 = {}
-            (_ktype199, _vtype200, _size198 ) = iprot.readMapBegin()
-            for _i202 in xrange(_size198):
-              _key203 = iprot.readString().decode('utf-8')
-              _val204 = iprot.readI64();
-              _val197[_key203] = _val204
+          (_ktype201, _vtype202, _size200 ) = iprot.readMapBegin()
+          for _i204 in xrange(_size200):
+            _key205 = iprot.readString().decode('utf-8')
+            _val206 = {}
+            (_ktype208, _vtype209, _size207 ) = iprot.readMapBegin()
+            for _i211 in xrange(_size207):
+              _key212 = iprot.readString().decode('utf-8')
+              _val213 = iprot.readI64();
+              _val206[_key212] = _val213
             iprot.readMapEnd()
-            self.failed[_key196] = _val197
+            self.failed[_key205] = _val206
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
       elif fid == 3:
         if ftype == TType.MAP:
           self.complete_ms_avg = {}
-          (_ktype206, _vtype207, _size205 ) = iprot.readMapBegin()
-          for _i209 in xrange(_size205):
-            _key210 = iprot.readString().decode('utf-8')
-            _val211 = {}
-            (_ktype213, _vtype214, _size212 ) = iprot.readMapBegin()
-            for _i216 in xrange(_size212):
-              _key217 = iprot.readString().decode('utf-8')
-              _val218 = iprot.readDouble();
-              _val211[_key217] = _val218
+          (_ktype215, _vtype216, _size214 ) = iprot.readMapBegin()
+          for _i218 in xrange(_size214):
+            _key219 = iprot.readString().decode('utf-8')
+            _val220 = {}
+            (_ktype222, _vtype223, _size221 ) = iprot.readMapBegin()
+            for _i225 in xrange(_size221):
+              _key226 = iprot.readString().decode('utf-8')
+              _val227 = iprot.readDouble();
+              _val220[_key226] = _val227
             iprot.readMapEnd()
-            self.complete_ms_avg[_key210] = _val211
+            self.complete_ms_avg[_key219] = _val220
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -3185,36 +3208,36 @@ def write(self, oprot):
     if self.acked is not None:
       oprot.writeFieldBegin('acked', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.acked))
-      for kiter219,viter220 in self.acked.items():
-        oprot.writeString(kiter219.encode('utf-8'))
-        oprot.writeMapBegin(TType.STRING, TType.I64, len(viter220))
-        for kiter221,viter222 in viter220.items():
-          oprot.writeString(kiter221.encode('utf-8'))
-          oprot.writeI64(viter222)
+      for kiter228,viter229 in self.acked.items():
+        oprot.writeString(kiter228.encode('utf-8'))
+        oprot.writeMapBegin(TType.STRING, TType.I64, len(viter229))
+        for kiter230,viter231 in viter229.items():
+          oprot.writeString(kiter230.encode('utf-8'))
+          oprot.writeI64(viter231)
         oprot.writeMapEnd()
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.failed is not None:
       oprot.writeFieldBegin('failed', TType.MAP, 2)
       oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.failed))
-      for kiter223,viter224 in self.failed.items():
-        oprot.writeString(kiter223.encode('utf-8'))
-        oprot.writeMapBegin(TType.STRING, TType.I64, len(viter224))
-        for kiter225,viter226 in viter224.items():
-          oprot.writeString(kiter225.encode('utf-8'))
-          oprot.writeI64(viter226)
+      for kiter232,viter233 in self.failed.items():
+        oprot.writeString(kiter232.encode('utf-8'))
+        oprot.writeMapBegin(TType.STRING, TType.I64, len(viter233))
+        for kiter234,viter235 in viter233.items():
+          oprot.writeString(kiter234.encode('utf-8'))
+          oprot.writeI64(viter235)
         oprot.writeMapEnd()
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.complete_ms_avg is not None:
       oprot.writeFieldBegin('complete_ms_avg', TType.MAP, 3)
       oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.complete_ms_avg))
-      for kiter227,viter228 in self.complete_ms_avg.items():
-        oprot.writeString(kiter227.encode('utf-8'))
-        oprot.writeMapBegin(TType.STRING, TType.DOUBLE, len(viter228))
-        for kiter229,viter230 in viter228.items():
-          oprot.writeString(kiter229.encode('utf-8'))
-          oprot.writeDouble(viter230)
+      for kiter236,viter237 in self.complete_ms_avg.items():
+        oprot.writeString(kiter236.encode('utf-8'))
+        oprot.writeMapBegin(TType.STRING, TType.DOUBLE, len(viter237))
+        for kiter238,viter239 in viter237.items():
+          oprot.writeString(kiter238.encode('utf-8'))
+          oprot.writeDouble(viter239)
         oprot.writeMapEnd()
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
@@ -3364,34 +3387,34 @@ def read(self, iprot):
       if fid == 1:
         if ftype == TType.MAP:
           self.emitted = {}
-          (_ktype232, _vtype233, _size231 ) = iprot.readMapBegin()
-          for _i235 in xrange(_size231):
-            _key236 = iprot.readString().decode('utf-8')
-            _val237 = {}
-            (_ktype239, _vtype240, _size238 ) = iprot.readMapBegin()
-            for _i242 in xrange(_size238):
-              _key243 = iprot.readString().decode('utf-8')
-              _val244 = iprot.readI64();
-              _val237[_key243] = _val244
+          (_ktype241, _vtype242, _size240 ) = iprot.readMapBegin()
+          for _i244 in xrange(_size240):
+            _key245 = iprot.readString().decode('utf-8')
+            _val246 = {}
+            (_ktype248, _vtype249, _size247 ) = iprot.readMapBegin()
+            for _i251 in xrange(_size247):
+              _key252 = iprot.readString().decode('utf-8')
+              _val253 = iprot.readI64();
+              _val246[_key252] = _val253
             iprot.readMapEnd()
-            self.emitted[_key236] = _val237
+            self.emitted[_key245] = _val246
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
       elif fid == 2:
         if ftype == TType.MAP:
           self.transferred = {}
-          (_ktype246, _vtype247, _size245 ) = iprot.readMapBegin()
-          for _i249 in xrange(_size245):
-            _key250 = iprot.readString().decode('utf-8')
-            _val251 = {}
-            (_ktype253, _vtype254, _size252 ) = iprot.readMapBegin()
-            for _i256 in xrange(_size252):
-              _key257 = iprot.readString().decode('utf-8')
-              _val258 = iprot.readI64();
-              _val251[_key257] = _val258
+          (_ktype255, _vtype256, _size254 ) = iprot.readMapBegin()
+          for _i258 in xrange(_size254):
+            _key259 = iprot.readString().decode('utf-8')
+            _val260 = {}
+            (_ktype262, _vtype263, _size261 ) = iprot.readMapBegin()
+            for _i265 in xrange(_size261):
+              _key266 = iprot.readString().decode('utf-8')
+              _val267 = iprot.readI64();
+              _val260[_key266] = _val267
             iprot.readMapEnd()
-            self.transferred[_key250] = _val251
+            self.transferred[_key259] = _val260
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -3419,24 +3442,24 @@ def write(self, oprot):
     if self.emitted is not None:
       oprot.writeFieldBegin('emitted', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.emitted))
-      for kiter259,viter260 in self.emitted.items():
-        oprot.writeString(kiter259.encode('utf-8'))
-        oprot.writeMapBegin(TType.STRING, TType.I64, len(viter260))
-        for kiter261,viter262 in viter260.items():
-          oprot.writeString(kiter261.encode('utf-8'))
-          oprot.writeI64(viter262)
+      for kiter268,viter269 in self.emitted.items():
+        oprot.writeString(kiter268.encode('utf-8'))
+        oprot.writeMapBegin(TType.STRING, TType.I64, len(viter269))
+        for kiter270,viter271 in viter269.items():
+          oprot.writeString(kiter270.encode('utf-8'))
+          oprot.writeI64(viter271)
         oprot.writeMapEnd()
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.transferred is not None:
       oprot.writeFieldBegin('transferred', TType.MAP, 2)
       oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.transferred))
-      for kiter263,viter264 in self.transferred.items():
-        oprot.writeString(kiter263.encode('utf-8'))
-        oprot.writeMapBegin(TType.STRING, TType.I64, len(viter264))
-        for kiter265,viter266 in viter264.items():
-          oprot.writeString(kiter265.encode('utf-8'))
-          oprot.writeI64(viter266)
+      for kiter272,viter273 in self.transferred.items():
+        oprot.writeString(kiter272.encode('utf-8'))
+        oprot.writeMapBegin(TType.STRING, TType.I64, len(viter273))
+        for kiter274,viter275 in viter273.items():
+          oprot.writeString(kiter274.encode('utf-8'))
+          oprot.writeI64(viter275)
         oprot.writeMapEnd()
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
@@ -4280,11 +4303,11 @@ def read(self, iprot):
       elif fid == 4:
         if ftype == TType.LIST:
           self.executors = []
-          (_etype270, _size267) = iprot.readListBegin()
-          for _i271 in xrange(_size267):
-            _elem272 = ExecutorSummary()
-            _elem272.read(iprot)
-            self.executors.append(_elem272)
+          (_etype279, _size276) = iprot.readListBegin()
+          for _i280 in xrange(_size276):
+            _elem281 = ExecutorSummary()
+            _elem281.read(iprot)
+            self.executors.append(_elem281)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -4296,29 +4319,29 @@ def read(self, iprot):
       elif fid == 6:
         if ftype == TType.MAP:
           self.errors = {}
-          (_ktype274, _vtype275, _size273 ) = iprot.readMapBegin()
-          for _i277 in xrange(_size273):
-            _key278 = iprot.readString().decode('utf-8')
-            _val279 = []
-            (_etype283, _size280) = iprot.readListBegin()
-            for _i284 in xrange(_size280):
-              _elem285 = ErrorInfo()
-              _elem285.read(iprot)
-              _val279.append(_elem285)
+          (_ktype283, _vtype284, _size282 ) = iprot.readMapBegin()
+          for _i286 in xrange(_size282):
+            _key287 = iprot.readString().decode('utf-8')
+            _val288 = []
+            (_etype292, _size289) = iprot.readListBegin()
+            for _i293 in xrange(_size289):
+              _elem294 = ErrorInfo()
+              _elem294.read(iprot)
+              _val288.append(_elem294)
             iprot.readListEnd()
-            self.errors[_key278] = _val279
+            self.errors[_key287] = _val288
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
       elif fid == 7:
         if ftype == TType.MAP:
           self.component_debug = {}
-          (_ktype287, _vtype288, _size286 ) = iprot.readMapBegin()
-          for _i290 in xrange(_size286):
-            _key291 = iprot.readString().decode('utf-8')
-            _val292 = DebugOptions()
-            _val292.read(iprot)
-            self.component_debug[_key291] = _val292
+          (_ktype296, _vtype297, _size295 ) = iprot.readMapBegin()
+          for _i299 in xrange(_size295):
+            _key300 = iprot.readString().decode('utf-8')
+            _val301 = DebugOptions()
+            _val301.read(iprot)
+            self.component_debug[_key300] = _val301
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -4362,8 +4385,8 @@ def write(self, oprot):
     if self.executors is not None:
       oprot.writeFieldBegin('executors', TType.LIST, 4)
       oprot.writeListBegin(TType.STRUCT, len(self.executors))
-      for iter293 in self.executors:
-        iter293.write(oprot)
+      for iter302 in self.executors:
+        iter302.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.status is not None:
@@ -4373,20 +4396,20 @@ def write(self, oprot):
     if self.errors is not None:
       oprot.writeFieldBegin('errors', TType.MAP, 6)
       oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.errors))
-      for kiter294,viter295 in self.errors.items():
-        oprot.writeString(kiter294.encode('utf-8'))
-        oprot.writeListBegin(TType.STRUCT, len(viter295))
-        for iter296 in viter295:
-          iter296.write(oprot)
+      for kiter303,viter304 in self.errors.items():
+        oprot.writeString(kiter303.encode('utf-8'))
+        oprot.writeListBegin(TType.STRUCT, len(viter304))
+        for iter305 in viter304:
+          iter305.write(oprot)
         oprot.writeListEnd()
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.component_debug is not None:
       oprot.writeFieldBegin('component_debug', TType.MAP, 7)
       oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.component_debug))
-      for kiter297,viter298 in self.component_debug.items():
-        oprot.writeString(kiter297.encode('utf-8'))
-        viter298.write(oprot)
+      for kiter306,viter307 in self.component_debug.items():
+        oprot.writeString(kiter306.encode('utf-8'))
+        viter307.write(oprot)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.sched_status is not None:
@@ -4630,11 +4653,11 @@ def read(self, iprot):
       elif fid == 3:
         if ftype == TType.MAP:
           self.num_executors = {}
-          (_ktype300, _vtype301, _size299 ) = iprot.readMapBegin()
-          for _i303 in xrange(_size299):
-            _key304 = iprot.readString().decode('utf-8')
-            _val305 = iprot.readI32();
-            self.num_executors[_key304] = _val305
+          (_ktype309, _vtype310, _size308 ) = iprot.readMapBegin()
+          for _i312 in xrange(_size308):
+            _key313 = iprot.readString().decode('utf-8')
+            _val314 = iprot.readI32();
+            self.num_executors[_key313] = _val314
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -4659,9 +4682,9 @@ def write(self, oprot):
     if self.num_executors is not None:
       oprot.writeFieldBegin('num_executors', TType.MAP, 3)
       oprot.writeMapBegin(TType.STRING, TType.I32, len(self.num_executors))
-      for kiter306,viter307 in self.num_executors.items():
-        oprot.writeString(kiter306.encode('utf-8'))
-        oprot.writeI32(viter307)
+      for kiter315,viter316 in self.num_executors.items():
+        oprot.writeString(kiter315.encode('utf-8'))
+        oprot.writeI32(viter316)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -4715,11 +4738,11 @@ def read(self, iprot):
       if fid == 1:
         if ftype == TType.MAP:
           self.creds = {}
-          (_ktype309, _vtype310, _size308 ) = iprot.readMapBegin()
-          for _i312 in xrange(_size308):
-            _key313 = iprot.readString().decode('utf-8')
-            _val314 = iprot.readString().decode('utf-8')
-            self.creds[_key313] = _val314
+          (_ktype318, _vtype319, _size317 ) = iprot.readMapBegin()
+          for _i321 in xrange(_size317):
+            _key322 = iprot.readString().decode('utf-8')
+            _val323 = iprot.readString().decode('utf-8')
+            self.creds[_key322] = _val323
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -4736,9 +4759,9 @@ def write(self, oprot):
     if self.creds is not None:
       oprot.writeFieldBegin('creds', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.creds))
-      for kiter315,viter316 in self.creds.items():
-        oprot.writeString(kiter315.encode('utf-8'))
-        oprot.writeString(viter316.encode('utf-8'))
+      for kiter324,viter325 in self.creds.items():
+        oprot.writeString(kiter324.encode('utf-8'))
+        oprot.writeString(viter325.encode('utf-8'))
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -4909,31 +4932,31 @@ def read(self, iprot):
       elif fid == 4:
         if ftype == TType.LIST:
           self.used_ports = []
-          (_etype320, _size317) = iprot.readListBegin()
-          for _i321 in xrange(_size317):
-            _elem322 = iprot.readI64();
-            self.used_ports.append(_elem322)
+          (_etype329, _size326) = iprot.readListBegin()
+          for _i330 in xrange(_size326):
+            _elem331 = iprot.readI64();
+            self.used_ports.append(_elem331)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 5:
         if ftype == TType.LIST:
           self.meta = []
-          (_etype326, _size323) = iprot.readListBegin()
-          for _i327 in xrange(_size323):
-            _elem328 = iprot.readI64();
-            self.meta.append(_elem328)
+          (_etype335, _size332) = iprot.readListBegin()
+          for _i336 in xrange(_size332):
+            _elem337 = iprot.readI64();
+            self.meta.append(_elem337)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 6:
         if ftype == TType.MAP:
           self.scheduler_meta = {}
-          (_ktype330, _vtype331, _size329 ) = iprot.readMapBegin()
-          for _i333 in xrange(_size329):
-            _key334 = iprot.readString().decode('utf-8')
-            _val335 = iprot.readString().decode('utf-8')
-            self.scheduler_meta[_key334] = _val335
+          (_ktype339, _vtype340, _size338 ) = iprot.readMapBegin()
+          for _i342 in xrange(_size338):
+            _key343 = iprot.readString().decode('utf-8')
+            _val344 = iprot.readString().decode('utf-8')
+            self.scheduler_meta[_key343] = _val344
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -4972,23 +4995,23 @@ def write(self, oprot):
     if self.used_ports is not None:
       oprot.writeFieldBegin('used_ports', TType.LIST, 4)
       oprot.writeListBegin(TType.I64, len(self.used_ports))
-      for iter336 in self.used_ports:
-        oprot.writeI64(iter336)
+      for iter345 in self.used_ports:
+        oprot.writeI64(iter345)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.meta is not None:
       oprot.writeFieldBegin('meta', TType.LIST, 5)
       oprot.writeListBegin(TType.I64, len(self.meta))
-      for iter337 in self.meta:
-        oprot.writeI64(iter337)
+      for iter346 in self.meta:
+        oprot.writeI64(iter346)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.scheduler_meta is not None:
       oprot.writeFieldBegin('scheduler_meta', TType.MAP, 6)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.scheduler_meta))
-      for kiter338,viter339 in self.scheduler_meta.items():
-        oprot.writeString(kiter338.encode('utf-8'))
-        oprot.writeString(viter339.encode('utf-8'))
+      for kiter347,viter348 in self.scheduler_meta.items():
+        oprot.writeString(kiter347.encode('utf-8'))
+        oprot.writeString(viter348.encode('utf-8'))
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.uptime_secs is not None:
@@ -5067,10 +5090,10 @@ def read(self, iprot):
       elif fid == 2:
         if ftype == TType.SET:
           self.port = set()
-          (_etype343, _size340) = iprot.readSetBegin()
-          for _i344 in xrange(_size340):
-            _elem345 = iprot.readI64();
-            self.port.add(_elem345)
+          (_etype352, _size349) = iprot.readSetBegin()
+          for _i353 in xrange(_size349):
+            _elem354 = iprot.readI64();
+            self.port.add(_elem354)
           iprot.readSetEnd()
         else:
           iprot.skip(ftype)
@@ -5091,8 +5114,8 @@ def write(self, oprot):
     if self.port is not None:
       oprot.writeFieldBegin('port', TType.SET, 2)
       oprot.writeSetBegin(TType.I64, len(self.port))
-      for iter346 in self.port:
-        oprot.writeI64(iter346)
+      for iter355 in self.port:
+        oprot.writeI64(iter355)
       oprot.writeSetEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -5175,44 +5198,44 @@ def read(self, iprot):
       elif fid == 2:
         if ftype == TType.MAP:
           self.node_host = {}
-          (_ktype348, _vtype349, _size347 ) = iprot.readMapBegin()
-          for _i351 in xrange(_size347):
-            _key352 = iprot.readString().decode('utf-8')
-            _val353 = iprot.readString().decode('utf-8')
-            self.node_host[_key352] = _val353
+          (_ktype357, _vtype358, _size356 ) = iprot.readMapBegin()
+          for _i360 in xrange(_size356):
+            _key361 = iprot.readString().decode('utf-8')
+            _val362 = iprot.readString().decode('utf-8')
+            self.node_host[_key361] = _val362
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
       elif fid == 3:
         if ftype == TType.MAP:
           self.executor_node_port = {}
-          (_ktype355, _vtype356, _size354 ) = iprot.readMapBegin()
-          for _i358 in xrange(_size354):
-            _key359 = []
-            (_etype364, _size361) = iprot.readListBegin()
-            for _i365 in xrange(_size361):
-              _elem366 = iprot.readI64();
-              _key359.append(_elem366)
+          (_ktype364, _vtype365, _size363 ) = iprot.readMapBegin()
+          for _i367 in xrange(_size363):
+            _key368 = []
+            (_etype373, _size370) = iprot.readListBegin()
+            for _i374 in xrange(_size370):
+              _elem375 = iprot.readI64();
+              _key368.append(_elem375)
             iprot.readListEnd()
-            _val360 = NodeInfo()
-            _val360.read(iprot)
-            self.executor_node_port[_key359] = _val360
+            _val369 = NodeInfo()
+            _val369.read(iprot)
+            self.executor_node_port[_key368] = _val369
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
       elif fid == 4:
         if ftype == TType.MAP:
           self.executor_start_time_secs = {}
-          (_ktype368, _vtype369, _size367 ) = iprot.readMapBegin()
-          for _i371 in xrange(_size367):
-            _key372 = []
-            (_etype377, _size374) = iprot.readListBegin()
-            for _i378 in xrange(_size374):
-              _elem379 = iprot.readI64();
-              _key372.append(_elem379)
+          (_ktype377, _vtype378, _size376 ) = iprot.readMapBegin()
+          for _i380 in xrange(_size376):
+            _key381 = []
+            (_etype386, _size383) = iprot.readListBegin()
+            for _i387 in xrange(_size383):
+              _elem388 = iprot.readI64();
+              _key381.append(_elem388)
             iprot.readListEnd()
-            _val373 = iprot.readI64();
-            self.executor_start_time_secs[_key372] = _val373
+            _val382 = iprot.readI64();
+            self.executor_start_time_secs[_key381] = _val382
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -5233,31 +5256,31 @@ def write(self, oprot):
     if self.node_host is not None:
       oprot.writeFieldBegin('node_host', TType.MAP, 2)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.node_host))
-      for kiter380,viter381 in self.node_host.items():
-        oprot.writeString(kiter380.encode('utf-8'))
-        oprot.writeString(viter381.encode('utf-8'))
+      for kiter389,viter390 in self.node_host.items():
+        oprot.writeString(kiter389.encode('utf-8'))
+        oprot.writeString(viter390.encode('utf-8'))
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.executor_node_port is not None:
       oprot.writeFieldBegin('executor_node_port', TType.MAP, 3)
       oprot.writeMapBegin(TType.LIST, TType.STRUCT, len(self.executor_node_port))
-      for kiter382,viter383 in self.executor_node_port.items():
-        oprot.writeListBegin(TType.I64, len(kiter382))
-        for iter384 in kiter382:
-          oprot.writeI64(iter384)
+      for kiter391,viter392 in self.executor_node_port.items():
+        oprot.writeListBegin(TType.I64, len(kiter391))
+        for iter393 in kiter391:
+          oprot.writeI64(iter393)
         oprot.writeListEnd()
-        viter383.write(oprot)
+        viter392.write(oprot)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.executor_start_time_secs is not None:
       oprot.writeFieldBegin('executor_start_time_secs', TType.MAP, 4)
       oprot.writeMapBegin(TType.LIST, TType.I64, len(self.executor_start_time_secs))
-      for kiter385,viter386 in self.executor_start_time_secs.items():
-        oprot.writeListBegin(TType.I64, len(kiter385))
-        for iter387 in kiter385:
-          oprot.writeI64(iter387)
+      for kiter394,viter395 in self.executor_start_time_secs.items():
+        oprot.writeListBegin(TType.I64, len(kiter394))
+        for iter396 in kiter394:
+          oprot.writeI64(iter396)
         oprot.writeListEnd()
-        oprot.writeI64(viter386)
+        oprot.writeI64(viter395)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -5433,11 +5456,11 @@ def read(self, iprot):
       elif fid == 4:
         if ftype == TType.MAP:
           self.component_executors = {}
-          (_ktype389, _vtype390, _size388 ) = iprot.readMapBegin()
-          for _i392 in xrange(_size388):
-            _key393 = iprot.readString().decode('utf-8')
-            _val394 = iprot.readI32();
-            self.component_executors[_key393] = _val394
+          (_ktype398, _vtype399, _size397 ) = iprot.readMapBegin()
+          for _i401 in xrange(_size397):
+            _key402 = iprot.readString().decode('utf-8')
+            _val403 = iprot.readI32();
+            self.component_executors[_key402] = _val403
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -5465,12 +5488,12 @@ def read(self, iprot):
       elif fid == 9:
         if ftype == TType.MAP:
           self.component_debug = {}
-          (_ktype396, _vtype397, _size395 ) = iprot.readMapBegin()
-          for _i399 in xrange(_size395):
-            _key400 = iprot.readString().decode('utf-8')
-            _val401 = DebugOptions()
-            _val401.read(iprot)
-            self.component_debug[_key400] = _val401
+          (_ktype405, _vtype406, _size404 ) = iprot.readMapBegin()
+          for _i408 in xrange(_size404):
+            _key409 = iprot.readString().decode('utf-8')
+            _val410 = DebugOptions()
+            _val410.read(iprot)
+            self.component_debug[_key409] = _val410
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -5499,9 +5522,9 @@ def write(self, oprot):
     if self.component_executors is not None:
       oprot.writeFieldBegin('component_executors', TType.MAP, 4)
       oprot.writeMapBegin(TType.STRING, TType.I32, len(self.component_executors))
-      for kiter402,viter403 in self.component_executors.items():
-        oprot.writeString(kiter402.encode('utf-8'))
-        oprot.writeI32(viter403)
+      for kiter411,viter412 in self.component_executors.items():
+        oprot.writeString(kiter411.encode('utf-8'))
+        oprot.writeI32(viter412)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.launch_time_secs is not None:
@@ -5523,9 +5546,9 @@ def write(self, oprot):
     if self.component_debug is not None:
       oprot.writeFieldBegin('component_debug', TType.MAP, 9)
       oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.component_debug))
-      for kiter404,viter405 in self.component_debug.items():
-        oprot.writeString(kiter404.encode('utf-8'))
-        viter405.write(oprot)
+      for kiter413,viter414 in self.component_debug.items():
+        oprot.writeString(kiter413.encode('utf-8'))
+        viter414.write(oprot)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -5605,13 +5628,13 @@ def read(self, iprot):
       elif fid == 2:
         if ftype == TType.MAP:
           self.executor_stats = {}
-          (_ktype407, _vtype408, _size406 ) = iprot.readMapBegin()
-          for _i410 in xrange(_size406):
-            _key411 = ExecutorInfo()
-            _key411.read(iprot)
-            _val412 = ExecutorStats()
-            _val412.read(iprot)
-            self.executor_stats[_key411] = _val412
+          (_ktype416, _vtype417, _size415 ) = iprot.readMapBegin()
+          for _i419 in xrange(_size415):
+            _key420 = ExecutorInfo()
+            _key420.read(iprot)
+            _val421 = ExecutorStats()
+            _val421.read(iprot)
+            self.executor_stats[_key420] = _val421
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -5642,9 +5665,9 @@ def write(self, oprot):
     if self.executor_stats is not None:
       oprot.writeFieldBegin('executor_stats', TType.MAP, 2)
       oprot.writeMapBegin(TType.STRUCT, TType.STRUCT, len(self.executor_stats))
-      for kiter413,viter414 in self.executor_stats.items():
-        kiter413.write(oprot)
-        viter414.write(oprot)
+      for kiter422,viter423 in self.executor_stats.items():
+        kiter422.write(oprot)
+        viter423.write(oprot)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.time_secs is not None:
@@ -5797,12 +5820,12 @@ def read(self, iprot):
       if fid == 1:
         if ftype == TType.MAP:
           self.serialized_parts = {}
-          (_ktype416, _vtype417, _size415 ) = iprot.readMapBegin()
-          for _i419 in xrange(_size415):
-            _key420 = iprot.readString().decode('utf-8')
-            _val421 = ThriftSerializedObject()
-            _val421.read(iprot)
-            self.serialized_parts[_key420] = _val421
+          (_ktype425, _vtype426, _size424 ) = iprot.readMapBegin()
+          for _i428 in xrange(_size424):
+            _key429 = iprot.readString().decode('utf-8')
+            _val430 = ThriftSerializedObject()
+            _val430.read(iprot)
+            self.serialized_parts[_key429] = _val430
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -5819,9 +5842,9 @@ def write(self, oprot):
     if self.serialized_parts is not None:
       oprot.writeFieldBegin('serialized_parts', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.serialized_parts))
-      for kiter422,viter423 in self.serialized_parts.items():
-        oprot.writeString(kiter422.encode('utf-8'))
-        viter423.write(oprot)
+      for kiter431,viter432 in self.serialized_parts.items():
+        oprot.writeString(kiter431.encode('utf-8'))
+        viter432.write(oprot)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -5883,11 +5906,11 @@ def read(self, iprot):
       elif fid == 2:
         if ftype == TType.LIST:
           self.executors = []
-          (_etype427, _size424) = iprot.readListBegin()
-          for _i428 in xrange(_size424):
-            _elem429 = ExecutorInfo()
-            _elem429.read(iprot)
-            self.executors.append(_elem429)
+          (_etype436, _size433) = iprot.readListBegin()
+          for _i437 in xrange(_size433):
+            _elem438 = ExecutorInfo()
+            _elem438.read(iprot)
+            self.executors.append(_elem438)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -5908,8 +5931,8 @@ def write(self, oprot):
     if self.executors is not None:
       oprot.writeFieldBegin('executors', TType.LIST, 2)
       oprot.writeListBegin(TType.STRUCT, len(self.executors))
-      for iter430 in self.executors:
-        iter430.write(oprot)
+      for iter439 in self.executors:
+        iter439.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -6033,11 +6056,11 @@ def read(self, iprot):
       if fid == 1:
         if ftype == TType.MAP:
           self.approved_workers = {}
-          (_ktype432, _vtype433, _size431 ) = iprot.readMapBegin()
-          for _i435 in xrange(_size431):
-            _key436 = iprot.readString().decode('utf-8')
-            _val437 = iprot.readI32();
-            self.approved_workers[_key436] = _val437
+          (_ktype441, _vtype442, _size440 ) = iprot.readMapBegin()
+          for _i444 in xrange(_size440):
+            _key445 = iprot.readString().decode('utf-8')
+            _val446 = iprot.readI32();
+            self.approved_workers[_key445] = _val446
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -6054,9 +6077,9 @@ def write(self, oprot):
     if self.approved_workers is not None:
       oprot.writeFieldBegin('approved_workers', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.I32, len(self.approved_workers))
-      for kiter438,viter439 in self.approved_workers.items():
-        oprot.writeString(kiter438.encode('utf-8'))
-        oprot.writeI32(viter439)
+      for kiter447,viter448 in self.approved_workers.items():
+        oprot.writeString(kiter447.encode('utf-8'))
+        oprot.writeI32(viter448)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -6110,12 +6133,12 @@ def read(self, iprot):
       if fid == 1:
         if ftype == TType.MAP:
           self.assignments = {}
-          (_ktype441, _vtype442, _size440 ) = iprot.readMapBegin()
-          for _i444 in xrange(_size440):
-            _key445 = iprot.readI32();
-            _val446 = LocalAssignment()
-            _val446.read(iprot)
-            self.assignments[_key445] = _val446
+          (_ktype450, _vtype451, _size449 ) = iprot.readMapBegin()
+          for _i453 in xrange(_size449):
+            _key454 = iprot.readI32();
+            _val455 = LocalAssignment()
+            _val455.read(iprot)
+            self.assignments[_key454] = _val455
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -6132,9 +6155,9 @@ def write(self, oprot):
     if self.assignments is not None:
       oprot.writeFieldBegin('assignments', TType.MAP, 1)
       oprot.writeMapBegin(TType.I32, TType.STRUCT, len(self.assignments))
-      for kiter447,viter448 in self.assignments.items():
-        oprot.writeI32(kiter447)
-        viter448.write(oprot)
+      for kiter456,viter457 in self.assignments.items():
+        oprot.writeI32(kiter456)
+        viter457.write(oprot)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -6207,11 +6230,11 @@ def read(self, iprot):
       elif fid == 3:
         if ftype == TType.LIST:
           self.executors = []
-          (_etype452, _size449) = iprot.readListBegin()
-          for _i453 in xrange(_size449):
-            _elem454 = ExecutorInfo()
-            _elem454.read(iprot)
-            self.executors.append(_elem454)
+          (_etype461, _size458) = iprot.readListBegin()
+          for _i462 in xrange(_size458):
+            _elem463 = ExecutorInfo()
+            _elem463.read(iprot)
+            self.executors.append(_elem463)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -6241,8 +6264,8 @@ def write(self, oprot):
     if self.executors is not None:
       oprot.writeFieldBegin('executors', TType.LIST, 3)
       oprot.writeListBegin(TType.STRUCT, len(self.executors))
-      for iter455 in self.executors:
-        iter455.write(oprot)
+      for iter464 in self.executors:
+        iter464.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.port is not None:
diff --git a/storm-core/src/storm.thrift b/storm-core/src/storm.thrift
index f6876816aed..24f32a10e78 100644
--- a/storm-core/src/storm.thrift
+++ b/storm-core/src/storm.thrift
@@ -81,6 +81,7 @@ struct ComponentCommon {
   1: required map<GlobalStreamId, Grouping> inputs;
   2: required map<string, StreamInfo> streams; //key is stream id
   3: optional i32 parallelism_hint; //how many threads across the cluster should be dedicated to this component
+  4: optional map<string, i32> batch_sizes; //output batch size per declared output stream
 
   // component specific configuration respects:
   // topology.debug: false
@@ -89,7 +90,7 @@ struct ComponentCommon {
   // topology.kryo.register // this is the only additive one
   
   // component specific configuration
-  4: optional string json_conf;
+  5: optional string json_conf;
 }
 
 struct SpoutSpec {

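For illustration, the new field is reachable through the regenerated ComponentCommon API. A minimal sketch, assuming the usual Thrift-generated accessor names (set_batch_sizes follows the conventional setter naming and is an assumption here; the matching get_batch_sizes is what the executor reads in PATCH 3/3 below):

    // Hedged sketch: attaching per-stream output batch sizes to a component.
    import backtype.storm.generated.ComponentCommon;
    import java.util.HashMap;
    import java.util.Map;

    class BatchSizeExample {
        static void declareBatchSizes(ComponentCommon common) {
            Map<String, Integer> batchSizes = new HashMap<String, Integer>();
            batchSizes.put("default", 100); // emit the default stream in batches of 100
            common.set_batch_sizes(batchSizes);
        }
    }
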
From 00d8294664e7cdf2f9339ea01a4b08ae0687c7a6 Mon Sep 17 00:00:00 2001
From: mjsax 
Date: Tue, 29 Sep 2015 20:57:17 +0200
Subject: [PATCH 2/3] added Batch type (as alternative to Tuple)
 - extended Kryo (de)serialization for Batch
 - integrated Tuple and Batch (de)serialization

---
 .../storm/serialization/IBatchSerializer.java | 24 +++++
 ...izer.java => ITupleBatchDeserializer.java} |  4 +-
 .../serialization/ITupleBatchSerializer.java  | 20 ++++
 .../storm/serialization/ITupleSerializer.java |  4 +-
 .../serialization/KryoBatchSerializer.java    | 58 ++++++++++++
 .../KryoTupleBatchDeserializer.java           | 81 ++++++++++++++++
 .../KryoTupleBatchSerializer.java             | 42 +++++++++
 .../serialization/KryoTupleDeserializer.java  | 59 ------------
 .../serialization/KryoTupleSerializer.java    |  5 +-
 .../src/jvm/backtype/storm/tuple/Batch.java   | 94 +++++++++++++++++++
 .../clj/backtype/storm/serialization_test.clj |  2 +-
 11 files changed, 327 insertions(+), 66 deletions(-)
 create mode 100644 storm-core/src/jvm/backtype/storm/serialization/IBatchSerializer.java
 rename storm-core/src/jvm/backtype/storm/serialization/{ITupleDeserializer.java => ITupleBatchDeserializer.java} (91%)
 create mode 100644 storm-core/src/jvm/backtype/storm/serialization/ITupleBatchSerializer.java
 create mode 100644 storm-core/src/jvm/backtype/storm/serialization/KryoBatchSerializer.java
 create mode 100644 storm-core/src/jvm/backtype/storm/serialization/KryoTupleBatchDeserializer.java
 create mode 100644 storm-core/src/jvm/backtype/storm/serialization/KryoTupleBatchSerializer.java
 delete mode 100644 storm-core/src/jvm/backtype/storm/serialization/KryoTupleDeserializer.java
 create mode 100644 storm-core/src/jvm/backtype/storm/tuple/Batch.java

diff --git a/storm-core/src/jvm/backtype/storm/serialization/IBatchSerializer.java b/storm-core/src/jvm/backtype/storm/serialization/IBatchSerializer.java
new file mode 100644
index 00000000000..17502fdb2a2
--- /dev/null
+++ b/storm-core/src/jvm/backtype/storm/serialization/IBatchSerializer.java
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package backtype.storm.serialization;
+
+import backtype.storm.tuple.Batch;
+
+public interface IBatchSerializer {
+    byte[] serialize(Batch batch);
+}
diff --git a/storm-core/src/jvm/backtype/storm/serialization/ITupleDeserializer.java b/storm-core/src/jvm/backtype/storm/serialization/ITupleBatchDeserializer.java
similarity index 91%
rename from storm-core/src/jvm/backtype/storm/serialization/ITupleDeserializer.java
rename to storm-core/src/jvm/backtype/storm/serialization/ITupleBatchDeserializer.java
index 4e68658555e..a019a840a2e 100644
--- a/storm-core/src/jvm/backtype/storm/serialization/ITupleDeserializer.java
+++ b/storm-core/src/jvm/backtype/storm/serialization/ITupleBatchDeserializer.java
@@ -20,6 +20,6 @@
 import backtype.storm.tuple.Tuple;
 import java.io.IOException;
 
-public interface ITupleDeserializer {
-    Tuple deserialize(byte[] ser);        
+public interface ITupleBatchDeserializer {
+    Object deserialize(byte[] ser);
 }
diff --git a/storm-core/src/jvm/backtype/storm/serialization/ITupleBatchSerializer.java b/storm-core/src/jvm/backtype/storm/serialization/ITupleBatchSerializer.java
new file mode 100644
index 00000000000..060d9896ca3
--- /dev/null
+++ b/storm-core/src/jvm/backtype/storm/serialization/ITupleBatchSerializer.java
@@ -0,0 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package backtype.storm.serialization;
+
+public interface ITupleBatchSerializer extends ITupleSerializer, IBatchSerializer {}
diff --git a/storm-core/src/jvm/backtype/storm/serialization/ITupleSerializer.java b/storm-core/src/jvm/backtype/storm/serialization/ITupleSerializer.java
index 90ad932411b..b28d4cb88ea 100644
--- a/storm-core/src/jvm/backtype/storm/serialization/ITupleSerializer.java
+++ b/storm-core/src/jvm/backtype/storm/serialization/ITupleSerializer.java
@@ -17,10 +17,10 @@
  */
 package backtype.storm.serialization;
 
-import backtype.storm.tuple.Tuple;
+import backtype.storm.tuple.TupleImpl;
 
 
 public interface ITupleSerializer {
-    byte[] serialize(Tuple tuple);
+    byte[] serialize(TupleImpl tuple);
 //    long crc32(Tuple tuple);
 }
diff --git a/storm-core/src/jvm/backtype/storm/serialization/KryoBatchSerializer.java b/storm-core/src/jvm/backtype/storm/serialization/KryoBatchSerializer.java
new file mode 100644
index 00000000000..96de750e469
--- /dev/null
+++ b/storm-core/src/jvm/backtype/storm/serialization/KryoBatchSerializer.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package backtype.storm.serialization;
+
+import backtype.storm.task.GeneralTopologyContext;
+import backtype.storm.tuple.Batch;
+import backtype.storm.tuple.TupleImpl;
+import backtype.storm.tuple.Values;
+import com.esotericsoftware.kryo.io.Output;
+
+import java.io.IOException;
+import java.util.Map;
+
+public class KryoBatchSerializer implements IBatchSerializer {
+    KryoValuesSerializer _kryo;
+    SerializationFactory.IdDictionary _ids;
+    Output _kryoOut;
+
+    public KryoBatchSerializer(final Map conf, final GeneralTopologyContext context) {
+        _kryo = new KryoValuesSerializer(conf);
+        _kryoOut = new Output(2000, 2000000000);
+        _ids = new SerializationFactory.IdDictionary(context.getRawTopology());
+    }
+
+    public byte[] serialize(Batch batch) {
+        final int size = batch.tupleBuffer.size();
+        try {
+            _kryoOut.clear();
+            _kryoOut.writeByte('B');
+            _kryoOut.writeInt(size);
+            _kryoOut.writeInt(batch.sourceTaskId, true);
+            _kryoOut.writeInt(_ids.getStreamId(batch.sourceComponent, batch.streamId), true);
+            for(int i = 0; i < size; ++i) {
+                batch.idBuffer.get(i).serialize(_kryoOut);
+                _kryo.serializeInto(batch.tupleBuffer.get(i), _kryoOut);
+            }
+            return _kryoOut.toBytes();
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+}
diff --git a/storm-core/src/jvm/backtype/storm/serialization/KryoTupleBatchDeserializer.java b/storm-core/src/jvm/backtype/storm/serialization/KryoTupleBatchDeserializer.java
new file mode 100644
index 00000000000..f6e1feae24d
--- /dev/null
+++ b/storm-core/src/jvm/backtype/storm/serialization/KryoTupleBatchDeserializer.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package backtype.storm.serialization;
+
+import backtype.storm.task.GeneralTopologyContext;
+import backtype.storm.tuple.Batch;
+import backtype.storm.tuple.MessageId;
+import backtype.storm.tuple.TupleImpl;
+import com.esotericsoftware.kryo.io.Input;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+public class KryoTupleBatchDeserializer implements ITupleBatchDeserializer {
+    GeneralTopologyContext _context;
+    KryoValuesDeserializer _kryo;
+    SerializationFactory.IdDictionary _ids;
+    Input _kryoInput;
+
+    public KryoTupleBatchDeserializer(final Map conf, final GeneralTopologyContext context) {
+        _kryo = new KryoValuesDeserializer(conf);
+        _context = context;
+        _ids = new SerializationFactory.IdDictionary(context.getRawTopology());
+        _kryoInput = new Input(1);
+    }
+
+    public Object deserialize(byte[] ser) {
+        try {
+            _kryoInput.setBuffer(ser);
+            byte header = _kryoInput.readByte();
+            if(header == 'T') {
+                int taskId = _kryoInput.readInt(true);
+                int streamId = _kryoInput.readInt(true);
+                MessageId id = MessageId.deserialize(_kryoInput);
+                List<Object> values = _kryo.deserializeFrom(_kryoInput);
+
+                String componentName = _context.getComponentId(taskId);
+                String streamName = _ids.getStreamName(componentName, streamId);
+                
+                return new TupleImpl(_context, values, taskId, streamName, id);
+            } else {
+                assert (header == 'B');
+                final int size = _kryoInput.readInt();
+                int taskId = _kryoInput.readInt(true);
+                int streamId = _kryoInput.readInt(true);
+
+                List<TupleImpl> batch = new ArrayList<TupleImpl>(size);
+                for(int i = 0; i < size; ++i) {
+                    MessageId id = MessageId.deserialize(_kryoInput);
+                    List<Object> values = _kryo.deserializeFrom(_kryoInput);
+
+                    String componentName = _context.getComponentId(taskId);
+                    String streamName = _ids.getStreamName(componentName, streamId);
+
+                    batch.add(new TupleImpl(_context, values, taskId, streamName, id));
+                }
+                return batch;
+            }
+        } catch(IOException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+}
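Since deserialize returns either a single TupleImpl ('T' frame) or a list of tuples ('B' frame), receivers have to branch on the runtime type. A minimal consumer sketch; handle and process are hypothetical names, and conf/context are assumed to be the usual topology configuration and context objects:

    // Hedged sketch: consuming the Tuple-or-Batch union returned by deserialize().
    import backtype.storm.serialization.KryoTupleBatchDeserializer;
    import backtype.storm.task.GeneralTopologyContext;
    import backtype.storm.tuple.Tuple;
    import java.util.List;
    import java.util.Map;

    class DebatchExample {
        static void handle(byte[] bytes, Map conf, GeneralTopologyContext context) {
            KryoTupleBatchDeserializer deser = new KryoTupleBatchDeserializer(conf, context);
            Object msg = deser.deserialize(bytes);
            if (msg instanceof Tuple) {
                process((Tuple) msg);               // single 'T' frame
            } else {
                for (Tuple t : (List<Tuple>) msg) { // 'B' frame: debatch
                    process(t);
                }
            }
        }
        static void process(Tuple t) { /* application logic */ }
    }

This is the same dispatch the executor performs in PATCH 3/3, where locally transferred Batch objects are additionally expanded via getAsTupleList.
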
diff --git a/storm-core/src/jvm/backtype/storm/serialization/KryoTupleBatchSerializer.java b/storm-core/src/jvm/backtype/storm/serialization/KryoTupleBatchSerializer.java
new file mode 100644
index 00000000000..4d48d4cf461
--- /dev/null
+++ b/storm-core/src/jvm/backtype/storm/serialization/KryoTupleBatchSerializer.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package backtype.storm.serialization;
+
+import backtype.storm.task.GeneralTopologyContext;
+import backtype.storm.tuple.Batch;
+import backtype.storm.tuple.TupleImpl;
+import java.util.Map;
+
+public class KryoTupleBatchSerializer implements ITupleBatchSerializer {
+    KryoTupleSerializer _kryoTuple;
+    KryoBatchSerializer _kryoBatch;
+
+    public KryoTupleBatchSerializer(final Map conf, final GeneralTopologyContext context) {
+        _kryoTuple = new KryoTupleSerializer(conf, context);
+        _kryoBatch = new KryoBatchSerializer(conf, context);
+    }
+
+    public byte[] serialize(TupleImpl tuple) {
+        return _kryoTuple.serialize(tuple);
+    }
+
+    public byte[] serialize(Batch batch) {
+        return _kryoBatch.serialize(batch);
+    }
+
+}
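The combined serializer relies on static overload resolution, so the frame header is determined by the compile-time argument type: serialize(TupleImpl) frames with 'T', serialize(Batch) with 'B'. A brief sketch, with conf, context, tuple, and batch as assumed inputs:

    // Hedged sketch: overload dispatch picks the frame type.
    import backtype.storm.serialization.KryoTupleBatchSerializer;
    import backtype.storm.task.GeneralTopologyContext;
    import backtype.storm.tuple.Batch;
    import backtype.storm.tuple.TupleImpl;
    import java.util.Map;

    class FramingExample {
        static void frame(Map conf, GeneralTopologyContext context, TupleImpl tuple, Batch batch) {
            KryoTupleBatchSerializer ser = new KryoTupleBatchSerializer(conf, context);
            byte[] tupleFrame = ser.serialize(tuple); // written with header byte 'T'
            byte[] batchFrame = ser.serialize(batch); // written with header byte 'B'
        }
    }
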
diff --git a/storm-core/src/jvm/backtype/storm/serialization/KryoTupleDeserializer.java b/storm-core/src/jvm/backtype/storm/serialization/KryoTupleDeserializer.java
deleted file mode 100644
index 5a5e3a4ea3e..00000000000
--- a/storm-core/src/jvm/backtype/storm/serialization/KryoTupleDeserializer.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package backtype.storm.serialization;
-
-import backtype.storm.task.GeneralTopologyContext;
-import backtype.storm.tuple.MessageId;
-import backtype.storm.tuple.Tuple;
-import backtype.storm.tuple.TupleImpl;
-import backtype.storm.utils.WritableUtils;
-import com.esotericsoftware.kryo.io.Input;
-import java.io.ByteArrayInputStream;
-import java.io.DataInputStream;
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-
-public class KryoTupleDeserializer implements ITupleDeserializer {
-    GeneralTopologyContext _context;
-    KryoValuesDeserializer _kryo;
-    SerializationFactory.IdDictionary _ids;
-    Input _kryoInput;
-    
-    public KryoTupleDeserializer(final Map conf, final GeneralTopologyContext context) {
-        _kryo = new KryoValuesDeserializer(conf);
-        _context = context;
-        _ids = new SerializationFactory.IdDictionary(context.getRawTopology());
-        _kryoInput = new Input(1);
-    }        
-
-    public Tuple deserialize(byte[] ser) {
-        try {
-            _kryoInput.setBuffer(ser);
-            int taskId = _kryoInput.readInt(true);
-            int streamId = _kryoInput.readInt(true);
-            String componentName = _context.getComponentId(taskId);
-            String streamName = _ids.getStreamName(componentName, streamId);
-            MessageId id = MessageId.deserialize(_kryoInput);
-            List<Object> values = _kryo.deserializeFrom(_kryoInput);
-            return new TupleImpl(_context, values, taskId, streamName, id);
-        } catch(IOException e) {
-            throw new RuntimeException(e);
-        }
-    }
-}
diff --git a/storm-core/src/jvm/backtype/storm/serialization/KryoTupleSerializer.java b/storm-core/src/jvm/backtype/storm/serialization/KryoTupleSerializer.java
index af95cb055ae..407003644fd 100644
--- a/storm-core/src/jvm/backtype/storm/serialization/KryoTupleSerializer.java
+++ b/storm-core/src/jvm/backtype/storm/serialization/KryoTupleSerializer.java
@@ -18,7 +18,7 @@
 package backtype.storm.serialization;
 
 import backtype.storm.task.GeneralTopologyContext;
-import backtype.storm.tuple.Tuple;
+import backtype.storm.tuple.TupleImpl;
 import com.esotericsoftware.kryo.io.Output;
 import java.io.IOException;
 import java.util.Map;
@@ -34,10 +34,11 @@ public KryoTupleSerializer(final Map conf, final GeneralTopologyContext context)
         _ids = new SerializationFactory.IdDictionary(context.getRawTopology());
     }
 
-    public byte[] serialize(Tuple tuple) {
+    public byte[] serialize(TupleImpl tuple) {
         try {
             
             _kryoOut.clear();
+            _kryoOut.writeByte('T');
             _kryoOut.writeInt(tuple.getSourceTask(), true);
             _kryoOut.writeInt(_ids.getStreamId(tuple.getSourceComponent(), tuple.getSourceStreamId()), true);
             tuple.getMessageId().serialize(_kryoOut);
diff --git a/storm-core/src/jvm/backtype/storm/tuple/Batch.java b/storm-core/src/jvm/backtype/storm/tuple/Batch.java
new file mode 100644
index 00000000000..63ff3cd8b26
--- /dev/null
+++ b/storm-core/src/jvm/backtype/storm/tuple/Batch.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package backtype.storm.tuple;
+
+import backtype.storm.generated.GlobalStreamId;
+import backtype.storm.task.GeneralTopologyContext;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+public class Batch {
+    public final int capacity;
+    public final GeneralTopologyContext context;
+    public final String sourceComponent;
+    public final int sourceTaskId;
+    public final String streamId;
+    public final ArrayList<List<Object>> tupleBuffer;
+    public final ArrayList<MessageId> idBuffer;
+
+
+    public Batch(final int capacity, GeneralTopologyContext context, String sourceComponent, int sourceTaskId, String streamId) {
+        assert (capacity > 0);
+        assert (context != null);
+        
+        this.capacity = capacity;
+        this.context = context;
+        this.sourceComponent = sourceComponent;
+        this.sourceTaskId = sourceTaskId;
+        this.streamId = streamId;
+        
+        this.tupleBuffer = new ArrayList<List<Object>>(capacity);
+        this.idBuffer = new ArrayList<MessageId>(capacity);
+    }
+
+    public void add(List<Object> tuple, MessageId id) {
+        assert(this.tupleBuffer.size() < this.capacity);
+        assert(this.tupleBuffer.size() == this.idBuffer.size());
+
+        if(id == null) {
+            id = MessageId.makeUnanchored();
+        }
+
+        this.tupleBuffer.add(tuple);
+        this.idBuffer.add(id);
+    }
+
+    public int size() {
+        assert(this.tupleBuffer.size() == this.idBuffer.size());
+        return this.tupleBuffer.size();
+    }
+
+    public Batch newInstance() {
+        return new Batch(this.capacity, this.context, this.sourceComponent, this.sourceTaskId, this.streamId);
+    }
+
+    public List<Tuple> getAsTupleList() {
+        assert(this.tupleBuffer.size() == this.idBuffer.size());
+        
+        final int size = this.tupleBuffer.size();
+        
+        ArrayList<Tuple> result = new ArrayList<>(size);
+        for(int i = 0; i < size; ++i) {
+            result.add(new TupleImpl(context, this.tupleBuffer.get(i), sourceTaskId, this.streamId, this.idBuffer.get(i)));
+        }
+        
+        return result;
+    }
+    
+    public String toString() {
+        assert(this.tupleBuffer.size() == this.idBuffer.size());
+        String rc = "Batch(" + this.capacity + ") source: " + this.sourceComponent + " id: " + this.sourceTaskId + " stream: " + this.streamId + " data: [";
+        for (int i = 0; i < this.tupleBuffer.size(); ++i) {
+            rc += this.tupleBuffer.get(i) + "[" + this.idBuffer.get(i) + "]" + ", ";
+        }
+        return rc + "]";
+    }
+
+}
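A small usage sketch of the buffer API above; the component name, task id, and values are illustrative stand-ins:

    // Hedged sketch: filling a Batch to capacity and draining it into Tuples.
    import backtype.storm.task.GeneralTopologyContext;
    import backtype.storm.tuple.Batch;
    import backtype.storm.tuple.MessageId;
    import backtype.storm.tuple.Tuple;
    import backtype.storm.tuple.Values;
    import java.util.List;

    class BatchUsageExample {
        static void roundTrip(GeneralTopologyContext context) {
            Batch batch = new Batch(2, context, "mySpout", 7, "default"); // capacity 2
            batch.add(new Values("a", 1), null); // null id becomes makeUnanchored()
            batch.add(new Values("b", 2), MessageId.makeUnanchored());
            if (batch.size() == batch.capacity) {
                List<Tuple> tuples = batch.getAsTupleList(); // rebuild TupleImpls
                Batch next = batch.newInstance();            // fresh buffer, same stream
            }
        }
    }
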
diff --git a/storm-core/test/clj/backtype/storm/serialization_test.clj b/storm-core/test/clj/backtype/storm/serialization_test.clj
index 7f1c0a9e734..44d16aecd9b 100644
--- a/storm-core/test/clj/backtype/storm/serialization_test.clj
+++ b/storm-core/test/clj/backtype/storm/serialization_test.clj
@@ -15,7 +15,7 @@
 ;; limitations under the License.
 (ns backtype.storm.serialization-test
   (:use [clojure test])
-  (:import [backtype.storm.serialization KryoTupleSerializer KryoTupleDeserializer
+  (:import [backtype.storm.serialization KryoTupleBatchSerializer KryoTupleBatchDeserializer
             KryoValuesSerializer KryoValuesDeserializer])
   (:import [backtype.storm.testing TestSerObject TestKryoDecorator])
   (:import [backtype.storm ConfigValidation])

From 4d3d63ee905a3734935ec622eabdaa7d2f295d6d Mon Sep 17 00:00:00 2001
From: mjsax 
Date: Tue, 29 Sep 2015 20:56:09 +0200
Subject: [PATCH 3/3] [STORM-855] Add tuple batching
 - added internal tuple buffers (one for each output stream)
 - added Spout/Bolt output batching
 - added Bolt input debatching
 - added batching of "acks"

---
 .../clj/backtype/storm/daemon/executor.clj    | 295 +++++++++++++-----
 .../src/clj/backtype/storm/daemon/task.clj    |  28 +-
 .../src/clj/backtype/storm/daemon/worker.clj  |  44 +--
 3 files changed, 219 insertions(+), 148 deletions(-)

diff --git a/storm-core/src/clj/backtype/storm/daemon/executor.clj b/storm-core/src/clj/backtype/storm/daemon/executor.clj
index d7a68bebf6a..6b0b7502505 100644
--- a/storm-core/src/clj/backtype/storm/daemon/executor.clj
+++ b/storm-core/src/clj/backtype/storm/daemon/executor.clj
@@ -15,26 +15,25 @@
 ;; limitations under the License.
 (ns backtype.storm.daemon.executor
   (:use [backtype.storm.daemon common])
-  (:import [backtype.storm.generated Grouping]
-           [java.io Serializable])
+  (:import [java.io Serializable])
   (:use [backtype.storm util config log timer stats])
-  (:import [java.util List Random HashMap ArrayList LinkedList Map])
+  (:import [java.util List Random HashMap ArrayList Map])
   (:import [backtype.storm ICredentialsListener])
-  (:import [backtype.storm.hooks ITaskHook])
-  (:import [backtype.storm.tuple Tuple Fields TupleImpl MessageId])
+  (:import [backtype.storm.task GeneralTopologyContext])
+  (:import [backtype.storm.tuple Tuple Fields TupleImpl MessageId Batch Values])
   (:import [backtype.storm.spout ISpoutWaitStrategy ISpout SpoutOutputCollector ISpoutOutputCollector])
-  (:import [backtype.storm.hooks.info SpoutAckInfo SpoutFailInfo
-            EmitInfo BoltFailInfo BoltAckInfo BoltExecuteInfo])
+  (:import [backtype.storm.hooks.info SpoutAckInfo SpoutFailInfo BoltFailInfo BoltAckInfo BoltExecuteInfo])
   (:import [backtype.storm.grouping CustomStreamGrouping])
   (:import [backtype.storm.task WorkerTopologyContext IBolt OutputCollector IOutputCollector])
   (:import [backtype.storm.generated GlobalStreamId])
   (:import [backtype.storm.utils Utils MutableObject RotatingMap RotatingMap$ExpiredCallback MutableLong Time DisruptorQueue WorkerBackpressureThread])
   (:import [com.lmax.disruptor InsufficientCapacityException])
-  (:import [backtype.storm.serialization KryoTupleSerializer KryoTupleDeserializer])
+  (:import [backtype.storm.serialization KryoTupleSerializer KryoBatchSerializer KryoTupleBatchSerializer KryoTupleBatchDeserializer])
   (:import [backtype.storm.daemon Shutdownable])
-  (:import [backtype.storm.metric.api IMetric IMetricsConsumer$TaskInfo IMetricsConsumer$DataPoint StateMetric])
-  (:import [backtype.storm Config Constants])
+  (:import [backtype.storm.metric.api IMetric IMetricsConsumer$TaskInfo IMetricsConsumer$DataPoint])
+  (:import [backtype.storm Constants])
   (:import [java.util.concurrent ConcurrentLinkedQueue])
+  (:import [backtype.storm.messaging TaskMessage])
   (:require [backtype.storm [tuple :as tuple] [thrift :as thrift]
              [cluster :as cluster] [disruptor :as disruptor] [stats :as stats]])
   (:require [backtype.storm.daemon [task :as task]])
@@ -261,7 +260,7 @@
                                     (exception-cause? java.io.InterruptedIOException error))
                               (log-message "Got interrupted exception shutting thread down...")
                                ((:suicide-fn <>))))
-     :deserializer (KryoTupleDeserializer. storm-conf worker-context)
+     :deserializer (KryoTupleBatchDeserializer. storm-conf worker-context)
      :sampler (mk-stats-sampler storm-conf)
      :backpressure (atom false)
      :spout-throttling-metrics (if (= executor-type :spout) 
@@ -286,11 +285,67 @@
         (do (reset! (:backpressure executor-data) false)
             (WorkerBackpressureThread/notifyBackpressureChecker (:backpressure-trigger (:worker executor-data))))))))
 
+;; TODO KryoTupleSerializer KryoBatchSerializer ??
+(defn- assert-can-serialize [^KryoTupleBatchSerializer serializer tuple-batch]
+  "Check that all of the tuples can be serialized by serializing them."
+  (fast-list-iter [[task tuple :as pair] tuple-batch]
+                  (.serialize serializer tuple)))
+
+(defn mk-transfer-fn [worker batchSizes]
+  (let [local-tasks (-> worker :task-ids set)
+        local-transfer (:transfer-local-fn worker)
+        ^DisruptorQueue transfer-queue (:transfer-queue worker)
+        task->node+port (:cached-task->node+port worker)
+        try-serialize-local ((:storm-conf worker) TOPOLOGY-TESTING-ALWAYS-TRY-SERIALIZE)
+        ser-tuple-fn (fn [^KryoTupleSerializer serializer ^Tuple t] (.serialize serializer t))
+        ser-batch-fn (fn [^KryoBatchSerializer serializer ^Batch b] (.serialize serializer b))
+        ;;ser-hybrid-fn (fn [^KryoTupleBatchSerializer serializer tuple-or-batch] (.serialize serializer tuple-or-batch))
+        ser-fn (fn [serializer tuple-batch s-fn]
+                 (let [local (ArrayList.)
+                       remoteMap (HashMap.)]
+                   (fast-list-iter [[task tupleOrBatch :as pair] tuple-batch]
+                                   (if (local-tasks task)
+                                     (.add local pair)
+
+                                     ;;Using java objects directly to avoid performance issues in java code
+                                     (do
+                                       (when (not (.get remoteMap task))
+                                         (.put remoteMap task (ArrayList.)))
+                                       (let [remote (.get remoteMap task)]
+                                         (if (not-nil? task)
+                                           (.add remote (TaskMessage. task (s-fn serializer tupleOrBatch)))
+                                           (log-warn "Can't transfer tuple - task value is nil. tuple type: " (pr-str (type tupleOrBatch)) " and information: " (pr-str tupleOrBatch)))
+                                         ))))
+                   (local-transfer local)
+                   (disruptor/publish transfer-queue remoteMap)))
+        ;; TODO KryoTupleBatchSerializer
+        transfer-fn (if (and (not (nil? batchSizes)) (> (.size batchSizes) 0))  ;; TODO check if each batch size is larger than zero
+                      (fn [^KryoBatchSerializer serializer tuple-batch]
+                        (ser-fn serializer tuple-batch ser-batch-fn))
+                      (fn [^KryoTupleSerializer serializer tuple-batch]
+                        (ser-fn serializer tuple-batch ser-tuple-fn))
+                      )
+        ;;transfer-fn (fn [^KryoTupleBatchSerializer serializer tuple-batch]
+        ;;                    (ser-fn serializer tuple-batch ser-hybrid-fn))
+        ]
+    (if try-serialize-local
+      (do
+        (log-warn "WILL TRY TO SERIALIZE ALL TUPLES (Turn off " TOPOLOGY-TESTING-ALWAYS-TRY-SERIALIZE " for production)")
+        (fn [^KryoTupleBatchSerializer serializer tuple-batch]
+          (assert-can-serialize serializer tuple-batch)
+          (transfer-fn serializer tuple-batch)))
+      transfer-fn)))
+
 (defn start-batch-transfer->worker-handler! [worker executor-data]
-  (let [worker-transfer-fn (:transfer-fn worker)
+  (let [worker-context (:worker-context executor-data)
+        batchSizes (.get_batch_sizes (.getComponentCommon worker-context (:component-id executor-data)))
+        worker-transfer-fn (mk-transfer-fn worker batchSizes)
         cached-emit (MutableObject. (ArrayList.))
         storm-conf (:storm-conf executor-data)
-        serializer (KryoTupleSerializer. storm-conf (:worker-context executor-data))
+        serializer (if (and (not (nil? batchSizes)) (> (.size batchSizes) 0)) ;; TODO check if each batch size is larger than zero
+                     (KryoBatchSerializer. storm-conf worker-context)
+                     (KryoTupleSerializer. storm-conf worker-context))
+        ;;serializer (KryoTupleBatchSerializer. storm-conf worker-context)
         ]
     (disruptor/consume-loop*
       (:batch-transfer-queue executor-data)
@@ -317,7 +372,7 @@
           [[nil (TupleImpl. worker-context [interval] Constants/SYSTEM_TASK_ID Constants/METRICS_TICK_STREAM_ID)]]))))))
 
 (defn metrics-tick
-  ([executor-data task-data ^TupleImpl tuple overflow-buffer]
+  ([executor-data task-data ^TupleImpl tuple transfer-metrics-fn]
    (let [{:keys [interval->task->metric-registry ^WorkerTopologyContext worker-context]} executor-data
          interval (.getInteger tuple 0)
          task-id (:task-id task-data)
@@ -337,12 +392,9 @@
                           (filter identity)
                           (into []))]
      (if (seq data-points)
-       (task/send-unanchored task-data Constants/METRICS_STREAM_ID [task-info data-points] overflow-buffer))))
-  ([executor-data task-data ^TupleImpl tuple]
-    (metrics-tick executor-data task-data tuple nil)
+       (task/send-unanchored task-data Constants/METRICS_STREAM_ID [task-info data-points] transfer-metrics-fn)))
     ))
 
-
 (defn setup-ticks! [worker executor-data]
   (let [storm-conf (:storm-conf executor-data)
         tick-time-secs (storm-conf TOPOLOGY-TICK-TUPLE-FREQ-SECS)
@@ -363,12 +415,22 @@
               [[nil (TupleImpl. context [tick-time-secs] Constants/SYSTEM_TASK_ID Constants/SYSTEM_TICK_STREAM_ID)]]
               )))))))
 
+(defn mk-transfer-tuple [transfer-fn ^GeneralTopologyContext worker-context ^Integer task-id overflow-buffer]
+  (fn emit-msg [^Integer out-task ^Values values ^MessageId tuple-id ^String out-stream-id]
+    (transfer-fn out-task
+                 (TupleImpl. worker-context
+                             values
+                             task-id
+                             out-stream-id
+                             tuple-id)
+                 overflow-buffer)))
+
 (defn mk-executor [worker executor-id initial-credentials]
   (let [executor-data (mk-executor-data worker executor-id)
         _ (log-message "Loading executor " (:component-id executor-data) ":" (pr-str executor-id))
         task-datas (->> executor-data
                         :task-ids
-                        (map (fn [t] [t (task/mk-task executor-data t)]))
+                        (map (fn [t] [t (task/mk-task executor-data t (mk-transfer-tuple (:transfer-fn executor-data) (:worker-context executor-data) t nil))]))
                         (into {})
                         (HashMap.))
         _ (log-message "Loaded executor tasks " (:component-id executor-data) ":" (pr-str executor-id))
@@ -454,20 +516,20 @@
       (stats/spout-acked-tuple! (:stats executor-data) (:stream tuple-info) time-delta))))
 
 (defn mk-task-receiver [executor-data tuple-action-fn]
-  (let [^KryoTupleDeserializer deserializer (:deserializer executor-data)
+  (let [^KryoTupleBatchDeserializer deserializer (:deserializer executor-data)
         task-ids (:task-ids executor-data)
         debug? (= true (-> executor-data :storm-conf (get TOPOLOGY-DEBUG)))
         ]
     (disruptor/clojure-handler
       (fn [tuple-batch sequence-id end-of-batch?]
         (fast-list-iter [[task-id msg] tuple-batch]
-          (let [^TupleImpl tuple (if (instance? Tuple msg) msg (.deserialize deserializer msg))]
-            (when debug? (log-message "Processing received message FOR " task-id " TUPLE: " tuple))
+          (let [tupleOrBatch (if (or (instance? Tuple msg) (instance? Batch msg)) msg (.deserialize deserializer msg))]
+            (when debug? (log-message "Processing received message FOR " task-id " TUPLE: " tupleOrBatch))
             (if task-id
-              (tuple-action-fn task-id tuple)
+              (tuple-action-fn task-id tupleOrBatch)
               ;; null task ids are broadcast tuples
               (fast-list-iter [task-id task-ids]
-                (tuple-action-fn task-id tuple)
+                (tuple-action-fn task-id tupleOrBatch)
                 ))
             ))))))
 
@@ -483,7 +545,7 @@
 
 ;; Send sampled data to the eventlogger if the global or component level
 ;; debug flag is set (via nimbus api).
-(defn send-to-eventlogger [executor-data task-data values overflow-buffer component-id message-id random]
+(defn send-to-eventlogger [executor-data task-data values component-id message-id random transfer-fn]
     (let [c->d @(:storm-component->debug-atom executor-data)
           options (get c->d component-id (get c->d (:storm-id executor-data)))
           spct    (if (and (not-nil? options) (:enable options)) (:samplingpct options) 0)]
@@ -494,7 +556,48 @@
           task-data
           EVENTLOGGER-STREAM-ID
           [component-id message-id (System/currentTimeMillis) values]
-          overflow-buffer))))
+          transfer-fn))))
+
+(defn init-batching-buffer [worker-context component-id task-id]
+  (let [batch-sizes (.get_batch_sizes (.getComponentCommon worker-context component-id))]
+    (if (not (nil? batch-sizes))
+      (let [consumer-ids (flatten (for [cids (vals (.getTargets worker-context component-id))] (keys cids)))
+            consumer-task-ids (flatten (for [cid consumer-ids] (into '() (.getComponentTasks worker-context cid))))
+            buffers-per-stream (HashMap.)]
+        (doseq [[stream-id batch-size] batch-sizes]
+          (.put buffers-per-stream stream-id (HashMap. (zipmap consumer-task-ids (repeatedly (count consumer-task-ids) #(Batch. batch-size worker-context component-id task-id stream-id))))))
+        buffers-per-stream)
+      (HashMap.)
+      )))
+
+(defn transfer-batch [^Integer out-task ^Values values ^MessageId out-id overflow-buffer ^HashMap output-batch-buffer transfer-fn ^Batch out-batch]
+  (.add out-batch values out-id)
+  (if (= (.size out-batch) (.capacity out-batch))
+    (do
+      (transfer-fn out-task
+                   out-batch
+                   overflow-buffer)
+      (.put output-batch-buffer out-task (.newInstance out-batch))
+      )))
+
+(defn mk-emit-msg-fn [^GeneralTopologyContext worker-context ^Integer task-id overflow-buffer output-batch-buffers transfer-fn]
+  (let [stream-ids (.getComponentStreams worker-context (.getComponentId worker-context task-id))]
+    (into {} (map #(let [^HashMap buffers (.get output-batch-buffers %)]
+                    (-> [% (if (nil? buffers)
+                             (mk-transfer-tuple transfer-fn worker-context task-id overflow-buffer)
+                             (fn emit-msg [^Integer out-task ^Values values ^MessageId tuple-id ^String _]
+                               (let [^Batch out-batch (.get buffers out-task)]
+                                 (transfer-batch out-task
+                                                 values
+                                                 tuple-id
+                                                 overflow-buffer
+                                                 buffers
+                                                 transfer-fn
+                                                 out-batch)))
+                             )]))
+                  stream-ids
+                  ))
+    ))
 
 (defmethod mk-threads :spout [executor-data task-datas initial-credentials]
   (let [{:keys [storm-conf component-id worker-context transfer-fn report-error sampler open-or-prepare-was-called?]} executor-data
@@ -525,7 +628,10 @@
                           (let [stream-id (.getSourceStreamId tuple)]
                             (condp = stream-id
                               Constants/SYSTEM_TICK_STREAM_ID (.rotate pending)
-                              Constants/METRICS_TICK_STREAM_ID (metrics-tick executor-data (get task-datas task-id) tuple overflow-buffer)
+                              Constants/METRICS_TICK_STREAM_ID (metrics-tick executor-data
+                                                                             (get task-datas task-id)
+                                                                             tuple
+                                                                             (mk-transfer-tuple transfer-fn (:worker-context executor-data) task-id overflow-buffer))
                               Constants/CREDENTIALS_CHANGED_STREAM_ID 
                                 (let [task-data (get task-datas task-id)
                                       spout-obj (:object task-data)]
@@ -551,7 +657,7 @@
         has-eventloggers? (has-eventloggers? storm-conf)
         emitted-count (MutableLong. 0)
         empty-emit-streak (MutableLong. 0)]
-   
+
     [(async-loop
       (fn []
         ;; If topology was started in inactive state, don't call (.open spout) until it's activated first.
@@ -563,7 +669,14 @@
         (doseq [[task-id task-data] task-datas
                 :let [^ISpout spout-obj (:object task-data)
                      tasks-fn (:tasks-fn task-data)
-                     send-spout-msg (fn [out-stream-id values message-id out-task-id]
+                     emit-msg-fns (mk-emit-msg-fn worker-context
+                                              task-id
+                                              overflow-buffer
+                                              (init-batching-buffer worker-context component-id task-id)
+                                              transfer-fn)
+                     ack-fn (get emit-msg-fns ACKER-INIT-STREAM-ID)
+                     event-fn (get emit-msg-fns EVENTLOGGER-STREAM-ID)
+                     send-spout-msg (fn [^String out-stream-id ^Values values ^MessageId message-id ^Integer out-task-id]
                                        (.increment emitted-count)
                                        (let [out-tasks (if out-task-id
                                                          (tasks-fn out-task-id out-stream-id values)
@@ -574,18 +687,14 @@
                                          (fast-list-iter [out-task out-tasks id out-ids]
                                                          (let [tuple-id (if rooted?
                                                                           (MessageId/makeRootId root-id id)
-                                                                          (MessageId/makeUnanchored))
-                                                               out-tuple (TupleImpl. worker-context
-                                                                                     values
-                                                                                     task-id
-                                                                                     out-stream-id
-                                                                                     tuple-id)]
-                                                           (transfer-fn out-task
-                                                                        out-tuple
-                                                                        overflow-buffer)
+                                                                          (MessageId/makeUnanchored))]
+                                                           ((get emit-msg-fns out-stream-id) out-task
+                                                                                             values
+                                                                                             tuple-id
+                                                                                             out-stream-id)
                                                            ))
                                          (if has-eventloggers?
-                                           (send-to-eventlogger executor-data task-data values overflow-buffer component-id message-id rand))
+                                           (send-to-eventlogger executor-data task-data values component-id message-id rand event-fn))
                                          (if (and rooted?
                                                   (not (.isEmpty out-ids)))
                                            (do
@@ -596,7 +705,7 @@
                                              (task/send-unanchored task-data
                                                                    ACKER-INIT-STREAM-ID
                                                                    [root-id (bit-xor-vals out-ids) task-id]
-                                                                   overflow-buffer))
+                                                                   ack-fn))
                                            (when message-id
                                              (ack-spout-msg executor-data task-data message-id
                                                             {:stream out-stream-id :values values}
@@ -718,7 +827,7 @@
        ;; the overflow buffer might gradually fill, degrading performance and eventually
        ;; running out of memory, but it at least prevents live-locks/deadlocks.
         overflow-buffer (if (storm-conf TOPOLOGY-BOLTS-OUTGOING-OVERFLOW-BUFFER-ENABLE) (ConcurrentLinkedQueue.) nil)
-        tuple-action-fn (fn [task-id ^TupleImpl tuple]
+        tuple-action-fn (fn [task-id tupleOrBatch]
                           ;; synchronization needs to be done with a key provided by this bolt, otherwise:
                           ;; spout 1 sends synchronization (s1), dies, same spout restarts somewhere else, sends synchronization (s2) and incremental update. s2 and update finish before s1 -> lose the incremental update
                           ;; TODO: for state sync, need to first send sync messages in a loop and receive tuples until synchronization
@@ -735,41 +844,47 @@
                           
                           ;;(log-debug "Received tuple " tuple " at task " task-id)
                           ;; need to do it this way to avoid reflection
-                          (let [stream-id (.getSourceStreamId tuple)]
-                            (condp = stream-id
-                              Constants/CREDENTIALS_CHANGED_STREAM_ID 
-                                (let [task-data (get task-datas task-id)
-                                      bolt-obj (:object task-data)]
-                                  (when (instance? ICredentialsListener bolt-obj)
-                                    (.setCredentials bolt-obj (.getValue tuple 0))))
-                              Constants/METRICS_TICK_STREAM_ID (metrics-tick executor-data (get task-datas task-id) tuple overflow-buffer)
-                              (let [task-data (get task-datas task-id)
-                                    ^IBolt bolt-obj (:object task-data)
-                                    user-context (:user-context task-data)
-                                    sampler? (sampler)
-                                    execute-sampler? (execute-sampler)
-                                    now (if (or sampler? execute-sampler?) (System/currentTimeMillis))
-                                    receive-queue (:receive-queue executor-data)]
-                                (when sampler?
-                                  (.setProcessSampleStartTime tuple now))
-                                (when execute-sampler?
-                                  (.setExecuteSampleStartTime tuple now))
-                                (.execute bolt-obj tuple)
-                                (let [delta (tuple-execute-time-delta! tuple)]
-                                  (when (= true (storm-conf TOPOLOGY-DEBUG))
-                                    (log-message "Execute done TUPLE " tuple " TASK: " task-id " DELTA: " delta))
- 
-                                  (task/apply-hooks user-context .boltExecute (BoltExecuteInfo. tuple task-id delta))
-                                  (when delta
-                                    (builtin-metrics/bolt-execute-tuple! (:builtin-metrics task-data)
-                                                                         executor-stats
-                                                                         (.getSourceComponent tuple)                                                      
-                                                                         (.getSourceStreamId tuple)
-                                                                         delta)
-                                    (stats/bolt-execute-tuple! executor-stats
-                                                               (.getSourceComponent tuple)
-                                                               (.getSourceStreamId tuple)
-                                                               delta)))))))
+                          (let [tuples (if (instance? TupleImpl tupleOrBatch)
+                                         (list tupleOrBatch)
+                                         (if (instance? Batch tupleOrBatch)
+                                           (.getAsTupleList tupleOrBatch)
+                                           tupleOrBatch))]
+                            (doseq [tuple tuples]
+                              (let [stream-id (.getSourceStreamId tuple)]
+                                (condp = stream-id
+                                  Constants/CREDENTIALS_CHANGED_STREAM_ID
+                                  (let [task-data (get task-datas task-id)
+                                        bolt-obj (:object task-data)]
+                                    (when (instance? ICredentialsListener bolt-obj)
+                                      (.setCredentials bolt-obj (.getValue tuple 0))))
+                                  Constants/METRICS_TICK_STREAM_ID (metrics-tick executor-data (get task-datas task-id) tuple (mk-transfer-tuple transfer-fn (:worker-context executor-data) task-id overflow-buffer))
+                                  (let [task-data (get task-datas task-id)
+                                        ^IBolt bolt-obj (:object task-data)
+                                        user-context (:user-context task-data)
+                                        sampler? (sampler)
+                                        execute-sampler? (execute-sampler)
+                                        now (if (or sampler? execute-sampler?) (System/currentTimeMillis))
+                                        receive-queue (:receive-queue executor-data)]
+                                    (when sampler?
+                                      (.setProcessSampleStartTime tuple now))
+                                    (when execute-sampler?
+                                      (.setExecuteSampleStartTime tuple now))
+                                    (.execute bolt-obj tuple)
+                                    (let [delta (tuple-execute-time-delta! tuple)]
+                                      (when (= true (storm-conf TOPOLOGY-DEBUG))
+                                        (log-message "Execute done TUPLE " tuple " TASK: " task-id " DELTA: " delta))
+
+                                      (task/apply-hooks user-context .boltExecute (BoltExecuteInfo. tuple task-id delta))
+                                      (when delta
+                                        (builtin-metrics/bolt-execute-tuple! (:builtin-metrics task-data)
+                                                                             executor-stats
+                                                                             (.getSourceComponent tuple)
+                                                                             (.getSourceStreamId tuple)
+                                                                             delta)
+                                        (stats/bolt-execute-tuple! executor-stats
+                                                                   (.getSourceComponent tuple)
+                                                                   (.getSourceStreamId tuple)
+                                                                   delta)))))))))
         has-eventloggers? (has-eventloggers? storm-conf)]
     
     ;; TODO: can get any SubscribedState objects out of the context now
@@ -785,6 +900,14 @@
                 :let [^IBolt bolt-obj (:object task-data)
                       tasks-fn (:tasks-fn task-data)
                       user-context (:user-context task-data)
+                      emit-msg-fns (mk-emit-msg-fn worker-context
+                                               task-id
+                                               overflow-buffer
+                                               (init-batching-buffer worker-context component-id task-id)
+                                               transfer-fn)
+                      ack-fn (get emit-msg-fns ACKER-ACK-STREAM-ID)
+                      fail-fn (get emit-msg-fns ACKER-FAIL-STREAM-ID)
+                      event-fn (get emit-msg-fns EVENTLOGGER-STREAM-ID)
                       bolt-emit (fn [stream anchors values task]
                                   (let [out-tasks (if task
                                                     (tasks-fn task stream values)
@@ -799,15 +922,13 @@
                                                                             (fast-list-iter [root-id root-ids]
                                                                                             (put-xor! anchors-to-ids root-id edge-id))
                                                                             ))))
-                                                        (transfer-fn t
-                                                                   (TupleImpl. worker-context
-                                                                               values
-                                                                               task-id
-                                                                               stream
-                                                                               (MessageId/makeId anchors-to-ids))
-                                                                   overflow-buffer)))
+                                                      ((get emit-msg-fns stream)
+                                                        t
+                                                        values
+                                                        (MessageId/makeId anchors-to-ids)
+                                                        stream)))
                                     (if has-eventloggers?
-                                      (send-to-eventlogger executor-data task-data values overflow-buffer component-id nil rand))
+                                      (send-to-eventlogger executor-data task-data values component-id nil rand event-fn))
                                     (or out-tasks [])))]]
           (builtin-metrics/register-all (:builtin-metrics task-data) storm-conf user-context)
           (when (instance? ICredentialsListener bolt-obj) (.setCredentials bolt-obj initial-credentials)) 
@@ -839,7 +960,8 @@
                            (fast-map-iter [[root id] (.. tuple getMessageId getAnchorsToIds)]
                                           (task/send-unanchored task-data
                                                                 ACKER-ACK-STREAM-ID
-                                                                [root (bit-xor id ack-val)] overflow-buffer)
+                                                                [root (bit-xor id ack-val)]
+                                                                ack-fn)
                                           ))
                          (let [delta (tuple-time-delta! tuple)
                                debug? (= true (storm-conf TOPOLOGY-DEBUG))]
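Side note on the [root (bit-xor id ack-val)] payload above (the ack semantics are unchanged by this patch, only the delivery path moves to ack-fn): the acker keeps one running XOR per tuple tree, which returns to zero exactly when every emitted tuple has been acked. A worked example:

    ;; Worked example of the acker XOR invariant (illustrative, not patch code).
    (let [e1 0x9f3a1bc4                                          ; edge ids are
          e2 0x41c700aa                                          ; random longs
          after-emits (-> 0 (bit-xor e1) (bit-xor e2))           ; two emits
          after-acks  (-> after-emits (bit-xor e1) (bit-xor e2))]; two acks
      (assert (zero? after-acks)))           ; zero => tree fully processed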
@@ -860,7 +982,8 @@
                          (fast-list-iter [root (.. tuple getMessageId getAnchors)]
                                          (task/send-unanchored task-data
                                                                ACKER-FAIL-STREAM-ID
-                                                               [root] overflow-buffer))
+                                                               [root]
+                                                               fail-fn))
                          (let [delta (tuple-time-delta! tuple)
                                debug? (= true (storm-conf TOPOLOGY-DEBUG))]
                            (when debug? 
diff --git a/storm-core/src/clj/backtype/storm/daemon/task.clj b/storm-core/src/clj/backtype/storm/daemon/task.clj
index 9cf2b858f8f..4c5e5d60d7c 100644
--- a/storm-core/src/clj/backtype/storm/daemon/task.clj
+++ b/storm-core/src/clj/backtype/storm/daemon/task.clj
@@ -16,11 +16,9 @@
 (ns backtype.storm.daemon.task
   (:use [backtype.storm.daemon common])
   (:use [backtype.storm config util log])
-  (:import [backtype.storm.hooks ITaskHook])
-  (:import [backtype.storm.tuple Tuple TupleImpl])
+  (:import [backtype.storm.tuple MessageId])
   (:import [backtype.storm.generated SpoutSpec Bolt StateSpoutSpec StormTopology])
-  (:import [backtype.storm.hooks.info SpoutAckInfo SpoutFailInfo
-            EmitInfo BoltFailInfo BoltAckInfo])
+  (:import [backtype.storm.hooks.info EmitInfo])
   (:import [backtype.storm.task TopologyContext ShellBolt WorkerTopologyContext])
   (:import [backtype.storm.utils Utils])
   (:import [backtype.storm.generated ShellComponent JavaObject])
@@ -105,21 +103,13 @@
 
 ;; TODO: this is all expensive... should be precomputed
 (defn send-unanchored
-  ([task-data stream values overflow-buffer]
-    (let [^TopologyContext topology-context (:system-context task-data)
-          tasks-fn (:tasks-fn task-data)
-          transfer-fn (-> task-data :executor-data :transfer-fn)
-          out-tuple (TupleImpl. topology-context
-                                 values
-                                 (.getThisTaskId topology-context)
-                                 stream)]
+  [task-data stream values transfer-fn]
+  (let [tasks-fn (:tasks-fn task-data)]
       (fast-list-iter [t (tasks-fn stream values)]
         (transfer-fn t
-                     out-tuple
-                     overflow-buffer)
-        )))
-    ([task-data stream values]
-      (send-unanchored task-data stream values nil)
+                     values
+                     (MessageId/makeUnanchored)
+                     stream))
       ))
 
 (defn mk-tasks-fn [task-data]
@@ -185,13 +175,13 @@
     :object (get-task-object (.getRawTopology ^TopologyContext (:system-context <>)) (:component-id executor-data))))
 
 
-(defn mk-task [executor-data task-id]
+(defn mk-task [executor-data task-id transfer-fn]
   (let [task-data (mk-task-data executor-data task-id)
         storm-conf (:storm-conf executor-data)]
     (doseq [klass (storm-conf TOPOLOGY-AUTO-TASK-HOOKS)]
       (.addTaskHook ^TopologyContext (:user-context task-data) (-> klass Class/forName .newInstance)))
     ;; when this is called, the threads for the executor haven't been started yet,
     ;; so we won't be risking trampling on the single-threaded claim strategy disruptor queue
-    (send-unanchored task-data SYSTEM-STREAM-ID ["startup"])
+    (send-unanchored task-data SYSTEM-STREAM-ID ["startup"] transfer-fn)
     task-data
     ))
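With send-unanchored reduced to this single explicit-transfer arity, every caller now supplies the emit function directly, as the mk-task change above shows. The call shape, mirroring the ack path in executor.clj (ack-fn is the per-stream fn looked up via (get emit-msg-fns ACKER-ACK-STREAM-ID)):

    ;; Call shape for the new signature; the final argument is a 4-arity
    ;; emit fn of [task values message-id stream] built by the executor.
    (send-unanchored task-data
                     ACKER-ACK-STREAM-ID
                     [root (bit-xor id ack-val)]
                     ack-fn)

Moving tuple construction out of send-unanchored and into the emit fn is what lets unanchored system-stream sends share the same batching path as regular bolt emits.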
diff --git a/storm-core/src/clj/backtype/storm/daemon/worker.clj b/storm-core/src/clj/backtype/storm/daemon/worker.clj
index 781a9599c01..59e4b1207c8 100644
--- a/storm-core/src/clj/backtype/storm/daemon/worker.clj
+++ b/storm-core/src/clj/backtype/storm/daemon/worker.clj
@@ -24,9 +24,8 @@
   (:import [java.util ArrayList HashMap])
   (:import [backtype.storm.utils Utils TransferDrainer ThriftTopologyUtils WorkerBackpressureThread DisruptorQueue])
   (:import [backtype.storm.messaging TransportFactory])
-  (:import [backtype.storm.messaging TaskMessage IContext IConnection ConnectionWithStatus ConnectionWithStatus$Status])
+  (:import [backtype.storm.messaging IContext IConnection ConnectionWithStatus ConnectionWithStatus$Status])
   (:import [backtype.storm.daemon Shutdownable])
-  (:import [backtype.storm.serialization KryoTupleSerializer])
   (:import [backtype.storm.generated StormTopology])
   (:import [backtype.storm.tuple Fields])
   (:import [backtype.storm.task WorkerTopologyContext])
@@ -109,11 +108,6 @@
               (log-warn "Received invalid messages for unknown tasks. Dropping... ")
               )))))))
 
-(defn- assert-can-serialize [^KryoTupleSerializer serializer tuple-batch]
-  "Check that all of the tuples can be serialized by serializing them."
-  (fast-list-iter [[task tuple :as pair] tuple-batch]
-    (.serialize serializer tuple)))
-
 (defn- mk-backpressure-handler [executors]
   "make a handler that checks and updates worker's backpressure flag"
   (disruptor/worker-backpressure-handler
@@ -146,41 +140,6 @@
       WorkerBackPressureThread to also check for all the executors' status"
       )))
 
-(defn mk-transfer-fn [worker]
-  (let [local-tasks (-> worker :task-ids set)
-        local-transfer (:transfer-local-fn worker)
-        ^DisruptorQueue transfer-queue (:transfer-queue worker)
-        task->node+port (:cached-task->node+port worker)
-        try-serialize-local ((:storm-conf worker) TOPOLOGY-TESTING-ALWAYS-TRY-SERIALIZE)
-
-        transfer-fn
-          (fn [^KryoTupleSerializer serializer tuple-batch]
-            (let [local (ArrayList.)
-                  remoteMap (HashMap.)]
-              (fast-list-iter [[task tuple :as pair] tuple-batch]
-                (if (local-tasks task)
-                  (.add local pair)
-
-                  ;;Using java objects directly to avoid performance issues in java code
-                  (do
-                    (when (not (.get remoteMap task))
-                      (.put remoteMap task (ArrayList.)))
-                    (let [remote (.get remoteMap task)]
-                      (if (not-nil? task)
-                        (.add remote (TaskMessage. task (.serialize serializer tuple)))
-                        (log-warn "Can't transfer tuple - task value is nil. tuple type: " (pr-str (type tuple)) " and information: " (pr-str tuple)))
-                     ))))
-
-              (local-transfer local)
-              (disruptor/publish transfer-queue remoteMap)))]
-    (if try-serialize-local
-      (do
-        (log-warn "WILL TRY TO SERIALIZE ALL TUPLES (Turn off " TOPOLOGY-TESTING-ALWAYS-TRY-SERIALIZE " for production)")
-        (fn [^KryoTupleSerializer serializer tuple-batch]
-          (assert-can-serialize serializer tuple-batch)
-          (transfer-fn serializer tuple-batch)))
-      transfer-fn)))
-
 (defn- mk-receive-queue-map [storm-conf executors]
   (->> executors
        ;; TODO: this depends on the type of executor
@@ -287,7 +246,6 @@
       :user-shared-resources (mk-user-resources <>)
       :transfer-local-fn (mk-transfer-local-fn <>)
       :receiver-thread-count (get storm-conf WORKER-RECEIVER-THREAD-COUNT)
-      :transfer-fn (mk-transfer-fn <>)
       :assignment-versions assignment-versions
       :backpressure (atom false) ;; whether this worker is going slow
       :backpressure-trigger (atom false) ;; a trigger for synchronization with executors
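For reviewers tracking the mk-transfer-fn removal above: the local-versus-remote routing it performed is behavior the executor-side batching path must preserve, namely short-circuit delivery for tasks in the same worker, and serialization plus transfer-queue publication for remote ones. A compact restatement of the removed routing, simplified: local-tasks is assumed to be a set of task ids, serialize stands in for Kryo serialization plus TaskMessage wrapping, and the nil-task warning and TOPOLOGY-TESTING-ALWAYS-TRY-SERIALIZE wrapper are omitted.

    ;; Simplified restatement of the removed routing logic (illustrative only).
    (import '[java.util ArrayList HashMap])

    (defn sketch-route-batch
      [local-tasks local-transfer publish-remote serialize tuple-batch]
      (let [^ArrayList local  (ArrayList.)
            ^HashMap   remote (HashMap.)]
        (doseq [[task tuple :as pair] tuple-batch]
          (if (local-tasks task)
            (.add local pair)                     ; same-worker: skip the wire
            (do (when-not (.get remote task)
                  (.put remote task (ArrayList.)))
                (.add ^ArrayList (.get remote task)
                      (serialize tuple)))))       ; remote: serialize first
        (local-transfer local)
        (publish-remote remote)))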