Skip to content

Commit

Permalink
remove estimated sync time calculations until we have a chance to make
them correct across all databases
Browse files Browse the repository at this point in the history
  • Loading branch information
chenson42 committed Jun 1, 2017
1 parent 2714b43 commit e0a6954
Show file tree
Hide file tree
Showing 5 changed files with 2 additions and 108 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,6 @@ public class IncomingBatchSummary implements Serializable {
private String channel;
private String sqlMessage;
private long errorBatchId;
private float averageRowsPerMilli;

public String getNodeId() {
return nodeId;
Expand Down Expand Up @@ -113,12 +112,4 @@ public void setErrorBatchId(long errorBatchId) {
this.errorBatchId = errorBatchId;
}

/** Returns the average number of rows processed per millisecond for this incoming batch summary. */
public float getAverageRowsPerMilli() {
return averageRowsPerMilli;
}

/** Sets the average number of rows processed per millisecond for this incoming batch summary. */
public void setAverageRowsPerMilli(float averageRowsPerMilli) {
this.averageRowsPerMilli = averageRowsPerMilli;
}

}
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,6 @@ public class OutgoingBatchSummary implements Serializable {
private long errorBatchId;
private long totalBytes;
private long totalMillis;
private float averageRowsPerMilli;

public String getNodeId() {
return nodeId;
Expand Down Expand Up @@ -130,14 +129,6 @@ public long getTotalMillis() {
/** Sets the total elapsed time, in milliseconds, accumulated for the batches in this summary. */
public void setTotalMillis(long totalMillis) {
this.totalMillis = totalMillis;
}

/** Returns the average number of rows processed per millisecond for this outgoing batch summary. */
public float getAverageRowsPerMilli() {
return averageRowsPerMilli;
}

/** Sets the average number of rows processed per millisecond for this outgoing batch summary. */
public void setAverageRowsPerMilli(float averageRowsPerMilli) {
this.averageRowsPerMilli = averageRowsPerMilli;
}


}
Original file line number Diff line number Diff line change
Expand Up @@ -102,9 +102,7 @@ public OutgoingBatches getOutgoingBatches(String nodeId, String channelThread, N

public List<OutgoingBatchSummary> findOutgoingBatchSummary(OutgoingBatch.Status ... statuses);

public List<OutgoingBatchSummary> findOutgoingBatchSummaryByChannel(OutgoingBatch.Status ... statuses);

public Map<String, Float> getNodeThroughputByChannel();
public List<OutgoingBatchSummary> findOutgoingBatchSummaryByChannel(OutgoingBatch.Status ... statuses);

public int countOutgoingBatches(List<String> nodeIds, List<String> channels,
List<OutgoingBatch.Status> statuses, List<String> loads);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,6 @@
import java.util.TreeMap;

import org.apache.commons.lang.StringUtils;
import org.jumpmind.db.platform.DatabaseNamesConstants;
import org.jumpmind.db.sql.ISqlRowMapper;
import org.jumpmind.db.sql.ISqlTransaction;
import org.jumpmind.db.sql.Row;
Expand All @@ -45,6 +44,7 @@
import org.jumpmind.symmetric.common.ParameterConstants;
import org.jumpmind.symmetric.db.ISymmetricDialect;
import org.jumpmind.symmetric.ext.IOutgoingBatchFilter;
import org.jumpmind.symmetric.model.AbstractBatch.Status;
import org.jumpmind.symmetric.model.Channel;
import org.jumpmind.symmetric.model.LoadSummary;
import org.jumpmind.symmetric.model.NodeChannel;
Expand All @@ -53,7 +53,6 @@
import org.jumpmind.symmetric.model.NodeHost;
import org.jumpmind.symmetric.model.NodeSecurity;
import org.jumpmind.symmetric.model.OutgoingBatch;
import org.jumpmind.symmetric.model.AbstractBatch.Status;
import org.jumpmind.symmetric.model.OutgoingBatchSummary;
import org.jumpmind.symmetric.model.OutgoingBatches;
import org.jumpmind.symmetric.model.OutgoingLoadSummary;
Expand Down Expand Up @@ -652,56 +651,6 @@ public List<OutgoingBatchSummary> findOutgoingBatchSummaryByChannel(Status... st

return sqlTemplateDirty.query(sql, new OutgoingBatchSummaryChannelMapper(), args);
}

/**
 * Queries batch throughput per node and channel (both directions) and returns a
 * map keyed by "nodeId-channelId-direction".
 *
 * H2 cannot average date columns directly, so it uses a dedicated SQL statement
 * and the row mapper is told that averaged dates are unavailable.
 */
public Map<String, Float> getNodeThroughputByChannel() {
    boolean isH2 = platform.getName().equals(DatabaseNamesConstants.H2);
    String statementName = isH2 ? "getNodeThroughputByChannelH2Sql" : "getNodeThroughputByChannelSql";
    NodeThroughputMapper rowMapper = new NodeThroughputMapper(!isH2);
    sqlTemplateDirty.query(getSql(statementName), rowMapper);
    return rowMapper.getThroughputMap();
}

/**
 * Maps throughput summary rows into {@link #throughputMap}, keyed by
 * "nodeId-channelId-direction" with the value expressed as rows per millisecond.
 *
 * When the database can average date columns ({@code isAverageDateSupported}),
 * elapsed time is derived from the averaged create/last-update timestamps;
 * otherwise the query is expected to return those averages as epoch-millisecond
 * longs instead.
 */
private class NodeThroughputMapper implements ISqlRowMapper<Object> {
    Map<String, Float> throughputMap = new HashMap<String, Float>();
    boolean isAverageDateSupported;

    public NodeThroughputMapper(boolean isAverageDateSupported) {
        this.isAverageDateSupported = isAverageDateSupported;
    }

    @Override
    public Object mapRow(Row row) {
        Long totalRows = row.getLong("total_rows");
        long avgTime = 0;

        if (this.isAverageDateSupported) {
            // NOTE(review): assumes both averaged date columns are non-null — TODO confirm
            Date avgCreateTime = row.getDateTime("average_create_time");
            Date avgLastUpdatedTime = row.getDateTime("average_last_update_time");
            avgTime = avgLastUpdatedTime.getTime() - avgCreateTime.getTime();
        } else {
            // BUG FIX: operands were reversed (create - lastUpdate), which is the
            // opposite of the branch above and produced a negative elapsed time.
            avgTime = row.getLong("average_last_update_time") - row.getLong("average_create_time");
        }

        // BUG FIX: totalRows / avgTime was long integer division before the float
        // cast, truncating throughput to a whole number; divide in floating point.
        float throughput = avgTime > 0 ? (float) totalRows / avgTime : totalRows;
        throughputMap.put(row.getString("node_id") + "-" + row.getString("channel_id") + "-" +
                row.get("direction"), throughput);
        return null;
    }

    public Map<String, Float> getThroughputMap() {
        return throughputMap;
    }
}

public Set<Long> getActiveLoads(String sourceNodeId) {
Set<Long> loads = new HashSet<Long>();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -240,41 +240,6 @@ public OutgoingBatchServiceSqlMap(IDatabasePlatform platform,

putSql("getAllBatchesSql", "select batch_id from $(outgoing_batch)");

putSql("getNodeThroughputByChannelSql", "select node_id, channel_id, direction, total_rows, "
+ " average_create_time, average_last_update_time from ( "
+ " select node_id, channel_id, 'outgoing' as direction, "
+ " sum(byte_count) as total_rows, avg(create_time) as average_create_time, "
+ " avg(last_update_time) as average_last_update_time "
+ " from sym_outgoing_batch where status = 'OK' and sent_count = 1 "
+ " group by node_id, channel_id order by node_id "
+ " ) a "
+ " union all "
+ " select node_id, channel_id, direction, total_rows, average_create_time, "
+ " average_last_update_time from ( "
+ " select node_id, channel_id, 'incoming' as direction, "
+ " sum(load_row_count) as total_rows, avg(create_time) as average_create_time, "
+ " avg(last_update_time) as average_last_update_time "
+ " from sym_incoming_batch where status = 'OK' "
+ " group by node_id, channel_id order by node_id "
+ " ) b");

putSql("getNodeThroughputByChannelH2Sql", "select node_id, channel_id, direction, total_rows, "
+ " average_create_time, average_last_update_time from ( "
+ " select node_id, channel_id, 'outgoing' as direction, "
+ " sum(byte_count) as total_rows, avg(datediff('ms', '1970-01-01', create_time)) as average_create_time, "
+ " avg(datediff('ms', '1970-01-01', last_update_time)) as average_last_update_time "
+ " from sym_outgoing_batch where status = 'OK' and sent_count = 1 "
+ " group by node_id, channel_id order by node_id "
+ " ) a "
+ " union all "
+ " select node_id, channel_id, direction, total_rows, average_create_time, "
+ " average_last_update_time from ( "
+ " select node_id, channel_id, 'incoming' as direction, "
+ " sum(load_row_count) as total_rows, avg(datediff('ms', '1970-01-01', create_time)) as average_create_time, "
+ " avg(datediff('ms', '1970-01-01', last_update_time)) as average_last_update_time "
+ " from sym_incoming_batch where status = 'OK' "
+ " group by node_id, channel_id order by node_id "
+ " ) b");
}

}

0 comments on commit e0a6954

Please sign in to comment.