package com.cyngn.chrono.storage.cassandra.udt;
import com.datastax.driver.mapping.annotations.Field;
import com.datastax.driver.mapping.annotations.FrozenValue;
import com.datastax.driver.mapping.annotations.UDT;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.lang.Override;
import java.lang.String;
import java.util.Set;
/**
* GENERATED CODE DO NOT MODIFY - last updated: 2015-12-17T21:37:15.628Z
* generated by exovert - https://github.com/cyngn/exovert
*
* UDT class for Cassandra - url_package
*/
@UDT(keyspace = "chrono", name = "url_package")
public class UrlPackage {
    /** HTTP method associated with this package of URLs. */
    @Field
    @JsonProperty
    public String method;

    /** Set of URLs exercised with {@link #method}. */
    @FrozenValue
    @Field
    @JsonProperty
    public Set<String> urls;

    public String getMethod() { return this.method; }

    public void setMethod(String method) { this.method = method; }

    public Set<String> getUrls() { return this.urls; }

    public void setUrls(Set<String> urls) { this.urls = urls; }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("UrlPackage{");
        sb.append("method=").append(method);
        sb.append(", urls=").append(urls);
        sb.append("}");
        return sb.toString();
    }
}
package com.cyngn.chrono.storage.cassandra.udt;
import com.datastax.driver.mapping.annotations.Field;
import com.datastax.driver.mapping.annotations.UDT;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.lang.Long;
import java.lang.Override;
import java.lang.String;
/**
* GENERATED CODE DO NOT MODIFY - last updated: 2015-12-17T21:37:15.628Z
* generated by exovert - https://github.com/cyngn/exovert
*
* UDT class for Cassandra - measurement
*/
@UDT(keyspace = "chrono", name = "measurement")
public class Measurement {
    /** URL that was timed. */
    @Field
    @JsonProperty
    public String url;

    /** Observed latency for {@link #url}, in milliseconds. */
    @Field(name = "time_in_milli")
    @JsonProperty("time_in_milli")
    public Long timeInMilli;

    public String getUrl() { return this.url; }

    public void setUrl(String url) { this.url = url; }

    public Long getTimeInMilli() { return this.timeInMilli; }

    public void setTimeInMilli(Long timeInMilli) { this.timeInMilli = timeInMilli; }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("Measurement{");
        sb.append("url=").append(url);
        sb.append(", timeInMilli=").append(timeInMilli);
        sb.append("}");
        return sb.toString();
    }
}
package com.cyngn.chrono.storage.cassandra.table;
import com.datastax.driver.mapping.annotations.ClusteringColumn;
import com.datastax.driver.mapping.annotations.Column;
import com.datastax.driver.mapping.annotations.PartitionKey;
import com.datastax.driver.mapping.annotations.Table;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.lang.Long;
import java.lang.Override;
import java.lang.String;
/**
* GENERATED CODE DO NOT MODIFY - last updated: 2015-12-17T21:37:15.628Z
* generated by exovert - https://github.com/cyngn/exovert
*
* Table class for Cassandra - payload
*/
@Table(keyspace = "chrono", name = "payload")
public class Payload {
    /** Partition key: the unit the payload size is expressed in. */
    @Column
    @JsonProperty
    @PartitionKey(0)
    public String unit;

    /** Clustering column: payload size in {@link #unit}s. */
    @Column
    @JsonProperty
    @ClusteringColumn(0)
    public Long size;

    /** The payload body itself. */
    @Column
    @JsonProperty
    public String data;

    public String getUnit() { return this.unit; }

    public void setUnit(String unit) { this.unit = unit; }

    public Long getSize() { return this.size; }

    public void setSize(Long size) { this.size = size; }

    public String getData() { return this.data; }

    public void setData(String data) { this.data = data; }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("Payload{");
        sb.append("unit=").append(unit);
        sb.append(", size=").append(size);
        sb.append(", data=").append(data);
        sb.append("}");
        return sb.toString();
    }
}
package com.cyngn.chrono.storage.cassandra.table;
import com.cyngn.chrono.storage.cassandra.udt.Measurement;
import com.datastax.driver.mapping.annotations.ClusteringColumn;
import com.datastax.driver.mapping.annotations.Column;
import com.datastax.driver.mapping.annotations.FrozenValue;
import com.datastax.driver.mapping.annotations.PartitionKey;
import com.datastax.driver.mapping.annotations.Table;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.lang.Override;
import java.lang.String;
import java.util.Date;
import java.util.List;
/**
* GENERATED CODE DO NOT MODIFY - last updated: 2015-12-17T21:37:15.628Z
* generated by exovert - https://github.com/cyngn/exovert
*
* Table class for Cassandra - report
*/
@Table(keyspace = "chrono", name = "report")
public class Report {
    /** Partition key: the test batch this report belongs to. */
    @Column(name = "batch_name")
    @JsonProperty("batch_name")
    @PartitionKey(0)
    public String batchName;

    /** First clustering column: reporting device identifier. */
    @Column(name = "device_id")
    @JsonProperty("device_id")
    @ClusteringColumn(0)
    public String deviceId;

    /** Second clustering column: report creation time. */
    @Column
    @JsonProperty
    @ClusteringColumn(1)
    public Date created;

    @Column(name = "client_ip")
    @JsonProperty("client_ip")
    public String clientIp;

    @Column(name = "gps_coordinates")
    @JsonProperty("gps_coordinates")
    public String gpsCoordinates;

    /** Per-URL timing measurements collected for this report. */
    @FrozenValue
    @Column
    @JsonProperty
    public List<Measurement> measurements;

    @Column(name = "mobile_carrier")
    @JsonProperty("mobile_carrier")
    public String mobileCarrier;

    @Column(name = "mobile_network_class")
    @JsonProperty("mobile_network_class")
    public String mobileNetworkClass;

    @Column(name = "mobile_network_type")
    @JsonProperty("mobile_network_type")
    public String mobileNetworkType;

    @Column(name = "mobile_rssi")
    @JsonProperty("mobile_rssi")
    public String mobileRssi;

    @Column
    @JsonProperty
    public String mode;

    @Column
    @JsonProperty
    public String tag;

    @Column(name = "wifi_rssi")
    @JsonProperty("wifi_rssi")
    public String wifiRssi;

    @Column(name = "wifi_state")
    @JsonProperty("wifi_state")
    public String wifiState;

    public String getBatchName() { return this.batchName; }

    public void setBatchName(String batchName) { this.batchName = batchName; }

    public String getDeviceId() { return this.deviceId; }

    public void setDeviceId(String deviceId) { this.deviceId = deviceId; }

    public Date getCreated() { return this.created; }

    public void setCreated(Date created) { this.created = created; }

    public String getClientIp() { return this.clientIp; }

    public void setClientIp(String clientIp) { this.clientIp = clientIp; }

    public String getGpsCoordinates() { return this.gpsCoordinates; }

    public void setGpsCoordinates(String gpsCoordinates) { this.gpsCoordinates = gpsCoordinates; }

    public List<Measurement> getMeasurements() { return this.measurements; }

    public void setMeasurements(List<Measurement> measurements) { this.measurements = measurements; }

    public String getMobileCarrier() { return this.mobileCarrier; }

    public void setMobileCarrier(String mobileCarrier) { this.mobileCarrier = mobileCarrier; }

    public String getMobileNetworkClass() { return this.mobileNetworkClass; }

    public void setMobileNetworkClass(String mobileNetworkClass) { this.mobileNetworkClass = mobileNetworkClass; }

    public String getMobileNetworkType() { return this.mobileNetworkType; }

    public void setMobileNetworkType(String mobileNetworkType) { this.mobileNetworkType = mobileNetworkType; }

    public String getMobileRssi() { return this.mobileRssi; }

    public void setMobileRssi(String mobileRssi) { this.mobileRssi = mobileRssi; }

    public String getMode() { return this.mode; }

    public void setMode(String mode) { this.mode = mode; }

    public String getTag() { return this.tag; }

    public void setTag(String tag) { this.tag = tag; }

    public String getWifiRssi() { return this.wifiRssi; }

    public void setWifiRssi(String wifiRssi) { this.wifiRssi = wifiRssi; }

    public String getWifiState() { return this.wifiState; }

    public void setWifiState(String wifiState) { this.wifiState = wifiState; }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("Report{");
        sb.append("batchName=").append(batchName);
        sb.append(", deviceId=").append(deviceId);
        sb.append(", created=").append(created);
        sb.append(", clientIp=").append(clientIp);
        sb.append(", gpsCoordinates=").append(gpsCoordinates);
        sb.append(", measurements=").append(measurements);
        sb.append(", mobileCarrier=").append(mobileCarrier);
        sb.append(", mobileNetworkClass=").append(mobileNetworkClass);
        sb.append(", mobileNetworkType=").append(mobileNetworkType);
        sb.append(", mobileRssi=").append(mobileRssi);
        sb.append(", mode=").append(mode);
        sb.append(", tag=").append(tag);
        sb.append(", wifiRssi=").append(wifiRssi);
        sb.append(", wifiState=").append(wifiState);
        sb.append("}");
        return sb.toString();
    }
}
package com.cyngn.chrono.storage.cassandra.table;
import com.datastax.driver.mapping.annotations.ClusteringColumn;
import com.datastax.driver.mapping.annotations.Column;
import com.datastax.driver.mapping.annotations.PartitionKey;
import com.datastax.driver.mapping.annotations.Table;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.lang.Long;
import java.lang.Override;
import java.lang.String;
import java.util.Date;
/**
* GENERATED CODE DO NOT MODIFY - last updated: 2015-12-17T21:37:15.628Z
* generated by exovert - https://github.com/cyngn/exovert
*
* Table class for Cassandra - upload_data
*/
@Table(keyspace = "chrono", name = "upload_data")
public class UploadData {
    /** Partition key: owning test batch name. */
    @Column(name = "test_batch")
    @JsonProperty("test_batch")
    @PartitionKey(0)
    public String testBatch;

    /** First clustering column: unit the size is expressed in. */
    @Column
    @JsonProperty
    @ClusteringColumn(0)
    public String unit;

    /** Second clustering column: upload size in {@link #unit}s. */
    @Column
    @JsonProperty
    @ClusteringColumn(1)
    public Long size;

    /** Third clustering column: time the row was created. */
    @Column
    @JsonProperty
    @ClusteringColumn(2)
    public Date created;

    /** The uploaded payload body. */
    @Column
    @JsonProperty
    public String data;

    public String getTestBatch() { return this.testBatch; }

    public void setTestBatch(String testBatch) { this.testBatch = testBatch; }

    public String getUnit() { return this.unit; }

    public void setUnit(String unit) { this.unit = unit; }

    public Long getSize() { return this.size; }

    public void setSize(Long size) { this.size = size; }

    public Date getCreated() { return this.created; }

    public void setCreated(Date created) { this.created = created; }

    public String getData() { return this.data; }

    public void setData(String data) { this.data = data; }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("UploadData{");
        sb.append("testBatch=").append(testBatch);
        sb.append(", unit=").append(unit);
        sb.append(", size=").append(size);
        sb.append(", created=").append(created);
        sb.append(", data=").append(data);
        sb.append("}");
        return sb.toString();
    }
}
package com.cyngn.chrono.storage.cassandra.table;
import com.cyngn.chrono.storage.cassandra.udt.UrlPackage;
import com.datastax.driver.mapping.annotations.Column;
import com.datastax.driver.mapping.annotations.FrozenValue;
import com.datastax.driver.mapping.annotations.PartitionKey;
import com.datastax.driver.mapping.annotations.Table;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.lang.Override;
import java.lang.String;
import java.util.Date;
import java.util.List;
/**
* GENERATED CODE DO NOT MODIFY - last updated: 2015-12-17T21:37:15.628Z
* generated by exovert - https://github.com/cyngn/exovert
*
* Table class for Cassandra - test_batch
*/
@Table(keyspace = "chrono", name = "test_batch")
public class TestBatch {
    /** Partition key: unique batch name. */
    @Column
    @JsonProperty
    @PartitionKey(0)
    public String name;

    /** Time the batch was created. */
    @Column
    @JsonProperty
    public Date created;

    /** URL packages that make up this batch. */
    @FrozenValue
    @Column(name = "url_packages")
    @JsonProperty("url_packages")
    public List<UrlPackage> urlPackages;

    public String getName() { return this.name; }

    public void setName(String name) { this.name = name; }

    public Date getCreated() { return this.created; }

    public void setCreated(Date created) { this.created = created; }

    public List<UrlPackage> getUrlPackages() { return this.urlPackages; }

    public void setUrlPackages(List<UrlPackage> urlPackages) { this.urlPackages = urlPackages; }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("TestBatch{");
        sb.append("name=").append(name);
        sb.append(", created=").append(created);
        sb.append(", urlPackages=").append(urlPackages);
        sb.append("}");
        return sb.toString();
    }
}
package com.cyngn.chrono.storage.cassandra.dal;
import com.cyngn.chrono.storage.cassandra.table.Payload;
import com.datastax.driver.mapping.Result;
import com.datastax.driver.mapping.annotations.Accessor;
import com.datastax.driver.mapping.annotations.Param;
import com.datastax.driver.mapping.annotations.Query;
import com.google.common.util.concurrent.ListenableFuture;
import java.lang.String;
/**
* GENERATED CODE DO NOT MODIFY - last updated: 2015-12-17T21:37:15.628Z
* generated by exovert - https://github.com/cyngn/exovert
*
* Accessor for Cassandra entity - {@link com.cyngn.chrono.storage.cassandra.table.Payload}
*/
@Accessor
public interface PayloadAccessor {
/**
 * Fetch every payload row in the given partition.
 *
 * @param unit partition key of chrono.payload
 * @return future resolving to the async query result over all matching rows
 */
@Query("SELECT * FROM chrono.payload WHERE unit=:unit")
ListenableFuture<Result<Payload>> getAll(@Param("unit") final String unit);
}
// File: ReportAccessor.java
package com.cyngn.chrono.storage.cassandra.dal;
import com.cyngn.chrono.storage.cassandra.table.Report;
import com.datastax.driver.mapping.Result;
import com.datastax.driver.mapping.annotations.Accessor;
import com.datastax.driver.mapping.annotations.Param;
import com.datastax.driver.mapping.annotations.Query;
import com.google.common.util.concurrent.ListenableFuture;
import java.lang.String;
/**
* GENERATED CODE DO NOT MODIFY - last updated: 2015-12-17T21:37:15.628Z
* generated by exovert - https://github.com/cyngn/exovert
*
* Accessor for Cassandra entity - {@link com.cyngn.chrono.storage.cassandra.table.Report}
*/
@Accessor
public interface ReportAccessor {
/**
 * Fetch every report in the given batch partition.
 *
 * @param batchName partition key of chrono.report
 * @return future resolving to the async query result over all matching rows
 */
@Query("SELECT * FROM chrono.report WHERE batch_name=:batch_name")
ListenableFuture<Result<Report>> getAll(@Param("batch_name") final String batchName);
/**
 * Fetch the reports in a batch narrowed to a single device (first clustering column).
 *
 * @param batchName partition key of chrono.report
 * @param deviceId first clustering column value
 * @return future resolving to the async query result over all matching rows
 */
@Query("SELECT * FROM chrono.report WHERE batch_name=:batch_name AND device_id=:device_id")
ListenableFuture<Result<Report>> getAll(@Param("batch_name") final String batchName, @Param("device_id") final String deviceId);
}
package com.cyngn.chrono.storage.cassandra.dal;
import com.cyngn.chrono.storage.cassandra.table.UploadData;
import com.datastax.driver.mapping.Result;
import com.datastax.driver.mapping.annotations.Accessor;
import com.datastax.driver.mapping.annotations.Param;
import com.datastax.driver.mapping.annotations.Query;
import com.google.common.util.concurrent.ListenableFuture;
import java.lang.Long;
import java.lang.String;
/**
* GENERATED CODE DO NOT MODIFY - last updated: 2015-12-17T21:37:15.628Z
* generated by exovert - https://github.com/cyngn/exovert
*
* Accessor for Cassandra entity - {@link com.cyngn.chrono.storage.cassandra.table.UploadData}
*/
@Accessor
public interface UploadDataAccessor {
/**
 * Fetch every upload-data row in the given batch partition.
 *
 * @param testBatch partition key of chrono.upload_data
 * @return future resolving to the async query result over all matching rows
 */
@Query("SELECT * FROM chrono.upload_data WHERE test_batch=:test_batch")
ListenableFuture<Result<UploadData>> getAll(@Param("test_batch") final String testBatch);
/**
 * Fetch upload-data rows narrowed by unit (first clustering column).
 *
 * @param testBatch partition key of chrono.upload_data
 * @param unit first clustering column value
 * @return future resolving to the async query result over all matching rows
 */
@Query("SELECT * FROM chrono.upload_data WHERE test_batch=:test_batch AND unit=:unit")
ListenableFuture<Result<UploadData>> getAll(@Param("test_batch") final String testBatch, @Param("unit") final String unit);
/**
 * Fetch upload-data rows narrowed by unit and size (first two clustering columns).
 *
 * @param testBatch partition key of chrono.upload_data
 * @param unit first clustering column value
 * @param size second clustering column value
 * @return future resolving to the async query result over all matching rows
 */
@Query("SELECT * FROM chrono.upload_data WHERE test_batch=:test_batch AND unit=:unit AND size=:size")
ListenableFuture<Result<UploadData>> getAll(@Param("test_batch") final String testBatch, @Param("unit") final String unit, @Param("size") final Long size);
}
package com.cyngn.chrono.storage.cassandra.dal;
import com.cyngn.chrono.storage.cassandra.table.TestBatch;
import com.datastax.driver.mapping.Result;
import com.datastax.driver.mapping.annotations.Accessor;
import com.datastax.driver.mapping.annotations.Query;
import com.google.common.util.concurrent.ListenableFuture;
/**
* GENERATED CODE DO NOT MODIFY - last updated: 2015-12-17T21:37:15.628Z
* generated by exovert - https://github.com/cyngn/exovert
*
* Accessor for Cassandra entity - {@link com.cyngn.chrono.storage.cassandra.table.TestBatch}
*/
@Accessor
public interface TestBatchAccessor {
/**
 * Fetch every row of chrono.test_batch (full-table scan; no WHERE clause).
 *
 * @return future resolving to the async query result over all rows
 */
@Query("SELECT * FROM chrono.test_batch")
ListenableFuture<Result<TestBatch>> getAll();
}
package com.cyngn.chrono.storage.cassandra.dal;
import com.cyngn.vertx.async.ResultContext;
import java.lang.Object;
import java.util.function.Consumer;
/**
* GENERATED CODE DO NOT MODIFY - last updated: 2015-12-17T21:37:15.628Z
* generated by exovert - https://github.com/cyngn/exovert
*
* Common interface for all DAL classes
*/
public interface CommonDal<T> {
/**
 * Persist the given entity asynchronously.
 *
 * @param entity the entity to save
 * @param onComplete invoked with the outcome when the operation finishes
 */
void save(final T entity, final Consumer<ResultContext> onComplete);
/**
 * Look up a single entity by its primary key components, in declaration order.
 *
 * @param onComplete invoked with the fetched entity (or failure) when done
 * @param primaryKeys partition key column(s) followed by clustering column(s)
 */
void get(final Consumer<ResultContext<T>> onComplete, final Object... primaryKeys);
/**
 * Delete an entity identified by its primary key components.
 *
 * @param onComplete invoked with the outcome when the operation finishes
 * @param primaryKeys partition key column(s) followed by clustering column(s)
 */
void delete(final Consumer<ResultContext> onComplete, final Object... primaryKeys);
/**
 * Delete the given entity instance.
 *
 * @param entity the entity to delete
 * @param onComplete invoked with the outcome when the operation finishes
 */
void delete(final T entity, final Consumer<ResultContext> onComplete);
}
package com.cyngn.chrono.storage.cassandra.dal;
import com.cyngn.chrono.storage.cassandra.table.Payload;
import com.cyngn.vertx.async.ResultContext;
import com.datastax.driver.mapping.MappingManager;
import com.datastax.driver.mapping.Result;
import com.englishtown.vertx.cassandra.CassandraSession;
import com.englishtown.vertx.cassandra.FutureUtils;
import com.englishtown.vertx.cassandra.mapping.VertxMapper;
import com.englishtown.vertx.cassandra.mapping.VertxMappingManager;
import com.englishtown.vertx.cassandra.mapping.impl.DefaultVertxMappingManager;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.ListenableFuture;
import io.vertx.core.Vertx;
import java.lang.Object;
import java.lang.Override;
import java.lang.String;
import java.lang.Throwable;
import java.lang.Void;
import java.util.List;
import java.util.function.Consumer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* GENERATED CODE DO NOT MODIFY - last updated: 2015-12-17T21:37:15.628Z
* generated by exovert - https://github.com/cyngn/exovert
*
* DAL for Cassandra entity - {@link com.cyngn.chrono.storage.cassandra.table.Payload}
*/
public class PayloadDal implements CommonDal<Payload> {
private static final Logger logger = LoggerFactory.getLogger(PayloadDal.class);
final CassandraSession session;
final VertxMapper<Payload> mapper;
final PayloadAccessor accessor;
final Vertx vertx;
public PayloadDal(final CassandraSession session) {
this.session = session;
VertxMappingManager manager = new DefaultVertxMappingManager(session);
mapper = manager.mapper(Payload.class);
MappingManager accessorMappingManager = new MappingManager(session.getSession());
accessor = accessorMappingManager.createAccessor(PayloadAccessor.class);
vertx = session.getVertx();
}
/**
* Save a {@link com.cyngn.chrono.storage.cassandra.table.Payload} object.
*/
public void save(final Payload payloadObj, final Consumer<ResultContext> onComplete) {
logger.info("save - {}", payloadObj);
mapper.saveAsync(payloadObj, new FutureCallback<Void>() {
@Override
public void onSuccess(Void result) {
onComplete.accept(new ResultContext(true));
}
@Override
public void onFailure(Throwable error) {
logger.error("save - {}, ex: ", payloadObj, error);
onComplete.accept(new ResultContext(error, "Failed to save Payload: " + payloadObj));
}
});
}
/**
* Delete a {@link com.cyngn.chrono.storage.cassandra.table.Payload} object.
*/
public void delete(final Payload payloadObj, final Consumer<ResultContext> onComplete) {
logger.info("delete - {}", payloadObj);
mapper.deleteAsync(payloadObj, new FutureCallback<Void>() {
@Override
public void onSuccess(Void result) {
onComplete.accept(new ResultContext(true));
}
@Override
public void onFailure(Throwable error) {
logger.error("delete - {}, ex: ", payloadObj, error);
onComplete.accept(new ResultContext(error, "Failed to delete Payload: " + payloadObj));
}
});
}
/**
* Delete a {@link com.cyngn.chrono.storage.cassandra.table.Payload} object by key.
*/
public void delete(final Consumer<ResultContext> onComplete, final Object... primaryKey) {
logger.info("delete - {}", primaryKey);
mapper.deleteAsync(new FutureCallback<Void>() {
@Override
public void onSuccess(Void result) {
onComplete.accept(new ResultContext(true));
}
@Override
public void onFailure(Throwable error) {
logger.error("delete - {}, ex: ", primaryKey, error);
onComplete.accept(new ResultContext(error, "Failed to delete Payload by key: " + primaryKey));
}
}, primaryKey);
}
/**
* Get a {@link com.cyngn.chrono.storage.cassandra.table.Payload} object by primary key.
*/
public void get(final Consumer<ResultContext<Payload>> onComplete, final Object... primaryKey) {
logger.info("get - {}", primaryKey);
mapper.getAsync(new FutureCallback<Payload>() {
@Override
public void onSuccess(Payload result) {
onComplete.accept(new ResultContext(true, result));
}
@Override
public void onFailure(Throwable error) {
logger.error("get - {}, ex: ", primaryKey, error);
onComplete.accept(new ResultContext(error, "Failed to get Payload by key: " + primaryKey));
}
}, primaryKey);
}
/**
* Get all matching {@link com.cyngn.chrono.storage.cassandra.table.Payload}(s) by sub key.
*/
public void getAll(final String unit, final Consumer<ResultContext<List<Payload>>> onComplete) {
logger.info("getAll - unit: {}", unit);
ListenableFuture<Result<Payload>> future = accessor.getAll(unit);
FutureUtils.addCallback(future, new FutureCallback<Result<Payload>>() {
@Override
public void onSuccess(Result<Payload> result) {
onComplete.accept(new ResultContext(true, result.all()));
}
@Override
public void onFailure(Throwable error) {
logger.error("getAll - unit: {} ex: ", unit, error);
onComplete.accept(new ResultContext(error, "Failed to get all."));
}
}, vertx);
}
}
package com.cyngn.chrono.storage.cassandra.dal;
import com.cyngn.chrono.storage.cassandra.table.Report;
import com.cyngn.vertx.async.ResultContext;
import com.datastax.driver.mapping.MappingManager;
import com.datastax.driver.mapping.Result;
import com.englishtown.vertx.cassandra.CassandraSession;
import com.englishtown.vertx.cassandra.FutureUtils;
import com.englishtown.vertx.cassandra.mapping.VertxMapper;
import com.englishtown.vertx.cassandra.mapping.VertxMappingManager;
import com.englishtown.vertx.cassandra.mapping.impl.DefaultVertxMappingManager;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.ListenableFuture;
import io.vertx.core.Vertx;
import java.lang.Object;
import java.lang.Override;
import java.lang.String;
import java.lang.Throwable;
import java.lang.Void;
import java.util.List;
import java.util.function.Consumer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* GENERATED CODE DO NOT MODIFY - last updated: 2015-12-17T21:37:15.628Z
* generated by exovert - https://github.com/cyngn/exovert
*
* DAL for Cassandra entity - {@link com.cyngn.chrono.storage.cassandra.table.Report}
*/
public class ReportDal implements CommonDal<Report> {
    private static final Logger logger = LoggerFactory.getLogger(ReportDal.class);

    final CassandraSession session;
    final VertxMapper<Report> mapper;
    final ReportAccessor accessor;
    final Vertx vertx;

    /**
     * @param session live Cassandra session used for both the vert.x mapper and the accessor
     */
    public ReportDal(final CassandraSession session) {
        this.session = session;
        VertxMappingManager manager = new DefaultVertxMappingManager(session);
        mapper = manager.mapper(Report.class);
        // Accessor proxies come from the raw driver MappingManager, not the vert.x-aware one.
        MappingManager accessorMappingManager = new MappingManager(session.getSession());
        accessor = accessorMappingManager.createAccessor(ReportAccessor.class);
        vertx = session.getVertx();
    }

    /**
     * Save a {@link com.cyngn.chrono.storage.cassandra.table.Report} object.
     *
     * @param reportObj the entity to persist
     * @param onComplete invoked with the outcome once the async write finishes
     */
    public void save(final Report reportObj, final Consumer<ResultContext> onComplete) {
        logger.info("save - {}", reportObj);
        mapper.saveAsync(reportObj, new FutureCallback<Void>() {
            @Override
            public void onSuccess(Void result) {
                onComplete.accept(new ResultContext(true));
            }
            @Override
            public void onFailure(Throwable error) {
                logger.error("save - {}, ex: ", reportObj, error);
                onComplete.accept(new ResultContext(error, "Failed to save Report: " + reportObj));
            }
        });
    }

    /**
     * Delete a {@link com.cyngn.chrono.storage.cassandra.table.Report} object.
     *
     * @param reportObj the entity to delete
     * @param onComplete invoked with the outcome once the async delete finishes
     */
    public void delete(final Report reportObj, final Consumer<ResultContext> onComplete) {
        logger.info("delete - {}", reportObj);
        mapper.deleteAsync(reportObj, new FutureCallback<Void>() {
            @Override
            public void onSuccess(Void result) {
                onComplete.accept(new ResultContext(true));
            }
            @Override
            public void onFailure(Throwable error) {
                logger.error("delete - {}, ex: ", reportObj, error);
                onComplete.accept(new ResultContext(error, "Failed to delete Report: " + reportObj));
            }
        });
    }

    /**
     * Delete a {@link com.cyngn.chrono.storage.cassandra.table.Report} object by key.
     *
     * @param onComplete invoked with the outcome once the async delete finishes
     * @param primaryKey primary key components, in declaration order
     */
    public void delete(final Consumer<ResultContext> onComplete, final Object... primaryKey) {
        // Render the key array up front: passing an Object[] straight to SLF4J expands it
        // as the varargs list, and string-concatenating it prints only the array's
        // identity hash instead of its contents.
        final String key = java.util.Arrays.toString(primaryKey);
        logger.info("delete - {}", key);
        mapper.deleteAsync(new FutureCallback<Void>() {
            @Override
            public void onSuccess(Void result) {
                onComplete.accept(new ResultContext(true));
            }
            @Override
            public void onFailure(Throwable error) {
                logger.error("delete - {}, ex: ", key, error);
                onComplete.accept(new ResultContext(error, "Failed to delete Report by key: " + key));
            }
        }, primaryKey);
    }

    /**
     * Get a {@link com.cyngn.chrono.storage.cassandra.table.Report} object by primary key.
     *
     * @param onComplete invoked with the fetched entity (or failure) when done
     * @param primaryKey primary key components, in declaration order
     */
    public void get(final Consumer<ResultContext<Report>> onComplete, final Object... primaryKey) {
        // See delete-by-key: format the key array explicitly for logging/messages.
        final String key = java.util.Arrays.toString(primaryKey);
        logger.info("get - {}", key);
        mapper.getAsync(new FutureCallback<Report>() {
            @Override
            public void onSuccess(Report result) {
                onComplete.accept(new ResultContext(true, result));
            }
            @Override
            public void onFailure(Throwable error) {
                logger.error("get - {}, ex: ", key, error);
                onComplete.accept(new ResultContext(error, "Failed to get Report by key: " + key));
            }
        }, primaryKey);
    }

    /**
     * Get all matching {@link com.cyngn.chrono.storage.cassandra.table.Report}(s) by sub key.
     *
     * @param batchName partition key to query
     * @param onComplete invoked with the full materialized row list (or failure)
     */
    public void getAll(final String batchName, final Consumer<ResultContext<List<Report>>> onComplete) {
        logger.info("getAll - batchName: {}", batchName);
        ListenableFuture<Result<Report>> future = accessor.getAll(batchName);
        FutureUtils.addCallback(future, new FutureCallback<Result<Report>>() {
            @Override
            public void onSuccess(Result<Report> result) {
                onComplete.accept(new ResultContext(true, result.all()));
            }
            @Override
            public void onFailure(Throwable error) {
                logger.error("getAll - batchName: {} ex: ", batchName, error);
                onComplete.accept(new ResultContext(error, "Failed to get all."));
            }
        }, vertx);
    }

    /**
     * Get all matching {@link com.cyngn.chrono.storage.cassandra.table.Report}(s) by sub key.
     *
     * @param batchName partition key to query
     * @param deviceId first clustering column value
     * @param onComplete invoked with the full materialized row list (or failure)
     */
    public void getAll(final String batchName, final String deviceId, final Consumer<ResultContext<List<Report>>> onComplete) {
        logger.info("getAll - batchName: {} deviceId: {}", batchName, deviceId);
        ListenableFuture<Result<Report>> future = accessor.getAll(batchName, deviceId);
        FutureUtils.addCallback(future, new FutureCallback<Result<Report>>() {
            @Override
            public void onSuccess(Result<Report> result) {
                onComplete.accept(new ResultContext(true, result.all()));
            }
            @Override
            public void onFailure(Throwable error) {
                logger.error("getAll - batchName: {} deviceId: {} ex: ", batchName, deviceId, error);
                onComplete.accept(new ResultContext(error, "Failed to get all."));
            }
        }, vertx);
    }
}
package com.cyngn.chrono.storage.cassandra.dal;
import com.cyngn.chrono.storage.cassandra.table.UploadData;
import com.cyngn.vertx.async.ResultContext;
import com.datastax.driver.mapping.MappingManager;
import com.datastax.driver.mapping.Result;
import com.englishtown.vertx.cassandra.CassandraSession;
import com.englishtown.vertx.cassandra.FutureUtils;
import com.englishtown.vertx.cassandra.mapping.VertxMapper;
import com.englishtown.vertx.cassandra.mapping.VertxMappingManager;
import com.englishtown.vertx.cassandra.mapping.impl.DefaultVertxMappingManager;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.ListenableFuture;
import io.vertx.core.Vertx;
import java.lang.Long;
import java.lang.Object;
import java.lang.Override;
import java.lang.String;
import java.lang.Throwable;
import java.lang.Void;
import java.util.List;
import java.util.function.Consumer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* GENERATED CODE DO NOT MODIFY - last updated: 2015-12-17T21:37:15.628Z
* generated by exovert - https://github.com/cyngn/exovert
*
* DAL for Cassandra entity - {@link com.cyngn.chrono.storage.cassandra.table.UploadData}
*/
public class UploadDataDal implements CommonDal<UploadData> {
private static final Logger logger = LoggerFactory.getLogger(UploadDataDal.class);
final CassandraSession session;
final VertxMapper<UploadData> mapper;
final UploadDataAccessor accessor;
final Vertx vertx;
public UploadDataDal(final CassandraSession session) {
this.session = session;
VertxMappingManager manager = new DefaultVertxMappingManager(session);
mapper = manager.mapper(UploadData.class);
MappingManager accessorMappingManager = new MappingManager(session.getSession());
accessor = accessorMappingManager.createAccessor(UploadDataAccessor.class);
vertx = session.getVertx();
}
/**
* Save a {@link com.cyngn.chrono.storage.cassandra.table.UploadData} object.
*/
public void save(final UploadData uploadDataObj, final Consumer<ResultContext> onComplete) {
logger.info("save - {}", uploadDataObj);
mapper.saveAsync(uploadDataObj, new FutureCallback<Void>() {
@Override
public void onSuccess(Void result) {
onComplete.accept(new ResultContext(true));
}
@Override
public void onFailure(Throwable error) {
logger.error("save - {}, ex: ", uploadDataObj, error);
onComplete.accept(new ResultContext(error, "Failed to save UploadData: " + uploadDataObj));
}
});
}
/**
* Delete a {@link com.cyngn.chrono.storage.cassandra.table.UploadData} object.
*/
public void delete(final UploadData uploadDataObj, final Consumer<ResultContext> onComplete) {
logger.info("delete - {}", uploadDataObj);
mapper.deleteAsync(uploadDataObj, new FutureCallback<Void>() {
@Override
public void onSuccess(Void result) {
onComplete.accept(new ResultContext(true));
}
@Override
public void onFailure(Throwable error) {
logger.error("delete - {}, ex: ", uploadDataObj, error);
onComplete.accept(new ResultContext(error, "Failed to delete UploadData: " + uploadDataObj));
}
});
}
/**
* Delete a {@link com.cyngn.chrono.storage.cassandra.table.UploadData} object by key.
*/
public void delete(final Consumer<ResultContext> onComplete, final Object... primaryKey) {
logger.info("delete - {}", primaryKey);
mapper.deleteAsync(new FutureCallback<Void>() {
@Override
public void onSuccess(Void result) {
onComplete.accept(new ResultContext(true));
}
@Override
public void onFailure(Throwable error) {
logger.error("delete - {}, ex: ", primaryKey, error);
onComplete.accept(new ResultContext(error, "Failed to delete UploadData by key: " + primaryKey));
}
}, primaryKey);
}
/**
* Get a {@link com.cyngn.chrono.storage.cassandra.table.UploadData} object by primary key.
*/
public void get(final Consumer<ResultContext<UploadData>> onComplete, final Object... primaryKey) {
logger.info("get - {}", primaryKey);
mapper.getAsync(new FutureCallback<UploadData>() {
@Override
public void onSuccess(UploadData result) {
onComplete.accept(new ResultContext(true, result));
}
@Override
public void onFailure(Throwable error) {
logger.error("get - {}, ex: ", primaryKey, error);
onComplete.accept(new ResultContext(error, "Failed to get UploadData by key: " + primaryKey));
}
}, primaryKey);
}
/**
* Get all matching {@link com.cyngn.chrono.storage.cassandra.table.UploadData}(s) by sub key.
*/
public void getAll(final String testBatch, final Consumer<ResultContext<List<UploadData>>> onComplete) {
logger.info("getAll - testBatch: {}", testBatch);
ListenableFuture<Result<UploadData>> future = accessor.getAll(testBatch);
FutureUtils.addCallback(future, new FutureCallback<Result<UploadData>>() {
@Override
public void onSuccess(Result<UploadData> result) {
onComplete.accept(new ResultContext(true, result.all()));
}
@Override
public void onFailure(Throwable error) {
logger.error("getAll - testBatch: {} ex: ", testBatch, error);
onComplete.accept(new ResultContext(error, "Failed to get all."));
}
}, vertx);
}
/**
* Get all matching {@link com.cyngn.chrono.storage.cassandra.table.UploadData}(s) by sub key.
*/
public void getAll(final String testBatch, final String unit, final Consumer<ResultContext<List<UploadData>>> onComplete) {
logger.info("getAll - testBatch: {} unit: {}", testBatch, unit);
ListenableFuture<Result<UploadData>> future = accessor.getAll(testBatch, unit);
FutureUtils.addCallback(future, new FutureCallback<Result<UploadData>>() {
@Override
public void onSuccess(Result<UploadData> result) {
onComplete.accept(new ResultContext(true, result.all()));
}
@Override
public void onFailure(Throwable error) {
logger.error("getAll - testBatch: {} unit: {} ex: ", testBatch, unit, error);
onComplete.accept(new ResultContext(error, "Failed to get all."));
}
}, vertx);
}
/**
 * Get all matching {@link com.cyngn.chrono.storage.cassandra.table.UploadData}(s) by sub key.
 *
 * @param testBatch partition key to query
 * @param unit first clustering column value
 * @param size second clustering column value
 * @param onComplete async callback invoked with the full (materialized) row list, or the failure
 */
public void getAll(final String testBatch, final String unit, final Long size, final Consumer<ResultContext<List<UploadData>>> onComplete) {
    logger.info("getAll - testBatch: {} unit: {} size: {}", testBatch, unit, size);
    ListenableFuture<Result<UploadData>> future = accessor.getAll(testBatch, unit, size);
    FutureUtils.addCallback(future, new FutureCallback<Result<UploadData>>() {
        @Override
        public void onSuccess(Result<UploadData> result) {
            onComplete.accept(new ResultContext<>(true, result.all()));
        }
        @Override
        public void onFailure(Throwable error) {
            logger.error("getAll - testBatch: {} unit: {} size: {} ex: ", testBatch, unit, size, error);
            onComplete.accept(new ResultContext<>(error, "Failed to get all."));
        }
    }, vertx);
}
}
package com.cyngn.chrono.storage.cassandra.dal;
import com.cyngn.chrono.storage.cassandra.table.TestBatch;
import com.cyngn.vertx.async.ResultContext;
import com.datastax.driver.mapping.MappingManager;
import com.datastax.driver.mapping.Result;
import com.englishtown.vertx.cassandra.CassandraSession;
import com.englishtown.vertx.cassandra.FutureUtils;
import com.englishtown.vertx.cassandra.mapping.VertxMapper;
import com.englishtown.vertx.cassandra.mapping.VertxMappingManager;
import com.englishtown.vertx.cassandra.mapping.impl.DefaultVertxMappingManager;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.ListenableFuture;
import io.vertx.core.Vertx;
import java.lang.Object;
import java.lang.Override;
import java.lang.Throwable;
import java.lang.Void;
import java.util.List;
import java.util.function.Consumer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* GENERATED CODE DO NOT MODIFY - last updated: 2015-12-17T21:37:15.628Z
* generated by exovert - https://github.com/cyngn/exovert
*
* DAL for Cassandra entity - {@link com.cyngn.chrono.storage.cassandra.table.TestBatch}
*/
public class TestBatchDal implements CommonDal<TestBatch> {
    private static final Logger logger = LoggerFactory.getLogger(TestBatchDal.class);
    final CassandraSession session;
    // async object mapper for TestBatch save/delete/get by key
    final VertxMapper<TestBatch> mapper;
    // accessor for the custom (getAll) queries
    final TestBatchAccessor accessor;
    final Vertx vertx;

    /**
     * @param session shared Cassandra session; also supplies the Vert.x instance callbacks run on
     */
    public TestBatchDal(final CassandraSession session) {
        this.session = session;
        VertxMappingManager manager = new DefaultVertxMappingManager(session);
        mapper = manager.mapper(TestBatch.class);
        MappingManager accessorMappingManager = new MappingManager(session.getSession());
        accessor = accessorMappingManager.createAccessor(TestBatchAccessor.class);
        vertx = session.getVertx();
    }

    /**
     * Save a {@link com.cyngn.chrono.storage.cassandra.table.TestBatch} object.
     */
    public void save(final TestBatch testBatchObj, final Consumer<ResultContext> onComplete) {
        logger.info("save - {}", testBatchObj);
        mapper.saveAsync(testBatchObj, new FutureCallback<Void>() {
            @Override
            public void onSuccess(Void result) {
                onComplete.accept(new ResultContext(true));
            }
            @Override
            public void onFailure(Throwable error) {
                logger.error("save - {}, ex: ", testBatchObj, error);
                onComplete.accept(new ResultContext(error, "Failed to save TestBatch: " + testBatchObj));
            }
        });
    }

    /**
     * Delete a {@link com.cyngn.chrono.storage.cassandra.table.TestBatch} object.
     */
    public void delete(final TestBatch testBatchObj, final Consumer<ResultContext> onComplete) {
        logger.info("delete - {}", testBatchObj);
        mapper.deleteAsync(testBatchObj, new FutureCallback<Void>() {
            @Override
            public void onSuccess(Void result) {
                onComplete.accept(new ResultContext(true));
            }
            @Override
            public void onFailure(Throwable error) {
                logger.error("delete - {}, ex: ", testBatchObj, error);
                onComplete.accept(new ResultContext(error, "Failed to delete TestBatch: " + testBatchObj));
            }
        });
    }

    /**
     * Delete a {@link com.cyngn.chrono.storage.cassandra.table.TestBatch} object by key.
     */
    public void delete(final Consumer<ResultContext> onComplete, final Object... primaryKey) {
        // Arrays.toString: logging/concatenating an Object[] directly only prints its identity
        // hash (and slf4j would spread the array across the format anchors)
        logger.info("delete - {}", java.util.Arrays.toString(primaryKey));
        mapper.deleteAsync(new FutureCallback<Void>() {
            @Override
            public void onSuccess(Void result) {
                onComplete.accept(new ResultContext(true));
            }
            @Override
            public void onFailure(Throwable error) {
                logger.error("delete - {}, ex: ", java.util.Arrays.toString(primaryKey), error);
                onComplete.accept(new ResultContext(error, "Failed to delete TestBatch by key: " + java.util.Arrays.toString(primaryKey)));
            }
        }, primaryKey);
    }

    /**
     * Get a {@link com.cyngn.chrono.storage.cassandra.table.TestBatch} object by primary key.
     */
    public void get(final Consumer<ResultContext<TestBatch>> onComplete, final Object... primaryKey) {
        logger.info("get - {}", java.util.Arrays.toString(primaryKey));
        mapper.getAsync(new FutureCallback<TestBatch>() {
            @Override
            public void onSuccess(TestBatch result) {
                onComplete.accept(new ResultContext<>(true, result));
            }
            @Override
            public void onFailure(Throwable error) {
                logger.error("get - {}, ex: ", java.util.Arrays.toString(primaryKey), error);
                onComplete.accept(new ResultContext<>(error, "Failed to get TestBatch by key: " + java.util.Arrays.toString(primaryKey)));
            }
        }, primaryKey);
    }

    /**
     * Get all {@link com.cyngn.chrono.storage.cassandra.table.TestBatch} rows.
     */
    public void getAll(final Consumer<ResultContext<List<TestBatch>>> onComplete) {
        logger.info("getAll -");
        // parameterized future (was a raw ListenableFuture)
        ListenableFuture<Result<TestBatch>> future = accessor.getAll();
        FutureUtils.addCallback(future, new FutureCallback<Result<TestBatch>>() {
            @Override
            public void onSuccess(Result<TestBatch> result) {
                onComplete.accept(new ResultContext<>(true, result.all()));
            }
            @Override
            public void onFailure(Throwable error) {
                logger.error("getAll - ex: ", error);
                onComplete.accept(new ResultContext<>(error, "Failed to get all."));
            }
        }, vertx);
    }
}
package com.cyngn.chrono.rest;
import com.cyngn.vertx.async.ResultContext;
import com.cyngn.vertx.web.HttpHelper;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.vertx.core.http.HttpServerRequest;
import io.vertx.ext.web.RoutingContext;
import java.lang.Boolean;
import java.lang.String;
import java.util.List;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* GENERATED CODE DO NOT MODIFY - last updated: 2015-12-17T21:37:15.628Z
* generated by exovert - https://github.com/cyngn/exovert
*
* Central place to put shared functions for REST call processing.
*/
public class RestUtil {
    private static final Logger logger = LoggerFactory.getLogger(RestUtil.class);

    /**
     * Does the query string of this request contain the full primary key?
     *
     * Sends a BAD_REQUEST response naming the first missing parameter; returns false in that case.
     *
     * @param request the incoming request to validate
     * @param primaryKey the parameter names that must all be present and non-empty
     * @return true when every key has a non-empty value
     */
    public static Boolean isValid(final HttpServerRequest request, final String[] primaryKey) {
        String error = null;
        for (String key : primaryKey) {
            String value = request.getParam(key);
            if (StringUtils.isEmpty(value)) {
                error = "You must supply parameter: " + key;
                HttpHelper.processErrorResponse(error, request.response(), HttpResponseStatus.BAD_REQUEST.code());
                break;
            }
        }
        return StringUtils.isEmpty(error);
    }

    /**
     * Handles processing a get all result: 200 with the list, 404 when absent,
     * 500 on an exception, 400 on a plain error message.
     */
    public static <T> void processGetAllResult(final RoutingContext context, final ResultContext<List<T>> result) {
        if(result.succeeded) {
            if(result.value != null) {
                HttpHelper.processResponse(result.value, context.response());
            } else {
                HttpHelper.processResponse(context.response(), HttpResponseStatus.NOT_FOUND.code());
            }
        } else if(result.error != null) {
            String error = "Could not GET with query: " + context.request().uri() + " error: " + result.error.getMessage();
            logger.error("get_all - {}", error);
            HttpHelper.processErrorResponse(error, context.response(), HttpResponseStatus.INTERNAL_SERVER_ERROR.code());
        } else {
            // fix: result.error is null on this branch — dereferencing it NPE'd; report errorMessage
            String error = "Could not GET with query: " + context.request().uri() + " error: " + result.errorMessage;
            logger.error("get_all - {}", error);
            HttpHelper.processErrorResponse(error, context.response(), HttpResponseStatus.BAD_REQUEST.code());
        }
    }
}
package com.cyngn.chrono.rest;
import com.cyngn.chrono.storage.cassandra.dal.PayloadDal;
import com.cyngn.chrono.storage.cassandra.table.Payload;
import com.cyngn.vertx.web.HttpHelper;
import com.cyngn.vertx.web.JsonUtil;
import com.cyngn.vertx.web.RestApi;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.vertx.core.buffer.Buffer;
import io.vertx.core.http.HttpMethod;
import io.vertx.core.http.HttpServerRequest;
import io.vertx.ext.web.RoutingContext;
import java.lang.Long;
import java.lang.Object;
import java.lang.Override;
import java.lang.String;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* GENERATED CODE DO NOT MODIFY - last updated: 2015-12-17T21:37:15.628Z
* generated by exovert - https://github.com/cyngn/exovert
*
* REST Api for Cassandra entity - {@link com.cyngn.chrono.storage.cassandra.table.Payload}
*/
public class PayloadApi implements RestApi {
    private static final Logger logger = LoggerFactory.getLogger(PayloadApi.class);
    public static final String PAYLOAD_API = "/api/v1/payload";
    public static final String PAYLOAD_ALL_API = "/api/v1/payload_all";
    private final PayloadDal storage;
    // routes exposed by this API
    private final RestApi.RestApiDescriptor[] supportedApi = {
        new RestApi.RestApiDescriptor(HttpMethod.POST, PAYLOAD_API, this::save),
        new RestApi.RestApiDescriptor(HttpMethod.GET, PAYLOAD_API, this::get),
        new RestApi.RestApiDescriptor(HttpMethod.DELETE, PAYLOAD_API, this::delete),
        new RestApi.RestApiDescriptor(HttpMethod.GET, PAYLOAD_ALL_API, this::getAll)
    };
    // Cassandra primary key parameter names, in key order
    final String[] primaryKey;

    public PayloadApi(final PayloadDal storage) {
        this.storage = storage;
        primaryKey = new String[] {
            "unit",
            "size"
        } ;
    }

    /**
     * Save a {@link com.cyngn.chrono.storage.cassandra.table.Payload} object.
     */
    public void save(final RoutingContext context) {
        HttpServerRequest request = context.request();
        if(request.isEnded()) {
            save(context, context.getBody());
        } else {
            request.bodyHandler(buffer -> save(context, buffer));
        }
    }

    /**
     * Save a {@link com.cyngn.chrono.storage.cassandra.table.Payload} object.
     *
     * NOTE: this method is left intentionally package protected to allow you to call it in a different way
     */
    void save(final RoutingContext context, final Buffer body) {
        Payload entity = JsonUtil.parseJsonToObject(body.toString(), Payload.class);
        if(entity == null) {
            HttpHelper.processErrorResponse("Failed to parse body: " + body, context.response(), HttpResponseStatus.BAD_REQUEST.code());
            return;
        }
        storage.save(entity, result -> {
            if(result.succeeded) {
                HttpHelper.processResponse(context.response());
            } else if(result.error != null) {
                String error = "Could not persist " + entity.toString() + ", error: " + result.error.getMessage();
                logger.error("save - {}", error);
                HttpHelper.processErrorResponse(error, context.response(), HttpResponseStatus.INTERNAL_SERVER_ERROR.code());
            } else {
                String error = "Could not persist " + entity.toString() + ", error: " + result.errorMessage;
                logger.error("save - {}", error);
                HttpHelper.processErrorResponse(error, context.response(), HttpResponseStatus.BAD_REQUEST.code());
            }
        } );
    }

    /**
     * Delete a {@link com.cyngn.chrono.storage.cassandra.table.Payload} object.
     */
    public void delete(final RoutingContext context) {
        // if query params aren't valid a HttpResponseStatus.BAD_REQUEST will be sent with the missing field
        Object[] queryKey = null;
        if(RestUtil.isValid(context.request(), primaryKey)) {
            queryKey = convertQueryString(context);
        }
        if(queryKey != null) {
            storage.delete(result -> {
                if(result.succeeded) {
                    HttpHelper.processResponse(context.response());
                } else if (result.error != null) {
                    String error = "Could not DELETE with query: " + context.request().uri() + " error: " + result.error.getMessage();
                    logger.error("delete - {}", error);
                    HttpHelper.processErrorResponse(error, context.response(), HttpResponseStatus.INTERNAL_SERVER_ERROR.code());
                } else {
                    // fix: result.error is null on this branch — dereferencing it NPE'd; report errorMessage
                    String error = "Could not DELETE with query: " + context.request().uri() + " error: " + result.errorMessage;
                    logger.error("delete - {}", error);
                    HttpHelper.processErrorResponse(error, context.response(), HttpResponseStatus.BAD_REQUEST.code());
                }
            } , queryKey);
        }
    }

    /**
     * Get a {@link com.cyngn.chrono.storage.cassandra.table.Payload} object.
     */
    public void get(final RoutingContext context) {
        // if query params aren't valid a HttpResponseStatus.BAD_REQUEST will be sent with the missing field
        Object[] queryKey = null;
        if(RestUtil.isValid(context.request(), primaryKey)) {
            queryKey = convertQueryString(context);
        }
        if(queryKey != null) {
            storage.get(result -> {
                if(result.succeeded) {
                    if(result.value != null) {
                        HttpHelper.processResponse(result.value, context.response());
                    } else {
                        HttpHelper.processResponse(context.response(), HttpResponseStatus.NOT_FOUND.code());
                    }
                } else if(result.error != null) {
                    String error = "Could not GET with query: " + context.request().uri() + " error: " + result.error.getMessage();
                    logger.error("get - {}", error);
                    HttpHelper.processErrorResponse(error, context.response(), HttpResponseStatus.INTERNAL_SERVER_ERROR.code());
                } else {
                    // fix: result.error is null on this branch — dereferencing it NPE'd; report errorMessage
                    String error = "Could not GET with query: " + context.request().uri() + " error: " + result.errorMessage;
                    logger.error("get - {}", error);
                    HttpHelper.processErrorResponse(error, context.response(), HttpResponseStatus.BAD_REQUEST.code());
                }
            } , queryKey);
        }
    }

    /**
     * GetAll - gets a list of a {@link com.cyngn.chrono.storage.cassandra.table.Payload} object.
     */
    public void getAll(final RoutingContext context) {
        HttpServerRequest request = context.request();
        int paramCount = request.params().size();
        try {
            //the query must start at the partition key
            switch(paramCount) {
                //partial key: unit
                case 1:
                    storage.getAll(request.getParam("unit"), result -> RestUtil.processGetAllResult(context, result));
                    break;
                default:
                    String error = "Invalid get all query: " + context.request().uri();
                    logger.error("get_all - {}", error);
                    HttpHelper.processErrorResponse(error, context.response(), HttpResponseStatus.BAD_REQUEST.code());
                    break;
            }
        } catch (Exception ex) {
            String error = "error with query: " + context.request().uri() + " error: " + ex.getMessage();
            logger.error("get_all - {}", error, ex);
            HttpHelper.processErrorResponse(error, context.response(), HttpResponseStatus.INTERNAL_SERVER_ERROR.code());
        }
    }

    @Override
    public RestApi.RestApiDescriptor[] supportedApi() {
        return supportedApi;
    }

    /**
     * Convert query params to their Cassandra type.
     *
     * @return the typed key values in primary-key order, or null (after a BAD_REQUEST
     *         response) when a value fails to parse
     */
    public Object[] convertQueryString(final RoutingContext context) {
        HttpServerRequest request = context.request();
        // if query params aren't valid a HttpResponseStatus.BAD_REQUEST will be sent with the missing field
        Object[] values = new Object[2];
        try {
            values[0] = request.getParam("unit");
            values[1] = Long.parseLong(request.getParam("size"));
        } catch (Exception ex) {
            HttpHelper.processErrorResponse(ex.getMessage(), context.response(), HttpResponseStatus.BAD_REQUEST.code());
            return null;
        }
        return values;
    }
}
package com.cyngn.chrono.rest;
import com.cyngn.chrono.storage.cassandra.dal.ReportDal;
import com.cyngn.chrono.storage.cassandra.table.Report;
import com.cyngn.vertx.web.HttpHelper;
import com.cyngn.vertx.web.JsonUtil;
import com.cyngn.vertx.web.RestApi;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.vertx.core.buffer.Buffer;
import io.vertx.core.http.HttpMethod;
import io.vertx.core.http.HttpServerRequest;
import io.vertx.ext.web.RoutingContext;
import java.lang.Object;
import java.lang.Override;
import java.lang.String;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* GENERATED CODE DO NOT MODIFY - last updated: 2015-12-17T21:37:15.628Z
* generated by exovert - https://github.com/cyngn/exovert
*
* REST Api for Cassandra entity - {@link com.cyngn.chrono.storage.cassandra.table.Report}
*/
public class ReportApi implements RestApi {
    private static final Logger logger = LoggerFactory.getLogger(ReportApi.class);
    public static final String REPORT_API = "/api/v1/report";
    public static final String REPORT_ALL_API = "/api/v1/report_all";
    private final ReportDal storage;
    // routes exposed by this API
    private final RestApi.RestApiDescriptor[] supportedApi = {
        new RestApi.RestApiDescriptor(HttpMethod.POST, REPORT_API, this::save),
        new RestApi.RestApiDescriptor(HttpMethod.GET, REPORT_API, this::get),
        new RestApi.RestApiDescriptor(HttpMethod.DELETE, REPORT_API, this::delete),
        new RestApi.RestApiDescriptor(HttpMethod.GET, REPORT_ALL_API, this::getAll)
    };
    // Cassandra primary key parameter names, in key order
    final String[] primaryKey;

    public ReportApi(final ReportDal storage) {
        this.storage = storage;
        primaryKey = new String[] {
            "batch_name",
            "device_id",
            "created"
        } ;
    }

    /**
     * Save a {@link com.cyngn.chrono.storage.cassandra.table.Report} object.
     */
    public void save(final RoutingContext context) {
        HttpServerRequest request = context.request();
        if(request.isEnded()) {
            save(context, context.getBody());
        } else {
            request.bodyHandler(buffer -> save(context, buffer));
        }
    }

    /**
     * Save a {@link com.cyngn.chrono.storage.cassandra.table.Report} object.
     *
     * NOTE: this method is left intentionally package protected to allow you to call it in a different way
     */
    void save(final RoutingContext context, final Buffer body) {
        Report entity = JsonUtil.parseJsonToObject(body.toString(), Report.class);
        if(entity == null) {
            HttpHelper.processErrorResponse("Failed to parse body: " + body, context.response(), HttpResponseStatus.BAD_REQUEST.code());
            return;
        }
        storage.save(entity, result -> {
            if(result.succeeded) {
                HttpHelper.processResponse(context.response());
            } else if(result.error != null) {
                String error = "Could not persist " + entity.toString() + ", error: " + result.error.getMessage();
                logger.error("save - {}", error);
                HttpHelper.processErrorResponse(error, context.response(), HttpResponseStatus.INTERNAL_SERVER_ERROR.code());
            } else {
                String error = "Could not persist " + entity.toString() + ", error: " + result.errorMessage;
                logger.error("save - {}", error);
                HttpHelper.processErrorResponse(error, context.response(), HttpResponseStatus.BAD_REQUEST.code());
            }
        } );
    }

    /**
     * Delete a {@link com.cyngn.chrono.storage.cassandra.table.Report} object.
     */
    public void delete(final RoutingContext context) {
        // if query params aren't valid a HttpResponseStatus.BAD_REQUEST will be sent with the missing field
        Object[] queryKey = null;
        if(RestUtil.isValid(context.request(), primaryKey)) {
            queryKey = convertQueryString(context);
        }
        if(queryKey != null) {
            storage.delete(result -> {
                if(result.succeeded) {
                    HttpHelper.processResponse(context.response());
                } else if (result.error != null) {
                    String error = "Could not DELETE with query: " + context.request().uri() + " error: " + result.error.getMessage();
                    logger.error("delete - {}", error);
                    HttpHelper.processErrorResponse(error, context.response(), HttpResponseStatus.INTERNAL_SERVER_ERROR.code());
                } else {
                    // fix: result.error is null on this branch — dereferencing it NPE'd; report errorMessage
                    String error = "Could not DELETE with query: " + context.request().uri() + " error: " + result.errorMessage;
                    logger.error("delete - {}", error);
                    HttpHelper.processErrorResponse(error, context.response(), HttpResponseStatus.BAD_REQUEST.code());
                }
            } , queryKey);
        }
    }

    /**
     * Get a {@link com.cyngn.chrono.storage.cassandra.table.Report} object.
     */
    public void get(final RoutingContext context) {
        // if query params aren't valid a HttpResponseStatus.BAD_REQUEST will be sent with the missing field
        Object[] queryKey = null;
        if(RestUtil.isValid(context.request(), primaryKey)) {
            queryKey = convertQueryString(context);
        }
        if(queryKey != null) {
            storage.get(result -> {
                if(result.succeeded) {
                    if(result.value != null) {
                        HttpHelper.processResponse(result.value, context.response());
                    } else {
                        HttpHelper.processResponse(context.response(), HttpResponseStatus.NOT_FOUND.code());
                    }
                } else if(result.error != null) {
                    String error = "Could not GET with query: " + context.request().uri() + " error: " + result.error.getMessage();
                    logger.error("get - {}", error);
                    HttpHelper.processErrorResponse(error, context.response(), HttpResponseStatus.INTERNAL_SERVER_ERROR.code());
                } else {
                    // fix: result.error is null on this branch — dereferencing it NPE'd; report errorMessage
                    String error = "Could not GET with query: " + context.request().uri() + " error: " + result.errorMessage;
                    logger.error("get - {}", error);
                    HttpHelper.processErrorResponse(error, context.response(), HttpResponseStatus.BAD_REQUEST.code());
                }
            } , queryKey);
        }
    }

    /**
     * GetAll - gets a list of a {@link com.cyngn.chrono.storage.cassandra.table.Report} object.
     */
    public void getAll(final RoutingContext context) {
        HttpServerRequest request = context.request();
        int paramCount = request.params().size();
        try {
            //the query must start at the partition key
            switch(paramCount) {
                //partial key: batchName
                case 1:
                    storage.getAll(request.getParam("batch_name"), result -> RestUtil.processGetAllResult(context, result));
                    break;
                //partial key: batchName, deviceId
                case 2:
                    storage.getAll(request.getParam("batch_name"), request.getParam("device_id"), result -> RestUtil.processGetAllResult(context, result));
                    break;
                default:
                    String error = "Invalid get all query: " + context.request().uri();
                    logger.error("get_all - {}", error);
                    HttpHelper.processErrorResponse(error, context.response(), HttpResponseStatus.BAD_REQUEST.code());
                    break;
            }
        } catch (Exception ex) {
            String error = "error with query: " + context.request().uri() + " error: " + ex.getMessage();
            logger.error("get_all - {}", error, ex);
            HttpHelper.processErrorResponse(error, context.response(), HttpResponseStatus.INTERNAL_SERVER_ERROR.code());
        }
    }

    @Override
    public RestApi.RestApiDescriptor[] supportedApi() {
        return supportedApi;
    }

    /**
     * Convert query params to their Cassandra type.
     *
     * @return the typed key values in primary-key order, or null (after a BAD_REQUEST
     *         response) when a value fails to parse
     */
    public Object[] convertQueryString(final RoutingContext context) {
        HttpServerRequest request = context.request();
        // if query params aren't valid a HttpResponseStatus.BAD_REQUEST will be sent with the missing field
        Object[] values = new Object[3];
        try {
            values[0] = request.getParam("batch_name");
            values[1] = request.getParam("device_id");
            values[2] = new DateTime(request.getParam("created")).toDate();
        } catch (Exception ex) {
            HttpHelper.processErrorResponse(ex.getMessage(), context.response(), HttpResponseStatus.BAD_REQUEST.code());
            return null;
        }
        return values;
    }
}
package com.cyngn.chrono.rest;
import com.cyngn.chrono.storage.cassandra.dal.UploadDataDal;
import com.cyngn.chrono.storage.cassandra.table.UploadData;
import com.cyngn.vertx.web.HttpHelper;
import com.cyngn.vertx.web.JsonUtil;
import com.cyngn.vertx.web.RestApi;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.vertx.core.buffer.Buffer;
import io.vertx.core.http.HttpMethod;
import io.vertx.core.http.HttpServerRequest;
import io.vertx.ext.web.RoutingContext;
import java.lang.Long;
import java.lang.Object;
import java.lang.Override;
import java.lang.String;
import org.joda.time.DateTime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* GENERATED CODE DO NOT MODIFY - last updated: 2015-12-17T21:37:15.628Z
* generated by exovert - https://github.com/cyngn/exovert
*
* REST Api for Cassandra entity - {@link com.cyngn.chrono.storage.cassandra.table.UploadData}
*/
public class UploadDataApi implements RestApi {
private static final Logger logger = LoggerFactory.getLogger(UploadDataApi.class);
public static final String UPLOAD_DATA_API = "/api/v1/upload_data";
public static final String UPLOAD_DATA_ALL_API = "/api/v1/upload_data_all";
private final UploadDataDal storage;
private final RestApi.RestApiDescriptor[] supportedApi = {
new RestApi.RestApiDescriptor(HttpMethod.POST, UPLOAD_DATA_API, this::save),
new RestApi.RestApiDescriptor(HttpMethod.GET, UPLOAD_DATA_API, this::get),
new RestApi.RestApiDescriptor(HttpMethod.DELETE, UPLOAD_DATA_API, this::delete),
new RestApi.RestApiDescriptor(HttpMethod.GET, UPLOAD_DATA_ALL_API, this::getAll)
};
final String[] primaryKey;
public UploadDataApi(final UploadDataDal storage) {
this.storage = storage;
primaryKey = new String[] {
"test_batch",
"unit",
"size",
"created"
} ;
}
/**
* Save a {@link com.cyngn.chrono.storage.cassandra.table.UploadData} object.
*/
public void save(final RoutingContext context) {
HttpServerRequest request = context.request();
if(request.isEnded()) {
save(context, context.getBody());
} else {
request.bodyHandler(buffer -> save(context, buffer));
}
}
/**
* Save a {@link com.cyngn.chrono.storage.cassandra.table.UploadData} object.
*
* NOTE: this method is left intentionally package protected to allow you to call it in a different way
*/
void save(final RoutingContext context, final Buffer body) {
UploadData entity = JsonUtil.parseJsonToObject(body.toString(), UploadData.class);
if(entity == null) {
HttpHelper.processErrorResponse("Failed to parse body: " + body, context.response(), HttpResponseStatus.BAD_REQUEST.code());
return;
}
storage.save(entity, result -> {
if(result.succeeded) {
HttpHelper.processResponse(context.response());
} else if(result.error != null) {
String error = "Could not persist " + entity.toString() + ", error: " + result.error.getMessage();
logger.error("save - {}", error);
HttpHelper.processErrorResponse(error, context.response(), HttpResponseStatus.INTERNAL_SERVER_ERROR.code());
} else {
String error = "Could not persist " + entity.toString() + ", error: " + result.errorMessage;
logger.error("save - {}", error);
HttpHelper.processErrorResponse(error, context.response(), HttpResponseStatus.BAD_REQUEST.code());
}
} );
}
/**
* Delete a {@link com.cyngn.chrono.storage.cassandra.table.UploadData} object.
*/
public void delete(final RoutingContext context) {
// if query params aren't valid a HttpResponseStatus.BAD_REQUEST will be sent with the missing field
Object[] queryKey = null;
if(RestUtil.isValid(context.request(), primaryKey)) {
queryKey = convertQueryString(context);
}
if(queryKey != null) {
storage.delete(result -> {
if(result.succeeded) {
HttpHelper.processResponse(context.response());
} else if (result.error != null) {
String error = "Could not DELETE with query: " + context.request().uri() + " error: " + result.error.getMessage();
logger.error("delete - {}", error);
HttpHelper.processErrorResponse(error, context.response(), HttpResponseStatus.INTERNAL_SERVER_ERROR.code());
} else {
String error = "Could not DELETE with query: " + context.request().uri() + " error: " + result.error.getMessage();
logger.error("delete - {}", error);
HttpHelper.processErrorResponse(error, context.response(), HttpResponseStatus.BAD_REQUEST.code());
}
} , queryKey);
}
}
/**
* Get a {@link com.cyngn.chrono.storage.cassandra.table.UploadData} object.
*/
public void get(final RoutingContext context) {
// if query params aren't valid a HttpResponseStatus.BAD_REQUEST will be sent with the missing field
Object[] queryKey = null;
if(RestUtil.isValid(context.request(), primaryKey)) {
queryKey = convertQueryString(context);
}
if(queryKey != null) {
storage.get(result -> {
if(result.succeeded) {
if(result.value != null) {
HttpHelper.processResponse(result.value, context.response());
} else {
HttpHelper.processResponse(context.response(), HttpResponseStatus.NOT_FOUND.code());
}
} else if(result.error != null) {
String error = "Could not GET with query: " + context.request().uri() + " error: " + result.error.getMessage();
logger.error("get - {}", error);
HttpHelper.processErrorResponse(error, context.response(), HttpResponseStatus.INTERNAL_SERVER_ERROR.code());
} else {
String error = "Could not GET with query: " + context.request().uri() + " error: " + result.error.getMessage();
logger.error("get - {}", error);
HttpHelper.processErrorResponse(error, context.response(), HttpResponseStatus.BAD_REQUEST.code());
}
} , queryKey);
}
}
/**
* GetAll - gets a list of a {@link com.cyngn.chrono.storage.cassandra.table.UploadData} object.
*/
public void getAll(final RoutingContext context) {
HttpServerRequest request = context.request();
int paramCount = request.params().size();
try {
//the query must start at the partition key
switch(paramCount) {
//partial key: testBatch
case 1:
storage.getAll(request.getParam("test_batch"), result -> RestUtil.processGetAllResult(context, result));
break;
//partial key: testBatch, unit
case 2:
storage.getAll(request.getParam("test_batch"), request.getParam("unit"), result -> RestUtil.processGetAllResult(context, result));
break;
//partial key: testBatch, unit, size
case 3:
storage.getAll(request.getParam("test_batch"), request.getParam("unit"), Long.parseLong(request.getParam("size")), result -> RestUtil.processGetAllResult(context, result));
break;
default:
String error = "Invalid get all query: " + context.request().uri();
logger.error("get_all - {}", error);
HttpHelper.processErrorResponse(error, context.response(), HttpResponseStatus.BAD_REQUEST.code());
break;
}
} catch (Exception ex) {
String error = "error with query: " + context.request().uri() + " error: " + ex.getMessage();
logger.error("get_all - {}", error, ex);
HttpHelper.processErrorResponse(error, context.response(), HttpResponseStatus.INTERNAL_SERVER_ERROR.code());
}
}
@Override
public RestApi.RestApiDescriptor[] supportedApi() {
return supportedApi;
}
/**
* Convert query params to their Cassandra type.
*/
public Object[] convertQueryString(final RoutingContext context) {
HttpServerRequest request = context.request();
// if query params aren't valid a HttpResponseStatus.BAD_REQUEST will be sent with the missing field
Object[] values = new Object[4];
try {
values[0] = request.getParam("test_batch");
values[1] = request.getParam("unit");
values[2] = Long.parseLong(request.getParam("size"));
values[3] = new DateTime(request.getParam("created")).toDate();
} catch (Exception ex) {
HttpHelper.processErrorResponse(ex.getMessage(), context.response(), HttpResponseStatus.BAD_REQUEST.code());
return null;
}
return values;
}
}
package com.cyngn.chrono.rest;
import com.cyngn.chrono.storage.cassandra.dal.TestBatchDal;
import com.cyngn.chrono.storage.cassandra.table.TestBatch;
import com.cyngn.vertx.web.HttpHelper;
import com.cyngn.vertx.web.JsonUtil;
import com.cyngn.vertx.web.RestApi;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.vertx.core.buffer.Buffer;
import io.vertx.core.http.HttpMethod;
import io.vertx.core.http.HttpServerRequest;
import io.vertx.ext.web.RoutingContext;
import java.lang.Object;
import java.lang.Override;
import java.lang.String;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* GENERATED CODE DO NOT MODIFY - last updated: 2015-12-17T21:37:15.628Z
* generated by exovert - https://github.com/cyngn/exovert
*
* REST Api for Cassandra entity - {@link com.cyngn.chrono.storage.cassandra.table.TestBatch}
*/
public class TestBatchApi implements RestApi {
private static final Logger logger = LoggerFactory.getLogger(TestBatchApi.class);
public static final String TEST_BATCH_API = "/api/v1/test_batch";
public static final String TEST_BATCH_ALL_API = "/api/v1/test_batch_all";
// DAL backing this API's persistence calls
private final TestBatchDal storage;
// routes exposed by this API (CRUD on /test_batch, list on /test_batch_all)
private final RestApi.RestApiDescriptor[] supportedApi = {
new RestApi.RestApiDescriptor(HttpMethod.POST, TEST_BATCH_API, this::save),
new RestApi.RestApiDescriptor(HttpMethod.GET, TEST_BATCH_API, this::get),
new RestApi.RestApiDescriptor(HttpMethod.DELETE, TEST_BATCH_API, this::delete),
new RestApi.RestApiDescriptor(HttpMethod.GET, TEST_BATCH_ALL_API, this::getAll)
};
// Cassandra primary key parameter names, in key order
final String[] primaryKey;
/**
 * @param storage the DAL used to persist and query TestBatch rows
 */
public TestBatchApi(final TestBatchDal storage) {
    this.storage = storage;
    // the test_batch table is keyed solely by its name
    primaryKey = new String[] { "name" };
}
/**
 * Save a {@link com.cyngn.chrono.storage.cassandra.table.TestBatch} object.
 */
public void save(final RoutingContext context) {
    HttpServerRequest request = context.request();
    // if the body is still streaming in, defer until it has fully arrived
    if (!request.isEnded()) {
        request.bodyHandler(buffer -> save(context, buffer));
    } else {
        save(context, context.getBody());
    }
}
/**
 * Save a {@link com.cyngn.chrono.storage.cassandra.table.TestBatch} object.
 *
 * NOTE: this method is left intentionally package protected to allow you to call it in a different way
 */
void save(final RoutingContext context, final Buffer body) {
    final TestBatch entity = JsonUtil.parseJsonToObject(body.toString(), TestBatch.class);
    if (entity == null) {
        // unparseable JSON -> reject immediately
        HttpHelper.processErrorResponse("Failed to parse body: " + body, context.response(), HttpResponseStatus.BAD_REQUEST.code());
        return;
    }
    storage.save(entity, result -> {
        if (result.succeeded) {
            HttpHelper.processResponse(context.response());
            return;
        }
        // failed: an exception maps to 500, a plain error message to 400
        final boolean hasException = result.error != null;
        final String detail = hasException ? result.error.getMessage() : result.errorMessage;
        final String error = "Could not persist " + entity.toString() + ", error: " + detail;
        logger.error("save - {}", error);
        final int status = hasException
            ? HttpResponseStatus.INTERNAL_SERVER_ERROR.code()
            : HttpResponseStatus.BAD_REQUEST.code();
        HttpHelper.processErrorResponse(error, context.response(), status);
    } );
}
/**
 * Delete a {@link com.cyngn.chrono.storage.cassandra.table.TestBatch} object.
 */
public void delete(final RoutingContext context) {
    // if query params aren't valid a HttpResponseStatus.BAD_REQUEST will be sent with the missing field
    Object[] queryKey = null;
    if(RestUtil.isValid(context.request(), primaryKey)) {
        queryKey = convertQueryString(context);
    }
    if(queryKey != null) {
        storage.delete(result -> {
            if(result.succeeded) {
                HttpHelper.processResponse(context.response());
            } else if (result.error != null) {
                String error = "Could not DELETE with query: " + context.request().uri() + " error: " + result.error.getMessage();
                logger.error("delete - {}", error);
                HttpHelper.processErrorResponse(error, context.response(), HttpResponseStatus.INTERNAL_SERVER_ERROR.code());
            } else {
                // fix: result.error is null on this branch — dereferencing it NPE'd; report errorMessage
                String error = "Could not DELETE with query: " + context.request().uri() + " error: " + result.errorMessage;
                logger.error("delete - {}", error);
                HttpHelper.processErrorResponse(error, context.response(), HttpResponseStatus.BAD_REQUEST.code());
            }
        } , queryKey);
    }
}
/**
* Get a {@link com.cyngn.chrono.storage.cassandra.table.TestBatch} object.
*/
/**
 * Fetches the {@link com.cyngn.chrono.storage.cassandra.table.TestBatch} row identified
 * by the request's query parameters. Responds with the JSON entity on success,
 * 404 when absent, 400 on invalid input, 500 on a storage exception.
 */
public void get(final RoutingContext context) {
    // if query params aren't valid a HttpResponseStatus.BAD_REQUEST will be sent with the missing field
    Object[] queryKey = null;
    if (RestUtil.isValid(context.request(), primaryKey)) {
        queryKey = convertQueryString(context);
    }
    if (queryKey != null) {
        storage.get(result -> {
            if (result.succeeded) {
                if (result.value != null) {
                    HttpHelper.processResponse(result.value, context.response());
                } else {
                    HttpHelper.processResponse(context.response(), HttpResponseStatus.NOT_FOUND.code());
                }
            } else if (result.error != null) {
                String error = "Could not GET with query: " + context.request().uri() + " error: " + result.error.getMessage();
                logger.error("get - {}", error);
                HttpHelper.processErrorResponse(error, context.response(), HttpResponseStatus.INTERNAL_SERVER_ERROR.code());
            } else {
                // BUG FIX: this branch runs only when result.error == null, so the original
                // result.error.getMessage() always threw an NPE; use errorMessage (as save() does).
                String error = "Could not GET with query: " + context.request().uri() + " error: " + result.errorMessage;
                logger.error("get - {}", error);
                HttpHelper.processErrorResponse(error, context.response(), HttpResponseStatus.BAD_REQUEST.code());
            }
        }, queryKey);
    }
}
/**
* GetAll - gets a list of a {@link com.cyngn.chrono.storage.cassandra.table.TestBatch} object.
*/
public void getAll(final RoutingContext context) {
    // The table's primary key is a single column, so a full "get all" listing is exposed.
    storage.getAll(result -> {
        RestUtil.processGetAllResult(context, result);
    });
}
@Override
public RestApi.RestApiDescriptor[] supportedApi() {
    // Expose the statically declared route table for registration.
    return this.supportedApi;
}
/**
* Convert query params to their Cassandra type.
*/
/**
 * Convert query params to their Cassandra type.
 */
public Object[] convertQueryString(final RoutingContext context) {
    // if query params aren't valid a HttpResponseStatus.BAD_REQUEST will be sent with the missing field
    try {
        return new Object[] { context.request().getParam("name") };
    } catch (Exception ex) {
        HttpHelper.processErrorResponse(ex.getMessage(), context.response(), HttpResponseStatus.BAD_REQUEST.code());
        return null;
    }
}
}
package com.cyngn.chrono;
import com.cyngn.chrono.rest.PayloadApi;
import com.cyngn.chrono.rest.ReportApi;
import com.cyngn.chrono.rest.TestBatchApi;
import com.cyngn.chrono.rest.UploadDataApi;
import com.cyngn.chrono.storage.cassandra.dal.PayloadDal;
import com.cyngn.chrono.storage.cassandra.dal.ReportDal;
import com.cyngn.chrono.storage.cassandra.dal.TestBatchDal;
import com.cyngn.chrono.storage.cassandra.dal.UploadDataDal;
import com.cyngn.vertx.web.RestApi;
import com.cyngn.vertx.web.RouterTools;
import com.datastax.driver.core.Cluster;
import com.englishtown.vertx.cassandra.impl.DefaultCassandraSession;
import com.englishtown.vertx.cassandra.impl.JsonCassandraConfigurator;
import com.google.common.collect.Lists;
import io.vertx.core.AbstractVerticle;
import io.vertx.core.Future;
import io.vertx.core.http.HttpServer;
import io.vertx.core.json.JsonObject;
import io.vertx.core.shareddata.LocalMap;
import io.vertx.ext.web.Router;
import io.vertx.ext.web.handler.LoggerHandler;
import java.lang.Long;
import java.lang.Override;
import java.lang.String;
import java.lang.Thread;
import java.lang.Void;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* GENERATED CODE DO NOT MODIFY - last updated: 2015-12-17T21:37:15.628Z
* generated by exovert - https://github.com/cyngn/exovert
*
* Simple server that registers all {@link com.cyngn.vertx.web.RestApi} for CRUD operations.
*
* to build: ./gradlew clean shadowJar
* to run: java -jar build/libs/[project-name]-fat.jar -conf [your_conf.json]
*/
public class Server extends AbstractVerticle {
    private static final Logger logger = LoggerFactory.getLogger(Server.class);
    private static final String SHARED_DATA_KEY = "shared_data";
    private static final String INITIALIZER_THREAD_KEY = "initializer_thread";
    // Cross-verticle-instance map used to elect one "initializer" and one "shutdown" thread.
    private LocalMap<String, Long> sharedData;
    private HttpServer server;
    private DefaultCassandraSession session;
    private int port;

    /**
     * Deploys the HTTP server and Cassandra session; fails the deployment when
     * required configuration is missing or local host lookup fails.
     *
     * @param startedResult completed on successful startup, failed otherwise
     */
    @Override
    public void start(final Future<Void> startedResult) {
        JsonObject config = config();
        // BUG FIX: the generated code called stop() here and then fell through to
        // startServer(); stop() also NPE'd because sharedData was not assigned yet.
        // Fail the deployment explicitly instead.
        if (!config.containsKey("cassandra")) {
            startedResult.fail("Missing required config section: cassandra");
            return;
        }
        sharedData = vertx.sharedData().getLocalMap(SHARED_DATA_KEY);
        // First instance to store its thread id becomes the "initializer" (does one-time logging).
        sharedData.putIfAbsent(INITIALIZER_THREAD_KEY, Thread.currentThread().getId());
        session = new DefaultCassandraSession(Cluster.builder(), new JsonCassandraConfigurator(vertx), vertx);
        port = config.getInteger("port", 80);
        if (isInitializerThread()) {
            try {
                logger.info("Starting up server... on ip: {} port: {}", InetAddress.getLocalHost().getHostAddress(), port);
            } catch (UnknownHostException ex) {
                logger.error("Failed to get host ip address, ex: ", ex);
                // BUG FIX: previously execution continued into startServer() after stop();
                // abort the deployment instead.
                stop();
                startedResult.fail(ex);
                return;
            }
        }
        startServer();
        startedResult.complete();
    }

    /** @return true when the current thread won the initializer election in start(). */
    public boolean isInitializerThread() {
        return sharedData.get(INITIALIZER_THREAD_KEY) == Thread.currentThread().getId();
    }

    /** Registers the root handlers plus every generated CRUD RestApi on the router. */
    private void buildApi(Router router) {
        RouterTools.registerRootHandlers(router, LoggerHandler.create());
        List<RestApi> apis = Lists.newArrayList(
            new PayloadApi(new PayloadDal(session)),
            new ReportApi(new ReportDal(session)),
            new UploadDataApi(new UploadDataDal(session)),
            new TestBatchApi(new TestBatchDal(session))
        );
        for (RestApi api : apis) {
            api.init(router);
            // Only the initializer thread dumps the API table, to avoid duplicate log lines.
            if (isInitializerThread()) { api.outputApi(logger); }
        }
    }

    /** Creates the HTTP server and binds it on all interfaces at the configured port. */
    private void startServer() {
        server = vertx.createHttpServer();
        Router router = Router.router(vertx);
        buildApi(router);
        server.requestHandler(router::accept);
        server.listen(port, "0.0.0.0", event -> {
            if (event.failed()) {
                logger.error("Failed to start server, error: ", event.cause());
                stop();
            } else {
                logger.info("Thread: {} starting to handle request", Thread.currentThread().getId());
            }
        });
    }

    @Override
    public void stop() {
        logger.info("Stopping the server.");
        try {
            if (server != null) { server.close(); }
        } finally {
            // ROBUSTNESS: sharedData may be null if stop() runs before start() assigned it.
            if (sharedData != null) {
                //make sure only one thread tries to shutdown.
                Long shutdownThreadId = sharedData.putIfAbsent("shutdown", Thread.currentThread().getId());
                if (shutdownThreadId == null) {
                    vertx.close(event -> {
                        logger.info("Vertx shutdown");
                        System.exit(-1);
                    });
                }
            }
        }
    }
}
<configuration>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<!-- encoders are assigned the type
ch.qos.logback.classic.encoder.PatternLayoutEncoder by default -->
<encoder>
<pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
</encoder>
</appender>
<root level="INFO">
<appender-ref ref="STDOUT" />
</root>
</configuration>
{
"port" : 8080,
"cassandra": {
"seeds": ["localhost"],
"reconnect": {
"name": "exponential",
"base_delay": 1000,
"max_delay": 10000
}
}
}
// Put the shadow plugin on the build-script classpath so a runnable fat jar can be built.
buildscript {
repositories { jcenter() }
dependencies {
classpath 'com.github.jengelman.gradle.plugins:shadow:1.1.1'
}
}
apply plugin: 'java'
apply plugin: 'com.github.johnrengelman.shadow'
version = '0.1.0'
group = 'com.cyngn.chrono'
archivesBaseName = 'chrono'
// The code uses Java 8 features (lambdas, method references); fail fast on older JDKs.
if (!JavaVersion.current().java8Compatible) {
throw new IllegalStateException('''A Haiku:
| This needs Java 8,
| You are using something else,
| Refresh. Try again.'''.stripMargin())
}
repositories {
    mavenCentral()
    // SECURITY FIX: use TLS — Sonatype no longer serves plain-http, and Gradle 7+
    // rejects insecure repository URLs by default.
    maven { url = 'https://oss.sonatype.org/content/repositories/snapshots/' }
    maven { url = 'https://oss.sonatype.org/content/repositories/releases/' }
}
dependencies {
// Core Vert.x runtime.
compile 'io.vertx:vertx-core:3.1.0'
compile "joda-time:joda-time:2.4"
compile "com.google.guava:guava:18.0"
compile "commons-lang:commons-lang:2.6"
compile "net.sf.jopt-simple:jopt-simple:4.9"
// Cyanogen REST/web helpers (RestApi, HttpHelper, RouterTools).
compile "com.cyngn.vertx:vertx-util:0.5.4"
// Cassandra session + object-mapping support used by the generated DALs.
compile "com.englishtown.vertx:vertx-cassandra:3.0.0"
compile "com.englishtown.vertx:vertx-cassandra-mapping:3.0.0"
compile "ch.qos.logback:logback-classic:1.0.13"
compile "ch.qos.logback:logback-core:1.0.13"
compile "io.vertx:vertx-codegen:3.0.0"
testCompile "junit:junit:4.11"
testCompile "io.vertx:vertx-unit:3.0.0"
}
task wrapper(type: Wrapper) {
    gradleVersion = '2.5'
}
// Marker task: its presence in the task graph switches off the -SNAPSHOT suffix.
// FIX: the deprecated '<<' action operator (removed in Gradle 5) replaced with doLast.
task release {
    doLast {}
}
gradle.taskGraph.whenReady { taskGraph ->
    if (!taskGraph.hasTask(release)) {
        version += '-SNAPSHOT'
    }
}
task javadocJar(type: Jar) {
    classifier = 'javadoc'
    from javadoc
}
task sourcesJar(type: Jar) {
    classifier = 'sources'
    from sourceSets.main.allSource
}
artifacts {
    archives javadocJar, sourcesJar
}
// Fat jar launched via the Vert.x Starter with Server as the main verticle.
shadowJar {
    classifier = 'fat'
    manifest {
        attributes 'Main-Class': 'io.vertx.core.Starter'
        attributes 'Main-Verticle': 'com.cyngn.chrono.Server'
    }
    mergeServiceFiles {
        include 'META-INF/services/io.vertx.core.spi.VerticleFactory'
    }
}