From beff2ffdfa31bb9b7f4a3b89b1c8426aa084a95b Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 9 Sep 2019 14:10:02 +0200 Subject: [PATCH] Adding automatic formatting of Java and Scala code --- .scalafmt.conf | 13 + build.sbt | 5 +- .../user/eventsourced/ShoppingCartEntity.java | 201 ++++--- .../behavior/ShoppingCartEntity.java | 179 +++--- .../user/gettingstarted/ShoppingCartMain.java | 11 +- .../javasupport/ClientActionContext.java | 41 +- .../io/cloudstate/javasupport/CloudState.java | 363 +++++++------ .../io/cloudstate/javasupport/Context.java | 10 +- .../cloudstate/javasupport/EffectContext.java | 54 +- .../cloudstate/javasupport/EntityContext.java | 15 +- .../io/cloudstate/javasupport/EntityId.java | 14 +- .../io/cloudstate/javasupport/Jsonable.java | 7 +- .../cloudstate/javasupport/ServiceCall.java | 28 +- .../javasupport/ServiceCallFactory.java | 33 +- .../javasupport/ServiceCallRef.java | 27 +- .../crdt/AbstractORMapWrapper.java | 200 +++---- .../javasupport/crdt/CommandContext.java | 54 +- .../javasupport/crdt/CommandHandler.java | 38 +- .../io/cloudstate/javasupport/crdt/Crdt.java | 7 +- .../javasupport/crdt/CrdtContext.java | 21 +- .../javasupport/crdt/CrdtCreationContext.java | 7 +- .../javasupport/crdt/CrdtEntity.java | 23 +- .../javasupport/crdt/CrdtEntityFactory.java | 25 +- .../javasupport/crdt/CrdtEntityHandler.java | 45 +- .../javasupport/crdt/CrdtFactory.java | 107 ++-- .../io/cloudstate/javasupport/crdt/Flag.java | 24 +- .../cloudstate/javasupport/crdt/GCounter.java | 32 +- .../io/cloudstate/javasupport/crdt/GSet.java | 29 +- .../javasupport/crdt/LWWRegister.java | 135 ++--- .../javasupport/crdt/LWWRegisterMap.java | 39 +- .../io/cloudstate/javasupport/crdt/ORMap.java | 87 +-- .../io/cloudstate/javasupport/crdt/ORSet.java | 31 +- .../javasupport/crdt/PNCounter.java | 58 +- .../javasupport/crdt/PNCounterMap.java | 138 ++--- .../crdt/StreamCancelledContext.java | 16 +- .../crdt/StreamedCommandContext.java | 80 +-- .../javasupport/crdt/SubscriptionContext.java | 10 +- .../io/cloudstate/javasupport/crdt/Vote.java | 100 ++-- .../javasupport/crdt/package-info.java | 23 +- .../eventsourced/BehaviorContext.java | 35 +- .../eventsourced/CommandContext.java | 59 +- .../eventsourced/CommandHandler.java | 38 +- .../eventsourced/EventBehaviorContext.java | 7 +- .../eventsourced/EventContext.java | 16 +- .../eventsourced/EventHandler.java | 27 +- .../eventsourced/EventSourcedContext.java | 7 +- .../eventsourced/EventSourcedEntity.java | 31 +- .../EventSourcedEntityCreationContext.java | 7 +- .../EventSourcedEntityFactory.java | 20 +- .../EventSourcedEntityHandler.java | 60 +-- .../javasupport/eventsourced/Snapshot.java | 19 +- .../eventsourced/SnapshotBehaviorContext.java | 7 +- .../eventsourced/SnapshotContext.java | 16 +- .../eventsourced/SnapshotHandler.java | 22 +- .../eventsourced/package-info.java | 23 +- .../impl/CloudStateAnnotation.java | 7 +- .../javasupport/impl/package-info.java | 6 +- .../cloudstate/javasupport/package-info.java | 23 +- .../javasupport/CloudStateRunner.scala | 57 +- .../javasupport/impl/AnySupport.scala | 220 +++++--- .../javasupport/impl/Contexts.scala | 24 +- .../impl/EntityDiscoveryImpl.scala | 21 +- .../javasupport/impl/ReflectionHelper.scala | 112 ++-- .../impl/ResolvedServiceCallFactory.scala | 11 +- .../impl/ResolvedServiceMethod.scala | 86 +-- .../impl/StatelessFunctionImpl.scala | 18 +- .../impl/crdt/AbstractCrdtFactory.scala | 15 +- .../crdt/AnnotationBasedCrdtSupport.scala | 160 ++++-- .../javasupport/impl/crdt/CrdtImpl.scala | 307 
++++++----- .../javasupport/impl/crdt/FlagImpl.scala | 10 +- .../javasupport/impl/crdt/GCounterImpl.scala | 7 +- .../javasupport/impl/crdt/GSetImpl.scala | 25 +- .../javasupport/impl/crdt/InternalCrdt.scala | 1 - .../impl/crdt/LWWRegisterImpl.scala | 16 +- .../javasupport/impl/crdt/ORMapImpl.scala | 98 ++-- .../javasupport/impl/crdt/ORSetImpl.scala | 34 +- .../javasupport/impl/crdt/PNCounterImpl.scala | 7 +- .../javasupport/impl/crdt/VoteImpl.scala | 10 +- .../AnnotationBasedEventSourcedSupport.scala | 242 +++++---- .../impl/eventsourced/EventSourcedImpl.scala | 227 ++++---- .../javasupport/impl/AnySupportSpec.scala | 11 +- .../crdt/AnnotationBasedCrdtSupportSpec.scala | 43 +- ...notationBasedEventSourcedSupportSpec.scala | 67 ++- .../cloudstate/operator/GenericStatus.scala | 19 +- .../cloudstate/operator/KnativeRevision.scala | 157 +++--- .../KnativeRevisionOperatorFactory.scala | 326 ++++++----- .../cloudstate/operator/OperatorConfig.scala | 2 +- .../cloudstate/operator/OperatorFactory.scala | 38 +- .../io/cloudstate/operator/OperatorMain.scala | 62 ++- .../cloudstate/operator/OperatorRunner.scala | 112 ++-- .../cloudstate/operator/ResourceHelper.scala | 90 ++-- .../cloudstate/operator/StatefulService.scala | 119 ++-- .../StatefulServiceOperatorFactory.scala | 238 ++++---- .../cloudstate/operator/StatefulStore.scala | 6 +- .../StatefulStoreOperatorFactory.scala | 77 +-- .../io/cloudstate/operator/Validated.scala | 43 +- .../io/cloudstate/operator/Watcher.scala | 106 ++-- .../stores/CassandraStoreSupport.scala | 35 +- .../operator/stores/CredentialsHelper.scala | 48 +- .../stores/InMemoryStoreSupport.scala | 14 +- .../stores/PostgresStoreSupport.scala | 70 ++- .../stores/StatefulStoreSupport.scala | 21 +- project/plugins.sbt | 4 + .../internal/svm/Substitutions.java | 21 +- .../internal/svm/Substitutions.java | 72 ++- .../proxy/AkkaHttpPrometheusExporter.scala | 14 +- .../proxy/CloudStateProxyMain.scala | 105 ++-- .../proxy/ConcurrencyEnforcer.scala | 68 +-- .../proxy/EntityDiscoveryManager.scala | 158 +++--- .../proxy/FileDescriptorBuilder.scala | 32 +- .../scala/io/cloudstate/proxy/HttpApi.scala | 347 +++++++----- .../io/cloudstate/proxy/Reflection.scala | 43 +- .../scala/io/cloudstate/proxy/Serve.scala | 78 +-- .../io/cloudstate/proxy/StatsCollector.scala | 49 +- .../cloudstate/proxy/UserFunctionRouter.scala | 83 +-- .../proxy/UserFunctionTypeSupport.scala | 65 ++- .../scala/io/cloudstate/proxy/Warmup.scala | 37 +- .../proxy/autoscaler/Autoscaler.scala | 330 +++++++----- .../KubernetesDeploymentScaler.scala | 103 ++-- .../proxy/autoscaler/NoAutoscaler.scala | 6 +- .../proxy/autoscaler/NoScaler.scala | 10 +- .../io/cloudstate/proxy/crdt/CrdtEntity.scala | 280 ++++++---- .../proxy/crdt/CrdtEntityManager.scala | 17 +- .../proxy/crdt/CrdtSerializers.scala | 18 +- .../proxy/crdt/CrdtSupportFactory.scala | 32 +- .../io/cloudstate/proxy/crdt/NodeVector.scala | 30 +- .../crdt/UserFunctionProtocolError.scala | 4 +- .../scala/io/cloudstate/proxy/crdt/Vote.scala | 102 ++-- .../proxy/crdt/WireTransformer.scala | 187 ++++--- .../DynamicLeastShardAllocationStrategy.scala | 30 +- .../eventsourced/EventSourcedEntity.scala | 128 +++-- .../EventSourcedSupportFactory.scala | 29 +- .../eventsourced/InMemSnapshotStore.scala | 19 +- .../io/cloudstate/proxy/HttpApiSpec.scala | 51 +- .../io/cloudstate/proxy/WarmupSpec.scala | 7 +- .../proxy/autoscaler/AutoscalerSpec.scala | 70 +-- .../proxy/crdt/AbstractCrdtEntitySpec.scala | 135 +++-- .../proxy/crdt/CrdtEntitySpec.scala | 17 +- 
.../proxy/crdt/FlagCrdtEntitySpec.scala | 7 +- .../proxy/crdt/GCounterCrdtEntitySpec.scala | 7 +- .../proxy/crdt/GSetCrdtEntitySpec.scala | 7 +- .../crdt/LWWRegisterCrdtEntitySpec.scala | 22 +- .../proxy/crdt/ORMapCrdtEntitySpec.scala | 65 ++- .../proxy/crdt/ORSetCrdtEntitySpec.scala | 11 +- .../proxy/crdt/PNCounterCrdtEntitySpec.scala | 7 +- .../SlickEnsureTablesExistReadyCheck.scala | 66 +-- .../io/cloudstate/samples/CrdtsClient.scala | 35 +- .../samples/ShoppingCartClient.scala | 13 +- .../cloudstate/samples/shoppingcart/Main.java | 18 +- .../shoppingcart/ShoppingCartEntity.java | 153 +++--- .../io/cloudstate/tck/CloudStateTCK.scala | 509 ++++++++++-------- 151 files changed, 5501 insertions(+), 4372 deletions(-) create mode 100644 .scalafmt.conf diff --git a/.scalafmt.conf b/.scalafmt.conf new file mode 100644 index 000000000..bd52ae3b3 --- /dev/null +++ b/.scalafmt.conf @@ -0,0 +1,13 @@ +version = 2.0.1 + +style = defaultWithAlign + +align.tokens = [off] +align.openParenDefnSite = true +align.openParenCallSite = true +danglingParentheses = true +docstrings = JavaDoc +indentOperator = spray +maxColumn = 120 +rewrite.rules = [RedundantBraces, RedundantParens, SortImports] +unindentTopLevelOperators = true \ No newline at end of file diff --git a/build.sbt b/build.sbt index 7dc77777b..cc3090551 100644 --- a/build.sbt +++ b/build.sbt @@ -26,10 +26,13 @@ inThisBuild(Seq( "scm:git@github.com:cloudstateio/cloudstate.git" )), developers := List( - Developer(id="jroper", name="James Roper", email="james@jazzy.id.au", url=url("https://jazzy.id.au")) + Developer(id="jroper", name="James Roper", email="james@jazzy.id.au", url=url("https://jazzy.id.au")), + Developer(id="viktorklang", name="Viktor Klang", email="viktor.klang@gmail.com", url=url("https://viktorklang.com")), ), sonatypeProfileName := "io.cloudstate", + + scalafmtOnCompile := true, )) // Make sure the version doesn't change each time it gets built, this ensures we don't rebuild the native image diff --git a/docs/src/test/java/docs/user/eventsourced/ShoppingCartEntity.java b/docs/src/test/java/docs/user/eventsourced/ShoppingCartEntity.java index f48d9da2a..31ae7b3d9 100644 --- a/docs/src/test/java/docs/user/eventsourced/ShoppingCartEntity.java +++ b/docs/src/test/java/docs/user/eventsourced/ShoppingCartEntity.java @@ -11,114 +11,109 @@ import java.util.Map; import java.util.stream.Collectors; - // #entity-class -@EventSourcedEntity( - persistenceId = "shopping-cart", - snapshotEvery = 20 -) +@EventSourcedEntity(persistenceId = "shopping-cart", snapshotEvery = 20) public class ShoppingCartEntity { -// #entity-class - - // #entity-state - private final Map cart = new LinkedHashMap<>(); - // #entity-state - - // #constructing - private final String entityId; - - public ShoppingCartEntity(@EntityId String entityId) { - this.entityId = entityId; + // #entity-class + + // #entity-state + private final Map cart = new LinkedHashMap<>(); + // #entity-state + + // #constructing + private final String entityId; + + public ShoppingCartEntity(@EntityId String entityId) { + this.entityId = entityId; + } + // #constructing + + // #get-cart + @CommandHandler + public Shoppingcart.Cart getCart() { + return Shoppingcart.Cart.newBuilder().addAllItems(cart.values()).build(); + } + // #get-cart + + // #add-item + @CommandHandler + public Empty addItem(Shoppingcart.AddLineItem item, CommandContext ctx) { + if (item.getQuantity() <= 0) { + ctx.fail("Cannot add negative quantity of to item" + item.getProductId()); } - // #constructing - - // #get-cart - 
@CommandHandler - public Shoppingcart.Cart getCart() { - return Shoppingcart.Cart.newBuilder() - .addAllItems(cart.values()) - .build(); - } - // #get-cart - - // #add-item - @CommandHandler - public Empty addItem(Shoppingcart.AddLineItem item, CommandContext ctx) { - if (item.getQuantity() <= 0) { - ctx.fail("Cannot add negative quantity of to item" + item.getProductId()); - } - ctx.emit(Domain.ItemAdded.newBuilder().setItem( + ctx.emit( + Domain.ItemAdded.newBuilder() + .setItem( Domain.LineItem.newBuilder() - .setProductId(item.getProductId()) - .setName(item.getName()) - .setQuantity(item.getQuantity()) - .build() - ).build()); - return Empty.getDefaultInstance(); - } - // #add-item - - // #item-added - @EventHandler - public void itemAdded(Domain.ItemAdded itemAdded) { - Shoppingcart.LineItem item = cart.get(itemAdded.getItem().getProductId()); - if (item == null) { - item = convert(itemAdded.getItem()); - } else { - item = item.toBuilder() - .setQuantity(item.getQuantity() + itemAdded.getItem().getQuantity()) - .build(); - } - cart.put(item.getProductId(), item); + .setProductId(item.getProductId()) + .setName(item.getName()) + .setQuantity(item.getQuantity()) + .build()) + .build()); + return Empty.getDefaultInstance(); + } + // #add-item + + // #item-added + @EventHandler + public void itemAdded(Domain.ItemAdded itemAdded) { + Shoppingcart.LineItem item = cart.get(itemAdded.getItem().getProductId()); + if (item == null) { + item = convert(itemAdded.getItem()); + } else { + item = + item.toBuilder() + .setQuantity(item.getQuantity() + itemAdded.getItem().getQuantity()) + .build(); } - - private Shoppingcart.LineItem convert(Domain.LineItem item) { - return Shoppingcart.LineItem.newBuilder() - .setProductId(item.getProductId()) - .setName(item.getName()) - .setQuantity(item.getQuantity()) - .build(); - } - // #item-added - - // #snapshot - @Snapshot - public Domain.Cart snapshot() { - return Domain.Cart.newBuilder() - .addAllItems( - cart.values().stream() - .map(this::convert) - .collect(Collectors.toList()) - ).build(); - } - - private Domain.LineItem convert(Shoppingcart.LineItem item) { - return Domain.LineItem.newBuilder() - .setProductId(item.getProductId()) - .setName(item.getName()) - .setQuantity(item.getQuantity()) - .build(); - } - // #snapshot - - // #handle-snapshot - @SnapshotHandler - public void handleSnapshot(Domain.Cart cart) { - this.cart.clear(); - for (Domain.LineItem item : cart.getItemsList()) { - this.cart.put(item.getProductId(), convert(item)); - } - } - // #handle-snapshot - - // #register - public static void main(String... 
args) { - new CloudState().registerEventSourcedEntity( - ShoppingCartEntity.class, - Shoppingcart.getDescriptor().findServiceByName("ShoppingCartService"), - Domain.getDescriptor() - ).start(); + cart.put(item.getProductId(), item); + } + + private Shoppingcart.LineItem convert(Domain.LineItem item) { + return Shoppingcart.LineItem.newBuilder() + .setProductId(item.getProductId()) + .setName(item.getName()) + .setQuantity(item.getQuantity()) + .build(); + } + // #item-added + + // #snapshot + @Snapshot + public Domain.Cart snapshot() { + return Domain.Cart.newBuilder() + .addAllItems(cart.values().stream().map(this::convert).collect(Collectors.toList())) + .build(); + } + + private Domain.LineItem convert(Shoppingcart.LineItem item) { + return Domain.LineItem.newBuilder() + .setProductId(item.getProductId()) + .setName(item.getName()) + .setQuantity(item.getQuantity()) + .build(); + } + // #snapshot + + // #handle-snapshot + @SnapshotHandler + public void handleSnapshot(Domain.Cart cart) { + this.cart.clear(); + for (Domain.LineItem item : cart.getItemsList()) { + this.cart.put(item.getProductId(), convert(item)); } - // #register + } + // #handle-snapshot + + // #register + public static void main(String... args) { + new CloudState() + .registerEventSourcedEntity( + ShoppingCartEntity.class, + Shoppingcart.getDescriptor().findServiceByName("ShoppingCartService"), + Domain.getDescriptor()) + .start(); + } + // #register } diff --git a/docs/src/test/java/docs/user/eventsourced/behavior/ShoppingCartEntity.java b/docs/src/test/java/docs/user/eventsourced/behavior/ShoppingCartEntity.java index 0e56cfef2..d2ad2ef6f 100644 --- a/docs/src/test/java/docs/user/eventsourced/behavior/ShoppingCartEntity.java +++ b/docs/src/test/java/docs/user/eventsourced/behavior/ShoppingCartEntity.java @@ -10,114 +10,111 @@ import java.util.Map; import java.util.stream.Collectors; - // #content @EventSourcedEntity public class ShoppingCartEntity { - private final Map cart = new LinkedHashMap<>(); - private boolean checkedout = false; - - public ShoppingCartEntity(EventSourcedEntityCreationContext ctx) { - ctx.become(new Open(), this); - } - - class Open { - @CommandHandler - public Empty addItem(Shoppingcart.AddLineItem item, CommandContext ctx) { - if (item.getQuantity() <= 0) { - ctx.fail("Cannot add negative quantity of to item" + item.getProductId()); - } - ctx.emit(Domain.ItemAdded.newBuilder().setItem( - Domain.LineItem.newBuilder() - .setProductId(item.getProductId()) - .setName(item.getName()) - .setQuantity(item.getQuantity()) - .build() - ).build()); - return Empty.getDefaultInstance(); - } + private final Map cart = new LinkedHashMap<>(); + private boolean checkedout = false; - @CommandHandler - public Empty checkout(CommandContext ctx) { - ctx.emit(Domain.CheckedOut.getDefaultInstance()); - return Empty.getDefaultInstance(); - } + public ShoppingCartEntity(EventSourcedEntityCreationContext ctx) { + ctx.become(new Open(), this); + } - @EventHandler - public void itemAdded(Domain.ItemAdded itemAdded) { - Shoppingcart.LineItem item = cart.get(itemAdded.getItem().getProductId()); - if (item == null) { - item = convert(itemAdded.getItem()); - } else { - item = item.toBuilder() - .setQuantity(item.getQuantity() + itemAdded.getItem().getQuantity()) - .build(); - } - cart.put(item.getProductId(), item); - } - - @EventHandler(eventClass = Domain.CheckedOut.class) - public void checkedOut(EventBehaviorContext ctx) { - checkedout = true; - ctx.become(new CheckedOut(), this); - } + class Open { + @CommandHandler + 
public Empty addItem(Shoppingcart.AddLineItem item, CommandContext ctx) { + if (item.getQuantity() <= 0) { + ctx.fail("Cannot add negative quantity of to item" + item.getProductId()); + } + ctx.emit( + Domain.ItemAdded.newBuilder() + .setItem( + Domain.LineItem.newBuilder() + .setProductId(item.getProductId()) + .setName(item.getName()) + .setQuantity(item.getQuantity()) + .build()) + .build()); + return Empty.getDefaultInstance(); } - class CheckedOut { - @CommandHandler - public Empty addItem(CommandContext ctx) { - throw ctx.fail("Can't add more items to an already checked out shopping cart"); - } - - @CommandHandler - public Empty checkout(CommandContext ctx) { - throw ctx.fail("Shopping cart is already checked out"); - } + @CommandHandler + public Empty checkout(CommandContext ctx) { + ctx.emit(Domain.CheckedOut.getDefaultInstance()); + return Empty.getDefaultInstance(); } - @CommandHandler - public Shoppingcart.Cart getCart() { - return Shoppingcart.Cart.newBuilder() - .addAllItems(cart.values()) + @EventHandler + public void itemAdded(Domain.ItemAdded itemAdded) { + Shoppingcart.LineItem item = cart.get(itemAdded.getItem().getProductId()); + if (item == null) { + item = convert(itemAdded.getItem()); + } else { + item = + item.toBuilder() + .setQuantity(item.getQuantity() + itemAdded.getItem().getQuantity()) .build(); + } + cart.put(item.getProductId(), item); } - @Snapshot - public Domain.Cart snapshot() { - return Domain.Cart.newBuilder() - .addAllItems( - cart.values().stream() - .map(this::convert) - .collect(Collectors.toList()) - ).build(); + @EventHandler(eventClass = Domain.CheckedOut.class) + public void checkedOut(EventBehaviorContext ctx) { + checkedout = true; + ctx.become(new CheckedOut(), this); } + } - @SnapshotHandler - public void handleSnapshot(Domain.Cart cart, SnapshotBehaviorContext ctx) { - this.cart.clear(); - for (Domain.LineItem item : cart.getItemsList()) { - this.cart.put(item.getProductId(), convert(item)); - } - this.checkedout = cart.getCheckedout(); - if (this.checkedout) { - ctx.become(new CheckedOut(), this); - } + class CheckedOut { + @CommandHandler + public Empty addItem(CommandContext ctx) { + throw ctx.fail("Can't add more items to an already checked out shopping cart"); } - private Shoppingcart.LineItem convert(Domain.LineItem item) { - return Shoppingcart.LineItem.newBuilder() - .setProductId(item.getProductId()) - .setName(item.getName()) - .setQuantity(item.getQuantity()) - .build(); + @CommandHandler + public Empty checkout(CommandContext ctx) { + throw ctx.fail("Shopping cart is already checked out"); } - - private Domain.LineItem convert(Shoppingcart.LineItem item) { - return Domain.LineItem.newBuilder() - .setProductId(item.getProductId()) - .setName(item.getName()) - .setQuantity(item.getQuantity()) - .build(); + } + + @CommandHandler + public Shoppingcart.Cart getCart() { + return Shoppingcart.Cart.newBuilder().addAllItems(cart.values()).build(); + } + + @Snapshot + public Domain.Cart snapshot() { + return Domain.Cart.newBuilder() + .addAllItems(cart.values().stream().map(this::convert).collect(Collectors.toList())) + .build(); + } + + @SnapshotHandler + public void handleSnapshot(Domain.Cart cart, SnapshotBehaviorContext ctx) { + this.cart.clear(); + for (Domain.LineItem item : cart.getItemsList()) { + this.cart.put(item.getProductId(), convert(item)); + } + this.checkedout = cart.getCheckedout(); + if (this.checkedout) { + ctx.become(new CheckedOut(), this); } + } + + private Shoppingcart.LineItem convert(Domain.LineItem item) { + 
return Shoppingcart.LineItem.newBuilder() + .setProductId(item.getProductId()) + .setName(item.getName()) + .setQuantity(item.getQuantity()) + .build(); + } + + private Domain.LineItem convert(Shoppingcart.LineItem item) { + return Domain.LineItem.newBuilder() + .setProductId(item.getProductId()) + .setName(item.getName()) + .setQuantity(item.getQuantity()) + .build(); + } } // #content diff --git a/docs/src/test/java/docs/user/gettingstarted/ShoppingCartMain.java b/docs/src/test/java/docs/user/gettingstarted/ShoppingCartMain.java index 100173373..d3a85c449 100644 --- a/docs/src/test/java/docs/user/gettingstarted/ShoppingCartMain.java +++ b/docs/src/test/java/docs/user/gettingstarted/ShoppingCartMain.java @@ -7,12 +7,13 @@ public class ShoppingCartMain { public static void main(String... args) { - new CloudState().registerEventSourcedEntity( - ShoppingCartEntity.class, - Shoppingcart.getDescriptor().findServiceByName("ShoppingCart") - ).start(); + new CloudState() + .registerEventSourcedEntity( + ShoppingCartEntity.class, + Shoppingcart.getDescriptor().findServiceByName("ShoppingCart")) + .start(); } } // #shopping-cart-main -class ShoppingCartEntity {} \ No newline at end of file +class ShoppingCartEntity {} diff --git a/java-support/src/main/java/io/cloudstate/javasupport/ClientActionContext.java b/java-support/src/main/java/io/cloudstate/javasupport/ClientActionContext.java index b5806296e..0e1257a8e 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/ClientActionContext.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/ClientActionContext.java @@ -2,27 +2,28 @@ /** * Context that provides client actions, which include failing and forwarding. - *
- * These contexts are typically made available in response to commands.
+ *
These contexts are typically made available in response to commands. */ public interface ClientActionContext extends Context { - /** - * Fail the command with the given message. - * - * @param errorMessage The error message to send to the client. - */ - RuntimeException fail(String errorMessage); + /** + * Fail the command with the given message. + * + * @param errorMessage The error message to send to the client. + */ + RuntimeException fail(String errorMessage); - /** - * Instruct the proxy to forward handling of this command to another entity served by this stateful function. - *
- * The command will be forwarded after successful completion of handling this command, including any persistence
- * that this command does.
-
- * {@link ServiceCall} instances can be created using the {@link ServiceCallFactory} obtained from any (including - * this) contexts {@link Context#serviceCallFactory()} method. - * - * @param to The service call to forward command processing to. - */ - void forward(ServiceCall to); + /** + * Instruct the proxy to forward handling of this command to another entity served by this + * stateful function. + * + *
The command will be forwarded after successful completion of handling this command,
+ * including any persistence that this command does.
+ *
{@link ServiceCall} instances can be created using the {@link ServiceCallFactory} obtained + * from any (including this) contexts {@link Context#serviceCallFactory()} method. + * + * @param to The service call to forward command processing to. + */ + void forward(ServiceCall to); } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/CloudState.java b/java-support/src/main/java/io/cloudstate/javasupport/CloudState.java index 321e4e32b..cadfd1dcf 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/CloudState.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/CloudState.java @@ -17,178 +17,209 @@ import java.util.Map; /** - * The CloudState class is the main interface to configuring entities to deploy, - * and subsequently starting a local server which will expose these entities to - * the CloudState Proxy Sidecar. + * The CloudState class is the main interface to configuring entities to deploy, and subsequently + * starting a local server which will expose these entities to the CloudState Proxy Sidecar. */ public final class CloudState { - private final Map services = new HashMap<>(); - private ClassLoader classLoader = getClass().getClassLoader(); - private String typeUrlPrefix = AnySupport.DefaultTypeUrlPrefix(); - private AnySupport.Prefer prefer = AnySupport.PREFER_JAVA(); - - /** - * Sets the ClassLoader to be used for reflective access, - * the default value is the ClassLoader of the CloudState class. - * - * @param classLoader A non-null ClassLoader to be used for reflective access. - * @return This CloudState instance. - */ - public CloudState withClassLoader(ClassLoader classLoader) { - this.classLoader = classLoader; - return this; + private final Map services = new HashMap<>(); + private ClassLoader classLoader = getClass().getClassLoader(); + private String typeUrlPrefix = AnySupport.DefaultTypeUrlPrefix(); + private AnySupport.Prefer prefer = AnySupport.PREFER_JAVA(); + + /** + * Sets the ClassLoader to be used for reflective access, the default value is the ClassLoader of + * the CloudState class. + * + * @param classLoader A non-null ClassLoader to be used for reflective access. + * @return This CloudState instance. + */ + public CloudState withClassLoader(ClassLoader classLoader) { + this.classLoader = classLoader; + return this; + } + + /** + * Sets the type URL prefix to be used when serializing and deserializing types from and to + * Protobyf Any values. Defaults to "type.googleapis.com". + * + * @param prefix the type URL prefix to be used. + * @return This CloudState instance. + */ + public CloudState withTypeUrlPrefix(String prefix) { + this.typeUrlPrefix = prefix; + return this; + } + + /** + * When locating protobufs, if both a Java and a ScalaPB generated class is found on the + * classpath, this specifies that Java should be preferred. + * + * @return This CloudState instance. + */ + public CloudState preferJavaProtobufs() { + this.prefer = AnySupport.PREFER_JAVA(); + return this; + } + + /** + * When locating protobufs, if both a Java and a ScalaPB generated class is found on the + * classpath, this specifies that Scala should be preferred. + * + * @return This CloudState instance. + */ + public CloudState preferScalaProtobufs() { + this.prefer = AnySupport.PREFER_SCALA(); + return this; + } + + /** + * Register an annotated event sourced entity. + * + *
The entity class must be annotated with {@link + * io.cloudstate.javasupport.eventsourced.EventSourcedEntity}. + * + * @param entityClass The entity class. + * @param descriptor The descriptor for the service that this entity implements. + * @param additionalDescriptors Any additional descriptors that should be used to look up protobuf + * types when needed. + * @return This stateful service builder. + */ + public CloudState registerEventSourcedEntity( + Class entityClass, + Descriptors.ServiceDescriptor descriptor, + Descriptors.FileDescriptor... additionalDescriptors) { + + EventSourcedEntity entity = entityClass.getAnnotation(EventSourcedEntity.class); + if (entity == null) { + throw new IllegalArgumentException( + entityClass + " does not declare an " + EventSourcedEntity.class + " annotation!"); } - /** - * Sets the type URL prefix to be used when serializing and deserializing - * types from and to Protobyf Any values. Defaults to "type.googleapis.com". - * - * @param prefix the type URL prefix to be used. - * @return This CloudState instance. - */ - public CloudState withTypeUrlPrefix(String prefix) { - this.typeUrlPrefix = prefix; - return this; + final String persistenceId; + final int snapshotEvery; + if (entity.persistenceId().isEmpty()) { + persistenceId = entityClass.getSimpleName(); + snapshotEvery = 0; // Default + } else { + persistenceId = entity.persistenceId(); + snapshotEvery = entity.snapshotEvery(); } - /** - * When locating protobufs, if both a Java and a ScalaPB generated class is found on the classpath, - * this specifies that Java should be preferred. - * - * @return This CloudState instance. - */ - public CloudState preferJavaProtobufs() { - this.prefer = AnySupport.PREFER_JAVA(); - return this; - } - - /** - * When locating protobufs, if both a Java and a ScalaPB generated class is found on the classpath, - * this specifies that Scala should be preferred. - * - * @return This CloudState instance. - */ - public CloudState preferScalaProtobufs() { - this.prefer = AnySupport.PREFER_SCALA(); - return this; - } - - /** - * Register an annotated event sourced entity. - *
- * The entity class must be annotated with {@link io.cloudstate.javasupport.eventsourced.EventSourcedEntity}. - * - * @param entityClass The entity class. - * @param descriptor The descriptor for the service that this entity implements. - * @param additionalDescriptors Any additional descriptors that should be used to look up protobuf types when needed. - * @return This stateful service builder. - */ - public CloudState registerEventSourcedEntity(Class entityClass, Descriptors.ServiceDescriptor descriptor, - Descriptors.FileDescriptor... additionalDescriptors) { - - EventSourcedEntity entity = entityClass.getAnnotation(EventSourcedEntity.class); - if (entity == null) { - throw new IllegalArgumentException(entityClass + " does not declare an " + EventSourcedEntity.class + " annotation!"); - } - - final String persistenceId; - final int snapshotEvery; - if (entity.persistenceId().isEmpty()) { - persistenceId = entityClass.getSimpleName(); - snapshotEvery = 0; // Default - } else { - persistenceId = entity.persistenceId(); - snapshotEvery = entity.snapshotEvery(); - } - - final AnySupport anySupport = newAnySupport(additionalDescriptors); - - services.put(descriptor.getFullName(), new EventSourcedStatefulService( - new AnnotationBasedEventSourcedSupport(entityClass, anySupport, descriptor), descriptor, - anySupport, persistenceId, snapshotEvery - )); - - return this; - } - - /** - * Register an event sourced entity factor. - *
- * This is a low level API intended for custom (eg, non reflection based) mechanisms for implementing the entity. - * - * @param factory The event sourced factory. - * @param descriptor The descriptor for the service that this entity implements. - * @param persistenceId The persistence id for this entity. - * @param snapshotEvery Specifies how snapshots of the entity state should be made: Zero means use default from configuration file. (Default) Any negative value means never snapshot. Any positive value means snapshot at-or-after that number of events. - * @param additionalDescriptors Any additional descriptors that should be used to look up protobuf types when needed. - * @return This stateful service builder. - */ - public CloudState registerEventSourcedEntity(EventSourcedEntityFactory factory, Descriptors.ServiceDescriptor descriptor, - String persistenceId, int snapshotEvery, Descriptors.FileDescriptor... additionalDescriptors) { - services.put(descriptor.getFullName(), new EventSourcedStatefulService(factory, descriptor, - newAnySupport(additionalDescriptors), persistenceId, snapshotEvery)); - - return this; - - } - - /** - * Register an annotated CRDT entity. - *
- * The entity class must be annotated with {@link io.cloudstate.javasupport.crdt.CrdtEntity}. - * - * @param entityClass The entity class. - * @param descriptor The descriptor for the service that this entity implements. - * @param additionalDescriptors Any additional descriptors that should be used to look up protobuf types when needed. - * @return This stateful service builder. - */ - public CloudState registerCrdtEntity(Class entityClass, Descriptors.ServiceDescriptor descriptor, - Descriptors.FileDescriptor... additionalDescriptors) { - - CrdtEntity entity = entityClass.getAnnotation(CrdtEntity.class); - if (entity == null) { - throw new IllegalArgumentException(entityClass + " does not declare an " + CrdtEntity.class + " annotation!"); - } - - final AnySupport anySupport = newAnySupport(additionalDescriptors); - - services.put(descriptor.getFullName(), new CrdtStatefulService( - new AnnotationBasedCrdtSupport(entityClass, anySupport, descriptor), descriptor, anySupport - )); - - return this; - } - - /** - * Register an CRDt entity factory. - *
- * This is a low level API intended for custom (eg, non reflection based) mechanisms for implementing the entity. - * - * @param factory The CRDT factory. - * @param descriptor The descriptor for the service that this entity implements. - * @param additionalDescriptors Any additional descriptors that should be used to look up protobuf types when needed. - * @return This stateful service builder. - */ - public CloudState registerCrdtEntity(CrdtEntityFactory factory, Descriptors.ServiceDescriptor descriptor, - Descriptors.FileDescriptor... additionalDescriptors) { - services.put(descriptor.getFullName(), new CrdtStatefulService(factory, descriptor, - newAnySupport(additionalDescriptors))); - - return this; - - } - - /** - * Starts a server with the configured entities. - * - * @return a CompletionStage which will be completed when the server has shut down. - */ - public CompletionStage start() { - return new CloudStateRunner(services).run(); - } - - private AnySupport newAnySupport(Descriptors.FileDescriptor[] descriptors) { - return new AnySupport(descriptors, classLoader, typeUrlPrefix, prefer); + final AnySupport anySupport = newAnySupport(additionalDescriptors); + + services.put( + descriptor.getFullName(), + new EventSourcedStatefulService( + new AnnotationBasedEventSourcedSupport(entityClass, anySupport, descriptor), + descriptor, + anySupport, + persistenceId, + snapshotEvery)); + + return this; + } + + /** + * Register an event sourced entity factor. + * + *
This is a low level API intended for custom (eg, non reflection based) mechanisms for + * implementing the entity. + * + * @param factory The event sourced factory. + * @param descriptor The descriptor for the service that this entity implements. + * @param persistenceId The persistence id for this entity. + * @param snapshotEvery Specifies how snapshots of the entity state should be made: Zero means use + * default from configuration file. (Default) Any negative value means never snapshot. Any + * positive value means snapshot at-or-after that number of events. + * @param additionalDescriptors Any additional descriptors that should be used to look up protobuf + * types when needed. + * @return This stateful service builder. + */ + public CloudState registerEventSourcedEntity( + EventSourcedEntityFactory factory, + Descriptors.ServiceDescriptor descriptor, + String persistenceId, + int snapshotEvery, + Descriptors.FileDescriptor... additionalDescriptors) { + services.put( + descriptor.getFullName(), + new EventSourcedStatefulService( + factory, + descriptor, + newAnySupport(additionalDescriptors), + persistenceId, + snapshotEvery)); + + return this; + } + + /** + * Register an annotated CRDT entity. + * + *
The entity class must be annotated with {@link io.cloudstate.javasupport.crdt.CrdtEntity}. + * + * @param entityClass The entity class. + * @param descriptor The descriptor for the service that this entity implements. + * @param additionalDescriptors Any additional descriptors that should be used to look up protobuf + * types when needed. + * @return This stateful service builder. + */ + public CloudState registerCrdtEntity( + Class entityClass, + Descriptors.ServiceDescriptor descriptor, + Descriptors.FileDescriptor... additionalDescriptors) { + + CrdtEntity entity = entityClass.getAnnotation(CrdtEntity.class); + if (entity == null) { + throw new IllegalArgumentException( + entityClass + " does not declare an " + CrdtEntity.class + " annotation!"); } + final AnySupport anySupport = newAnySupport(additionalDescriptors); + + services.put( + descriptor.getFullName(), + new CrdtStatefulService( + new AnnotationBasedCrdtSupport(entityClass, anySupport, descriptor), + descriptor, + anySupport)); + + return this; + } + + /** + * Register an CRDt entity factory. + * + *
This is a low level API intended for custom (eg, non reflection based) mechanisms for + * implementing the entity. + * + * @param factory The CRDT factory. + * @param descriptor The descriptor for the service that this entity implements. + * @param additionalDescriptors Any additional descriptors that should be used to look up protobuf + * types when needed. + * @return This stateful service builder. + */ + public CloudState registerCrdtEntity( + CrdtEntityFactory factory, + Descriptors.ServiceDescriptor descriptor, + Descriptors.FileDescriptor... additionalDescriptors) { + services.put( + descriptor.getFullName(), + new CrdtStatefulService(factory, descriptor, newAnySupport(additionalDescriptors))); + + return this; + } + + /** + * Starts a server with the configured entities. + * + * @return a CompletionStage which will be completed when the server has shut down. + */ + public CompletionStage start() { + return new CloudStateRunner(services).run(); + } + + private AnySupport newAnySupport(Descriptors.FileDescriptor[] descriptors) { + return new AnySupport(descriptors, classLoader, typeUrlPrefix, prefer); + } } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/Context.java b/java-support/src/main/java/io/cloudstate/javasupport/Context.java index 8169a8e78..5e6458397 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/Context.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/Context.java @@ -1,11 +1,7 @@ package io.cloudstate.javasupport; -/** - * Root class of all contexts. - */ +/** Root class of all contexts. */ public interface Context { - /** - * Get the service call factory for this stateful service. - */ - ServiceCallFactory serviceCallFactory(); + /** Get the service call factory for this stateful service. */ + ServiceCallFactory serviceCallFactory(); } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/EffectContext.java b/java-support/src/main/java/io/cloudstate/javasupport/EffectContext.java index e593896b0..6814d55e6 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/EffectContext.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/EffectContext.java @@ -1,34 +1,32 @@ package io.cloudstate.javasupport; -/** - * A context that allows instructing the proxy to perform a side effect. - */ +/** A context that allows instructing the proxy to perform a side effect. */ public interface EffectContext extends Context { - /** - * Invoke the referenced service call as an effect once this action is completed. - *
- * The effect will be performed asynchronously, ie, the proxy won't wait for the effect to finish before sending
- * the reply.
- *
- * {@link ServiceCall} instances can be created using the {@link ServiceCallFactory} obtained from any (including - * this) contexts {@link Context#serviceCallFactory()} method. - * - * @param effect The service call to make as an effect effect. - */ - default void effect(ServiceCall effect) { - this.effect(effect, false); - } + /** + * Invoke the referenced service call as an effect once this action is completed. + * + *
The effect will be performed asynchronously, ie, the proxy won't wait for the effect to
+ * finish before sending the reply.
+ *
{@link ServiceCall} instances can be created using the {@link ServiceCallFactory} obtained + * from any (including this) contexts {@link Context#serviceCallFactory()} method. + * + * @param effect The service call to make as an effect effect. + */ + default void effect(ServiceCall effect) { + this.effect(effect, false); + } - /** - * Invoke the referenced service call as an effect once this action is completed. - *
- * {@link ServiceCall} instances can be created using the {@link ServiceCallFactory} obtained from any (including - * this) contexts {@link Context#serviceCallFactory()} method. - * - * @param effect The service call to make as an effect effect. - * @param synchronous Whether the effect should be performed synchronously (ie, wait till it has finished before - * sending a reply) or asynchronously. - */ - void effect(ServiceCall effect, boolean synchronous); + /** + * Invoke the referenced service call as an effect once this action is completed. + * + *
{@link ServiceCall} instances can be created using the {@link ServiceCallFactory} obtained + * from any (including this) contexts {@link Context#serviceCallFactory()} method. + * + * @param effect The service call to make as an effect effect. + * @param synchronous Whether the effect should be performed synchronously (ie, wait till it has + * finished before sending a reply) or asynchronously. + */ + void effect(ServiceCall effect, boolean synchronous); } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/EntityContext.java b/java-support/src/main/java/io/cloudstate/javasupport/EntityContext.java index 31c83e857..53b7e005a 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/EntityContext.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/EntityContext.java @@ -1,14 +1,15 @@ package io.cloudstate.javasupport; /** - * Root context for all contexts that pertain to entities, that is, things that are addressable via an entity id. + * Root context for all contexts that pertain to entities, that is, things that are addressable via + * an entity id. */ public interface EntityContext extends Context { - /** - * The id of the entity that this context is for. - * - * @return The entity id. - */ - String entityId(); + /** + * The id of the entity that this context is for. + * + * @return The entity id. + */ + String entityId(); } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/EntityId.java b/java-support/src/main/java/io/cloudstate/javasupport/EntityId.java index dd78b069c..e89bf6dbc 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/EntityId.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/EntityId.java @@ -1,6 +1,5 @@ package io.cloudstate.javasupport; - import io.cloudstate.javasupport.impl.CloudStateAnnotation; import java.lang.annotation.ElementType; @@ -10,14 +9,13 @@ /** * Annotation used to indicate that the annotated parameter accepts an entity id. - *
- * This parameter may appear on handler methods and constructors for any class that provides behavior for stateful
- * service entity.
- *
- * The type of the parameter must be {@link String}.
+ *
This parameter may appear on handler methods and constructors for any class that provides
+ * behavior for stateful service entity.
+ *
The type of the parameter must be {@link String}. */ @CloudStateAnnotation @Target(ElementType.PARAMETER) @Retention(RetentionPolicy.RUNTIME) -public @interface EntityId { -} +public @interface EntityId {} diff --git a/java-support/src/main/java/io/cloudstate/javasupport/Jsonable.java b/java-support/src/main/java/io/cloudstate/javasupport/Jsonable.java index ce1239d37..de4eb6a43 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/Jsonable.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/Jsonable.java @@ -7,11 +7,8 @@ import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; -/** - * Indicates that this class can be serialized to/from JSON. - */ +/** Indicates that this class can be serialized to/from JSON. */ @CloudStateAnnotation @Retention(RetentionPolicy.RUNTIME) @Target(ElementType.TYPE) -public @interface Jsonable { -} +public @interface Jsonable {} diff --git a/java-support/src/main/java/io/cloudstate/javasupport/ServiceCall.java b/java-support/src/main/java/io/cloudstate/javasupport/ServiceCall.java index 4957183ae..da11cbd16 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/ServiceCall.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/ServiceCall.java @@ -3,22 +3,20 @@ import com.google.protobuf.Any; import com.google.protobuf.Descriptors; -/** - * Represents a call to a service, performed either as a forward, or as an effect. - */ +/** Represents a call to a service, performed either as a forward, or as an effect. */ public interface ServiceCall { - /** - * The reference to the call. - * - * @return The reference to the call. - */ - ServiceCallRef ref(); + /** + * The reference to the call. + * + * @return The reference to the call. + */ + ServiceCallRef ref(); - /** - * The message to pass to the call when the call is invoked. - * - * @return The message to pass to the call, serialized as an {@link Any}. - */ - Any message(); + /** + * The message to pass to the call when the call is invoked. + * + * @return The message to pass to the call, serialized as an {@link Any}. + */ + Any message(); } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/ServiceCallFactory.java b/java-support/src/main/java/io/cloudstate/javasupport/ServiceCallFactory.java index 6e4dfefc7..4dcddb1c7 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/ServiceCallFactory.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/ServiceCallFactory.java @@ -2,23 +2,24 @@ /** * A service call factory. - *
- * This is used to create {@link ServiceCall}'s that can be passed to {@link EffectContext#effect(ServiceCall)} and
- * {@link ClientActionContext#forward(ServiceCall)} f}.
+ *
This is used to create {@link ServiceCall}'s that can be passed to {@link + * EffectContext#effect(ServiceCall)} and {@link ClientActionContext#forward(ServiceCall)} f}. */ public interface ServiceCallFactory { - /** - * Lookup a reference to the service call with the given name and method. - * - * @param serviceName The fully qualified name of a gRPC service that this stateful service serves. - * @param methodName The name of a method on the gRPC service. - * @param messageType The expected type of the input message to the method. - * @param The type of the parameter that it accepts. - * @return A reference to the service call. - * @throws java.util.NoSuchElementException if the service or method is not found. - * @throws IllegalArgumentException if the accepted input type for the method doesn't match - * messageType. - */ - ServiceCallRef lookup(String serviceName, String methodName, Class messageType); + /** + * Lookup a reference to the service call with the given name and method. + * + * @param serviceName The fully qualified name of a gRPC service that this stateful service + * serves. + * @param methodName The name of a method on the gRPC service. + * @param messageType The expected type of the input message to the method. + * @param The type of the parameter that it accepts. + * @return A reference to the service call. + * @throws java.util.NoSuchElementException if the service or method is not found. + * @throws IllegalArgumentException if the accepted input type for the method doesn't match + * messageType. + */ + ServiceCallRef lookup(String serviceName, String methodName, Class messageType); } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/ServiceCallRef.java b/java-support/src/main/java/io/cloudstate/javasupport/ServiceCallRef.java index cf4cdb2f1..63b499466 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/ServiceCallRef.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/ServiceCallRef.java @@ -8,18 +8,19 @@ * @param The type of message the call accepts. */ public interface ServiceCallRef { - /** - * The protobuf descriptor for the method. - * - * @return The protobuf descriptor for the method. - */ - Descriptors.MethodDescriptor method(); + /** + * The protobuf descriptor for the method. + * + * @return The protobuf descriptor for the method. + */ + Descriptors.MethodDescriptor method(); - /** - * Create a call from this reference, using the given message as the message to pass to it when it's invoked. - * - * @param message The message to pass to the method. - * @return A service call that can be used as a forward or effect. - */ - ServiceCall createCall(T message); + /** + * Create a call from this reference, using the given message as the message to pass to it when + * it's invoked. + * + * @param message The message to pass to the method. + * @return A service call that can be used as a forward or effect. + */ + ServiceCall createCall(T message); } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/crdt/AbstractORMapWrapper.java b/java-support/src/main/java/io/cloudstate/javasupport/crdt/AbstractORMapWrapper.java index 753c219b3..cadb2c079 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/crdt/AbstractORMapWrapper.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/crdt/AbstractORMapWrapper.java @@ -2,141 +2,141 @@ import java.util.*; -/** - * Utility class for helping implement {@link ORMap} based CRDTs. 
- */ -abstract class AbstractORMapWrapper extends AbstractMap implements Map { +/** Utility class for helping implement {@link ORMap} based CRDTs. */ +abstract class AbstractORMapWrapper extends AbstractMap + implements Map { - final ORMap ormap; + final ORMap ormap; - AbstractORMapWrapper(ORMap ormap) { - this.ormap = ormap; - } + AbstractORMapWrapper(ORMap ormap) { + this.ormap = ormap; + } - abstract V getCrdtValue(C crdt); + abstract V getCrdtValue(C crdt); - abstract void setCrdtValue(C crdt, V value); + abstract void setCrdtValue(C crdt, V value); - abstract C getOrUpdateCrdt(K key, V value); + abstract C getOrUpdateCrdt(K key, V value); - @Override - public int size() { - return ormap.size(); - } + @Override + public int size() { + return ormap.size(); + } - @Override - public boolean containsKey(Object key) { - return ormap.containsKey(key); - } + @Override + public boolean containsKey(Object key) { + return ormap.containsKey(key); + } - @Override - public V get(Object key) { - C crdt = ormap.get(key); - if (crdt != null) { - return getCrdtValue(crdt); - } else { - return null; - } + @Override + public V get(Object key) { + C crdt = ormap.get(key); + if (crdt != null) { + return getCrdtValue(crdt); + } else { + return null; + } + } + + @Override + public V put(K key, V value) { + C existing = ormap.get(key); + if (existing != null) { + V old = getCrdtValue(existing); + setCrdtValue(existing, value); + return old; + } else { + getOrUpdateCrdt(key, value); + return null; } + } + + @Override + public V remove(Object key) { + C old = ormap.remove(key); + if (old != null) { + return getCrdtValue(old); + } else { + return null; + } + } - @Override - public V put(K key, V value) { - C existing = ormap.get(key); - if (existing != null) { - V old = getCrdtValue(existing); - setCrdtValue(existing, value); - return old; - } else { - getOrUpdateCrdt(key, value); - return null; - } + @Override + public void clear() { + ormap.clear(); + } + + @Override + public Set keySet() { + return ormap.keySet(); + } + + @Override + public Set> entrySet() { + return new EntrySet(); + } + + private final class MapEntry implements Entry { + private final Entry entry; + + MapEntry(Entry entry) { + this.entry = entry; } @Override - public V remove(Object key) { - C old = ormap.remove(key); - if (old != null) { - return getCrdtValue(old); - } else { - return null; - } + public K getKey() { + return entry.getKey(); } @Override - public void clear() { - ormap.clear(); + public V getValue() { + return getCrdtValue(entry.getValue()); } @Override - public Set keySet() { - return ormap.keySet(); + public V setValue(V value) { + V old = getCrdtValue(entry.getValue()); + setCrdtValue(entry.getValue(), value); + return old; } + } + private final class EntrySet extends AbstractSet> implements Set> { @Override - public Set> entrySet() { - return new EntrySet(); + public int size() { + return ormap.size(); } - private final class MapEntry implements Entry { - private final Entry entry; - - MapEntry(Entry entry) { - this.entry = entry; - } + @Override + public Iterator> iterator() { + return new Iterator>() { + private final Iterator> iter = ormap.entrySet().iterator(); @Override - public K getKey() { - return entry.getKey(); + public boolean hasNext() { + return iter.hasNext(); } @Override - public V getValue() { - return getCrdtValue(entry.getValue()); + public Entry next() { + return new MapEntry(iter.next()); } @Override - public V setValue(V value) { - V old = getCrdtValue(entry.getValue()); - 
setCrdtValue(entry.getValue(), value); - return old; + public void remove() { + iter.remove(); } + }; } - private final class EntrySet extends AbstractSet> implements Set> { - @Override - public int size() { - return ormap.size(); - } - - @Override - public Iterator> iterator() { - return new Iterator>() { - private final Iterator> iter = ormap.entrySet().iterator(); - @Override - public boolean hasNext() { - return iter.hasNext(); - } - - @Override - public Entry next() { - return new MapEntry(iter.next()); - } - - @Override - public void remove() { - iter.remove(); - } - }; - } - - @Override - public boolean add(Entry kvEntry) { - return !kvEntry.getValue().equals(put(kvEntry.getKey(), kvEntry.getValue())); - } + @Override + public boolean add(Entry kvEntry) { + return !kvEntry.getValue().equals(put(kvEntry.getKey(), kvEntry.getValue())); + } - @Override - public void clear() { - ormap.clear(); - } + @Override + public void clear() { + ormap.clear(); } + } } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/crdt/CommandContext.java b/java-support/src/main/java/io/cloudstate/javasupport/crdt/CommandContext.java index fe90c9f85..048cd8b7d 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/crdt/CommandContext.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/crdt/CommandContext.java @@ -7,33 +7,35 @@ /** * Context for handling a command. - *
- * This may be passed to any {@link CommandHandler} annotated element.
+ *
This may be passed to any {@link CommandHandler} annotated element. */ -public interface CommandContext extends CrdtContext, CrdtFactory, EffectContext, ClientActionContext { - /** - * The id of the command. This is an internal ID generated by the proxy, and is unique to a given entity stream. - * It may be used for debugging purposes. - * - * @return The ID of the command. - */ - long commandId(); +public interface CommandContext + extends CrdtContext, CrdtFactory, EffectContext, ClientActionContext { + /** + * The id of the command. This is an internal ID generated by the proxy, and is unique to a given + * entity stream. It may be used for debugging purposes. + * + * @return The ID of the command. + */ + long commandId(); - /** - * The name of the command. - *
- * Corresponds to the name of the rpc call in the protobuf definition. - * - * @return The name of the command. - */ - String commandName(); + /** + * The name of the command. + * + *
Corresponds to the name of the rpc call in the protobuf definition. + * + * @return The name of the command. + */ + String commandName(); - /** - * Delete the CRDT. - *
- * When a CRDT is deleted, it may not be created again. Additionally, CRDT deletion results in tombstones that - * get accumulated for the life of the cluster. If you expect to delete CRDTs frequently, it's recommended that you - * store them in a single or sharded {@link ORMap}, rather than individual CRDTs. - */ - void delete(); + /** + * Delete the CRDT. + * + *
When a CRDT is deleted, it may not be created again. Additionally, CRDT deletion results in + * tombstones that get accumulated for the life of the cluster. If you expect to delete CRDTs + * frequently, it's recommended that you store them in a single or sharded {@link ORMap}, rather + * than individual CRDTs. + */ + void delete(); } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/crdt/CommandHandler.java b/java-support/src/main/java/io/cloudstate/javasupport/crdt/CommandHandler.java index c1f72856d..f937d068b 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/crdt/CommandHandler.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/crdt/CommandHandler.java @@ -9,28 +9,30 @@ /** * Marks a method as a command handler. - *
- * This method will be invoked whenever the service call with name that matches this command handlers name is invoked.
- *
- * The method may take the command object as a parameter, its type must match the gRPC service input type.
- *
- * The return type of the method must match the gRPC services output type.
- *
- * The method may also take a {@link CommandContext}, and/or a {@link io.cloudstate.javasupport.EntityId} annotated
- * {@link String} parameter.
+ *

This method will be invoked whenever the service call with name that matches this command + * handlers name is invoked. + * + *

The method may take the command object as a parameter, its type must match the gRPC service + * input type. + * + *

The return type of the method must match the gRPC services output type. + * + *

The method may also take a {@link CommandContext}, and/or a {@link + * io.cloudstate.javasupport.EntityId} annotated {@link String} parameter. */ @CloudStateAnnotation @Target(ElementType.METHOD) @Retention(RetentionPolicy.RUNTIME) public @interface CommandHandler { - /** - * The name of the command to handle. - *

- * If not specified, the name of the method will be used as the command name, with the first letter capitalized to - * match the gRPC convention of capitalizing rpc method names. - * - * @return The command name. - */ - String name() default ""; + /** + * The name of the command to handle. + * + *

If not specified, the name of the method will be used as the command name, with the first + * letter capitalized to match the gRPC convention of capitalizing rpc method names. + * + * @return The command name. + */ + String name() default ""; } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/crdt/Crdt.java b/java-support/src/main/java/io/cloudstate/javasupport/crdt/Crdt.java index 663b238d3..15bcc87f7 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/crdt/Crdt.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/crdt/Crdt.java @@ -1,7 +1,4 @@ package io.cloudstate.javasupport.crdt; -/** - * Root interface for all CRDTs. - */ -public interface Crdt { -} +/** Root interface for all CRDTs. */ +public interface Crdt {} diff --git a/java-support/src/main/java/io/cloudstate/javasupport/crdt/CrdtContext.java b/java-support/src/main/java/io/cloudstate/javasupport/crdt/CrdtContext.java index 32fabe4f7..22b3dddcd 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/crdt/CrdtContext.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/crdt/CrdtContext.java @@ -4,16 +4,15 @@ import java.util.Optional; -/** - * Root context for all CRDT contexts. - */ +/** Root context for all CRDT contexts. */ public interface CrdtContext extends EntityContext { - /** - * The current CRDT, if it's been created. - * - * @param crdtClass The type of the CRDT that is expected. - * @return The current CRDT, or empty if none has been created yet. - * @throws IllegalStateException If the current CRDT does not match the passed in crdtClass type. - */ - Optional state(Class crdtClass) throws IllegalStateException; + /** + * The current CRDT, if it's been created. + * + * @param crdtClass The type of the CRDT that is expected. + * @return The current CRDT, or empty if none has been created yet. + * @throws IllegalStateException If the current CRDT does not match the passed in crdtClass + * type. + */ + Optional state(Class crdtClass) throws IllegalStateException; } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/crdt/CrdtCreationContext.java b/java-support/src/main/java/io/cloudstate/javasupport/crdt/CrdtCreationContext.java index 70d86ba64..459e918fc 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/crdt/CrdtCreationContext.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/crdt/CrdtCreationContext.java @@ -2,8 +2,7 @@ /** * Context for CRDT creation. - *

- * This is available for injection into the constructor of a CRDT. + * + *

This is available for injection into the constructor of a CRDT. */ -public interface CrdtCreationContext extends CrdtContext, CrdtFactory { -} +public interface CrdtCreationContext extends CrdtContext, CrdtFactory {} diff --git a/java-support/src/main/java/io/cloudstate/javasupport/crdt/CrdtEntity.java b/java-support/src/main/java/io/cloudstate/javasupport/crdt/CrdtEntity.java index e1dce3855..e59c28211 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/crdt/CrdtEntity.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/crdt/CrdtEntity.java @@ -9,18 +9,19 @@ /** * A CRDT backed entity. - *

- * CRDT entities store their state in a subclass {@link Crdt}. These may be created using a {@link CrdtFactory}, which - * can be injected into the constructor or as a parameter to any {@link CommandHandler} annotated method. - *

- * Only one CRDT may be created, it is important that before creating a CRDT, the entity should check whether the CRDT - * has already been created, for example, it may have been created on another node and replicated to this node. To - * check, either use the {@link CrdtContext#state(Class)} method, which can be injected into the constructor or any - * {@link CommandHandler} method, or have an instance of the CRDT wrapped in {@link java.util.Optional} injected into - * the constructor or command handler methods. + * + *

CRDT entities store their state in a subclass {@link Crdt}. These may be created using a + * {@link CrdtFactory}, which can be injected into the constructor or as a parameter to any {@link + * CommandHandler} annotated method. + * + *

Only one CRDT may be created, it is important that before creating a CRDT, the entity should + * check whether the CRDT has already been created, for example, it may have been created on another + * node and replicated to this node. To check, either use the {@link CrdtContext#state(Class)} + * method, which can be injected into the constructor or any {@link CommandHandler} method, or have + * an instance of the CRDT wrapped in {@link java.util.Optional} injected into the constructor or + * command handler methods. */ @CloudStateAnnotation @Target(ElementType.TYPE) @Retention(RetentionPolicy.RUNTIME) -public @interface CrdtEntity { -} +public @interface CrdtEntity {} diff --git a/java-support/src/main/java/io/cloudstate/javasupport/crdt/CrdtEntityFactory.java b/java-support/src/main/java/io/cloudstate/javasupport/crdt/CrdtEntityFactory.java index 41a096eaa..e81ee9854 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/crdt/CrdtEntityFactory.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/crdt/CrdtEntityFactory.java @@ -2,18 +2,19 @@ /** * Low level interface for handling commands for CRDTs. - *
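A minimal sketch of the create-if-absent pattern described above, assuming a hypothetical feature-flag entity; the Flag may already have been created and replicated from another node, so the constructor checks before creating it:

import io.cloudstate.javasupport.crdt.CrdtCreationContext;
import io.cloudstate.javasupport.crdt.CrdtEntity;
import io.cloudstate.javasupport.crdt.Flag;

@CrdtEntity
public class FeatureFlagEntity {
  private final Flag flag;

  // CrdtCreationContext is both a CrdtContext (state lookup) and a CrdtFactory
  // (creation), so the existing CRDT is reused when it is already present.
  public FeatureFlagEntity(CrdtCreationContext context) {
    this.flag = context.state(Flag.class).orElseGet(context::newFlag);
  }
}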

- * Generally, this should not be used, rather, a {@link CrdtEntity} annotated class should be used. + * + *

Generally, this should not be used, rather, a {@link CrdtEntity} annotated class should be + * used. */ public interface CrdtEntityFactory { - /** - * Create a CRDT entity handler for the given context. - *

- * This will be invoked each time a new CRDT entity stream from the proxy is established, for handling commands\ - * for a single CRDT. - * - * @param context The creation context. - * @return The handler to handle commands. - */ - CrdtEntityHandler create(CrdtCreationContext context); + /** + * Create a CRDT entity handler for the given context. + * + *

This will be invoked each time a new CRDT entity stream from the proxy is established, for + * handling commands\ for a single CRDT. + * + * @param context The creation context. + * @return The handler to handle commands. + */ + CrdtEntityHandler create(CrdtCreationContext context); } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/crdt/CrdtEntityHandler.java b/java-support/src/main/java/io/cloudstate/javasupport/crdt/CrdtEntityHandler.java index e69f36e0d..7069bf710 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/crdt/CrdtEntityHandler.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/crdt/CrdtEntityHandler.java @@ -6,29 +6,30 @@ /** * Low level interface for handling CRDT commands. - *

- * These are instantiated by a {@link CrdtEntityFactory}. - *

- * Generally, this should not be used, rather, a {@link CrdtEntity} annotated class should be used. + * + *

These are instantiated by a {@link CrdtEntityFactory}. + * + *

Generally, this should not be used, rather, a {@link CrdtEntity} annotated class should be + * used. */ public interface CrdtEntityHandler { - /** - * Handle the given command. During the handling of a command, a CRDT may be created (if not already created) and - * updated. - * - * @param command The command to handle. - * @param context The context for the command. - * @return A reply to the command, if any is sent. - */ - Optional handleCommand(Any command, CommandContext context); + /** + * Handle the given command. During the handling of a command, a CRDT may be created (if not + * already created) and updated. + * + * @param command The command to handle. + * @param context The context for the command. + * @return A reply to the command, if any is sent. + */ + Optional handleCommand(Any command, CommandContext context); - /** - * Handle the given stream command. During the handling of a command, a CRDT may be created (if not already created) - * and updated. - * - * @param command The command to handle. - * @param context The context for the command. - * @return A reply to the command, if any is sent. - */ - Optional handleStreamedCommand(Any command, StreamedCommandContext context); + /** + * Handle the given stream command. During the handling of a command, a CRDT may be created (if + * not already created) and updated. + * + * @param command The command to handle. + * @param context The context for the command. + * @return A reply to the command, if any is sent. + */ + Optional handleStreamedCommand(Any command, StreamedCommandContext context); } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/crdt/CrdtFactory.java b/java-support/src/main/java/io/cloudstate/javasupport/crdt/CrdtFactory.java index f184cf6bb..d8007059b 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/crdt/CrdtFactory.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/crdt/CrdtFactory.java @@ -2,66 +2,67 @@ /** * Factory for creating CRDTs. - *

- * This is used both by CRDT contexts that allow creating CRDTs, as well as by CRDTs that allow nesting other CRDTs. - *

- * CRDTs may only be created by a supplied CRDT factory, CRDTs created any other way will not be known by the - * library and so won't have their deltas synced to and from the proxy. + * + *

This is used both by CRDT contexts that allow creating CRDTs, as well as by CRDTs that allow + * nesting other CRDTs. + * + *

CRDTs may only be created by a supplied CRDT factory, CRDTs created any other way will not be + * known by the library and so won't have their deltas synced to and from the proxy. */ public interface CrdtFactory { - /** - * Create a new GCounter. - * - * @return The new GCounter. - */ - GCounter newGCounter(); + /** + * Create a new GCounter. + * + * @return The new GCounter. + */ + GCounter newGCounter(); - /** - * Create a new PNCounter. - * - * @return The new PNCounter. - */ - PNCounter newPNCounter(); + /** + * Create a new PNCounter. + * + * @return The new PNCounter. + */ + PNCounter newPNCounter(); - /** - * Create a new GSet. - * - * @return The new GSet. - */ - GSet newGSet(); + /** + * Create a new GSet. + * + * @return The new GSet. + */ + GSet newGSet(); - /** - * Create a new ORSet. - * - * @return The new ORSet. - */ - ORSet newORSet(); + /** + * Create a new ORSet. + * + * @return The new ORSet. + */ + ORSet newORSet(); - /** - * Create a new Flag. - * - * @return The new Flag. - */ - Flag newFlag(); + /** + * Create a new Flag. + * + * @return The new Flag. + */ + Flag newFlag(); - /** - * Create a new LWWRegister. - * - * @return The new LWWRegister. - */ - LWWRegister newLWWRegister(T value); + /** + * Create a new LWWRegister. + * + * @return The new LWWRegister. + */ + LWWRegister newLWWRegister(T value); - /** - * Create a new ORMap. - * - * @return The new ORMap. - */ - ORMap newORMap(); + /** + * Create a new ORMap. + * + * @return The new ORMap. + */ + ORMap newORMap(); - /** - * Create a new Vote. - * - * @return The new Vote. - */ - Vote newVote(); + /** + * Create a new Vote. + * + * @return The new Vote. + */ + Vote newVote(); } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/crdt/Flag.java b/java-support/src/main/java/io/cloudstate/javasupport/crdt/Flag.java index d328eccec..cc4e6c8fd 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/crdt/Flag.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/crdt/Flag.java @@ -2,20 +2,18 @@ /** * A flag CRDT. - *

- * A flag is a boolean value that starts out as false, and once set to true, stays - * true, it cannot be set back to false. + * + *

A flag is a boolean value that starts out as false, and once set to true + * , stays true, it cannot be set back to false. */ public interface Flag extends Crdt { - /** - * Whether this flag is enabled. - * - * @return True if the flag is enabled. - */ - boolean isEnabled(); + /** + * Whether this flag is enabled. + * + * @return True if the flag is enabled. + */ + boolean isEnabled(); - /** - * Enable this flag. - */ - void enable(); + /** Enable this flag. */ + void enable(); } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/crdt/GCounter.java b/java-support/src/main/java/io/cloudstate/javasupport/crdt/GCounter.java index dc322224a..414f3402b 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/crdt/GCounter.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/crdt/GCounter.java @@ -2,23 +2,23 @@ /** * A Grow-only Counter. - *

- * A Grow-only Counter can be incremented, but can't be decremented. + * + *

A Grow-only Counter can be incremented, but can't be decremented. */ public interface GCounter extends Crdt { - /** - * Get the current value of the counter. - * - * @return The current value of the counter. - */ - long getValue(); + /** + * Get the current value of the counter. + * + * @return The current value of the counter. + */ + long getValue(); - /** - * Increment the counter. - * - * @param by The amount to increment the counter by. - * @return The new value of the counter. - * @throws IllegalArgumentException If by is negative. - */ - long increment(long by) throws IllegalArgumentException; + /** + * Increment the counter. + * + * @param by The amount to increment the counter by. + * @return The new value of the counter. + * @throws IllegalArgumentException If by is negative. + */ + long increment(long by) throws IllegalArgumentException; } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/crdt/GSet.java b/java-support/src/main/java/io/cloudstate/javasupport/crdt/GSet.java index 23b28da9e..940638424 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/crdt/GSet.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/crdt/GSet.java @@ -4,24 +4,23 @@ /** * A Grow-only Set. - *
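As a sketch of how a Grow-only Counter might be used from a command handler; the PageViewEntity class, its RecordView rpc and the Empty reply are hypothetical:

import com.google.protobuf.Empty;
import io.cloudstate.javasupport.crdt.CommandContext;
import io.cloudstate.javasupport.crdt.CommandHandler;
import io.cloudstate.javasupport.crdt.CrdtEntity;
import io.cloudstate.javasupport.crdt.GCounter;

@CrdtEntity
public class PageViewEntity {
  @CommandHandler
  public Empty recordView(CommandContext ctx) {
    // CommandContext is also a CrdtFactory, so the counter can be created lazily.
    GCounter views = ctx.state(GCounter.class).orElseGet(ctx::newGCounter);
    views.increment(1); // increment(by) throws IllegalArgumentException for negative values
    return Empty.getDefaultInstance();
  }
}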

- * A Grow-only Set can have elements added to it, but cannot have elements removed from it. - *

- * Care needs to be taken to ensure that the serialized value of elements in the set is stable. For example, if using - * protobufs, the serialized value of any maps contain in the protobuf is not stable, and can yield a different set of - * bytes for the same logically equal element. Hence maps, should be avoided. Additionally, some changes in protobuf - * schemas which are backwards compatible from a protobuf perspective, such as changing from sint32 to int32, do result - * in different serialized bytes, and so must be avoided. + * + *

A Grow-only Set can have elements added to it, but cannot have elements removed from it. + * + *

Care needs to be taken to ensure that the serialized value of elements in the set is stable. + * For example, if using protobufs, the serialized value of any maps contain in the protobuf is not + * stable, and can yield a different set of bytes for the same logically equal element. Hence maps, + * should be avoided. Additionally, some changes in protobuf schemas which are backwards compatible + * from a protobuf perspective, such as changing from sint32 to int32, do result in different + * serialized bytes, and so must be avoided. * * @param The value of the set elements */ public interface GSet extends Crdt, Set { - /** - * Remove is not support on a Grow-only set. - */ - @Override - default boolean remove(Object o) { - throw new UnsupportedOperationException("Remove is not supported on a Grow-only Set."); - } + /** Remove is not support on a Grow-only set. */ + @Override + default boolean remove(Object o) { + throw new UnsupportedOperationException("Remove is not supported on a Grow-only Set."); + } } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/crdt/LWWRegister.java b/java-support/src/main/java/io/cloudstate/javasupport/crdt/LWWRegister.java index f136d36cf..459301f27 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/crdt/LWWRegister.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/crdt/LWWRegister.java @@ -2,87 +2,88 @@ /** * A Last-Write-Wins Register. - *

- * This uses a clock value to determine which of two concurrent writes should win. When both clock values are the same, - * an ordering defined over the node addresses is used to break the tie. - *

- * By default, the clock used is the clock of the node that set the value. This can be affected by clock skew, which - * means two successive writes delegated to two separate nodes could result in the first one winning. This can be - * avoided by using a custom clock with a domain specific clock value, if such a causally ordered value is available. + * + *

This uses a clock value to determine which of two concurrent writes should win. When both + * clock values are the same, an ordering defined over the node addresses is used to break the tie. + * + *

By default, the clock used is the clock of the node that set the value. This can be affected + * by clock skew, which means two successive writes delegated to two separate nodes could result in + * the first one winning. This can be avoided by using a custom clock with a domain specific clock + * value, if such a causally ordered value is available. * * @param */ public interface LWWRegister extends Crdt { - /** - * Get the current value of the register. - * - * @return The current value of the register. - */ - T get(); + /** + * Get the current value of the register. + * + * @return The current value of the register. + */ + T get(); + + /** + * Set the current value of the register, using the default clock. + * + * @param value The value of the register to set. + * @return The old value of the register. + */ + default T set(T value) { + return set(value, Clock.DEFAULT, 0); + } + + /** + * Set the current value of the register, using the given custom clock and clock value if + * required. + * + * @param value The value of the register to set. + * @param clock The clock to use. + * @param customClockValue The custom clock value to use if the clock selected is a custom clock. + * This is ignored if the clock is not a custom clock. + * @return The old value of the register. + */ + T set(T value, Clock clock, long customClockValue); + + /** A clock. */ + enum Clock { + + /** The default clock, uses the current system time as the clock value. */ + DEFAULT, /** - * Set the current value of the register, using the default clock. + * A reverse clock, based on the system clock. + * + *

Using this effectively achieves First-Write-Wins semantics. * - * @param value The value of the register to set. - * @return The old value of the register. + *

This is susceptible to the same clock skew problems as the default clock. */ - default T set(T value) { - return set(value, Clock.DEFAULT, 0); - } + REVERSE, /** - * Set the current value of the register, using the given custom clock and clock value if required. + * A custom clock. * - * @param value The value of the register to set. - * @param clock The clock to use. - * @param customClockValue The custom clock value to use if the clock selected is a custom clock. This is ignored - * if the clock is not a custom clock. - * @return The old value of the register. + *

The custom clock value is passed by using the customClockValue parameter on + * the {@link LWWRegister#set(Object, Clock, long)} method. The value should be a domain + * specific monotonically increasing value. For example, if the source of the value for this + * register is a single device, that device may attach a sequence number to each update, that + * sequence number can be used to guarantee that the register will converge to the last update + * emitted by that device. */ - T set(T value, Clock clock, long customClockValue); + CUSTOM, /** - * A clock. + * A custom clock, that automatically increments the custom value if the local clock value is + * greater than it. + * + *

This is like {@link Clock#CUSTOM}, however if when performing the update in the proxy, + * it's found that the clock value of the register is greater than the specified clock value for + * the update, the proxy will instead use the current clock value of the register plus one. + * + *

This can guarantee that updates done on the same node will be causally ordered (addressing + * problems caused by the system clock being adjusted), but will not guarantee causal ordering + * for updates on different nodes, since it's possible that an update on a different node has + * not yet been replicated to this node. */ - enum Clock { - - /** - * The default clock, uses the current system time as the clock value. - */ - DEFAULT, - - /** - * A reverse clock, based on the system clock. - *

- * Using this effectively achieves First-Write-Wins semantics. - *

- * This is susceptible to the same clock skew problems as the default clock. - */ - REVERSE, - - /** - * A custom clock. - *

- * The custom clock value is passed by using the customClockValue parameter on the - * {@link LWWRegister#set(Object, Clock, long)} method. The value should be a domain specific monotonically - * increasing value. For example, if the source of the value for this register is a single device, that device - * may attach a sequence number to each update, that sequence number can be used to guarantee that the register - * will converge to the last update emitted by that device. - */ - CUSTOM, - - /** - * A custom clock, that automatically increments the custom value if the local clock value is greater than it. - *

- * This is like {@link Clock#CUSTOM}, however if when performing the update in the proxy, it's found that the - * clock value of the register is greater than the specified clock value for the update, the proxy will instead - * use the current clock value of the register plus one. - *

- * This can guarantee that updates done on the same node will be causally ordered (addressing problems caused by - * the system clock being adjusted), but will not guarantee causal ordering for updates on different nodes, - * since it's possible that an update on a different node has not yet been replicated to this node. - */ - CUSTOM_AUTO_INCREMENT - } + CUSTOM_AUTO_INCREMENT + } } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/crdt/LWWRegisterMap.java b/java-support/src/main/java/io/cloudstate/javasupport/crdt/LWWRegisterMap.java index e7502fb91..3c1038e4d 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/crdt/LWWRegisterMap.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/crdt/LWWRegisterMap.java @@ -4,31 +4,32 @@ /** * Convenience wrapper class for {@link ORMap} that uses {@link LWWRegister}'s for values. - *
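A sketch of using the CUSTOM clock with a domain-specific, monotonically increasing value, here a hypothetical per-device sequence number; the register itself would normally be created through a CrdtFactory in an entity:

import io.cloudstate.javasupport.crdt.LWWRegister;

public class DeviceReading {
  // The device's sequence number is used as the clock value, so the register
  // converges to the device's latest reading even if node clocks are skewed.
  public void record(LWWRegister<String> reading, String value, long deviceSequenceNr) {
    reading.set(value, LWWRegister.Clock.CUSTOM, deviceSequenceNr);
  }
}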

- * This is useful as it allows the map to be used more idiomatically, with plain {@link Map#get(Object)} and - * {@link Map#put(Object, Object)} calls for values. + * + *

This is useful as it allows the map to be used more idiomatically, with plain {@link + * Map#get(Object)} and {@link Map#put(Object, Object)} calls for values. * * @param The type for keys. * @param The type for values. */ -public final class LWWRegisterMap extends AbstractORMapWrapper> implements Map { +public final class LWWRegisterMap extends AbstractORMapWrapper> + implements Map { - public LWWRegisterMap(ORMap> ormap) { - super(ormap); - } + public LWWRegisterMap(ORMap> ormap) { + super(ormap); + } - @Override - V getCrdtValue(LWWRegister crdt) { - return crdt.get(); - } + @Override + V getCrdtValue(LWWRegister crdt) { + return crdt.get(); + } - @Override - void setCrdtValue(LWWRegister crdt, V value) { - crdt.set(value); - } + @Override + void setCrdtValue(LWWRegister crdt, V value) { + crdt.set(value); + } - @Override - LWWRegister getOrUpdateCrdt(K key, V value) { - return ormap.getOrCreate(key, f -> f.newLWWRegister(value)); - } + @Override + LWWRegister getOrUpdateCrdt(K key, V value) { + return ormap.getOrCreate(key, f -> f.newLWWRegister(value)); + } } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/crdt/ORMap.java b/java-support/src/main/java/io/cloudstate/javasupport/crdt/ORMap.java index 9f63259d4..a383db4a8 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/crdt/ORMap.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/crdt/ORMap.java @@ -5,52 +5,55 @@ /** * An Observed-Removed Map. - *
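A sketch of wrapping an ORMap of LWWRegister values so it can be used as a plain java.util.Map; the ProfileStore class and its attribute keys are hypothetical:

import io.cloudstate.javasupport.crdt.CrdtFactory;
import io.cloudstate.javasupport.crdt.LWWRegister;
import io.cloudstate.javasupport.crdt.LWWRegisterMap;
import io.cloudstate.javasupport.crdt.ORMap;

public class ProfileStore {
  private final LWWRegisterMap<String, String> attributes;

  public ProfileStore(CrdtFactory factory) {
    ORMap<String, LWWRegister<String>> ormap = factory.newORMap();
    this.attributes = new LWWRegisterMap<>(ormap);
  }

  public void setDisplayName(String name) {
    // Plain Map.put; the wrapper creates the underlying LWWRegister on demand.
    attributes.put("displayName", name);
  }
}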

- * An Observed-Removed Map allows both the addition and removal of objects in a map. A removal can only be done if - * all of the additions that caused the key to be in the map have been seen by this node. This means that, for example, - * if node 1 adds key A, and node 2 also adds key A, then node 1's addition is replicated to node 3, and node 3 - * deletes it before node 2's addition is replicated, then the item will still be in the map because node 2's addition - * had not yet been observed by node 3. However, if both additions had been replicated to node 3, then the key will be - * removed. - *

- * The values of the map are themselves CRDTs, and hence allow concurrent updates that will eventually converge. Values - * may only be inserted using the {@link ORMap#getOrCreate(Object, Function)} function, using the {@link CrdtFactory} - * passed in to the creation callback. Invoking {@link ORMap#put(Object, Crdt)} or any other insertion method will throw - * a {@link UnsupportedOperationException}. - *

- * While removing entries from the map is supported, if the entries are added back again, it is possible that the value - * of the deleted entry may be merged into the value of the current entry, depending on whether the removal has been - * replicated to all nodes before the addition is performed. - *

- * The map may contain different CRDT types as values, however, for a given key, the type must never change. If two - * different types for the same key are inserted on different nodes, the CRDT will enter an invalid state that can never - * be merged, and behavior of the CRDT is undefined. - *

- * Care needs to be taken to ensure that the serialized value of keys in the set is stable. For example, if using - * protobufs, the serialized value of any maps contained in the protobuf is not stable, and can yield a different set of - * bytes for the same logically equal element. Hence maps should be avoided. Additionally, some changes in protobuf - * schemas which are backwards compatible from a protobuf perspective, such as changing from sint32 to int32, do result - * in different serialized bytes, and so must be avoided. + * + *

An Observed-Removed Map allows both the addition and removal of objects in a map. A removal + * can only be done if all of the additions that caused the key to be in the map have been seen by + * this node. This means that, for example, if node 1 adds key A, and node 2 also adds key A, then + * node 1's addition is replicated to node 3, and node 3 deletes it before node 2's addition is + * replicated, then the item will still be in the map because node 2's addition had not yet been + * observed by node 3. However, if both additions had been replicated to node 3, then the key will + * be removed. + * + *

The values of the map are themselves CRDTs, and hence allow concurrent updates that will + * eventually converge. Values may only be inserted using the {@link ORMap#getOrCreate(Object, + * Function)} function, using the {@link CrdtFactory} passed in to the creation callback. Invoking + * {@link ORMap#put(Object, Crdt)} or any other insertion method will throw a {@link + * UnsupportedOperationException}. + * + *

While removing entries from the map is supported, if the entries are added back again, it is + * possible that the value of the deleted entry may be merged into the value of the current entry, + * depending on whether the removal has been replicated to all nodes before the addition is + * performed. + * + *

The map may contain different CRDT types as values, however, for a given key, the type must + * never change. If two different types for the same key are inserted on different nodes, the CRDT + * will enter an invalid state that can never be merged, and behavior of the CRDT is undefined. + * + *

Care needs to be taken to ensure that the serialized value of keys in the set is stable. For + * example, if using protobufs, the serialized value of any maps contained in the protobuf is not + * stable, and can yield a different set of bytes for the same logically equal element. Hence maps + * should be avoided. Additionally, some changes in protobuf schemas which are backwards compatible + * from a protobuf perspective, such as changing from sint32 to int32, do result in different + * serialized bytes, and so must be avoided. * * @param The type of keys. * @param The CRDT to be used for values. */ public interface ORMap extends Crdt, Map { - /** - * Get or create an entry in the map with the given key. - * - * @param key The key of the entry. - * @param create A callback used to create the value using the given {@link CrdtFactory} if an entry for the key is - * not currently in the map. - * @return The existing or newly created value for the given key. - */ - V getOrCreate(K key, Function create); + /** + * Get or create an entry in the map with the given key. + * + * @param key The key of the entry. + * @param create A callback used to create the value using the given {@link CrdtFactory} if an + * entry for the key is not currently in the map. + * @return The existing or newly created value for the given key. + */ + V getOrCreate(K key, Function create); - /** - * Not supported on ORMap. Use {@link ORMap#getOrCreate(Object, Function)} instead. - */ - @Override - default V put(K key, V value) { - throw new UnsupportedOperationException("put is not supported on ORMap, use getOrCreate instead"); - } + /** Not supported on ORMap. Use {@link ORMap#getOrCreate(Object, Function)} instead. */ + @Override + default V put(K key, V value) { + throw new UnsupportedOperationException( + "put is not supported on ORMap, use getOrCreate instead"); + } } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/crdt/ORSet.java b/java-support/src/main/java/io/cloudstate/javasupport/crdt/ORSet.java index cdeea78d8..c65df59b9 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/crdt/ORSet.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/crdt/ORSet.java @@ -4,21 +4,22 @@ /** * An Observed-Removed Set. - *
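A sketch of the getOrCreate insertion pattern; the per-user login counter is hypothetical, and in an entity the ORMap itself would come from a CrdtFactory or creation context:

import io.cloudstate.javasupport.crdt.GCounter;
import io.cloudstate.javasupport.crdt.ORMap;

public class LoginCounters {
  private final ORMap<String, GCounter> counters;

  public LoginCounters(ORMap<String, GCounter> counters) {
    this.counters = counters;
  }

  public long recordLogin(String userId) {
    // Values must be inserted via getOrCreate with the supplied CrdtFactory;
    // calling put(...) on the map throws UnsupportedOperationException.
    GCounter counter = counters.getOrCreate(userId, factory -> factory.newGCounter());
    return counter.increment(1);
  }
}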

- * An Observed-Removed Set allows both the addition and removal of elements in a set. A removal can only be done if - * all of the additions that added the key have been seen by this node. This means that, for example if node 1 adds - * element A, and node 2 also adds element A, then node 1's addition is replicated to node 3, and node 3 deletes it - * before node 2's addition is replicated, then the element will still be in the map because node 2's addition - * had not yet been observed by node 3, and will cause the element to be re-added when node 3 receives it. However, if - * both additions had been replicated to node 3, then the element will be removed. - *

- * Care needs to be taken to ensure that the serialized value of elements in the set is stable. For example, if using - * protobufs, the serialized value of any maps contained in the protobuf is not stable, and can yield a different set of - * bytes for the same logically equal element. Hence maps should be avoided. Additionally, some changes in protobuf - * schemas which are backwards compatible from a protobuf perspective, such as changing from sint32 to int32, do result - * in different serialized bytes, and so must be avoided. + * + *

An Observed-Removed Set allows both the addition and removal of elements in a set. A removal + * can only be done if all of the additions that added the key have been seen by this node. This + * means that, for example if node 1 adds element A, and node 2 also adds element A, then node 1's + * addition is replicated to node 3, and node 3 deletes it before node 2's addition is replicated, + * then the element will still be in the map because node 2's addition had not yet been observed by + * node 3, and will cause the element to be re-added when node 3 receives it. However, if both + * additions had been replicated to node 3, then the element will be removed. + * + *

Care needs to be taken to ensure that the serialized value of elements in the set is stable. + * For example, if using protobufs, the serialized value of any maps contained in the protobuf is + * not stable, and can yield a different set of bytes for the same logically equal element. Hence + * maps should be avoided. Additionally, some changes in protobuf schemas which are backwards + * compatible from a protobuf perspective, such as changing from sint32 to int32, do result in + * different serialized bytes, and so must be avoided. * * @param The type of elements. */ -public interface ORSet extends Crdt, Set { -} +public interface ORSet extends Crdt, Set {} diff --git a/java-support/src/main/java/io/cloudstate/javasupport/crdt/PNCounter.java b/java-support/src/main/java/io/cloudstate/javasupport/crdt/PNCounter.java index 0fa48bb0e..5a0faf423 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/crdt/PNCounter.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/crdt/PNCounter.java @@ -2,37 +2,37 @@ /** * A Positive-Negative Counter. - *

- * A Positive-Negative Counter is a counter that allows both incrementing, and decrementing. It is based on two - * {@link GCounter}'s, a positive one that is incremented for every increment, and a negative one that is incremented - * for every decrement. The current value of the counter is calculated by subtracting the negative counter from the - * positive counter. + * + *

A Positive-Negative Counter is a counter that allows both incrementing, and decrementing. It + * is based on two {@link GCounter}'s, a positive one that is incremented for every increment, and a + * negative one that is incremented for every decrement. The current value of the counter is + * calculated by subtracting the negative counter from the positive counter. */ public interface PNCounter extends Crdt { - /** - * Get the current value of the counter. - * - * @return The current value of the counter. - */ - long getValue(); + /** + * Get the current value of the counter. + * + * @return The current value of the counter. + */ + long getValue(); - /** - * Increment the counter. - *

- * If by is negative, then the counter will be decremented by that much instead. - * - * @param by The amount to increment the counter by. - * @return The new value of the counter. - */ - long increment(long by); + /** + * Increment the counter. + * + *

If by is negative, then the counter will be decremented by that much instead. + * + * @param by The amount to increment the counter by. + * @return The new value of the counter. + */ + long increment(long by); - /** - * Decrement the counter. - *

- * If by is negative, then the counter will be incremented by that much instead. - * - * @param by The amount to decrement the counter by. - * @return The new value of the counter. - */ - long decrement(long by); + /** + * Decrement the counter. + * + *

If by is negative, then the counter will be incremented by that much instead. + * + * @param by The amount to decrement the counter by. + * @return The new value of the counter. + */ + long decrement(long by); } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/crdt/PNCounterMap.java b/java-support/src/main/java/io/cloudstate/javasupport/crdt/PNCounterMap.java index af025ac74..52cab3b3c 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/crdt/PNCounterMap.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/crdt/PNCounterMap.java @@ -4,85 +4,87 @@ /** * Convenience wrapper class for {@link ORMap} that uses {@link PNCounter}'s for values. - *
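A sketch of a Positive-Negative Counter tracking a quantity that can move in both directions; the stock-level use case is hypothetical:

import io.cloudstate.javasupport.crdt.PNCounter;

public class StockLevel {
  private final PNCounter counter;

  public StockLevel(PNCounter counter) {
    this.counter = counter;
  }

  public long receive(long quantity) {
    return counter.increment(quantity); // a negative argument decrements instead
  }

  public long ship(long quantity) {
    return counter.decrement(quantity); // a negative argument increments instead
  }

  public long current() {
    return counter.getValue(); // positive count minus negative count
  }
}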

- * This offers a few extra methods for interacting with the map. + * + *

This offers a few extra methods for interacting with the map. * * @param The type for keys. */ -public final class PNCounterMap extends AbstractORMapWrapper implements Map { +public final class PNCounterMap extends AbstractORMapWrapper + implements Map { - public PNCounterMap(ORMap ormap) { - super(ormap); - } + public PNCounterMap(ORMap ormap) { + super(ormap); + } - /** - * Get the value for the given key. - *

- * This differs from {@link Map#get(Object)} in that it returns a primitive long, and thus avoids an - * allocation. - * - * @param key The key to get the value for. - * @return The current value of the counter at that key, or zero if no counter exists for that key. - */ - public long getValue(Object key) { - PNCounter counter = ormap.get(key); - if (counter != null) { - return counter.getValue(); - } else { - return 0; - } + /** + * Get the value for the given key. + * + *

This differs from {@link Map#get(Object)} in that it returns a primitive long, + * and thus avoids an allocation. + * + * @param key The key to get the value for. + * @return The current value of the counter at that key, or zero if no counter exists for that + * key. + */ + public long getValue(Object key) { + PNCounter counter = ormap.get(key); + if (counter != null) { + return counter.getValue(); + } else { + return 0; } + } - /** - * Increment the counter at the given key by the given amount. - *

- * The counter will be created if it is not already in the map. - * - * @param key The key of the counter. - * @param by The amount to increment by. - * @return The new value of the counter. - */ - public long increment(Object key, long by) { - return getOrUpdate(key).increment(by); - } + /** + * Increment the counter at the given key by the given amount. + * + *

The counter will be created if it is not already in the map. + * + * @param key The key of the counter. + * @param by The amount to increment by. + * @return The new value of the counter. + */ + public long increment(Object key, long by) { + return getOrUpdate(key).increment(by); + } - /** - * Decrement the counter at the given key by the given amount. - *

- * The counter will be created if it is not already in the map. - * - * @param key The key of the counter. - * @param by The amount to decrement by. - * @return The new value of the counter. - */ - public long decrement(Object key, long by) { - return getOrUpdate(key).decrement(by); - } + /** + * Decrement the counter at the given key by the given amount. + * + *

The counter will be created if it is not already in the map. + * + * @param key The key of the counter. + * @param by The amount to decrement by. + * @return The new value of the counter. + */ + public long decrement(Object key, long by) { + return getOrUpdate(key).decrement(by); + } - /** - * Not supported on PNCounter, use increment/decrement instead. - */ - @Override - public Long put(K key, Long value) { - throw new UnsupportedOperationException("Put is not supported on PNCounterMap, use increment or decrement instead"); - } + /** Not supported on PNCounter, use increment/decrement instead. */ + @Override + public Long put(K key, Long value) { + throw new UnsupportedOperationException( + "Put is not supported on PNCounterMap, use increment or decrement instead"); + } - @Override - Long getCrdtValue(PNCounter pnCounter) { - return pnCounter.getValue(); - } + @Override + Long getCrdtValue(PNCounter pnCounter) { + return pnCounter.getValue(); + } - @Override - void setCrdtValue(PNCounter pnCounter, Long value) { - throw new UnsupportedOperationException("Using value mutating methods on PNCounterMap is not supported, use increment or decrement instead"); - } + @Override + void setCrdtValue(PNCounter pnCounter, Long value) { + throw new UnsupportedOperationException( + "Using value mutating methods on PNCounterMap is not supported, use increment or decrement instead"); + } - @Override - PNCounter getOrUpdateCrdt(K key, Long value) { - return ormap.getOrCreate(key, CrdtFactory::newPNCounter); - } + @Override + PNCounter getOrUpdateCrdt(K key, Long value) { + return ormap.getOrCreate(key, CrdtFactory::newPNCounter); + } - private PNCounter getOrUpdate(Object key) { - return ormap.getOrCreate((K) key, CrdtFactory::newPNCounter); - } + private PNCounter getOrUpdate(Object key) { + return ormap.getOrCreate((K) key, CrdtFactory::newPNCounter); + } } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/crdt/StreamCancelledContext.java b/java-support/src/main/java/io/cloudstate/javasupport/crdt/StreamCancelledContext.java index 0aa0a3931..4816d9694 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/crdt/StreamCancelledContext.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/crdt/StreamCancelledContext.java @@ -6,14 +6,14 @@ /** * Context for a stream cancelled event. - *
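A sketch of keeping one counter per key with this wrapper; the per-SKU stock example is hypothetical:

import io.cloudstate.javasupport.crdt.CrdtFactory;
import io.cloudstate.javasupport.crdt.ORMap;
import io.cloudstate.javasupport.crdt.PNCounter;
import io.cloudstate.javasupport.crdt.PNCounterMap;

public class StockBySku {
  private final PNCounterMap<String> stock;

  public StockBySku(CrdtFactory factory) {
    ORMap<String, PNCounter> ormap = factory.newORMap();
    this.stock = new PNCounterMap<>(ormap);
  }

  public long adjust(String sku, long delta) {
    // increment/decrement create the counter on first use for a key.
    return delta >= 0 ? stock.increment(sku, delta) : stock.decrement(sku, -delta);
  }

  public long level(String sku) {
    return stock.getValue(sku); // returns a primitive long, zero if absent
  }
}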

- * This is sent to callbacks registered by {@link StreamedCommandContext#onCancel(Consumer)}. + * + *

This is sent to callbacks registered by {@link StreamedCommandContext#onCancel(Consumer)}. */ public interface StreamCancelledContext extends CrdtContext, EffectContext { - /** - * The id of the command that the stream was for. - * - * @return The ID of the command. - */ - long commandId(); + /** + * The id of the command that the stream was for. + * + * @return The ID of the command. + */ + long commandId(); } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/crdt/StreamedCommandContext.java b/java-support/src/main/java/io/cloudstate/javasupport/crdt/StreamedCommandContext.java index 19788d69d..eb8dfa71e 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/crdt/StreamedCommandContext.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/crdt/StreamedCommandContext.java @@ -6,48 +6,48 @@ /** * Context for handling a streamed command. - *

- * This may be passed to any {@link CommandHandler} annotated element that corresponds to a command whose output is - * streamed. + * + *

This may be passed to any {@link CommandHandler} annotated element that corresponds to a + * command whose output is streamed. */ public interface StreamedCommandContext extends CommandContext { - /** - * Whether the call is actually streamed. - *

- * When a command is handled via the HTTP adapter, the command will not be streamed since the HTTP adapter does not - * support streaming, and this will return false. In that case, calls to - * {@link StreamedCommandContext#onChange(Function)} and {@link StreamedCommandContext#onCancel(Consumer)} will - * fail. - * - * @return True if the command is actually streamed. - */ - boolean isStreamed(); + /** + * Whether the call is actually streamed. + * + *

When a command is handled via the HTTP adapter, the command will not be streamed since the + * HTTP adapter does not support streaming, and this will return false. In that case, + * calls to {@link StreamedCommandContext#onChange(Function)} and {@link + * StreamedCommandContext#onCancel(Consumer)} will fail. + * + * @return True if the command is actually streamed. + */ + boolean isStreamed(); - /** - * Register an on change callback for this command. - *

- * The callback will be invoked any time the CRDT changes. The callback may inspect the CRDT, but any attempt - * to modify the CRDT will be ignored and the CRDT will crash. - *

- * If the callback returns a value, that value will be sent down the stream. Alternatively, the callback may forward - * messages to other entities via the passed in {@link SubscriptionContext}. The callback may also emit side effects - * to other entities via that context. - * - * @param subscriber The subscriber callback. - */ - void onChange(Function> subscriber); + /** + * Register an on change callback for this command. + * + *

The callback will be invoked any time the CRDT changes. The callback may inspect the CRDT, + * but any attempt to modify the CRDT will be ignored and the CRDT will crash. + * + *

If the callback returns a value, that value will be sent down the stream. Alternatively, the + * callback may forward messages to other entities via the passed in {@link SubscriptionContext}. + * The callback may also emit side effects to other entities via that context. + * + * @param subscriber The subscriber callback. + */ + void onChange(Function> subscriber); - /** - * Register an on cancel callback for this command. - *

- * This will be invoked if the client initiates a stream cancel. It will not be invoked if the entity cancels the - * stream itself via {@link SubscriptionContext#endStream()} from an - * {@link StreamedCommandContext#onChange(Function)} callback. - *

- * An on cancel callback may update the CRDT, and may emit side effects via the passed in - * {@link StreamCancelledContext}. - * - * @param effect The effect to perform when this stream is cancelled. - */ - void onCancel(Consumer effect); + /** + * Register an on cancel callback for this command. + * + *

This will be invoked if the client initiates a stream cancel. It will not be invoked if the + * entity cancels the stream itself via {@link SubscriptionContext#endStream()} from an {@link + * StreamedCommandContext#onChange(Function)} callback. + * + *

An on cancel callback may update the CRDT, and may emit side effects via the passed in + * {@link StreamCancelledContext}. + * + * @param effect The effect to perform when this stream is cancelled. + */ + void onCancel(Consumer effect); } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/crdt/SubscriptionContext.java b/java-support/src/main/java/io/cloudstate/javasupport/crdt/SubscriptionContext.java index d6fa8fef0..9e35d7645 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/crdt/SubscriptionContext.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/crdt/SubscriptionContext.java @@ -6,12 +6,10 @@ import java.util.function.Function; /** - * The context for a subscription, passed with every invocation of a {@link StreamedCommandContext#onChange(Function)} - * callback. + * The context for a subscription, passed with every invocation of a {@link + * StreamedCommandContext#onChange(Function)} callback. */ public interface SubscriptionContext extends CrdtContext, EffectContext, ClientActionContext { - /** - * End this stream. - */ - void endStream(); + /** End this stream. */ + void endStream(); } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/crdt/Vote.java b/java-support/src/main/java/io/cloudstate/javasupport/crdt/Vote.java index bc04aa769..61e62995c 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/crdt/Vote.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/crdt/Vote.java @@ -2,62 +2,62 @@ /** * A Vote CRDT. - *
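A sketch of a streamed command handler using the StreamedCommandContext described above; the Monitor rpc is hypothetical, and google.protobuf.Int64Value stands in for whatever output message the gRPC service actually declares:

import com.google.protobuf.Int64Value;
import io.cloudstate.javasupport.crdt.CommandHandler;
import io.cloudstate.javasupport.crdt.GCounter;
import io.cloudstate.javasupport.crdt.StreamedCommandContext;

public class CounterMonitor {
  @CommandHandler
  public Int64Value monitor(StreamedCommandContext<Int64Value> ctx) {
    if (ctx.isStreamed()) {
      // Push the new value down the stream whenever the CRDT changes.
      ctx.onChange(
          sub ->
              sub.state(GCounter.class)
                  .map(c -> Int64Value.newBuilder().setValue(c.getValue()).build()));
      // The client cancelled the stream; nothing to clean up in this sketch.
      ctx.onCancel(cancelled -> {});
    }
    long current = ctx.state(GCounter.class).map(GCounter::getValue).orElse(0L);
    return Int64Value.newBuilder().setValue(current).build();
  }
}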

- * This CRDT is used to allow all the nodes in a cluster to vote on a condition. + * + *

This CRDT is used to allow all the nodes in a cluster to vote on a condition. */ public interface Vote extends Crdt { - /** - * Get the current value for this nodes vote. - * - * @return This nodes vote. - */ - boolean getSelfVote(); + /** + * Get the current value for this nodes vote. + * + * @return This nodes vote. + */ + boolean getSelfVote(); - /** - * Get the number of voters participating in the vote (ie, the number of nodes in the cluster). - * - * @return The number of voters. - */ - int getVoters(); + /** + * Get the number of voters participating in the vote (ie, the number of nodes in the cluster). + * + * @return The number of voters. + */ + int getVoters(); - /** - * Get the number of votes for. - * - * @return The number of votes for. - */ - int getVotesFor(); + /** + * Get the number of votes for. + * + * @return The number of votes for. + */ + int getVotesFor(); - /** - * Update this nodes vote to the given value. - * - * @param vote The vote this node is contributing. - */ - void vote(boolean vote); + /** + * Update this nodes vote to the given value. + * + * @param vote The vote this node is contributing. + */ + void vote(boolean vote); - /** - * Has at least one node voted true? - * - * @return True if at least one node has voted true. - */ - default boolean isAtLeastOne() { - return getVotesFor() > 0; - } + /** + * Has at least one node voted true? + * + * @return True if at least one node has voted true. + */ + default boolean isAtLeastOne() { + return getVotesFor() > 0; + } - /** - * Have a majority of nodes voted true? - * - * @return True if more than half of the nodes have voted true. - */ - default boolean isMajority() { - return getVotesFor() > getVoters() / 2; - } + /** + * Have a majority of nodes voted true? + * + * @return True if more than half of the nodes have voted true. + */ + default boolean isMajority() { + return getVotesFor() > getVoters() / 2; + } - /** - * Is the vote unanimous? - * - * @return True if all nodes have voted true. - */ - default boolean isUnanimous() { - return getVotesFor() == getVoters(); - } + /** + * Is the vote unanimous? + * + * @return True if all nodes have voted true. + */ + default boolean isUnanimous() { + return getVotesFor() == getVoters(); + } } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/crdt/package-info.java b/java-support/src/main/java/io/cloudstate/javasupport/crdt/package-info.java index 53f1937a6..c8da9ba17 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/crdt/package-info.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/crdt/package-info.java @@ -1,14 +1,15 @@ /** * Conflict-free Replicated Data Type support. - *
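A sketch of the Vote CRDT used as a cluster-wide gate; the rollout scenario is hypothetical:

import io.cloudstate.javasupport.crdt.Vote;

public class RolloutGate {
  private final Vote vote;

  public RolloutGate(Vote vote) {
    this.vote = vote;
  }

  // Each node contributes only its own vote; the tallies are replicated.
  public void reportHealthy(boolean healthy) {
    vote.vote(healthy);
  }

  public boolean proceed() {
    return vote.isMajority(); // more than half of the voters voted true
  }
}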

- * CRDT entities can be annotated with the {@link io.cloudstate.javasupport.crdt.CrdtEntity @CrdtEntity} annotation, - * and supply command handlers using the {@link io.cloudstate.javasupport.crdt.CommandHandler @CommandHandler} - * annotation. - *

- * The data stored by a CRDT entity can be stored in a subtype of {@link io.cloudstate.javasupport.crdt.Crdt}. These - * can be created using a {@link io.cloudstate.javasupport.crdt.CrdtFactory}, which is a super-interface of both the - * {@link io.cloudstate.javasupport.crdt.CrdtCreationContext}, available for injection constructors, and of the - * {@link io.cloudstate.javasupport.crdt.CommandContext}, available for injection in {@code @CommandHandler} annotated - * methods. + * + *

CRDT entities can be annotated with the {@link + * io.cloudstate.javasupport.crdt.CrdtEntity @CrdtEntity} annotation, and supply command handlers + * using the {@link io.cloudstate.javasupport.crdt.CommandHandler @CommandHandler} annotation. + * + *

The data stored by a CRDT entity can be stored in a subtype of {@link + * io.cloudstate.javasupport.crdt.Crdt}. These can be created using a {@link + * io.cloudstate.javasupport.crdt.CrdtFactory}, which is a super-interface of both the {@link + * io.cloudstate.javasupport.crdt.CrdtCreationContext}, available for injection constructors, and of + * the {@link io.cloudstate.javasupport.crdt.CommandContext}, available for injection in + * {@code @CommandHandler} annotated methods. */ -package io.cloudstate.javasupport.crdt; \ No newline at end of file +package io.cloudstate.javasupport.crdt; diff --git a/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/BehaviorContext.java b/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/BehaviorContext.java index 23687b8ea..d4eea345d 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/BehaviorContext.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/BehaviorContext.java @@ -1,23 +1,22 @@ package io.cloudstate.javasupport.eventsourced; -/** - * Superinterface of all contexts that allow changing the current entities behavior behavior. - */ +/** Superinterface of all contexts that allow changing the current entities behavior behavior. */ public interface BehaviorContext extends EventSourcedContext { - /** - * Become the new behavior specified by the given behavior objects. - *

- * More than one object may be passed to allow composing behaviors from multiple objects. If two objects define - * a handler for the same event or command, the one that comes earlier in the supplied array of objects is the one - * that is used. - *

- * Note that event and snapshot handlers, where handlers are matched on a given behavior object by specificity - * (ie, a handler for a child class will take precedence over a handler for a parent class), this precedence is - * not honored across multiple behaviors. So, if the first behavior defines an event handler for {@link Object}, - * that handler will always win, regardless of what handlers are defined on subsequent behaviors. - * - * @param behaviors The behaviors to use for subsequent commands and events. - */ - void become(Object... behaviors); + /** + * Become the new behavior specified by the given behavior objects. + * + *

More than one object may be passed to allow composing behaviors from multiple objects. If + * two objects define a handler for the same event or command, the one that comes earlier in the + * supplied array of objects is the one that is used. + * + *

Note that event and snapshot handlers, where handlers are matched on a given behavior object + * by specificity (ie, a handler for a child class will take precedence over a handler for a + * parent class), this precedence is not honored across multiple behaviors. So, if the first + * behavior defines an event handler for {@link Object}, that handler will always win, regardless + * of what handlers are defined on subsequent behaviors. + * + * @param behaviors The behaviors to use for subsequent commands and events. + */ + void become(Object... behaviors); } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/CommandContext.java b/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/CommandContext.java index 94b41ed8a..f8c0b5ccf 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/CommandContext.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/CommandContext.java @@ -5,37 +5,38 @@ /** * An event sourced command context. - *
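A sketch of switching behavior from an event handler; the CheckedOut event and CheckedOutBehavior class are hypothetical stubs, and EventBehaviorContext (which combines EventContext with this BehaviorContext) is the context injected into @EventHandler methods:

import io.cloudstate.javasupport.eventsourced.EventBehaviorContext;
import io.cloudstate.javasupport.eventsourced.EventHandler;

public class OpenCartBehavior {
  // Hypothetical domain event and follow-on behavior, stubbed for illustration.
  public static class CheckedOut {}

  public static class CheckedOutBehavior {}

  @EventHandler
  public void checkedOut(CheckedOut event, EventBehaviorContext ctx) {
    // All subsequent commands and events are handled by CheckedOutBehavior;
    // when several behaviors are passed, earlier ones win on overlapping handlers.
    ctx.become(new CheckedOutBehavior());
  }
}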

- * Methods annotated with {@link CommandHandler} may take this is a parameter. It allows emitting new events in response - * to a command, along with forwarding the result to other entities, and performing side effects on other entities. + * + *

Methods annotated with {@link CommandHandler} may take this is a parameter. It allows emitting + * new events in response to a command, along with forwarding the result to other entities, and + * performing side effects on other entities. */ public interface CommandContext extends EventSourcedContext, ClientActionContext, EffectContext { - /** - * The current sequence number of events in this entity. - * - * @return The current sequence number. - */ - long sequenceNumber(); + /** + * The current sequence number of events in this entity. + * + * @return The current sequence number. + */ + long sequenceNumber(); - /** - * The name of the command being executed. - * - * @return The name of the command. - */ - String commandName(); + /** + * The name of the command being executed. + * + * @return The name of the command. + */ + String commandName(); - /** - * The id of the command being executed. - * - * @return The id of the command. - */ - long commandId(); + /** + * The id of the command being executed. + * + * @return The id of the command. + */ + long commandId(); - /** - * Emit the given event. The event will be persisted, and the handler of the event defined in the current behavior - * will immediately be executed to pick it up. - * - * @param event The event to emit. - */ - void emit(Object event); -} \ No newline at end of file + /** + * Emit the given event. The event will be persisted, and the handler of the event defined in the + * current behavior will immediately be executed to pick it up. + * + * @param event The event to emit. + */ + void emit(Object event); +} diff --git a/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/CommandHandler.java b/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/CommandHandler.java index ef9ba7c60..0fd309e3d 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/CommandHandler.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/CommandHandler.java @@ -9,28 +9,30 @@ /** * Marks a method as a command handler. - *
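A sketch of emitting an event from an event sourced command handler; the CounterEntity, its Increment rpc, the Empty reply and the Incremented event class are hypothetical, and in practice the event would usually be a protobuf message or a @Jsonable class so it can be serialized:

import com.google.protobuf.Empty;
import io.cloudstate.javasupport.eventsourced.CommandContext;
import io.cloudstate.javasupport.eventsourced.CommandHandler;
import io.cloudstate.javasupport.eventsourced.EventSourcedEntity;

@EventSourcedEntity
public class CounterEntity {
  public static class Incremented {} // hypothetical event

  @CommandHandler
  public Empty increment(CommandContext ctx) {
    // The event is persisted, and the matching @EventHandler in the current
    // behavior runs immediately to update in-memory state.
    ctx.emit(new Incremented());
    return Empty.getDefaultInstance();
  }
}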

- * This method will be invoked whenever the service call with name that matches this command handlers name is invoked. - *

- * The method may take the command object as a parameter, its type must match the gRPC service input type. - *

- * The return type of the method must match the gRPC services output type. - *

- * The method may also take a {@link CommandContext}, and/or a {@link io.cloudstate.javasupport.EntityId} annotated - * {@link String} parameter. + * + *

This method will be invoked whenever the service call with a name that matches this command + * handler's name is invoked. + * + *

The method may take the command object as a parameter; its type must match the gRPC service + * input type. + * + *

The return type of the method must match the gRPC service's output type. + * + *

The method may also take a {@link CommandContext}, and/or a {@link + * io.cloudstate.javasupport.EntityId} annotated {@link String} parameter. */ @CloudStateAnnotation @Target(ElementType.METHOD) @Retention(RetentionPolicy.RUNTIME) public @interface CommandHandler { - /** - * The name of the command to handle. - *

- * If not specified, the name of the method will be used as the command name, with the first letter capitalized to - * match the gRPC convention of capitalizing rpc method names. - * - * @return The command name. - */ - String name() default ""; + /** + * The name of the command to handle. + * + *

If not specified, the name of the method will be used as the command name, with the first + * letter capitalized to match the gRPC convention of capitalizing rpc method names. + * + * @return The command name. + */ + String name() default ""; } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/EventBehaviorContext.java b/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/EventBehaviorContext.java index 1d34abed9..6fa677e68 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/EventBehaviorContext.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/EventBehaviorContext.java @@ -1,8 +1,7 @@ package io.cloudstate.javasupport.eventsourced; /** - * Event handler context that allows changing behavior. This can be passed to all {@link EventHandler} annotated - * methods. + * Event handler context that allows changing behavior. This can be passed to all {@link + * EventHandler} annotated methods. */ -public interface EventBehaviorContext extends EventContext, BehaviorContext { -} +public interface EventBehaviorContext extends EventContext, BehaviorContext {} diff --git a/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/EventContext.java b/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/EventContext.java index 2227131e1..6f132b656 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/EventContext.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/EventContext.java @@ -1,13 +1,11 @@ package io.cloudstate.javasupport.eventsourced; -/** - * Context for an event. - */ +/** Context for an event. */ public interface EventContext extends EventSourcedContext { - /** - * The sequence number of the current event being processed. - * - * @return The sequence number. - */ - long sequenceNumber(); + /** + * The sequence number of the current event being processed. + * + * @return The sequence number. + */ + long sequenceNumber(); } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/EventHandler.java b/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/EventHandler.java index 65a85de17..eab0d880b 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/EventHandler.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/EventHandler.java @@ -9,22 +9,23 @@ /** * Marks a method as an event handler. - *
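Putting the @CommandHandler and CommandContext contracts described above together, a handler on an event sourced entity might look roughly like this sketch; AddItem and ItemAdded stand in for generated protobuf command and event classes and are assumptions rather than types defined in this patch, and Empty is com.google.protobuf.Empty:

  @EventSourcedEntity
  public class CartEntity {
    private final String entityId;

    public CartEntity(@EntityId String entityId) {
      this.entityId = entityId;
    }

    // Matches an rpc named "AddItem"; the return type matches the rpc's output type.
    @CommandHandler
    public Empty addItem(AddItem command, CommandContext ctx) {
      if (command.getQuantity() <= 0) {
        // fail(...) is inherited from ClientActionContext and reports an error to the client.
        ctx.fail("Quantity must be positive, but was " + command.getQuantity());
      }
      // Persist the event; the matching @EventHandler runs immediately to update state.
      ctx.emit(ItemAdded.newBuilder()
          .setProductId(command.getProductId())
          .setQuantity(command.getQuantity())
          .build());
      return Empty.getDefaultInstance();
    }
  }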

- * This method will be invoked whenever an event matching this event handlers event class is either replayed on entity - * recovery, by a command handler. - *

- * The method may take the event object as a parameter. - *

- * Methods annotated with this may take an {@link EventBehaviorContext}. + * + *

This method will be invoked whenever an event matching this event handler's event class is + * either replayed on entity recovery, or emitted by a command handler. + * + *

The method may take the event object as a parameter. + * + *

Methods annotated with this may take an {@link EventBehaviorContext}. */ @CloudStateAnnotation @Target(ElementType.METHOD) @Retention(RetentionPolicy.RUNTIME) public @interface EventHandler { - /** - * The event class. Generally, this will be determined by looking at the parameter of the event handler method, - * however if the event doesn't need to be passed to the method (for example, perhaps it contains no data), then - * this can be used to indicate which event this handler handles. - */ - Class eventClass() default Object.class; + /** + * The event class. Generally, this will be determined by looking at the parameter of the event + * handler method, however if the event doesn't need to be passed to the method (for example, + * perhaps it contains no data), then this can be used to indicate which event this handler + * handles. + */ + Class eventClass() default Object.class; } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/EventSourcedContext.java b/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/EventSourcedContext.java index 94ae73f02..56f41a881 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/EventSourcedContext.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/EventSourcedContext.java @@ -2,8 +2,5 @@ import io.cloudstate.javasupport.EntityContext; -/** - * Root context for all event sourcing contexts. - */ -public interface EventSourcedContext extends EntityContext { -} +/** Root context for all event sourcing contexts. */ +public interface EventSourcedContext extends EntityContext {} diff --git a/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/EventSourcedEntity.java b/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/EventSourcedEntity.java index b8a37c550..90a7e10eb 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/EventSourcedEntity.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/EventSourcedEntity.java @@ -7,26 +7,23 @@ import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; -/** - * An event sourced entity. - */ +/** An event sourced entity. */ @CloudStateAnnotation @Target(ElementType.TYPE) @Retention(RetentionPolicy.RUNTIME) public @interface EventSourcedEntity { - /** - * The name of the persistence id. - *
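Continuing the command handler sketch above, an event handler matching this contract could look as follows; ItemAdded is again an assumed generated class, and quantities an in-memory field of the entity (assuming java.util.Map and java.util.HashMap are imported):

  private final Map<String, Integer> quantities = new HashMap<>();

  // Invoked both when ItemAdded is replayed on entity recovery and immediately after
  // a command handler emits it.
  @EventHandler
  public void itemAdded(ItemAdded event) {
    quantities.merge(event.getProductId(), event.getQuantity(), Integer::sum);
  }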

- * If not specifed, defaults to the entities unqualified classname. It's strongly recommended that you specify it - * explicitly. - */ - String persistenceId() default ""; + /** + * The name of the persistence id. + * + *

If not specified, defaults to the entity's unqualified classname. It's strongly recommended + * that you specify it explicitly. + */ + String persistenceId() default ""; - /** - * Specifies how snapshots of the entity state should be made: - * Zero means use default from configuration file. (Default) - * Any negative value means never snapshot. - * Any positive value means snapshot at-or-after that number of events. - */ - int snapshotEvery() default 0; + /** + * Specifies how snapshots of the entity state should be made: Zero means use default from + * configuration file. (Default) Any negative value means never snapshot. Any positive value means + * snapshot at-or-after that number of events. + */ + int snapshotEvery() default 0; } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/EventSourcedEntityCreationContext.java b/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/EventSourcedEntityCreationContext.java index 59a2b4fde..aedcefc8d 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/EventSourcedEntityCreationContext.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/EventSourcedEntityCreationContext.java @@ -2,8 +2,7 @@ /** * Creation context for {@link EventSourcedEntity} annotated entities. - *
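As a small illustration of the persistenceId and snapshotEvery settings documented above (the values are arbitrary examples, not defaults from this patch):

  // An explicit persistenceId survives class renames; snapshot roughly every 100 events.
  @EventSourcedEntity(persistenceId = "shopping-cart", snapshotEvery = 100)
  public class CartEntity {
    // command, event and snapshot handlers as sketched elsewhere
  }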

- * This may be accepted as an argument to the constructor of an event sourced entity. + * + *

This may be accepted as an argument to the constructor of an event sourced entity. */ -public interface EventSourcedEntityCreationContext extends EventSourcedContext, BehaviorContext { -} +public interface EventSourcedEntityCreationContext extends EventSourcedContext, BehaviorContext {} diff --git a/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/EventSourcedEntityFactory.java b/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/EventSourcedEntityFactory.java index 852323749..742400e44 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/EventSourcedEntityFactory.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/EventSourcedEntityFactory.java @@ -2,16 +2,16 @@ /** * Low level interface for handling events and commands on an entity. - *

- * Generally, this should not be needed, instead, a class annotated with the {@link EventHandler}, - * {@link CommandHandler} and similar annotations should be used. + * + *

Generally, this should not be needed, instead, a class annotated with the {@link + * EventHandler}, {@link CommandHandler} and similar annotations should be used. */ public interface EventSourcedEntityFactory { - /** - * Create an entity handler for the given context. - * - * @param context The context. - * @return The handler for the given context. - */ - EventSourcedEntityHandler create(EventSourcedContext context); + /** + * Create an entity handler for the given context. + * + * @param context The context. + * @return The handler for the given context. + */ + EventSourcedEntityHandler create(EventSourcedContext context); } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/EventSourcedEntityHandler.java b/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/EventSourcedEntityHandler.java index ab92f4326..39c9a149a 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/EventSourcedEntityHandler.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/EventSourcedEntityHandler.java @@ -7,40 +7,40 @@ /** * Low level interface for handling events and commands on an entity. * - * Generally, this should not be needed, instead, a class annotated with the {@link EventHandler}, - * {@link CommandHandler} and similar annotations should be used. + *

Generally, this should not be needed, instead, a class annotated with the {@link + * EventHandler}, {@link CommandHandler} and similar annotations should be used. */ public interface EventSourcedEntityHandler { - /** - * Handle the given event. - * - * @param event The event to handle. - * @param context The event context. - */ - void handleEvent(Any event, EventContext context); + /** + * Handle the given event. + * + * @param event The event to handle. + * @param context The event context. + */ + void handleEvent(Any event, EventContext context); - /** - * Handle the given command. - * - * @param command The command to handle. - * @param context The command context. - * @return The reply to the command, if the command isn't being forwarded elsewhere. - */ - Optional handleCommand(Any command, CommandContext context); + /** + * Handle the given command. + * + * @param command The command to handle. + * @param context The command context. + * @return The reply to the command, if the command isn't being forwarded elsewhere. + */ + Optional handleCommand(Any command, CommandContext context); - /** - * Handle the given snapshot. - * - * @param snapshot The snapshot to handle. - * @param context The snapshot context. - */ - void handleSnapshot(Any snapshot, SnapshotContext context); + /** + * Handle the given snapshot. + * + * @param snapshot The snapshot to handle. + * @param context The snapshot context. + */ + void handleSnapshot(Any snapshot, SnapshotContext context); - /** - * Snapshot the object. - * - * @return The current snapshot, if this object supports snapshoting, otherwise empty. - */ - Optional snapshot(SnapshotContext context); + /** + * Snapshot the object. + * + * @return The current snapshot, if this object supports snapshoting, otherwise empty. + */ + Optional snapshot(SnapshotContext context); } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/Snapshot.java b/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/Snapshot.java index f1b0c0f13..af29c849e 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/Snapshot.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/Snapshot.java @@ -9,17 +9,16 @@ /** * Marks a method as a snapshot method. - *

- * An event sourced behavior may have at most one of these. When provided, it will be periodically (every - * n events emitted) be invoked to retrieve a snapshot of the current state, to be persisted, so that the - * event log can be loaded without replaying the entire history. - *

- * The method must return the current state of the entity. - *

- * The method may accept a {@link SnapshotContext} parameter. + * + *

An event sourced behavior may have at most one of these. When provided, it will be + * invoked periodically (every n events emitted) to retrieve a snapshot of the current + * state, to be persisted, so that the event log can be loaded without replaying the entire history. + * + *

The method must return the current state of the entity. + * + *

The method may accept a {@link SnapshotContext} parameter. */ @CloudStateAnnotation @Target(ElementType.METHOD) @Retention(RetentionPolicy.RUNTIME) -public @interface Snapshot { -} +public @interface Snapshot {} diff --git a/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/SnapshotBehaviorContext.java b/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/SnapshotBehaviorContext.java index 500287828..00fb71c30 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/SnapshotBehaviorContext.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/SnapshotBehaviorContext.java @@ -2,8 +2,7 @@ /** * Snapshot context that allows changing behavior. - *

- * This may be passed to any {@link SnapshotHandler} annotated methods. + * + *

This may be passed to any {@link SnapshotHandler} annotated methods. */ -public interface SnapshotBehaviorContext extends SnapshotContext, BehaviorContext { -} +public interface SnapshotBehaviorContext extends SnapshotContext, BehaviorContext {} diff --git a/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/SnapshotContext.java b/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/SnapshotContext.java index 6b7b35b02..d83d34f12 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/SnapshotContext.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/SnapshotContext.java @@ -1,13 +1,11 @@ package io.cloudstate.javasupport.eventsourced; -/** - * A snapshot context. - */ +/** A snapshot context. */ public interface SnapshotContext extends EventSourcedContext { - /** - * The sequence number of the last event that this snapshot includes. - * - * @return The sequence number. - */ - long sequenceNumber(); + /** + * The sequence number of the last event that this snapshot includes. + * + * @return The sequence number. + */ + long sequenceNumber(); } diff --git a/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/SnapshotHandler.java b/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/SnapshotHandler.java index a7937bcd7..cf5a874a0 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/SnapshotHandler.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/SnapshotHandler.java @@ -9,17 +9,19 @@ /** * Marks a method as a snapshot handler. - *

- * If, when recovering an entity, that entity has a snapshot, the snapshot will be passed to a corresponding snapshot - * handler method whose argument matches its type. The entity must set its current state to that snapshot. - *

- * An entity may declare more than one snapshot handler if it wants different handling for different types. - *

- * The snapshot handler method may additionally accept a {@link SnapshotBehaviorContext} parameter, allowing it to - * access context for the snapshot, and potentially change behavior based on the state from the snapshot, if required. + * + *

If, when recovering an entity, that entity has a snapshot, the snapshot will be passed to a + * corresponding snapshot handler method whose argument matches its type. The entity must set its + * current state to that snapshot. + * + *

An entity may declare more than one snapshot handler if it wants different handling for + * different types. + * + *

The snapshot handler method may additionally accept a {@link SnapshotBehaviorContext} + * parameter, allowing it to access context for the snapshot, and potentially change behavior based + * on the state from the snapshot, if required. */ @CloudStateAnnotation @Target(ElementType.METHOD) @Retention(RetentionPolicy.RUNTIME) -public @interface SnapshotHandler { -} +public @interface SnapshotHandler {} diff --git a/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/package-info.java b/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/package-info.java index 712a0d605..8c512f898 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/package-info.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/eventsourced/package-info.java @@ -1,14 +1,15 @@ /** * Event Sourcing support. - *
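Taken together, the @Snapshot and @SnapshotHandler contracts described above might be used as in the following sketch; CartState is an assumed protobuf message representing the entity state, and quantities is the in-memory field from the earlier sketch:

  // Called periodically (per snapshotEvery) to capture the current state for persistence.
  @Snapshot
  public CartState snapshot() {
    CartState.Builder builder = CartState.newBuilder();
    quantities.forEach((productId, quantity) ->
        builder.addItems(CartState.Item.newBuilder().setProductId(productId).setQuantity(quantity)));
    return builder.build();
  }

  // Called on recovery when a snapshot exists, before the remaining events are replayed.
  @SnapshotHandler
  public void handleSnapshot(CartState state) {
    quantities.clear();
    state.getItemsList().forEach(item -> quantities.put(item.getProductId(), item.getQuantity()));
  }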

- * Event sourced entities can be annotated with the - * {@link io.cloudstate.javasupport.eventsourced.EventSourcedEntity @EventSourcedEntity} annotation, - * and supply command handlers using the {@link io.cloudstate.javasupport.eventsourced.CommandHandler @CommandHandler} - * annotation. - *

- * In addition, {@link io.cloudstate.javasupport.eventsourced.EventHandler @EventHandler} annotated methods should be - * defined to handle events, and {@link io.cloudstate.javasupport.eventsourced.Snapshot @Snapshot} and - * {@link io.cloudstate.javasupport.eventsourced.SnapshotHandler @SnapshotHandler} annotated methods should be defined - * to produce and handle snapshots respectively. + * + *

Event sourced entities can be annotated with the {@link + * io.cloudstate.javasupport.eventsourced.EventSourcedEntity @EventSourcedEntity} annotation, and + * supply command handlers using the {@link + * io.cloudstate.javasupport.eventsourced.CommandHandler @CommandHandler} annotation. + * + *

In addition, {@link io.cloudstate.javasupport.eventsourced.EventHandler @EventHandler} + * annotated methods should be defined to handle events, and {@link + * io.cloudstate.javasupport.eventsourced.Snapshot @Snapshot} and {@link + * io.cloudstate.javasupport.eventsourced.SnapshotHandler @SnapshotHandler} annotated methods should + * be defined to produce and handle snapshots respectively. */ -package io.cloudstate.javasupport.eventsourced; \ No newline at end of file +package io.cloudstate.javasupport.eventsourced; diff --git a/java-support/src/main/java/io/cloudstate/javasupport/impl/CloudStateAnnotation.java b/java-support/src/main/java/io/cloudstate/javasupport/impl/CloudStateAnnotation.java index 45e4aab2a..c7df5d5db 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/impl/CloudStateAnnotation.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/impl/CloudStateAnnotation.java @@ -5,10 +5,7 @@ import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; -/** - * Mark annotation for all CloudState annotations - */ +/** Mark annotation for all CloudState annotations */ @Target(ElementType.ANNOTATION_TYPE) @Retention(RetentionPolicy.RUNTIME) -public @interface CloudStateAnnotation { -} +public @interface CloudStateAnnotation {} diff --git a/java-support/src/main/java/io/cloudstate/javasupport/impl/package-info.java b/java-support/src/main/java/io/cloudstate/javasupport/impl/package-info.java index f011018c7..e4de3d5a8 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/impl/package-info.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/impl/package-info.java @@ -1,4 +1,2 @@ -/** - * Internal implementation classes for CloudState Java Support. - */ -package io.cloudstate.javasupport.impl; \ No newline at end of file +/** Internal implementation classes for CloudState Java Support. */ +package io.cloudstate.javasupport.impl; diff --git a/java-support/src/main/java/io/cloudstate/javasupport/package-info.java b/java-support/src/main/java/io/cloudstate/javasupport/package-info.java index cef64fd1d..74d3009ee 100644 --- a/java-support/src/main/java/io/cloudstate/javasupport/package-info.java +++ b/java-support/src/main/java/io/cloudstate/javasupport/package-info.java @@ -1,15 +1,18 @@ /** * Root package for the CloudState Java Support library. - *

- * The main entry point to creating a CloudState Java server is the {@link io.cloudstate.javasupport.CloudState} - * class. - *

- * For information about specific entity types, see: + * + *

The main entry point for creating a CloudState Java server is the {@link + * io.cloudstate.javasupport.CloudState} class. + * + *
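As an orientation sketch (not defined by this patch), bootstrapping a server through that entry point typically looks something like the following; the registerEventSourcedEntity method name and the ShoppingCartProto and CartEntity types are assumptions modelled on the getting-started sample rather than APIs introduced here:

  public final class Main {
    public static void main(String[] args) {
      new io.cloudstate.javasupport.CloudState()
          // Assumed registration call: binds the annotated entity class to the gRPC
          // service descriptor it implements.
          .registerEventSourcedEntity(
              CartEntity.class,
              ShoppingCartProto.getDescriptor().findServiceByName("ShoppingCart"))
          .start();
    }
  }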

For information about specific entity types, see: + * *

- *
io.cloudstate.javasupport.eventsourced
- *
Event Sourcing support
- *
io.cloudstate.javasupport.crdt
- *
Conflict-free Replicated Data Type support
+ *
+ * io.cloudstate.javasupport.eventsourced + *
Event Sourcing support + *
+ * io.cloudstate.javasupport.crdt + *
Conflict-free Replicated Data Type support *
*/ -package io.cloudstate.javasupport; \ No newline at end of file +package io.cloudstate.javasupport; diff --git a/java-support/src/main/scala/io/cloudstate/javasupport/CloudStateRunner.scala b/java-support/src/main/scala/io/cloudstate/javasupport/CloudStateRunner.scala index 6934818f2..ca984ecb3 100644 --- a/java-support/src/main/scala/io/cloudstate/javasupport/CloudStateRunner.scala +++ b/java-support/src/main/scala/io/cloudstate/javasupport/CloudStateRunner.scala @@ -41,9 +41,9 @@ object CloudStateRunner { validate() def this(config: Config) = { this( - userFunctionInterface = config.getString("user-function-interface"), - userFunctionPort = config.getInt("user-function-port"), - snapshotEvery = config.getInt("eventsourced.snapshot-every") + userFunctionInterface = config.getString("user-function-interface"), + userFunctionPort = config.getInt("user-function-port"), + snapshotEvery = config.getInt("eventsourced.snapshot-every") ) } @@ -61,11 +61,12 @@ object CloudStateRunner { * * CloudStateRunner can be seen as a low-level API for cases where [[io.cloudstate.javasupport.CloudState.start()]] isn't enough. */ -final class CloudStateRunner private[this](_system: ActorSystem, services: Map[String, StatefulService]) { +final class CloudStateRunner private[this] (_system: ActorSystem, services: Map[String, StatefulService]) { private[this] implicit final val system = _system private[this] implicit final val materializer: Materializer = ActorMaterializer() - private[this] final val configuration = new CloudStateRunner.Configuration(system.settings.config.getConfig("cloudstate")) + private[this] final val configuration = + new CloudStateRunner.Configuration(system.settings.config.getConfig("cloudstate")) // TODO JavaDoc def this(services: java.util.Map[String, StatefulService]) { @@ -85,27 +86,27 @@ final class CloudStateRunner private[this](_system: ActorSystem, services: Map[S private[this] def createRoutes(): PartialFunction[HttpRequest, Future[HttpResponse]] = { - val serviceRoutes = services.groupBy(_._2.getClass).foldLeft(PartialFunction.empty[HttpRequest, Future[HttpResponse]]) { + val serviceRoutes = + services.groupBy(_._2.getClass).foldLeft(PartialFunction.empty[HttpRequest, Future[HttpResponse]]) { - case (route, (serviceClass, eventSourcedServices: Map[String, EventSourcedStatefulService] @unchecked)) - if serviceClass == classOf[EventSourcedStatefulService] => + case (route, (serviceClass, eventSourcedServices: Map[String, EventSourcedStatefulService] @unchecked)) + if serviceClass == classOf[EventSourcedStatefulService] => val eventSourcedImpl = new EventSourcedImpl(system, eventSourcedServices, rootContext, configuration) route orElse EventSourcedHandler.partial(eventSourcedImpl) - case (route, (serviceClass, crdtServices: Map[String, CrdtStatefulService] @unchecked)) - if serviceClass == classOf[CrdtStatefulService] => - val crdtImpl = new CrdtImpl(system, crdtServices, rootContext) - route orElse CrdtHandler.partial(crdtImpl) + case (route, (serviceClass, crdtServices: Map[String, CrdtStatefulService] @unchecked)) + if serviceClass == classOf[CrdtStatefulService] => + val crdtImpl = new CrdtImpl(system, crdtServices, rootContext) + route orElse CrdtHandler.partial(crdtImpl) - case (_, (serviceClass, _)) => - sys.error(s"Unknown StatefulService: $serviceClass") - } + case (_, (serviceClass, _)) => + sys.error(s"Unknown StatefulService: $serviceClass") + } val entityDiscovery = EntityDiscoveryHandler.partial(new EntityDiscoveryImpl(system, services)) serviceRoutes orElse - 
entityDiscovery orElse - { case _ => Future.successful(HttpResponse(StatusCodes.NotFound)) } + entityDiscovery orElse { case _ => Future.successful(HttpResponse(StatusCodes.NotFound)) } } /** @@ -114,17 +115,19 @@ final class CloudStateRunner private[this](_system: ActorSystem, services: Map[S * @return a CompletionStage which will be completed when the server has shut down. */ def run(): CompletionStage[Done] = { - val serverBindingFuture = Http.get(system).bindAndHandleAsync( - createRoutes(), - configuration.userFunctionInterface, - configuration.userFunctionPort, - HttpConnectionContext(UseHttp2.Always)) + val serverBindingFuture = Http + .get(system) + .bindAndHandleAsync(createRoutes(), + configuration.userFunctionInterface, + configuration.userFunctionPort, + HttpConnectionContext(UseHttp2.Always)) // FIXME Register an onTerminate callback to unbind the Http server - FutureConverters. - toJava(serverBindingFuture). - thenCompose( + FutureConverters + .toJava(serverBindingFuture) + .thenCompose( binding => system.getWhenTerminated.thenCompose(_ => FutureConverters.toJava(binding.unbind())) - ).thenApply(_ => Done) + ) + .thenApply(_ => Done) } /** @@ -141,6 +144,7 @@ final class CloudStateRunner private[this](_system: ActorSystem, services: Map[S * to deploy. */ trait StatefulService { + /** * @return a Protobuf ServiceDescriptor of its externally accessible gRPC API */ @@ -160,4 +164,3 @@ trait StatefulService { // TODO JavaDoc def resolvedMethods: Option[Map[String, ResolvedServiceMethod[_, _]]] } - diff --git a/java-support/src/main/scala/io/cloudstate/javasupport/impl/AnySupport.scala b/java-support/src/main/scala/io/cloudstate/javasupport/impl/AnySupport.scala index fc1b36065..26989d545 100644 --- a/java-support/src/main/scala/io/cloudstate/javasupport/impl/AnySupport.scala +++ b/java-support/src/main/scala/io/cloudstate/javasupport/impl/AnySupport.scala @@ -5,7 +5,16 @@ import java.util.Locale import com.fasterxml.jackson.databind.ObjectMapper import com.google.common.base.CaseFormat -import com.google.protobuf.{ByteString, CodedInputStream, CodedOutputStream, Descriptors, Parser, UnsafeByteOperations, WireFormat, Any => JavaPbAny} +import com.google.protobuf.{ + ByteString, + CodedInputStream, + CodedOutputStream, + Descriptors, + Parser, + UnsafeByteOperations, + WireFormat, + Any => JavaPbAny +} import com.google.protobuf.any.{Any => ScalaPbAny} import io.cloudstate.javasupport.Jsonable import io.cloudstate.javasupport.impl.AnySupport.Prefer.{Java, Scala} @@ -60,38 +69,46 @@ object AnySupport { }, new Primitive[java.lang.Long] { override def fieldType = WireFormat.FieldType.INT64 - override def defaultValue = 0l - override def write(stream: CodedOutputStream, t: java.lang.Long) = stream.writeInt64(CloudStatePrimitiveFieldNumber, t) + override def defaultValue = 0L + override def write(stream: CodedOutputStream, t: java.lang.Long) = + stream.writeInt64(CloudStatePrimitiveFieldNumber, t) override def read(stream: CodedInputStream) = stream.readInt64() }, new Primitive[java.lang.Float] { override def fieldType = WireFormat.FieldType.FLOAT override def defaultValue = 0f - override def write(stream: CodedOutputStream, t: java.lang.Float) = stream.writeFloat(CloudStatePrimitiveFieldNumber, t) + override def write(stream: CodedOutputStream, t: java.lang.Float) = + stream.writeFloat(CloudStatePrimitiveFieldNumber, t) override def read(stream: CodedInputStream) = stream.readFloat() }, new Primitive[java.lang.Double] { override def fieldType = WireFormat.FieldType.DOUBLE override def 
defaultValue = 0d - override def write(stream: CodedOutputStream, t: java.lang.Double) = stream.writeDouble(CloudStatePrimitiveFieldNumber, t) + override def write(stream: CodedOutputStream, t: java.lang.Double) = + stream.writeDouble(CloudStatePrimitiveFieldNumber, t) override def read(stream: CodedInputStream) = stream.readDouble() }, new Primitive[java.lang.Boolean] { override def fieldType = WireFormat.FieldType.BOOL override def defaultValue = false - override def write(stream: CodedOutputStream, t: java.lang.Boolean) = stream.writeBool(CloudStatePrimitiveFieldNumber, t) + override def write(stream: CodedOutputStream, t: java.lang.Boolean) = + stream.writeBool(CloudStatePrimitiveFieldNumber, t) override def read(stream: CodedInputStream) = stream.readBool() } ) - private final val ClassToPrimitives = Primitives.map(p => p.clazz -> p) - .asInstanceOf[Seq[(Any, Primitive[Any])]].toMap - private final val NameToPrimitives = Primitives.map(p => p.fullName -> p) - .asInstanceOf[Seq[(String, Primitive[Any])]].toMap + private final val ClassToPrimitives = Primitives + .map(p => p.clazz -> p) + .asInstanceOf[Seq[(Any, Primitive[Any])]] + .toMap + private final val NameToPrimitives = Primitives + .map(p => p.fullName -> p) + .asInstanceOf[Seq[(String, Primitive[Any])]] + .toMap private final val objectMapper = new ObjectMapper() - private def primitiveToBytes[T](primitive: Primitive[T], value: T): ByteString = { + private def primitiveToBytes[T](primitive: Primitive[T], value: T): ByteString = if (value != primitive.defaultValue) { val baos = new ByteArrayOutputStream() val stream = CodedOutputStream.newInstance(baos) @@ -99,26 +116,26 @@ object AnySupport { stream.flush() UnsafeByteOperations.unsafeWrap(baos.toByteArray) } else ByteString.EMPTY - } private def bytesToPrimitive[T](primitive: Primitive[T], bytes: ByteString) = { val stream = bytes.newCodedInput() - if (Stream.continually(stream.readTag()) - .takeWhile(_ != 0) - .exists { tag => - if (primitive.tag != tag) { - stream.skipField(tag) - false - } else true - }) { + if (Stream + .continually(stream.readTag()) + .takeWhile(_ != 0) + .exists { tag => + if (primitive.tag != tag) { + stream.skipField(tag) + false + } else true + }) { primitive.read(stream) } else primitive.defaultValue } /** - * When locating protobufs, if both a Java and a ScalaPB generated class is found on the classpath, this says which - * should be preferred. - */ + * When locating protobufs, if both a Java and a ScalaPB generated class is found on the classpath, this says which + * should be preferred. 
+ */ sealed trait Prefer final object Prefer { case object Java extends Prefer @@ -128,11 +145,13 @@ object AnySupport { final val PREFER_JAVA = Java final val PREFER_SCALA = Scala - def flattenDescriptors(descriptors: Seq[Descriptors.FileDescriptor]): Map[String, Descriptors.FileDescriptor] = { + def flattenDescriptors(descriptors: Seq[Descriptors.FileDescriptor]): Map[String, Descriptors.FileDescriptor] = flattenDescriptors(Map.empty, descriptors) - } - private def flattenDescriptors(seenSoFar: Map[String, Descriptors.FileDescriptor], descriptors: Seq[Descriptors.FileDescriptor]): Map[String, Descriptors.FileDescriptor] = { + private def flattenDescriptors( + seenSoFar: Map[String, Descriptors.FileDescriptor], + descriptors: Seq[Descriptors.FileDescriptor] + ): Map[String, Descriptors.FileDescriptor] = descriptors.foldLeft(seenSoFar) { case (results, descriptor) => val descriptorName = descriptor.getName @@ -142,10 +161,11 @@ object AnySupport { flattenDescriptors(withDesc, descriptor.getDependencies.asScala ++ descriptor.getPublicDependencies.asScala) } } - } } -class AnySupport(descriptors: Array[Descriptors.FileDescriptor], classLoader: ClassLoader, typeUrlPrefix: String = AnySupport.DefaultTypeUrlPrefix, +class AnySupport(descriptors: Array[Descriptors.FileDescriptor], + classLoader: ClassLoader, + typeUrlPrefix: String = AnySupport.DefaultTypeUrlPrefix, prefer: AnySupport.Prefer = AnySupport.Prefer.Java) { import AnySupport._ private val allDescriptors = flattenDescriptors(descriptors) @@ -159,9 +179,8 @@ class AnySupport(descriptors: Array[Descriptors.FileDescriptor], classLoader: Cl private val reflectionCache = TrieMap.empty[String, Try[ResolvedType[Any]]] - private def strippedFileName(fileName: String) = { + private def strippedFileName(fileName: String) = fileName.split(Array('/', '\\')).last.stripSuffix(".proto") - } private def tryResolveJavaPbType(typeDescriptor: Descriptors.Descriptor) = { val fileDescriptor = typeDescriptor.getFile @@ -175,7 +194,8 @@ class AnySupport(descriptors: Array[Descriptors.FileDescriptor], classLoader: Cl val outerClassName = if (options.hasJavaMultipleFiles && options.getJavaMultipleFiles) "" else if (options.hasJavaOuterClassname) options.getJavaOuterClassname + "$" - else if (fileDescriptor.getName.nonEmpty) CaseFormat.LOWER_UNDERSCORE.to(CaseFormat.UPPER_CAMEL, strippedFileName(fileDescriptor.getName)) + "$" + else if (fileDescriptor.getName.nonEmpty) + CaseFormat.LOWER_UNDERSCORE.to(CaseFormat.UPPER_CAMEL, strippedFileName(fileDescriptor.getName)) + "$" else "" val className = packageName + outerClassName + typeDescriptor.getName @@ -184,8 +204,11 @@ class AnySupport(descriptors: Array[Descriptors.FileDescriptor], classLoader: Cl val clazz = classLoader.loadClass(className) if (classOf[com.google.protobuf.Message].isAssignableFrom(clazz)) { val parser = clazz.getMethod("parser").invoke(null).asInstanceOf[Parser[com.google.protobuf.Message]] - Some(new JavaPbResolvedType(clazz.asInstanceOf[Class[com.google.protobuf.Message]], - typeUrlPrefix + "/" + typeDescriptor.getFullName, parser)) + Some( + new JavaPbResolvedType(clazz.asInstanceOf[Class[com.google.protobuf.Message]], + typeUrlPrefix + "/" + typeDescriptor.getFullName, + parser) + ) } else { None } @@ -193,9 +216,15 @@ class AnySupport(descriptors: Array[Descriptors.FileDescriptor], classLoader: Cl case cnfe: ClassNotFoundException => log.debug("Failed to load class", cnfe) None - case nsme: NoSuchElementException => throw SerializationException(s"Found com.google.protobuf.Message class 
$className to deserialize protobuf ${typeDescriptor.getFullName} but it didn't have a static parser() method on it.", nsme) - case iae @ (_: IllegalAccessException | _: IllegalArgumentException) => throw SerializationException(s"Could not invoke $className.parser()", iae) - case cce: ClassCastException => throw SerializationException(s"$className.parser() did not return a ${classOf[Parser[_]]}", cce) + case nsme: NoSuchElementException => + throw SerializationException( + s"Found com.google.protobuf.Message class $className to deserialize protobuf ${typeDescriptor.getFullName} but it didn't have a static parser() method on it.", + nsme + ) + case iae @ (_: IllegalAccessException | _: IllegalArgumentException) => + throw SerializationException(s"Could not invoke $className.parser()", iae) + case cce: ClassCastException => + throw SerializationException(s"$className.parser() did not return a ${classOf[Parser[_]]}", cce) } } @@ -231,10 +260,13 @@ class AnySupport(descriptors: Array[Descriptors.FileDescriptor], classLoader: Cl val clazz = classLoader.loadClass(className) val companion = classLoader.loadClass(companionName) if (classOf[GeneratedMessageCompanion[_]].isAssignableFrom(companion) && - classOf[scalapb.GeneratedMessage].isAssignableFrom(clazz)) { + classOf[scalapb.GeneratedMessage].isAssignableFrom(clazz)) { val companionObject = companion.getField("MODULE$").get(null).asInstanceOf[GeneratedMessageCompanion[_]] - Some(new ScalaPbResolvedType(clazz.asInstanceOf[Class[scalapb.GeneratedMessage]], typeUrlPrefix + "/" + typeDescriptor.getFullName, - companionObject)) + Some( + new ScalaPbResolvedType(clazz.asInstanceOf[Class[scalapb.GeneratedMessage]], + typeUrlPrefix + "/" + typeDescriptor.getFullName, + companionObject) + ) } else { None } @@ -246,59 +278,75 @@ class AnySupport(descriptors: Array[Descriptors.FileDescriptor], classLoader: Cl }) } - def resolveTypeDescriptor(typeDescriptor: Descriptors.Descriptor): ResolvedType[Any] = { - reflectionCache.getOrElseUpdate(typeDescriptor.getFullName, Try { - val maybeResolvedType = if (prefer == Prefer.Java) { - tryResolveJavaPbType(typeDescriptor) orElse - tryResolveScalaPbType(typeDescriptor) - } else { - tryResolveScalaPbType(typeDescriptor) orElse - tryResolveJavaPbType(typeDescriptor) - } + def resolveTypeDescriptor(typeDescriptor: Descriptors.Descriptor): ResolvedType[Any] = + reflectionCache + .getOrElseUpdate( + typeDescriptor.getFullName, + Try { + val maybeResolvedType = if (prefer == Prefer.Java) { + tryResolveJavaPbType(typeDescriptor) orElse + tryResolveScalaPbType(typeDescriptor) + } else { + tryResolveScalaPbType(typeDescriptor) orElse + tryResolveJavaPbType(typeDescriptor) + } - maybeResolvedType match { - case Some(resolvedType) => resolvedType.asInstanceOf[ResolvedType[Any]] - case None => - throw SerializationException("Could not determine serializer for type " + typeDescriptor.getFullName) - } - }).get - } + maybeResolvedType match { + case Some(resolvedType) => resolvedType.asInstanceOf[ResolvedType[Any]] + case None => + throw SerializationException("Could not determine serializer for type " + typeDescriptor.getFullName) + } + } + ) + .get - def resolveServiceDescriptor(serviceDescriptor: Descriptors.ServiceDescriptor): Map[String, ResolvedServiceMethod[_, _]] = { + def resolveServiceDescriptor( + serviceDescriptor: Descriptors.ServiceDescriptor + ): Map[String, ResolvedServiceMethod[_, _]] = serviceDescriptor.getMethods.asScala.map { method => - method.getName -> ResolvedServiceMethod(method, 
resolveTypeDescriptor(method.getInputType), - resolveTypeDescriptor(method.getOutputType)) + method.getName -> ResolvedServiceMethod(method, + resolveTypeDescriptor(method.getInputType), + resolveTypeDescriptor(method.getOutputType)) }.toMap - } - private def resolveTypeUrl(typeName: String): Option[ResolvedType[_]] = { + private def resolveTypeUrl(typeName: String): Option[ResolvedType[_]] = allTypes.get(typeName).map(resolveTypeDescriptor) - } private def decodeJson(typeUrl: String, bytes: ByteString) = { val jsonType = typeUrl.substring(CloudStateJson.length) - reflectionCache.getOrElseUpdate("$json$" + jsonType, Try { - try { - val jsonClass = classLoader.loadClass(jsonType) - if (jsonClass.getAnnotation(classOf[Jsonable]) == null) { - throw SerializationException(s"Illegal CloudEvents json class, no @Jsonable annotation is present: $jsonType") + reflectionCache + .getOrElseUpdate( + "$json$" + jsonType, + Try { + try { + val jsonClass = classLoader.loadClass(jsonType) + if (jsonClass.getAnnotation(classOf[Jsonable]) == null) { + throw SerializationException( + s"Illegal CloudEvents json class, no @Jsonable annotation is present: $jsonType" + ) + } + new JacksonResolvedType(jsonClass.asInstanceOf[Class[Any]], + typeUrl, + objectMapper.readerFor(jsonClass), + objectMapper.writerFor(jsonClass)) + } catch { + case cnfe: ClassNotFoundException => + throw SerializationException("Could not load JSON class: " + jsonType, cnfe) + } } - new JacksonResolvedType(jsonClass.asInstanceOf[Class[Any]], typeUrl, objectMapper.readerFor(jsonClass), objectMapper.writerFor(jsonClass)) - } catch { - case cnfe: ClassNotFoundException => throw SerializationException("Could not load JSON class: " + jsonType, cnfe) - } - }).get.parseFrom(bytesToPrimitive(BytesPrimitive, bytes)) + ) + .get + .parseFrom(bytesToPrimitive(BytesPrimitive, bytes)) } - def encodeJava(value: Any): JavaPbAny = { + def encodeJava(value: Any): JavaPbAny = value match { case javaPbAny: JavaPbAny => javaPbAny case scalaPbAny: ScalaPbAny => ScalaPbAny.toJavaProto(scalaPbAny) case _ => ScalaPbAny.toJavaProto(encodeScala(value)) } - } - def encodeScala(value: Any): ScalaPbAny = { + def encodeScala(value: Any): ScalaPbAny = value match { case javaPbAny: JavaPbAny => ScalaPbAny.fromJavaProto(javaPbAny) case scalaPbAny: ScalaPbAny => scalaPbAny @@ -327,15 +375,16 @@ class AnySupport(descriptors: Array[Descriptors.FileDescriptor], classLoader: Cl ScalaPbAny(CloudStateJson + value.getClass.getName, primitiveToBytes(BytesPrimitive, json)) case other => - throw SerializationException(s"Don't know how to serialize object of type ${other.getClass}. Try passing a protobuf, using a primitive type, or using a type annotated with @Jsonable.") + throw SerializationException( + s"Don't know how to serialize object of type ${other.getClass}. Try passing a protobuf, using a primitive type, or using a type annotated with @Jsonable." 
+ ) } - } def decode(any: ScalaPbAny): Any = decode(any.typeUrl, any.value) def decode(any: JavaPbAny): Any = decode(any.getTypeUrl, any.getValue) - private def decode(typeUrl: String, bytes: ByteString): Any = { + private def decode(typeUrl: String, bytes: ByteString): Any = if (typeUrl.startsWith(CloudStatePrimitive)) { NameToPrimitives.get(typeUrl) match { case Some(primitive) => @@ -349,11 +398,17 @@ class AnySupport(descriptors: Array[Descriptors.FileDescriptor], classLoader: Cl val typeName = typeUrl.split("/", 2) match { case Array(host, typeName) => if (host != typeUrlPrefix) { - log.warn("Message type [{}] does not match configured type url prefix [{}]", typeUrl: Any, typeUrlPrefix: Any) + log.warn("Message type [{}] does not match configured type url prefix [{}]", + typeUrl: Any, + typeUrlPrefix: Any) } typeName case _ => - log.warn("Message type [{}] does not have a url prefix, it should have one that matchers the configured type url prefix [{}]", typeUrl: Any, typeUrlPrefix: Any) + log.warn( + "Message type [{}] does not have a url prefix, it should have one that matchers the configured type url prefix [{}]", + typeUrl: Any, + typeUrlPrefix: Any + ) typeUrl } @@ -364,13 +419,10 @@ class AnySupport(descriptors: Array[Descriptors.FileDescriptor], classLoader: Cl throw SerializationException("Unable to find descriptor for type: " + typeUrl) } } - } - def decodeProtobuf(typeDescriptor: Descriptors.Descriptor, any: ScalaPbAny) = { + def decodeProtobuf(typeDescriptor: Descriptors.Descriptor, any: ScalaPbAny) = resolveTypeDescriptor(typeDescriptor).parseFrom(any.value) - } - } -final case class SerializationException(msg: String, cause: Throwable = null) extends RuntimeException(msg, cause) \ No newline at end of file +final case class SerializationException(msg: String, cause: Throwable = null) extends RuntimeException(msg, cause) diff --git a/java-support/src/main/scala/io/cloudstate/javasupport/impl/Contexts.scala b/java-support/src/main/scala/io/cloudstate/javasupport/impl/Contexts.scala index 8d6eff6d0..5b8323b32 100644 --- a/java-support/src/main/scala/io/cloudstate/javasupport/impl/Contexts.scala +++ b/java-support/src/main/scala/io/cloudstate/javasupport/impl/Contexts.scala @@ -54,22 +54,26 @@ private[impl] trait AbstractClientActionContext extends ClientActionContext { if (forward.isDefined) { throw new IllegalStateException("This context has already forwarded.") } - forward = Some(Forward( - serviceName = to.ref().method().getService.getFullName, - commandName = to.ref().method().getName, - payload = Some(ScalaPbAny.fromJavaProto(to.message())), - )) + forward = Some( + Forward( + serviceName = to.ref().method().getService.getFullName, + commandName = to.ref().method().getName, + payload = Some(ScalaPbAny.fromJavaProto(to.message())) + ) + ) } final def hasError: Boolean = error.isDefined - final def createClientAction(reply: Optional[JavaPbAny], allowNoReply: Boolean): Option[ClientAction] = { + final def createClientAction(reply: Optional[JavaPbAny], allowNoReply: Boolean): Option[ClientAction] = error match { case Some(msg) => Some(ClientAction(ClientAction.Action.Failure(Failure(commandId, msg)))) case None => if (reply.isPresent) { if (forward.isDefined) { - throw new IllegalStateException("Both a reply was returned, and a forward message was sent, choose one or the other.") + throw new IllegalStateException( + "Both a reply was returned, and a forward message was sent, choose one or the other." 
+ ) } Some(ClientAction(ClientAction.Action.Reply(Reply(Some(ScalaPbAny.fromJavaProto(reply.get())))))) } else if (forward.isDefined) { @@ -80,8 +84,8 @@ private[impl] trait AbstractClientActionContext extends ClientActionContext { throw new RuntimeException("No reply or forward returned by command handler!") } } - } } -object FailInvoked extends Throwable with NoStackTrace { override def toString: String = "CommandContext.fail(…) invoked" } - +object FailInvoked extends Throwable with NoStackTrace { + override def toString: String = "CommandContext.fail(…) invoked" +} diff --git a/java-support/src/main/scala/io/cloudstate/javasupport/impl/EntityDiscoveryImpl.scala b/java-support/src/main/scala/io/cloudstate/javasupport/impl/EntityDiscoveryImpl.scala index f0585a60a..a16fc1483 100644 --- a/java-support/src/main/scala/io/cloudstate/javasupport/impl/EntityDiscoveryImpl.scala +++ b/java-support/src/main/scala/io/cloudstate/javasupport/impl/EntityDiscoveryImpl.scala @@ -26,7 +26,8 @@ import io.cloudstate.javasupport.{BuildInfo, StatefulService} class EntityDiscoveryImpl(system: ActorSystem, services: Map[String, StatefulService]) extends EntityDiscovery { private val serviceInfo = ServiceInfo( - serviceRuntime = sys.props.getOrElse("java.runtime.name", "") + " " + sys.props.getOrElse("java.runtime.version", ""), + serviceRuntime = sys.props.getOrElse("java.runtime.name", "") + " " + sys.props.getOrElse("java.runtime.version", + ""), supportLibraryName = BuildInfo.name, supportLibraryVersion = BuildInfo.version ) @@ -35,20 +36,26 @@ class EntityDiscoveryImpl(system: ActorSystem, services: Map[String, StatefulSer * Discover what entities the user function wishes to serve. */ override def discover(in: ProxyInfo): scala.concurrent.Future[EntitySpec] = { - system.log.info(s"Received discovery call from sidecar [${in.proxyName} ${in.proxyVersion}] supporting CloudState ${in.protocolMajorVersion}.${in.protocolMinorVersion}") - system.log.debug(s"Supported sidecar entity types: ${in.supportedEntityTypes.mkString("[",",","]")}") + system.log.info( + s"Received discovery call from sidecar [${in.proxyName} ${in.proxyVersion}] supporting CloudState ${in.protocolMajorVersion}.${in.protocolMinorVersion}" + ) + system.log.debug(s"Supported sidecar entity types: ${in.supportedEntityTypes.mkString("[", ",", "]")}") val unsupportedServices = services.values.filterNot { service => in.supportedEntityTypes.contains(service.entityType) } if (unsupportedServices.nonEmpty) { - system.log.error("Proxy doesn't support the entity types for the following services: " + unsupportedServices.map(s => s.descriptor.getFullName + ": " + s.entityType).mkString(", ")) + system.log.error( + "Proxy doesn't support the entity types for the following services: " + unsupportedServices + .map(s => s.descriptor.getFullName + ": " + s.entityType) + .mkString(", ") + ) // Don't fail though. The proxy may give us more information as to why it doesn't support them if we send back unsupported services. // eg, the proxy doesn't have a configured journal, and so can't support event sourcing. 
} - if ( false ) // TODO verify compatibility with in.protocolMajorVersion & in.protocolMinorVersion + if (false) // TODO verify compatibility with in.protocolMajorVersion & in.protocolMinorVersion Future.failed(new Exception("Proxy version not compatible with library protocol support version")) else { val allDescriptors = AnySupport.flattenDescriptors(services.values.map(_.descriptor.getFile).toSeq) @@ -64,7 +71,7 @@ class EntityDiscoveryImpl(system: ActorSystem, services: Map[String, StatefulSer Future.successful(EntitySpec(fileDescriptorSet, entities, Some(serviceInfo))) } } - + /** * Report an error back to the user function. This will only be invoked to tell the user function * that it has done something wrong, eg, violated the protocol, tried to use an entity type that @@ -75,4 +82,4 @@ class EntityDiscoveryImpl(system: ActorSystem, services: Map[String, StatefulSer system.log.error(s"Error reported from sidecar: ${in.message}") Future.successful(com.google.protobuf.empty.Empty.defaultInstance) } -} \ No newline at end of file +} diff --git a/java-support/src/main/scala/io/cloudstate/javasupport/impl/ReflectionHelper.scala b/java-support/src/main/scala/io/cloudstate/javasupport/impl/ReflectionHelper.scala index becb5dbcb..08a8e11ee 100644 --- a/java-support/src/main/scala/io/cloudstate/javasupport/impl/ReflectionHelper.scala +++ b/java-support/src/main/scala/io/cloudstate/javasupport/impl/ReflectionHelper.scala @@ -11,17 +11,15 @@ import scala.reflect.ClassTag private[impl] object ReflectionHelper { - def getAllDeclaredMethods(clazz: Class[_]): Seq[Method] = { + def getAllDeclaredMethods(clazz: Class[_]): Seq[Method] = if (clazz.getSuperclass == null || clazz.getSuperclass == classOf[Object]) { clazz.getDeclaredMethods } else { clazz.getDeclaredMethods.toVector ++ getAllDeclaredMethods(clazz.getSuperclass) } - } - def isWithinBounds(clazz: Class[_], upper: Class[_], lower: Class[_]): Boolean = { + def isWithinBounds(clazz: Class[_], upper: Class[_], lower: Class[_]): Boolean = upper.isAssignableFrom(clazz) && clazz.isAssignableFrom(lower) - } def ensureAccessible[T <: AccessibleObject](accessible: T): T = { if (!accessible.isAccessible) { @@ -30,13 +28,12 @@ private[impl] object ReflectionHelper { accessible } - def getCapitalizedName(member: Member): String = { + def getCapitalizedName(member: Member): String = // These use unicode upper/lower case definitions, rather than locale sensitive, // which is what we want. 
if (member.getName.charAt(0).isLower) { member.getName.charAt(0).toUpper + member.getName.drop(1) } else member.getName - } final case class InvocationContext[+C <: Context](mainArgument: AnyRef, context: C) trait ParameterHandler[-C <: Context] extends (InvocationContext[C] => AnyRef) @@ -56,64 +53,83 @@ private[impl] object ReflectionHelper { final case class MethodParameter(method: Executable, param: Int) { def parameterType: Class[_] = method.getParameterTypes()(param) def genericParameterType: Type = method.getGenericParameterTypes()(param) - def annotation[A <: Annotation: ClassTag] = method.getParameterAnnotations()(param) - .find(a => implicitly[ClassTag[A]].runtimeClass.isInstance(a)) + def annotation[A <: Annotation: ClassTag] = + method + .getParameterAnnotations()(param) + .find(a => implicitly[ClassTag[A]].runtimeClass.isInstance(a)) } - def getParameterHandlers[C <: Context: ClassTag](method: Executable)(extras: PartialFunction[MethodParameter, ParameterHandler[C]] = PartialFunction.empty): Array[ParameterHandler[C]] = { + def getParameterHandlers[C <: Context: ClassTag](method: Executable)( + extras: PartialFunction[MethodParameter, ParameterHandler[C]] = PartialFunction.empty + ): Array[ParameterHandler[C]] = { val handlers = Array.ofDim[ParameterHandler[_]](method.getParameterCount) for (i <- 0 until method.getParameterCount) { val parameter = MethodParameter(method, i) // First match things that we can be specific about val contextClass = implicitly[ClassTag[C]].runtimeClass - handlers(i) = if (isWithinBounds(parameter.parameterType, classOf[Context], contextClass)) - ContextParameterHandler - else if (classOf[Context].isAssignableFrom(parameter.parameterType)) - // It's a context parameter who is not within the lower bound of the contexts supported by this method - throw new RuntimeException(s"Unsupported context parameter on ${method.getName}, ${parameter.parameterType} must be the same or a super type of $contextClass") - else if (parameter.parameterType == classOf[ServiceCallFactory]) - ServiceCallFactoryParameterHandler - else if (parameter.annotation[EntityId].isDefined) { - if (parameter.parameterType != classOf[String]) { - throw new RuntimeException(s"@EntityId annotated parameter on method ${method.getName} has type ${parameter.parameterType}, must be String.") - } - EntityIdParameterHandler - } else - extras.applyOrElse(parameter, (p: MethodParameter) => MainArgumentParameterHandler(p.parameterType)) + handlers(i) = + if (isWithinBounds(parameter.parameterType, classOf[Context], contextClass)) + ContextParameterHandler + else if (classOf[Context].isAssignableFrom(parameter.parameterType)) + // It's a context parameter who is not within the lower bound of the contexts supported by this method + throw new RuntimeException( + s"Unsupported context parameter on ${method.getName}, ${parameter.parameterType} must be the same or a super type of $contextClass" + ) + else if (parameter.parameterType == classOf[ServiceCallFactory]) + ServiceCallFactoryParameterHandler + else if (parameter.annotation[EntityId].isDefined) { + if (parameter.parameterType != classOf[String]) { + throw new RuntimeException( + s"@EntityId annotated parameter on method ${method.getName} has type ${parameter.parameterType}, must be String." 
+ ) + } + EntityIdParameterHandler + } else + extras.applyOrElse(parameter, (p: MethodParameter) => MainArgumentParameterHandler(p.parameterType)) } handlers.asInstanceOf[Array[ParameterHandler[C]]] } - final class CommandHandlerInvoker[CommandContext <: Context : ClassTag](val method: Method, - val serviceMethod: ResolvedServiceMethod[_, _], - extraParameters: PartialFunction[MethodParameter, ParameterHandler[CommandContext]] = PartialFunction.empty) { + final class CommandHandlerInvoker[CommandContext <: Context: ClassTag]( + val method: Method, + val serviceMethod: ResolvedServiceMethod[_, _], + extraParameters: PartialFunction[MethodParameter, ParameterHandler[CommandContext]] = PartialFunction.empty + ) { private val name = serviceMethod.descriptor.getFullName private val parameters = ReflectionHelper.getParameterHandlers[CommandContext](method)(extraParameters) if (parameters.count(_.isInstanceOf[MainArgumentParameterHandler[_]]) > 1) { - throw new RuntimeException(s"CommandHandler method $method must defined at most one non context parameter to handle commands, the parameters defined were: ${parameters.collect { case MainArgumentParameterHandler(clazz) => clazz.getName }.mkString(",")}") + throw new RuntimeException( + s"CommandHandler method $method must defined at most one non context parameter to handle commands, the parameters defined were: ${parameters + .collect { case MainArgumentParameterHandler(clazz) => clazz.getName } + .mkString(",")}" + ) } parameters.foreach { case MainArgumentParameterHandler(inClass) if !inClass.isAssignableFrom(serviceMethod.inputType.typeClass) => - throw new RuntimeException(s"Incompatible command class $inClass for command $name, expected ${serviceMethod.inputType.typeClass}") + throw new RuntimeException( + s"Incompatible command class $inClass for command $name, expected ${serviceMethod.inputType.typeClass}" + ) case _ => } - private def serialize(result: AnyRef) = { - JavaPbAny.newBuilder().setTypeUrl(serviceMethod.outputType.typeUrl) + private def serialize(result: AnyRef) = + JavaPbAny + .newBuilder() + .setTypeUrl(serviceMethod.outputType.typeUrl) .setValue(serviceMethod.outputType.asInstanceOf[ResolvedType[Any]].toByteString(result)) .build() - } - private def verifyOutputType(t: Type): Unit = { + private def verifyOutputType(t: Type): Unit = if (!serviceMethod.outputType.typeClass.isAssignableFrom(getRawType(t))) { - throw new RuntimeException(s"Incompatible return class $t for command $name, expected ${serviceMethod.outputType.typeClass}") + throw new RuntimeException( + s"Incompatible return class $t for command $name, expected ${serviceMethod.outputType.typeClass}" + ) } - } - private val handleResult: AnyRef => Optional[JavaPbAny] = if (method.getReturnType == Void.TYPE) { - _ => Optional.empty() + private val handleResult: AnyRef => Optional[JavaPbAny] = if (method.getReturnType == Void.TYPE) { _ => + Optional.empty() } else if (method.getReturnType == classOf[Optional[_]]) { verifyOutputType(getFirstParameter(method.getGenericReturnType)) @@ -145,32 +161,34 @@ private[impl] object ReflectionHelper { case _ => classOf[Object] } - - def getFirstParameter(t: Type): Class[_] = { + def getFirstParameter(t: Type): Class[_] = t match { case pt: ParameterizedType => getRawType(pt.getActualTypeArguments()(0)) case _ => classOf[AnyRef] } - } /** - * Verifies that none of the given methods have CloudState annotations that are not allowed. - * - * This is designed to eagerly catch mistakes such as importing the wrong CommandHandler annotation. 
- */ - def validateNoBadMethods(methods: Seq[Method], entity: Class[_ <: Annotation], allowed: Set[Class[_ <: Annotation]]): Unit = { + * Verifies that none of the given methods have CloudState annotations that are not allowed. + * + * This is designed to eagerly catch mistakes such as importing the wrong CommandHandler annotation. + */ + def validateNoBadMethods(methods: Seq[Method], + entity: Class[_ <: Annotation], + allowed: Set[Class[_ <: Annotation]]): Unit = methods.foreach { method => method.getAnnotations.foreach { annotation => - if (annotation.annotationType().getAnnotation(classOf[CloudStateAnnotation]) != null && !allowed(annotation.annotationType())) { + if (annotation.annotationType().getAnnotation(classOf[CloudStateAnnotation]) != null && !allowed( + annotation.annotationType() + )) { val maybeAlternative = allowed.find(_.getSimpleName == annotation.annotationType().getSimpleName) - throw new RuntimeException(s"Annotation @${annotation.annotationType().getName} on method ${method.getDeclaringClass.getName}." + + throw new RuntimeException( + s"Annotation @${annotation.annotationType().getName} on method ${method.getDeclaringClass.getName}." + s"${method.getName} not allowed in @${entity.getName} annotated entity." + maybeAlternative.fold("")(alterative => s" Did you mean to use @${alterative.getName}?") ) } } } - } } diff --git a/java-support/src/main/scala/io/cloudstate/javasupport/impl/ResolvedServiceCallFactory.scala b/java-support/src/main/scala/io/cloudstate/javasupport/impl/ResolvedServiceCallFactory.scala index b74c08f91..bad047d15 100644 --- a/java-support/src/main/scala/io/cloudstate/javasupport/impl/ResolvedServiceCallFactory.scala +++ b/java-support/src/main/scala/io/cloudstate/javasupport/impl/ResolvedServiceCallFactory.scala @@ -3,7 +3,7 @@ package io.cloudstate.javasupport.impl import io.cloudstate.javasupport.{ServiceCallFactory, ServiceCallRef, StatefulService} class ResolvedServiceCallFactory(services: Map[String, StatefulService]) extends ServiceCallFactory { - override def lookup[T](serviceName: String, methodName: String, methodType: Class[T]): ServiceCallRef[T] = { + override def lookup[T](serviceName: String, methodName: String, methodType: Class[T]): ServiceCallRef[T] = services.get(serviceName) match { case Some(service) => service.resolvedMethods match { @@ -12,15 +12,18 @@ class ResolvedServiceCallFactory(services: Map[String, StatefulService]) extends case Some(method) if method.inputType.typeClass.isAssignableFrom(methodType) => method.asInstanceOf[ServiceCallRef[T]] case Some(badTypedMethod) => - throw new IllegalArgumentException(s"The input type ${badTypedMethod.inputType.typeClass.getName} of $serviceName.$methodName does not match the requested message type ${methodType.getName}") + throw new IllegalArgumentException( + s"The input type ${badTypedMethod.inputType.typeClass.getName} of $serviceName.$methodName does not match the requested message type ${methodType.getName}" + ) case None => throw new NoSuchElementException(s"No method named $methodName found on service $serviceName") } case None => - throw new IllegalStateException(s"Service $serviceName does not provide resolved methods and so can't be looked up by this factory") + throw new IllegalStateException( + s"Service $serviceName does not provide resolved methods and so can't be looked up by this factory" + ) } case _ => throw new NoSuchElementException(s"No service named $serviceName is being handled by this stateful service") } - } } diff --git 
a/java-support/src/main/scala/io/cloudstate/javasupport/impl/ResolvedServiceMethod.scala b/java-support/src/main/scala/io/cloudstate/javasupport/impl/ResolvedServiceMethod.scala index 43e00a8ac..c9b907aab 100644 --- a/java-support/src/main/scala/io/cloudstate/javasupport/impl/ResolvedServiceMethod.scala +++ b/java-support/src/main/scala/io/cloudstate/javasupport/impl/ResolvedServiceMethod.scala @@ -1,80 +1,90 @@ package io.cloudstate.javasupport.impl import com.fasterxml.jackson.databind.{ObjectReader, ObjectWriter} -import com.google.protobuf.{ByteString, Descriptors, Parser, UnsafeByteOperations, Message => JavaMessage, Any => JavaPbAny} +import com.google.protobuf.{ + ByteString, + Descriptors, + Parser, + UnsafeByteOperations, + Message => JavaMessage, + Any => JavaPbAny +} import io.cloudstate.javasupport.{ServiceCall, ServiceCallRef} /** - * A resolved service method. - */ -final case class ResolvedServiceMethod[I, O](descriptor: Descriptors.MethodDescriptor, inputType: ResolvedType[I], - outputType: ResolvedType[O]) extends ServiceCallRef[I] { + * A resolved service method. + */ +final case class ResolvedServiceMethod[I, O](descriptor: Descriptors.MethodDescriptor, + inputType: ResolvedType[I], + outputType: ResolvedType[O]) + extends ServiceCallRef[I] { def outputStreamed: Boolean = descriptor.isServerStreaming def name: String = descriptor.getName override def method(): Descriptors.MethodDescriptor = descriptor - override def createCall(message: I): ServiceCall = { - ResolvedServiceCall(this, JavaPbAny.newBuilder() - .setTypeUrl(inputType.typeUrl) - .setValue(inputType.toByteString(message)) - .build() - ) - } + override def createCall(message: I): ServiceCall = + ResolvedServiceCall(this, + JavaPbAny + .newBuilder() + .setTypeUrl(inputType.typeUrl) + .setValue(inputType.toByteString(message)) + .build()) } final case class ResolvedServiceCall(ref: ServiceCallRef[_], message: JavaPbAny) extends ServiceCall /** - * A resolved type - */ + * A resolved type + */ trait ResolvedType[T] { + /** - * The class for this type. - */ + * The class for this type. + */ def typeClass: Class[T] /** - * The URL for this type. - */ + * The URL for this type. + */ def typeUrl: String /** - * Parse the given bytes into this type. - */ + * Parse the given bytes into this type. + */ def parseFrom(bytes: ByteString): T /** - * Convert the given value into a byte string. - */ + * Convert the given value into a byte string. 
+ */ def toByteString(value: T): ByteString } -private final class JavaPbResolvedType[T <: JavaMessage]( - override val typeClass: Class[T], - override val typeUrl: String, - parser: Parser[T]) extends ResolvedType[T] { +private final class JavaPbResolvedType[T <: JavaMessage](override val typeClass: Class[T], + override val typeUrl: String, + parser: Parser[T]) + extends ResolvedType[T] { override def parseFrom(bytes: ByteString): T = parser.parseFrom(bytes) override def toByteString(value: T): ByteString = value.toByteString } -private final class ScalaPbResolvedType[T <: scalapb.GeneratedMessage]( - override val typeClass: Class[T], - override val typeUrl: String, - companion: scalapb.GeneratedMessageCompanion[_]) extends ResolvedType[T] { +private final class ScalaPbResolvedType[T <: scalapb.GeneratedMessage](override val typeClass: Class[T], + override val typeUrl: String, + companion: scalapb.GeneratedMessageCompanion[_]) + extends ResolvedType[T] { override def parseFrom(bytes: ByteString): T = companion.parseFrom(bytes.newCodedInput()).asInstanceOf[T] override def toByteString(value: T): ByteString = value.toByteString } /** - * Not a real protobuf parser, but is useful none the less. - */ -private final class JacksonResolvedType[T]( - override val typeClass: Class[T], - override val typeUrl: String, - reader: ObjectReader, - writer: ObjectWriter) extends ResolvedType[T] { + * Not a real protobuf parser, but is useful none the less. + */ +private final class JacksonResolvedType[T](override val typeClass: Class[T], + override val typeUrl: String, + reader: ObjectReader, + writer: ObjectWriter) + extends ResolvedType[T] { override def parseFrom(bytes: ByteString): T = reader.readValue(bytes.toByteArray) override def toByteString(value: T): ByteString = UnsafeByteOperations.unsafeWrap(writer.writeValueAsBytes(value)) } @@ -82,4 +92,4 @@ private final class JacksonResolvedType[T]( trait ResolvedEntityFactory { // TODO JavaDoc def resolvedMethods: Map[String, ResolvedServiceMethod[_, _]] -} \ No newline at end of file +} diff --git a/java-support/src/main/scala/io/cloudstate/javasupport/impl/StatelessFunctionImpl.scala b/java-support/src/main/scala/io/cloudstate/javasupport/impl/StatelessFunctionImpl.scala index 4c61e2da5..3f349df49 100644 --- a/java-support/src/main/scala/io/cloudstate/javasupport/impl/StatelessFunctionImpl.scala +++ b/java-support/src/main/scala/io/cloudstate/javasupport/impl/StatelessFunctionImpl.scala @@ -22,8 +22,16 @@ import io.cloudstate.javasupport.CloudState // FIXME Implement support for this class StatelessFunctionImpl(system: ActorSystem) extends StatelessFunction { - override def handleUnary(in: io.cloudstate.protocol.function.FunctionCommand): scala.concurrent.Future[io.cloudstate.protocol.function.FunctionReply] = ??? - override def handleStreamedIn(in: akka.stream.scaladsl.Source[io.cloudstate.protocol.function.FunctionCommand, akka.NotUsed]): scala.concurrent.Future[io.cloudstate.protocol.function.FunctionReply] = ??? - override def handleStreamedOut(in: io.cloudstate.protocol.function.FunctionCommand): akka.stream.scaladsl.Source[io.cloudstate.protocol.function.FunctionReply, akka.NotUsed] = ??? - override def handleStreamed(in: akka.stream.scaladsl.Source[io.cloudstate.protocol.function.FunctionCommand, akka.NotUsed]): akka.stream.scaladsl.Source[io.cloudstate.protocol.function.FunctionReply, akka.NotUsed] = ??? 
-} \ No newline at end of file + override def handleUnary( + in: io.cloudstate.protocol.function.FunctionCommand + ): scala.concurrent.Future[io.cloudstate.protocol.function.FunctionReply] = ??? + override def handleStreamedIn( + in: akka.stream.scaladsl.Source[io.cloudstate.protocol.function.FunctionCommand, akka.NotUsed] + ): scala.concurrent.Future[io.cloudstate.protocol.function.FunctionReply] = ??? + override def handleStreamedOut( + in: io.cloudstate.protocol.function.FunctionCommand + ): akka.stream.scaladsl.Source[io.cloudstate.protocol.function.FunctionReply, akka.NotUsed] = ??? + override def handleStreamed( + in: akka.stream.scaladsl.Source[io.cloudstate.protocol.function.FunctionCommand, akka.NotUsed] + ): akka.stream.scaladsl.Source[io.cloudstate.protocol.function.FunctionReply, akka.NotUsed] = ??? +} diff --git a/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/AbstractCrdtFactory.scala b/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/AbstractCrdtFactory.scala index 3046e49fb..a91979fe3 100644 --- a/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/AbstractCrdtFactory.scala +++ b/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/AbstractCrdtFactory.scala @@ -1,6 +1,17 @@ package io.cloudstate.javasupport.impl.crdt -import io.cloudstate.javasupport.crdt.{Crdt, CrdtFactory, Flag, GCounter, GSet, LWWRegister, ORMap, ORSet, PNCounter, Vote} +import io.cloudstate.javasupport.crdt.{ + Crdt, + CrdtFactory, + Flag, + GCounter, + GSet, + LWWRegister, + ORMap, + ORSet, + PNCounter, + Vote +} import io.cloudstate.javasupport.impl.AnySupport // TODO JavaDoc @@ -30,4 +41,4 @@ trait AbstractCrdtFactory extends CrdtFactory { newCrdt(new ORMapImpl[K, InternalCrdt](anySupport)).asInstanceOf[ORMap[K, V]] // TODO JavaDoc override def newVote(): Vote = newCrdt(new VoteImpl) -} \ No newline at end of file +} diff --git a/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/AnnotationBasedCrdtSupport.scala b/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/AnnotationBasedCrdtSupport.scala index 9e7f9db52..f34d27a06 100644 --- a/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/AnnotationBasedCrdtSupport.scala +++ b/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/AnnotationBasedCrdtSupport.scala @@ -1,30 +1,64 @@ package io.cloudstate.javasupport.impl.crdt import java.lang.reflect.{Constructor, Executable, InvocationTargetException} -import java.util.{Optional, function} +import java.util.{function, Optional} import java.util.function.Consumer import com.google.protobuf.{Descriptors, Any => JavaPbAny} import io.cloudstate.javasupport.{Context, ServiceCall, ServiceCallFactory} -import io.cloudstate.javasupport.crdt.{CommandContext, CommandHandler, Crdt, CrdtContext, CrdtCreationContext, CrdtEntity, CrdtEntityFactory, CrdtEntityHandler, Flag, GCounter, GSet, LWWRegister, LWWRegisterMap, ORMap, ORSet, PNCounter, PNCounterMap, StreamCancelledContext, StreamedCommandContext, SubscriptionContext, Vote} -import io.cloudstate.javasupport.impl.ReflectionHelper.{CommandHandlerInvoker, InvocationContext, MainArgumentParameterHandler, MethodParameter, ParameterHandler} -import io.cloudstate.javasupport.impl.{AnySupport, ReflectionHelper, ResolvedEntityFactory, ResolvedServiceMethod, ResolvedType} +import io.cloudstate.javasupport.crdt.{ + CommandContext, + CommandHandler, + Crdt, + CrdtContext, + CrdtCreationContext, + CrdtEntity, + CrdtEntityFactory, + CrdtEntityHandler, + Flag, + GCounter, + 
GSet, + LWWRegister, + LWWRegisterMap, + ORMap, + ORSet, + PNCounter, + PNCounterMap, + StreamCancelledContext, + StreamedCommandContext, + SubscriptionContext, + Vote +} +import io.cloudstate.javasupport.impl.ReflectionHelper.{ + CommandHandlerInvoker, + InvocationContext, + MainArgumentParameterHandler, + MethodParameter, + ParameterHandler +} +import io.cloudstate.javasupport.impl.{ + AnySupport, + ReflectionHelper, + ResolvedEntityFactory, + ResolvedServiceMethod, + ResolvedType +} import scala.reflect.ClassTag - /** - * Annotation based implementation of the [[io.cloudstate.javasupport.crdt.CrdtEntityFactory]]. - */ -private[impl] class AnnotationBasedCrdtSupport(entityClass: Class[_], anySupport: AnySupport, - override val resolvedMethods: Map[String, ResolvedServiceMethod[_, _]], - factory: Option[CrdtCreationContext => AnyRef] = None) extends CrdtEntityFactory with ResolvedEntityFactory { + * Annotation based implementation of the [[io.cloudstate.javasupport.crdt.CrdtEntityFactory]]. + */ +private[impl] class AnnotationBasedCrdtSupport(entityClass: Class[_], + anySupport: AnySupport, + override val resolvedMethods: Map[String, ResolvedServiceMethod[_, _]], + factory: Option[CrdtCreationContext => AnyRef] = None) + extends CrdtEntityFactory + with ResolvedEntityFactory { // TODO JavaDoc - def this(entityClass: Class[_], anySupport: AnySupport, - serviceDescriptor: Descriptors.ServiceDescriptor) = + def this(entityClass: Class[_], anySupport: AnySupport, serviceDescriptor: Descriptors.ServiceDescriptor) = this(entityClass, anySupport, anySupport.resolveServiceDescriptor(serviceDescriptor)) - private val constructor: CrdtCreationContext => AnyRef = factory.getOrElse { entityClass.getConstructors match { case Array(single) => @@ -47,25 +81,32 @@ private[impl] class AnnotationBasedCrdtSupport(entityClass: Class[_], anySupport } else annotation.name() val serviceMethod = resolvedMethods.getOrElse(name, { - throw new RuntimeException(s"Command handler method ${method.getName} for command $name found, but the service has no command by that name.") + throw new RuntimeException( + s"Command handler method ${method.getName} for command $name found, but the service has no command by that name." 
+ ) }) (ReflectionHelper.ensureAccessible(method), serviceMethod) } - def getHandlers[C <: CrdtContext : ClassTag](streamed: Boolean) = - handlers.filter(_._2.outputStreamed == streamed) - .map { - case (method, serviceMethod) => new CommandHandlerInvoker[C](method, serviceMethod, CrdtAnnotationHelper.crdtParameterHandlers) - } - .groupBy(_.serviceMethod.name) - .map { - case (commandName, Seq(invoker)) => commandName -> invoker - case (commandName, many) => throw new RuntimeException(s"Multiple methods found for handling command of name $commandName: ${many.map(_.method.getName)}") - } + def getHandlers[C <: CrdtContext: ClassTag](streamed: Boolean) = + handlers + .filter(_._2.outputStreamed == streamed) + .map { + case (method, serviceMethod) => + new CommandHandlerInvoker[C](method, serviceMethod, CrdtAnnotationHelper.crdtParameterHandlers) + } + .groupBy(_.serviceMethod.name) + .map { + case (commandName, Seq(invoker)) => commandName -> invoker + case (commandName, many) => + throw new RuntimeException( + s"Multiple methods found for handling command of name $commandName: ${many.map(_.method.getName)}" + ) + } (getHandlers[CommandContext](false), getHandlers[StreamedCommandContext[AnyRef]](true)) } - + // TODO JavaDoc override def create(context: CrdtCreationContext): CrdtEntityHandler = { val entity = constructor(context) @@ -80,27 +121,35 @@ private[impl] class AnnotationBasedCrdtSupport(entityClass: Class[_], anySupport } maybeResult.getOrElse { - throw new RuntimeException(s"No command handler found for command [${context.commandName()}] on CRDT entity: $entityClass") + throw new RuntimeException( + s"No command handler found for command [${context.commandName()}] on CRDT entity: $entityClass" + ) } } - override def handleStreamedCommand(command: JavaPbAny, context: StreamedCommandContext[JavaPbAny]): Optional[JavaPbAny] = unwrap { + override def handleStreamedCommand(command: JavaPbAny, + context: StreamedCommandContext[JavaPbAny]): Optional[JavaPbAny] = unwrap { val maybeResult = streamedCommandHandlers.get(context.commandName()).map { handler => - val adaptedContext = new AdaptedStreamedCommandContext(context, handler.serviceMethod.outputType.asInstanceOf[ResolvedType[AnyRef]]) + val adaptedContext = + new AdaptedStreamedCommandContext(context, + handler.serviceMethod.outputType.asInstanceOf[ResolvedType[AnyRef]]) handler.invoke(entity, command, adaptedContext) } maybeResult.getOrElse { - throw new RuntimeException(s"No streamed command handler found for command [${context.commandName()}] on CRDT entity: $entityClass") + throw new RuntimeException( + s"No streamed command handler found for command [${context.commandName()}] on CRDT entity: $entityClass" + ) } } - private def unwrap[T](block: => T): T = try { - block - } catch { - case ite: InvocationTargetException if ite.getCause != null => - throw ite.getCause - } + private def unwrap[T](block: => T): T = + try { + block + } catch { + case ite: InvocationTargetException if ite.getCause != null => + throw ite.getCause + } } } @@ -109,39 +158,50 @@ private object CrdtAnnotationHelper { val crdtParameterHandlers: PartialFunction[MethodParameter, ParameterHandler[CrdtContext]] = { case crdt if classOf[Crdt].isAssignableFrom(crdt.parameterType) => new CrdtParameterHandler(crdt.parameterType.asInstanceOf[Class[_ <: Crdt]], crdt.method) - case crdt if crdt.parameterType == classOf[Optional[_]] && - classOf[Crdt].isAssignableFrom(ReflectionHelper.getFirstParameter(crdt.genericParameterType)) => - new 
OptionalCrdtParameterHandler(ReflectionHelper.getFirstParameter(crdt.genericParameterType).asInstanceOf[Class[_ <: Crdt]], crdt.method) + case crdt + if crdt.parameterType == classOf[Optional[_]] && + classOf[Crdt].isAssignableFrom(ReflectionHelper.getFirstParameter(crdt.genericParameterType)) => + new OptionalCrdtParameterHandler( + ReflectionHelper.getFirstParameter(crdt.genericParameterType).asInstanceOf[Class[_ <: Crdt]], + crdt.method + ) } - private class CrdtParameterHandler(crdtClass: Class[_ <: Crdt], method: Executable) extends ParameterHandler[CrdtContext] { + private class CrdtParameterHandler(crdtClass: Class[_ <: Crdt], method: Executable) + extends ParameterHandler[CrdtContext] { override def apply(ctx: InvocationContext[CrdtContext]): AnyRef = { val state = ctx.context.state(crdtClass) if (state.isPresent) { state.get() } else { - throw new IllegalStateException(s"${method.getDeclaringClass.getName}.${method.getName} requires a CRDT " + - s"of type ${crdtClass.getName}, but this entity has no CRDT created for it yet.") + throw new IllegalStateException( + s"${method.getDeclaringClass.getName}.${method.getName} requires a CRDT " + + s"of type ${crdtClass.getName}, but this entity has no CRDT created for it yet." + ) } } } - private class OptionalCrdtParameterHandler(crdtClass: Class[_ <: Crdt], method: Executable) extends ParameterHandler[CrdtContext] { - override def apply(ctx: InvocationContext[CrdtContext]): AnyRef = { + private class OptionalCrdtParameterHandler(crdtClass: Class[_ <: Crdt], method: Executable) + extends ParameterHandler[CrdtContext] { + override def apply(ctx: InvocationContext[CrdtContext]): AnyRef = ctx.context.state(crdtClass) - } } } -private final class AdaptedStreamedCommandContext(val delegate: StreamedCommandContext[JavaPbAny], resolvedType: ResolvedType[AnyRef]) extends StreamedCommandContext[AnyRef] { +private final class AdaptedStreamedCommandContext(val delegate: StreamedCommandContext[JavaPbAny], + resolvedType: ResolvedType[AnyRef]) + extends StreamedCommandContext[AnyRef] { override def isStreamed: Boolean = delegate.isStreamed - def onChange(subscriber: function.Function[SubscriptionContext, Optional[AnyRef]]): Unit = { + def onChange(subscriber: function.Function[SubscriptionContext, Optional[AnyRef]]): Unit = delegate.onChange { ctx => val result = subscriber(ctx) if (result.isPresent) { - Optional.of(JavaPbAny.newBuilder() + Optional.of( + JavaPbAny + .newBuilder() .setTypeUrl(resolvedType.typeUrl) .setValue(resolvedType.toByteString(result.get)) .build() @@ -150,7 +210,6 @@ private final class AdaptedStreamedCommandContext(val delegate: StreamedCommandC Optional.empty() } } - } override def onCancel(effect: Consumer[StreamCancelledContext]): Unit = delegate.onCancel(effect) @@ -177,7 +236,8 @@ private final class AdaptedStreamedCommandContext(val delegate: StreamedCommandC } private final class EntityConstructorInvoker(constructor: Constructor[_]) extends (CrdtCreationContext => AnyRef) { - private val parameters = ReflectionHelper.getParameterHandlers[CrdtCreationContext](constructor)(CrdtAnnotationHelper.crdtParameterHandlers) + private val parameters = + ReflectionHelper.getParameterHandlers[CrdtCreationContext](constructor)(CrdtAnnotationHelper.crdtParameterHandlers) parameters.foreach { case MainArgumentParameterHandler(clazz) => throw new RuntimeException(s"Don't know how to handle argument of type $clazz in constructor") @@ -189,5 +249,3 @@ private final class EntityConstructorInvoker(constructor: Constructor[_]) extend 
constructor.newInstance(parameters.map(_.apply(ctx)): _*).asInstanceOf[AnyRef] } } - - diff --git a/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/CrdtImpl.scala b/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/CrdtImpl.scala index 1da5333fe..c005a88ad 100644 --- a/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/CrdtImpl.scala +++ b/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/CrdtImpl.scala @@ -16,7 +16,7 @@ package io.cloudstate.javasupport.impl.crdt -import java.util.{Optional, function} +import java.util.{function, Optional} import java.util.function.Consumer import akka.NotUsed @@ -24,8 +24,24 @@ import akka.actor.ActorSystem import akka.stream.scaladsl.{Flow, Source} import com.google.protobuf.Descriptors import io.cloudstate.javasupport.{Context, ServiceCallFactory, StatefulService} -import io.cloudstate.javasupport.crdt.{CommandContext, CrdtContext, CrdtCreationContext, CrdtEntityFactory, StreamCancelledContext, StreamedCommandContext, SubscriptionContext} -import io.cloudstate.javasupport.impl.{AbstractClientActionContext, AbstractEffectContext, ActivatableContext, AnySupport, FailInvoked, ResolvedEntityFactory, ResolvedServiceMethod} +import io.cloudstate.javasupport.crdt.{ + CommandContext, + CrdtContext, + CrdtCreationContext, + CrdtEntityFactory, + StreamCancelledContext, + StreamedCommandContext, + SubscriptionContext +} +import io.cloudstate.javasupport.impl.{ + AbstractClientActionContext, + AbstractEffectContext, + ActivatableContext, + AnySupport, + FailInvoked, + ResolvedEntityFactory, + ResolvedServiceMethod +} import io.cloudstate.protocol.crdt._ import io.cloudstate.protocol.crdt.CrdtStreamIn.{Message => In} import io.cloudstate.protocol.entity.{Command, Failure, StreamCancelled} @@ -36,32 +52,32 @@ import scala.compat.java8.OptionConverters._ import scala.collection.JavaConverters._ final class CrdtStatefulService(val factory: CrdtEntityFactory, - override val descriptor: Descriptors.ServiceDescriptor, - val anySupport: AnySupport -) extends StatefulService { + override val descriptor: Descriptors.ServiceDescriptor, + val anySupport: AnySupport) + extends StatefulService { override final val entityType = Crdt.name - override def resolvedMethods: Option[Map[String, ResolvedServiceMethod[_, _]]] = { + override def resolvedMethods: Option[Map[String, ResolvedServiceMethod[_, _]]] = factory match { case resolved: ResolvedEntityFactory => Some(resolved.resolvedMethods) case _ => None } - } private val streamed = descriptor.getMethods.asScala.filter(_.toProto.getServerStreaming).map(_.getName).toSet def isStreamed(command: String): Boolean = streamed(command) } class CrdtImpl(system: ActorSystem, services: Map[String, CrdtStatefulService], rootContext: Context) extends Crdt { + /** - * After invoking handle, the first message sent will always be a CrdtInit message, containing the entity ID, and, - * if it exists or is available, the current state of the entity. After that, one or more commands may be sent, - * as well as deltas as they arrive, and the entire state if either the entity is created, or the proxy wishes the - * user function to replace its entire state. - * The user function must respond with one reply per command in. They do not necessarily have to be sent in the same - * order that the commands were sent, the command ID is used to correlate commands to replies. 
- */ - def handle(in: Source[CrdtStreamIn, NotUsed]): Source[CrdtStreamOut, NotUsed] = { + * After invoking handle, the first message sent will always be a CrdtInit message, containing the entity ID, and, + * if it exists or is available, the current state of the entity. After that, one or more commands may be sent, + * as well as deltas as they arrive, and the entire state if either the entity is created, or the proxy wishes the + * user function to replace its entire state. + * The user function must respond with one reply per command in. They do not necessarily have to be sent in the same + * order that the commands were sent, the command ID is used to correlate commands to replies. + */ + def handle(in: Source[CrdtStreamIn, NotUsed]): Source[CrdtStreamOut, NotUsed] = in.prefixAndTail(1) .flatMapConcat { case (Seq(CrdtStreamIn(In.Init(init))), source) => @@ -69,52 +85,53 @@ class CrdtImpl(system: ActorSystem, services: Map[String, CrdtStatefulService], case _ => // todo better error throw new RuntimeException("Expected Init message") - }.recover { - case e => - // FIXME translate to failure message - throw e - } - - } + } + .recover { + case e => + // FIXME translate to failure message + throw e + } private def runEntity(init: CrdtInit): Flow[CrdtStreamIn, CrdtStreamOut, NotUsed] = { - val service = services.getOrElse(init.serviceName, throw new RuntimeException(s"Service not found: ${init.serviceName}")) + val service = + services.getOrElse(init.serviceName, throw new RuntimeException(s"Service not found: ${init.serviceName}")) val runner = new EntityRunner(service, init.entityId, init.state.map { state => CrdtStateTransformer.create(state, service.anySupport) }) - Flow[CrdtStreamIn].mapConcat { in => - in.message match { - case In.Command(command) => - runner.handleCommand(command) - case In.Changed(delta) => - runner.handleDelta(delta).map { msg => - CrdtStreamOut(CrdtStreamOut.Message.StreamedMessage(msg)) - } - case In.State(state) => - runner.handleState(state).map { msg => - CrdtStreamOut(CrdtStreamOut.Message.StreamedMessage(msg)) - } - case In.Deleted(_) => - // ??? - Nil - case In.StreamCancelled(cancelled) => - runner.handleStreamCancelled(cancelled) - case In.Init(_) => - throw new IllegalStateException("Duplicate init event for the same entity") - case In.Empty => - throw new RuntimeException("Empty or unknown in message") + Flow[CrdtStreamIn] + .mapConcat { in => + in.message match { + case In.Command(command) => + runner.handleCommand(command) + case In.Changed(delta) => + runner.handleDelta(delta).map { msg => + CrdtStreamOut(CrdtStreamOut.Message.StreamedMessage(msg)) + } + case In.State(state) => + runner.handleState(state).map { msg => + CrdtStreamOut(CrdtStreamOut.Message.StreamedMessage(msg)) + } + case In.Deleted(_) => + // ??? 
+ Nil + case In.StreamCancelled(cancelled) => + runner.handleStreamCancelled(cancelled) + case In.Init(_) => + throw new IllegalStateException("Duplicate init event for the same entity") + case In.Empty => + throw new RuntimeException("Empty or unknown in message") + } + } + .recover { + case err => + system.log.error(err, "Unexpected error, terminating CRDT.") + CrdtStreamOut(CrdtStreamOut.Message.Failure(Failure(description = err.getMessage))) } - }.recover { - case err => - system.log.error(err, "Unexpected error, terminating CRDT.") - CrdtStreamOut(CrdtStreamOut.Message.Failure(Failure(description = err.getMessage))) - } } - private class EntityRunner(service: CrdtStatefulService, entityId: String, - private var crdt: Option[InternalCrdt]) { + private class EntityRunner(service: CrdtStatefulService, entityId: String, private var crdt: Option[InternalCrdt]) { private var crdtIsNew = false private var subscribers = Map.empty[Long, function.Function[SubscriptionContext, Optional[JavaPbAny]]] @@ -129,13 +146,12 @@ class CrdtImpl(system: ActorSystem, services: Map[String, CrdtStatefulService], } verifyNoDelta("creation") - private def verifyNoDelta(scope: String): Unit = { + private def verifyNoDelta(scope: String): Unit = crdt match { case Some(changed) if changed.hasDelta && !crdtIsNew => throw new RuntimeException(s"CRDT was changed during $scope, this is not allowed.") case _ => } - } def handleState(state: CrdtState): List[CrdtStreamedMessage] = { crdt match { @@ -147,9 +163,14 @@ class CrdtImpl(system: ActorSystem, services: Map[String, CrdtStatefulService], def handleDelta(delta: CrdtDelta): List[CrdtStreamedMessage] = { crdt match { - case Some(existing) => existing.applyDelta.applyOrElse(delta.delta, { noMatch: CrdtDelta.Delta => - throw new IllegalStateException(s"Received delta ${noMatch.value.getClass}, but it doesn't match the CRDT that this entity has: ${existing.name}") - }) + case Some(existing) => + existing.applyDelta.applyOrElse( + delta.delta, { noMatch: CrdtDelta.Delta => + throw new IllegalStateException( + s"Received delta ${noMatch.value.getClass}, but it doesn't match the CRDT that this entity has: ${existing.name}" + ) + } + ) case None => throw new IllegalStateException("Received delta for CRDT before it was created.") } notifySubscribers() @@ -182,10 +203,14 @@ class CrdtImpl(system: ActorSystem, services: Map[String, CrdtStatefulService], if (ctx.hasError) { verifyNoDelta("failed command handling") - CrdtStreamOut(CrdtStreamOut.Message.Reply(CrdtReply( - commandId = command.id, - clientAction = clientAction - ))) :: Nil + CrdtStreamOut( + CrdtStreamOut.Message.Reply( + CrdtReply( + commandId = command.id, + clientAction = clientAction + ) + ) + ) :: Nil } else { val crdtAction = ctx.createCrdtAction() @@ -199,13 +224,17 @@ class CrdtImpl(system: ActorSystem, services: Map[String, CrdtStatefulService], case _ => false } - CrdtStreamOut(CrdtStreamOut.Message.Reply(CrdtReply( - commandId = command.id, - clientAction = clientAction, - stateAction = crdtAction, - sideEffects = ctx.sideEffects, - streamed = streamAccepted - ))) :: streamedMessages.map(m => CrdtStreamOut(CrdtStreamOut.Message.StreamedMessage(m))) + CrdtStreamOut( + CrdtStreamOut.Message.Reply( + CrdtReply( + commandId = command.id, + clientAction = clientAction, + stateAction = crdtAction, + sideEffects = ctx.sideEffects, + streamed = streamAccepted + ) + ) + ) :: streamedMessages.map(m => CrdtStreamOut(CrdtStreamOut.Message.StreamedMessage(m))) } } @@ -223,16 +252,24 @@ class CrdtImpl(system: 
ActorSystem, services: Map[String, CrdtStatefulService], val crdtAction = ctx.createCrdtAction() if (crdtAction.isDefined) { - CrdtStreamOut(CrdtStreamOut.Message.StreamCancelledResponse(CrdtStreamCancelledResponse( - commandId = cancelled.id, - stateAction = crdtAction, - sideEffects = ctx.sideEffects, - ))) :: notifySubscribers().map(m => CrdtStreamOut(CrdtStreamOut.Message.StreamedMessage(m))) + CrdtStreamOut( + CrdtStreamOut.Message.StreamCancelledResponse( + CrdtStreamCancelledResponse( + commandId = cancelled.id, + stateAction = crdtAction, + sideEffects = ctx.sideEffects + ) + ) + ) :: notifySubscribers().map(m => CrdtStreamOut(CrdtStreamOut.Message.StreamedMessage(m))) } else { - CrdtStreamOut(CrdtStreamOut.Message.StreamCancelledResponse(CrdtStreamCancelledResponse( - commandId = cancelled.id, - sideEffects = ctx.sideEffects, - ))) :: Nil + CrdtStreamOut( + CrdtStreamOut.Message.StreamCancelledResponse( + CrdtStreamCancelledResponse( + commandId = cancelled.id, + sideEffects = ctx.sideEffects + ) + ) + ) :: Nil } case None => @@ -241,46 +278,53 @@ class CrdtImpl(system: ActorSystem, services: Map[String, CrdtStatefulService], } - private def notifySubscribers(): List[CrdtStreamedMessage] = { - subscribers.collect(Function.unlift { - case (id, callback) => - val context = new CrdtSubscriptionContext(id) - val reply = try { - callback(context) - } catch { - case FailInvoked => - Optional.empty[JavaPbAny]() - } finally { - context.deactivate() - } + private def notifySubscribers(): List[CrdtStreamedMessage] = + subscribers + .collect(Function.unlift { + case (id, callback) => + val context = new CrdtSubscriptionContext(id) + val reply = try { + callback(context) + } catch { + case FailInvoked => + Optional.empty[JavaPbAny]() + } finally { + context.deactivate() + } - val clientAction = context.createClientAction(reply, allowNoReply = true) + val clientAction = context.createClientAction(reply, allowNoReply = true) - if (context.hasError) { - subscribers -= id - cancelListeners -= id - Some(CrdtStreamedMessage( - commandId = id, - clientAction = clientAction - )) - } else if (clientAction.isDefined || context.isEnded || context.sideEffects.nonEmpty) { - if (context.isEnded) { + if (context.hasError) { subscribers -= id cancelListeners -= id + Some( + CrdtStreamedMessage( + commandId = id, + clientAction = clientAction + ) + ) + } else if (clientAction.isDefined || context.isEnded || context.sideEffects.nonEmpty) { + if (context.isEnded) { + subscribers -= id + cancelListeners -= id + } + Some( + CrdtStreamedMessage( + commandId = id, + clientAction = clientAction, + sideEffects = context.sideEffects, + endStream = context.isEnded + ) + ) + } else { + None } - Some(CrdtStreamedMessage( - commandId = id, - clientAction = clientAction, - sideEffects = context.sideEffects, - endStream = context.isEnded - )) - } else { - None - } - }).toList - } + }) + .toList - class CrdtStreamedCommandContext(command: Command) extends CrdtCommandContext(command) with StreamedCommandContext[JavaPbAny] { + class CrdtStreamedCommandContext(command: Command) + extends CrdtCommandContext(command) + with StreamedCommandContext[JavaPbAny] { private final var changeCallback: Option[function.Function[SubscriptionContext, Optional[JavaPbAny]]] = None private final var cancelCallback: Option[Consumer[StreamCancelledContext]] = None @@ -307,31 +351,34 @@ class CrdtImpl(system: ActorSystem, services: Map[String, CrdtStatefulService], } } - class CrdtCommandContext(command: Command) extends CommandContext - with 
AbstractCrdtContext - with CapturingCrdtFactory - with AbstractEffectContext - with AbstractClientActionContext - with DeletableContext - with ActivatableContext { + class CrdtCommandContext(command: Command) + extends CommandContext + with AbstractCrdtContext + with CapturingCrdtFactory + with AbstractEffectContext + with AbstractClientActionContext + with DeletableContext + with ActivatableContext { override final def commandId: Long = command.id override final def commandName(): String = command.name } - class CrdtStreamCancelledContext(cancelled: StreamCancelled) extends StreamCancelledContext - with CapturingCrdtFactory - with AbstractEffectContext - with ActivatableContext { + class CrdtStreamCancelledContext(cancelled: StreamCancelled) + extends StreamCancelledContext + with CapturingCrdtFactory + with AbstractEffectContext + with ActivatableContext { override final def commandId(): Long = cancelled.id } - class CrdtSubscriptionContext(override val commandId: Long) extends SubscriptionContext - with AbstractCrdtContext - with AbstractClientActionContext - with AbstractEffectContext - with ActivatableContext { + class CrdtSubscriptionContext(override val commandId: Long) + extends SubscriptionContext + with AbstractCrdtContext + with AbstractClientActionContext + with AbstractEffectContext + with ActivatableContext { private final var ended = false override final def endStream(): Unit = { @@ -354,7 +401,9 @@ class CrdtImpl(system: ActorSystem, services: Map[String, CrdtStatefulService], Optional.of(crdtType.cast(crdt)) case None => Optional.empty() case Some(wrongType) => - throw new IllegalStateException(s"The current ${wrongType.name} CRDT state doesn't match requested type of ${crdtType.getSimpleName}") + throw new IllegalStateException( + s"The current ${wrongType.name} CRDT state doesn't match requested type of ${crdtType.getSimpleName}" + ) } override final def entityId(): String = EntityRunner.this.entityId @@ -413,4 +462,4 @@ class CrdtImpl(system: ActorSystem, services: Map[String, CrdtStatefulService], } } } -} \ No newline at end of file +} diff --git a/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/FlagImpl.scala b/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/FlagImpl.scala index 374cea244..798f29263 100644 --- a/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/FlagImpl.scala +++ b/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/FlagImpl.scala @@ -10,18 +10,18 @@ private[crdt] final class FlagImpl extends InternalCrdt with Flag { override def isEnabled: Boolean = value - override def enable(): Unit = { + override def enable(): Unit = if (!deltaValue && !value) { deltaValue = true value = true } - } override def hasDelta: Boolean = deltaValue - override def delta: Option[CrdtDelta.Delta] = if (hasDelta) { - Some(CrdtDelta.Delta.Flag(FlagDelta(deltaValue))) - } else None + override def delta: Option[CrdtDelta.Delta] = + if (hasDelta) { + Some(CrdtDelta.Delta.Flag(FlagDelta(deltaValue))) + } else None override def resetDelta(): Unit = deltaValue = false diff --git a/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/GCounterImpl.scala b/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/GCounterImpl.scala index a2f5f15b3..a0468680f 100644 --- a/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/GCounterImpl.scala +++ b/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/GCounterImpl.scala @@ -21,9 +21,10 @@ private[crdt] final class GCounterImpl extends 
InternalCrdt with GCounter { override def hasDelta: Boolean = deltaValue != 0 - override def delta: Option[CrdtDelta.Delta] = if (hasDelta) { - Some(CrdtDelta.Delta.Gcounter(GCounterDelta(deltaValue))) - } else None + override def delta: Option[CrdtDelta.Delta] = + if (hasDelta) { + Some(CrdtDelta.Delta.Gcounter(GCounterDelta(deltaValue))) + } else None override def resetDelta(): Unit = deltaValue = 0 diff --git a/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/GSetImpl.scala b/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/GSetImpl.scala index 4ac333691..da083aa74 100644 --- a/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/GSetImpl.scala +++ b/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/GSetImpl.scala @@ -10,7 +10,10 @@ import com.google.protobuf.any.{Any => ScalaPbAny} import scala.collection.JavaConverters._ -private[crdt] final class GSetImpl[T](anySupport: AnySupport) extends util.AbstractSet[T] with InternalCrdt with GSet[T] { +private[crdt] final class GSetImpl[T](anySupport: AnySupport) + extends util.AbstractSet[T] + with InternalCrdt + with GSet[T] { override final val name = "GSet" private val value = new util.HashSet[T]() private val added = new util.HashSet[ScalaPbAny]() @@ -21,12 +24,13 @@ private[crdt] final class GSetImpl[T](anySupport: AnySupport) extends util.Abstr override def contains(o: Any): Boolean = value.contains(o) - override def add(e: T): Boolean = if (value.contains(e)) { - false - } else { - added.add(anySupport.encodeScala(e)) - value.add(e) - } + override def add(e: T): Boolean = + if (value.contains(e)) { + false + } else { + added.add(anySupport.encodeScala(e)) + value.add(e) + } override def remove(o: Any): Boolean = throw new UnsupportedOperationException("Cannot remove elements from a GSet") @@ -34,9 +38,10 @@ private[crdt] final class GSetImpl[T](anySupport: AnySupport) extends util.Abstr override def hasDelta: Boolean = !added.isEmpty - override def delta: Option[CrdtDelta.Delta] = if (hasDelta) { - Some(CrdtDelta.Delta.Gset(GSetDelta(added.asScala.toVector))) - } else None + override def delta: Option[CrdtDelta.Delta] = + if (hasDelta) { + Some(CrdtDelta.Delta.Gset(GSetDelta(added.asScala.toVector))) + } else None override def resetDelta(): Unit = added.clear() diff --git a/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/InternalCrdt.scala b/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/InternalCrdt.scala index 9676ccf59..bc16d650f 100644 --- a/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/InternalCrdt.scala +++ b/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/InternalCrdt.scala @@ -12,4 +12,3 @@ private[crdt] trait InternalCrdt extends Crdt { def applyDelta: PartialFunction[CrdtDelta.Delta, Unit] def applyState: PartialFunction[CrdtState.State, Unit] } - diff --git a/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/LWWRegisterImpl.scala b/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/LWWRegisterImpl.scala index 700a711af..9a985d25c 100644 --- a/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/LWWRegisterImpl.scala +++ b/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/LWWRegisterImpl.scala @@ -28,9 +28,10 @@ private[crdt] final class LWWRegisterImpl[T](anySupport: AnySupport) extends Int override def hasDelta: Boolean = deltaValue.isDefined - override def delta: Option[CrdtDelta.Delta] = if (hasDelta) { - 
Some(CrdtDelta.Delta.Lwwregister(LWWRegisterDelta(deltaValue, convertClock(clock), customClockValue))) - } else None + override def delta: Option[CrdtDelta.Delta] = + if (hasDelta) { + Some(CrdtDelta.Delta.Lwwregister(LWWRegisterDelta(deltaValue, convertClock(clock), customClockValue))) + } else None override def resetDelta(): Unit = { deltaValue = None @@ -39,10 +40,12 @@ private[crdt] final class LWWRegisterImpl[T](anySupport: AnySupport) extends Int } override def state: CrdtState.State = - CrdtState.State.Lwwregister(LWWRegisterState(Some(anySupport.encodeScala(value)), convertClock(clock), customClockValue)) + CrdtState.State.Lwwregister( + LWWRegisterState(Some(anySupport.encodeScala(value)), convertClock(clock), customClockValue) + ) override val applyDelta = { - case CrdtDelta.Delta.Lwwregister(LWWRegisterDelta(Some(any), _ , _)) => + case CrdtDelta.Delta.Lwwregister(LWWRegisterDelta(Some(any), _, _)) => this.value = anySupport.decode(any).asInstanceOf[T] } @@ -51,14 +54,13 @@ private[crdt] final class LWWRegisterImpl[T](anySupport: AnySupport) extends Int this.value = anySupport.decode(any).asInstanceOf[T] } - private def convertClock(clock: LWWRegister.Clock): CrdtClock = { + private def convertClock(clock: LWWRegister.Clock): CrdtClock = clock match { case LWWRegister.Clock.DEFAULT => CrdtClock.DEFAULT case LWWRegister.Clock.REVERSE => CrdtClock.REVERSE case LWWRegister.Clock.CUSTOM => CrdtClock.CUSTOM case LWWRegister.Clock.CUSTOM_AUTO_INCREMENT => CrdtClock.CUSTOM_AUTO_INCREMENT } - } override def toString = s"LWWRegister($value)" } diff --git a/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/ORMapImpl.scala b/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/ORMapImpl.scala index 922ce09da..5dcf213b2 100644 --- a/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/ORMapImpl.scala +++ b/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/ORMapImpl.scala @@ -1,7 +1,7 @@ package io.cloudstate.javasupport.impl.crdt import java.util -import java.util.{Map, function} +import java.util.{function, Map} import com.google.protobuf.any.{Any => ScalaPbAny} import io.cloudstate.javasupport.crdt.{Crdt, CrdtFactory, ORMap} @@ -12,25 +12,28 @@ import org.slf4j.LoggerFactory import scala.collection.JavaConverters._ /** - * A few notes on implementation: - * - * - put, and any similar operations (such as Map.Entry.setValue) are not supported, because the only way to create - * a CRDT is using a CrdtFactory, and we only make CrdtFactory's available in very specific contexts, such as in the - * getOrCreate method. The getOrCreate method is the only way to insert something new into the map. - * - All mechanisms for removal are supported - eg, calling remove directly, calling remove on any of the derived sets - * (entrySet, keySet, values), and calling remove on the entrySet iterator. - * - ju.AbstractMap is very useful, though bases most of its implementation on entrySet, so we need to take care to - * efficiently implement operations that it implements in O(n) time that we can do in O(1) time, such as - * get/remove/containsKey. 
- */ -private[crdt] final class ORMapImpl[K, V <: InternalCrdt](anySupport: AnySupport) extends util.AbstractMap[K, V] with InternalCrdt with ORMap[K, V] { + * A few notes on implementation: + * + * - put, and any similar operations (such as Map.Entry.setValue) are not supported, because the only way to create + * a CRDT is using a CrdtFactory, and we only make CrdtFactory's available in very specific contexts, such as in the + * getOrCreate method. The getOrCreate method is the only way to insert something new into the map. + * - All mechanisms for removal are supported - eg, calling remove directly, calling remove on any of the derived sets + * (entrySet, keySet, values), and calling remove on the entrySet iterator. + * - ju.AbstractMap is very useful, though bases most of its implementation on entrySet, so we need to take care to + * efficiently implement operations that it implements in O(n) time that we can do in O(1) time, such as + * get/remove/containsKey. + */ +private[crdt] final class ORMapImpl[K, V <: InternalCrdt](anySupport: AnySupport) + extends util.AbstractMap[K, V] + with InternalCrdt + with ORMap[K, V] { override final val name = "ORMap" private val value = new util.HashMap[K, V]() private val added = new util.HashMap[K, (ScalaPbAny, V)]() private val removed = new util.HashSet[ScalaPbAny]() private var cleared = false - override def getOrCreate(key: K, create: function.Function[CrdtFactory, V]): V = { + override def getOrCreate(key: K, create: function.Function[CrdtFactory, V]): V = if (value.containsKey(key)) { value.get(key) } else { @@ -40,7 +43,9 @@ private[crdt] final class ORMapImpl[K, V <: InternalCrdt](anySupport: AnySupport override protected def anySupport: AnySupport = ORMapImpl.this.anySupport override protected def newCrdt[C <: InternalCrdt](crdt: C): C = { if (internalCrdt != null) { - throw new IllegalStateException("getOrCreate creation callback must only be used to create one CRDT at a time") + throw new IllegalStateException( + "getOrCreate creation callback must only be used to create one CRDT at a time" + ) } internalCrdt = crdt crdt @@ -49,20 +54,22 @@ private[crdt] final class ORMapImpl[K, V <: InternalCrdt](anySupport: AnySupport if (crdt == null) { throw new IllegalArgumentException("getOrCreate creation callback must return a CRDT") } else if (crdt != internalCrdt) { - throw new IllegalArgumentException("CRDT returned by getOrCreate creation callback must have been created by the CrdtFactory passed to it") + throw new IllegalArgumentException( + "CRDT returned by getOrCreate creation callback must have been created by the CrdtFactory passed to it" + ) } value.put(key, crdt) added.put(key, (encodedKey, crdt)) crdt } - } override def containsKey(key: Any): Boolean = value.containsKey(key) override def get(key: Any): V = value.get(key) - override def put(key: K, value: V): V = throw new UnsupportedOperationException("Cannot put on an ORMap, use getOrCreate instead.") + override def put(key: K, value: V): V = + throw new UnsupportedOperationException("Cannot put on an ORMap, use getOrCreate instead.") override def remove(key: Any): V = { if (value.containsKey(key)) { @@ -120,30 +127,34 @@ private[crdt] final class ORMapImpl[K, V <: InternalCrdt](anySupport: AnySupport added.clear() } - override def hasDelta: Boolean = { + override def hasDelta: Boolean = if (cleared || !added.isEmpty || !removed.isEmpty) { true } else { value.values().asScala.exists(_.hasDelta) } - } - override def delta: Option[CrdtDelta.Delta] = if (hasDelta) { - val updated = 
(value.asScala -- this.added.keySet().asScala).collect { - case (key, changed) if changed.hasDelta => - ORMapEntryDelta(Some(anySupport.encodeScala(key)), changed.delta.map(CrdtDelta(_))) - } - val added = this.added.asScala.values.map { - case (key, crdt) => ORMapEntry(Some(key), Some(CrdtState(crdt.state))) - } + override def delta: Option[CrdtDelta.Delta] = + if (hasDelta) { + val updated = (value.asScala -- this.added.keySet().asScala).collect { + case (key, changed) if changed.hasDelta => + ORMapEntryDelta(Some(anySupport.encodeScala(key)), changed.delta.map(CrdtDelta(_))) + } + val added = this.added.asScala.values.map { + case (key, crdt) => ORMapEntry(Some(key), Some(CrdtState(crdt.state))) + } - Some(CrdtDelta.Delta.Ormap(ORMapDelta( - cleared = cleared, - removed = removed.asScala.toVector, - updated = updated.toVector, - added = added.toVector - ))) - } else None + Some( + CrdtDelta.Delta.Ormap( + ORMapDelta( + cleared = cleared, + removed = removed.asScala.toVector, + updated = updated.toVector, + added = added.toVector + ) + ) + ) + } else None override def resetDelta(): Unit = { cleared = false @@ -152,11 +163,14 @@ private[crdt] final class ORMapImpl[K, V <: InternalCrdt](anySupport: AnySupport value.values().asScala.foreach(_.resetDelta()) } - override def state: CrdtState.State = CrdtState.State.Ormap(ORMapState( - value.asScala.map { - case (key, crdt) => ORMapEntry(Some(anySupport.encodeScala(key)), Some(CrdtState(crdt.state))) - }.toVector - )) + override def state: CrdtState.State = + CrdtState.State.Ormap( + ORMapState( + value.asScala.map { + case (key, crdt) => ORMapEntry(Some(anySupport.encodeScala(key)), Some(CrdtState(crdt.state))) + }.toVector + ) + ) override val applyDelta = { case CrdtDelta.Delta.Ormap(ORMapDelta(cleared, removed, updated, added)) => @@ -176,7 +190,7 @@ private[crdt] final class ORMapImpl[K, V <: InternalCrdt](anySupport: AnySupport added.foreach { case ORMapEntry(Some(key), Some(state)) => value.put(anySupport.decode(key).asInstanceOf[K], - CrdtStateTransformer.create(state, anySupport).asInstanceOf[V]) + CrdtStateTransformer.create(state, anySupport).asInstanceOf[V]) } } @@ -186,7 +200,7 @@ private[crdt] final class ORMapImpl[K, V <: InternalCrdt](anySupport: AnySupport values.foreach { case ORMapEntry(Some(key), Some(state)) => value.put(anySupport.decode(key).asInstanceOf[K], - CrdtStateTransformer.create(state, anySupport).asInstanceOf[V]) + CrdtStateTransformer.create(state, anySupport).asInstanceOf[V]) } } diff --git a/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/ORSetImpl.scala b/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/ORSetImpl.scala index abac6f7b9..4968c24ef 100644 --- a/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/ORSetImpl.scala +++ b/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/ORSetImpl.scala @@ -22,19 +22,20 @@ private[crdt] class ORSetImpl[T](anySupport: AnySupport) extends util.AbstractSe override def contains(o: Any): Boolean = value.contains(o) - override def add(e: T): Boolean = if (value.contains(e)) { - false - } else { - val encoded = anySupport.encodeScala(e) - if (removed.contains(encoded)) { - removed.remove(encoded) + override def add(e: T): Boolean = + if (value.contains(e)) { + false } else { - added.add(anySupport.encodeScala(e)) + val encoded = anySupport.encodeScala(e) + if (removed.contains(encoded)) { + removed.remove(encoded) + } else { + added.add(anySupport.encodeScala(e)) + } + value.add(e) } - value.add(e) - } - override 
def remove(o: Any): Boolean = { + override def remove(o: Any): Boolean = if (!value.contains(o)) { false } else { @@ -51,7 +52,6 @@ private[crdt] class ORSetImpl[T](anySupport: AnySupport) extends util.AbstractSe } true } - } override def iterator(): util.Iterator[T] = new util.Iterator[T] { private val iter = value.iterator() @@ -84,9 +84,12 @@ private[crdt] class ORSetImpl[T](anySupport: AnySupport) extends util.AbstractSe override def hasDelta: Boolean = cleared || !added.isEmpty || !removed.isEmpty - override def delta: Option[CrdtDelta.Delta] = if (hasDelta) { - Some(CrdtDelta.Delta.Orset(ORSetDelta(cleared, removed = removed.asScala.toVector, added = added.asScala.toVector))) - } else None + override def delta: Option[CrdtDelta.Delta] = + if (hasDelta) { + Some( + CrdtDelta.Delta.Orset(ORSetDelta(cleared, removed = removed.asScala.toVector, added = added.asScala.toVector)) + ) + } else None override def resetDelta(): Unit = { cleared = false @@ -94,7 +97,8 @@ private[crdt] class ORSetImpl[T](anySupport: AnySupport) extends util.AbstractSe removed.clear() } - override def state: CrdtState.State = CrdtState.State.Orset(ORSetState(value.asScala.toSeq.map(anySupport.encodeScala))) + override def state: CrdtState.State = + CrdtState.State.Orset(ORSetState(value.asScala.toSeq.map(anySupport.encodeScala))) override val applyDelta = { case CrdtDelta.Delta.Orset(ORSetDelta(cleared, removed, added)) => diff --git a/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/PNCounterImpl.scala b/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/PNCounterImpl.scala index 48691ca7d..f23049b18 100644 --- a/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/PNCounterImpl.scala +++ b/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/PNCounterImpl.scala @@ -20,9 +20,10 @@ private[crdt] final class PNCounterImpl extends InternalCrdt with PNCounter { override def hasDelta: Boolean = deltaValue != 0 - override def delta: Option[CrdtDelta.Delta] = if (hasDelta) { - Some(CrdtDelta.Delta.Pncounter(PNCounterDelta(deltaValue))) - } else None + override def delta: Option[CrdtDelta.Delta] = + if (hasDelta) { + Some(CrdtDelta.Delta.Pncounter(PNCounterDelta(deltaValue))) + } else None override def resetDelta(): Unit = deltaValue = 0 diff --git a/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/VoteImpl.scala b/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/VoteImpl.scala index bc56e89ef..6db2854f5 100644 --- a/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/VoteImpl.scala +++ b/java-support/src/main/scala/io/cloudstate/javasupport/impl/crdt/VoteImpl.scala @@ -16,7 +16,7 @@ private[crdt] final class VoteImpl extends InternalCrdt with Vote { override def getVotesFor: Int = votesFor - override def vote(vote: Boolean): Unit = { + override def vote(vote: Boolean): Unit = if (selfVote != vote) { if (selfVoteChanged) { selfVoteChanged = false @@ -30,13 +30,13 @@ private[crdt] final class VoteImpl extends InternalCrdt with Vote { votesFor -= 1 } } - } override def hasDelta: Boolean = selfVoteChanged - override def delta: Option[CrdtDelta.Delta] = if (selfVoteChanged) { - Some(CrdtDelta.Delta.Vote(VoteDelta(selfVote))) - } else None + override def delta: Option[CrdtDelta.Delta] = + if (selfVoteChanged) { + Some(CrdtDelta.Delta.Vote(VoteDelta(selfVote))) + } else None override def resetDelta(): Unit = selfVoteChanged = false diff --git 
a/java-support/src/main/scala/io/cloudstate/javasupport/impl/eventsourced/AnnotationBasedEventSourcedSupport.scala b/java-support/src/main/scala/io/cloudstate/javasupport/impl/eventsourced/AnnotationBasedEventSourcedSupport.scala index df4a78ef8..aef8e32e8 100644 --- a/java-support/src/main/scala/io/cloudstate/javasupport/impl/eventsourced/AnnotationBasedEventSourcedSupport.scala +++ b/java-support/src/main/scala/io/cloudstate/javasupport/impl/eventsourced/AnnotationBasedEventSourcedSupport.scala @@ -12,24 +12,25 @@ import com.google.protobuf.{Descriptors, Any => JavaPbAny} import io.cloudstate.javasupport.ServiceCallFactory /** - * Annotation based implementation of the [[EventSourcedEntityFactory]]. - */ -private[impl] class AnnotationBasedEventSourcedSupport(entityClass: Class[_], anySupport: AnySupport, - override val resolvedMethods: Map[String, ResolvedServiceMethod[_, _]], - factory: Option[EventSourcedEntityCreationContext => AnyRef] = None) - extends EventSourcedEntityFactory with ResolvedEntityFactory { - - def this(entityClass: Class[_], anySupport: AnySupport, - serviceDescriptor: Descriptors.ServiceDescriptor) = + * Annotation based implementation of the [[EventSourcedEntityFactory]]. + */ +private[impl] class AnnotationBasedEventSourcedSupport( + entityClass: Class[_], + anySupport: AnySupport, + override val resolvedMethods: Map[String, ResolvedServiceMethod[_, _]], + factory: Option[EventSourcedEntityCreationContext => AnyRef] = None +) extends EventSourcedEntityFactory + with ResolvedEntityFactory { + + def this(entityClass: Class[_], anySupport: AnySupport, serviceDescriptor: Descriptors.ServiceDescriptor) = this(entityClass, anySupport, anySupport.resolveServiceDescriptor(serviceDescriptor)) private val behaviorReflectionCache = TrieMap.empty[Class[_], EventBehaviorReflection] // Eagerly reflect over/validate the entity class behaviorReflectionCache.put(entityClass, EventBehaviorReflection(entityClass, resolvedMethods)) - override def create(context: EventSourcedContext): EventSourcedEntityHandler = { + override def create(context: EventSourcedContext): EventSourcedEntityHandler = new EntityHandler(context) - } private val constructor: EventSourcedEntityCreationContext => AnyRef = factory.getOrElse { entityClass.getConstructors match { @@ -40,9 +41,9 @@ private[impl] class AnnotationBasedEventSourcedSupport(entityClass: Class[_], an } } - private def getCachedBehaviorReflection(behavior: AnyRef) = { - behaviorReflectionCache.getOrElseUpdate(behavior.getClass, EventBehaviorReflection(behavior.getClass, resolvedMethods)) - } + private def getCachedBehaviorReflection(behavior: AnyRef) = + behaviorReflectionCache.getOrElseUpdate(behavior.getClass, + EventBehaviorReflection(behavior.getClass, resolvedMethods)) private def validateBehaviors(behaviors: Seq[AnyRef]): Seq[AnyRef] = { behaviors.foreach(getCachedBehaviorReflection) @@ -74,24 +75,26 @@ private[impl] class AnnotationBasedEventSourcedSupport(entityClass: Class[_], an val event = anySupport.decode(anyEvent).asInstanceOf[AnyRef] if (!currentBehaviors.exists { behavior => - getCachedBehaviorReflection(behavior).getCachedEventHandlerForClass(event.getClass) match { - case Some(handler) => - var active = true - val ctx = new DelegatingEventSourcedContext(context) with EventBehaviorContext { - override def become(behavior: AnyRef*): Unit = { - if (!active) throw new IllegalStateException("Context is not active!") - currentBehaviors = validateBehaviors(behavior) - } - override def sequenceNumber(): Long = 
context.sequenceNumber() + getCachedBehaviorReflection(behavior).getCachedEventHandlerForClass(event.getClass) match { + case Some(handler) => + var active = true + val ctx = new DelegatingEventSourcedContext(context) with EventBehaviorContext { + override def become(behavior: AnyRef*): Unit = { + if (!active) throw new IllegalStateException("Context is not active!") + currentBehaviors = validateBehaviors(behavior) + } + override def sequenceNumber(): Long = context.sequenceNumber() + } + handler.invoke(behavior, event, ctx) + active = false + true + case None => + false } - handler.invoke(behavior, event, ctx) - active = false - true - case None => - false - } - }) { - throw new RuntimeException(s"No event handler found for event ${event.getClass} on any of the current behaviors: $behaviorsString") + }) { + throw new RuntimeException( + s"No event handler found for event ${event.getClass} on any of the current behaviors: $behaviorsString" + ) } } @@ -103,32 +106,36 @@ private[impl] class AnnotationBasedEventSourcedSupport(entityClass: Class[_], an }) maybeResult.getOrElse { - throw new RuntimeException(s"No command handler found for command [${context.commandName()}] on any of the current behaviors: $behaviorsString") + throw new RuntimeException( + s"No command handler found for command [${context.commandName()}] on any of the current behaviors: $behaviorsString" + ) } } override def handleSnapshot(anySnapshot: JavaPbAny, context: SnapshotContext): Unit = unwrap { val snapshot = anySupport.decode(anySnapshot).asInstanceOf[AnyRef] if (!currentBehaviors.exists { behavior => - getCachedBehaviorReflection(behavior).getCachedSnapshotHandlerForClass(snapshot.getClass) match { - case Some(handler) => - var active = true - val ctx = new DelegatingEventSourcedContext(context) with SnapshotBehaviorContext { - override def become(behavior: AnyRef*): Unit = { - if (!active) throw new IllegalStateException("Context is not active!") - currentBehaviors = validateBehaviors(behavior) - } - override def sequenceNumber(): Long = context.sequenceNumber() + getCachedBehaviorReflection(behavior).getCachedSnapshotHandlerForClass(snapshot.getClass) match { + case Some(handler) => + var active = true + val ctx = new DelegatingEventSourcedContext(context) with SnapshotBehaviorContext { + override def become(behavior: AnyRef*): Unit = { + if (!active) throw new IllegalStateException("Context is not active!") + currentBehaviors = validateBehaviors(behavior) + } + override def sequenceNumber(): Long = context.sequenceNumber() + } + handler.invoke(behavior, snapshot, ctx) + active = false + true + + case None => + false } - handler.invoke(behavior, snapshot, ctx) - active = false - true - - case None => - false - } - }) { - throw new RuntimeException(s"No snapshot handler found for snapshot ${snapshot.getClass} on any of the current behaviors $behaviorsString") + }) { + throw new RuntimeException( + s"No snapshot handler found for snapshot ${snapshot.getClass} on any of the current behaviors $behaviorsString" + ) } } @@ -144,71 +151,75 @@ private[impl] class AnnotationBasedEventSourcedSupport(entityClass: Class[_], an } } - private def unwrap[T](block: => T): T = try { - block - } catch { - case ite: InvocationTargetException if ite.getCause != null => - throw ite.getCause - } + private def unwrap[T](block: => T): T = + try { + block + } catch { + case ite: InvocationTargetException if ite.getCause != null => + throw ite.getCause + } private def behaviorsString = currentBehaviors.map(_.getClass).mkString(", ") } - 
private abstract class DelegatingEventSourcedContext(delegate: EventSourcedContext) extends EventSourcedContext { override def entityId(): String = delegate.entityId() override def serviceCallFactory(): ServiceCallFactory = delegate.serviceCallFactory() } } -private class EventBehaviorReflection(eventHandlers: Map[Class[_], EventHandlerInvoker], - val commandHandlers: Map[String, ReflectionHelper.CommandHandlerInvoker[CommandContext]], - snapshotHandlers: Map[Class[_], SnapshotHandlerInvoker], - val snapshotInvoker: Option[SnapshotInvoker]) { +private class EventBehaviorReflection( + eventHandlers: Map[Class[_], EventHandlerInvoker], + val commandHandlers: Map[String, ReflectionHelper.CommandHandlerInvoker[CommandContext]], + snapshotHandlers: Map[Class[_], SnapshotHandlerInvoker], + val snapshotInvoker: Option[SnapshotInvoker] +) { /** - * We use a cache in addition to the info we've discovered by reflection so that an event handler can be declared - * for a superclass of an event. - */ + * We use a cache in addition to the info we've discovered by reflection so that an event handler can be declared + * for a superclass of an event. + */ private val eventHandlerCache = TrieMap.empty[Class[_], Option[EventHandlerInvoker]] private val snapshotHandlerCache = TrieMap.empty[Class[_], Option[SnapshotHandlerInvoker]] - def getCachedEventHandlerForClass(clazz: Class[_]): Option[EventHandlerInvoker] = { + def getCachedEventHandlerForClass(clazz: Class[_]): Option[EventHandlerInvoker] = eventHandlerCache.getOrElseUpdate(clazz, getHandlerForClass(eventHandlers)(clazz)) - } - def getCachedSnapshotHandlerForClass(clazz: Class[_]): Option[SnapshotHandlerInvoker] = { + def getCachedSnapshotHandlerForClass(clazz: Class[_]): Option[SnapshotHandlerInvoker] = snapshotHandlerCache.getOrElseUpdate(clazz, getHandlerForClass(snapshotHandlers)(clazz)) - } - private def getHandlerForClass[T](handlers: Map[Class[_], T])(clazz: Class[_]): Option[T] = { + private def getHandlerForClass[T](handlers: Map[Class[_], T])(clazz: Class[_]): Option[T] = handlers.get(clazz) match { - case some@ Some(_) => some + case some @ Some(_) => some case None => clazz.getInterfaces.collectFirst(Function.unlift(getHandlerForClass(handlers))) match { - case some@ Some(_) => some + case some @ Some(_) => some case None if clazz.getSuperclass != null => getHandlerForClass(handlers)(clazz.getSuperclass) case None => None } } - } } private object EventBehaviorReflection { - def apply(behaviorClass: Class[_], serviceMethods: Map[String, ResolvedServiceMethod[_, _]]): EventBehaviorReflection = { + def apply(behaviorClass: Class[_], + serviceMethods: Map[String, ResolvedServiceMethod[_, _]]): EventBehaviorReflection = { val allMethods = ReflectionHelper.getAllDeclaredMethods(behaviorClass) val eventHandlers = allMethods .filter(_.getAnnotation(classOf[EventHandler]) != null) .map { method => new EventHandlerInvoker(ReflectionHelper.ensureAccessible(method)) - }.groupBy(_.eventClass) + } + .groupBy(_.eventClass) .map { case (eventClass, Seq(invoker)) => (eventClass: Any) -> invoker case (clazz, many) => - throw new RuntimeException(s"Multiple methods found for handling event of type $clazz: ${many.map(_.method.getName)}") - }.asInstanceOf[Map[Class[_], EventHandlerInvoker]] + throw new RuntimeException( + s"Multiple methods found for handling event of type $clazz: ${many.map(_.method.getName)}" + ) + } + .asInstanceOf[Map[Class[_], EventHandlerInvoker]] val commandHandlers = allMethods .filter(_.getAnnotation(classOf[CommandHandler]) != null) 
@@ -219,45 +230,62 @@ private object EventBehaviorReflection { } else annotation.name() val serviceMethod = serviceMethods.getOrElse(name, { - throw new RuntimeException(s"Command handler method ${method.getName} for command $name found, but the service has no command by that name.") + throw new RuntimeException( + s"Command handler method ${method.getName} for command $name found, but the service has no command by that name." + ) }) - new ReflectionHelper.CommandHandlerInvoker[CommandContext](ReflectionHelper.ensureAccessible(method), serviceMethod) - }.groupBy(_.serviceMethod.name) + new ReflectionHelper.CommandHandlerInvoker[CommandContext](ReflectionHelper.ensureAccessible(method), + serviceMethod) + } + .groupBy(_.serviceMethod.name) .map { case (commandName, Seq(invoker)) => commandName -> invoker - case (commandName, many) => throw new RuntimeException(s"Multiple methods found for handling command of name $commandName: ${many.map(_.method.getName)}") + case (commandName, many) => + throw new RuntimeException( + s"Multiple methods found for handling command of name $commandName: ${many.map(_.method.getName)}" + ) } val snapshotHandlers = allMethods .filter(_.getAnnotation(classOf[SnapshotHandler]) != null) .map { method => new SnapshotHandlerInvoker(ReflectionHelper.ensureAccessible(method)) - }.groupBy(_.snapshotClass) + } + .groupBy(_.snapshotClass) .map { case (snapshotClass, Seq(invoker)) => (snapshotClass: Any) -> invoker - case (clazz, many) => throw new RuntimeException(s"Multiple methods found for handling snapshot of type $clazz: ${many.map(_.method.getName)}") - }.asInstanceOf[Map[Class[_], SnapshotHandlerInvoker]] + case (clazz, many) => + throw new RuntimeException( + s"Multiple methods found for handling snapshot of type $clazz: ${many.map(_.method.getName)}" + ) + } + .asInstanceOf[Map[Class[_], SnapshotHandlerInvoker]] val snapshotInvoker = allMethods .filter(_.getAnnotation(classOf[Snapshot]) != null) .map { method => new SnapshotInvoker(ReflectionHelper.ensureAccessible(method)) } match { - case Seq() => None - case Seq(single) => - Some(single) - case _ => - throw new RuntimeException(s"Multiple snapshoting methods found on behavior $behaviorClass") - } + case Seq() => None + case Seq(single) => + Some(single) + case _ => + throw new RuntimeException(s"Multiple snapshoting methods found on behavior $behaviorClass") + } - ReflectionHelper.validateNoBadMethods(allMethods, classOf[EventSourcedEntity], Set(classOf[EventHandler], classOf[CommandHandler], classOf[SnapshotHandler], classOf[Snapshot])) + ReflectionHelper.validateNoBadMethods( + allMethods, + classOf[EventSourcedEntity], + Set(classOf[EventHandler], classOf[CommandHandler], classOf[SnapshotHandler], classOf[Snapshot]) + ) new EventBehaviorReflection(eventHandlers, commandHandlers, snapshotHandlers, snapshotInvoker) } } -private class EntityConstructorInvoker(constructor: Constructor[_]) extends (EventSourcedEntityCreationContext => AnyRef) { +private class EntityConstructorInvoker(constructor: Constructor[_]) + extends (EventSourcedEntityCreationContext => AnyRef) { private val parameters = ReflectionHelper.getParameterHandlers[EventSourcedEntityCreationContext](constructor)() parameters.foreach { case MainArgumentParameterHandler(clazz) => @@ -287,15 +315,21 @@ private class EventHandlerInvoker(val method: Method) { case MainArgumentParameterHandler(clazz) => clazz } match { case Array() => annotationEventClass.getOrElse(classOf[Object]) - case Array(handlerClass) => annotationEventClass match { - case None 
=> handlerClass - case Some(annotated) if handlerClass.isAssignableFrom(annotated) || annotated.isInterface => - annotated - case Some(nonAssignable) => - throw new RuntimeException(s"EventHandler method $method has defined an eventHandler class $nonAssignable that can never be assignable from it's parameter $handlerClass") - } + case Array(handlerClass) => + annotationEventClass match { + case None => handlerClass + case Some(annotated) if handlerClass.isAssignableFrom(annotated) || annotated.isInterface => + annotated + case Some(nonAssignable) => + throw new RuntimeException( + s"EventHandler method $method has defined an eventHandler class $nonAssignable that can never be assignable from it's parameter $handlerClass" + ) + } case other => - throw new RuntimeException(s"EventHandler method $method must defined at most one non context parameter to handle events, the parameters defined were: ${other.mkString(",")}") + throw new RuntimeException( + s"EventHandler method $method must defined at most one non context parameter to handle events, the parameters defined were: ${other + .mkString(",")}" + ) } def invoke(obj: AnyRef, event: AnyRef, context: EventBehaviorContext): Unit = { @@ -315,7 +349,10 @@ private class SnapshotHandlerInvoker(val method: Method) { } match { case Array(handlerClass) => handlerClass case other => - throw new RuntimeException(s"SnapshotHandler method $method must defined at most one non context parameter to handle snapshots, the parameters defined were: ${other.mkString(",")}") + throw new RuntimeException( + s"SnapshotHandler method $method must defined at most one non context parameter to handle snapshots, the parameters defined were: ${other + .mkString(",")}" + ) } def invoke(obj: AnyRef, snapshot: AnyRef, context: SnapshotBehaviorContext): Unit = { @@ -330,7 +367,9 @@ private class SnapshotInvoker(val method: Method) { parameters.foreach { case MainArgumentParameterHandler(clazz) => - throw new RuntimeException(s"Don't know how to handle argument of type $clazz in snapshot method: " + method.getName) + throw new RuntimeException( + s"Don't know how to handle argument of type $clazz in snapshot method: " + method.getName + ) case _ => } @@ -340,4 +379,3 @@ private class SnapshotInvoker(val method: Method) { } } - diff --git a/java-support/src/main/scala/io/cloudstate/javasupport/impl/eventsourced/EventSourcedImpl.scala b/java-support/src/main/scala/io/cloudstate/javasupport/impl/eventsourced/EventSourcedImpl.scala index 957164b3c..923fc83f7 100644 --- a/java-support/src/main/scala/io/cloudstate/javasupport/impl/eventsourced/EventSourcedImpl.scala +++ b/java-support/src/main/scala/io/cloudstate/javasupport/impl/eventsourced/EventSourcedImpl.scala @@ -26,40 +26,58 @@ import com.google.protobuf.any.{Any => ScalaPbAny} import io.cloudstate.javasupport.CloudStateRunner.Configuration import io.cloudstate.javasupport.{Context, ServiceCallFactory, StatefulService} import io.cloudstate.javasupport.eventsourced._ -import io.cloudstate.javasupport.impl.{AbstractClientActionContext, AbstractEffectContext, ActivatableContext, AnySupport, FailInvoked, ResolvedEntityFactory, ResolvedServiceMethod} -import io.cloudstate.protocol.event_sourced.EventSourcedStreamIn.Message.{Command => InCommand, Empty => InEmpty, Event => InEvent, Init => InInit} +import io.cloudstate.javasupport.impl.{ + AbstractClientActionContext, + AbstractEffectContext, + ActivatableContext, + AnySupport, + FailInvoked, + ResolvedEntityFactory, + ResolvedServiceMethod +} +import 
io.cloudstate.protocol.event_sourced.EventSourcedStreamIn.Message.{ + Command => InCommand, + Empty => InEmpty, + Event => InEvent, + Init => InInit +} import io.cloudstate.protocol.event_sourced.EventSourcedStreamOut.Message.{Reply => OutReply} import io.cloudstate.protocol.event_sourced._ final class EventSourcedStatefulService(val factory: EventSourcedEntityFactory, - override val descriptor: Descriptors.ServiceDescriptor, - val anySupport: AnySupport, - override val persistenceId: String, - val snapshotEvery: Int) extends StatefulService { + override val descriptor: Descriptors.ServiceDescriptor, + val anySupport: AnySupport, + override val persistenceId: String, + val snapshotEvery: Int) + extends StatefulService { - override def resolvedMethods: Option[Map[String, ResolvedServiceMethod[_, _]]] = { + override def resolvedMethods: Option[Map[String, ResolvedServiceMethod[_, _]]] = factory match { case resolved: ResolvedEntityFactory => Some(resolved.resolvedMethods) case _ => None } - } override final val entityType = EventSourced.name - final def withSnapshotEvery(snapshotEvery: Int): EventSourcedStatefulService = { + final def withSnapshotEvery(snapshotEvery: Int): EventSourcedStatefulService = if (snapshotEvery != this.snapshotEvery) new EventSourcedStatefulService(this.factory, this.descriptor, this.anySupport, this.persistenceId, snapshotEvery) else this - } } -final class EventSourcedImpl(_system: ActorSystem, _services: Map[String, EventSourcedStatefulService], rootContext: Context, configuration: Configuration) extends EventSourced { +final class EventSourcedImpl(_system: ActorSystem, + _services: Map[String, EventSourcedStatefulService], + rootContext: Context, + configuration: Configuration) + extends EventSourced { private final val system = _system - private final val services = _services.iterator.map({ - case (name, esss) => - // FIXME overlay configuration provided by _system - (name, if (esss.snapshotEvery == 0) esss.withSnapshotEvery(configuration.snapshotEvery) else esss) - }).toMap + private final val services = _services.iterator + .map({ + case (name, esss) => + // FIXME overlay configuration provided by _system + (name, if (esss.snapshotEvery == 0) esss.withSnapshotEvery(configuration.snapshotEvery) else esss) + }) + .toMap /** * The stream. One stream will be established per active entity. @@ -73,7 +91,9 @@ final class EventSourcedImpl(_system: ActorSystem, _services: Map[String, EventS * persisted the entity should handle itself, applying them to its own state, as if they had * arrived as events when the event stream was being replayed on load. 
*/ - override def handle(in: akka.stream.scaladsl.Source[EventSourcedStreamIn, akka.NotUsed]): akka.stream.scaladsl.Source[EventSourcedStreamOut, akka.NotUsed] = { + override def handle( + in: akka.stream.scaladsl.Source[EventSourcedStreamIn, akka.NotUsed] + ): akka.stream.scaladsl.Source[EventSourcedStreamOut, akka.NotUsed] = in.prefixAndTail(1) .flatMapConcat { case (Seq(EventSourcedStreamIn(InInit(init))), source) => @@ -81,15 +101,16 @@ final class EventSourcedImpl(_system: ActorSystem, _services: Map[String, EventS case _ => // todo better error throw new RuntimeException("Expected Init message") - }.recover { + } + .recover { case e => // FIXME translate to failure message throw e } - } private def runEntity(init: EventSourcedInit): Flow[EventSourcedStreamIn, EventSourcedStreamOut, NotUsed] = { - val service = services.getOrElse(init.serviceName, throw new RuntimeException(s"Service not found: ${init.serviceName}")) + val service = + services.getOrElse(init.serviceName, throw new RuntimeException(s"Service not found: ${init.serviceName}")) val handler = service.factory.create(new EventSourcedContextImpl(init.entityId)) val entityId = init.entityId @@ -104,84 +125,102 @@ final class EventSourcedImpl(_system: ActorSystem, _services: Map[String, EventS } handler.handleSnapshot(ScalaPbAny.toJavaProto(any), context) snapshotSequence - }).getOrElse(0l) - - - Flow[EventSourcedStreamIn].map(_.message).scan[(Long, Option[EventSourcedStreamOut.Message])]((startingSequenceNumber, None)) { - case (_, InEvent(event)) => - val context = new EventContextImpl(entityId, event.sequence) - val ev = ScalaPbAny.toJavaProto(event.payload.get) // FIXME empty? - handler.handleEvent(ev, context) - (event.sequence, None) - case ((sequence, _), InCommand(command)) => - if (entityId != command.entityId) throw new IllegalStateException("Receiving entity is not the intended recipient of command") - val cmd = ScalaPbAny.toJavaProto(command.payload.get) - val context = new CommandContextImpl(entityId, sequence, command.name, command.id, service.anySupport, handler, service.snapshotEvery) - - val reply = try { - handler.handleCommand(cmd, context) // FIXME is this allowed to throw - } catch { - case FailInvoked => - Optional.empty[JavaPbAny]() + }).getOrElse(0L) + + Flow[EventSourcedStreamIn] + .map(_.message) + .scan[(Long, Option[EventSourcedStreamOut.Message])]((startingSequenceNumber, None)) { + case (_, InEvent(event)) => + val context = new EventContextImpl(entityId, event.sequence) + val ev = ScalaPbAny.toJavaProto(event.payload.get) // FIXME empty? + handler.handleEvent(ev, context) + (event.sequence, None) + case ((sequence, _), InCommand(command)) => + if (entityId != command.entityId) + throw new IllegalStateException("Receiving entity is not the intended recipient of command") + val cmd = ScalaPbAny.toJavaProto(command.payload.get) + val context = new CommandContextImpl(entityId, + sequence, + command.name, + command.id, + service.anySupport, + handler, + service.snapshotEvery) + + val reply = try { + handler.handleCommand(cmd, context) // FIXME is this allowed to throw + } catch { + case FailInvoked => + Optional.empty[JavaPbAny]() // Ignore, error already captured - } finally { - context.deactivate() // Very important! 
- } - - val clientAction = context.createClientAction(reply, false) - - if (!context.hasError) { - val endSequenceNumber = sequence + context.events.size - - val snapshot = - if (context.performSnapshot) { - val s = handler.snapshot(new SnapshotContext with AbstractContext { - override def entityId: String = entityId - override def sequenceNumber: Long = endSequenceNumber - }) - if (s.isPresent) Option(ScalaPbAny.fromJavaProto(s.get)) else None - } else None - - (endSequenceNumber, Some(OutReply( - EventSourcedReply( - command.id, - clientAction, - context.sideEffects, - context.events, - snapshot - ) - ))) - } else { - (sequence, Some(OutReply( - EventSourcedReply( - commandId = command.id, - clientAction = clientAction - ) - ))) - } - case (_, InInit(i)) => - throw new IllegalStateException("Entity already inited") - case (_, InEmpty) => - throw new IllegalStateException("Received empty/unknown message") - }.collect { - case (_, Some(message)) => EventSourcedStreamOut(message) - } + } finally { + context.deactivate() // Very important! + } + + val clientAction = context.createClientAction(reply, false) + + if (!context.hasError) { + val endSequenceNumber = sequence + context.events.size + + val snapshot = + if (context.performSnapshot) { + val s = handler.snapshot(new SnapshotContext with AbstractContext { + override def entityId: String = entityId + override def sequenceNumber: Long = endSequenceNumber + }) + if (s.isPresent) Option(ScalaPbAny.fromJavaProto(s.get)) else None + } else None + + (endSequenceNumber, + Some( + OutReply( + EventSourcedReply( + command.id, + clientAction, + context.sideEffects, + context.events, + snapshot + ) + ) + )) + } else { + (sequence, + Some( + OutReply( + EventSourcedReply( + commandId = command.id, + clientAction = clientAction + ) + ) + )) + } + case (_, InInit(i)) => + throw new IllegalStateException("Entity already inited") + case (_, InEmpty) => + throw new IllegalStateException("Received empty/unknown message") + } + .collect { + case (_, Some(message)) => EventSourcedStreamOut(message) + } } trait AbstractContext extends EventSourcedContext { override def serviceCallFactory(): ServiceCallFactory = rootContext.serviceCallFactory() } - class CommandContextImpl( - override val entityId: String, - override val sequenceNumber: Long, - override val commandName: String, - override val commandId: Long, - val anySupport: AnySupport, - val handler: EventSourcedEntityHandler, - val snapshotEvery: Int) extends CommandContext with AbstractContext with AbstractClientActionContext - with AbstractEffectContext with ActivatableContext { - + class CommandContextImpl(override val entityId: String, + override val sequenceNumber: Long, + override val commandName: String, + override val commandId: Long, + val anySupport: AnySupport, + val handler: EventSourcedEntityHandler, + val snapshotEvery: Int) + extends CommandContext + with AbstractContext + with AbstractClientActionContext + with AbstractEffectContext + with ActivatableContext { + final var events: Vector[ScalaPbAny] = Vector.empty final var performSnapshot: Boolean = false @@ -196,5 +235,7 @@ final class EventSourcedImpl(_system: ActorSystem, _services: Map[String, EventS } class EventSourcedContextImpl(override final val entityId: String) extends EventSourcedContext with AbstractContext - class EventContextImpl(entityId: String, override final val sequenceNumber: Long) extends EventSourcedContextImpl(entityId) with EventContext -} \ No newline at end of file + class EventContextImpl(entityId: String, 
override final val sequenceNumber: Long) + extends EventSourcedContextImpl(entityId) + with EventContext +} diff --git a/java-support/src/test/scala/io/cloudstate/javasupport/impl/AnySupportSpec.scala b/java-support/src/test/scala/io/cloudstate/javasupport/impl/AnySupportSpec.scala index 8600fa436..9933770c8 100644 --- a/java-support/src/test/scala/io/cloudstate/javasupport/impl/AnySupportSpec.scala +++ b/java-support/src/test/scala/io/cloudstate/javasupport/impl/AnySupportSpec.scala @@ -12,8 +12,10 @@ import scala.beans.BeanProperty class AnySupportSpec extends WordSpec with Matchers with OptionValues { private val anySupport = new AnySupport(Array(Shoppingcart.getDescriptor, EventSourcedProto.javaDescriptor), - getClass.getClassLoader, "com.example") - private val addLineItem = Shoppingcart.AddLineItem.newBuilder() + getClass.getClassLoader, + "com.example") + private val addLineItem = Shoppingcart.AddLineItem + .newBuilder() .setName("item") .setProductId("id") .setQuantity(10) @@ -35,8 +37,7 @@ class AnySupportSpec extends WordSpec with Matchers with OptionValues { } "support resolving a service descriptor" in { - val methods = anySupport.resolveServiceDescriptor( - Shoppingcart.getDescriptor.findServiceByName("ShoppingCart")) + val methods = anySupport.resolveServiceDescriptor(Shoppingcart.getDescriptor.findServiceByName("ShoppingCart")) methods should have size 3 val method = methods("AddItem") @@ -67,7 +68,7 @@ class AnySupportSpec extends WordSpec with Matchers with OptionValues { "support se/deserializing strings" in testPrimitive("string", "foo", "") "support se/deserializing ints" in testPrimitive("int32", 10, 0) - "support se/deserializing longs" in testPrimitive("int64", 10l, 0l) + "support se/deserializing longs" in testPrimitive("int64", 10L, 0L) "support se/deserializing floats" in testPrimitive("float", 0.5f, 0f) "support se/deserializing doubles" in testPrimitive("double", 0.5d, 0d) "support se/deserializing bytes" in testPrimitive("bytes", ByteString.copyFromUtf8("foo"), ByteString.EMPTY) diff --git a/java-support/src/test/scala/io/cloudstate/javasupport/impl/crdt/AnnotationBasedCrdtSupportSpec.scala b/java-support/src/test/scala/io/cloudstate/javasupport/impl/crdt/AnnotationBasedCrdtSupportSpec.scala index 3cbfd5f17..0be42f345 100644 --- a/java-support/src/test/scala/io/cloudstate/javasupport/impl/crdt/AnnotationBasedCrdtSupportSpec.scala +++ b/java-support/src/test/scala/io/cloudstate/javasupport/impl/crdt/AnnotationBasedCrdtSupportSpec.scala @@ -17,7 +17,8 @@ class AnnotationBasedCrdtSupportSpec extends WordSpec with Matchers { trait BaseContext extends Context { override def serviceCallFactory(): ServiceCallFactory = new ServiceCallFactory { - override def lookup[T](serviceName: String, methodName: String, messageType: Class[T]): ServiceCallRef[T] = throw new NoSuchElementException + override def lookup[T](serviceName: String, methodName: String, messageType: Class[T]): ServiceCallRef[T] = + throw new NoSuchElementException } } @@ -29,13 +30,18 @@ class AnnotationBasedCrdtSupportSpec extends WordSpec with Matchers { val anySupport = new AnySupport(Array(Shoppingcart.getDescriptor), this.getClass.getClassLoader) object MockCreationContext extends MockCreationContext(None) - class MockCreationContext(crdt: Option[Crdt] = None) extends CrdtCreationContext with BaseContext with CrdtFactoryContext { + class MockCreationContext(crdt: Option[Crdt] = None) + extends CrdtCreationContext + with BaseContext + with CrdtFactoryContext { override def entityId(): String = "foo" 
override def state[T <: Crdt](crdtType: Class[T]): Optional[T] = crdt match { case Some(crdt) if crdtType.isInstance(crdt) => Optional.of(crdtType.cast(crdt)) case None => Optional.empty() case Some(wrongType) => - throw new IllegalStateException(s"The current ${wrongType} CRDT state doesn't match requested type of ${crdtType.getSimpleName}") + throw new IllegalStateException( + s"The current ${wrongType} CRDT state doesn't match requested type of ${crdtType.getSimpleName}" + ) } } @@ -54,22 +60,22 @@ class AnnotationBasedCrdtSupportSpec extends WordSpec with Matchers { } case class Wrapped(value: String) - val descriptor = Shoppingcart.getDescriptor.findServiceByName("ShoppingCart") + val descriptor = Shoppingcart.getDescriptor + .findServiceByName("ShoppingCart") .findMethodByName("AddItem") val method = ResolvedServiceMethod(descriptor, StringResolvedType, WrappedResolvedType) - def create(behavior: AnyRef, methods: ResolvedServiceMethod[_, _]*) = { - new AnnotationBasedCrdtSupport(behavior.getClass, anySupport, methods.map(m => m.descriptor.getName -> m).toMap, - Some(_ => behavior)).create(new MockCreationContext()) - } + def create(behavior: AnyRef, methods: ResolvedServiceMethod[_, _]*) = + new AnnotationBasedCrdtSupport(behavior.getClass, + anySupport, + methods.map(m => m.descriptor.getName -> m).toMap, + Some(_ => behavior)).create(new MockCreationContext()) - def create(clazz: Class[_], crdt: Option[Crdt] = None) = { + def create(clazz: Class[_], crdt: Option[Crdt] = None) = new AnnotationBasedCrdtSupport(clazz, anySupport, Map.empty, None).create(new MockCreationContext(crdt)) - } - def command(str: String) = { + def command(str: String) = ScalaPbAny.toJavaProto(ScalaPbAny(StringResolvedType.typeUrl, StringResolvedType.toByteString(str))) - } def decodeWrapped(any: JavaPbAny) = { any.getTypeUrl should ===(WrappedResolvedType.typeUrl) @@ -88,7 +94,8 @@ class AnnotationBasedCrdtSupportSpec extends WordSpec with Matchers { } "there is an optional CRDT constructor and the CRDT is the wrong type" in { - an [IllegalStateException] should be thrownBy create(classOf[OptionalCrdtConstructorTest], Some(MockCreationContext.newGCounter())) + an[IllegalStateException] should be thrownBy create(classOf[OptionalCrdtConstructorTest], + Some(MockCreationContext.newGCounter())) } "there is a CRDT constructor and the CRDT is non empty" in { @@ -96,11 +103,12 @@ class AnnotationBasedCrdtSupportSpec extends WordSpec with Matchers { } "there is a CRDT constructor and the CRDT is empty" in { - an [IllegalStateException] should be thrownBy create(classOf[CrdtConstructorTest], None) + an[IllegalStateException] should be thrownBy create(classOf[CrdtConstructorTest], None) } "there is a CRDT constructor and the CRDT is the wrong type" in { - an [IllegalStateException] should be thrownBy create(classOf[CrdtConstructorTest], Some(MockCreationContext.newGCounter())) + an[IllegalStateException] should be thrownBy create(classOf[CrdtConstructorTest], + Some(MockCreationContext.newGCounter())) } } @@ -117,11 +125,10 @@ private class OptionalEmptyCrdtConstructorTest(crdt: Optional[Vote]) { @CrdtEntity private class OptionalCrdtConstructorTest(crdt: Optional[Vote]) { crdt.isPresent shouldBe true - crdt.get shouldBe a [Vote] + crdt.get shouldBe a[Vote] } @CrdtEntity private class CrdtConstructorTest(crdt: Vote) { - crdt shouldBe a [Vote] + crdt shouldBe a[Vote] } - diff --git a/java-support/src/test/scala/io/cloudstate/javasupport/impl/eventsourced/AnnotationBasedEventSourcedSupportSpec.scala 
b/java-support/src/test/scala/io/cloudstate/javasupport/impl/eventsourced/AnnotationBasedEventSourcedSupportSpec.scala index 61bff6d53..33c730f23 100644 --- a/java-support/src/test/scala/io/cloudstate/javasupport/impl/eventsourced/AnnotationBasedEventSourcedSupportSpec.scala +++ b/java-support/src/test/scala/io/cloudstate/javasupport/impl/eventsourced/AnnotationBasedEventSourcedSupportSpec.scala @@ -14,7 +14,8 @@ class AnnotationBasedEventSourcedSupportSpec extends WordSpec with Matchers { trait BaseContext extends Context { override def serviceCallFactory(): ServiceCallFactory = new ServiceCallFactory { - override def lookup[T](serviceName: String, methodName: String, messageType: Class[T]): ServiceCallRef[T] = throw new NoSuchElementException + override def lookup[T](serviceName: String, methodName: String, messageType: Class[T]): ServiceCallRef[T] = + throw new NoSuchElementException } } @@ -55,22 +56,22 @@ class AnnotationBasedEventSourcedSupportSpec extends WordSpec with Matchers { case class Wrapped(value: String) val anySupport = new AnySupport(Array(Shoppingcart.getDescriptor), this.getClass.getClassLoader) - val descriptor = Shoppingcart.getDescriptor.findServiceByName("ShoppingCart") + val descriptor = Shoppingcart.getDescriptor + .findServiceByName("ShoppingCart") .findMethodByName("AddItem") val method = ResolvedServiceMethod(descriptor, StringResolvedType, WrappedResolvedType) - def create(behavior: AnyRef, methods: ResolvedServiceMethod[_, _]*) = { - new AnnotationBasedEventSourcedSupport(behavior.getClass, anySupport, methods.map(m => m.descriptor.getName -> m).toMap, - Some(_ => behavior)).create(MockContext) - } + def create(behavior: AnyRef, methods: ResolvedServiceMethod[_, _]*) = + new AnnotationBasedEventSourcedSupport(behavior.getClass, + anySupport, + methods.map(m => m.descriptor.getName -> m).toMap, + Some(_ => behavior)).create(MockContext) - def create(clazz: Class[_]) = { + def create(clazz: Class[_]) = new AnnotationBasedEventSourcedSupport(clazz, anySupport, Map.empty, None).create(MockContext) - } - def command(str: String) = { + def command(str: String) = ScalaPbAny.toJavaProto(ScalaPbAny(StringResolvedType.typeUrl, StringResolvedType.toByteString(str))) - } def decodeWrapped(any: JavaPbAny) = { any.getTypeUrl should ===(WrappedResolvedType.typeUrl) @@ -134,7 +135,7 @@ class AnnotationBasedEventSourcedSupportSpec extends WordSpec with Matchers { @EventHandler def handle(@EntityId eid: String, event: String, ctx: EventContext) = { event should ===("my-event") - eid should===("foo") + eid should ===("foo") ctx.sequenceNumber() shouldBe 10 invoked = true } @@ -244,14 +245,17 @@ class AnnotationBasedEventSourcedSupportSpec extends WordSpec with Matchers { } "multi arg command handler" in { - val handler = create(new { - @CommandHandler - def addItem(msg: String, @EntityId eid: String, ctx: CommandContext) = { - eid should ===("foo") - ctx.commandName() should ===("AddItem") - Wrapped(msg) - } - }, method) + val handler = create( + new { + @CommandHandler + def addItem(msg: String, @EntityId eid: String, ctx: CommandContext) = { + eid should ===("foo") + ctx.commandName() should ===("AddItem") + Wrapped(msg) + } + }, + method + ) decodeWrapped(handler.handleCommand(command("blah"), new MockCommandContext).get) should ===(Wrapped("blah")) } @@ -272,41 +276,36 @@ class AnnotationBasedEventSourcedSupportSpec extends WordSpec with Matchers { "fail if there's a bad context type" in { a[RuntimeException] should be thrownBy create(new { @CommandHandler - def 
addItem(msg: String, ctx: EventContext) = { + def addItem(msg: String, ctx: EventContext) = Wrapped(msg) - } }, method) } "fail if there's two command handlers for the same command" in { a[RuntimeException] should be thrownBy create(new { @CommandHandler - def addItem(msg: String, ctx: CommandContext) = { + def addItem(msg: String, ctx: CommandContext) = Wrapped(msg) - } @CommandHandler - def addItem(msg: String) = { + def addItem(msg: String) = Wrapped(msg) - } }, method) } "fail if there's no command with that name" in { a[RuntimeException] should be thrownBy create(new { @CommandHandler - def wrongName(msg: String) = { + def wrongName(msg: String) = Wrapped(msg) - } }, method) } "fail if there's a CRDT command handler" in { val ex = the[RuntimeException] thrownBy create(new { - @io.cloudstate.javasupport.crdt.CommandHandler - def addItem(msg: String) = { - Wrapped(msg) - } - }, method) + @io.cloudstate.javasupport.crdt.CommandHandler + def addItem(msg: String) = + Wrapped(msg) + }, method) ex.getMessage should include("Did you mean") ex.getMessage should include(classOf[CommandHandler].getName) } @@ -316,7 +315,7 @@ class AnnotationBasedEventSourcedSupportSpec extends WordSpec with Matchers { @CommandHandler def addItem(): Wrapped = throw new RuntimeException("foo") }, method) - val ex = the [RuntimeException] thrownBy handler.handleCommand(command("nothing"), new MockCommandContext) + val ex = the[RuntimeException] thrownBy handler.handleCommand(command("nothing"), new MockCommandContext) ex.getMessage should ===("foo") } @@ -477,5 +476,3 @@ private class MultiArgConstructorTest(ctx: EventSourcedContext, @EntityId entity @EventSourcedEntity private class UnsupportedConstructorParameter(foo: String) - - diff --git a/operator/src/main/scala/io/cloudstate/operator/GenericStatus.scala b/operator/src/main/scala/io/cloudstate/operator/GenericStatus.scala index acf48ae13..33715bff0 100644 --- a/operator/src/main/scala/io/cloudstate/operator/GenericStatus.scala +++ b/operator/src/main/scala/io/cloudstate/operator/GenericStatus.scala @@ -21,7 +21,7 @@ import java.time.ZonedDateTime import play.api.libs.json.{Format, Json} case class GenericStatus( - conditions: Option[List[Condition]] + conditions: Option[List[Condition]] ) object GenericStatus { @@ -29,16 +29,17 @@ object GenericStatus { } case class Condition( - `type`: String, - status: String, - reason: Option[String] = None, - message: Option[String] = None, - severity: Option[String] = None, - lastUpdateTime: Option[ZonedDateTime] = None, - lastTransitionTime: Option[ZonedDateTime] = None + `type`: String, + status: String, + reason: Option[String] = None, + message: Option[String] = None, + severity: Option[String] = None, + lastUpdateTime: Option[ZonedDateTime] = None, + lastTransitionTime: Option[ZonedDateTime] = None ) object Condition { - private implicit val timeFormat: Format[ZonedDateTime] = Format(skuber.json.format.timeReads, skuber.json.format.timewWrites) + private implicit val timeFormat: Format[ZonedDateTime] = + Format(skuber.json.format.timeReads, skuber.json.format.timewWrites) implicit val format: Format[Condition] = Json.format } diff --git a/operator/src/main/scala/io/cloudstate/operator/KnativeRevision.scala b/operator/src/main/scala/io/cloudstate/operator/KnativeRevision.scala index c0a6550b4..b009e0b60 100644 --- a/operator/src/main/scala/io/cloudstate/operator/KnativeRevision.scala +++ b/operator/src/main/scala/io/cloudstate/operator/KnativeRevision.scala @@ -23,7 +23,19 @@ import play.api.libs.json._ import 
skuber.json.format._ import skuber.ResourceSpecification.Subresources import skuber.apiextensions.CustomResourceDefinition -import skuber.{Container, CustomResource, EnvFromSource, EnvVar, Lifecycle, ListResource, Probe, Resource, ResourceDefinition, SecurityContext, Volume} +import skuber.{ + Container, + CustomResource, + EnvFromSource, + EnvVar, + Lifecycle, + ListResource, + Probe, + Resource, + ResourceDefinition, + SecurityContext, + Volume +} object KnativeRevision { @@ -33,12 +45,12 @@ object KnativeRevision { type ResourceList = ListResource[Resource] case class Spec( - containers: List[Container], - volumes: Option[List[Volume]], - serviceAccountName: Option[String], - containerConcurrency: Option[Long], - timeoutSeconds: Option[Long], - deployer: Option[Deployer] + containers: List[Container], + volumes: Option[List[Volume]], + serviceAccountName: Option[String], + containerConcurrency: Option[Long], + timeoutSeconds: Option[Long], + deployer: Option[Deployer] ) object Spec { @@ -51,38 +63,41 @@ object KnativeRevision { // So we need to be a little special in how we deal with that. val imagePullPolicyReads = ( (JsPath \ "image").read[String] and - (JsPath \ "imagePullPolicy").formatNullableEnum(Container.PullPolicy) - ) ((image, pullPolicy) => pullPolicy.getOrElse { - if (image.endsWith(":latest")) Container.PullPolicy.Always else Container.PullPolicy.IfNotPresent - }) - val imagePullPolicyFormat = OFormat(imagePullPolicyReads, (JsPath \ "imagePullPolicy").formatEnum(Container.PullPolicy)) + (JsPath \ "imagePullPolicy").formatNullableEnum(Container.PullPolicy) + )( + (image, pullPolicy) => + pullPolicy.getOrElse { + if (image.endsWith(":latest")) Container.PullPolicy.Always else Container.PullPolicy.IfNotPresent + } + ) + val imagePullPolicyFormat = + OFormat(imagePullPolicyReads, (JsPath \ "imagePullPolicy").formatEnum(Container.PullPolicy)) ( (JsPath \ "name").formatWithDefault[String]("") and - (JsPath \ "image").format[String] and - (JsPath \ "command").formatMaybeEmptyList[String] and - (JsPath \ "args").formatMaybeEmptyList[String] and - (JsPath \ "workingDir").formatNullable[String] and - (JsPath \ "ports").formatMaybeEmptyList[Container.Port] and - (JsPath \ "env").formatMaybeEmptyList[EnvVar] and - (JsPath \ "resources").formatNullable[Resource.Requirements] and - (JsPath \ "volumeMounts").formatMaybeEmptyList[Volume.Mount] and - (JsPath \ "livenessProbe").formatNullable[Probe] and - (JsPath \ "readinessProbe").formatNullable[Probe] and - (JsPath \ "lifecycle").formatNullable[Lifecycle] and - (JsPath \ "terminationMessagePath").formatNullable[String] and - (JsPath \ "terminationMessagePolicy").formatNullableEnum(Container.TerminationMessagePolicy) and - imagePullPolicyFormat and - (JsPath \ "securityContext").formatNullable[SecurityContext] and - (JsPath \ "envFrom").formatMaybeEmptyList[EnvFromSource] and - (JsPath \ "stdin").formatNullable[Boolean] and - (JsPath \ "stdinOnce").formatNullable[Boolean] and - (JsPath \ "tty").formatNullable[Boolean] and - (JsPath \ "volumeDevices").formatMaybeEmptyList[Volume.Device] - ) (Container.apply, unlift(Container.unapply)) + (JsPath \ "image").format[String] and + (JsPath \ "command").formatMaybeEmptyList[String] and + (JsPath \ "args").formatMaybeEmptyList[String] and + (JsPath \ "workingDir").formatNullable[String] and + (JsPath \ "ports").formatMaybeEmptyList[Container.Port] and + (JsPath \ "env").formatMaybeEmptyList[EnvVar] and + (JsPath \ "resources").formatNullable[Resource.Requirements] and + (JsPath \ 
"volumeMounts").formatMaybeEmptyList[Volume.Mount] and + (JsPath \ "livenessProbe").formatNullable[Probe] and + (JsPath \ "readinessProbe").formatNullable[Probe] and + (JsPath \ "lifecycle").formatNullable[Lifecycle] and + (JsPath \ "terminationMessagePath").formatNullable[String] and + (JsPath \ "terminationMessagePolicy").formatNullableEnum(Container.TerminationMessagePolicy) and + imagePullPolicyFormat and + (JsPath \ "securityContext").formatNullable[SecurityContext] and + (JsPath \ "envFrom").formatMaybeEmptyList[EnvFromSource] and + (JsPath \ "stdin").formatNullable[Boolean] and + (JsPath \ "stdinOnce").formatNullable[Boolean] and + (JsPath \ "tty").formatNullable[Boolean] and + (JsPath \ "volumeDevices").formatMaybeEmptyList[Volume.Device] + )(Container.apply, unlift(Container.unapply)) } - implicit val format: Format[Spec] = Json.format } @@ -96,28 +111,32 @@ object KnativeRevision { case OperatorConstants.CloudStateDeployerName => configPath.read[CloudStateDeployer].map(identity) case other => - configPath.readNullable[JsValue] + configPath + .readNullable[JsValue] .map(config => UnknownDeployer(other, config)) } } implicit val writes: Writes[Deployer] = ( (__ \ "name").write[String] and - (__ \ "config").writeNullable[JsValue] - )((dep: Deployer) => dep match { - case KnativeServingDeployer => (KnativeServingDeployerName, None) - case es: CloudStateDeployer => - (OperatorConstants.CloudStateDeployerName, Some(Json.toJson(es)(CloudStateDeployer.format))) - case UnknownDeployer(name, config) => (name, config) - }) + (__ \ "config").writeNullable[JsValue] + )( + (dep: Deployer) => + dep match { + case KnativeServingDeployer => (KnativeServingDeployerName, None) + case es: CloudStateDeployer => + (OperatorConstants.CloudStateDeployerName, Some(Json.toJson(es)(CloudStateDeployer.format))) + case UnknownDeployer(name, config) => (name, config) + } + ) } case object KnativeServingDeployer extends Deployer case class CloudStateDeployer( - journal: Journal, - sidecarResources: Option[Resource.Requirements], - sidecarJvmMemory: Option[String] + journal: Journal, + sidecarResources: Option[Resource.Requirements], + sidecarJvmMemory: Option[String] ) extends Deployer object CloudStateDeployer { @@ -127,8 +146,8 @@ object KnativeRevision { case class UnknownDeployer(name: String, config: Option[JsValue]) extends Deployer case class Journal( - name: String, - config: Option[JsObject] + name: String, + config: Option[JsObject] ) object Journal { @@ -136,35 +155,37 @@ object KnativeRevision { } case class Status( - observedGeneration: Option[Long], - conditions: List[Condition], - serviceName: Option[String], - logUrl: Option[String], - imageDigest: Option[String] + observedGeneration: Option[Long], + conditions: List[Condition], + serviceName: Option[String], + logUrl: Option[String], + imageDigest: Option[String] ) object Status { implicit val format: Format[Status] = ( (__ \ "observedGeneration").formatNullable[Long] and - (__ \ "conditions").formatNullable[List[Condition]] - .inmap[List[Condition]](_.getOrElse(Nil), Some(_)) and - (__ \ "serviceName").formatNullable[String] and - (__ \ "logUrl").formatNullable[String] and - (__ \ "imageDigest").formatNullable[String] - ) (Status.apply, unlift(Status.unapply)) + (__ \ "conditions") + .formatNullable[List[Condition]] + .inmap[List[Condition]](_.getOrElse(Nil), Some(_)) and + (__ \ "serviceName").formatNullable[String] and + (__ \ "logUrl").formatNullable[String] and + (__ \ "imageDigest").formatNullable[String] + )(Status.apply, 
unlift(Status.unapply)) } case class Condition( - `type`: String, - status: String, - severity: Option[String] = None, - lastTransitionTime: Option[ZonedDateTime] = None, - reason: Option[String] = None, - message: Option[String] = None + `type`: String, + status: String, + severity: Option[String] = None, + lastTransitionTime: Option[ZonedDateTime] = None, + reason: Option[String] = None, + message: Option[String] = None ) object Condition { - private implicit val timeFormat: Format[ZonedDateTime] = Format(skuber.json.format.timeReads, skuber.json.format.timewWrites) + private implicit val timeFormat: Format[ZonedDateTime] = + Format(skuber.json.format.timeReads, skuber.json.format.timewWrites) implicit val format: Format[Condition] = Json.format } @@ -173,9 +194,7 @@ object KnativeRevision { version = KnativeServingVersion, kind = RevisionKind, shortNames = List("rev"), - subresources = Some(Subresources() - .withStatusSubresource - ) + subresources = Some(Subresources().withStatusSubresource) ) implicit val statusSubEnabled = CustomResource.statusMethodsEnabler[Resource] @@ -183,4 +202,4 @@ object KnativeRevision { val crd = CustomResourceDefinition[Resource] def apply(name: String, spec: Spec) = CustomResource[Spec, Status](spec).withName(name) -} \ No newline at end of file +} diff --git a/operator/src/main/scala/io/cloudstate/operator/KnativeRevisionOperatorFactory.scala b/operator/src/main/scala/io/cloudstate/operator/KnativeRevisionOperatorFactory.scala index 8e153c3be..5cf29b446 100644 --- a/operator/src/main/scala/io/cloudstate/operator/KnativeRevisionOperatorFactory.scala +++ b/operator/src/main/scala/io/cloudstate/operator/KnativeRevisionOperatorFactory.scala @@ -30,34 +30,35 @@ import scala.concurrent.{ExecutionContext, Future} import KnativeRevision._ class KnativeRevisionOperatorFactory(implicit mat: Materializer, ec: ExecutionContext) - extends OperatorFactory[KnativeRevision.Status, Resource] { + extends OperatorFactory[KnativeRevision.Status, Resource] { import OperatorConstants._ - override def apply(client: KubernetesClient, config: OperatorConfig): Operator = new KnativeRevisionOperator(client, config) + override def apply(client: KubernetesClient, config: OperatorConfig): Operator = + new KnativeRevisionOperator(client, config) class KnativeRevisionOperator(client: KubernetesClient, config: OperatorConfig) extends Operator { private val helper = new ResourceHelper(client) - private def isOwnedByKnativeRevisionController(deployment: Deployment): Boolean = { + private def isOwnedByKnativeRevisionController(deployment: Deployment): Boolean = deployment.metadata.ownerReferences .find(_.controller.contains(true)) - .exists(ref => ref.apiVersion.startsWith(KnativeServingGroup + "/") - && ref.kind == RevisionKind) - } + .exists( + ref => + ref.apiVersion.startsWith(KnativeServingGroup + "/") + && ref.kind == RevisionKind + ) - override def handleChanged(resource: Resource): Future[StatusUpdate] = { + override def handleChanged(resource: Resource): Future[StatusUpdate] = resource.spec.deployer match { case Some(esd: CloudStateDeployer) => reconcile(resource, esd) case _ => Future.successful(StatusUpdate.None) } - } - - override def handleDeleted(resource: Resource): Future[Done] = { + override def handleDeleted(resource: Resource): Future[Done] = resource.spec.deployer match { case Some(esd: CloudStateDeployer) => for { @@ -78,34 +79,36 @@ class KnativeRevisionOperatorFactory(implicit mat: Materializer, ec: ExecutionCo Future.successful(Done) } - } - - override def 
statusFromError(error: Throwable, existing: Option[Resource]): StatusUpdate = { + override def statusFromError(error: Throwable, existing: Option[Resource]): StatusUpdate = existing match { case Some(revision) => - updateCondition(revision, KnativeRevision.Condition( - `type` = StatefulStoreConditionType, - status = UnknownStatus, - reason = Some("UnknownError"), - message = Some(error.getMessage), - lastTransitionTime = Some(ZonedDateTime.now()) - )) + updateCondition( + revision, + KnativeRevision.Condition( + `type` = StatefulStoreConditionType, + status = UnknownStatus, + reason = Some("UnknownError"), + message = Some(error.getMessage), + lastTransitionTime = Some(ZonedDateTime.now()) + ) + ) case None => println("Unknown error handling revision change, but we don't have an existing revision to update: " + error) error.printStackTrace() StatusUpdate.None } - } - private def updateCondition(revision: KnativeRevision.Resource, condition: KnativeRevision.Condition): StatusUpdate = { + private def updateCondition(revision: KnativeRevision.Resource, + condition: KnativeRevision.Condition): StatusUpdate = { val status = revision.status.getOrElse(new KnativeRevision.Status(None, Nil, None, None, None)) // First check if the condition has actually changed - important, because otherwise we might end up in an // infinite loop with the Knative operator - if (status.conditions.exists(c => - c.`type` == condition.`type` && - c.status == condition.status && - c.reason == condition.reason - )) { + if (status.conditions.exists( + c => + c.`type` == condition.`type` && + c.status == condition.status && + c.reason == condition.reason + )) { // Hasn't changed, don't update. StatusUpdate.None } else { @@ -132,8 +135,10 @@ class KnativeRevisionOperatorFactory(implicit mat: Materializer, ec: ExecutionCo } yield statusUpdate } - private def reconcileDeployment(revision: KnativeRevision.Resource, deployer: CloudStateDeployer, - maybeJournal: Option[StatefulStore.Resource], maybeDeployment: Option[Deployment]) = { + private def reconcileDeployment(revision: KnativeRevision.Resource, + deployer: CloudStateDeployer, + maybeJournal: Option[StatefulStore.Resource], + maybeDeployment: Option[Deployment]) = { val deploymentName = deploymentNameFor(revision) // for expression over eithers, only progresses when they return Right, otherwise we end up with Left of condition @@ -148,7 +153,8 @@ class KnativeRevisionOperatorFactory(implicit mat: Materializer, ec: ExecutionCo case Some(existing) => // todo why will the spec be None? 
val existingSpec = existing.spec.get - val desired = existing.copy(spec = newDeployment.spec) + val desired = existing + .copy(spec = newDeployment.spec) // Preserve current scale .withReplicas(existingSpec.replicas.getOrElse(1)) // Selector is immutable so preserve that too @@ -177,12 +183,15 @@ class KnativeRevisionOperatorFactory(implicit mat: Materializer, ec: ExecutionCo for { _ <- ensureRbacPermissionsInNamespace(revision.spec.serviceAccountName.getOrElse("default")) _ <- deploymentFuture - } yield updateCondition(revision, KnativeRevision.Condition( - `type` = StatefulStoreConditionType, - status = TrueStatus, - severity = Some("Info"), - lastTransitionTime = Some(ZonedDateTime.now()) - )) + } yield updateCondition( + revision, + KnativeRevision.Condition( + `type` = StatefulStoreConditionType, + status = TrueStatus, + severity = Some("Info"), + lastTransitionTime = Some(ZonedDateTime.now()) + ) + ) } result match { @@ -193,11 +202,16 @@ class KnativeRevisionOperatorFactory(implicit mat: Materializer, ec: ExecutionCo } } - private def errorCondition(`type`: String, reason: String, message: String) = { - KnativeRevision.Condition(`type`, FalseStatus, Some("Error"), Some(ZonedDateTime.now()), Some(reason), Some(message)) - } + private def errorCondition(`type`: String, reason: String, message: String) = + KnativeRevision.Condition(`type`, + FalseStatus, + Some("Error"), + Some(ZonedDateTime.now()), + Some(reason), + Some(message)) - private def verifyWeOwnDeployment(name: String, maybeDeployment: Option[Deployment]): Either[KnativeRevision.Condition, Done] = { + private def verifyWeOwnDeployment(name: String, + maybeDeployment: Option[Deployment]): Either[KnativeRevision.Condition, Done] = maybeDeployment match { case None => Right(Done) @@ -205,17 +219,26 @@ class KnativeRevisionOperatorFactory(implicit mat: Materializer, ec: ExecutionCo if (isOwnedByKnativeRevisionController(deployment)) { Right(Done) } else { - Left(errorCondition(ConditionResourcesAvailable, ConditionResourcesAvailableNotOwned, - s"There is an existing Deployment $name that we do not own.")) + Left( + errorCondition(ConditionResourcesAvailable, + ConditionResourcesAvailableNotOwned, + s"There is an existing Deployment $name that we do not own.") + ) } } - } - private def validateJournal(revision: KnativeRevision.Resource, deployer: CloudStateDeployer, - maybeJournal: Option[StatefulStore.Resource]): Either[KnativeRevision.Condition, Container] = { + private def validateJournal( + revision: KnativeRevision.Resource, + deployer: CloudStateDeployer, + maybeJournal: Option[StatefulStore.Resource] + ): Either[KnativeRevision.Condition, Container] = maybeJournal match { case None => - Left(errorCondition(StatefulStoreConditionType, "JournalNotFound", s"Journal with name ${deployer.journal.name} not found.")) + Left( + errorCondition(StatefulStoreConditionType, + "JournalNotFound", + s"Journal with name ${deployer.journal.name} not found.") + ) case Some(journal) => journal.spec.`type` match { case Some(CassandraStatefulStoreType) => @@ -227,43 +250,65 @@ class KnativeRevisionOperatorFactory(implicit mat: Materializer, ec: ExecutionCo case Some(keyspace) => Right(createCassandraSideCar(revision, deployer, serviceName, keyspace)) case None => - Left(errorCondition(StatefulStoreConditionType, "MissingKeyspace", - "No keyspace declared for Cassandra journal")) + Left( + errorCondition(StatefulStoreConditionType, + "MissingKeyspace", + "No keyspace declared for Cassandra journal") + ) } case None => - 
Left(errorCondition(StatefulStoreConditionType, "MissingServiceName", - "No service name declared in unmanaged Cassandra journal")) + Left( + errorCondition(StatefulStoreConditionType, + "MissingServiceName", + "No service name declared in unmanaged Cassandra journal") + ) } case unknown => - Left(errorCondition(StatefulStoreConditionType, "UnknownDeploymentType", - s"Unknown Cassandra deployment type: $unknown, supported types for Cassandra are: Unmanaged")) + Left( + errorCondition( + StatefulStoreConditionType, + "UnknownDeploymentType", + s"Unknown Cassandra deployment type: $unknown, supported types for Cassandra are: Unmanaged" + ) + ) } case unknown => - Left(errorCondition(StatefulStoreConditionType, "UnknownJournalType", - s"Unknown journal type: $unknown, supported types are: Cassandra")) + Left( + errorCondition(StatefulStoreConditionType, + "UnknownJournalType", + s"Unknown journal type: $unknown, supported types are: Cassandra") + ) } } - } - - private def createCassandraSideCar(revision: KnativeRevision.Resource, deployer: CloudStateDeployer, - service: String, keyspace: String) = { - createSideCar(revision, deployer, config.images.cassandra, List( - EnvVar("CASSANDRA_CONTACT_POINTS", service), - EnvVar("CASSANDRA_KEYSPACE", keyspace) - )) - } - private def createSideCar(revision: KnativeRevision.Resource, deployer: CloudStateDeployer, image: String, env: Seq[EnvVar]) = { + private def createCassandraSideCar(revision: KnativeRevision.Resource, + deployer: CloudStateDeployer, + service: String, + keyspace: String) = + createSideCar(revision, + deployer, + config.images.cassandra, + List( + EnvVar("CASSANDRA_CONTACT_POINTS", service), + EnvVar("CASSANDRA_KEYSPACE", keyspace) + )) + + private def createSideCar(revision: KnativeRevision.Resource, + deployer: CloudStateDeployer, + image: String, + env: Seq[EnvVar]) = { val jvmMemory = deployer.sidecarJvmMemory.getOrElse("256m") - val sidecarResources = deployer.sidecarResources.getOrElse(Resource.Requirements( - limits = Map( - Resource.memory -> Resource.Quantity("512Mi") - ), - requests = Map( - Resource.memory -> Resource.Quantity("512Mi"), - Resource.cpu -> Resource.Quantity("400m") + val sidecarResources = deployer.sidecarResources.getOrElse( + Resource.Requirements( + limits = Map( + Resource.memory -> Resource.Quantity("512Mi") + ), + requests = Map( + Resource.memory -> Resource.Quantity("512Mi"), + Resource.cpu -> Resource.Quantity("400m") + ) ) - )) + ) val userPort = revision.spec.containers.flatMap(_.ports).headOption.fold(DefaultUserPort)(_.containerPort) val configuration = revision.metadata.labels.getOrElse(ConfigurationLabel, "") @@ -271,53 +316,60 @@ class KnativeRevisionOperatorFactory(implicit mat: Materializer, ec: ExecutionCo Container( name = "akka-sidecar", image = image, - imagePullPolicy = if (image.endsWith(":latest")) Container.PullPolicy.Always else Container.PullPolicy.IfNotPresent, + imagePullPolicy = + if (image.endsWith(":latest")) Container.PullPolicy.Always else Container.PullPolicy.IfNotPresent, ports = List( Container.Port(containerPort = KnativeSidecarH2cPort, name = KnativeSidecarPortName), Container.Port(containerPort = MetricsPort, name = MetricsPortName) ), env = List( - EnvVar("HTTP_PORT", KnativeSidecarH2cPort.toString), - EnvVar("USER_FUNCTION_PORT", userPort.toString), - EnvVar("REMOTING_PORT", AkkaRemotingPort.toString), - EnvVar("MANAGEMENT_PORT", AkkaManagementPort.toString), - EnvVar("METRICS_PORT", MetricsPort.toString), - EnvVar("SELECTOR_LABEL_VALUE", configuration), - 
EnvVar("SELECTOR_LABEL", ConfigurationLabel), - EnvVar("CONTAINER_CONCURRENCY", revision.spec.containerConcurrency.getOrElse(0).toString), - EnvVar("REVISION_TIMEOUT", revision.spec.timeoutSeconds.getOrElse(10) + "s"), - EnvVar("SERVING_NAMESPACE", revision.namespace), - EnvVar("SERVING_CONFIGURATION", configuration), - EnvVar("SERVING_REVISION", revision.name), - EnvVar("SERVING_POD", EnvVar.FieldRef("metadata.name")), - // todo this should be based on minscale - EnvVar("REQUIRED_CONTACT_POINT_NR", "1"), - EnvVar("JAVA_OPTS", s"-Xms$jvmMemory -Xmx$jvmMemory") - ) ++ env, + EnvVar("HTTP_PORT", KnativeSidecarH2cPort.toString), + EnvVar("USER_FUNCTION_PORT", userPort.toString), + EnvVar("REMOTING_PORT", AkkaRemotingPort.toString), + EnvVar("MANAGEMENT_PORT", AkkaManagementPort.toString), + EnvVar("METRICS_PORT", MetricsPort.toString), + EnvVar("SELECTOR_LABEL_VALUE", configuration), + EnvVar("SELECTOR_LABEL", ConfigurationLabel), + EnvVar("CONTAINER_CONCURRENCY", revision.spec.containerConcurrency.getOrElse(0).toString), + EnvVar("REVISION_TIMEOUT", revision.spec.timeoutSeconds.getOrElse(10) + "s"), + EnvVar("SERVING_NAMESPACE", revision.namespace), + EnvVar("SERVING_CONFIGURATION", configuration), + EnvVar("SERVING_REVISION", revision.name), + EnvVar("SERVING_POD", EnvVar.FieldRef("metadata.name")), + // todo this should be based on minscale + EnvVar("REQUIRED_CONTACT_POINT_NR", "1"), + EnvVar("JAVA_OPTS", s"-Xms$jvmMemory -Xmx$jvmMemory") + ) ++ env, resources = Some(sidecarResources), - readinessProbe = Some(Probe( - action = HTTPGetAction( - port = Left(AkkaManagementPort), - path = "/ready" - ), - periodSeconds = Some(2), - failureThreshold = Some(20), - initialDelaySeconds = 2 - )), - livenessProbe = Some(Probe( - action = HTTPGetAction( - port = Left(AkkaManagementPort), - path = "/alive" - ), - periodSeconds = Some(2), - failureThreshold = Some(20), - initialDelaySeconds = 2 - )) + readinessProbe = Some( + Probe( + action = HTTPGetAction( + port = Left(AkkaManagementPort), + path = "/ready" + ), + periodSeconds = Some(2), + failureThreshold = Some(20), + initialDelaySeconds = 2 + ) + ), + livenessProbe = Some( + Probe( + action = HTTPGetAction( + port = Left(AkkaManagementPort), + path = "/alive" + ), + periodSeconds = Some(2), + failureThreshold = Some(20), + initialDelaySeconds = 2 + ) + ) ) } - private def createDeployment(revision: KnativeRevision.Resource, deployer: CloudStateDeployer, sidecar: Container) = { + private def createDeployment(revision: KnativeRevision.Resource, + deployer: CloudStateDeployer, + sidecar: Container) = { // validate? It should already be validated. 
val orig = revision.spec.containers.head @@ -328,16 +380,19 @@ class KnativeRevisionOperatorFactory(implicit mat: Materializer, ec: ExecutionCo val userContainer = orig.copy( name = UserContainerName, volumeMounts = orig.volumeMounts :+ Volume.Mount("varlog", "/var/log"), - ports = List(Container.Port( - name = UserPortName, - containerPort = userPort - )), - env = orig.env ++ List( - EnvVar(UserPortEnvVar, userPort.toString), - EnvVar(KnativeRevisionEnvVar, revision.name), - EnvVar(KnativeConfigruationEnvVar, EnvVar.StringValue(revision.metadata.labels.getOrElse(ConfigurationLabel, ""))), - EnvVar(KnativeServiceEnvVar, EnvVar.StringValue(revision.metadata.labels.getOrElse(ServiceLabel, ""))) + ports = List( + Container.Port( + name = UserPortName, + containerPort = userPort + ) ), + env = orig.env ++ List( + EnvVar(UserPortEnvVar, userPort.toString), + EnvVar(KnativeRevisionEnvVar, revision.name), + EnvVar(KnativeConfigruationEnvVar, + EnvVar.StringValue(revision.metadata.labels.getOrElse(ConfigurationLabel, ""))), + EnvVar(KnativeServiceEnvVar, EnvVar.StringValue(revision.metadata.labels.getOrElse(ServiceLabel, ""))) + ), stdin = Some(false), tty = Some(false), image = revision.status @@ -368,18 +423,18 @@ class KnativeRevisionOperatorFactory(implicit mat: Materializer, ec: ExecutionCo val labels = { val ls = revision.metadata.labels ++ Map( - RevisionLabel -> revision.name, - RevisionUidLabel -> revision.uid, - StatefulStoreLabel -> deployer.journal.name - ) + RevisionLabel -> revision.name, + RevisionUidLabel -> revision.uid, + StatefulStoreLabel -> deployer.journal.name + ) if (!ls.contains("app")) ls + ("app" -> revision.name) else ls } val annotations = revision.metadata.annotations - LastPinnedLabel val podAnnotations = annotations ++ Seq( - "traffic.sidecar.istio.io/includeInboundPorts" -> s"$KnativeSidecarH2cPort", - "traffic.sidecar.istio.io/excludeOutboundPorts" -> s"$AkkaRemotingPort,$AkkaManagementPort" - ) + "traffic.sidecar.istio.io/includeInboundPorts" -> s"$KnativeSidecarH2cPort", + "traffic.sidecar.istio.io/excludeOutboundPorts" -> s"$AkkaRemotingPort,$AkkaManagementPort" + ) // Create the deployment Deployment( @@ -405,23 +460,24 @@ class KnativeRevisionOperatorFactory(implicit mat: Materializer, ec: ExecutionCo .withLabelSelector( RevisionUidLabel is revision.uid ) - .withTemplate(Pod.Template.Spec( - metadata = ObjectMeta( - labels = labels, - annotations = podAnnotations - ), - spec = Some(podSpec) - )) + .withTemplate( + Pod.Template.Spec( + metadata = ObjectMeta( + labels = labels, + annotations = podAnnotations + ), + spec = Some(podSpec) + ) + ) } - private def ensureRbacPermissionsInNamespace(serviceAccountName: String) = { + private def ensureRbacPermissionsInNamespace(serviceAccountName: String) = for { _ <- helper.ensurePodReaderRoleExists() _ <- helper.ensurePodReaderRoleBindingExists(serviceAccountName) } yield () - } // Must match https://github.com/knative/serving/blob/2297b69327bbc457563cefc7d36a848159a4c7c0/pkg/reconciler/revision/resources/names/names.go#L24 private def deploymentNameFor(revision: Resource) = revision.metadata.name + "-deployment" } -} \ No newline at end of file +} diff --git a/operator/src/main/scala/io/cloudstate/operator/OperatorConfig.scala b/operator/src/main/scala/io/cloudstate/operator/OperatorConfig.scala index 3d991917b..485a8c7a6 100644 --- a/operator/src/main/scala/io/cloudstate/operator/OperatorConfig.scala +++ b/operator/src/main/scala/io/cloudstate/operator/OperatorConfig.scala @@ -21,4 +21,4 @@ object OperatorConfig { } 
} -case class ImageConfig(cassandra: String, inMemory: String, noStore: String, postgres: String) \ No newline at end of file +case class ImageConfig(cassandra: String, inMemory: String, noStore: String, postgres: String) diff --git a/operator/src/main/scala/io/cloudstate/operator/OperatorFactory.scala b/operator/src/main/scala/io/cloudstate/operator/OperatorFactory.scala index 45591bf09..2af43c557 100644 --- a/operator/src/main/scala/io/cloudstate/operator/OperatorFactory.scala +++ b/operator/src/main/scala/io/cloudstate/operator/OperatorFactory.scala @@ -28,33 +28,33 @@ trait OperatorFactory[Status, Resource <: CustomResource[_, Status]] { def apply(client: KubernetesClient, config: OperatorConfig): Operator /** - * An operator. - */ + * An operator. + */ trait Operator { /** - * Handle a resource being changed. - * - * @param resource The changed resource. - * @return Optionally, the status to update, if the status should be updated. - */ + * Handle a resource being changed. + * + * @param resource The changed resource. + * @return Optionally, the status to update, if the status should be updated. + */ def handleChanged(resource: Resource): Future[StatusUpdate] /** - * Handle a resource being deleted. - * - * @param resource The deleted resource. - * @return A future that is redeemed when the operation is done. - */ + * Handle a resource being deleted. + * + * @param resource The deleted resource. + * @return A future that is redeemed when the operation is done. + */ def handleDeleted(resource: Resource): Future[Done] /** - * Convert the given error to a status. - * - * @param error The error to convert. - * @param existing The existing resource, if it could be successfully parsed. - * @return The status to set. - */ + * Convert the given error to a status. + * + * @param error The error to convert. + * @param existing The existing resource, if it could be successfully parsed. + * @return The status to set. 
+ */ def statusFromError(error: Throwable, existing: Option[Resource] = None): StatusUpdate sealed trait StatusUpdate @@ -70,5 +70,3 @@ trait OperatorFactory[Status, Resource <: CustomResource[_, Status]] { } } - - diff --git a/operator/src/main/scala/io/cloudstate/operator/OperatorMain.scala b/operator/src/main/scala/io/cloudstate/operator/OperatorMain.scala index 00f827a7a..04cd91617 100644 --- a/operator/src/main/scala/io/cloudstate/operator/OperatorMain.scala +++ b/operator/src/main/scala/io/cloudstate/operator/OperatorMain.scala @@ -28,8 +28,8 @@ import scala.collection.concurrent.TrieMap object OperatorMain extends App { - private val operatorNamespace = sys.env.getOrElse("NAMESPACE", - sys.error("No NAMESPACE environment variable configured!")) + private val operatorNamespace = + sys.env.getOrElse("NAMESPACE", sys.error("No NAMESPACE environment variable configured!")) private val configMapName = sys.env.getOrElse("CONFIG_MAP", "cloudstate-operator-config") implicit val system = ActorSystem() @@ -49,7 +49,8 @@ object OperatorMain extends App { def maybeRestart(configMap: ConfigMap): Unit = { val configString = configMap.data.getOrElse("config", "") - val config = ConfigFactory.parseString(configString) + val config = ConfigFactory + .parseString(configString) .withFallback(ConfigFactory.defaultReference()) val opConfig = OperatorConfig(config) if (!currentConfig.contains(opConfig)) { @@ -74,7 +75,9 @@ object OperatorMain extends App { } } - Watcher.watchSingle[ConfigMap](client.usingNamespace(operatorNamespace), configMapName, + Watcher.watchSingle[ConfigMap]( + client.usingNamespace(operatorNamespace), + configMapName, Flow[WatchEvent[ConfigMap]].map { case WatchEvent(EventType.ADDED, map) => maybeRestart(map) @@ -86,7 +89,8 @@ object OperatorMain extends App { currentlyRunning = None currentConfig = None case _ => - }) + } + ) def watchConfiguredNamespaces(config: OperatorConfig): AutoCloseable = { val killSwitches = for { @@ -100,33 +104,38 @@ object OperatorMain extends App { def watchAllNamespaces(config: OperatorConfig): AutoCloseable = { val namespaces = TrieMap.empty[String, List[KillSwitch]] - def watch(namespace: Namespace): Unit = { + def watch(namespace: Namespace): Unit = namespaces.put(namespace.name, runners.map(_.start(namespace.name, config))) - } - def unwatch(namespace: Namespace): Unit = { + def unwatch(namespace: Namespace): Unit = namespaces.get(namespace.name).foreach { killSwitches => killSwitches.foreach(_.shutdown()) namespaces.remove(namespace.name) } - } - val killSwitch = Watcher.watch[Namespace](client, Flow[WatchEvent[Namespace]].map { - case WatchEvent(EventType.ADDED, namespace) if !isWatchingDisabled(namespace) => - println(s"Watching new namespace ${namespace.name}") - watch(namespace) - case WatchEvent(EventType.MODIFIED, namespace) if isWatchingDisabled(namespace) && namespaces.contains(namespace.name) => - println(s"Namespace ${namespace.name} has had io.cloudstate/watch=disabled annotation added to it, stopping watcher.") - unwatch(namespace) - case WatchEvent(EventType.MODIFIED, namespace) if !isWatchingDisabled(namespace) && !namespaces.contains(namespace.name) => - println(s"Watching namespace ${namespace.name}") - watch(namespace) - case WatchEvent(EventType.DELETED, namespace) if namespaces.contains(namespace.name) => - println(s"Namespace ${namespace.name} has been deleted, stopping watcher.") - unwatch(namespace) - case _ => - () - }) + val killSwitch = Watcher.watch[Namespace]( + client, + Flow[WatchEvent[Namespace]].map { + case 
WatchEvent(EventType.ADDED, namespace) if !isWatchingDisabled(namespace) => + println(s"Watching new namespace ${namespace.name}") + watch(namespace) + case WatchEvent(EventType.MODIFIED, namespace) + if isWatchingDisabled(namespace) && namespaces.contains(namespace.name) => + println( + s"Namespace ${namespace.name} has had io.cloudstate/watch=disabled annotation added to it, stopping watcher." + ) + unwatch(namespace) + case WatchEvent(EventType.MODIFIED, namespace) + if !isWatchingDisabled(namespace) && !namespaces.contains(namespace.name) => + println(s"Watching namespace ${namespace.name}") + watch(namespace) + case WatchEvent(EventType.DELETED, namespace) if namespaces.contains(namespace.name) => + println(s"Namespace ${namespace.name} has been deleted, stopping watcher.") + unwatch(namespace) + case _ => + () + } + ) () => { killSwitch.shutdown() @@ -136,7 +145,6 @@ object OperatorMain extends App { } } - private def isWatchingDisabled(namespace: Namespace): Boolean = { + private def isWatchingDisabled(namespace: Namespace): Boolean = namespace.metadata.annotations.get("cloudstate.io/watch").contains("disabled") - } } diff --git a/operator/src/main/scala/io/cloudstate/operator/OperatorRunner.scala b/operator/src/main/scala/io/cloudstate/operator/OperatorRunner.scala index c07c43bf3..97b9ed8d3 100644 --- a/operator/src/main/scala/io/cloudstate/operator/OperatorRunner.scala +++ b/operator/src/main/scala/io/cloudstate/operator/OperatorRunner.scala @@ -27,8 +27,15 @@ import scala.concurrent.{ExecutionContext, Future} import scala.util.control.NonFatal import skuber._ -class OperatorRunner[Status, Resource <: CustomResource[_, Status]](client: KubernetesClient, operator: OperatorFactory[Status, Resource]) - (implicit fmt: Format[Resource], statusFmt: Format[Status], rd: ResourceDefinition[Resource], hs: HasStatusSubresource[Resource], mat: Materializer, ec: ExecutionContext) { +class OperatorRunner[Status, Resource <: CustomResource[_, Status]](client: KubernetesClient, + operator: OperatorFactory[Status, Resource])( + implicit fmt: Format[Resource], + statusFmt: Format[Status], + rd: ResourceDefinition[Resource], + hs: HasStatusSubresource[Resource], + mat: Materializer, + ec: ExecutionContext +) { def start(namespace: String, config: OperatorConfig): KillSwitch = { val namespacedClient = client.usingNamespace(namespace) @@ -38,69 +45,82 @@ class OperatorRunner[Status, Resource <: CustomResource[_, Status]](client: Kube private type JsValueCustomResource = CustomResource[JsValue, JsValue] private type Cache = Map[String, JsValueCustomResource] - private implicit val listResourceFormat: Format[ListResource[JsValueCustomResource]] = ListResourceFormat(implicitly[Format[JsValueCustomResource]]) + private implicit val listResourceFormat: Format[ListResource[JsValueCustomResource]] = ListResourceFormat( + implicitly[Format[JsValueCustomResource]] + ) - private class NamespacedOperatorRunner(client: KubernetesClient, operator: OperatorFactory[Status, Resource]#Operator) { + private class NamespacedOperatorRunner(client: KubernetesClient, + operator: OperatorFactory[Status, Resource]#Operator) { // See https://github.com/doriordan/skuber/issues/270 // We do all watches and list resources using JsValue, rather than our actual classes, because this allows us // to handle parse errors - implicit val jsValueRd: ResourceDefinition[JsValueCustomResource] = rd.asInstanceOf[ResourceDefinition[JsValueCustomResource]] - implicit val statusSubEnabled: HasStatusSubresource[JsValueCustomResource] = 
CustomResource.statusMethodsEnabler[JsValueCustomResource] - - def start(): KillSwitch = { - Watcher.watch[JsValueCustomResource](client, Flow[WatchEvent[JsValueCustomResource]] - .scanAsync(Map.empty[String, JsValueCustomResource]) { (cache, event) => - - // It's unmodifed if the event type is modified but the object hasn't changed. - // Otherwise, it's it's been modified in some way (eg, added, deleted or changed). - val unmodified = event._type == EventType.MODIFIED && - cache.get(event._object.name).contains(event._object) + implicit val jsValueRd: ResourceDefinition[JsValueCustomResource] = + rd.asInstanceOf[ResourceDefinition[JsValueCustomResource]] + implicit val statusSubEnabled: HasStatusSubresource[JsValueCustomResource] = + CustomResource.statusMethodsEnabler[JsValueCustomResource] + + def start(): KillSwitch = + Watcher.watch[JsValueCustomResource]( + client, + Flow[WatchEvent[JsValueCustomResource]] + .scanAsync(Map.empty[String, JsValueCustomResource]) { (cache, event) => + // It's unmodifed if the event type is modified but the object hasn't changed. + // Otherwise, it's it's been modified in some way (eg, added, deleted or changed). + val unmodified = event._type == EventType.MODIFIED && + cache.get(event._object.name).contains(event._object) + + if (unmodified) { + Future.successful(cache) + } else { + val newCache = cache + (event._object.name -> event._object) + // Attempt to parse + Json.fromJson[Resource](Json.toJson(event._object)) match { + case JsSuccess(resource, _) => + handleEvent(newCache, event._object, WatchEvent(event._type, resource)) + + case err: JsError => + val status = operator.statusFromError(JsResult.Exception(err), None) + updateStatus(cache, event._object, status) + } + } - if (unmodified) { - Future.successful(cache) - } else { - val newCache = cache + (event._object.name -> event._object) - // Attempt to parse - Json.fromJson[Resource](Json.toJson(event._object)) match { - case JsSuccess(resource, _) => - handleEvent(newCache, event._object, WatchEvent(event._type, resource)) - - case err: JsError => - val status = operator.statusFromError(JsResult.Exception(err), None) - updateStatus(cache, event._object, status) } - } + ) - }) - } - - private def updateStatus(cache: Cache, resource: JsValueCustomResource, statusUpdate: operator.StatusUpdate): Future[Cache] = { + private def updateStatus(cache: Cache, + resource: JsValueCustomResource, + statusUpdate: operator.StatusUpdate): Future[Cache] = statusUpdate match { case operator.StatusUpdate.None => Future.successful(cache) - case p@ operator.StatusUpdate.Patch(patch) => + case p @ operator.StatusUpdate.Patch(patch) => implicit val patchWrites: Writes[p.PatchType] = p.writes - client.patch[p.PatchType, JsValueCustomResource](resource.name, patch) + client + .patch[p.PatchType, JsValueCustomResource](resource.name, patch) .map(newResource => cache + (newResource.name -> newResource)) case operator.StatusUpdate.Update(status) => - client.updateStatus(resource.withStatus(Json.toJson(status))) + client + .updateStatus(resource.withStatus(Json.toJson(status))) .map(newResource => cache + (newResource.name -> newResource)) .recover { case e: K8SException if e.status.code.contains(409) => // Something else has modified it, so ignore, because this operator should get notified of the new resource - println(s"Got conflict on updating status of ${resource.namespace}/${resource.name}:${resource.uid}@${resource.metadata.resourceVersion}, ignoring to handle changed version.") + println( + s"Got conflict on 
updating status of ${resource.namespace}/${resource.name}:${resource.uid}@${resource.metadata.resourceVersion}, ignoring to handle changed version." + ) cache } } - } - private def handleResource(cache: Cache, jsResource: JsValueCustomResource, resource: Resource): Future[Cache] = { - operator.handleChanged(resource) + private def handleResource(cache: Cache, jsResource: JsValueCustomResource, resource: Resource): Future[Cache] = + operator + .handleChanged(resource) .flatMap(statusUpdate => updateStatus(cache, jsResource, statusUpdate)) - } - private def withErrorHandling(cache: Cache, jsResource: JsValueCustomResource, resource: Resource)(block: => Future[Cache]): Future[Cache] = { + private def withErrorHandling(cache: Cache, jsResource: JsValueCustomResource, resource: Resource)( + block: => Future[Cache] + ): Future[Cache] = { val result = try { block } catch { @@ -114,7 +134,9 @@ class OperatorRunner[Status, Resource <: CustomResource[_, Status]](client: Kube } } - private def handleEvent(cache: Cache, jsResource: JsValueCustomResource, event: WatchEvent[Resource]): Future[Cache] = { + private def handleEvent(cache: Cache, + jsResource: JsValueCustomResource, + event: WatchEvent[Resource]): Future[Cache] = { println("Got event " + event) withErrorHandling(cache, jsResource, event._object) { @@ -124,9 +146,7 @@ class OperatorRunner[Status, Resource <: CustomResource[_, Status]](client: Kube handleResource(cache, jsResource, event._object) case EventType.DELETED => - operator.handleDeleted(event._object).map(_ => - cache - jsResource.name - ) + operator.handleDeleted(event._object).map(_ => cache - jsResource.name) case EventType.MODIFIED => handleResource(cache, jsResource, event._object) @@ -140,4 +160,4 @@ class OperatorRunner[Status, Resource <: CustomResource[_, Status]](client: Kube } } -} \ No newline at end of file +} diff --git a/operator/src/main/scala/io/cloudstate/operator/ResourceHelper.scala b/operator/src/main/scala/io/cloudstate/operator/ResourceHelper.scala index 51e81b931..d78f3809b 100644 --- a/operator/src/main/scala/io/cloudstate/operator/ResourceHelper.scala +++ b/operator/src/main/scala/io/cloudstate/operator/ResourceHelper.scala @@ -19,20 +19,27 @@ class ResourceHelper(client: KubernetesClient)(implicit ec: ExecutionContext) { def ensureServiceForStatefulServiceExists(service: StatefulService.Resource): Future[Service] = { val expectedService = Service( - metadata = createMetadata(service.name, Some(service)), - ).setPort(Service.Port( - name = "grpc", - port = 80, - targetPort = Some(Left(KnativeSidecarH2cPort)) - )).withSelector(StatefulServiceUidLabel -> service.uid) + metadata = createMetadata(service.name, Some(service)) + ).setPort( + Service.Port( + name = "grpc", + port = 80, + targetPort = Some(Left(KnativeSidecarH2cPort)) + ) + ) + .withSelector(StatefulServiceUidLabel -> service.uid) ensureObjectOwnedByUsExists[Service](expectedService, Some(service)) { existing => - existing.copy(spec = existing.spec.map(_.copy( - ports = expectedService.spec.get.ports, - selector = expectedService.spec.get.selector, - _type = expectedService.spec.get._type, - externalName = expectedService.spec.get.externalName, - sessionAffinity = expectedService.spec.get.sessionAffinity - ))) + existing.copy( + spec = existing.spec.map( + _.copy( + ports = expectedService.spec.get.ports, + selector = expectedService.spec.get.selector, + _type = expectedService.spec.get._type, + externalName = expectedService.spec.get.externalName, + sessionAffinity = 
expectedService.spec.get.sessionAffinity + ) + ) + ) } } @@ -82,11 +89,10 @@ class ResourceHelper(client: KubernetesClient)(implicit ec: ExecutionContext) { private def deploymentScalerRoleName(serviceName: String) = s"$serviceName-$DeploymentScalerRoleName" - private def ensureRoleExists(expectedRole: Role, owner: Option[ObjectResource]): Future[Role] = { + private def ensureRoleExists(expectedRole: Role, owner: Option[ObjectResource]): Future[Role] = ensureObjectOwnedByUsExists[Role](expectedRole, owner) { existing => existing.copy(rules = expectedRole.rules) } - } def ensurePodReaderRoleBindingExists(serviceAccountName: String): Future[RoleBinding] = { val name = s"$PodReaderRoleBindingName-$serviceAccountName" @@ -110,7 +116,8 @@ class ResourceHelper(client: KubernetesClient)(implicit ec: ExecutionContext) { ensureRoleBindingExists(expectedRoleBinding, None) } - def ensureDeploymentScalerRoleBindingExists(serviceAccountName: String, owner: ObjectResource): Future[RoleBinding] = { + def ensureDeploymentScalerRoleBindingExists(serviceAccountName: String, + owner: ObjectResource): Future[RoleBinding] = { val roleName = deploymentScalerRoleName(owner.name) val name = s"${owner.name}-$DeploymentScalerRoleBindingName" val expectedRoleBinding = RoleBinding( @@ -132,47 +139,49 @@ class ResourceHelper(client: KubernetesClient)(implicit ec: ExecutionContext) { ensureRoleBindingExists(expectedRoleBinding, Some(owner)) } - private def createMetadata(name: String, owner: Option[ObjectResource]) = { + private def createMetadata(name: String, owner: Option[ObjectResource]) = ObjectMeta( name = name, labels = labels, - ownerReferences = owner.map(owner => - OwnerReference( - apiVersion = owner.apiVersion, - kind = owner.kind, - name = owner.name, - uid = owner.uid, - controller = Some(true), - blockOwnerDeletion = Some(true) + ownerReferences = owner + .map( + owner => + OwnerReference( + apiVersion = owner.apiVersion, + kind = owner.kind, + name = owner.name, + uid = owner.uid, + controller = Some(true), + blockOwnerDeletion = Some(true) + ) ) - ).toList + .toList ) - } - private def ensureRoleBindingExists(roleBinding: RoleBinding, owner: Option[ObjectResource]): Future[RoleBinding] = { + private def ensureRoleBindingExists(roleBinding: RoleBinding, owner: Option[ObjectResource]): Future[RoleBinding] = ensureObjectOwnedByUsExists[RoleBinding](roleBinding, owner) { existing => existing.copy(subjects = roleBinding.subjects, roleRef = roleBinding.roleRef) } - } /** - * If an owner is passed in, returns true if that owner is a controller owner reference of the passed in object. - * - * If no owner is passed in, returns true if the Kubernetes managed by label is cloud state. - */ - private def isOwnedByUs(obj: ObjectResource, owner: Option[ObjectResource]): Boolean = { + * If an owner is passed in, returns true if that owner is a controller owner reference of the passed in object. + * + * If no owner is passed in, returns true if the Kubernetes managed by label is cloud state. 
+ */ + private def isOwnedByUs(obj: ObjectResource, owner: Option[ObjectResource]): Boolean = owner match { case Some(owner) => - obj.metadata.ownerReferences.find(_.controller.getOrElse(false)).exists(ref => - ref.apiVersion.startsWith(owner.apiVersion.takeWhile(_ != '/') + "/") - ) + obj.metadata.ownerReferences + .find(_.controller.getOrElse(false)) + .exists(ref => ref.apiVersion.startsWith(owner.apiVersion.takeWhile(_ != '/') + "/")) case None => obj.metadata.labels.get(KubernetesManagedByLabel).contains(CloudStateGroup) } - } - private def ensureObjectOwnedByUsExists[O <: ObjectResource: Format: ResourceDefinition](obj: O, owner: Option[ObjectResource]) - (update: O => O): Future[O] = { + private def ensureObjectOwnedByUsExists[O <: ObjectResource: Format: ResourceDefinition]( + obj: O, + owner: Option[ObjectResource] + )(update: O => O): Future[O] = client.getOption[O](obj.name).flatMap { case Some(existing) if isOwnedByUs(existing, owner) => // We manage it, check that it's up to date @@ -191,6 +200,5 @@ class ResourceHelper(client: KubernetesClient)(implicit ec: ExecutionContext) { println(s"Did not find ${obj.kind} '${obj.name}', creating...") client.create(obj) } - } } diff --git a/operator/src/main/scala/io/cloudstate/operator/StatefulService.scala b/operator/src/main/scala/io/cloudstate/operator/StatefulService.scala index 91db2775c..35490af0e 100644 --- a/operator/src/main/scala/io/cloudstate/operator/StatefulService.scala +++ b/operator/src/main/scala/io/cloudstate/operator/StatefulService.scala @@ -3,7 +3,20 @@ package io.cloudstate.operator import java.time.ZonedDateTime import skuber.ResourceSpecification.Subresources -import skuber.{Container, CustomResource, EnvFromSource, EnvVar, Lifecycle, ListResource, Pod, Probe, Resource, ResourceDefinition, SecurityContext, Volume} +import skuber.{ + Container, + CustomResource, + EnvFromSource, + EnvVar, + Lifecycle, + ListResource, + Pod, + Probe, + Resource, + ResourceDefinition, + SecurityContext, + Volume +} import skuber.apiextensions.CustomResourceDefinition import play.api.libs.functional.syntax._ import play.api.libs.json._ @@ -17,15 +30,15 @@ object StatefulService { type ResourceList = ListResource[Resource] case class Spec( - containers: List[Container], - volumes: Option[List[Volume]], - serviceAccountName: Option[String], - autoscaling: Option[Autoscaling], - datastore: Option[StatefulStore], - sidecarResources: Option[Resource.Requirements], - sidecarJvmMemory: Option[String], - nodeSelector: Option[Map[String, String]], - tolerations: Option[List[Pod.Toleration]] + containers: List[Container], + volumes: Option[List[Volume]], + serviceAccountName: Option[String], + autoscaling: Option[Autoscaling], + datastore: Option[StatefulStore], + sidecarResources: Option[Resource.Requirements], + sidecarJvmMemory: Option[String], + nodeSelector: Option[Map[String, String]], + tolerations: Option[List[Pod.Toleration]] ) object Spec { @@ -38,50 +51,53 @@ object StatefulService { // So we need to be a little special in how we deal with that. 
val imagePullPolicyReads = ( (JsPath \ "image").read[String] and - (JsPath \ "imagePullPolicy").formatNullableEnum(Container.PullPolicy) - ) ((image, pullPolicy) => pullPolicy.getOrElse { - if (image.endsWith(":latest")) Container.PullPolicy.Always else Container.PullPolicy.IfNotPresent - }) - val imagePullPolicyFormat = OFormat(imagePullPolicyReads, (JsPath \ "imagePullPolicy").formatEnum(Container.PullPolicy)) + (JsPath \ "imagePullPolicy").formatNullableEnum(Container.PullPolicy) + )( + (image, pullPolicy) => + pullPolicy.getOrElse { + if (image.endsWith(":latest")) Container.PullPolicy.Always else Container.PullPolicy.IfNotPresent + } + ) + val imagePullPolicyFormat = + OFormat(imagePullPolicyReads, (JsPath \ "imagePullPolicy").formatEnum(Container.PullPolicy)) ( (JsPath \ "name").formatWithDefault[String]("") and - (JsPath \ "image").format[String] and - (JsPath \ "command").formatMaybeEmptyList[String] and - (JsPath \ "args").formatMaybeEmptyList[String] and - (JsPath \ "workingDir").formatNullable[String] and - (JsPath \ "ports").formatMaybeEmptyList[Container.Port] and - (JsPath \ "env").formatMaybeEmptyList[EnvVar] and - (JsPath \ "resources").formatNullable[Resource.Requirements] and - (JsPath \ "volumeMounts").formatMaybeEmptyList[Volume.Mount] and - (JsPath \ "livenessProbe").formatNullable[Probe] and - (JsPath \ "readinessProbe").formatNullable[Probe] and - (JsPath \ "lifecycle").formatNullable[Lifecycle] and - (JsPath \ "terminationMessagePath").formatNullable[String] and - (JsPath \ "terminationMessagePolicy").formatNullableEnum(Container.TerminationMessagePolicy) and - imagePullPolicyFormat and - (JsPath \ "securityContext").formatNullable[SecurityContext] and - (JsPath \ "envFrom").formatMaybeEmptyList[EnvFromSource] and - (JsPath \ "stdin").formatNullable[Boolean] and - (JsPath \ "stdinOnce").formatNullable[Boolean] and - (JsPath \ "tty").formatNullable[Boolean] and - (JsPath \ "volumeDevices").formatMaybeEmptyList[Volume.Device] - ) (Container.apply, unlift(Container.unapply)) + (JsPath \ "image").format[String] and + (JsPath \ "command").formatMaybeEmptyList[String] and + (JsPath \ "args").formatMaybeEmptyList[String] and + (JsPath \ "workingDir").formatNullable[String] and + (JsPath \ "ports").formatMaybeEmptyList[Container.Port] and + (JsPath \ "env").formatMaybeEmptyList[EnvVar] and + (JsPath \ "resources").formatNullable[Resource.Requirements] and + (JsPath \ "volumeMounts").formatMaybeEmptyList[Volume.Mount] and + (JsPath \ "livenessProbe").formatNullable[Probe] and + (JsPath \ "readinessProbe").formatNullable[Probe] and + (JsPath \ "lifecycle").formatNullable[Lifecycle] and + (JsPath \ "terminationMessagePath").formatNullable[String] and + (JsPath \ "terminationMessagePolicy").formatNullableEnum(Container.TerminationMessagePolicy) and + imagePullPolicyFormat and + (JsPath \ "securityContext").formatNullable[SecurityContext] and + (JsPath \ "envFrom").formatMaybeEmptyList[EnvFromSource] and + (JsPath \ "stdin").formatNullable[Boolean] and + (JsPath \ "stdinOnce").formatNullable[Boolean] and + (JsPath \ "tty").formatNullable[Boolean] and + (JsPath \ "volumeDevices").formatMaybeEmptyList[Volume.Device] + )(Container.apply, unlift(Container.unapply)) } - implicit val format: Format[Spec] = Json.format } case class Autoscaling( - enabled: Option[Boolean], - userFunctionTargetConcurrency: Option[Int], - requestTargetConcurrency: Option[Int], - targetConcurrencyWindow: Option[String], - scaleUpStableDeadline: Option[String], - scaleDownStableDeadline: Option[String], - 
requestRateThresholdFactor: Option[Double], - requestRateWindow: Option[String] + enabled: Option[Boolean], + userFunctionTargetConcurrency: Option[Int], + requestTargetConcurrency: Option[Int], + targetConcurrencyWindow: Option[String], + scaleUpStableDeadline: Option[String], + scaleDownStableDeadline: Option[String], + requestRateThresholdFactor: Option[Double], + requestRateWindow: Option[String] ) object Autoscaling { @@ -89,8 +105,8 @@ object StatefulService { } case class StatefulStore( - name: String, - config: Option[JsObject] + name: String, + config: Option[JsObject] ) object StatefulStore { @@ -98,12 +114,13 @@ object StatefulService { } case class Status( - conditions: List[Condition] + conditions: List[Condition] ) object Status { implicit val format: Format[Status] = - (__ \ "conditions").formatNullable[List[Condition]] + (__ \ "conditions") + .formatNullable[List[Condition]] .inmap[Status](c => Status(c.getOrElse(Nil)), s => Some(s.conditions)) } @@ -112,9 +129,7 @@ object StatefulService { version = CloudStateApiVersionNumber, kind = StatefulServiceKind, shortNames = List("ess"), - subresources = Some(Subresources() - .withStatusSubresource - ) + subresources = Some(Subresources().withStatusSubresource) ) implicit val statusSubEnabled = CustomResource.statusMethodsEnabler[Resource] diff --git a/operator/src/main/scala/io/cloudstate/operator/StatefulServiceOperatorFactory.scala b/operator/src/main/scala/io/cloudstate/operator/StatefulServiceOperatorFactory.scala index e6b7aad10..98102a1aa 100644 --- a/operator/src/main/scala/io/cloudstate/operator/StatefulServiceOperatorFactory.scala +++ b/operator/src/main/scala/io/cloudstate/operator/StatefulServiceOperatorFactory.scala @@ -30,22 +30,25 @@ import StatefulService.Resource import io.cloudstate.operator.stores.{StatefulStoreSupport, StatefulStoreUsageConfiguration} class StatefulServiceOperatorFactory(implicit mat: Materializer, ec: ExecutionContext) - extends OperatorFactory[StatefulService.Status, Resource] { + extends OperatorFactory[StatefulService.Status, Resource] { import OperatorConstants._ - override def apply(client: KubernetesClient, config: OperatorConfig): Operator = new StatefulServiceOperator(client, config) + override def apply(client: KubernetesClient, config: OperatorConfig): Operator = + new StatefulServiceOperator(client, config) class StatefulServiceOperator(client: KubernetesClient, config: OperatorConfig) extends Operator { private val helper = new ResourceHelper(client) - private def isOwnedByStatefulServiceController(deployment: Deployment): Boolean = { + private def isOwnedByStatefulServiceController(deployment: Deployment): Boolean = deployment.metadata.ownerReferences .find(_.controller.contains(true)) - .exists(ref => ref.apiVersion.startsWith(CloudStateGroup + "/") - && ref.kind == StatefulServiceKind) - } + .exists( + ref => + ref.apiVersion.startsWith(CloudStateGroup + "/") + && ref.kind == StatefulServiceKind + ) override def handleChanged(resource: Resource): Future[StatusUpdate] = { val deploymentName = deploymentNameFor(resource) @@ -56,7 +59,6 @@ class StatefulServiceOperatorFactory(implicit mat: Materializer, ec: ExecutionCo } val validatedDeployment = lookupDeployment(deploymentName) - val result = for { (storeUsage, maybeDeployment) <- validatedStoreUsage.zip(validatedDeployment) statusUpdate <- reconcileDeployment(resource, storeUsage, maybeDeployment) @@ -65,36 +67,35 @@ class StatefulServiceOperatorFactory(implicit mat: Materializer, ec: ExecutionCo result.fold(errors => 
updateCondition(resource, errors: _*), identity) } - private def validateStore(store: StatefulService.StatefulStore): Validated[StatefulStoreUsageConfiguration] = { + private def validateStore(store: StatefulService.StatefulStore): Validated[StatefulStoreUsageConfiguration] = for { storeResource <- lookupStore(store.name) storeSupport <- StatefulStoreSupport.get(storeResource) storeConfig <- storeSupport.validate(storeResource, client) usageConfiguration <- storeConfig.validateInstance(store.config, client) } yield usageConfiguration - } - private def lookupDeployment(deploymentName: String): Validated[Option[Deployment]] = { + private def lookupDeployment(deploymentName: String): Validated[Option[Deployment]] = Validated.future(client.getOption[Deployment](deploymentName)).flatMap { case Some(deployment) if isOwnedByStatefulServiceController(deployment) => Validated(Some(deployment)) case Some(_) => - Validated.error(ConditionResourcesAvailable, ConditionResourcesAvailableNotOwned, - s"There is an existing Deployment $deploymentName that we do not own.") + Validated.error(ConditionResourcesAvailable, + ConditionResourcesAvailableNotOwned, + s"There is an existing Deployment $deploymentName that we do not own.") case None => Validated(None) } - } - private def lookupStore(storeName: String): Validated[StatefulStore.Resource] = { + private def lookupStore(storeName: String): Validated[StatefulStore.Resource] = client.getOption[StatefulStore.Resource](storeName).map { case Some(store) => Validated(store) case None => - Validated.error(StatefulStoreConditionType, "StatefulStoreNotFound", s"StatefulStore with name $storeName not found.") + Validated.error(StatefulStoreConditionType, + "StatefulStoreNotFound", + s"StatefulStore with name $storeName not found.") } - } - - override def handleDeleted(resource: Resource): Future[Done] = { + override def handleDeleted(resource: Resource): Future[Done] = for { maybeExisting <- client.getOption[Deployment](deploymentNameFor(resource)) _ <- maybeExisting match { @@ -110,47 +111,52 @@ class StatefulServiceOperatorFactory(implicit mat: Materializer, ec: ExecutionCo } } yield Done - } - - override def statusFromError(error: Throwable, existing: Option[Resource]): StatusUpdate = { + override def statusFromError(error: Throwable, existing: Option[Resource]): StatusUpdate = existing match { case Some(service) => - updateCondition(service, Condition( - `type` = ConditionResourcesAvailable, - status = UnknownStatus, - reason = Some("UnknownError"), - message = Some(error.getMessage), - lastTransitionTime = Some(ZonedDateTime.now()) - )) + updateCondition( + service, + Condition( + `type` = ConditionResourcesAvailable, + status = UnknownStatus, + reason = Some("UnknownError"), + message = Some(error.getMessage), + lastTransitionTime = Some(ZonedDateTime.now()) + ) + ) case None => println("Unknown error handling service change, but we don't have an existing service to update: " + error) error.printStackTrace() StatusUpdate.None } - } private def updateCondition(service: Resource, conditions: Condition*): StatusUpdate = { val status = service.status.getOrElse(new StatefulService.Status(Nil)) - if (conditions.forall(condition => status.conditions.exists(c => - c.`type` == condition.`type` && - c.status == condition.status && - c.reason == condition.reason - ))) { + if (conditions.forall( + condition => + status.conditions.exists( + c => + c.`type` == condition.`type` && + c.status == condition.status && + c.reason == condition.reason + ) + )) { // Hasn't changed, 
don't update. StatusUpdate.None } else { // Otherwise, update. val newConditions = status.conditions.map { condition => - conditions.find(_.`type` == condition.`type`).getOrElse(condition) - } ++ conditions.filter(c => !status.conditions.exists(_.`type` == c.`type`)) + conditions.find(_.`type` == condition.`type`).getOrElse(condition) + } ++ conditions.filter(c => !status.conditions.exists(_.`type` == c.`type`)) StatusUpdate.Update(status.copy(conditions = newConditions)) } } - private def reconcileDeployment(service: Resource, store: StatefulStoreUsageConfiguration, - maybeDeployment: Option[Deployment]): Validated[StatusUpdate] = { + private def reconcileDeployment(service: Resource, + store: StatefulStoreUsageConfiguration, + maybeDeployment: Option[Deployment]): Validated[StatusUpdate] = { val deploymentName = deploymentNameFor(service) val newDeployment = createDeployment(service, store) @@ -159,7 +165,8 @@ class StatefulServiceOperatorFactory(implicit mat: Materializer, ec: ExecutionCo case Some(existing) => // todo why will the spec be None? val existingSpec = existing.spec.get - val desired = existing.copy(spec = newDeployment.spec) + val desired = existing + .copy(spec = newDeployment.spec) // Preserve current scale .withReplicas(existingSpec.replicas.getOrElse(1)) // Selector is immutable so preserve that too @@ -188,30 +195,35 @@ class StatefulServiceOperatorFactory(implicit mat: Materializer, ec: ExecutionCo for { _ <- ensureOtherObjectsExist(service, service.spec.serviceAccountName.getOrElse("default"), deploymentName) _ <- createdDeployment - } yield updateCondition(service, Condition( - `type` = StatefulStoreConditionType, - status = TrueStatus, - severity = Some("Info"), - lastTransitionTime = Some(ZonedDateTime.now()) - ) :: Condition( - `type` = ConditionResourcesAvailable, - status = TrueStatus, - severity = Some("Info"), - lastTransitionTime = Some(ZonedDateTime.now()) - ) :: store.successfulConditions: _*) + } yield updateCondition( + service, + Condition( + `type` = StatefulStoreConditionType, + status = TrueStatus, + severity = Some("Info"), + lastTransitionTime = Some(ZonedDateTime.now()) + ) :: Condition( + `type` = ConditionResourcesAvailable, + status = TrueStatus, + severity = Some("Info"), + lastTransitionTime = Some(ZonedDateTime.now()) + ) :: store.successfulConditions: _* + ) } private def createSideCar(service: Resource, store: StatefulStoreUsageConfiguration) = { val jvmMemory = service.spec.sidecarJvmMemory.getOrElse("256m") - val sidecarResources = service.spec.sidecarResources.getOrElse(Resource.Requirements( - limits = Map( - Resource.memory -> Resource.Quantity("512Mi") - ), - requests = Map( - Resource.memory -> Resource.Quantity("512Mi"), - Resource.cpu -> Resource.Quantity("400m") + val sidecarResources = service.spec.sidecarResources.getOrElse( + Resource.Requirements( + limits = Map( + Resource.memory -> Resource.Quantity("512Mi") + ), + requests = Map( + Resource.memory -> Resource.Quantity("512Mi"), + Resource.cpu -> Resource.Quantity("400m") + ) ) - )) + ) val userPort = service.spec.containers.flatMap(_.ports).headOption.fold(DefaultUserPort)(_.containerPort) @@ -239,39 +251,44 @@ class StatefulServiceOperatorFactory(implicit mat: Materializer, ec: ExecutionCo Container( name = "akka-sidecar", image = image, - imagePullPolicy = if (image.endsWith(":latest")) Container.PullPolicy.Always else Container.PullPolicy.IfNotPresent, + imagePullPolicy = + if (image.endsWith(":latest")) Container.PullPolicy.Always else 
Container.PullPolicy.IfNotPresent, ports = List( Container.Port(containerPort = KnativeSidecarH2cPort, name = "grpc-proxy") ), env = store.proxyContainerEnvVars ::: autoscalingEnvVars ::: List( - EnvVar("HTTP_PORT", KnativeSidecarH2cPort.toString), - EnvVar("USER_FUNCTION_PORT", userPort.toString), - EnvVar("REMOTING_PORT", AkkaRemotingPort.toString), - EnvVar("MANAGEMENT_PORT", AkkaManagementPort.toString), - EnvVar("SELECTOR_LABEL_VALUE", service.name), - EnvVar("SELECTOR_LABEL", StatefulServiceLabel), - EnvVar("REQUIRED_CONTACT_POINT_NR", "1"), - EnvVar("JAVA_OPTS", s"-Xms$jvmMemory -Xmx$jvmMemory") - ), - resources = Some(sidecarResources), - readinessProbe = Some(Probe( - action = HTTPGetAction( - port = Left(AkkaManagementPort), - path = "/ready" - ), - periodSeconds = Some(2), - failureThreshold = Some(20), - initialDelaySeconds = 2 - )), - livenessProbe = Some(Probe( - action = HTTPGetAction( - port = Left(AkkaManagementPort), - path = "/alive" + EnvVar("HTTP_PORT", KnativeSidecarH2cPort.toString), + EnvVar("USER_FUNCTION_PORT", userPort.toString), + EnvVar("REMOTING_PORT", AkkaRemotingPort.toString), + EnvVar("MANAGEMENT_PORT", AkkaManagementPort.toString), + EnvVar("SELECTOR_LABEL_VALUE", service.name), + EnvVar("SELECTOR_LABEL", StatefulServiceLabel), + EnvVar("REQUIRED_CONTACT_POINT_NR", "1"), + EnvVar("JAVA_OPTS", s"-Xms$jvmMemory -Xmx$jvmMemory") ), - periodSeconds = Some(2), - failureThreshold = Some(20), - initialDelaySeconds = 2 - )) + resources = Some(sidecarResources), + readinessProbe = Some( + Probe( + action = HTTPGetAction( + port = Left(AkkaManagementPort), + path = "/ready" + ), + periodSeconds = Some(2), + failureThreshold = Some(20), + initialDelaySeconds = 2 + ) + ), + livenessProbe = Some( + Probe( + action = HTTPGetAction( + port = Left(AkkaManagementPort), + path = "/alive" + ), + periodSeconds = Some(2), + failureThreshold = Some(20), + initialDelaySeconds = 2 + ) + ) ) } @@ -287,13 +304,15 @@ class StatefulServiceOperatorFactory(implicit mat: Materializer, ec: ExecutionCo val userContainer = orig.copy( name = UserContainerName, volumeMounts = orig.volumeMounts :+ Volume.Mount("varlog", "/var/log"), - ports = List(Container.Port( - name = UserPortName, - containerPort = userPort - )), - env = orig.env ++ List( - EnvVar(UserPortEnvVar, userPort.toString) + ports = List( + Container.Port( + name = UserPortName, + containerPort = userPort + ) ), + env = orig.env ++ List( + EnvVar(UserPortEnvVar, userPort.toString) + ), stdin = Some(false), tty = Some(false), image = orig.image, @@ -316,17 +335,17 @@ class StatefulServiceOperatorFactory(implicit mat: Materializer, ec: ExecutionCo val labels = { val ls = service.metadata.labels ++ Map( - StatefulServiceLabel -> service.name, - StatefulServiceUidLabel -> service.uid - ) ++ service.spec.datastore.map(ds => StatefulStoreLabel -> ds.name) + StatefulServiceLabel -> service.name, + StatefulServiceUidLabel -> service.uid + ) ++ service.spec.datastore.map(ds => StatefulStoreLabel -> ds.name) if (!ls.contains("app")) ls + ("app" -> service.name) else ls } val annotations = service.metadata.annotations - LastPinnedLabel val podAnnotations = annotations ++ Seq( - "traffic.sidecar.istio.io/includeInboundPorts" -> s"$KnativeSidecarH2cPort", - "traffic.sidecar.istio.io/excludeOutboundPorts" -> s"$AkkaRemotingPort,$AkkaManagementPort,9042" - ) + "traffic.sidecar.istio.io/includeInboundPorts" -> s"$KnativeSidecarH2cPort", + "traffic.sidecar.istio.io/excludeOutboundPorts" -> s"$AkkaRemotingPort,$AkkaManagementPort,9042" + ) // 
Create the deployment Deployment( @@ -352,16 +371,20 @@ class StatefulServiceOperatorFactory(implicit mat: Materializer, ec: ExecutionCo .withLabelSelector( StatefulServiceUidLabel is service.uid ) - .withTemplate(Pod.Template.Spec( - metadata = ObjectMeta( - labels = labels, - annotations = podAnnotations - ), - spec = Some(podSpec) - )) + .withTemplate( + Pod.Template.Spec( + metadata = ObjectMeta( + labels = labels, + annotations = podAnnotations + ), + spec = Some(podSpec) + ) + ) } - private def ensureOtherObjectsExist(service: Resource, serviceAccountName: String, deploymentName: String): Validated[Done] = { + private def ensureOtherObjectsExist(service: Resource, + serviceAccountName: String, + deploymentName: String): Validated[Done] = Validated.future(for { _ <- helper.ensurePodReaderRoleExists() _ <- helper.ensurePodReaderRoleBindingExists(serviceAccountName) @@ -369,8 +392,7 @@ class StatefulServiceOperatorFactory(implicit mat: Materializer, ec: ExecutionCo _ <- helper.ensureDeploymentScalerRoleBindingExists(serviceAccountName, service) _ <- helper.ensureServiceForStatefulServiceExists(service) } yield Done) - } private def deploymentNameFor(revision: Resource) = revision.metadata.name + "-deployment" } -} \ No newline at end of file +} diff --git a/operator/src/main/scala/io/cloudstate/operator/StatefulStore.scala b/operator/src/main/scala/io/cloudstate/operator/StatefulStore.scala index faeb455ce..a0e7bec19 100644 --- a/operator/src/main/scala/io/cloudstate/operator/StatefulStore.scala +++ b/operator/src/main/scala/io/cloudstate/operator/StatefulStore.scala @@ -43,9 +43,7 @@ object StatefulStore { version = "v1alpha1", kind = "StatefulStore", shortNames = Nil, - subresources = Some(Subresources() - .withStatusSubresource - ) + subresources = Some(Subresources().withStatusSubresource) ) implicit val statusSubEnabled = CustomResource.statusMethodsEnabler[Resource] @@ -53,4 +51,4 @@ object StatefulStore { val crd = CustomResourceDefinition[Resource] def apply(name: String, spec: Spec) = CustomResource[Spec, Status](spec).withName(name) -} \ No newline at end of file +} diff --git a/operator/src/main/scala/io/cloudstate/operator/StatefulStoreOperatorFactory.scala b/operator/src/main/scala/io/cloudstate/operator/StatefulStoreOperatorFactory.scala index 55379b804..9188478f5 100644 --- a/operator/src/main/scala/io/cloudstate/operator/StatefulStoreOperatorFactory.scala +++ b/operator/src/main/scala/io/cloudstate/operator/StatefulStoreOperatorFactory.scala @@ -33,8 +33,8 @@ import skuber.apps.v1.Deployment import scala.util.control.NonFatal -class StatefulStoreOperatorFactory(implicit mat: Materializer, ec: ExecutionContext) extends - OperatorFactory[StatefulStore.Status, StatefulStore.Resource] { +class StatefulStoreOperatorFactory(implicit mat: Materializer, ec: ExecutionContext) + extends OperatorFactory[StatefulStore.Status, StatefulStore.Resource] { import OperatorConstants._ import StatefulStore.Resource @@ -43,23 +43,30 @@ class StatefulStoreOperatorFactory(implicit mat: Materializer, ec: ExecutionCont class StatefulStoreOperator(client: KubernetesClient) extends Operator { - private def status(maybeSpec: Option[Resource], status: String, reason: Option[String] = None, message: Option[String] = None) = StatefulStore.Status( - conditions = Some(List( - Condition( - `type` = StatefulStoreConditionType, - status = status, - reason = reason, - message = message, - lastUpdateTime = Some(ZonedDateTime.now()) + private def status(maybeSpec: Option[Resource], + status: String, + reason: 
Option[String] = None, + message: Option[String] = None) = StatefulStore.Status( + conditions = Some( + List( + Condition( + `type` = StatefulStoreConditionType, + status = status, + reason = reason, + message = message, + lastUpdateTime = Some(ZonedDateTime.now()) + ) ) - )), + ), lastConfig = maybeSpec.map(spec => Base64.getEncoder.encodeToString(Json.toBytes(Json.toJson(spec.spec)))) ) override def handleChanged(resource: Resource): Future[StatusUpdate] = { val maybeLastConfig = parseLastConfig(resource.status) if (maybeLastConfig.contains(resource.spec) && - resource.status.exists(_.conditions.exists(_.exists(c => c.`type` == StatefulStoreConditionType && c.status == TrueStatus)))) { + resource.status.exists( + _.conditions.exists(_.exists(c => c.`type` == StatefulStoreConditionType && c.status == TrueStatus)) + )) { // Don't do anything if last time we saw it, we successfully validated it, and it hasn't changed since then. Future.successful(StatusUpdate.None) } else { @@ -87,16 +94,16 @@ class StatefulStoreOperatorFactory(implicit mat: Materializer, ec: ExecutionCont } } - override def handleDeleted(resource: Resource): Future[Done] = { + override def handleDeleted(resource: Resource): Future[Done] = updateDependents(resource.name) - } - - private def updateDependents(name: String) = { + private def updateDependents(name: String) = (for { - deployments <- client.listSelected[DeploymentList](LabelSelector( - StatefulStoreLabel is name - )) + deployments <- client.listSelected[DeploymentList]( + LabelSelector( + StatefulStoreLabel is name + ) + ) _ <- Future.sequence(deployments.map(deployment => updateServiceForDeployment(deployment))) } yield Done).recover { case error => @@ -104,15 +111,16 @@ class StatefulStoreOperatorFactory(implicit mat: Materializer, ec: ExecutionCont error.printStackTrace() Done } - } - - private def updateServiceForDeployment(deployment: Deployment): Future[Done] = { + private def updateServiceForDeployment(deployment: Deployment): Future[Done] = if (deployment.metadata.labels.contains(RevisionLabel)) { for { - maybeRevision <- deployment.metadata.labels.get(RevisionLabel).map { revisionName => - client.getOption[KnativeRevision.Resource](revisionName) - }.getOrElse(Future.successful(None)) + maybeRevision <- deployment.metadata.labels + .get(RevisionLabel) + .map { revisionName => + client.getOption[KnativeRevision.Resource](revisionName) + } + .getOrElse(Future.successful(None)) _ <- maybeRevision match { case Some(revision) => val status = revision.status.getOrElse(KnativeRevision.Status(None, Nil, None, None, None)) @@ -123,9 +131,12 @@ class StatefulStoreOperatorFactory(implicit mat: Materializer, ec: ExecutionCont } yield Done } else if (deployment.metadata.labels.contains(StatefulServiceLabel)) { for { - maybeStatefulService <- deployment.metadata.labels.get(StatefulServiceLabel).map { serviceName => - client.getOption[StatefulService.Resource](serviceName) - }.getOrElse(Future.successful(None)) + maybeStatefulService <- deployment.metadata.labels + .get(StatefulServiceLabel) + .map { serviceName => + client.getOption[StatefulService.Resource](serviceName) + } + .getOrElse(Future.successful(None)) _ <- maybeStatefulService match { case Some(service) => val status = service.status.getOrElse(StatefulService.Status(Nil)) @@ -140,8 +151,6 @@ class StatefulStoreOperatorFactory(implicit mat: Materializer, ec: ExecutionCont Future.successful(Done) } - } - // Here we change the validation to Unknown. 
It is the responsibility of the revision controller to // handle updates to the store, by changing to unknown we let it go in and do the update. private def touchKnativeRevisionStatus(status: KnativeRevision.Status): KnativeRevision.Status = { @@ -184,11 +193,10 @@ class StatefulStoreOperatorFactory(implicit mat: Materializer, ec: ExecutionCont status.copy(conditions = conditions) } - override def statusFromError(error: Throwable, existing: Option[Resource]): StatusUpdate = { + override def statusFromError(error: Throwable, existing: Option[Resource]): StatusUpdate = StatusUpdate.Update(status(existing, UnknownStatus, Some("UnknownOperatorError"), Some(error.getMessage))) - } - private def parseLastConfig(maybeStatus: Option[StatefulStore.Status]) = { + private def parseLastConfig(maybeStatus: Option[StatefulStore.Status]) = for { status <- maybeStatus lastConfigJs <- status.lastConfig @@ -198,7 +206,6 @@ class StatefulStoreOperatorFactory(implicit mat: Materializer, ec: ExecutionCont case NonFatal(_) => None } } yield lastConfig - } } -} \ No newline at end of file +} diff --git a/operator/src/main/scala/io/cloudstate/operator/Validated.scala b/operator/src/main/scala/io/cloudstate/operator/Validated.scala index 3a874110c..84fad8660 100644 --- a/operator/src/main/scala/io/cloudstate/operator/Validated.scala +++ b/operator/src/main/scala/io/cloudstate/operator/Validated.scala @@ -1,6 +1,5 @@ package io.cloudstate.operator - import java.time.ZonedDateTime import io.cloudstate.operator.OperatorConstants.FalseStatus @@ -8,8 +7,8 @@ import io.cloudstate.operator.OperatorConstants.FalseStatus import scala.concurrent.{ExecutionContext, Future} /** - * Probably could do this better with cats - */ + * Probably could do this better with cats + */ sealed trait Validated[+T] { import io.cloudstate.operator.Validated._ @@ -44,14 +43,17 @@ sealed trait Validated[+T] { } def filter(predicate: T => Boolean)(implicit ec: ExecutionContext): Validated[T] = this match { - case v@ Valid(t) if predicate(t) => v - case Valid(_) => invalid(Condition( - `type` = "Filtered", - status = OperatorConstants.FalseStatus, - reason = Some("PredicateFailed"), - message = Some("A generic predicate failed"), - lastTransitionTime = Some(ZonedDateTime.now()) - )) + case v @ Valid(t) if predicate(t) => v + case Valid(_) => + invalid( + Condition( + `type` = "Filtered", + status = OperatorConstants.FalseStatus, + reason = Some("PredicateFailed"), + message = Some("A generic predicate failed"), + lastTransitionTime = Some(ZonedDateTime.now()) + ) + ) case invalid @ Invalid(_) => invalid case FutureBased(future) => FutureBased(future.map(_.filter(predicate))) } @@ -66,15 +68,15 @@ object Validated { def invalid(errors: List[Condition]): Validated[Nothing] = Invalid(errors) def invalid(error: Condition): Validated[Nothing] = Invalid(List(error)) - def error(`type`: String, reason: String, message: String) = { - Validated.invalid(Condition( - `type` = `type`, - status = FalseStatus, - severity = Some("Error"), - lastTransitionTime = Some(ZonedDateTime.now()), - reason = Some(reason), - message = Some(message))) - } + def error(`type`: String, reason: String, message: String) = + Validated.invalid( + Condition(`type` = `type`, + status = FalseStatus, + severity = Some("Error"), + lastTransitionTime = Some(ZonedDateTime.now()), + reason = Some(reason), + message = Some(message)) + ) implicit def futureT2Validated[T](future: Future[T])(implicit ec: ExecutionContext): Validated[T] = Validated.future(future) @@ -89,4 +91,3 @@ object 
Validated { private case class Valid[+T](t: T) extends Validated[T] private case class Invalid(errors: List[Condition]) extends Validated[Nothing] } - diff --git a/operator/src/main/scala/io/cloudstate/operator/Watcher.scala b/operator/src/main/scala/io/cloudstate/operator/Watcher.scala index 29513df49..a827c0efb 100644 --- a/operator/src/main/scala/io/cloudstate/operator/Watcher.scala +++ b/operator/src/main/scala/io/cloudstate/operator/Watcher.scala @@ -14,11 +14,13 @@ import scala.concurrent.ExecutionContext object Watcher { - private implicit def listResourceFormat[Resource <: ObjectResource: Format]: Format[ListResource[Resource]] = ListResourceFormat(implicitly[Format[Resource]]) - - def watch[Resource <: ObjectResource: Format: ResourceDefinition](client: KubernetesClient, - handler: Flow[WatchEvent[Resource], _, _])(implicit ec: ExecutionContext, mat: Materializer): KillSwitch = { + private implicit def listResourceFormat[Resource <: ObjectResource: Format]: Format[ListResource[Resource]] = + ListResourceFormat(implicitly[Format[Resource]]) + def watch[Resource <: ObjectResource: Format: ResourceDefinition]( + client: KubernetesClient, + handler: Flow[WatchEvent[Resource], _, _] + )(implicit ec: ExecutionContext, mat: Materializer): KillSwitch = // Summary of what we want our event loop to look like: // * We start by listing all the resources, and process them. // * Then we start watching from the resourceVersion that we got in our list, so we get all updates. @@ -28,46 +30,66 @@ object Watcher { // restarting. // * Also, if errors are encountered, we don't want to continually restart in a hot loop, so we use the // RestartSource to restart with backoff. - RestartSource.onFailuresWithBackoff(2.seconds, 20.seconds, 0.2) { () => - val source = Source.repeat(NotUsed) - .flatMapConcat { _ => - Source.fromFutureSource( - client.list[ListResource[Resource]]() - .map { resources => - val watch = client - .watchAllContinuously[Resource](sinceResourceVersion = Some(resources.resourceVersion)) - - Source(resources) - .map(WatchEvent(EventType.MODIFIED, _)) - .concat(watch) - } - ).takeWithin(5.minutes) - } + RestartSource + .onFailuresWithBackoff(2.seconds, 20.seconds, 0.2) { () => + val source = Source + .repeat(NotUsed) + .flatMapConcat { _ => + Source + .fromFutureSource( + client + .list[ListResource[Resource]]() + .map { resources => + val watch = client + .watchAllContinuously[Resource](sinceResourceVersion = Some(resources.resourceVersion)) - source.via(handler) - }.viaMat(KillSwitches.single)(Keep.right).to(Sink.ignore).run() - } + Source(resources) + .map(WatchEvent(EventType.MODIFIED, _)) + .concat(watch) + } + ) + .takeWithin(5.minutes) + } - def watchSingle[Resource <: ObjectResource: Format: ResourceDefinition](client: KubernetesClient, resourceName: String, - handler: Flow[WatchEvent[Resource], _, _])(implicit ec: ExecutionContext, mat: Materializer): KillSwitch = { + source.via(handler) + } + .viaMat(KillSwitches.single)(Keep.right) + .to(Sink.ignore) + .run() - RestartSource.onFailuresWithBackoff(2.seconds, 20.seconds, 0.2) { () => - val source = Source.repeat(NotUsed) - .flatMapConcat { _ => - Source.fromFutureSource( - client.getOption[Resource](resourceName).map { - case Some(resource) => - val watch = client.watchContinuously[Resource](resourceName, sinceResourceVersion = Some(resource.resourceVersion)) - Source.single(resource) - .map(WatchEvent(EventType.MODIFIED, _)) - .concat(watch) - case None => - throw new RuntimeException(s"Resource $resourceName not found in 
namespace ${client.namespaceName}!") - } - ).takeWithin(5.minutes) - } + def watchSingle[Resource <: ObjectResource: Format: ResourceDefinition]( + client: KubernetesClient, + resourceName: String, + handler: Flow[WatchEvent[Resource], _, _] + )(implicit ec: ExecutionContext, mat: Materializer): KillSwitch = + RestartSource + .onFailuresWithBackoff(2.seconds, 20.seconds, 0.2) { () => + val source = Source + .repeat(NotUsed) + .flatMapConcat { _ => + Source + .fromFutureSource( + client.getOption[Resource](resourceName).map { + case Some(resource) => + val watch = + client.watchContinuously[Resource](resourceName, + sinceResourceVersion = Some(resource.resourceVersion)) + Source + .single(resource) + .map(WatchEvent(EventType.MODIFIED, _)) + .concat(watch) + case None => + throw new RuntimeException( + s"Resource $resourceName not found in namespace ${client.namespaceName}!" + ) + } + ) + .takeWithin(5.minutes) + } - source.via(handler) - }.viaMat(KillSwitches.single)(Keep.right).to(Sink.ignore).run() - } + source.via(handler) + } + .viaMat(KillSwitches.single)(Keep.right) + .to(Sink.ignore) + .run() } diff --git a/operator/src/main/scala/io/cloudstate/operator/stores/CassandraStoreSupport.scala b/operator/src/main/scala/io/cloudstate/operator/stores/CassandraStoreSupport.scala index 99858e384..1cc50ec6c 100644 --- a/operator/src/main/scala/io/cloudstate/operator/stores/CassandraStoreSupport.scala +++ b/operator/src/main/scala/io/cloudstate/operator/stores/CassandraStoreSupport.scala @@ -10,7 +10,7 @@ object CassandraStoreSupport extends StatefulStoreSupport { override def name: String = OperatorConstants.CassandraStatefulStoreType - override def validate(store: StatefulStore.Resource, client: KubernetesClient): Validated[ConfiguredStatefulStore] = { + override def validate(store: StatefulStore.Resource, client: KubernetesClient): Validated[ConfiguredStatefulStore] = store.spec.deployment match { case Some(`UnmanagedStatefulStoreDeployment`) => store.spec.config.flatMap(c => (c \ "service").asOpt[String]) match { @@ -18,36 +18,43 @@ object CassandraStoreSupport extends StatefulStoreSupport { Validated(new UnmanagedCassandra(serviceName)) case None => - Validated.error(StatefulStoreConditionType, "MissingServiceName", - "No service name declared in unmanaged Cassandra journal") + Validated.error(StatefulStoreConditionType, + "MissingServiceName", + "No service name declared in unmanaged Cassandra journal") } case Some(unknown) => - Validated.error(StatefulStoreConditionType, "UnknownDeploymentType", - s"Unknown Cassandra deployment type: $unknown, supported types for Cassandra are: $UnmanagedStatefulStoreDeployment") + Validated.error( + StatefulStoreConditionType, + "UnknownDeploymentType", + s"Unknown Cassandra deployment type: $unknown, supported types for Cassandra are: $UnmanagedStatefulStoreDeployment" + ) case None => - Validated.error(StatefulStoreConditionType, "UnspecifiedDeploymentType", - s"Unspecified Cassandra deployment type, supported types for Cassandra are: $UnmanagedStatefulStoreDeployment") + Validated.error( + StatefulStoreConditionType, + "UnspecifiedDeploymentType", + s"Unspecified Cassandra deployment type, supported types for Cassandra are: $UnmanagedStatefulStoreDeployment" + ) } - } - - override def reconcile(store: Resource, client: KubernetesClient): Validated[ConfiguredStatefulStore] = validate(store, client) + override def reconcile(store: Resource, client: KubernetesClient): Validated[ConfiguredStatefulStore] = + validate(store, client) private class 
UnmanagedCassandra(service: String) extends ConfiguredStatefulStore { override def successfulConditions: List[Condition] = Nil - override def validateInstance(config: Option[JsValue], client: KubernetesClient): Validated[StatefulStoreUsageConfiguration] = { + override def validateInstance(config: Option[JsValue], + client: KubernetesClient): Validated[StatefulStoreUsageConfiguration] = config.flatMap(config => (config \ "keyspace").asOpt[String]) match { case Some(keyspace) => Validated(new CassandraUsage(service, keyspace)) case None => - Validated.error(StatefulStoreConditionType, "MissingKeyspace", - "No keyspace declared for unmanaged Cassandra journal") + Validated.error(StatefulStoreConditionType, + "MissingKeyspace", + "No keyspace declared for unmanaged Cassandra journal") } - } } private class CassandraUsage(service: String, keyspace: String) extends StatefulStoreUsageConfiguration { diff --git a/operator/src/main/scala/io/cloudstate/operator/stores/CredentialsHelper.scala b/operator/src/main/scala/io/cloudstate/operator/stores/CredentialsHelper.scala index c27449495..8d8ac0d1b 100644 --- a/operator/src/main/scala/io/cloudstate/operator/stores/CredentialsHelper.scala +++ b/operator/src/main/scala/io/cloudstate/operator/stores/CredentialsHelper.scala @@ -5,33 +5,34 @@ import play.api.libs.functional.syntax._ import skuber.EnvVar /** - * Credentials can either be supplied from a secret, or direct as values. The supported ways of configuring a - * credential called username are: - * - * {{{ - * credentials: - * username: value - * - * credentialsFromSecret: - * name: secretname - * usernameKey: mykey (defaults to username) - * }}} - * - * For a given key, only one of the above methods are allowed, but multiple methods can be combined for different - * keys. So credentials can be pulled from multiple secrets, for example. - */ + * Credentials can either be supplied from a secret, or direct as values. The supported ways of configuring a + * credential called username are: + * + * {{{ + * credentials: + * username: value + * + * credentialsFromSecret: + * name: secretname + * usernameKey: mykey (defaults to username) + * }}} + * + * For a given key, only one of the above methods are allowed, but multiple methods can be combined for different + * keys. So credentials can be pulled from multiple secrets, for example. + */ object CredentialsHelper { - def readCredentialParam(key: String): Reads[CredentialParam] = { + def readCredentialParam(key: String): Reads[CredentialParam] = readCredentialParam(key, true).flatMap { case Some(param) => Reads.pure(param) - case None => Reads.failed(s"No $key specified in credentials. Either specify using credentials/$key, or by configuring credentialsFromSecret") + case None => + Reads.failed( + s"No $key specified in credentials. 
Either specify using credentials/$key, or by configuring credentialsFromSecret" + ) } - } - def readOptionalCredentialParam(key: String): Reads[Option[CredentialParam]] = { + def readOptionalCredentialParam(key: String): Reads[Option[CredentialParam]] = readCredentialParam(key, false) - } private def readCredentialParam(key: String, mandatory: Boolean): Reads[Option[CredentialParam]] = { @@ -45,7 +46,9 @@ object CredentialsHelper { (nameValueReads and credentialsFromSecretReads).tupled.flatMap { case (Some(_), Some((_, Some(_)))) => - Reads.failed(s"$key can either be specified as a value in credentials, or configured as ${key}Key in credentialsFromSecret, not both.") + Reads.failed( + s"$key can either be specified as a value in credentials, or configured as ${key}Key in credentialsFromSecret, not both." + ) case (Some(value), _) => Reads.pure(Some(value)) case (_, Some((name, Some(customKey)))) => Reads.pure(Some(FromSecret(name, customKey))) case (_, Some((name, None))) if mandatory => Reads.pure(Some(FromSecret(name, key))) @@ -53,7 +56,6 @@ object CredentialsHelper { } } - sealed trait CredentialParam { def toEnvVar: EnvVar.Value } @@ -65,5 +67,3 @@ object CredentialsHelper { } } - - diff --git a/operator/src/main/scala/io/cloudstate/operator/stores/InMemoryStoreSupport.scala b/operator/src/main/scala/io/cloudstate/operator/stores/InMemoryStoreSupport.scala index 412976224..0dbd03d82 100644 --- a/operator/src/main/scala/io/cloudstate/operator/stores/InMemoryStoreSupport.scala +++ b/operator/src/main/scala/io/cloudstate/operator/stores/InMemoryStoreSupport.scala @@ -5,12 +5,18 @@ import play.api.libs.json.JsValue import skuber.EnvVar import skuber.api.client.KubernetesClient -object InMemoryStoreSupport extends StatefulStoreSupport with ConfiguredStatefulStore with StatefulStoreUsageConfiguration { +object InMemoryStoreSupport + extends StatefulStoreSupport + with ConfiguredStatefulStore + with StatefulStoreUsageConfiguration { override def name: String = OperatorConstants.InMemoryStatefulStoreType - override def validate(store: StatefulStore.Resource, client: KubernetesClient): Validated[ConfiguredStatefulStore] = Validated(this) - override def reconcile(store: Resource, client: KubernetesClient): Validated[ConfiguredStatefulStore] = Validated(this) + override def validate(store: StatefulStore.Resource, client: KubernetesClient): Validated[ConfiguredStatefulStore] = + Validated(this) + override def reconcile(store: Resource, client: KubernetesClient): Validated[ConfiguredStatefulStore] = + Validated(this) override def successfulConditions: List[Condition] = List() - override def validateInstance(config: Option[JsValue], client: KubernetesClient): Validated[StatefulStoreUsageConfiguration] = Validated(this) + override def validateInstance(config: Option[JsValue], + client: KubernetesClient): Validated[StatefulStoreUsageConfiguration] = Validated(this) override def proxyImage(config: ImageConfig): String = config.inMemory override def proxyContainerEnvVars: List[EnvVar] = Nil } diff --git a/operator/src/main/scala/io/cloudstate/operator/stores/PostgresStoreSupport.scala b/operator/src/main/scala/io/cloudstate/operator/stores/PostgresStoreSupport.scala index eff5c0f54..3d83b4bb3 100644 --- a/operator/src/main/scala/io/cloudstate/operator/stores/PostgresStoreSupport.scala +++ b/operator/src/main/scala/io/cloudstate/operator/stores/PostgresStoreSupport.scala @@ -12,44 +12,53 @@ object PostgresStoreSupport extends StatefulStoreSupport { override def name: String = "Postgres" - override def 
validate(store: Resource, client: KubernetesClient): Validated[ConfiguredStatefulStore] = { + override def validate(store: Resource, client: KubernetesClient): Validated[ConfiguredStatefulStore] = store.spec.deployment match { case Some(`UnmanagedStatefulStoreDeployment`) => store.spec.config.map(_.validate[PostgresConfig]) match { case None => - Validated.error(StatefulStoreConditionType, "BadConfiguration", - s"Missing configuration for unmanaged Postgres store") + Validated.error(StatefulStoreConditionType, + "BadConfiguration", + s"Missing configuration for unmanaged Postgres store") case Some(JsError(errors)) => - Validated.error(StatefulStoreConditionType, "BadConfiguration", - s"Configuration error in postgres store at ${errors.head._1}: ${errors.head._2.head.message}") + Validated.error( + StatefulStoreConditionType, + "BadConfiguration", + s"Configuration error in postgres store at ${errors.head._1}: ${errors.head._2.head.message}" + ) case Some(JsSuccess(config, _)) => - // todo validate that any referenced secrets exist Validated(new UnmanagedPostgres(config)) } case Some(unknown) => - Validated.error(StatefulStoreConditionType, "UnknownDeploymentType", - s"Unknown Postgres deployment type: $unknown, supported types for Postgres are: $UnmanagedStatefulStoreDeployment") + Validated.error( + StatefulStoreConditionType, + "UnknownDeploymentType", + s"Unknown Postgres deployment type: $unknown, supported types for Postgres are: $UnmanagedStatefulStoreDeployment" + ) case None => - Validated.error(StatefulStoreConditionType, "UnspecifiedDeploymentType", - s"Unspecified Postgres deployment type, supported types for Postgres are: $UnmanagedStatefulStoreDeployment") + Validated.error( + StatefulStoreConditionType, + "UnspecifiedDeploymentType", + s"Unspecified Postgres deployment type, supported types for Postgres are: $UnmanagedStatefulStoreDeployment" + ) } - } - - override def reconcile(store: Resource, client: KubernetesClient): Validated[ConfiguredStatefulStore] = validate(store, client) + override def reconcile(store: Resource, client: KubernetesClient): Validated[ConfiguredStatefulStore] = + validate(store, client) private class UnmanagedPostgres(config: PostgresConfig) extends ConfiguredStatefulStore { override def successfulConditions: List[Condition] = Nil - override def validateInstance(jsConfig: Option[JsValue], client: KubernetesClient): Validated[StatefulStoreUsageConfiguration] = { + override def validateInstance(jsConfig: Option[JsValue], + client: KubernetesClient): Validated[StatefulStoreUsageConfiguration] = { val instanceSchema = jsConfig.flatMap(c => (c \ "schema").asOpt[String]).map(Value) val instanceConfig = config.copy(schema = instanceSchema.orElse(config.schema)) Validated(new PostgresUsage(instanceConfig)) @@ -61,24 +70,29 @@ object PostgresStoreSupport extends StatefulStoreSupport { override def proxyImage(config: ImageConfig): String = config.postgres - override def proxyContainerEnvVars: List[EnvVar] = List( - EnvVar("POSTGRES_SERVICE", config.service), - EnvVar("POSTGRES_DATABASE", config.database.toEnvVar), - EnvVar("POSTGRES_USERNAME", config.username.toEnvVar), - EnvVar("POSTGRES_PASSWORD", config.password.toEnvVar) - ) ++ config.port.map(port => EnvVar("POSTGRES_PORT", port.toString)) ++ + override def proxyContainerEnvVars: List[EnvVar] = + List( + EnvVar("POSTGRES_SERVICE", config.service), + EnvVar("POSTGRES_DATABASE", config.database.toEnvVar), + EnvVar("POSTGRES_USERNAME", config.username.toEnvVar), + EnvVar("POSTGRES_PASSWORD", 
config.password.toEnvVar) + ) ++ config.port.map(port => EnvVar("POSTGRES_PORT", port.toString)) ++ config.schema.map(schema => EnvVar("POSTGRES_SCHEMA", schema.toEnvVar)) } - case class PostgresConfig(service: String, port: Option[Int], database: CredentialParam, username: CredentialParam, password: CredentialParam, schema: Option[CredentialParam]) + case class PostgresConfig(service: String, + port: Option[Int], + database: CredentialParam, + username: CredentialParam, + password: CredentialParam, + schema: Option[CredentialParam]) - implicit def postgresCredentialsReads: Reads[PostgresConfig] = { + implicit def postgresCredentialsReads: Reads[PostgresConfig] = ((__ \ "service").read[String] and - (__ \ "port").readNullable[Int] and - readCredentialParam("database") and - readCredentialParam("username") and - readCredentialParam("password") and - readOptionalCredentialParam("schema"))(PostgresConfig) - } + (__ \ "port").readNullable[Int] and + readCredentialParam("database") and + readCredentialParam("username") and + readCredentialParam("password") and + readOptionalCredentialParam("schema"))(PostgresConfig) } diff --git a/operator/src/main/scala/io/cloudstate/operator/stores/StatefulStoreSupport.scala b/operator/src/main/scala/io/cloudstate/operator/stores/StatefulStoreSupport.scala index 7e4bde1eb..3d9116caa 100644 --- a/operator/src/main/scala/io/cloudstate/operator/stores/StatefulStoreSupport.scala +++ b/operator/src/main/scala/io/cloudstate/operator/stores/StatefulStoreSupport.scala @@ -10,26 +10,29 @@ import skuber.api.client.KubernetesClient import scala.concurrent.Future - object StatefulStoreSupport { - private val types: List[StatefulStoreSupport] = List(CassandraStoreSupport, InMemoryStoreSupport, PostgresStoreSupport) + private val types: List[StatefulStoreSupport] = + List(CassandraStoreSupport, InMemoryStoreSupport, PostgresStoreSupport) def get(storeType: String): Option[StatefulStoreSupport] = types.find(_.name == storeType) - def get(store: StatefulStore.Resource): Validated[StatefulStoreSupport] = { + def get(store: StatefulStore.Resource): Validated[StatefulStoreSupport] = store.spec.`type` match { case Some(storeType) => StatefulStoreSupport.get(storeType) match { case Some(storeSupport) => Validated(storeSupport) case None => - Validated.error(StatefulStoreConditionType, "UnknownStoreType", - s"Unknown store type: $storeType, supported types are: ${StatefulStoreSupport.supportedTypes.mkString(", ")}") + Validated.error( + StatefulStoreConditionType, + "UnknownStoreType", + s"Unknown store type: $storeType, supported types are: ${StatefulStoreSupport.supportedTypes.mkString(", ")}" + ) } case None => - Validated.error(StatefulStoreConditionType, "UnspecifiedStoreType", - s"StatefulStore ${store.name} does not specify a store type.") + Validated.error(StatefulStoreConditionType, + "UnspecifiedStoreType", + s"StatefulStore ${store.name} does not specify a store type.") } - } def supportedTypes: List[String] = types.map(_.name) @@ -57,4 +60,4 @@ trait StatefulStoreUsageConfiguration { def successfulConditions: List[Condition] def proxyImage(config: ImageConfig): String def proxyContainerEnvVars: List[EnvVar] -} \ No newline at end of file +} diff --git a/project/plugins.sbt b/project/plugins.sbt index f399a42c2..3d2e6cd54 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -7,6 +7,10 @@ addSbtPlugin("com.lightbend.sbt" % "sbt-javaagent" % "0.1.4") addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.9") +addSbtPlugin("com.lightbend.sbt" % "sbt-java-formatter" 
% "0.4.4") + +addSbtPlugin("org.scalameta" % "sbt-scalafmt" % "2.0.4") + addSbtPlugin("de.heikoseeberger" % "sbt-header" % "5.2.0") addSbtPlugin("com.github.gseitz" % "sbt-protobuf" % "0.6.5") diff --git a/proxy/cassandra/src/main/java/io/cloudstate/internal/svm/Substitutions.java b/proxy/cassandra/src/main/java/io/cloudstate/internal/svm/Substitutions.java index 5fb50be4b..f27949fcb 100644 --- a/proxy/cassandra/src/main/java/io/cloudstate/internal/svm/Substitutions.java +++ b/proxy/cassandra/src/main/java/io/cloudstate/internal/svm/Substitutions.java @@ -1,5 +1,6 @@ // FIXME THIS FILE IS FOR PROOF-OF-CONCEPT ONLY -// THESE SUBSTITUTIONS SHOULD BE PRESENT IN THE SHADED VERSION OF NETTY IN Cassandra client. NOT HERE! +// THESE SUBSTITUTIONS SHOULD BE PRESENT IN THE SHADED VERSION OF NETTY IN Cassandra client. NOT +// HERE! package io.cloudstate.internal.svm; @@ -9,24 +10,34 @@ @TargetClass(className = "io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess") final class Target_io_netty_util_internal_shaded_org_jctools_util_UnsafeRefArrayAccess { - @Alias @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayIndexShift, declClass = Object[].class) + @Alias + @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayIndexShift, declClass = Object[].class) public static int REF_ELEMENT_SHIFT; } @TargetClass(className = "io.netty.util.internal.CleanerJava6") final class Target_io_netty_util_internal_CleanerJava6 { - @Alias @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.FieldOffset, declClassName = "java.nio.DirectByteBuffer", name = "cleaner") + @Alias + @RecomputeFieldValue( + kind = RecomputeFieldValue.Kind.FieldOffset, + declClassName = "java.nio.DirectByteBuffer", + name = "cleaner") private static long CLEANER_FIELD_OFFSET; } @TargetClass(className = "io.netty.util.internal.PlatformDependent") final class Target_io_netty_util_internal_PlatformDependent { - @Alias @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayBaseOffset, declClass = byte[].class) + @Alias + @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayBaseOffset, declClass = byte[].class) private static long ARRAY_BASE_OFFSET; } @TargetClass(className = "io.netty.util.internal.PlatformDependent0") final class Target_io_netty_util_internal_PlatformDependent0 { - @Alias @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.FieldOffset, declClassName = "java.nio.Buffer", name = "address") + @Alias + @RecomputeFieldValue( + kind = RecomputeFieldValue.Kind.FieldOffset, + declClassName = "java.nio.Buffer", + name = "address") private static long ADDRESS_FIELD_OFFSET; } diff --git a/proxy/core/src/main/java/io/cloudstate/internal/svm/Substitutions.java b/proxy/core/src/main/java/io/cloudstate/internal/svm/Substitutions.java index 95f26cdc5..11f4aea4e 100644 --- a/proxy/core/src/main/java/io/cloudstate/internal/svm/Substitutions.java +++ b/proxy/core/src/main/java/io/cloudstate/internal/svm/Substitutions.java @@ -6,65 +6,97 @@ import com.oracle.svm.core.annotate.*; -@TargetClass(className = "io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess") -final class Target_io_grpc_netty_shaded_io_netty_util_internal_shaded_org_jctools_util_UnsafeRefArrayAccess { - @Alias @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayIndexShift, declClass = Object[].class) +@TargetClass( + className = + "io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess") +final +class 
Target_io_grpc_netty_shaded_io_netty_util_internal_shaded_org_jctools_util_UnsafeRefArrayAccess { + @Alias + @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayIndexShift, declClass = Object[].class) public static int REF_ELEMENT_SHIFT; } @TargetClass(className = "io.grpc.netty.shaded.io.netty.util.internal.CleanerJava6") final class Target_io_grpc_netty_shaded_io_netty_util_internal_CleanerJava6 { - @Alias @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.FieldOffset, declClassName = "java.nio.DirectByteBuffer", name = "cleaner") + @Alias + @RecomputeFieldValue( + kind = RecomputeFieldValue.Kind.FieldOffset, + declClassName = "java.nio.DirectByteBuffer", + name = "cleaner") private static long CLEANER_FIELD_OFFSET; } @TargetClass(className = "io.grpc.netty.shaded.io.netty.util.internal.PlatformDependent") final class Target_io_grpc_netty_shaded_io_netty_util_internal_PlatformDependent { - @Alias @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayBaseOffset, declClass = byte[].class) + @Alias + @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayBaseOffset, declClass = byte[].class) private static long BYTE_ARRAY_BASE_OFFSET; } @TargetClass(className = "io.grpc.netty.shaded.io.netty.util.internal.PlatformDependent0") final class Target_io_grpc_netty_shaded_io_netty_util_internal_PlatformDependent0 { - @Alias @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.FieldOffset, declClassName = "java.nio.Buffer", name = "address") + @Alias + @RecomputeFieldValue( + kind = RecomputeFieldValue.Kind.FieldOffset, + declClassName = "java.nio.Buffer", + name = "address") private static long ADDRESS_FIELD_OFFSET; } @TargetClass(className = "org.agrona.concurrent.AbstractConcurrentArrayQueue") final class Target_org_agrona_concurrent_AbstractConcurrentArrayQueue { - @Alias @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayIndexShift, declClass = Object[].class) + @Alias + @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayIndexShift, declClass = Object[].class) public static int SHIFT_FOR_SCALE; } @TargetClass(className = "com.google.protobuf.UnsafeUtil") final class Target_com_google_protobuf_UnsafeUtil { - @Alias @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayBaseOffset, declClass = boolean[].class) + @Alias + @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayBaseOffset, declClass = boolean[].class) public static long BOOLEAN_ARRAY_BASE_OFFSET; - @Alias @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayIndexScale, declClass = boolean[].class) + + @Alias + @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayIndexScale, declClass = boolean[].class) public static long BOOLEAN_ARRAY_INDEX_SCALE; - @Alias @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayBaseOffset, declClass = int[].class) + @Alias + @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayBaseOffset, declClass = int[].class) public static long INT_ARRAY_BASE_OFFSET; - @Alias @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayIndexScale, declClass = int[].class) + + @Alias + @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayIndexScale, declClass = int[].class) public static long INT_ARRAY_INDEX_SCALE; - @Alias @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayBaseOffset, declClass = long[].class) + @Alias + @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayBaseOffset, declClass = long[].class) public static long LONG_ARRAY_BASE_OFFSET; - @Alias @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayIndexScale, declClass = 
long[].class) + + @Alias + @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayIndexScale, declClass = long[].class) public static long LONG_ARRAY_INDEX_SCALE; - @Alias @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayBaseOffset, declClass = float[].class) + @Alias + @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayBaseOffset, declClass = float[].class) public static long FLOAT_ARRAY_BASE_OFFSET; - @Alias @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayIndexScale, declClass = float[].class) + + @Alias + @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayIndexScale, declClass = float[].class) public static long FLOAT_ARRAY_INDEX_SCALE; - @Alias @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayBaseOffset, declClass = double[].class) + @Alias + @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayBaseOffset, declClass = double[].class) public static long DOUBLE_ARRAY_BASE_OFFSET; - @Alias @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayIndexScale, declClass = double[].class) + + @Alias + @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayIndexScale, declClass = double[].class) public static long DOUBLE_ARRAY_INDEX_SCALE; - @Alias @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayBaseOffset, declClass = Object[].class) + @Alias + @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayBaseOffset, declClass = Object[].class) public static long OBJECT_ARRAY_BASE_OFFSET; - @Alias @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayIndexScale, declClass = Object[].class) + + @Alias + @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayIndexScale, declClass = Object[].class) public static long OBJECT_ARRAY_INDEX_SCALE; -} \ No newline at end of file +} diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/AkkaHttpPrometheusExporter.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/AkkaHttpPrometheusExporter.scala index 24bd492f3..717972893 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/AkkaHttpPrometheusExporter.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/AkkaHttpPrometheusExporter.scala @@ -31,9 +31,12 @@ import io.prometheus.client.exporter.common.TextFormat import scala.concurrent.Future /** - * Serves Prometheus metrics - */ -class AkkaHttpPrometheusExporter(metricsPort: Int, registry: CollectorRegistry = CollectorRegistry.defaultRegistry)(implicit system: ActorSystem, mat: Materializer) { + * Serves Prometheus metrics + */ +class AkkaHttpPrometheusExporter(metricsPort: Int, registry: CollectorRegistry = CollectorRegistry.defaultRegistry)( + implicit system: ActorSystem, + mat: Materializer +) { private[this] final val PrometheusContentType = ContentType.parse(TextFormat.CONTENT_TYPE_004).right.get @@ -56,7 +59,6 @@ class AkkaHttpPrometheusExporter(metricsPort: Int, registry: CollectorRegistry = } } - def start(): Future[Http.ServerBinding] = { + def start(): Future[Http.ServerBinding] = Http().bindAndHandle(routes, "0.0.0.0", metricsPort) - } -} \ No newline at end of file +} diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/CloudStateProxyMain.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/CloudStateProxyMain.scala index d05a95901..a551ab0cc 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/CloudStateProxyMain.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/CloudStateProxyMain.scala @@ -37,52 +37,56 @@ import scala.util.{Failure, Success} final class HealthCheckReady(system: ActorSystem) extends (() => Future[Boolean]) { 
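  // Readiness probe for the proxy: resolves the warm-up and server-manager actors and asks each
  // whether it is ready, bounded by the configured cloudstate.proxy.ready-timeout. Any failed or
  // timed-out check is treated as "not ready", and apply() only reports ready when all checks do.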
private[this] final val log = LoggerFactory.getLogger(getClass) - private[this] final val timeoutMs = system.settings.config.getConfig("cloudstate.proxy").getDuration("ready-timeout").toMillis.millis + private[this] final val timeoutMs = + system.settings.config.getConfig("cloudstate.proxy").getDuration("ready-timeout").toMillis.millis private[this] final implicit val ec = system.dispatcher private[this] final val serverManager = system.actorSelection("/user/server-manager-supervisor/server-manager") private[this] final val warmup = system.actorSelection("/user/state-manager-warm-up") private[this] final implicit val timeout = Timeout(timeoutMs) - private[this] final def check(name: String, selection: ActorSelection, msg: Any) = { - selection.resolveOne() + private[this] final def check(name: String, selection: ActorSelection, msg: Any) = + selection + .resolveOne() .flatMap(_ ? msg) .mapTo[Boolean] - .recover { case e => - log.debug(s"Error performing $name readiness check", e) - false + .recover { + case e => + log.debug(s"Error performing $name readiness check", e) + false } - } - override final def apply(): Future[Boolean] = { - Future.sequence(Seq( - check("warmup", warmup, Warmup.Ready), - check("server manager", serverManager, EntityDiscoveryManager.Ready) - )).map(_.reduce(_ && _)) - } + override final def apply(): Future[Boolean] = + Future + .sequence( + Seq( + check("warmup", warmup, Warmup.Ready), + check("server manager", serverManager, EntityDiscoveryManager.Ready) + ) + ) + .map(_.reduce(_ && _)) } final class HealthCheckLive(system: ActorSystem) extends (() => Future[Boolean]) { - override final def apply(): Future[Boolean] = { + override final def apply(): Future[Boolean] = Future.successful(true) - } } object CloudStateProxyMain { - final case class Configuration ( - devMode: Boolean, - backoffMin: FiniteDuration, - backoffMax: FiniteDuration, - backoffRandomFactor: Double, - metricsPort: Int - ) { + final case class Configuration( + devMode: Boolean, + backoffMin: FiniteDuration, + backoffMax: FiniteDuration, + backoffRandomFactor: Double, + metricsPort: Int + ) { validate() def this(config: Config) = { this( - devMode = config.getBoolean("dev-mode-enabled"), - backoffMin = config.getDuration("backoff.min").toMillis.millis, - backoffMax = config.getDuration("backoff.max").toMillis.millis, + devMode = config.getBoolean("dev-mode-enabled"), + backoffMin = config.getDuration("backoff.min").toMillis.millis, + backoffMax = config.getDuration("backoff.max").toMillis.millis, backoffRandomFactor = config.getDouble("backoff.random-factor"), - metricsPort = config.getInt("metrics-port") + metricsPort = config.getInt("metrics-port") ) } @@ -96,21 +100,21 @@ object CloudStateProxyMain { private val isGraalVM = sys.props.get("org.graalvm.nativeimage.imagecode").contains("runtime") /** - * Work around for https://github.com/oracle/graal/issues/1610. - * - * ThreadLocalRandom gets initialized with a static seed generator, from this generator all seeds for - * each thread are generated, but this gets computed at build time when compiling a native image, which - * means that you get the same sequence of seeds each time you run the native image, and one serious - * consequence of this is that every cluster node ends up with the same UID, and that causes big problems. - * We can't tell Graal not to initialize at build time because it's already loaded by Graal itself. - * So, we have to reset that field ourselves. - */ + * Work around for https://github.com/oracle/graal/issues/1610. 
+ * + * ThreadLocalRandom gets initialized with a static seed generator, from this generator all seeds for + * each thread are generated, but this gets computed at build time when compiling a native image, which + * means that you get the same sequence of seeds each time you run the native image, and one serious + * consequence of this is that every cluster node ends up with the same UID, and that causes big problems. + * We can't tell Graal not to initialize at build time because it's already loaded by Graal itself. + * So, we have to reset that field ourselves. + */ private def initializeThreadLocalRandom(): Unit = { // MurmurHash3 64 bit mixer to give an even distribution of seeds: // https://github.com/aappleby/smhasher/wiki/MurmurHash3 def mix64(z: Long): Long = { - val z1 = (z ^ (z >>> 33)) * 0xff51afd7ed558ccdL - val z2 = (z1 ^ (z1 >>> 33)) * 0xc4ceb9fe1a85ec53L + val z1 = (z ^ (z >>> 33)) * 0xFF51AFD7ED558CCDL + val z2 = (z1 ^ (z1 >>> 33)) * 0xC4CEB9FE1A85EC53L z2 ^ (z2 >>> 33) } @@ -120,11 +124,8 @@ object CloudStateProxyMain { field.get(null).asInstanceOf[AtomicLong].set(seed) } - - - def main(args: Array[String]): Unit = { + def main(args: Array[String]): Unit = start() - } def start(): ActorSystem = { // Must do this first, before anything uses ThreadLocalRandom @@ -175,15 +176,19 @@ object CloudStateProxyMain { } } - system.actorOf(BackoffSupervisor.props( - BackoffOpts.onFailure( - EntityDiscoveryManager.props(serverConfig), - childName = "server-manager", - minBackoff = appConfig.backoffMin, - maxBackoff = appConfig.backoffMax, - randomFactor = appConfig.backoffRandomFactor - )), "server-manager-supervisor") + system.actorOf( + BackoffSupervisor.props( + BackoffOpts.onFailure( + EntityDiscoveryManager.props(serverConfig), + childName = "server-manager", + minBackoff = appConfig.backoffMin, + maxBackoff = appConfig.backoffMax, + randomFactor = appConfig.backoffRandomFactor + ) + ), + "server-manager-supervisor" + ) system } -} \ No newline at end of file +} diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/ConcurrencyEnforcer.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/ConcurrencyEnforcer.scala index 426ee9355..a0a786578 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/ConcurrencyEnforcer.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/ConcurrencyEnforcer.scala @@ -23,47 +23,51 @@ import scala.collection.immutable.Queue import scala.concurrent.duration.{Deadline, FiniteDuration} /** - * This actor enforces the concurrency of requests going to the user code. - * - * Background reading necessary to understand before reading this: - * - * https://github.com/knative/serving/blob/master/docs/scaling/DEVELOPMENT.md - * - * The Autoscaler selects a desired concurrency level (though, this is currently not implemented and hardcoded at 1) - * based on CPU/memory/other resource usage of the pod. This is referred to as slow brain scaling. We need to enforce - * that concurrency level, and then also report metrics on outstanding requests to the user function. When the - * autoscaler sees the number of queued request go up, it scales the deployment. This is referred to as fast brain - * scaling. - * - * One challenge that we have is that not all our communication with the pod is request based - we also send init - * messages and events, and these messages don't send a reply, so there's no way to measure how long they take to - * process. 
We could potentially use backpressure to determine when they are processed, however it's more complex than - * that because a lot of the time in hydrating the events will come from loading the database, which we don't want to - * include in metrics. - * - * All this said, it's not as bad as it sounds. Currently we don't wait for events to be consumed by the user function - * before sending the command. If the user function is slow at consuming them, then the command will end up queuing - * behind the events, and the slow processing of events will cause the command to be delayed in being processed. So, - * our strategy is to just report metrics on command handling, and let event handling just happen. - */ + * This actor enforces the concurrency of requests going to the user code. + * + * Background reading necessary to understand before reading this: + * + * https://github.com/knative/serving/blob/master/docs/scaling/DEVELOPMENT.md + * + * The Autoscaler selects a desired concurrency level (though, this is currently not implemented and hardcoded at 1) + * based on CPU/memory/other resource usage of the pod. This is referred to as slow brain scaling. We need to enforce + * that concurrency level, and then also report metrics on outstanding requests to the user function. When the + * autoscaler sees the number of queued request go up, it scales the deployment. This is referred to as fast brain + * scaling. + * + * One challenge that we have is that not all our communication with the pod is request based - we also send init + * messages and events, and these messages don't send a reply, so there's no way to measure how long they take to + * process. We could potentially use backpressure to determine when they are processed, however it's more complex than + * that because a lot of the time in hydrating the events will come from loading the database, which we don't want to + * include in metrics. + * + * All this said, it's not as bad as it sounds. Currently we don't wait for events to be consumed by the user function + * before sending the command. If the user function is slow at consuming them, then the command will end up queuing + * behind the events, and the slow processing of events will cause the command to be delayed in being processed. So, + * our strategy is to just report metrics on command handling, and let event handling just happen. 
+ */ object ConcurrencyEnforcer { final case class Action(id: String, start: () => Unit) final case class ActionCompleted(id: String, timeNanos: Long) final case class ConcurrencyEnforcerSettings( - concurrency: Int, - actionTimeout: FiniteDuration, - cleanupPeriod: FiniteDuration + concurrency: Int, + actionTimeout: FiniteDuration, + cleanupPeriod: FiniteDuration ) private case object Tick extends DeadLetterSuppression - def props(settings: ConcurrencyEnforcerSettings, statsCollector: ActorRef): Props = Props(new ConcurrencyEnforcer(settings, statsCollector)) + def props(settings: ConcurrencyEnforcerSettings, statsCollector: ActorRef): Props = + Props(new ConcurrencyEnforcer(settings, statsCollector)) private final case class OutstandingAction(deadline: Deadline) } -class ConcurrencyEnforcer(settings: ConcurrencyEnforcerSettings, statsCollector: ActorRef) extends Actor with ActorLogging with Timers { +class ConcurrencyEnforcer(settings: ConcurrencyEnforcerSettings, statsCollector: ActorRef) + extends Actor + with ActorLogging + with Timers { import ConcurrencyEnforcer._ private[this] final var outstanding = Map.empty[String, OutstandingAction] @@ -103,13 +107,11 @@ class ConcurrencyEnforcer(settings: ConcurrencyEnforcerSettings, statsCollector: } } - private def reportCommand(action: Action) = { + private def reportCommand(action: Action) = statsCollector ! StatsCollector.CommandSent - } - private def reportReply(timeNanos: Long) = { + private def reportReply(timeNanos: Long) = statsCollector ! StatsCollector.ReplyReceived(timeNanos) - } private def completeAction(id: String, timeNanos: Long) = { reportReply(timeNanos) @@ -122,7 +124,7 @@ class ConcurrencyEnforcer(settings: ConcurrencyEnforcerSettings, statsCollector: } } - private def startAction(action: Action)= { + private def startAction(action: Action) = { if (outstanding.contains(action.id)) { log.warning("Action {} already outstanding?", action.id) } diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/EntityDiscoveryManager.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/EntityDiscoveryManager.scala index 157be340c..7199d93c6 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/EntityDiscoveryManager.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/EntityDiscoveryManager.scala @@ -24,7 +24,12 @@ import akka.pattern.pipe import akka.stream.Materializer import akka.http.scaladsl.{Http, HttpConnectionContext, UseHttp2} import akka.http.scaladsl.Http.ServerBinding -import akka.cluster.singleton.{ClusterSingletonManager, ClusterSingletonManagerSettings, ClusterSingletonProxy, ClusterSingletonProxySettings} +import akka.cluster.singleton.{ + ClusterSingletonManager, + ClusterSingletonManagerSettings, + ClusterSingletonProxy, + ClusterSingletonProxySettings +} import akka.grpc.GrpcClientSettings import com.google.protobuf.DescriptorProtos import com.google.protobuf.Descriptors.{FileDescriptor, ServiceDescriptor} @@ -35,50 +40,57 @@ import io.cloudstate.protocol.event_sourced.EventSourced import io.cloudstate.proxy.StatsCollector.StatsCollectorSettings import io.cloudstate.proxy.autoscaler.Autoscaler.ScalerFactory import io.cloudstate.proxy.ConcurrencyEnforcer.ConcurrencyEnforcerSettings -import io.cloudstate.proxy.autoscaler.{Autoscaler, AutoscalerSettings, ClusterMembershipFacadeImpl, KubernetesDeploymentScaler, NoAutoscaler, NoScaler} +import io.cloudstate.proxy.autoscaler.{ + Autoscaler, + AutoscalerSettings, + ClusterMembershipFacadeImpl, + KubernetesDeploymentScaler, + NoAutoscaler, + NoScaler +} import 
io.cloudstate.proxy.crdt.CrdtSupportFactory import io.cloudstate.proxy.eventsourced.EventSourcedSupportFactory import scala.concurrent.duration._ object EntityDiscoveryManager { - final case class Configuration ( - devMode: Boolean, - httpInterface: String, - httpPort: Int, - userFunctionInterface: String, - userFunctionPort: Int, - relayTimeout: Timeout, - relayOutputBufferSize: Int, - gracefulTerminationTimeout: Timeout, - passivationTimeout: Timeout, - numberOfShards: Int, - proxyParallelism: Int, - concurrencySettings: ConcurrencyEnforcerSettings, - statsCollectorSettings: StatsCollectorSettings, - journalEnabled: Boolean + final case class Configuration( + devMode: Boolean, + httpInterface: String, + httpPort: Int, + userFunctionInterface: String, + userFunctionPort: Int, + relayTimeout: Timeout, + relayOutputBufferSize: Int, + gracefulTerminationTimeout: Timeout, + passivationTimeout: Timeout, + numberOfShards: Int, + proxyParallelism: Int, + concurrencySettings: ConcurrencyEnforcerSettings, + statsCollectorSettings: StatsCollectorSettings, + journalEnabled: Boolean ) { validate() def this(config: Config) = { this( - devMode = config.getBoolean("dev-mode-enabled"), - httpInterface = config.getString("http-interface"), - httpPort = config.getInt("http-port"), - userFunctionInterface = config.getString("user-function-interface"), - userFunctionPort = config.getInt("user-function-port"), - relayTimeout = Timeout(config.getDuration("relay-timeout").toMillis.millis), - relayOutputBufferSize = config.getInt("relay-buffer-size"), + devMode = config.getBoolean("dev-mode-enabled"), + httpInterface = config.getString("http-interface"), + httpPort = config.getInt("http-port"), + userFunctionInterface = config.getString("user-function-interface"), + userFunctionPort = config.getInt("user-function-port"), + relayTimeout = Timeout(config.getDuration("relay-timeout").toMillis.millis), + relayOutputBufferSize = config.getInt("relay-buffer-size"), gracefulTerminationTimeout = Timeout(config.getDuration("graceful-termination-timeout").toMillis.millis), - passivationTimeout = Timeout(config.getDuration("passivation-timeout").toMillis.millis), - numberOfShards = config.getInt("number-of-shards"), - proxyParallelism = config.getInt("proxy-parallelism"), - concurrencySettings = ConcurrencyEnforcerSettings( - concurrency = config.getInt("container-concurrency"), + passivationTimeout = Timeout(config.getDuration("passivation-timeout").toMillis.millis), + numberOfShards = config.getInt("number-of-shards"), + proxyParallelism = config.getInt("proxy-parallelism"), + concurrencySettings = ConcurrencyEnforcerSettings( + concurrency = config.getInt("container-concurrency"), actionTimeout = config.getDuration("action-timeout").toMillis.millis, cleanupPeriod = config.getDuration("action-timeout-poll-period").toMillis.millis ), - statsCollectorSettings = new StatsCollectorSettings(config.getConfig("stats")), - journalEnabled = config.getBoolean("journal-enabled") + statsCollectorSettings = new StatsCollectorSettings(config.getConfig("stats")), + journalEnabled = config.getBoolean("journal-enabled") ) } @@ -101,17 +113,22 @@ object EntityDiscoveryManager { supportedEntityTypes = supportedEntityTypes ) - final case class ServableEntity(serviceName: String, serviceDescriptor: ServiceDescriptor, userFunctionTypeSupport: UserFunctionTypeSupport) + final case class ServableEntity(serviceName: String, + serviceDescriptor: ServiceDescriptor, + userFunctionTypeSupport: UserFunctionTypeSupport) } -class 
EntityDiscoveryManager(config: EntityDiscoveryManager.Configuration)(implicit mat: Materializer) extends Actor with ActorLogging { +class EntityDiscoveryManager(config: EntityDiscoveryManager.Configuration)(implicit mat: Materializer) + extends Actor + with ActorLogging { import context.system import context.dispatcher import EntityDiscoveryManager.Ready - private[this] final val clientSettings = GrpcClientSettings.connectToServiceAt(config.userFunctionInterface, config.userFunctionPort).withTls(false) - private[this] final val entityDiscoveryClient = EntityDiscoveryClient(clientSettings) - private[this] final val autoscaler = { + private[this] final val clientSettings = + GrpcClientSettings.connectToServiceAt(config.userFunctionInterface, config.userFunctionPort).withTls(false) + private[this] final val entityDiscoveryClient = EntityDiscoveryClient(clientSettings) + private[this] final val autoscaler = { val autoscalerSettings = AutoscalerSettings(system) if (autoscalerSettings.enabled) { val managerSettings = ClusterSingletonManagerSettings(system) @@ -122,27 +139,44 @@ class EntityDiscoveryManager(config: EntityDiscoveryManager.Configuration)(impli else factory.actorOf(KubernetesDeploymentScaler.props(autoscaler), "kubernetesDeploymentScaler") } - val singleton = context.actorOf(ClusterSingletonManager.props(Autoscaler.props(autoscalerSettings, - scalerFactory, new ClusterMembershipFacadeImpl(Cluster(context.system))), - terminationMessage = PoisonPill, managerSettings), "autoscaler") + val singleton = context.actorOf( + ClusterSingletonManager.props( + Autoscaler.props(autoscalerSettings, scalerFactory, new ClusterMembershipFacadeImpl(Cluster(context.system))), + terminationMessage = PoisonPill, + managerSettings + ), + "autoscaler" + ) - context.actorOf(ClusterSingletonProxy.props(singleton.path.toStringWithoutAddress, proxySettings), "autoscalerProxy") + context.actorOf(ClusterSingletonProxy.props(singleton.path.toStringWithoutAddress, proxySettings), + "autoscalerProxy") } else { context.actorOf(Props(new NoAutoscaler), "noAutoscaler") } } - private[this] final val statsCollector = context.actorOf(StatsCollector.props(config.statsCollectorSettings, autoscaler), "statsCollector") - private[this] final val concurrencyEnforcer = context.actorOf(ConcurrencyEnforcer.props(config.concurrencySettings, statsCollector), "concurrencyEnforcer") + private[this] final val statsCollector = + context.actorOf(StatsCollector.props(config.statsCollectorSettings, autoscaler), "statsCollector") + private[this] final val concurrencyEnforcer = + context.actorOf(ConcurrencyEnforcer.props(config.concurrencySettings, statsCollector), "concurrencyEnforcer") private val supportFactories: Map[String, UserFunctionTypeSupportFactory] = Map( - Crdt.name -> new CrdtSupportFactory(context.system, config, entityDiscoveryClient, clientSettings, - concurrencyEnforcer = concurrencyEnforcer, statsCollector = statsCollector) - ) ++ { - if (config.journalEnabled) - Map(EventSourced.name -> new EventSourcedSupportFactory(context.system, config, clientSettings, - concurrencyEnforcer = concurrencyEnforcer, statsCollector = statsCollector)) - else Map.empty - } + Crdt.name -> new CrdtSupportFactory(context.system, + config, + entityDiscoveryClient, + clientSettings, + concurrencyEnforcer = concurrencyEnforcer, + statsCollector = statsCollector) + ) ++ { + if (config.journalEnabled) + Map( + EventSourced.name -> new EventSourcedSupportFactory(context.system, + config, + clientSettings, + concurrencyEnforcer = 
concurrencyEnforcer, + statsCollector = statsCollector) + ) + else Map.empty + } entityDiscoveryClient.discover(EntityDiscoveryManager.proxyInfo(supportFactories.keys.toSeq)) pipeTo self @@ -159,17 +193,24 @@ class EntityDiscoveryManager(config: EntityDiscoveryManager.Configuration)(impli } val entities = spec.entities.map { entity => - - val serviceDescriptor = descriptors.collectFirst(Function.unlift(descriptor => extractService(entity.serviceName, descriptor))) + val serviceDescriptor = descriptors + .collectFirst(Function.unlift(descriptor => extractService(entity.serviceName, descriptor))) .getOrElse(throw EntityDiscoveryException(s"Service [${entity.serviceName}] not found in descriptors!")) supportFactories.get(entity.entityType) match { - case Some(factory) => EntityDiscoveryManager.ServableEntity( - entity.serviceName, serviceDescriptor, factory.build(entity, serviceDescriptor)) + case Some(factory) => + EntityDiscoveryManager.ServableEntity(entity.serviceName, + serviceDescriptor, + factory.build(entity, serviceDescriptor)) case None if entity.entityType == EventSourced.name => - throw EntityDiscoveryException(s"Service [${entity.serviceName}] has declared an event sourced entity, however, this proxy does not have a configured store, or is using a store that doesn't support event sourced journals. A store that supports journals must be configured in this stateful services resource if event sourcing is to be used.") + throw EntityDiscoveryException( + s"Service [${entity.serviceName}] has declared an event sourced entity, however, this proxy does not have a configured store, or is using a store that doesn't support event sourced journals. A store that supports journals must be configured in this stateful services resource if event sourcing is to be used." + ) case None => - throw EntityDiscoveryException(s"Service [${entity.serviceName}] has declared an unsupported entity type [${entity.entityType}]. Supported types are ${supportFactories.keys.mkString(",")}") + throw EntityDiscoveryException( + s"Service [${entity.serviceName}] has declared an unsupported entity type [${entity.entityType}]. Supported types are ${supportFactories.keys + .mkString(",")}" + ) } } @@ -194,14 +235,12 @@ class EntityDiscoveryManager(config: EntityDiscoveryManager.Configuration)(impli context.become(binding) - } catch { case e @ EntityDiscoveryException(message) => entityDiscoveryClient.reportError(UserFunctionError(message)) throw e } - case Ready => sender ! false case Status.Failure(cause) => // Failure to load the entity spec is not fatal, simply crash and let the backoff supervisor restart us @@ -247,9 +286,8 @@ class EntityDiscoveryManager(config: EntityDiscoveryManager.Configuration)(impli case Ready => sender ! true } - override final def postStop(): Unit = { + override final def postStop(): Unit = entityDiscoveryClient.close() - } } case class EntityDiscoveryException(message: String) extends RuntimeException(message) diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/FileDescriptorBuilder.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/FileDescriptorBuilder.scala index 7a196f2ac..fc4f16fd7 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/FileDescriptorBuilder.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/FileDescriptorBuilder.scala @@ -8,27 +8,32 @@ import scala.collection.JavaConverters._ object FileDescriptorBuilder { /** - * In order to build a FileDescriptor, you need to build and pass its dependencies first. 
This walks through - * a FileDescriptorSet, building each descriptor, and building dependencies along the way, caching the results so - * when it comes to building it next time, the cached result will be used. - */ + * In order to build a FileDescriptor, you need to build and pass its dependencies first. This walks through + * a FileDescriptorSet, building each descriptor, and building dependencies along the way, caching the results so + * when it comes to building it next time, the cached result will be used. + */ def build(descriptorSet: DescriptorProtos.FileDescriptorSet): Seq[FileDescriptor] = { val allProtos = descriptorSet.getFileList.asScala.map { desc => desc.getName -> desc }.toMap - descriptorSet.getFileList.asScala.foldLeft(Map.empty[String, FileDescriptor]) { (alreadyBuilt, desc) => - buildDescriptorWithDependencies(desc, allProtos, Nil, alreadyBuilt) - }.values.toSeq + descriptorSet.getFileList.asScala + .foldLeft(Map.empty[String, FileDescriptor]) { (alreadyBuilt, desc) => + buildDescriptorWithDependencies(desc, allProtos, Nil, alreadyBuilt) + } + .values + .toSeq } private def buildDescriptorWithDependencies(desc: DescriptorProtos.FileDescriptorProto, - allProtos: Map[String, DescriptorProtos.FileDescriptorProto], beingBuilt: List[String], - alreadyBuilt: Map[String, FileDescriptor]): Map[String, FileDescriptor] = { - + allProtos: Map[String, DescriptorProtos.FileDescriptorProto], + beingBuilt: List[String], + alreadyBuilt: Map[String, FileDescriptor]): Map[String, FileDescriptor] = if (beingBuilt.contains(desc.getName)) { // todo - technically we could support circular dependencies by building with allowing unknown dependencies first, // then rebuilding once the circular dependencies have been built. Not sure how protoc handles this one. 
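    // A minimal sketch of the dependency-first rule this method automates (not part of the
    // original source; protosByName and the file names are hypothetical):
    //
    //   val common  = FileDescriptor.buildFrom(protosByName("common.proto"), Array.empty[FileDescriptor], true)
    //   val service = FileDescriptor.buildFrom(protosByName("service.proto"), Array(common), true)
    //
    // buildDescriptorWithDependencies performs that ordering recursively over the whole
    // FileDescriptorSet, caching each built FileDescriptor in alreadyBuilt and tracking the
    // current build path in beingBuilt so that cycles are detected here instead of recursing
    // forever.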
- throw EntityDiscoveryException(s"Circular dependency detected in entity spec descriptor: [${desc.getName}] -> ${beingBuilt.map(n => s"[$n]").mkString(" -> ")}") + throw EntityDiscoveryException( + s"Circular dependency detected in entity spec descriptor: [${desc.getName}] -> ${beingBuilt.map(n => s"[$n]").mkString(" -> ")}" + ) } else if (alreadyBuilt.contains(desc.getName)) { alreadyBuilt } else { @@ -47,11 +52,12 @@ object FileDescriptorBuilder { case depDesc if alreadyBuiltWithDependencies.contains(depDesc) => alreadyBuiltWithDependencies(depDesc) case notFound => - throw EntityDiscoveryException(s"Descriptor dependency [$notFound] not found, dependency path: ${currentBeingBuilt.map(n => s"[$n]").mkString(" -> ")}") + throw EntityDiscoveryException( + s"Descriptor dependency [$notFound] not found, dependency path: ${currentBeingBuilt.map(n => s"[$n]").mkString(" -> ")}" + ) } alreadyBuiltWithDependencies + (desc.getName -> FileDescriptor.buildFrom(desc, dependencies.toArray, true)) } - } } diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/HttpApi.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/HttpApi.scala index 190d50d7c..f5cb161dc 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/HttpApi.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/HttpApi.scala @@ -20,7 +20,17 @@ import scala.annotation.tailrec import scala.collection.JavaConverters._ import scala.concurrent.{ExecutionContext, Future} import akka.ConfigurationException -import akka.http.scaladsl.model.{ContentTypes, HttpEntity, HttpMethod, HttpMethods, HttpRequest, HttpResponse, IllegalRequestException, RequestEntityAcceptance, StatusCodes} +import akka.http.scaladsl.model.{ + ContentTypes, + HttpEntity, + HttpMethod, + HttpMethods, + HttpRequest, + HttpResponse, + IllegalRequestException, + RequestEntityAcceptance, + StatusCodes +} import akka.http.scaladsl.model.Uri import akka.http.scaladsl.model.Uri.Path import akka.http.scaladsl.unmarshalling.Unmarshal @@ -36,7 +46,14 @@ import com.google.protobuf.{descriptor => ScalaPBDescriptorProtos} import com.google.protobuf.any.{Any => ProtobufAny} import com.google.protobuf.util.JsonFormat import com.google.protobuf.Descriptors.FieldDescriptor.JavaType -import java.lang.{Boolean => JBoolean, Double => JDouble, Float => JFloat, Integer => JInteger, Long => JLong, Short => JShort} +import java.lang.{ + Boolean => JBoolean, + Double => JDouble, + Float => JFloat, + Integer => JInteger, + Long => JLong, + Short => JShort +} import com.google.protobuf.{ListValue, Struct, Value} import io.cloudstate.protocol.entity.{ClientAction, EntityDiscovery, Failure, Reply, UserFunctionError} @@ -49,19 +66,29 @@ import io.cloudstate.proxy.entity.{UserFunctionCommand, UserFunctionReply} // https://github.com/googleapis/googleapis/blob/master/google/api/annotations.proto object HttpApi { final val ParseShort: String => Option[JShort] = - s => try Option(JShort.valueOf(s)) catch { case _: NumberFormatException => None } + s => + try Option(JShort.valueOf(s)) + catch { case _: NumberFormatException => None } final val ParseInt: String => Option[JInteger] = - s => try Option(JInteger.valueOf(s)) catch { case _: NumberFormatException => None } + s => + try Option(JInteger.valueOf(s)) + catch { case _: NumberFormatException => None } final val ParseLong: String => Option[JLong] = - s => try Option(JLong.valueOf(s)) catch { case _: NumberFormatException => None } + s => + try Option(JLong.valueOf(s)) + catch { case _: NumberFormatException => None } final val ParseFloat: 
String => Option[JFloat] = - s => try Option(JFloat.valueOf(s)) catch { case _: NumberFormatException => None } + s => + try Option(JFloat.valueOf(s)) + catch { case _: NumberFormatException => None } final val ParseDouble: String => Option[JDouble] = - s => try Option(JDouble.valueOf(s)) catch { case _: NumberFormatException => None } + s => + try Option(JDouble.valueOf(s)) + catch { case _: NumberFormatException => None } final val ParseString: String => Option[String] = s => Option(s) @@ -71,9 +98,9 @@ object HttpApi { final val ParseBoolean: String => Option[JBoolean] = _.toLowerCase match { - case "true" => someJTrue + case "true" => someJTrue case "false" => someJFalse - case _ => None + case _ => None } // Reads a rfc2045 encoded Base64 string @@ -82,15 +109,15 @@ object HttpApi { final def suitableParserFor(field: FieldDescriptor)(whenIllegal: String => Nothing): String => Option[Any] = field.getJavaType match { - case JavaType.BOOLEAN => ParseBoolean + case JavaType.BOOLEAN => ParseBoolean case JavaType.BYTE_STRING => ParseBytes - case JavaType.DOUBLE => ParseDouble - case JavaType.ENUM => whenIllegal("Enum path parameters not supported!") - case JavaType.FLOAT => ParseFloat - case JavaType.INT => ParseInt - case JavaType.LONG => ParseLong - case JavaType.MESSAGE => whenIllegal("Message path parameters not supported!") - case JavaType.STRING => ParseString + case JavaType.DOUBLE => ParseDouble + case JavaType.ENUM => whenIllegal("Enum path parameters not supported!") + case JavaType.FLOAT => ParseFloat + case JavaType.INT => ParseInt + case JavaType.LONG => ParseLong + case JavaType.MESSAGE => whenIllegal("Message path parameters not supported!") + case JavaType.STRING => ParseString } // We use this to indicate problems with the configuration of the routes @@ -98,45 +125,40 @@ object HttpApi { // We use this to signal to the requestor that there's something wrong with the request private final val requestError: String => Nothing = s => throw IllegalRequestException(StatusCodes.BadRequest, s) // This is so that we can reuse path comparisons for path value extraction - private final val nofx: (Option[Any], FieldDescriptor) => Unit = (_,_) => () + private final val nofx: (Option[Any], FieldDescriptor) => Unit = (_, _) => () // This is used to support the "*" custom pattern private final val ANY_METHOD = HttpMethod.custom(name = "ANY", - safe = false, - idempotent = false, - requestEntityAcceptance = RequestEntityAcceptance.Tolerated) + safe = false, + idempotent = false, + requestEntityAcceptance = RequestEntityAcceptance.Tolerated) // A route which will not match anything private final val NoMatch = PartialFunction.empty[HttpRequest, Future[HttpResponse]] - final class HttpEndpoint(final val methDesc: MethodDescriptor, - final val rule: HttpRule, - final val userFunctionRouter: UserFunctionRouter, - final val entityDiscovery: EntityDiscovery)(implicit sys: ActorSystem, mat: Materializer, ec: ExecutionContext) extends PartialFunction[HttpRequest, Future[HttpResponse]] { + final class HttpEndpoint( + final val methDesc: MethodDescriptor, + final val rule: HttpRule, + final val userFunctionRouter: UserFunctionRouter, + final val entityDiscovery: EntityDiscovery + )(implicit sys: ActorSystem, mat: Materializer, ec: ExecutionContext) + extends PartialFunction[HttpRequest, Future[HttpResponse]] { private[this] final val log = Logging(sys, rule.pattern.toString) // TODO use other name? 
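    // Each HttpEndpoint compiles a single google.api.http rule into an HTTP method plus a URI
    // path template (extractAndValidate below), together with a JSON parser/printer pair that
    // transcodes between HTTP/JSON payloads and the protobuf messages of the target gRPC method.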
private[this] final val (methodPattern, urlPattern, bodyDescriptor, responseBodyDescriptor) = extractAndValidate() - private[this] final val jsonParser = JsonFormat. - parser. - usingTypeRegistry(JsonFormat.TypeRegistry. - newBuilder. - add(bodyDescriptor). - build()) - //ignoringUnknownFields(). - //usingRecursionLimit(…). - - private[this] final val jsonPrinter = JsonFormat. - printer. - usingTypeRegistry(JsonFormat.TypeRegistry. - newBuilder. - add(methDesc.getOutputType). - build()). - includingDefaultValueFields(). - omittingInsignificantWhitespace() - //printingEnumsAsInts() // If you enable this, you need to fix the output for responseBody as well - //preservingProtoFieldNames(). // If you enable this, you need to fix the output for responseBody structs as well - //sortingMapKeys(). + private[this] final val jsonParser = + JsonFormat.parser.usingTypeRegistry(JsonFormat.TypeRegistry.newBuilder.add(bodyDescriptor).build()) + //ignoringUnknownFields(). + //usingRecursionLimit(…). + + private[this] final val jsonPrinter = JsonFormat.printer + .usingTypeRegistry(JsonFormat.TypeRegistry.newBuilder.add(methDesc.getOutputType).build()) + .includingDefaultValueFields() + .omittingInsignificantWhitespace() + //printingEnumsAsInts() // If you enable this, you need to fix the output for responseBody as well + //preservingProtoFieldNames(). // If you enable this, you need to fix the output for responseBody structs as well + //sortingMapKeys(). private[this] final val expectedReplyTypeUrl = Serve.AnyTypeUrlHostName + methDesc.getOutputType.getFullName @@ -148,16 +170,17 @@ object HttpApi { // Validate pattern val (mp, up) = { - import HttpRule.Pattern.{Empty, Get, Put, Post, Delete, Patch, Custom} - import HttpMethods.{GET, PUT, POST, DELETE, PATCH} + import HttpRule.Pattern.{Custom, Delete, Empty, Get, Patch, Post, Put} + import HttpMethods.{DELETE, GET, PATCH, POST, PUT} def validPath(pattern: String): Path = { val path = Uri.Path(pattern) - if (!path.startsWithSlash) configError(s"Configured pattern [$pattern] does not start with slash") // FIXME better error description + if (!path.startsWithSlash) + configError(s"Configured pattern [$pattern] does not start with slash") // FIXME better error description else { var p = path var found = Set[String]() - while(!p.isEmpty) { + while (!p.isEmpty) { p.head match { case '/' => case vbl: String if vbl.head == '{' && vbl.last == '}' => @@ -166,10 +189,13 @@ object HttpApi { lookupFieldByName(methDesc.getInputType, variable) match { case null => false case field => - if (field.isRepeated) configError(s"Repeated parameters [$field] are not allowed as path variables") - else if (field.isMapField) configError(s"Map parameters [$field] are not allowed as path variables") + if (field.isRepeated) + configError(s"Repeated parameters [$field] are not allowed as path variables") + else if (field.isMapField) + configError(s"Map parameters [$field] are not allowed as path variables") else if (suitableParserFor(field)(configError) == null) () // Can't really happen - else if (found.contains(variable)) configError(s"Path parameter [$variable] occurs more than once") + else if (found.contains(variable)) + configError(s"Path parameter [$variable] occurs more than once") else found += variable // Keep track of the variables we've seen so far } case _ => // path element, ignore @@ -181,15 +207,16 @@ object HttpApi { } rule.pattern match { - case Empty => configError(s"Pattern missing for rule [$rule]!") // TODO improve error message - case p @ Get(pattern) => (GET, 
validPath(pattern)) - case p @ Put(pattern) => (PUT, validPath(pattern)) - case p @ Post(pattern) => (POST, validPath(pattern)) - case p @ Delete(pattern) => (DELETE, validPath(pattern)) - case p @ Patch(pattern) => (PATCH, validPath(pattern)) - case p @ Custom(chp) => - if (chp.kind == "*") (ANY_METHOD, validPath(chp.path)) // FIXME is "path" the same as "pattern" for the other kinds? Is an empty kind valid? - else configError(s"Only Custom patterns with [*] kind supported but [${chp.kind}] found!") + case Empty => configError(s"Pattern missing for rule [$rule]!") // TODO improve error message + case p @ Get(pattern) => (GET, validPath(pattern)) + case p @ Put(pattern) => (PUT, validPath(pattern)) + case p @ Post(pattern) => (POST, validPath(pattern)) + case p @ Delete(pattern) => (DELETE, validPath(pattern)) + case p @ Patch(pattern) => (PATCH, validPath(pattern)) + case p @ Custom(chp) => + if (chp.kind == "*") + (ANY_METHOD, validPath(chp.path)) // FIXME is "path" the same as "pattern" for the other kinds? Is an empty kind valid? + else configError(s"Only Custom patterns with [*] kind supported but [${chp.kind}] found!") } } @@ -220,7 +247,10 @@ object HttpApi { case "" => None case fieldName => lookupFieldByName(methDesc.getOutputType, fieldName) match { - case null => configError(s"Response body field [$fieldName] does not exist on type [${methDesc.getOutputType.getFullName}]") + case null => + configError( + s"Response body field [$fieldName] does not exist on type [${methDesc.getOutputType.getFullName}]" + ) case field => Some(field) } } @@ -232,11 +262,13 @@ object HttpApi { } // TODO support more advanced variable declarations: x=*, x=**, x=/foo/** etc? - @tailrec private[this] final def pathMatches(patPath: Path, reqPath: Path, effect: (Option[Any], FieldDescriptor) => Unit): Boolean = + @tailrec private[this] final def pathMatches(patPath: Path, + reqPath: Path, + effect: (Option[Any], FieldDescriptor) => Unit): Boolean = if (patPath.isEmpty && reqPath.isEmpty) true else if (patPath.isEmpty || reqPath.isEmpty) false else { - if(log.isDebugEnabled) + if (log.isDebugEnabled) log.debug((if (effect eq nofx) "Matching: " else "Extracting: ") + patPath.head + " " + reqPath.head) val segmentMatch = (patPath.head, reqPath.head) match { case ('/', '/') => true @@ -257,8 +289,8 @@ object HttpApi { @tailrec private[this] final def lookupFieldByPath(desc: Descriptor, selector: String): FieldDescriptor = Names.splitNext(selector) match { - case ("", "") => null - case (fieldName, "") => lookupFieldByName(desc, fieldName) + case ("", "") => null + case (fieldName, "") => lookupFieldByName(desc, fieldName) case (fieldName, next) => val field = lookupFieldByName(desc, fieldName) if (field == null) null @@ -270,32 +302,38 @@ object HttpApi { private[this] final def lookupFieldByName(desc: Descriptor, selector: String): FieldDescriptor = desc.findFieldByName(selector) // TODO potentially start supporting path-like selectors with maximum nesting level? 
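pathMatches above compares the configured pattern path and the request path segment by segment: literal segments must match exactly, while {variable} segments match any value and hand it to the effect callback (nofx when only testing whether a route applies, a field setter when extracting path parameters). A simplified, self-contained sketch of the same idea over plain string segments, assuming lists of strings rather than Akka's Uri.Path (all names here are illustrative):

object PathTemplateSketch {
  // True when every pattern segment matches; {name} segments are reported via onCapture.
  def matches(pattern: List[String], path: List[String], onCapture: (String, String) => Unit): Boolean =
    (pattern, path) match {
      case (Nil, Nil) => true
      case (p :: pt, v :: vt) if p.startsWith("{") && p.endsWith("}") =>
        onCapture(p.substring(1, p.length - 1), v)
        matches(pt, vt, onCapture)
      case (p :: pt, v :: vt) if p == v =>
        matches(pt, vt, onCapture)
      case _ => false
    }

  def main(args: Array[String]): Unit = {
    val captured = scala.collection.mutable.Map.empty[String, String]
    val ok = matches(
      List("v1", "carts", "{cart_id}", "items"),
      List("v1", "carts", "abc-123", "items"),
      (name, value) => captured.update(name, value)
    )
    println(s"matched=$ok captured=$captured") // matched=true captured=Map(cart_id -> abc-123)
  }
}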
- private[this] final def parseRequestParametersInto(query: Map[String, List[String]], inputBuilder: DynamicMessage.Builder): Unit = { + private[this] final def parseRequestParametersInto(query: Map[String, List[String]], + inputBuilder: DynamicMessage.Builder): Unit = query.foreach { case (selector, values) => if (values.nonEmpty) { lookupFieldByPath(methDesc.getInputType, selector) match { case null => requestError("Query parameter [$selector] refers to non-existant field") - case field if field.getMessageType != null => requestError("Query parameter [$selector] refers to a message type") // FIXME validate assumption that this is prohibited - case field if !field.isRepeated && values.size > 1 => requestError("Multiple values sent for non-repeated field by query parameter [$selector]") + case field if field.getMessageType != null => + requestError("Query parameter [$selector] refers to a message type") // FIXME validate assumption that this is prohibited + case field if !field.isRepeated && values.size > 1 => + requestError("Multiple values sent for non-repeated field by query parameter [$selector]") case field => // FIXME verify that we can set nested fields from the inputBuilder type val x = suitableParserFor(field)(requestError) if (field.isRepeated) { - values foreach { - v => inputBuilder.addRepeatedField(field, x(v).getOrElse(requestError("Malformed Query parameter [$selector]"))) + values foreach { v => + inputBuilder.addRepeatedField(field, + x(v).getOrElse(requestError("Malformed Query parameter [$selector]"))) } - } else inputBuilder.setField(field, x(values.head).getOrElse(requestError("Malformed Query parameter [$selector]"))) + } else + inputBuilder.setField(field, + x(values.head).getOrElse(requestError("Malformed Query parameter [$selector]"))) } } // Ignore empty values } - } private[this] final def parsePathParametersInto(requestPath: Path, inputBuilder: DynamicMessage.Builder): Unit = - pathMatches(urlPattern, requestPath, (value, field) => - inputBuilder.setField(field, value.getOrElse(requestError("Path contains value of wrong type!"))) - ) + pathMatches(urlPattern, + requestPath, + (value, field) => + inputBuilder.setField(field, value.getOrElse(requestError("Path contains value of wrong type!")))) - final def parseCommand(req: HttpRequest): Future[UserFunctionCommand] = { + final def parseCommand(req: HttpRequest): Future[UserFunctionCommand] = if (rule.body.nonEmpty && req.entity.contentType != ContentTypes.`application/json`) { Future.failed(IllegalRequestException(StatusCodes.BadRequest, "Content-type must be application/json!")) } else { @@ -307,53 +345,60 @@ object HttpApi { parsePathParametersInto(req.uri.path, inputBuilder) Future.successful(createCommand(inputBuilder.build)) case "*" => // Iff * body rule, then no query parameters, and only fields not mapped in path variables - Unmarshal(req.entity).to[String].map(str => { - jsonParser.merge(str, inputBuilder) - parsePathParametersInto(req.uri.path, inputBuilder) - createCommand(inputBuilder.build) - }) + Unmarshal(req.entity) + .to[String] + .map(str => { + jsonParser.merge(str, inputBuilder) + parsePathParametersInto(req.uri.path, inputBuilder) + createCommand(inputBuilder.build) + }) case fieldName => // Iff fieldName body rule, then all parameters not mapped in path variables - Unmarshal(req.entity).to[String].map(str => { - val subField = lookupFieldByName(methDesc.getInputType, fieldName) - val subInputBuilder = DynamicMessage.newBuilder(subField.getMessageType) - jsonParser.merge(str, subInputBuilder) 
- parseRequestParametersInto(req.uri.query().toMultiMap, inputBuilder) - parsePathParametersInto(req.uri.path, inputBuilder) - inputBuilder.setField(subField, subInputBuilder.build()) - createCommand(inputBuilder.build) - }) + Unmarshal(req.entity) + .to[String] + .map(str => { + val subField = lookupFieldByName(methDesc.getInputType, fieldName) + val subInputBuilder = DynamicMessage.newBuilder(subField.getMessageType) + jsonParser.merge(str, subInputBuilder) + parseRequestParametersInto(req.uri.query().toMultiMap, inputBuilder) + parsePathParametersInto(req.uri.path, inputBuilder) + inputBuilder.setField(subField, subInputBuilder.build()) + createCommand(inputBuilder.build) + }) } } - } override final def isDefinedAt(req: HttpRequest): Boolean = (methodPattern == ANY_METHOD || req.method == methodPattern) && pathMatches(urlPattern, req.uri.path, nofx) override final def apply(req: HttpRequest): Future[HttpResponse] = - parseCommand(req). - flatMap(command => sendCommand(command).map(createResponse)). - recover { - case ire: IllegalRequestException => HttpResponse(ire.status.intValue, entity = ire.status.reason) - } + parseCommand(req).flatMap(command => sendCommand(command).map(createResponse)).recover { + case ire: IllegalRequestException => HttpResponse(ire.status.intValue, entity = ire.status.reason) + } private[this] final def debugMsg(msg: DynamicMessage, preamble: String): Unit = - if(log.isDebugEnabled) - log.debug(s"$preamble: ${msg}${msg.getAllFields().asScala.map(f => s"\n\r * Request Field: [${f._1.getFullName}] = [${f._2}]").mkString}") + if (log.isDebugEnabled) + log.debug( + s"$preamble: ${msg}${msg.getAllFields().asScala.map(f => s"\n\r * Request Field: [${f._1.getFullName}] = [${f._2}]").mkString}" + ) private[this] final def createCommand(command: DynamicMessage): UserFunctionCommand = { debugMsg(command, "Got request") UserFunctionCommand( - name = methDesc.getName, - payload = Some(ProtobufAny(typeUrl = Serve.AnyTypeUrlHostName + methDesc.getInputType.getFullName, value = command.toByteString)) - ) + name = methDesc.getName, + payload = Some( + ProtobufAny(typeUrl = Serve.AnyTypeUrlHostName + methDesc.getInputType.getFullName, + value = command.toByteString) + ) + ) } - private[this] final def sendCommand(command: UserFunctionCommand): Future[DynamicMessage] = { + private[this] final def sendCommand(command: UserFunctionCommand): Future[DynamicMessage] = userFunctionRouter.handleUnary(methDesc.getService.getFullName, command).map { reply => reply.clientAction match { case Some(ClientAction(ClientAction.Action.Reply(Reply(Some(payload))))) => if (payload.typeUrl != expectedReplyTypeUrl) { - val msg = s"${methDesc.getFullName}: Expected reply type_url to be [$expectedReplyTypeUrl] but was [${payload.typeUrl}]." + val msg = + s"${methDesc.getFullName}: Expected reply type_url to be [$expectedReplyTypeUrl] but was [${payload.typeUrl}]." log.warning(msg) entityDiscovery.reportError(UserFunctionError("Warning: " + msg)) } @@ -369,37 +414,40 @@ object HttpApi { throw new Exception(msg) } } - } // FIXME Devise other way of supporting responseBody, this is waaay too costly and unproven // This method converts an arbitrary type to something which can be represented as JSON. 
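As the comment notes, the responseBody conversion that follows maps each protobuf JavaType onto the matching com.google.protobuf.Value variant: booleans become bool values, every numeric type is widened to the single JSON number type, messages recurse into struct values, and repeated fields become list values. A small standalone illustration of the numeric and list cases, assuming protobuf-java is on the classpath (the object and method names are illustrative):

import com.google.protobuf.{ListValue, Value}

object ResponseBodySketch {
  // JSON has only one number type, so ints, longs and floats are all carried as doubles.
  def numberValue(d: Double): Value = Value.newBuilder.setNumberValue(d).build()

  def listValue(ds: Seq[Double]): Value = {
    val builder = ListValue.newBuilder
    ds.foreach(d => builder.addValues(numberValue(d)))
    Value.newBuilder.setListValue(builder).build()
  }

  def main(args: Array[String]): Unit = {
    println(numberValue(42L.toDouble)) // number_value: 42.0
    println(listValue(Seq(1.0, 2.5)))  // list_value { values { number_value: 1.0 } values { number_value: 2.5 } }
  }
}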
- private[this] final def responseBody(jType: JavaType, value: AnyRef, repeated: Boolean): com.google.protobuf.Value = { + private[this] final def responseBody(jType: JavaType, + value: AnyRef, + repeated: Boolean): com.google.protobuf.Value = { val result = if (repeated) { Value.newBuilder.setListValue( - ListValue. - newBuilder. - addAllValues( - value.asInstanceOf[java.lang.Iterable[AnyRef]].asScala.map(v => responseBody(jType, v, false)).asJava - ) + ListValue.newBuilder.addAllValues( + value.asInstanceOf[java.lang.Iterable[AnyRef]].asScala.map(v => responseBody(jType, v, false)).asJava + ) ) } else { val b = Value.newBuilder jType match { - case JavaType.BOOLEAN => b.setBoolValue(value.asInstanceOf[JBoolean]) + case JavaType.BOOLEAN => b.setBoolValue(value.asInstanceOf[JBoolean]) case JavaType.BYTE_STRING => b.setStringValueBytes(value.asInstanceOf[ProtobufByteString]) - case JavaType.DOUBLE => b.setNumberValue(value.asInstanceOf[JDouble]) - case JavaType.ENUM => b.setStringValue(value.asInstanceOf[EnumValueDescriptor].getName) // Switch to getNumber if enabling printingEnumsAsInts in the JSON Printer - case JavaType.FLOAT => b.setNumberValue(value.asInstanceOf[JFloat].toDouble) - case JavaType.INT => b.setNumberValue(value.asInstanceOf[JInteger].toDouble) - case JavaType.LONG => b.setNumberValue(value.asInstanceOf[JLong].toDouble) - case JavaType.MESSAGE => + case JavaType.DOUBLE => b.setNumberValue(value.asInstanceOf[JDouble]) + case JavaType.ENUM => + b.setStringValue(value.asInstanceOf[EnumValueDescriptor].getName) // Switch to getNumber if enabling printingEnumsAsInts in the JSON Printer + case JavaType.FLOAT => b.setNumberValue(value.asInstanceOf[JFloat].toDouble) + case JavaType.INT => b.setNumberValue(value.asInstanceOf[JInteger].toDouble) + case JavaType.LONG => b.setNumberValue(value.asInstanceOf[JLong].toDouble) + case JavaType.MESSAGE => val sb = Struct.newBuilder - value.asInstanceOf[MessageOrBuilder].getAllFields.forEach( - (k,v) => sb.putFields(k.getJsonName, responseBody(k.getJavaType, v, k.isRepeated)) //Switch to getName if enabling preservingProtoFieldNames in the JSON Printer - ) + value + .asInstanceOf[MessageOrBuilder] + .getAllFields + .forEach( + (k, v) => sb.putFields(k.getJsonName, responseBody(k.getJavaType, v, k.isRepeated)) //Switch to getName if enabling preservingProtoFieldNames in the JSON Printer + ) b.setStructValue(sb) - case JavaType.STRING => b.setStringValue(value.asInstanceOf[String]) + case JavaType.STRING => b.setStringValue(value.asInstanceOf[String]) } } result.build() @@ -408,7 +456,7 @@ object HttpApi { private[this] final def createResponse(response: DynamicMessage): HttpResponse = { val output = responseBodyDescriptor match { - case None => + case None => response case Some(field) => response.getField(field) match { @@ -420,23 +468,32 @@ object HttpApi { } } - /** - * ScalaPB doesn't do this conversion for us unfortunately. - * By doing it, we can use HttpProto.entityKey.get() to read the entity key nicely. - */ + /** + * ScalaPB doesn't do this conversion for us unfortunately. + * By doing it, we can use HttpProto.entityKey.get() to read the entity key nicely. 
+ */ private[this] final def convertMethodOptions(method: MethodDescriptor): ScalaPBDescriptorProtos.MethodOptions = - ScalaPBDescriptorProtos.MethodOptions.fromJavaProto(method.toProto.getOptions).withUnknownFields( - scalapb.UnknownFieldSet(method.getOptions.getUnknownFields.asMap.asScala.map { - case (idx, f) => idx.toInt -> scalapb.UnknownFieldSet.Field( - varint = f.getVarintList.asScala.map(_.toLong), - fixed64 = f.getFixed64List.asScala.map(_.toLong), - fixed32 = f.getFixed32List.asScala.map(_.toInt), - lengthDelimited = f.getLengthDelimitedList.asScala - ) - }.toMap) - ) + ScalaPBDescriptorProtos.MethodOptions + .fromJavaProto(method.toProto.getOptions) + .withUnknownFields( + scalapb.UnknownFieldSet(method.getOptions.getUnknownFields.asMap.asScala.map { + case (idx, f) => + idx.toInt -> scalapb.UnknownFieldSet.Field( + varint = f.getVarintList.asScala.map(_.toLong), + fixed64 = f.getFixed64List.asScala.map(_.toLong), + fixed32 = f.getFixed32List.asScala.map(_.toInt), + lengthDelimited = f.getLengthDelimitedList.asScala + ) + }.toMap) + ) - final def serve(userFunctionRouter: UserFunctionRouter, entities: Seq[ServableEntity], entityDiscoveryClient: EntityDiscovery)(implicit sys: ActorSystem, mat: Materializer, ec: ExecutionContext): PartialFunction[HttpRequest, Future[HttpResponse]] = { + final def serve(userFunctionRouter: UserFunctionRouter, + entities: Seq[ServableEntity], + entityDiscoveryClient: EntityDiscovery)( + implicit sys: ActorSystem, + mat: Materializer, + ec: ExecutionContext + ): PartialFunction[HttpRequest, Future[HttpResponse]] = { val log = Logging(sys, "HttpApi") (for { entity <- entities.iterator @@ -446,11 +503,13 @@ object HttpApi { log.info(s"Using configured HTTP API endpoint using [$rule]") rule case None => - val rule = HttpRule.of(selector = method.getFullName, // We know what thing we are proxying - body = "*", // Parse all input - responseBody = "", // Include all output - additionalBindings = Nil, // No need for additional bindings - pattern = HttpRule.Pattern.Post((Path / "v1" / method.getName).toString)) + val rule = HttpRule.of( + selector = method.getFullName, // We know what thing we are proxying + body = "*", // Parse all input + responseBody = "", // Include all output + additionalBindings = Nil, // No need for additional bindings + pattern = HttpRule.Pattern.Post((Path / "v1" / method.getName).toString) + ) log.info(s"Using generated HTTP API endpoint using [$rule]") rule } @@ -458,7 +517,7 @@ object HttpApi { } yield { new HttpEndpoint(method, binding, userFunctionRouter, entityDiscoveryClient) }).foldLeft(NoMatch) { - case (NoMatch, first) => first + case (NoMatch, first) => first case (previous, current) => current orElse previous // Last goes first } } diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/Reflection.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/Reflection.scala index 65d700cf2..1eabd531d 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/Reflection.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/Reflection.scala @@ -38,11 +38,13 @@ object Reflection { ReflectionProto.javaDescriptor ).flatMap(flattenDependencies).distinct - private def flattenDependencies(descriptor: FileDescriptor): List[FileDescriptor] = { + private def flattenDependencies(descriptor: FileDescriptor): List[FileDescriptor] = descriptor :: descriptor.getDependencies.asScala.toList.flatMap(flattenDependencies) - } - def serve(fileDescriptors: Seq[FileDescriptor], services: List[String])(implicit mat: Materializer, sys: 
ActorSystem): PartialFunction[HttpRequest, Future[HttpResponse]] = { + def serve( + fileDescriptors: Seq[FileDescriptor], + services: List[String] + )(implicit mat: Materializer, sys: ActorSystem): PartialFunction[HttpRequest, Future[HttpResponse]] = { implicit val ec: ExecutionContext = mat.executionContext import ServerReflection.Serializers._ @@ -51,9 +53,16 @@ object Reflection { { case req: HttpRequest if req.uri.path == ReflectionPath => val responseCodec = Codecs.negotiate(req) - GrpcMarshalling.unmarshalStream(req)(ServerReflectionRequestSerializer, mat) - .map(_ via handler) - .map(e => GrpcMarshalling.marshalStream(e, GrpcExceptionHandler.defaultMapper)(ServerReflectionResponseSerializer, mat, responseCodec, sys)) + GrpcMarshalling + .unmarshalStream(req)(ServerReflectionRequestSerializer, mat) + .map(_ via handler) + .map( + e => + GrpcMarshalling.marshalStream(e, GrpcExceptionHandler.defaultMapper)(ServerReflectionResponseSerializer, + mat, + responseCodec, + sys) + ) } } @@ -62,37 +71,43 @@ object Reflection { (Names.splitNext(if (fileDesc.getPackage.isEmpty) symbol else symbol.drop(fileDesc.getPackage.length + 1)) match { case ("", "") => false case (typeOrService, "") => - //fileDesc.findEnumTypeByName(typeOrService) != null || // TODO investigate if this is expected + //fileDesc.findEnumTypeByName(typeOrService) != null || // TODO investigate if this is expected fileDesc.findMessageTypeByName(typeOrService) != null || fileDesc.findServiceByName(typeOrService) != null case (service, method) => Option(fileDesc.findServiceByName(service)).exists(_.findMethodByName(method) != null) }) - private final def findFileDescForSymbol(symbol: String, fileDescriptors: Map[String, FileDescriptor]): Option[FileDescriptor] = + private final def findFileDescForSymbol(symbol: String, + fileDescriptors: Map[String, FileDescriptor]): Option[FileDescriptor] = fileDescriptors.values.collectFirst { case fileDesc if containsSymbol(symbol, fileDesc) => fileDesc } private final def containsExtension(container: String, number: Int, fileDesc: FileDescriptor): Boolean = - fileDesc.getExtensions.iterator.asScala.exists(ext => container == ext.getContainingType.getFullName && number == ext.getNumber) + fileDesc.getExtensions.iterator.asScala + .exists(ext => container == ext.getContainingType.getFullName && number == ext.getNumber) - private final def findFileDescForExtension(container: String, number: Int, fileDescriptors: Map[String, FileDescriptor]): Option[FileDescriptor] = + private final def findFileDescForExtension(container: String, + number: Int, + fileDescriptors: Map[String, FileDescriptor]): Option[FileDescriptor] = fileDescriptors.values.collectFirst { case fileDesc if containsExtension(container, number, fileDesc) => fileDesc } - private final def findExtensionNumbersForContainingType(container: String, fileDescriptors: Map[String, FileDescriptor]): List[Int] = + private final def findExtensionNumbersForContainingType(container: String, + fileDescriptors: Map[String, FileDescriptor]): List[Int] = (for { fileDesc <- fileDescriptors.values.iterator extension <- fileDesc.getExtensions.iterator.asScala if extension.getFullName == container } yield extension.getNumber).toList - private def handle(fileDescriptors: Map[String, FileDescriptor], services: List[String]): Flow[ServerReflectionRequest, ServerReflectionResponse, NotUsed] = + private def handle(fileDescriptors: Map[String, FileDescriptor], + services: List[String]): Flow[ServerReflectionRequest, ServerReflectionResponse, NotUsed] = 
Flow[ServerReflectionRequest].map(req => { - import ServerReflectionRequest.{ MessageRequest => In} - import ServerReflectionResponse.{ MessageResponse => Out} + import ServerReflectionRequest.{MessageRequest => In} + import ServerReflectionResponse.{MessageResponse => Out} val response = req.messageRequest match { case In.Empty => diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/Serve.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/Serve.scala index 20793e763..90e07c37b 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/Serve.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/Serve.scala @@ -37,7 +37,6 @@ import io.cloudstate.proxy.entity.{UserFunctionCommand, UserFunctionReply} import io.grpc.Status import org.slf4j.LoggerFactory - object Serve { private final val log = LoggerFactory.getLogger(getClass) @@ -63,7 +62,8 @@ object Serve { else ProtobufByteString.readFrom(bytes.iterator.asInputStream) } - private final class CommandSerializer(commandName: String, desc: Descriptor) extends ProtobufSerializer[UserFunctionCommand] { + private final class CommandSerializer(commandName: String, desc: Descriptor) + extends ProtobufSerializer[UserFunctionCommand] { private[this] final val commandTypeUrl = AnyTypeUrlHostName + desc.getFullName // Should not be used in practice @@ -72,52 +72,68 @@ object Serve { case Some(payload) => ByteString(payload.value.asReadOnlyByteBuffer()) } - override final def deserialize(bytes: ByteString): UserFunctionCommand = { + override final def deserialize(bytes: ByteString): UserFunctionCommand = UserFunctionCommand( name = commandName, payload = Some(ProtobufAny(typeUrl = commandTypeUrl, value = ProtobufByteString.copyFrom(bytes.asByteBuffer))) ) - } } - private final case class CommandHandler(fullCommandName: String, serializer: CommandSerializer, flow: Flow[UserFunctionCommand, - UserFunctionReply, NotUsed], unary: Boolean, expectedReplyTypeUrl: String) - - def createRoute(entities: Seq[ServableEntity], router: UserFunctionRouter, statsCollector: ActorRef, - entityDiscoveryClient: EntityDiscoveryClient, fileDescriptors: Seq[FileDescriptor])(implicit sys: ActorSystem, mat: Materializer, ec: ExecutionContext): PartialFunction[HttpRequest, Future[HttpResponse]] = { - + private final case class CommandHandler(fullCommandName: String, + serializer: CommandSerializer, + flow: Flow[UserFunctionCommand, UserFunctionReply, NotUsed], + unary: Boolean, + expectedReplyTypeUrl: String) + + def createRoute(entities: Seq[ServableEntity], + router: UserFunctionRouter, + statsCollector: ActorRef, + entityDiscoveryClient: EntityDiscoveryClient, + fileDescriptors: Seq[FileDescriptor])( + implicit sys: ActorSystem, + mat: Materializer, + ec: ExecutionContext + ): PartialFunction[HttpRequest, Future[HttpResponse]] = compileProxy(entities, router, statsCollector, entityDiscoveryClient) orElse // Fast path - handleNetworkProbe() orElse - Reflection.serve(fileDescriptors, entities.map(_.serviceName).toList) orElse - HttpApi.serve(router, entities, entityDiscoveryClient) orElse // Slow path - NotFound // No match. TODO: Consider having the caller of this method deal with this condition - } + handleNetworkProbe() orElse + Reflection.serve(fileDescriptors, entities.map(_.serviceName).toList) orElse + HttpApi.serve(router, entities, entityDiscoveryClient) orElse // Slow path + NotFound // No match. TODO: Consider having the caller of this method deal with this condition /** - * Knative network probe handler. - */ + * Knative network probe handler. 
+ */ def handleNetworkProbe(): PartialFunction[HttpRequest, Future[HttpResponse]] = Function.unlift { req => req.headers.find(_.name.equalsIgnoreCase("K-Network-Probe")).map { header => Future.successful(header.value match { case "queue" => HttpResponse(entity = HttpEntity("queue")) - case other => HttpResponse(status = StatusCodes.BadRequest, entity = HttpEntity(s"unexpected probe header value: $other")) + case other => + HttpResponse(status = StatusCodes.BadRequest, entity = HttpEntity(s"unexpected probe header value: $other")) }) } } - private[this] final def compileProxy(entities: Seq[ServableEntity], router: UserFunctionRouter, statsCollector: ActorRef, entityDiscoveryClient: EntityDiscoveryClient)(implicit sys: ActorSystem, mat: Materializer, ec: ExecutionContext): PartialFunction[HttpRequest, Future[HttpResponse]] = { + private[this] final def compileProxy(entities: Seq[ServableEntity], + router: UserFunctionRouter, + statsCollector: ActorRef, + entityDiscoveryClient: EntityDiscoveryClient)( + implicit sys: ActorSystem, + mat: Materializer, + ec: ExecutionContext + ): PartialFunction[HttpRequest, Future[HttpResponse]] = { val rpcMethodSerializers = (for { entity <- entities.iterator method <- entity.serviceDescriptor.getMethods.iterator.asScala } yield { - (Path / entity.serviceName / method.getName, CommandHandler( - fullCommandName = entity.serviceName + "." + method.getName, - new CommandSerializer(method.getName, method.getInputType), - router.handle(entity.serviceName), - unary = !method.toProto.getClientStreaming && !method.toProto.getServerStreaming, - expectedReplyTypeUrl = AnyTypeUrlHostName + method.getOutputType.getFullName - )) + (Path / entity.serviceName / method.getName, + CommandHandler( + fullCommandName = entity.serviceName + "." + method.getName, + new CommandSerializer(method.getName, method.getInputType), + router.handle(entity.serviceName), + unary = !method.toProto.getClientStreaming && !method.toProto.getServerStreaming, + expectedReplyTypeUrl = AnyTypeUrlHostName + method.getOutputType.getFullName + )) }).toMap val mapRequestFailureExceptions: ActorSystem => PartialFunction[Throwable, Status] = { @@ -144,7 +160,8 @@ object Serve { reply.clientAction match { case Some(ClientAction(ClientAction.Action.Reply(Reply(Some(payload))))) => if (payload.typeUrl != handler.expectedReplyTypeUrl) { - val msg = s"${handler.fullCommandName}: Expected reply type_url to be [${handler.expectedReplyTypeUrl}] but was [${payload.typeUrl}]." + val msg = + s"${handler.fullCommandName}: Expected reply type_url to be [${handler.expectedReplyTypeUrl}] but was [${payload.typeUrl}]." 
log.warn(msg) entityDiscoveryClient.reportError(UserFunctionError("Warning: " + msg)) } @@ -157,7 +174,8 @@ object Serve { case _ => None } - }.collect(Function.unlift(identity)) + } + .collect(Function.unlift(identity)) .watchTermination() { (_, complete) => if (handler.unary) { complete.onComplete { _ => @@ -168,10 +186,10 @@ object Serve { } marshalStream(pipeline, mapRequestFailureExceptions)(ReplySerializer, mat, responseCodec, sys) - }.recoverWith(GrpcExceptionHandler.default(GrpcExceptionHandler.defaultMapper(sys))) + } + .recoverWith(GrpcExceptionHandler.default(GrpcExceptionHandler.defaultMapper(sys))) } } private case class CommandException(msg: String) extends RuntimeException(msg, null, false, false) } - diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/StatsCollector.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/StatsCollector.scala index b8ca58457..1049c194a 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/StatsCollector.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/StatsCollector.scala @@ -26,39 +26,40 @@ import scala.collection.mutable import scala.concurrent.duration._ /** - * Collects stats for actions executed. - * - * This actor attempts to replicate the stats collection logic in https://github.com/knative/serving/blob/master/pkg/queue/stats.go. - * That logic records the amount of time spent at each concurrency level, and uses that to periodically report - * the weighted average concurrency. - */ + * Collects stats for actions executed. + * + * This actor attempts to replicate the stats collection logic in https://github.com/knative/serving/blob/master/pkg/queue/stats.go. + * That logic records the amount of time spent at each concurrency level, and uses that to periodically report + * the weighted average concurrency. + */ object StatsCollector { - def props(settings: StatsCollectorSettings, autoscaler: ActorRef): Props = Props(new StatsCollector(settings, autoscaler)) + def props(settings: StatsCollectorSettings, autoscaler: ActorRef): Props = + Props(new StatsCollector(settings, autoscaler)) /** - * A request has been received by the proxy server - */ + * A request has been received by the proxy server + */ case object RequestReceived final case class ResponseSent(timeNanos: Long) /** - * A command has been sent to the user function. - */ + * A command has been sent to the user function. + */ case object CommandSent /** - * A reply has been received from the user function. - */ - final case class ReplyReceived private(timeNanos: Long) + * A reply has been received from the user function. 
+ */ + final case class ReplyReceived private (timeNanos: Long) case object DatabaseOperationStarted final case class DatabaseOperationFinished(timeNanos: Long) final case class StatsCollectorSettings( - reportPeriod: FiniteDuration + reportPeriod: FiniteDuration ) { def this(config: Config) = this( reportPeriod = config.getDuration("report-period").toMillis.millis @@ -71,7 +72,10 @@ object StatsCollector { private final val SecondInNanos: Long = 1000000000 } -class StatsCollector(settings: StatsCollectorSettings, autoscaler: ActorRef) extends Actor with Timers with ActorLogging { +class StatsCollector(settings: StatsCollectorSettings, autoscaler: ActorRef) + extends Actor + with Timers + with ActorLogging { import StatsCollector._ @@ -106,25 +110,27 @@ class StatsCollector(settings: StatsCollectorSettings, autoscaler: ActorRef) ext private def updateCommandState(): Unit = { val currentNanos = System.nanoTime() val sinceLastNanos = currentNanos - commandLastChangedNanos - commandTimeNanosOnConcurrency.update(commandConcurrency, commandTimeNanosOnConcurrency(commandConcurrency) + sinceLastNanos) + commandTimeNanosOnConcurrency.update(commandConcurrency, + commandTimeNanosOnConcurrency(commandConcurrency) + sinceLastNanos) commandLastChangedNanos = currentNanos } private def updateRequestState(): Unit = { val currentNanos = System.nanoTime() val sinceLastNanos = currentNanos - requestLastChangedNanos - requestTimeNanosOnConcurrency.update(requestConcurrency, requestTimeNanosOnConcurrency(requestConcurrency) + sinceLastNanos) + requestTimeNanosOnConcurrency.update(requestConcurrency, + requestTimeNanosOnConcurrency(requestConcurrency) + sinceLastNanos) requestLastChangedNanos = currentNanos } private def updateDatabaseState(): Unit = { val currentNanos = System.nanoTime() val sinceLastNanos = currentNanos - databaseLastChangedNanos - databaseTimeNanosOnConcurrency.update(databaseConcurrency, databaseTimeNanosOnConcurrency(databaseConcurrency) + sinceLastNanos) + databaseTimeNanosOnConcurrency.update(databaseConcurrency, + databaseTimeNanosOnConcurrency(databaseConcurrency) + sinceLastNanos) databaseLastChangedNanos = currentNanos } - private def weightedAverage(times: mutable.Map[Int, Long]): Double = { // This replicates this go code: @@ -207,10 +213,9 @@ class StatsCollector(settings: StatsCollectorSettings, autoscaler: ActorRef) ext userFunctionCount = commandCount, databaseConcurrency = avgDatabaseConcurrency, databaseTimeNanos = databaseTimeNanos, - databaseCount = databaseCount, + databaseCount = databaseCount ) - lastReportedNanos = currentTime commandCount = 0 commandTimeNanosOnConcurrency.clear() diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/UserFunctionRouter.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/UserFunctionRouter.scala index 13d9c3de2..7054a6c4a 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/UserFunctionRouter.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/UserFunctionRouter.scala @@ -10,32 +10,31 @@ import io.cloudstate.proxy.entity.{UserFunctionCommand, UserFunctionReply} import scala.collection.JavaConverters._ import scala.concurrent.{ExecutionContext, Future} -class UserFunctionRouter(entities: Seq[ServableEntity], entityDiscovery: EntityDiscovery)(implicit mat: Materializer, ec: ExecutionContext) { +class UserFunctionRouter(entities: Seq[ServableEntity], entityDiscovery: EntityDiscovery)(implicit mat: Materializer, + ec: ExecutionContext) { private[this] final val entityCommands = entities.map { case 
ServableEntity(serviceName, serviceDescriptor, entitySupport) => - serviceName -> EntityCommands(serviceName, entitySupport, - serviceDescriptor.getMethods.asScala.map(_.getName).toSet) + serviceName -> EntityCommands(serviceName, + entitySupport, + serviceDescriptor.getMethods.asScala.map(_.getName).toSet) }.toMap - def handle(serviceName: String): Flow[UserFunctionCommand, UserFunctionReply, NotUsed] = { + def handle(serviceName: String): Flow[UserFunctionCommand, UserFunctionReply, NotUsed] = Flow[UserFunctionCommand].flatMapConcat { command => routeMessage(Nil, RouteReason.Initial, serviceName, command.name, command.payload, synchronous = true) } - } - def handleUnary(serviceName: String, command: UserFunctionCommand): Future[UserFunctionReply] = { + def handleUnary(serviceName: String, command: UserFunctionCommand): Future[UserFunctionReply] = routeMessageUnary(Nil, RouteReason.Initial, serviceName, command.name, command.payload) - } - - private def route(trace: List[(RouteReason, String, String)]): Flow[UserFunctionReply, UserFunctionReply, NotUsed] = { + private def route(trace: List[(RouteReason, String, String)]): Flow[UserFunctionReply, UserFunctionReply, NotUsed] = Flow[UserFunctionReply].flatMapConcat { response => val sideEffects = Source(response.sideEffects.toList) - .flatMapConcat { - case SideEffect(serviceName, commandName, payload, synchronous) => - routeMessage(trace, RouteReason.SideEffect, serviceName, commandName, payload, synchronous) - } + .flatMapConcat { + case SideEffect(serviceName, commandName, payload, synchronous) => + routeMessage(trace, RouteReason.SideEffect, serviceName, commandName, payload, synchronous) + } val nextAction = response.clientAction match { case Some(ClientAction(ClientAction.Action.Forward(Forward(serviceName, commandName, payload)))) => @@ -47,15 +46,20 @@ class UserFunctionRouter(entities: Seq[ServableEntity], entityDiscovery: EntityD } // First do the side effects, but ignore the response, then do the next action - sideEffects.filter(_ => false) + sideEffects + .filter(_ => false) .concat(nextAction) } - } - private def routeUnary(trace: List[(RouteReason, String, String)], response: UserFunctionReply): Future[UserFunctionReply] = { + private def routeUnary(trace: List[(RouteReason, String, String)], + response: UserFunctionReply): Future[UserFunctionReply] = { val afterSideEffects = response.sideEffects.foldLeft(Future.successful[Any](())) { (future, sideEffect) => future.flatMap { _ => - val sideEffectFuture = routeMessageUnary(trace, RouteReason.SideEffect, sideEffect.serviceName, sideEffect.commandName, sideEffect.payload) + val sideEffectFuture = routeMessageUnary(trace, + RouteReason.SideEffect, + sideEffect.serviceName, + sideEffect.commandName, + sideEffect.payload) if (sideEffect.synchronous) { sideEffectFuture } else { @@ -74,13 +78,18 @@ class UserFunctionRouter(entities: Seq[ServableEntity], entityDiscovery: EntityD } } - private def routeMessage(trace: List[(RouteReason, String, String)], routeReason: RouteReason, serviceName: String, commandName: String, - payload: Option[com.google.protobuf.any.Any], synchronous: Boolean): Source[UserFunctionReply, NotUsed] = { + private def routeMessage(trace: List[(RouteReason, String, String)], + routeReason: RouteReason, + serviceName: String, + commandName: String, + payload: Option[com.google.protobuf.any.Any], + synchronous: Boolean): Source[UserFunctionReply, NotUsed] = { val source = entityCommands.get(serviceName) match { case Some(EntityCommands(_, entitySupport, commands)) => 
if (commands(commandName)) { - Source.single(UserFunctionCommand(commandName, payload)) + Source + .single(UserFunctionCommand(commandName, payload)) .via(entitySupport.handler(commandName)) .via(route((routeReason, serviceName, commandName) :: trace)) } else { @@ -101,9 +110,11 @@ class UserFunctionRouter(entities: Seq[ServableEntity], entityDiscovery: EntityD } } - private def routeMessageUnary(trace: List[(RouteReason, String, String)], routeReason: RouteReason, serviceName: String, commandName: String, - payload: Option[com.google.protobuf.any.Any]): Future[UserFunctionReply] = { - + private def routeMessageUnary(trace: List[(RouteReason, String, String)], + routeReason: RouteReason, + serviceName: String, + commandName: String, + payload: Option[com.google.protobuf.any.Any]): Future[UserFunctionReply] = entityCommands.get(serviceName) match { case Some(EntityCommands(_, entitySupport, commands)) => if (commands(commandName)) { @@ -116,27 +127,32 @@ class UserFunctionRouter(entities: Seq[ServableEntity], entityDiscovery: EntityD case None => reportErrorUnary(routeReason, trace, s"Service [$serviceName] unknown") } - } - private def reportError(routeReason: RouteReason, trace: List[(RouteReason, String, String)], error: String): Exception = { + private def reportError(routeReason: RouteReason, + trace: List[(RouteReason, String, String)], + error: String): Exception = { val firstReason = if (routeReason == RouteReason.Initial) "" else s"\n ${routeReason.trace}" - val errorWithTrace = trace.map { - case (RouteReason.Initial, service, command) => s"$service.$command" - case (reason, service, command) => s"$service.$command\n ${reason.trace} " - }.mkString(error + firstReason, "", "") + val errorWithTrace = trace + .map { + case (RouteReason.Initial, service, command) => s"$service.$command" + case (reason, service, command) => s"$service.$command\n ${reason.trace} " + } + .mkString(error + firstReason, "", "") entityDiscovery.reportError(UserFunctionError(errorWithTrace)) new Exception("Error") } - private def reportErrorSource(routeReason: RouteReason, trace: List[(RouteReason, String, String)], error: String): Source[Nothing, NotUsed] = { + private def reportErrorSource(routeReason: RouteReason, + trace: List[(RouteReason, String, String)], + error: String): Source[Nothing, NotUsed] = Source.failed(reportError(routeReason, trace, error)) - } - private def reportErrorUnary(routeReason: RouteReason, trace: List[(RouteReason, String, String)], error: String): Future[Nothing] = { + private def reportErrorUnary(routeReason: RouteReason, + trace: List[(RouteReason, String, String)], + error: String): Future[Nothing] = Future.failed(reportError(routeReason, trace, error)) - } } @@ -156,4 +172,3 @@ private object RouteReason { override val trace = "" } } - diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/UserFunctionTypeSupport.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/UserFunctionTypeSupport.scala index ed4b56b27..09b903b6b 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/UserFunctionTypeSupport.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/UserFunctionTypeSupport.scala @@ -24,15 +24,17 @@ trait UserFunctionTypeSupportFactory { } /** - * Abstract support for any user function type that is entity based (ie, has entity id keys). - */ + * Abstract support for any user function type that is entity based (ie, has entity id keys). 
+ */ abstract class EntityTypeSupportFactory extends UserFunctionTypeSupportFactory { override final def build(entity: Entity, serviceDescriptor: ServiceDescriptor): UserFunctionTypeSupport = { val idExtractors = serviceDescriptor.getMethods.asScala .map(method => method.getName -> new EntityMethodDescriptor(method)) .toMap - new EntityUserFunctionTypeSupport(serviceDescriptor, idExtractors, buildEntityTypeSupport(entity, serviceDescriptor)) + new EntityUserFunctionTypeSupport(serviceDescriptor, + idExtractors, + buildEntityTypeSupport(entity, serviceDescriptor)) } protected def buildEntityTypeSupport(entity: Entity, serviceDescriptor: ServiceDescriptor): EntityTypeSupport @@ -44,30 +46,33 @@ private object EntityMethodDescriptor { } final class EntityMethodDescriptor(val method: MethodDescriptor) { + /** - * ScalaPB doesn't do this conversion for us unfortunately. - * By doing it, we can use EntitykeyProto.entityKey.get() to read the entity key nicely. - */ - private def convertFieldOptions(field: FieldDescriptor): ScalaPBDescriptorProtos.FieldOptions = { - ScalaPBDescriptorProtos. - FieldOptions. - fromJavaProto(field.toProto.getOptions). - withUnknownFields(scalapb.UnknownFieldSet(field.getOptions.getUnknownFields.asMap.asScala.map { - case (idx, f) => idx.toInt -> scalapb.UnknownFieldSet.Field( - varint = f.getVarintList.asScala.map(_.toLong), - fixed64 = f.getFixed64List.asScala.map(_.toLong), - fixed32 = f.getFixed32List.asScala.map(_.toInt), - lengthDelimited = f.getLengthDelimitedList.asScala - ) + * ScalaPB doesn't do this conversion for us unfortunately. + * By doing it, we can use EntitykeyProto.entityKey.get() to read the entity key nicely. + */ + private def convertFieldOptions(field: FieldDescriptor): ScalaPBDescriptorProtos.FieldOptions = + ScalaPBDescriptorProtos.FieldOptions + .fromJavaProto(field.toProto.getOptions) + .withUnknownFields(scalapb.UnknownFieldSet(field.getOptions.getUnknownFields.asMap.asScala.map { + case (idx, f) => + idx.toInt -> scalapb.UnknownFieldSet.Field( + varint = f.getVarintList.asScala.map(_.toLong), + fixed64 = f.getFixed64List.asScala.map(_.toLong), + fixed32 = f.getFixed32List.asScala.map(_.toInt), + lengthDelimited = f.getLengthDelimitedList.asScala + ) }.toMap)) - } - private val fields = method.getInputType.getFields.iterator.asScala. - filter(field => EntityKeyProto.entityKey.get(convertFieldOptions(field))). - toArray.sortBy(_.getIndex) + private val fields = method.getInputType.getFields.iterator.asScala + .filter(field => EntityKeyProto.entityKey.get(convertFieldOptions(field))) + .toArray + .sortBy(_.getIndex) if (fields.isEmpty) { - throw EntityDiscoveryException(s"No field marked with [(cloudstate.entity_key) = true] found for in type ${method.getInputType.getName}, this is needed to associate commands sent to ${method.getFullName} with the entities that they are for.") + throw EntityDiscoveryException( + s"No field marked with [(cloudstate.entity_key) = true] found for in type ${method.getInputType.getName}, this is needed to associate commands sent to ${method.getFullName} with the entities that they are for." 
+ ) } def extractId(bytes: ByteString): String = { @@ -82,26 +87,30 @@ final class EntityMethodDescriptor(val method: MethodDescriptor) { } private final class EntityUserFunctionTypeSupport(serviceDescriptor: ServiceDescriptor, - methodDescriptors: Map[String, EntityMethodDescriptor], entityTypeSupport: EntityTypeSupport) extends UserFunctionTypeSupport { + methodDescriptors: Map[String, EntityMethodDescriptor], + entityTypeSupport: EntityTypeSupport) + extends UserFunctionTypeSupport { override def handler(name: String): Flow[UserFunctionCommand, UserFunctionReply, NotUsed] = { val method = methodDescriptor(name) Flow[UserFunctionCommand].map(ufToEntityCommand(method)).via(entityTypeSupport.handler(method)) } - - override def handleUnary(command: UserFunctionCommand): Future[UserFunctionReply] = { + override def handleUnary(command: UserFunctionCommand): Future[UserFunctionReply] = entityTypeSupport.handleUnary(ufToEntityCommand(methodDescriptor(command.name))(command)) - } private def ufToEntityCommand(method: EntityMethodDescriptor): UserFunctionCommand => EntityCommand = { command => val entityId = method.extractId(command.payload.fold(ByteString.EMPTY)(_.value)) - EntityCommand(entityId = entityId, name = command.name, payload = command.payload, streamed = method.method.isServerStreaming) + EntityCommand(entityId = entityId, + name = command.name, + payload = command.payload, + streamed = method.method.isServerStreaming) } private def methodDescriptor(name: String): EntityMethodDescriptor = methodDescriptors - .getOrElse(name, throw EntityDiscoveryException(s"Unknown command $name on service ${serviceDescriptor.getFullName}")) + .getOrElse(name, + throw EntityDiscoveryException(s"Unknown command $name on service ${serviceDescriptor.getFullName}")) } trait EntityTypeSupport { diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/Warmup.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/Warmup.scala index 76127940a..331c99729 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/Warmup.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/Warmup.scala @@ -34,17 +34,24 @@ object Warmup { } /** - * Warms things up by starting a dummy instance of the state manager actor up, this ensures - * Cassandra gets connected to etc, so a lot of classloading and jitting is done here. - */ + * Warms things up by starting a dummy instance of the state manager actor up, this ensures + * Cassandra gets connected to etc, so a lot of classloading and jitting is done here. + */ class Warmup(needsWarmup: Boolean) extends Actor with ActorLogging { if (needsWarmup) { log.debug("Starting warmup...") - val stateManager = context.watch(context.actorOf(EventSourcedEntity.props( - Configuration("warmup.Service", "###warmup", 30.seconds, 100), "###warmup-entity", self, self, self - ), "entity")) + val stateManager = context.watch( + context.actorOf(EventSourcedEntity.props( + Configuration("warmup.Service", "###warmup", 30.seconds, 100), + "###warmup-entity", + self, + self, + self + ), + "entity") + ) stateManager ! 
EntityCommand( entityId = "###warmup-entity", @@ -64,17 +71,23 @@ class Warmup(needsWarmup: Boolean) extends Actor with ActorLogging { log.debug("Warmup received action, starting it.") start() case EventSourcedStreamIn(EventSourcedStreamIn.Message.Event(_)) => - // Ignore + // Ignore case EventSourcedStreamIn(EventSourcedStreamIn.Message.Init(_)) => log.debug("Warmup got init.") - // Ignore + // Ignore case EventSourcedStreamIn(EventSourcedStreamIn.Message.Command(cmd)) => log.debug("Warmup got forwarded command") // It's forwarded us our command, send it a reply - eventSourcedEntityManager ! EventSourcedStreamOut(EventSourcedStreamOut.Message.Reply(EventSourcedReply( - commandId = cmd.id, - clientAction = Some(ClientAction(ClientAction.Action.Reply(Reply(Some(com.google.protobuf.any.Any("url", ByteString.EMPTY)))))) - ))) + eventSourcedEntityManager ! EventSourcedStreamOut( + EventSourcedStreamOut.Message.Reply( + EventSourcedReply( + commandId = cmd.id, + clientAction = Some( + ClientAction(ClientAction.Action.Reply(Reply(Some(com.google.protobuf.any.Any("url", ByteString.EMPTY))))) + ) + ) + ) + ) case _: UserFunctionReply => log.debug("Warmup got forwarded reply") // It's forwarded the reply, now stop it diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/autoscaler/Autoscaler.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/autoscaler/Autoscaler.scala index 2d1ffbd82..ade0a4f4b 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/autoscaler/Autoscaler.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/autoscaler/Autoscaler.scala @@ -1,6 +1,16 @@ package io.cloudstate.proxy.autoscaler -import akka.actor.{Actor, ActorLogging, ActorRef, ActorRefFactory, ActorSystem, AddressFromURIString, DeadLetterSuppression, Props, Timers} +import akka.actor.{ + Actor, + ActorLogging, + ActorRef, + ActorRefFactory, + ActorSystem, + AddressFromURIString, + DeadLetterSuppression, + Props, + Timers +} import akka.cluster.ddata.Replicator._ import akka.cluster.ddata.{DistributedData, LWWRegister, LWWRegisterKey} import akka.cluster.{Cluster, Member, MemberStatus, UniqueAddress} @@ -11,62 +21,50 @@ import scala.concurrent.duration._ import io.cloudstate.proxy.autoscaler._ final case class AutoscalerSettings( - - /** - * Whether autoscaling is enabled. - */ - enabled: Boolean, - - /** - * Target concurrency on user functions. - */ - targetUserFunctionConcurrency: Int, - - /** - * Target concurrency for requests. - */ - targetRequestConcurrency: Int, - - /** - * The sliding window period used to calculate the current concurrency for all nodes. - */ - targetConcurrencyWindow: FiniteDuration, - - /** - * While scaling up, concurrency can spike due to the time it takes for new nodes to join the cluster, - * to warm up/jit, and rebalance etc. So we don't make any new decisions based on concurrency until this - * deadline is met. - */ - scaleUpStableDeadline: FiniteDuration, - - /** - * While scaling down, concurrency can spike due to rebalancing, so we don't make any new decisions based - * on concurrency until this deadline is met. - */ - scaleDownStableDeadline: FiniteDuration, - - /** - * During the scaling up/down stable deadline period, decisions to scale up further are made based on the - * request rate. If it increases by more than this factor, then we scale the number of nodes to handle that - * rate based on the original rate per node being handled before scale up/down. 
- */ - requestRateThresholdFactor: Double, - - /** - * The window used to determine whether the request rate has exceeded the threshold. - */ - requestRateThresholdWindow: FiniteDuration, - - maxScaleFactor: Double, - - maxScaleAbsolute: Int, - - maxMembers: Int, - - /** - * Autoscale tick period. - */ - tickPeriod: FiniteDuration + /** + * Whether autoscaling is enabled. + */ + enabled: Boolean, + /** + * Target concurrency on user functions. + */ + targetUserFunctionConcurrency: Int, + /** + * Target concurrency for requests. + */ + targetRequestConcurrency: Int, + /** + * The sliding window period used to calculate the current concurrency for all nodes. + */ + targetConcurrencyWindow: FiniteDuration, + /** + * While scaling up, concurrency can spike due to the time it takes for new nodes to join the cluster, + * to warm up/jit, and rebalance etc. So we don't make any new decisions based on concurrency until this + * deadline is met. + */ + scaleUpStableDeadline: FiniteDuration, + /** + * While scaling down, concurrency can spike due to rebalancing, so we don't make any new decisions based + * on concurrency until this deadline is met. + */ + scaleDownStableDeadline: FiniteDuration, + /** + * During the scaling up/down stable deadline period, decisions to scale up further are made based on the + * request rate. If it increases by more than this factor, then we scale the number of nodes to handle that + * rate based on the original rate per node being handled before scale up/down. + */ + requestRateThresholdFactor: Double, + /** + * The window used to determine whether the request rate has exceeded the threshold. + */ + requestRateThresholdWindow: FiniteDuration, + maxScaleFactor: Double, + maxScaleAbsolute: Int, + maxMembers: Int, + /** + * Autoscale tick period. + */ + tickPeriod: FiniteDuration ) object AutoscalerSettings { @@ -97,19 +95,19 @@ object AutoscalerSettings { object Autoscaler { final case class Sample( - receivedNanos: Long, - metrics: AutoscalerMetrics + receivedNanos: Long, + metrics: AutoscalerMetrics ) private final case class Summary( - clusterMembers: Int, - requestConcurrency: Double, - databaseConcurrency: Double, - userFunctionConcurrency: Double, - requestRate: Double, - requestTimeMillis: Double, - userFunctionTimeMillis: Double, - databaseTimeMillis: Double + clusterMembers: Int, + requestConcurrency: Double, + databaseConcurrency: Double, + userFunctionConcurrency: Double, + requestRate: Double, + requestTimeMillis: Double, + userFunctionTimeMillis: Double, + databaseTimeMillis: Double ) final case class Deployment(name: String, ready: Int, scale: Int, upgrading: Boolean) @@ -118,24 +116,30 @@ object Autoscaler { case object Tick extends DeadLetterSuppression /** - * This state is never gossipped, it's used as a placeholder for when we are waiting to have all - * the state we need to make decisions. - */ + * This state is never gossipped, it's used as a placeholder for when we are waiting to have all + * the state we need to make decisions. 
+ */ case object WaitingForState extends AutoscalerState type ScalerFactory = (ActorRef, ActorRefFactory) => ActorRef - def props(settings: AutoscalerSettings, scalerFactory: ScalerFactory, - clusterMembershipFacade: ClusterMembershipFacade): Props = Props(new Autoscaler(settings, scalerFactory, clusterMembershipFacade)) + def props(settings: AutoscalerSettings, + scalerFactory: ScalerFactory, + clusterMembershipFacade: ClusterMembershipFacade): Props = + Props(new Autoscaler(settings, scalerFactory, clusterMembershipFacade)) } /** - * This state is gossipped via an LWW-Register CRDT. Most implementations are protobuf messages. - */ + * This state is gossipped via an LWW-Register CRDT. Most implementations are protobuf messages. + */ trait AutoscalerState -class Autoscaler(settings: AutoscalerSettings, scalerFactory: Autoscaler.ScalerFactory, - clusterMembershipFacade: ClusterMembershipFacade) extends Actor with Timers with ActorLogging { +class Autoscaler(settings: AutoscalerSettings, + scalerFactory: Autoscaler.ScalerFactory, + clusterMembershipFacade: ClusterMembershipFacade) + extends Actor + with Timers + with ActorLogging { import Autoscaler._ @@ -156,24 +160,23 @@ class Autoscaler(settings: AutoscalerSettings, scalerFactory: Autoscaler.ScalerF override def receive: Receive = waitingForState(WaitingForState) - private def become(handler: Receive): Unit = { + private def become(handler: Receive): Unit = context become ( handleMetrics orElse handler orElse { case UpdateSuccess(_, _) => - // Ignore + // Ignore case failure: UpdateFailure[_] => log.warning("Failure updating autoscaler state CRDT {}", failure) - // It's no big deal, it just means a majority of nodes couldn't be updated in the given timeout. - // But this one was, and maybe several others were too, and worst case, the next cluster singleton - // to read it will get the wrong state, in which case it will make its own decision about its state - // soon enough. + // It's no big deal, it just means a majority of nodes couldn't be updated in the given timeout. + // But this one was, and maybe several others were too, and worst case, the next cluster singleton + // to read it will get the wrong state, in which case it will make its own decision about its state + // soon enough. case deploy: Deployment => deployment = Some(deploy) } ) - } - private def checkInit(state: AutoscalerState) = { + private def checkInit(state: AutoscalerState) = deployment match { case None => case Some(deploy) => @@ -190,8 +193,11 @@ class Autoscaler(settings: AutoscalerSettings, scalerFactory: Autoscaler.ScalerF self ! Tick case ScalingUp(desired, lastStableRequestRatePerNode, wallClockDeadline) => - become(scalingUp(desired, lastStableRequestRatePerNode, - Deadline.now + (wallClockDeadline - System.currentTimeMillis()).millis)) + become( + scalingUp(desired, + lastStableRequestRatePerNode, + Deadline.now + (wallClockDeadline - System.currentTimeMillis()).millis) + ) self ! 
Tick case ScalingDown(desired, wallClockDeadline) => @@ -204,7 +210,6 @@ class Autoscaler(settings: AutoscalerSettings, scalerFactory: Autoscaler.ScalerF } } } - } private def waitingForState(state: AutoscalerState): Receive = { case metrics: AutoscalerMetrics => @@ -235,8 +240,8 @@ class Autoscaler(settings: AutoscalerSettings, scalerFactory: Autoscaler.ScalerF } /** - * In this state, we are stable, the number of nodes is appropriate to keep the target concurrency - */ + * In this state, we are stable, the number of nodes is appropriate to keep the target concurrency + */ private def becomeStable(): Unit = { updateState(Stable()) become(stable) @@ -247,30 +252,53 @@ class Autoscaler(settings: AutoscalerSettings, scalerFactory: Autoscaler.ScalerF val summary = summarize() val adjustedRequestConcurrency = summary.requestConcurrency - summary.databaseConcurrency - val desiredForUserFunction = Math.max(1, Math.ceil((summary.userFunctionConcurrency * summary.clusterMembers) / settings.targetUserFunctionConcurrency).toInt) - val desiredForRequest = Math.max(1, Math.ceil((adjustedRequestConcurrency * summary.clusterMembers) / settings.targetRequestConcurrency).toInt) + val desiredForUserFunction = Math.max( + 1, + Math + .ceil((summary.userFunctionConcurrency * summary.clusterMembers) / settings.targetUserFunctionConcurrency) + .toInt + ) + val desiredForRequest = Math.max( + 1, + Math.ceil((adjustedRequestConcurrency * summary.clusterMembers) / settings.targetRequestConcurrency).toInt + ) if (summary.userFunctionConcurrency > settings.targetUserFunctionConcurrency) { val desired = capScaling(desiredForUserFunction, summary.clusterMembers) - log.info("Scaling up from {} to {} because user function concurrency {} exceeds target {}", - summary.clusterMembers, desired, summary.userFunctionConcurrency, settings.targetUserFunctionConcurrency) + log.info( + "Scaling up from {} to {} because user function concurrency {} exceeds target {}", + summary.clusterMembers, + desired, + summary.userFunctionConcurrency, + settings.targetUserFunctionConcurrency + ) scaleUp(desired, summary.requestRate) } else if (adjustedRequestConcurrency > settings.targetRequestConcurrency) { val desired = capScaling(desiredForRequest, summary.clusterMembers) - log.info("Scaling up from {} to {} because adjusted request concurrency {} exceeds target {}", - summary.clusterMembers, desired, adjustedRequestConcurrency, settings.targetRequestConcurrency) + log.info( + "Scaling up from {} to {} because adjusted request concurrency {} exceeds target {}", + summary.clusterMembers, + desired, + adjustedRequestConcurrency, + settings.targetRequestConcurrency + ) scaleUp(desired, summary.requestRate) } else if (desiredForUserFunction < summary.clusterMembers && desiredForRequest < summary.clusterMembers) { val desired = capScaling(Math.max(desiredForRequest, desiredForUserFunction), summary.clusterMembers) - log.info("Scaling down to {} because desired nodes for user function {} and desired nodes for request handling {} is below cluster members {}", - desired, desiredForUserFunction, desiredForRequest, summary.clusterMembers) + log.info( + "Scaling down to {} because desired nodes for user function {} and desired nodes for request handling {} is below cluster members {}", + desired, + desiredForUserFunction, + desiredForRequest, + summary.clusterMembers + ) scaleDown(desired) } else if (deployment.exists(_.upgrading)) { @@ -282,8 +310,8 @@ class Autoscaler(settings: AutoscalerSettings, scalerFactory: Autoscaler.ScalerF } /** - * Cap 
scaling to the max scaling settings - */ + * Cap scaling to the max scaling settings + */ private def capScaling(desired: Int, clusterMembers: Int): Int = { val changeInClusterMembers = Math.abs(desired - clusterMembers) val factorCappedChange = if (settings.maxScaleFactor > 0) { @@ -300,22 +328,25 @@ class Autoscaler(settings: AutoscalerSettings, scalerFactory: Autoscaler.ScalerF } /** - * In this state, we are scaling up, and waiting for the scale up deadline to elapse before switching back to stable, or - * scaling up further. In addition, if observed request rate exceeds the last stable request rate by the request rate - * threshold, we may scale up more. - */ + * In this state, we are scaling up, and waiting for the scale up deadline to elapse before switching back to stable, or + * scaling up further. In addition, if observed request rate exceeds the last stable request rate by the request rate + * threshold, we may scale up more. + */ private def scaleUp(desired: Int, lastStableRequestRatePerNode: Double): Unit = { deployment.foreach { d => scaler ! Scale(d.name, desired) } - updateState(ScalingUp(desired, lastStableRequestRatePerNode, System.currentTimeMillis() + settings.scaleUpStableDeadline.toMillis)) + updateState( + ScalingUp(desired, + lastStableRequestRatePerNode, + System.currentTimeMillis() + settings.scaleUpStableDeadline.toMillis) + ) become(scalingUp(desired, lastStableRequestRatePerNode, settings.scaleUpStableDeadline.fromNow)) } private def scalingUp(desired: Int, lastStableRequestRatePerNode: Double, deadline: Deadline): Receive = { case Tick => - if (deadline.isOverdue()) { log.info("Scaling up to {} stable period over", desired) // Concurrency would have gone crazy during the scaling period, so expire most of the old data @@ -334,7 +365,6 @@ class Autoscaler(settings: AutoscalerSettings, scalerFactory: Autoscaler.ScalerF private def scalingDown(desired: Int, deadline: Deadline): Receive = { case Tick => - if (deadline.isOverdue()) { log.info("Scaling down to {} stable period over", desired) // Concurrency would have gone crazy during the scaling period, so expire most of the old data @@ -350,14 +380,22 @@ class Autoscaler(settings: AutoscalerSettings, scalerFactory: Autoscaler.ScalerF val summary = summarize() if (summary.requestRate > lastStableRequestRatePerNode * settings.requestRateThresholdFactor && - (summary.userFunctionConcurrency > settings.targetUserFunctionConcurrency || + (summary.userFunctionConcurrency > settings.targetUserFunctionConcurrency || summary.requestConcurrency > settings.targetRequestConcurrency)) { - val newDesired = capScaling(Math.ceil(summary.requestRate * summary.clusterMembers / lastStableRequestRatePerNode).toInt, summary.clusterMembers) + val newDesired = capScaling( + Math.ceil(summary.requestRate * summary.clusterMembers / lastStableRequestRatePerNode).toInt, + summary.clusterMembers + ) if (newDesired > currentDesired) { - log.info("Scaling up to {} because request rate {} has exceeded last stable request rate {} by configured factor {}", - newDesired, summary.requestRate, lastStableRequestRatePerNode, settings.requestRateThresholdFactor) + log.info( + "Scaling up to {} because request rate {} has exceeded last stable request rate {} by configured factor {}", + newDesired, + summary.requestRate, + lastStableRequestRatePerNode, + settings.requestRateThresholdFactor + ) scaleUp(newDesired, lastStableRequestRatePerNode) } true @@ -365,8 +403,8 @@ class Autoscaler(settings: AutoscalerSettings, scalerFactory: Autoscaler.ScalerF } /** 
- * Like scale up, except with a different configured deadline. - */ + * Like scale up, except with a different configured deadline. + */ private def scaleDown(desired: Int): Unit = { deployment.foreach { d => scaler ! Scale(d.name, desired) @@ -376,9 +414,9 @@ class Autoscaler(settings: AutoscalerSettings, scalerFactory: Autoscaler.ScalerF } /** - * When upgrading, due to the shutting down and starting of nodes, concurrency can go crazy. So, while upgrading, - * we scale based on request rate. - */ + * When upgrading, due to the shutting down and starting of nodes, concurrency can go crazy. So, while upgrading, + * we scale based on request rate. + */ private def becomeUpgrading(desired: Int, lastStableRequestRatePerNode: Double): Unit = { updateState(Upgrading(desired, lastStableRequestRatePerNode)) become(upgrading(desired, lastStableRequestRatePerNode)) @@ -401,29 +439,28 @@ class Autoscaler(settings: AutoscalerSettings, scalerFactory: Autoscaler.ScalerF stats = stats.updated(uniqueAddress, stats(uniqueAddress).enqueue(Sample(System.nanoTime(), m))) } - private def updateState(state: AutoscalerState): Unit = { + private def updateState(state: AutoscalerState): Unit = // We write majority to ensure this immediately gets written to a majority of replicas. This mostly ensures // if we immediately after this leave the cluster, and a cluster singleton comes up somewhere else, when it // does a read majority, it should see the write. ddata.replicator ! Update(StateKey, EmptyState, WriteMajority(5.seconds))(_.withValueOf(state)) - } private def summarize(): Summary = { val summaryTime = System.nanoTime() val concurrencyWindowNanos = settings.targetConcurrencyWindow.toNanos val requestRateWindowNanos = settings.requestRateThresholdWindow.toNanos - var totalConcurrencyNanos = 0l + var totalConcurrencyNanos = 0L var weightedRequestConcurrencySum = 0d var weightedDatabaseConcurrencySum = 0d var weightedUserFunctionConcurrencySum = 0d var requestCount = 0 - var requestTimeNanos = 0l + var requestTimeNanos = 0L var userFunctionCount = 0 - var userFunctionTimeNanos = 0l + var userFunctionTimeNanos = 0L var databaseCount = 0 - var databaseTimeNanos = 0l + var databaseTimeNanos = 0L var requestRatePerSecond = 0 @@ -433,8 +470,7 @@ class Autoscaler(settings: AutoscalerSettings, scalerFactory: Autoscaler.ScalerF stats.foreach { case (_, samples) => - - var addressNanos = 0l + var addressNanos = 0L var addressRequestCount = 0 // Now, accumulate sample values @@ -458,7 +494,8 @@ class Autoscaler(settings: AutoscalerSettings, scalerFactory: Autoscaler.ScalerF } requestCount += addressRequestCount - val requestRate = if (addressNanos == 0) 0 else (addressRequestCount.toDouble / addressNanos * 1000000000l).toInt + val requestRate = + if (addressNanos == 0) 0 else (addressRequestCount.toDouble / addressNanos * 1000000000L).toInt requestRatePerSecond += requestRate } @@ -466,7 +503,8 @@ class Autoscaler(settings: AutoscalerSettings, scalerFactory: Autoscaler.ScalerF Summary(clusterMembers, 0, 0, 0, 0, 0, 0, 0) } else { val requestTimeMillis = if (requestCount == 0) 0 else requestTimeNanos.toDouble / requestCount / 1000000 - val userFunctionTimeMillis = if (userFunctionCount == 0) 0 else userFunctionTimeNanos.toDouble / userFunctionCount / 1000000 + val userFunctionTimeMillis = + if (userFunctionCount == 0) 0 else userFunctionTimeNanos.toDouble / userFunctionCount / 1000000 val databaseTimeMillis = if (databaseCount == 0) 0 else databaseTimeNanos.toDouble / databaseCount / 1000000 Summary( @@ -484,7 +522,7 @@ 
class Autoscaler(settings: AutoscalerSettings, scalerFactory: Autoscaler.ScalerF summary } - private def expireMetricsOlderThan(currentTimeNanos: Long, olderThanNanos: Long): Unit = { + private def expireMetricsOlderThan(currentTimeNanos: Long, olderThanNanos: Long): Unit = stats.foreach { case (address, samples) => val currentSamples = samples.dropWhile(sample => currentTimeNanos - sample.receivedNanos > olderThanNanos) @@ -494,27 +532,45 @@ class Autoscaler(settings: AutoscalerSettings, scalerFactory: Autoscaler.ScalerF stats += (address -> currentSamples) } } - } - private def logReport(summary: Summary): Unit = { + private def logReport(summary: Summary): Unit = if (log.isDebugEnabled) { reportHeaders += 1 if (reportHeaders % 10 == 1) { - log.debug("%8s %8s %8s %8s %8s %8s %8s %8s %8s %8s".format("Members", "Scale", "Ready", "UF cncy", "UF lat", "Req cncy", "Req lat", "Req rate", "DB cncy", "DB lat")) + log.debug( + "%8s %8s %8s %8s %8s %8s %8s %8s %8s %8s".format("Members", + "Scale", + "Ready", + "UF cncy", + "UF lat", + "Req cncy", + "Req lat", + "Req rate", + "DB cncy", + "DB lat") + ) } - log.debug("%8d %8d %8d %8.2f %8.1f %8.2f %8.1f %8.1f %8.2f %8.1f".format( - summary.clusterMembers, - deployment.fold(0)(_.scale), deployment.fold(0)(_.ready), - summary.userFunctionConcurrency, summary.userFunctionTimeMillis, summary.requestConcurrency, - summary.requestTimeMillis, summary.requestRate, summary.databaseConcurrency, summary.databaseTimeMillis)) + log.debug( + "%8d %8d %8d %8.2f %8.1f %8.2f %8.1f %8.1f %8.2f %8.1f".format( + summary.clusterMembers, + deployment.fold(0)(_.scale), + deployment.fold(0)(_.ready), + summary.userFunctionConcurrency, + summary.userFunctionTimeMillis, + summary.requestConcurrency, + summary.requestTimeMillis, + summary.requestRate, + summary.databaseConcurrency, + summary.databaseTimeMillis + ) + ) } - } } /** - * Facade over Akka cluster membership, so it can be substituted for unit testing purposes. - */ + * Facade over Akka cluster membership, so it can be substituted for unit testing purposes. + */ trait ClusterMembershipFacade { def upMembers: Iterable[UniqueAddress] def upMemberCount: Int = upMembers.size diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/autoscaler/KubernetesDeploymentScaler.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/autoscaler/KubernetesDeploymentScaler.scala index d9d8c0046..921ee908d 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/autoscaler/KubernetesDeploymentScaler.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/autoscaler/KubernetesDeploymentScaler.scala @@ -23,60 +23,60 @@ import scala.util.control.NonFatal object KubernetesDeploymentScaler { private final case class DeploymentList( - items: List[Deployment] + items: List[Deployment] ) private final case class Deployment( - metadata: Metadata, - spec: DeploymentSpec, - status: Option[DeploymentStatus] + metadata: Metadata, + spec: DeploymentSpec, + status: Option[DeploymentStatus] ) private final case class Metadata( - name: String, - namespace: String, + name: String, + namespace: String ) private final case class DeploymentSpec( - replicas: Int + replicas: Int ) private final case class DeploymentStatus( - readyReplicas: Option[Int], - conditions: Option[List[DeploymentCondition]] + readyReplicas: Option[Int], + conditions: Option[List[DeploymentCondition]] ) /** - * This gives the status of upgrades. - */ + * This gives the status of upgrades. 
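// Illustrative sketch only, not part of this patch: how the Kubernetes "Progressing" condition
// described here can be used to detect an in-flight upgrade. The trimmed case classes stand in for
// the ones defined below; the rule mirrors isUpgrading further down in this file - any reason other
// than "NewReplicaSetAvailable" on the "Progressing" condition means an upgrade is in progress.
object ProgressingConditionExample extends App {
  final case class Condition(`type`: String, reason: String)
  final case class Status(conditions: Option[List[Condition]])

  def isUpgrading(status: Option[Status]): Boolean =
    !status.exists(_.conditions.exists(_.exists { condition =>
      condition.`type` == "Progressing" && condition.reason == "NewReplicaSetAvailable"
    }))

  println(isUpgrading(Some(Status(Some(List(Condition("Progressing", "NewReplicaSetAvailable"))))))) // false
  println(isUpgrading(Some(Status(Some(List(Condition("Progressing", "NewReplicaSetCreated")))))))   // true
  println(isUpgrading(None))                                                                          // true
}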
+ */ private final val DeploymentProgressingConditionType = "Progressing" /** - * There are at least three reasons for the progressing type, - * NewReplicaSetAvailable - the deployment is ready and stable, no upgrade in progress - * NewReplicaSetCreated - an upgrade has started with the new replica set being created - * NewReplicaSetUpdated - an upgrade is in progress with the new replica set providing some nodes - * So, if the reason isn't the first one, then we're currently upgrading - */ + * There are at least three reasons for the progressing type, + * NewReplicaSetAvailable - the deployment is ready and stable, no upgrade in progress + * NewReplicaSetCreated - an upgrade has started with the new replica set being created + * NewReplicaSetUpdated - an upgrade is in progress with the new replica set providing some nodes + * So, if the reason isn't the first one, then we're currently upgrading + */ private final val DeploymentProgressingReasonNotUpgrading = "NewReplicaSetAvailable" private final case class DeploymentCondition( - `type`: String, - reason: String + `type`: String, + reason: String ) private final case class Scale( - metadata: Metadata, - spec: ScaleSpec, - status: Option[ScaleStatus] + metadata: Metadata, + spec: ScaleSpec, + status: Option[ScaleStatus] ) private final case class ScaleSpec( - replicas: Option[Int] + replicas: Option[Int] ) private final case class ScaleStatus( - replicas: Option[Int] + replicas: Option[Int] ) private object JsonFormat extends SprayJsonSupport with DefaultJsonProtocol { @@ -111,24 +111,25 @@ class KubernetesDeploymentScaler(autoscaler: ActorRef) extends Actor with ActorL // A lot of the below is copied shamelessly from KubernetesApiServiceDiscovery private[this] final val http = Http()(context.system) private[this] final val kubernetesSettings = Settings(context.system) - private[this] final val clusterBootstrapSettings = ClusterBootstrapSettings(context.system.settings.config, context.system.log) + private[this] final val clusterBootstrapSettings = + ClusterBootstrapSettings(context.system.settings.config, context.system.log) private[this] final val httpsTrustStoreConfig = TrustStoreConfig(data = None, filePath = Some(kubernetesSettings.apiCaPath)).withStoreType("PEM") private[this] final val httpsConfig = AkkaSSLConfig()(context.system).mapSettings( - s => s.withTrustManagerConfig(s.trustManagerConfig.withTrustStoreConfigs(Seq(httpsTrustStoreConfig)))) + s => s.withTrustManagerConfig(s.trustManagerConfig.withTrustStoreConfigs(Seq(httpsTrustStoreConfig))) + ) private[this] final val httpsContext = http.createClientHttpsContext(httpsConfig) private[this] final val apiToken = readConfigVarFromFilesystem(kubernetesSettings.apiTokenPath, "api-token") getOrElse "" private[this] final val deployNamespace = kubernetesSettings.podNamespace orElse readConfigVarFromFilesystem(kubernetesSettings.podNamespacePath, "pod-namespace") getOrElse "default" - private[this] final val serviceName = clusterBootstrapSettings.contactPointDiscovery.serviceName getOrElse { - throw new RuntimeException("No service name defined") - } + private[this] final val serviceName = clusterBootstrapSettings.contactPointDiscovery.serviceName getOrElse { + throw new RuntimeException("No service name defined") + } private[this] final val host = sys.env(kubernetesSettings.apiServiceHostEnvName) private[this] final val port = sys.env(kubernetesSettings.apiServicePortEnvName).toInt private[this] final val appsV1ApiPath = Uri.Path / "apis" / "apps" / "v1" / "namespaces" / 
deployNamespace - // Rather than polling, we could watch the resource timers.startPeriodicTimer("tick", Tick, 20.seconds) self ! Tick @@ -160,8 +161,11 @@ class KubernetesDeploymentScaler(autoscaler: ActorRef) extends Actor with ActorL log.warning(s"No deployments found that match selector '{}'", kubernetesSettings.podLabelSelector(serviceName)) case DeploymentList(deps) => - log.warning(s"Got back multiple deployments that match selector '{}': {}, using the first one.", - kubernetesSettings.podLabelSelector(serviceName), deps.map(_.metadata.name).mkString(",")) + log.warning( + s"Got back multiple deployments that match selector '{}': {}, using the first one.", + kubernetesSettings.podLabelSelector(serviceName), + deps.map(_.metadata.name).mkString(",") + ) updateDeployment(deps.head) @@ -184,16 +188,20 @@ class KubernetesDeploymentScaler(autoscaler: ActorRef) extends Actor with ActorL context become running(dep) } - private def isUpgrading(dep: Deployment) = { - !dep.status.exists(_.conditions.exists(_.exists(condition => - condition.`type` == DeploymentProgressingConditionType && - condition.reason == DeploymentProgressingReasonNotUpgrading - ))) - } + private def isUpgrading(dep: Deployment) = + !dep.status.exists( + _.conditions.exists( + _.exists( + condition => + condition.`type` == DeploymentProgressingConditionType && + condition.reason == DeploymentProgressingReasonNotUpgrading + ) + ) + ) /** - * This uses blocking IO, and so should only be used to read configuration at startup. - */ + * This uses blocking IO, and so should only be used to read configuration at startup. + */ private def readConfigVarFromFilesystem(path: String, name: String): Option[String] = { val file = Paths.get(path) if (Files.exists(file)) { @@ -210,7 +218,9 @@ class KubernetesDeploymentScaler(autoscaler: ActorRef) extends Actor with ActorL } } - private def makeRequest[T](request: HttpRequest)(implicit unmarshaller: Unmarshaller[HttpEntity.Strict, T]): Future[T] = { + private def makeRequest[T]( + request: HttpRequest + )(implicit unmarshaller: Unmarshaller[HttpEntity.Strict, T]): Future[T] = { log.debug("Making request {}", request) for { response <- http.singleRequest(request, httpsContext) @@ -223,17 +233,20 @@ class KubernetesDeploymentScaler(autoscaler: ActorRef) extends Actor with ActorL unmarshalled.failed.foreach { t => log.warning( "Failed to unmarshal Kubernetes API response. Status code: [{}]; Response body: [{}]. Ex: [{}]", - response.status.value, entity, t.getMessage) + response.status.value, + entity, + t.getMessage + ) } unmarshalled case StatusCodes.Forbidden => Unmarshal(entity).to[String].foreach { body => log.warning("Forbidden to communicate with Kubernetes API server; check RBAC settings. Response: [{}]", - body) + body) } Future.failed( - new RuntimeException( - "Forbidden when communicating with the Kubernetes API. Check RBAC settings.")) + new RuntimeException("Forbidden when communicating with the Kubernetes API. 
Check RBAC settings.") + ) case other => Unmarshal(entity).to[String].foreach { body => log.warning( diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/autoscaler/NoAutoscaler.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/autoscaler/NoAutoscaler.scala index 9b54418a4..dd2d85d6c 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/autoscaler/NoAutoscaler.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/autoscaler/NoAutoscaler.scala @@ -3,13 +3,13 @@ package io.cloudstate.proxy.autoscaler import akka.actor.Actor /** - * An autoscaler that does nothing. - */ + * An autoscaler that does nothing. + */ class NoAutoscaler extends Actor { override def receive: Receive = { case _: AutoscalerMetrics => - // Ignore + // Ignore } } diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/autoscaler/NoScaler.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/autoscaler/NoScaler.scala index 44fee43f7..96b36ed78 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/autoscaler/NoScaler.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/autoscaler/NoScaler.scala @@ -6,8 +6,8 @@ import akka.cluster.{Cluster, MemberStatus} import Autoscaler.{Deployment, Scale} /** - * A scaler that doesn't do anything other than reports current cluster size. - */ + * A scaler that doesn't do anything other than reports current cluster size. + */ class NoScaler(autoscaler: ActorRef) extends Actor with ActorLogging { private[this] final val cluster = Cluster(context.system) @@ -15,18 +15,16 @@ class NoScaler(autoscaler: ActorRef) extends Actor with ActorLogging { cluster.subscribe(self, classOf[ClusterDomainEvent]) sendDeployment() - override def postStop(): Unit = { + override def postStop(): Unit = cluster.unsubscribe(self) - } - private def sendDeployment(): Unit = { + private def sendDeployment(): Unit = autoscaler ! Deployment( name = context.system.name, ready = cluster.state.members.count(c => c.status == MemberStatus.Up || c.status == MemberStatus.WeaklyUp), scale = cluster.state.members.size, upgrading = false ) - } override def receive: Receive = { case Scale(_, scale) => diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/crdt/CrdtEntity.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/crdt/CrdtEntity.scala index a7f9cfd5d..ac5a5b2ec 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/crdt/CrdtEntity.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/crdt/CrdtEntity.scala @@ -12,7 +12,14 @@ import akka.stream.{Materializer, OverflowStrategy} import akka.stream.scaladsl.{Keep, Sink, Source} import akka.util.Timeout import io.cloudstate.protocol.crdt._ -import io.cloudstate.protocol.entity.{ClientAction, Command, EntityDiscovery, Failure, StreamCancelled, UserFunctionError} +import io.cloudstate.protocol.entity.{ + ClientAction, + Command, + EntityDiscovery, + Failure, + StreamCancelled, + UserFunctionError +} import io.cloudstate.proxy.crdt.WireTransformer.CrdtChange import io.cloudstate.proxy.entity.{EntityCommand, UserFunctionReply} @@ -23,9 +30,9 @@ object CrdtEntity { private final case class Relay(actorRef: ActorRef) /** - * This is sent by Akka streams when the gRPC stream to the user function has closed - which typically shouldn't - * happen unless it crashes for some reason. - */ + * This is sent by Akka streams when the gRPC stream to the user function has closed - which typically shouldn't + * happen unless it crashes for some reason. 
+ */ final case object EntityStreamClosed final case object Stop @@ -33,17 +40,19 @@ object CrdtEntity { private final case class AnyKey(_id: String) extends Key[ReplicatedData](_id) final case class Configuration( - serviceName: String, - userFunctionName: String, - passivationTimeout: Timeout, - sendQueueSize: Int, - initialReadTimeout: FiniteDuration, - writeTimeout: FiniteDuration + serviceName: String, + userFunctionName: String, + passivationTimeout: Timeout, + sendQueueSize: Int, + initialReadTimeout: FiniteDuration, + writeTimeout: FiniteDuration ) private final case class InitiatorReply(commandId: Long, userFunctionReply: UserFunctionReply, endStream: Boolean) - def props(client: Crdt, configuration: CrdtEntity.Configuration, entityDiscovery: EntityDiscovery)(implicit mat: Materializer) = + def props(client: Crdt, configuration: CrdtEntity.Configuration, entityDiscovery: EntityDiscovery)( + implicit mat: Materializer + ) = Props(new CrdtEntity(client, configuration, entityDiscovery)) private final case class Initiator(commandId: Long, actorRef: ActorRef, streamed: Boolean) @@ -51,49 +60,53 @@ object CrdtEntity { private final case class StreamedCommandSourceMaterialized(commandId: Long, command: EntityCommand) /** - * We send this to ourselves when a streamed command stream terminates. - */ + * We send this to ourselves when a streamed command stream terminates. + */ private final case class StreamEnded(commandId: Long) extends DeadLetterSuppression } /** - * Optimization idea: Rather than try and calculate changes, implement a custom ReplicatedData type that wraps - * the rest, and whenever update or mergeDelta is called, keep track of the changes in a shared delta tracking - * object. That object should get set by this actor, and once present, all calls to merge/mergeDelta/update etc - * will add changes to the delta tracking object. - * - * So here's the general principle of how this actor works. - * - * - The actor first establishes a stream to the user function as well as fetches the current state of the entity. - * Until both of those are returned, it stashes commands, and once it has both, it unstashes. - * - When a command is received, if the command is streamed, we need to respond with a Source that materializes to - * an actor ref that we can send replies to, so that gets done first, otherwise we go straight to command handling - * logic. - * - The actor seeks to keep its state in sync with the user functions state. The user functions state is not a CRDT, - * so at any one time, only one of them may be allowed to update their state, otherwise concurrent updates won't be - * able to be reconciled. There are two times that the user function is allowed to update its state, one is while - * it's handling a command, the other is while it's handling a stream cancelled. If it is not currently handling a - * command or a stream cancelled, then the actor is free to update the state, and push deltas to the user function - * to keep it in sync. The outstandingMutatingRequests variable is used to track which mode we are in, if greater - * than zero, we are not allowed to update our state except on direction by the user function. - * - We use a replicator subscription to receive state updates. If outstandingMutatingRequests is not zero, we ignore - * any change events from that subscription, otherwise, we convert them to deltas and forward them to the user - * function. 
- * - When we receive a command, we do the following: - * - Increment outstanding mutating operations - * - Forward the command to the user function - * - When we receive a reply from the user function, we do the following: - * - Perform any update as required by the user function - * - Send a reply back to the stream/initiator of the command - * - If there is more than one outstanding mutating operation, we just decrement it, and we're done. - * - Otherwise, we don't decrement yet. Instead, because we may have ignored some updates while the operations were - * underway, we do a local get on the replicator. - * - When we get the response (either success or not found) we decrement outstanding mutating operations, and then - * check if it's zero (a command have have arrived while we were doing the read), and if it is, then we calculate - * and send any deltas found, and we're done. - * - Similar logic is also used for stream cancelled messages. - */ -final class CrdtEntity(client: Crdt, configuration: CrdtEntity.Configuration, entityDiscovery: EntityDiscovery)(implicit mat: Materializer) extends Actor with Stash with ActorLogging { + * Optimization idea: Rather than try and calculate changes, implement a custom ReplicatedData type that wraps + * the rest, and whenever update or mergeDelta is called, keep track of the changes in a shared delta tracking + * object. That object should get set by this actor, and once present, all calls to merge/mergeDelta/update etc + * will add changes to the delta tracking object. + * + * So here's the general principle of how this actor works. + * + * - The actor first establishes a stream to the user function as well as fetches the current state of the entity. + * Until both of those are returned, it stashes commands, and once it has both, it unstashes. + * - When a command is received, if the command is streamed, we need to respond with a Source that materializes to + * an actor ref that we can send replies to, so that gets done first, otherwise we go straight to command handling + * logic. + * - The actor seeks to keep its state in sync with the user functions state. The user functions state is not a CRDT, + * so at any one time, only one of them may be allowed to update their state, otherwise concurrent updates won't be + * able to be reconciled. There are two times that the user function is allowed to update its state, one is while + * it's handling a command, the other is while it's handling a stream cancelled. If it is not currently handling a + * command or a stream cancelled, then the actor is free to update the state, and push deltas to the user function + * to keep it in sync. The outstandingMutatingRequests variable is used to track which mode we are in, if greater + * than zero, we are not allowed to update our state except on direction by the user function. + * - We use a replicator subscription to receive state updates. If outstandingMutatingRequests is not zero, we ignore + * any change events from that subscription, otherwise, we convert them to deltas and forward them to the user + * function. + * - When we receive a command, we do the following: + * - Increment outstanding mutating operations + * - Forward the command to the user function + * - When we receive a reply from the user function, we do the following: + * - Perform any update as required by the user function + * - Send a reply back to the stream/initiator of the command + * - If there is more than one outstanding mutating operation, we just decrement it, and we're done. 
+ * - Otherwise, we don't decrement yet. Instead, because we may have ignored some updates while the operations were + * underway, we do a local get on the replicator. + * - When we get the response (either success or not found) we decrement outstanding mutating operations, and then + * check if it's zero (a command have have arrived while we were doing the read), and if it is, then we calculate + * and send any deltas found, and we're done. + * - Similar logic is also used for stream cancelled messages. + */ +final class CrdtEntity(client: Crdt, configuration: CrdtEntity.Configuration, entityDiscovery: EntityDiscovery)( + implicit mat: Materializer +) extends Actor + with Stash + with ActorLogging { import CrdtEntity._ @@ -110,7 +123,7 @@ final class CrdtEntity(client: Crdt, configuration: CrdtEntity.Configuration, en private[this] final var relay: ActorRef = _ private[this] final var state: Option[ReplicatedData] = _ - private[this] final var idCounter = 0l + private[this] final var idCounter = 0L // This is used to know whether there are currently outstanding operations on the user function where it could change // its state. To ensure we stay in sync, we don't respond to any entity changes during this time. private[this] final var outstandingMutatingOperations = 0 @@ -129,11 +142,16 @@ final class CrdtEntity(client: Crdt, configuration: CrdtEntity.Configuration, en log.debug("Started CRDT entity for service {} with id {}", configuration.serviceName, entityId) override def preStart(): Unit = { - client.handle(Source.actorRef[CrdtStreamIn](configuration.sendQueueSize, OverflowStrategy.fail) - .mapMaterializedValue { ref => - self ! Relay(ref) - NotUsed - }).runWith(Sink.actorRef(self, EntityStreamClosed)) + client + .handle( + Source + .actorRef[CrdtStreamIn](configuration.sendQueueSize, OverflowStrategy.fail) + .mapMaterializedValue { ref => + self ! Relay(ref) + NotUsed + } + ) + .runWith(Sink.actorRef(self, EntityStreamClosed)) // We initially do a read to get the initial state. Try a majority read first in case this is a new node. replicator ! Get(key, ReadMajority(configuration.initialReadTimeout)) @@ -151,7 +169,7 @@ final class CrdtEntity(client: Crdt, configuration: CrdtEntity.Configuration, en relay = r maybeStart() - case s@GetSuccess(_, _) => + case s @ GetSuccess(_, _) => state = Some(s.dataValue) maybeStart() @@ -182,24 +200,26 @@ final class CrdtEntity(client: Crdt, configuration: CrdtEntity.Configuration, en replicator ! Unsubscribe(key, self) } - private def maybeStart() = { - + private def maybeStart() = if (relay != null && state != null) { log.debug("{} - Received relay and state, starting.", entityId) val wireState = state.map(WireTransformer.toWireState) - sendToRelay(CrdtStreamIn.Message.Init(CrdtInit( - serviceName = configuration.serviceName, - entityId = entityId, - state = wireState - ))) + sendToRelay( + CrdtStreamIn.Message.Init( + CrdtInit( + serviceName = configuration.serviceName, + entityId = entityId, + state = wireState + ) + ) + ) context become running replicator ! 
Subscribe(key, self) unstashAll() } - } private def maybeSendAndUpdateState(data: ReplicatedData): Unit = { state match { @@ -223,11 +243,11 @@ final class CrdtEntity(client: Crdt, configuration: CrdtEntity.Configuration, en private def running: Receive = { - case c@Changed(_) if outstandingMutatingOperations > 0 => + case c @ Changed(_) if outstandingMutatingOperations > 0 => // As long as we have outstanding ops, we ignore any changes, to ensure that we never have simultaneous // changes of the actor state and the user function state - case c@Changed(_) => + case c @ Changed(_) => maybeSendAndUpdateState(c.dataValue) case Deleted(_) => @@ -240,17 +260,19 @@ final class CrdtEntity(client: Crdt, configuration: CrdtEntity.Configuration, en val commandId = idCounter if (command.streamed) { // Delay handling the command until the source we return is materialized - sender() ! Source.actorRef(configuration.sendQueueSize, OverflowStrategy.fail) + sender() ! Source + .actorRef(configuration.sendQueueSize, OverflowStrategy.fail) .watchTermination()(Keep.both) - .mapMaterializedValue { case (streamActorRef, terminated) => - // Send from the stream so that replies go to the stream - self.tell(StreamedCommandSourceMaterialized(commandId, command), streamActorRef) - terminated.onComplete { result => - // If it's a fail, that can only have been generated by us, so ignore it. - if (result.isSuccess) { - self ! StreamEnded(commandId) + .mapMaterializedValue { + case (streamActorRef, terminated) => + // Send from the stream so that replies go to the stream + self.tell(StreamedCommandSourceMaterialized(commandId, command), streamActorRef) + terminated.onComplete { result => + // If it's a fail, that can only have been generated by us, so ignore it. + if (result.isSuccess) { + self ! 
StreamEnded(commandId) + } } - } } } else { handleCommand(commandId, command) @@ -260,18 +282,20 @@ final class CrdtEntity(client: Crdt, configuration: CrdtEntity.Configuration, en handleCommand(commandId, command) case CrdtStreamOut(CrdtStreamOut.Message.Reply(reply)) => - val userFunctionReply = UserFunctionReply(reply.clientAction, reply.sideEffects) outstanding.get(reply.commandId) match { case Some(Initiator(_, actorRef, streamed)) => - if (streamed && reply.streamed) { if (closingStreams(reply.commandId)) { - sendToRelay(CrdtStreamIn.Message.StreamCancelled(StreamCancelled( - entityId, - reply.commandId - ))) + sendToRelay( + CrdtStreamIn.Message.StreamCancelled( + StreamCancelled( + entityId, + reply.commandId + ) + ) + ) closingStreams -= reply.commandId } } else if (streamed) { @@ -305,12 +329,16 @@ final class CrdtEntity(client: Crdt, configuration: CrdtEntity.Configuration, en } } case None => - entityDiscovery.reportError(UserFunctionError("Received streamed message for unknown command id: " + message.commandId)) + entityDiscovery.reportError( + UserFunctionError("Received streamed message for unknown command id: " + message.commandId) + ) } case CrdtStreamOut(CrdtStreamOut.Message.StreamCancelledResponse(response)) => - performAction(response.commandId, response.stateAction.getOrElse(CrdtStateAction.defaultInstance), - UserFunctionReply(None, response.sideEffects), false) + performAction(response.commandId, + response.stateAction.getOrElse(CrdtStateAction.defaultInstance), + UserFunctionReply(None, response.sideEffects), + false) case StreamEnded(commandId) => streamedCalls.get(commandId) match { @@ -320,13 +348,13 @@ final class CrdtEntity(client: Crdt, configuration: CrdtEntity.Configuration, en closingStreams += commandId streamedCalls -= commandId case None => - // Ignore, we will get a stream ended command both when the client cancels, and when we close. + // Ignore, we will get a stream ended command both when the client cancels, and when we close. 
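// Illustrative sketch only, not part of this patch: the watchTermination pattern used above for
// streamed commands. Keep.both exposes both the materialized ActorRef and a Future[Done] that
// completes when the stream terminates (normally or by downstream cancel); CrdtEntity uses that
// future to send itself StreamEnded(commandId). Names here are assumptions for the example.
object WatchTerminationExample extends App {
  import akka.actor.{ActorSystem, Status}
  import akka.stream.{ActorMaterializer, OverflowStrategy}
  import akka.stream.scaladsl.{Keep, Sink, Source}
  import scala.concurrent.ExecutionContext.Implicits.global

  implicit val system: ActorSystem = ActorSystem("sketch")
  implicit val mat: ActorMaterializer = ActorMaterializer()

  val (ref, terminated) = Source
    .actorRef[String](16, OverflowStrategy.fail)
    .watchTermination()(Keep.both)
    .toMat(Sink.foreach(println))(Keep.left)
    .run()

  terminated.onComplete { result =>
    println(s"stream terminated: $result") // Success(Done) here; a failure would mean a stream error
    system.terminate()
  }

  ref ! "reply 1"
  ref ! Status.Success(akka.Done) // complete the stream from the producing side
}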
} case UpdateSuccess(_, Some(InitiatorReply(commandId, userFunctionReply, endStream))) => sendReplyToInitiator(commandId, userFunctionReply, endStream) - case success@GetSuccess(_, _) => + case success @ GetSuccess(_, _) => outstandingMutatingOperations -= 1 if (outstandingMutatingOperations == 0) { maybeSendAndUpdateState(success.dataValue) @@ -388,13 +416,17 @@ final class CrdtEntity(client: Crdt, configuration: CrdtEntity.Configuration, en outstanding = outstanding.updated(commandId, Initiator(commandId, sender(), command.streamed)) outstandingMutatingOperations += 1 - sendToRelay(CrdtStreamIn.Message.Command(Command( - entityId = entityId, - id = commandId, - name = command.name, - payload = command.payload, - streamed = command.streamed - ))) + sendToRelay( + CrdtStreamIn.Message.Command( + Command( + entityId = entityId, + id = commandId, + name = command.name, + payload = command.payload, + streamed = command.streamed + ) + ) + ) } private def sendReplyToInitiator(commandId: Long, reply: UserFunctionReply, terminate: Boolean) = { @@ -421,7 +453,13 @@ final class CrdtEntity(client: Crdt, configuration: CrdtEntity.Configuration, en } private def failCommand(commandId: Long, message: String): Unit = { - val reply = UserFunctionReply(Some(ClientAction(ClientAction.Action.Failure(Failure(description = "Failed to update CRDT at requested write consistency"))))) + val reply = UserFunctionReply( + Some( + ClientAction( + ClientAction.Action.Failure(Failure(description = "Failed to update CRDT at requested write consistency")) + ) + ) + ) sendReplyToInitiator(commandId, reply, true) } @@ -431,7 +469,10 @@ final class CrdtEntity(client: Crdt, configuration: CrdtEntity.Configuration, en crash("Failed to update CRDT at requested write consistency") } - private def performAction(commandId: Long, stateAction: CrdtStateAction, userFunctionReply: UserFunctionReply, endStream: Boolean)= { + private def performAction(commandId: Long, + stateAction: CrdtStateAction, + userFunctionReply: UserFunctionReply, + endStream: Boolean) = stateAction.action match { case CrdtStateAction.Action.Empty => sendReplyToInitiator(commandId, userFunctionReply, false) @@ -442,12 +483,16 @@ final class CrdtEntity(client: Crdt, configuration: CrdtEntity.Configuration, en } else { val crdt = WireTransformer.stateToCrdt(create) state = Some(WireTransformer.stateToCrdt(create)) - replicator ! Update(key, crdt, toDdataWriteConsistency(stateAction.writeConsistency), - Some(InitiatorReply(commandId, userFunctionReply, endStream)))(identity) + replicator ! Update(key, + crdt, + toDdataWriteConsistency(stateAction.writeConsistency), + Some(InitiatorReply(commandId, userFunctionReply, endStream)))(identity) } case CrdtStateAction.Action.Delete(_) => - replicator ! Delete(key, toDdataWriteConsistency(stateAction.writeConsistency), Some(InitiatorReply(commandId, userFunctionReply, endStream))) + replicator ! Delete(key, + toDdataWriteConsistency(stateAction.writeConsistency), + Some(InitiatorReply(commandId, userFunctionReply, endStream))) state = None context become deleted replicator ! Unsubscribe(key, self) @@ -460,16 +505,17 @@ final class CrdtEntity(client: Crdt, configuration: CrdtEntity.Configuration, en // Apply to our own state first state = Some(modify(state.getOrElse(initial))) // And then to the replicator - replicator ! Update(key, initial, toDdataWriteConsistency(stateAction.writeConsistency), - Some(InitiatorReply(commandId, userFunctionReply, endStream)))(modify) + replicator ! 
Update(key, + initial, + toDdataWriteConsistency(stateAction.writeConsistency), + Some(InitiatorReply(commandId, userFunctionReply, endStream)))(modify) } catch { case e: Exception => crash(e.getMessage, Some(e)) } } - } - private def operationFinished(): Unit = { + private def operationFinished(): Unit = if (stopping) { if (outstanding.isEmpty) { context.stop(self) @@ -483,10 +529,11 @@ final class CrdtEntity(client: Crdt, configuration: CrdtEntity.Configuration, en replicator ! Get(key, ReadLocal) } } - } private def crash(message: String, cause: Option[Throwable] = None): Unit = { - val reply = UserFunctionReply(Some(ClientAction(ClientAction.Action.Failure(Failure(description = "Entity terminating"))))) + val reply = UserFunctionReply( + Some(ClientAction(ClientAction.Action.Failure(Failure(description = "Entity terminating")))) + ) outstanding.values.foreach { initiator => initiator.actorRef ! reply streamedCalls -= initiator.commandId @@ -508,7 +555,6 @@ final class CrdtEntity(client: Crdt, configuration: CrdtEntity.Configuration, en throw error } - private def toDdataWriteConsistency(wc: CrdtWriteConsistency): WriteConsistency = wc match { case CrdtWriteConsistency.LOCAL => WriteLocal case CrdtWriteConsistency.MAJORITY => WriteMajority(configuration.writeTimeout) @@ -522,14 +568,16 @@ final class CrdtEntity(client: Crdt, configuration: CrdtEntity.Configuration, en relay = r sendDelete() - case c@Changed(_) => + case c @ Changed(_) => // Ignore case Deleted(_) => // Ignore, we know. case EntityCommand(_, _, _, streamed) => - val reply = UserFunctionReply(Some(ClientAction(ClientAction.Action.Failure(Failure(description = "Entity deleted"))))) + val reply = UserFunctionReply( + Some(ClientAction(ClientAction.Action.Failure(Failure(description = "Entity deleted")))) + ) if (streamed) { sender() ! Source.single(reply) sender() ! Status.Success(Done) @@ -554,19 +602,23 @@ final class CrdtEntity(client: Crdt, configuration: CrdtEntity.Configuration, en actorRef ! Status.Success(Done) streamedCalls -= message.commandId case None => - entityDiscovery.reportError(UserFunctionError("Received streamed message for unknown command id: " + message.commandId)) + entityDiscovery.reportError( + UserFunctionError("Received streamed message for unknown command id: " + message.commandId) + ) } case CrdtStreamOut(CrdtStreamOut.Message.StreamCancelledResponse(response)) => if (!closingStreams.contains(response.commandId)) { crash("Received stream cancelled response for stream that's not closing: " + response.commandId) } else { - performAction(response.commandId, response.stateAction.getOrElse(CrdtStateAction.defaultInstance), - UserFunctionReply(None, response.sideEffects), false) + performAction(response.commandId, + response.stateAction.getOrElse(CrdtStateAction.defaultInstance), + UserFunctionReply(None, response.sideEffects), + false) } case StreamEnded(commandId) => - // Ignore, nothing to do + // Ignore, nothing to do case UpdateSuccess(_, Some(InitiatorReply(commandId, userFunctionReply, _))) => sendReplyToInitiator(commandId, userFunctionReply, true) @@ -610,9 +662,7 @@ final class CrdtEntity(client: Crdt, configuration: CrdtEntity.Configuration, en context.stop(self) } - private def sendToRelay(message: CrdtStreamIn.Message): Unit = { + private def sendToRelay(message: CrdtStreamIn.Message): Unit = relay ! 
CrdtStreamIn(message) - } - } diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/crdt/CrdtEntityManager.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/crdt/CrdtEntityManager.scala index 67c364593..4dd32a0f0 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/crdt/CrdtEntityManager.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/crdt/CrdtEntityManager.scala @@ -56,8 +56,9 @@ final class CrdtEntityManager(entityProps: Props) extends Actor with ActorLoggin if (queue.nonEmpty) { val entity = startEntity(actor.path.name) - queue.foreach { case (command, initiator) => - entity.tell(command, initiator) + queue.foreach { + case (command, initiator) => + entity.tell(command, initiator) } } } @@ -76,9 +77,8 @@ final class CrdtEntityManager(entityProps: Props) extends Actor with ActorLoggin } } - private def startEntity(entityId: String) = { + private def startEntity(entityId: String) = context.watch(context.actorOf(entityProps, entityId)) - } private def stopping(reportStopped: ActorRef): Receive = { case Terminated(actor) => @@ -90,8 +90,9 @@ final class CrdtEntityManager(entityProps: Props) extends Actor with ActorLoggin if (queue.nonEmpty) { // Restart so we can process just these messages val entity = startEntity(actor.path.name) - queue.foreach { case (command, initiator) => - entity.tell(command, initiator) + queue.foreach { + case (command, initiator) => + entity.tell(command, initiator) } // And then stop again entity ! CrdtEntity.Stop @@ -104,10 +105,10 @@ final class CrdtEntityManager(entityProps: Props) extends Actor with ActorLoggin } case Passivate => - // Ignore, we've already told all entities to stop + // Ignore, we've already told all entities to stop case Shutdown => - // Ignore, we're already shutting down + // Ignore, we're already shutting down case command: EntityCommand => log.warning("Received command {} for {} while shutting down, dropping.", command.name, command.entityId) diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/crdt/CrdtSerializers.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/crdt/CrdtSerializers.scala index f4f229c38..a2e8de7ce 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/crdt/CrdtSerializers.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/crdt/CrdtSerializers.scala @@ -6,7 +6,9 @@ import akka.serialization.{BaseSerializer, SerializerWithStringManifest} import com.google.protobuf.UnsafeByteOperations import io.cloudstate.proxy.crdt.protobufs.{CrdtVote, CrdtVoteEntry} -class CrdtSerializers(override val system: ExtendedActorSystem) extends SerializerWithStringManifest with BaseSerializer { +class CrdtSerializers(override val system: ExtendedActorSystem) + extends SerializerWithStringManifest + with BaseSerializer { override def manifest(o: AnyRef): String = o match { case v: Vote => "V" } @@ -21,9 +23,17 @@ class CrdtSerializers(override val system: ExtendedActorSystem) extends Serializ } override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef = manifest match { - case "V" => Vote(CrdtVote.parseFrom(bytes).entries.map { entry => - (UniqueAddress(AddressFromURIString(entry.address), entry.uid), BigInt(entry.value.toByteArray)) - }.toMap, None) + case "V" => + Vote( + CrdtVote + .parseFrom(bytes) + .entries + .map { entry => + (UniqueAddress(AddressFromURIString(entry.address), entry.uid), BigInt(entry.value.toByteArray)) + } + .toMap, + None + ) case _ => throw new RuntimeException(s"Don't know how to deserialize manifest [$manifest]") } } diff --git 
a/proxy/core/src/main/scala/io/cloudstate/proxy/crdt/CrdtSupportFactory.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/crdt/CrdtSupportFactory.scala index 20da20b07..6789dce18 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/crdt/CrdtSupportFactory.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/crdt/CrdtSupportFactory.scala @@ -21,27 +21,39 @@ import scala.collection.JavaConverters._ import scala.concurrent.{ExecutionContext, Future} import scala.concurrent.duration._ -class CrdtSupportFactory(system: ActorSystem, config: EntityDiscoveryManager.Configuration, discovery: EntityDiscovery, - grpcClientSettings: GrpcClientSettings, concurrencyEnforcer: ActorRef, statsCollector: ActorRef)(implicit ec: ExecutionContext, mat: Materializer) extends EntityTypeSupportFactory { +class CrdtSupportFactory(system: ActorSystem, + config: EntityDiscoveryManager.Configuration, + discovery: EntityDiscovery, + grpcClientSettings: GrpcClientSettings, + concurrencyEnforcer: ActorRef, + statsCollector: ActorRef)(implicit ec: ExecutionContext, mat: Materializer) + extends EntityTypeSupportFactory { private[this] final val log = Logging.getLogger(system, this.getClass) private[this] final val crdtClient = CrdtClient(grpcClientSettings) override def buildEntityTypeSupport(entity: Entity, serviceDescriptor: ServiceDescriptor): EntityTypeSupport = { - val crdtEntityConfig = CrdtEntity.Configuration(entity.serviceName, entity.persistenceId, config.passivationTimeout, config.relayOutputBufferSize, 3.seconds, 5.seconds) + val crdtEntityConfig = CrdtEntity.Configuration(entity.serviceName, + entity.persistenceId, + config.passivationTimeout, + config.relayOutputBufferSize, + 3.seconds, + 5.seconds) log.debug("Starting CrdtEntity for {}", entity.serviceName) val crdtEntityProps = CrdtEntity.props(crdtClient, crdtEntityConfig, discovery) - val crdtEntityManager = system.actorOf(CrdtEntityManager.props(crdtEntityProps), URLEncoder.encode(entity.serviceName, "utf-8")) + val crdtEntityManager = + system.actorOf(CrdtEntityManager.props(crdtEntityProps), URLEncoder.encode(entity.serviceName, "utf-8")) // Ensure the ddata replicator is started, to ensure state replication starts immediately, and also ensure the first // request to the first CRDT doesn't timeout DistributedData(system) val coordinatedShutdown = CoordinatedShutdown(system) - coordinatedShutdown.addTask(CoordinatedShutdown.PhaseClusterShardingShutdownRegion, "shutdown-crdt-" + entity.serviceName) { () => + coordinatedShutdown.addTask(CoordinatedShutdown.PhaseClusterShardingShutdownRegion, + "shutdown-crdt-" + entity.serviceName) { () => implicit val timeout = Timeout(10.seconds) (crdtEntityManager ? 
CrdtEntityManager.Shutdown).mapTo[Done] } @@ -52,15 +64,18 @@ class CrdtSupportFactory(system: ActorSystem, config: EntityDiscoveryManager.Con private def validate(serviceDescriptor: ServiceDescriptor): Unit = { val streamedMethods = serviceDescriptor.getMethods.asScala.filter(m => m.toProto.getClientStreaming) if (streamedMethods.nonEmpty) { - throw EntityDiscoveryException(s"CRDT entities do not support streaming in from the client, but ${serviceDescriptor.getFullName} has the following streamed methods: ${streamedMethods.map(_.getName).mkString(",")}") + throw EntityDiscoveryException( + s"CRDT entities do not support streaming in from the client, but ${serviceDescriptor.getFullName} has the following streamed methods: ${streamedMethods.map(_.getName).mkString(",")}" + ) } } } -private class CrdtSupport(crdtEntity: ActorRef, parallelism: Int, private implicit val relayTimeout: Timeout) extends EntityTypeSupport { +private class CrdtSupport(crdtEntity: ActorRef, parallelism: Int, private implicit val relayTimeout: Timeout) + extends EntityTypeSupport { import akka.pattern.ask - override def handler(method: EntityMethodDescriptor): Flow[EntityCommand, UserFunctionReply, NotUsed] = { + override def handler(method: EntityMethodDescriptor): Flow[EntityCommand, UserFunctionReply, NotUsed] = if (method.method.toProto.getServerStreaming) { Flow[EntityCommand] .mapAsync(parallelism)(command => (crdtEntity ? command).mapTo[Source[UserFunctionReply, NotUsed]]) @@ -68,7 +83,6 @@ private class CrdtSupport(crdtEntity: ActorRef, parallelism: Int, private implic } else { Flow[EntityCommand].mapAsync(parallelism)(command => (crdtEntity ? command).mapTo[UserFunctionReply]) } - } override def handleUnary(command: EntityCommand): Future[UserFunctionReply] = (crdtEntity ? command).mapTo[UserFunctionReply] diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/crdt/NodeVector.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/crdt/NodeVector.scala index d882b117b..60f7e1ff0 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/crdt/NodeVector.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/crdt/NodeVector.scala @@ -4,14 +4,14 @@ import akka.cluster.UniqueAddress import akka.cluster.ddata.{DeltaReplicatedData, FastMerge, RemovedNodePruning, ReplicatedDelta} /** - * An abstract node vector, where each node has its own value that it and it only is responsible for. - * - * The values of the vector must have a stable merge function, implemented by [[mergeValues()]], which is used to - * merge different values for the same node when encountered. - */ + * An abstract node vector, where each node has its own value that it and it only is responsible for. + * + * The values of the vector must have a stable merge function, implemented by [[mergeValues()]], which is used to + * merge different values for the same node when encountered. + */ @SerialVersionUID(1L) // FIXME Java Serialization? abstract class NodeVector[V](private val state: Map[UniqueAddress, V]) - extends DeltaReplicatedData + extends DeltaReplicatedData with ReplicatedDelta with RemovedNodePruning { @@ -19,20 +19,20 @@ abstract class NodeVector[V](private val state: Map[UniqueAddress, V]) final type D = T /** - * Create a new vector with the given state and delta. - * - * This is needed by operations such as [[merge()]] to create the new version of this CRDT. - */ + * Create a new vector with the given state and delta. + * + * This is needed by operations such as [[merge()]] to create the new version of this CRDT. 
+ */ protected def newVector(state: Map[UniqueAddress, V], delta: Option[T]): T /** - * Merge the given values for a node. - */ + * Merge the given values for a node. + */ protected def mergeValues(thisValue: V, thatValue: V): V /** - * Collapse the given value into the given node. - */ + * Collapse the given value into the given node. + */ protected def collapseInto(key: UniqueAddress, value: V): T // Is not final because overrides can implement it without allocating. @@ -75,7 +75,7 @@ abstract class NodeVector[V](private val state: Map[UniqueAddress, V]) override final def prune(removedNode: UniqueAddress, collapseInto: UniqueAddress): T = state.get(removedNode) match { case Some(value) => newVector(state - removedNode, None).collapseInto(collapseInto, value).asInstanceOf[T] - case None => this.asInstanceOf[T] + case None => this.asInstanceOf[T] } override final def pruningCleanup(removedNode: UniqueAddress): T = diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/crdt/UserFunctionProtocolError.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/crdt/UserFunctionProtocolError.scala index a966f057d..0d6a6de0e 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/crdt/UserFunctionProtocolError.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/crdt/UserFunctionProtocolError.scala @@ -1,4 +1,4 @@ package io.cloudstate.proxy.crdt -private[crdt] case class UserFunctionProtocolError(message: String) extends RuntimeException(message, null, false, false) - +private[crdt] case class UserFunctionProtocolError(message: String) + extends RuntimeException(message, null, false, false) diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/crdt/Vote.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/crdt/Vote.scala index 738873e09..f577f7329 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/crdt/Vote.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/crdt/Vote.scala @@ -11,40 +11,40 @@ object Vote { def apply(): Vote = empty /** - * Java API - */ + * Java API + */ def create(): Vote = empty /** - * Decider for voting. - */ + * Decider for voting. + */ type Decider = Iterable[Boolean] => Boolean /** - * At least `n` nodes must vote positive. - */ + * At least `n` nodes must vote positive. + */ def atLeast(n: Int): Decider = _.filter(identity).take(n).size >= n /** - * At least one node must vote positive. - */ + * At least one node must vote positive. + */ val AtLeastOne: Decider = atLeast(1) /** - * At most `n` node must vote positive. - */ + * At most `n` node must vote positive. + */ def atMost(n: Int): Decider = _.filter(identity).take(n + 1).size <= n /** - * All nodes must vote positive. - */ + * All nodes must vote positive. + */ val All: Decider = _.forall(identity) /** - * A majority of nodes must vote positive - */ + * A majority of nodes must vote positive + */ val Majority: Decider = { votes => val (totalVoters, votesFor) = votes.foldLeft((0, 0)) { case ((total, votes), vote) => (total + 1, if (vote) votes + 1 else votes) @@ -53,45 +53,44 @@ object Vote { } /** - * Java API: Predicate for deciding a vote. - */ + * Java API: Predicate for deciding a vote. 
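// Illustrative sketch only, not part of this patch: using the deciders defined above. The vote
// values are assumptions for the example; Vote here is io.cloudstate.proxy.crdt.Vote.
object VoteDeciderExample extends App {
  import io.cloudstate.proxy.crdt.Vote

  val votes: Iterable[Boolean] = Seq(true, true, false)
  println(Vote.atLeast(2)(votes)) // true: at least two positive votes
  println(Vote.atMost(1)(votes))  // false: more than one positive vote
  println(Vote.All(votes))        // false: one node voted negative
  println(Vote.Majority(votes))   // true: 2 of 3 voted positive
}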
+ */ @FunctionalInterface trait DeciderPredicate extends Decider with java.util.function.Predicate[java.lang.Iterable[java.lang.Boolean]] { - override final def apply(votes: Iterable[Boolean]): Boolean = { + override final def apply(votes: Iterable[Boolean]): Boolean = test(votes.asJava.asInstanceOf[java.lang.Iterable[java.lang.Boolean]]) - } } private val One = BigInt(1) } /** - * Implements a vote CRDT. - * - * A vote CRDT allows each node to manage its own vote. Nodes can change their vote at any time. - * - * This CRDT has the same state as a GCounter, recording votes for each node in a node vector as an integer, with - * odd being a positive vote, and even being a negative vote. Changing a nodes vote is done by incrementing the nodes - * integer by one. - * - * The result of the vote is calculated on request by supplying the current cluster state, along with the set of - * member statuses that are allowed to take part in the vote, and a [[akka.cluster.ddata.Vote.Decider]]. - */ + * Implements a vote CRDT. + * + * A vote CRDT allows each node to manage its own vote. Nodes can change their vote at any time. + * + * This CRDT has the same state as a GCounter, recording votes for each node in a node vector as an integer, with + * odd being a positive vote, and even being a negative vote. Changing a nodes vote is done by incrementing the nodes + * integer by one. + * + * The result of the vote is calculated on request by supplying the current cluster state, along with the set of + * member statuses that are allowed to take part in the vote, and a [[akka.cluster.ddata.Vote.Decider]]. + */ @SerialVersionUID(1L) -final case class Vote private[crdt] ( - private[crdt] val state: Map[UniqueAddress, BigInt] = Map.empty, - override val delta: Option[Vote] = None) - extends NodeVector[BigInt](state) { +final case class Vote private[crdt] (private[crdt] val state: Map[UniqueAddress, BigInt] = Map.empty, + override val delta: Option[Vote] = None) + extends NodeVector[BigInt](state) { type T = Vote import Vote.One /** - * Scala API: The result given the current cluster state. - */ + * Scala API: The result given the current cluster state. + */ def result(decider: Vote.Decider, allowedVoterStatuses: Set[MemberStatus] = Set(MemberStatus.Up))( - implicit currentClusterState: CurrentClusterState): Boolean = { + implicit currentClusterState: CurrentClusterState + ): Boolean = { // Create a view so that when calculating the result we can break early once we've counted enough votes val votes: Iterable[Boolean] = currentClusterState.members.view @@ -106,31 +105,28 @@ final case class Vote private[crdt] ( } /** - * Java API: The result given the current cluster state. - */ - def getResult( - decider: Vote.Decider, - allowedVoterStatuses: java.util.Collection[MemberStatus], - currentClusterState: CurrentClusterState): Boolean = + * Java API: The result given the current cluster state. + */ + def getResult(decider: Vote.Decider, + allowedVoterStatuses: java.util.Collection[MemberStatus], + currentClusterState: CurrentClusterState): Boolean = result(decider, allowedVoterStatuses.asScala.toSet)(currentClusterState) /** - * Scala API: Place a vote. - */ - def vote(vote: Boolean)(implicit node: SelfUniqueAddress): Vote = { + * Scala API: Place a vote. 
+ */ + def vote(vote: Boolean)(implicit node: SelfUniqueAddress): Vote = state.get(node.uniqueAddress) match { case Some(v) if vote != v.testBit(0) => update(node.uniqueAddress, v + 1) - case None if vote => update(node.uniqueAddress, One) - case _ => this + case None if vote => update(node.uniqueAddress, One) + case _ => this } - } /** - * Java API: Place a vote. - */ - def setVote(node: SelfUniqueAddress, vote: Boolean): Vote = { + * Java API: Place a vote. + */ + def setVote(node: SelfUniqueAddress, vote: Boolean): Vote = this.vote(vote)(node) - } override protected def newVector(state: Map[UniqueAddress, BigInt], delta: Option[Vote]): Vote = new Vote(state, delta) @@ -154,4 +150,4 @@ object VoteKey { } @SerialVersionUID(1L) -final case class VoteKey(_id: String) extends Key[Vote](_id) with ReplicatedDataSerialization \ No newline at end of file +final case class VoteKey(_id: String) extends Key[Vote](_id) with ReplicatedDataSerialization diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/crdt/WireTransformer.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/crdt/WireTransformer.scala index b6aefb142..0d9fc7a49 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/crdt/WireTransformer.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/crdt/WireTransformer.scala @@ -8,13 +8,14 @@ import io.cloudstate.protocol.crdt._ import com.google.protobuf.any.{Any => ProtoAny} /** - * Transforms wire versions of CRDTs and deltas to/from the actual CRDTs/deltas - */ + * Transforms wire versions of CRDTs and deltas to/from the actual CRDTs/deltas + */ object WireTransformer { private[this] final val Zero = BigInt(0) - private def voteState(vote: Vote)(implicit clusterState: CurrentClusterState, selfUniqueAddress: SelfUniqueAddress): VoteState = { + private def voteState(vote: Vote)(implicit clusterState: CurrentClusterState, + selfUniqueAddress: SelfUniqueAddress): VoteState = { var votesFor = 0 var votes = 0 var selfVote = false @@ -32,7 +33,8 @@ object WireTransformer { VoteState(votesFor, votes, selfVote) } - def toWireState(state: ReplicatedData)(implicit clusterState: CurrentClusterState, selfUniqueAddress: SelfUniqueAddress): CrdtState = { + def toWireState(state: ReplicatedData)(implicit clusterState: CurrentClusterState, + selfUniqueAddress: SelfUniqueAddress): CrdtState = { import CrdtState.{State => S} CrdtState(state match { @@ -40,15 +42,15 @@ object WireTransformer { S.Gcounter(GCounterState(gcounter.value.toLong)) case pncounter: PNCounter => S.Pncounter(PNCounterState(pncounter.value.toLong)) - case gset: GSet[ProtoAny@unchecked] => + case gset: GSet[ProtoAny @unchecked] => S.Gset(GSetState(gset.elements.toSeq)) - case orset: ORSet[ProtoAny@unchecked] => + case orset: ORSet[ProtoAny @unchecked] => S.Orset(ORSetState(orset.elements.toSeq)) - case lwwregister: LWWRegister[ProtoAny@unchecked] => + case lwwregister: LWWRegister[ProtoAny @unchecked] => S.Lwwregister(LWWRegisterState(Some(lwwregister.value))) case flag: Flag => S.Flag(FlagState(flag.enabled)) - case ormap: ORMap[ProtoAny@unchecked, ReplicatedData@unchecked] => + case ormap: ORMap[ProtoAny @unchecked, ReplicatedData @unchecked] => S.Ormap(ORMapState(ormap.entries.map { case (k, value) => ORMapEntry(Some(k), Some(toWireState(value))) }.toSeq)) @@ -62,7 +64,9 @@ object WireTransformer { // We both apply the update to our current state, as well as produce the function that will update the // replicators version, since these might not be the same thing. 
- def deltaToUpdate(delta: CrdtDelta)(implicit selfUniqueAddress: SelfUniqueAddress): (ReplicatedData, ReplicatedData => ReplicatedData) = { + def deltaToUpdate( + delta: CrdtDelta + )(implicit selfUniqueAddress: SelfUniqueAddress): (ReplicatedData, ReplicatedData => ReplicatedData) = { import CrdtDelta.{Delta => D} @@ -85,15 +89,16 @@ object WireTransformer { case D.Gset(GSetDelta(added)) => (GSet.empty[ProtoAny], { - case gset: GSet[ProtoAny@unchecked] => added.foldLeft(gset)((gset, e) => gset + e) + case gset: GSet[ProtoAny @unchecked] => added.foldLeft(gset)((gset, e) => gset + e) case other => throw IncompatibleCrdtChange(s"GSetDelta is incompatible with CRDT $other") }) case D.Orset(ORSetDelta(cleared, removed, added)) => (ORSet.empty[ProtoAny], { - case orset: ORSet[ProtoAny@unchecked] => - val maybeCleared = if (cleared) orset.clear(selfUniqueAddress) - else orset + case orset: ORSet[ProtoAny @unchecked] => + val maybeCleared = + if (cleared) orset.clear(selfUniqueAddress) + else orset val withRemoved = removed.foldLeft(maybeCleared)((orset, key) => orset remove key) added.foldLeft(withRemoved)((orset, value) => orset :+ value) case other => throw IncompatibleCrdtChange(s"ORSetDelta is incompatible with CRDT $other") @@ -102,7 +107,7 @@ object WireTransformer { case D.Lwwregister(LWWRegisterDelta(maybeValue, clock, customClockValue)) => val value = maybeValue.getOrElse(ProtoAny.defaultInstance) (LWWRegister.create(value), { - case lwwregister: LWWRegister[ProtoAny@unchecked] => + case lwwregister: LWWRegister[ProtoAny @unchecked] => lwwregister.withValue(selfUniqueAddress, value, toDdataClock(clock, customClockValue)) case other => throw IncompatibleCrdtChange(s"LWWRegisterDelta is incompatible with CRDT $other") }) @@ -118,29 +123,30 @@ object WireTransformer { case D.Ormap(ORMapDelta(cleared, removed, updated, added)) => (ORMap.empty[ProtoAny, ReplicatedData], { - case ormap: ORMap[ProtoAny@unchecked, ReplicatedData@unchecked] => - - val maybeCleared = if (cleared) ormap.entries.keySet.foldLeft(ormap)((ormap, key) => ormap remove key) - else ormap + case ormap: ORMap[ProtoAny @unchecked, ReplicatedData @unchecked] => + val maybeCleared = + if (cleared) ormap.entries.keySet.foldLeft(ormap)((ormap, key) => ormap remove key) + else ormap val withRemoved = removed.foldLeft(maybeCleared)((ormap, key) => ormap remove key) - val withUpdated = updated.foldLeft(withRemoved) { case (ormap, ORMapEntryDelta(Some(key), Some(delta))) => - // While the CRDT we're using won't have changed, the CRDT in the replicator may have, so we detect that. - ormap.get(key) match { - case Some(data) => - try { - val (initial, modify) = deltaToUpdate(delta) - ormap.updated(selfUniqueAddress, key, initial)(modify) - } catch { - case IncompatibleCrdtChange(_) => - // The delta is incompatible, the value must have been removed and then added again, so ignore - ormap - } - case None => - // There is no element, it must have been removed, ignore - ormap - } + val withUpdated = updated.foldLeft(withRemoved) { + case (ormap, ORMapEntryDelta(Some(key), Some(delta))) => + // While the CRDT we're using won't have changed, the CRDT in the replicator may have, so we detect that. 
+ ormap.get(key) match { + case Some(data) => + try { + val (initial, modify) = deltaToUpdate(delta) + ormap.updated(selfUniqueAddress, key, initial)(modify) + } catch { + case IncompatibleCrdtChange(_) => + // The delta is incompatible, the value must have been removed and then added again, so ignore + ormap + } + case None => + // There is no element, it must have been removed, ignore + ormap + } } added.foldLeft(withUpdated) { @@ -170,11 +176,13 @@ object WireTransformer { case S.Pncounter(PNCounterState(value)) => PNCounter.empty :+ value case S.Gset(GSetState(items)) => items.foldLeft(GSet.empty[ProtoAny])((gset, item) => gset + item) case S.Orset(ORSetState(items)) => items.foldLeft(ORSet.empty[ProtoAny])((orset, item) => orset :+ item) - case S.Lwwregister(LWWRegisterState(value, clock, customClockValue)) => LWWRegister(selfUniqueAddress, value.getOrElse(ProtoAny.defaultInstance), toDdataClock(clock, customClockValue)) + case S.Lwwregister(LWWRegisterState(value, clock, customClockValue)) => + LWWRegister(selfUniqueAddress, value.getOrElse(ProtoAny.defaultInstance), toDdataClock(clock, customClockValue)) case S.Flag(FlagState(value)) => if (value) Flag.Enabled else Flag.Disabled - case S.Ormap(ORMapState(items)) => items.foldLeft(ORMap.empty[ProtoAny, ReplicatedData]) { - case (ormap, ORMapEntry(Some(key), Some(state))) => ormap.put(selfUniqueAddress, key, stateToCrdt(state)) - } + case S.Ormap(ORMapState(items)) => + items.foldLeft(ORMap.empty[ProtoAny, ReplicatedData]) { + case (ormap, ORMapEntry(Some(key), Some(state))) => ormap.put(selfUniqueAddress, key, stateToCrdt(state)) + } case S.Vote(VoteState(_, _, selfVote)) => Vote.empty.vote(selfVote) case S.Empty => throw UserFunctionProtocolError("Unknown state or state not set") } @@ -206,7 +214,10 @@ object WireTransformer { } - def detectChange(original: ReplicatedData, changed: ReplicatedData)(implicit clusterState: CurrentClusterState, selfUniqueAddress: SelfUniqueAddress): CrdtChange = { + def detectChange( + original: ReplicatedData, + changed: ReplicatedData + )(implicit clusterState: CurrentClusterState, selfUniqueAddress: SelfUniqueAddress): CrdtChange = { import CrdtChange._ import CrdtDelta.{Delta => D} @@ -232,9 +243,9 @@ object WireTransformer { case _ => IncompatibleChange } - case gset: GSet[ProtoAny@unchecked] => + case gset: GSet[ProtoAny @unchecked] => original match { - case old: GSet[ProtoAny@unchecked] => + case old: GSet[ProtoAny @unchecked] => val diff = gset.elements -- old.elements if (old.elements.size + diff.size > gset.elements.size) IncompatibleChange else if (diff.isEmpty) NoChange @@ -242,17 +253,23 @@ object WireTransformer { case _ => IncompatibleChange } - case orset: ORSet[ProtoAny@unchecked] => + case orset: ORSet[ProtoAny @unchecked] => original match { - case old: ORSet[ProtoAny@unchecked] => + case old: ORSet[ProtoAny @unchecked] => // Fast path, just cleared if (orset.elements.isEmpty) { if (old.elements.isEmpty) { NoChange } else { - Updated(CrdtDelta(D.Orset(ORSetDelta( - cleared = true - )))) + Updated( + CrdtDelta( + D.Orset( + ORSetDelta( + cleared = true + ) + ) + ) + ) } } else { val removed = old.elements -- orset.elements @@ -263,24 +280,36 @@ object WireTransformer { // Optimisation, if we're going to end up sending more operations than there are elements in the set, // it's cheaper to just clear it and send all the elements if (removed.size + added.size > orset.elements.size) { - Updated(CrdtDelta(D.Orset(ORSetDelta( - cleared = true, - added = orset.elements.toSeq - )))) + 
Updated( + CrdtDelta( + D.Orset( + ORSetDelta( + cleared = true, + added = orset.elements.toSeq + ) + ) + ) + ) } else { - Updated(CrdtDelta(D.Orset(ORSetDelta( - removed = removed.toSeq, - added = added.toSeq - )))) + Updated( + CrdtDelta( + D.Orset( + ORSetDelta( + removed = removed.toSeq, + added = added.toSeq + ) + ) + ) + ) } } } case _ => IncompatibleChange } - case lwwregister: LWWRegister[ProtoAny@unchecked] => + case lwwregister: LWWRegister[ProtoAny @unchecked] => original match { - case old: LWWRegister[ProtoAny@unchecked] => + case old: LWWRegister[ProtoAny @unchecked] => if (old.value == lwwregister.value) NoChange else Updated(CrdtDelta(D.Lwwregister(LWWRegisterDelta(Some(lwwregister.value))))) case _ => IncompatibleChange @@ -295,13 +324,11 @@ object WireTransformer { case _ => IncompatibleChange } - case ormap: ORMap[ProtoAny@unchecked, ReplicatedData@unchecked] => - + case ormap: ORMap[ProtoAny @unchecked, ReplicatedData @unchecked] => import ORMapEntryAction._ original match { - case old: ORMap[ProtoAny@unchecked, ReplicatedData@unchecked] => - + case old: ORMap[ProtoAny @unchecked, ReplicatedData @unchecked] => if (ormap.isEmpty) { if (old.isEmpty) NoChange else Updated(CrdtDelta(D.Ormap(ORMapDelta(cleared = true)))) @@ -320,8 +347,8 @@ object WireTransformer { val deleted = old.entries.keySet -- ormap.entries.keys val allDeleted = deleted ++ changes.collect { - case DeleteThenAdd(k, _) => k - } + case DeleteThenAdd(k, _) => k + } val updated = changes.collect { case UpdateEntry(key, delta) => ORMapEntryDelta(Some(key), Some(delta)) } @@ -333,11 +360,17 @@ object WireTransformer { if (allDeleted.isEmpty && updated.isEmpty && added.isEmpty) { NoChange } else { - Updated(CrdtDelta(D.Ormap(ORMapDelta( - removed = allDeleted.toSeq, - updated = updated, - added = added - )))) + Updated( + CrdtDelta( + D.Ormap( + ORMapDelta( + removed = allDeleted.toSeq, + updated = updated, + added = added + ) + ) + ) + ) } } @@ -346,18 +379,23 @@ object WireTransformer { } case vote: Vote => - original match { case old: Vote => val newState = voteState(vote) val oldState = voteState(old) if (newState != oldState) { - Updated(CrdtDelta(D.Vote(VoteDelta( - selfVote = newState.selfVote, - votesFor = newState.votesFor, - totalVoters = newState.totalVoters - )))) + Updated( + CrdtDelta( + D.Vote( + VoteDelta( + selfVote = newState.selfVote, + votesFor = newState.votesFor, + totalVoters = newState.totalVoters + ) + ) + ) + ) } else { NoChange } @@ -372,7 +410,7 @@ object WireTransformer { } } - private def toDdataClock(clock: CrdtClock, customClockValue: Long): Clock[ProtoAny] = { + private def toDdataClock(clock: CrdtClock, customClockValue: Long): Clock[ProtoAny] = clock match { case CrdtClock.DEFAULT => LWWRegister.defaultClock case CrdtClock.REVERSE => LWWRegister.reverseClock @@ -380,7 +418,6 @@ object WireTransformer { case CrdtClock.CUSTOM_AUTO_INCREMENT => new CustomClock(customClockValue, true) case CrdtClock.Unrecognized(_) => LWWRegister.defaultClock } - } case class IncompatibleCrdtChange(message: String) extends RuntimeException(message, null, false, false) diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/eventsourced/DynamicLeastShardAllocationStrategy.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/eventsourced/DynamicLeastShardAllocationStrategy.scala index 357ac00ea..828d1c5a5 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/eventsourced/DynamicLeastShardAllocationStrategy.scala +++ 
b/proxy/core/src/main/scala/io/cloudstate/proxy/eventsourced/DynamicLeastShardAllocationStrategy.scala @@ -7,23 +7,27 @@ import akka.cluster.sharding.ShardRegion.ShardId import scala.collection.immutable import scala.concurrent.Future -class DynamicLeastShardAllocationStrategy(rebalanceThreshold: Int, maxSimultaneousRebalance: Int, rebalanceNumber: Int, rebalanceFactor: Double) - extends ShardAllocationStrategy +class DynamicLeastShardAllocationStrategy(rebalanceThreshold: Int, + maxSimultaneousRebalance: Int, + rebalanceNumber: Int, + rebalanceFactor: Double) + extends ShardAllocationStrategy with Serializable { - def this(rebalanceThreshold: Int, maxSimultaneousRebalance: Int) = this(rebalanceThreshold, maxSimultaneousRebalance, rebalanceThreshold, 0.0) + def this(rebalanceThreshold: Int, maxSimultaneousRebalance: Int) = + this(rebalanceThreshold, maxSimultaneousRebalance, rebalanceThreshold, 0.0) override def allocateShard( - requester: ActorRef, - shardId: ShardId, - currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]]): Future[ActorRef] = { + requester: ActorRef, + shardId: ShardId, + currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]] + ): Future[ActorRef] = { val (regionWithLeastShards, _) = currentShardAllocations.minBy { case (_, v) => v.size } Future.successful(regionWithLeastShards) } - override def rebalance( - currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]], - rebalanceInProgress: Set[ShardId]): Future[Set[ShardId]] = { + override def rebalance(currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]], + rebalanceInProgress: Set[ShardId]): Future[Set[ShardId]] = if (rebalanceInProgress.size < maxSimultaneousRebalance) { val (_, leastShards) = currentShardAllocations.minBy { case (_, v) => v.size } val mostShards = currentShardAllocations @@ -46,14 +50,12 @@ class DynamicLeastShardAllocationStrategy(rebalanceThreshold: Int, maxSimultaneo // The ideal number to rebalance to so these nodes have an even number of shards val evenRebalance = difference / 2 - val n = math.min( - math.min(factoredRebalanceLimit, evenRebalance), - maxSimultaneousRebalance - rebalanceInProgress.size) + val n = + math.min(math.min(factoredRebalanceLimit, evenRebalance), maxSimultaneousRebalance - rebalanceInProgress.size) Future.successful(mostShards.sorted.take(n).toSet) } else emptyRebalanceResult } else emptyRebalanceResult - } private[this] final val emptyRebalanceResult = Future.successful(Set.empty[ShardId]) -} \ No newline at end of file +} diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/eventsourced/EventSourcedEntity.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/eventsourced/EventSourcedEntity.scala index 885dda258..59c616d87 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/eventsourced/EventSourcedEntity.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/eventsourced/EventSourcedEntity.scala @@ -39,33 +39,45 @@ object EventSourcedEntitySupervisor { private final case class Relay(actorRef: ActorRef) - def props(client: EventSourcedClient, configuration: EventSourcedEntity.Configuration, concurrencyEnforcer: ActorRef, statsCollector: ActorRef)(implicit mat: Materializer): Props = + def props(client: EventSourcedClient, + configuration: EventSourcedEntity.Configuration, + concurrencyEnforcer: ActorRef, + statsCollector: ActorRef)(implicit mat: Materializer): Props = Props(new EventSourcedEntitySupervisor(client, configuration, concurrencyEnforcer, statsCollector)) } /** - * This serves two 
purposes. - * - * Firstly, when the StateManager crashes, we don't want it restarted. Cluster sharding restarts, and there's no way - * to customise that. - * - * Secondly, we need to ensure that we have an Akka Streams actorRef source to publish messages two before Akka - * persistence starts feeding us events. There's a race condition if we do this in the same persistent actor. This - * establishes that connection first. - */ -final class EventSourcedEntitySupervisor(client: EventSourcedClient, configuration: EventSourcedEntity.Configuration, concurrencyEnforcer: ActorRef, statsCollector: ActorRef)(implicit mat: Materializer) - extends Actor with Stash { + * This serves two purposes. + * + * Firstly, when the StateManager crashes, we don't want it restarted. Cluster sharding restarts, and there's no way + * to customise that. + * + * Secondly, we need to ensure that we have an Akka Streams actorRef source to publish messages two before Akka + * persistence starts feeding us events. There's a race condition if we do this in the same persistent actor. This + * establishes that connection first. + */ +final class EventSourcedEntitySupervisor(client: EventSourcedClient, + configuration: EventSourcedEntity.Configuration, + concurrencyEnforcer: ActorRef, + statsCollector: ActorRef)(implicit mat: Materializer) + extends Actor + with Stash { import EventSourcedEntitySupervisor._ override final def receive: Receive = PartialFunction.empty override final def preStart(): Unit = { - client.handle(Source.actorRef[EventSourcedStreamIn](configuration.sendQueueSize, OverflowStrategy.fail) - .mapMaterializedValue { ref => - self ! Relay(ref) - NotUsed - }).runWith(Sink.actorRef(self, EventSourcedEntity.StreamClosed)) + client + .handle( + Source + .actorRef[EventSourcedStreamIn](configuration.sendQueueSize, OverflowStrategy.fail) + .mapMaterializedValue { ref => + self ! Relay(ref) + NotUsed + } + ) + .runWith(Sink.actorRef(self, EventSourcedEntity.StreamClosed)) context.become(waitingForRelay) } @@ -73,7 +85,11 @@ final class EventSourcedEntitySupervisor(client: EventSourcedClient, configurati case Relay(relayRef) => // Cluster sharding URL encodes entity ids, so to extract it we need to decode. 
val entityId = URLDecoder.decode(self.path.name, "utf-8") - val manager = context.watch(context.actorOf(EventSourcedEntity.props(configuration, entityId, relayRef, concurrencyEnforcer, statsCollector), "entity")) + val manager = context.watch( + context + .actorOf(EventSourcedEntity.props(configuration, entityId, relayRef, concurrencyEnforcer, statsCollector), + "entity") + ) context.become(forwarding(manager)) unstashAll() case _ => stash() @@ -98,28 +114,37 @@ object EventSourcedEntity { final case object StreamClosed extends DeadLetterSuppression final case class Configuration( - serviceName: String, - userFunctionName: String, - passivationTimeout: Timeout, - sendQueueSize: Int + serviceName: String, + userFunctionName: String, + passivationTimeout: Timeout, + sendQueueSize: Int ) private final case class OutstandingCommand( - commandId: Long, - replyTo: ActorRef + commandId: Long, + replyTo: ActorRef ) - final def props(configuration: Configuration, entityId: String, relay: ActorRef, concurrencyEnforcer: ActorRef, statsCollector: ActorRef): Props = + final def props(configuration: Configuration, + entityId: String, + relay: ActorRef, + concurrencyEnforcer: ActorRef, + statsCollector: ActorRef): Props = Props(new EventSourcedEntity(configuration, entityId, relay, concurrencyEnforcer, statsCollector)) /** - * Used to ensure the action ids sent to the concurrency enforcer are indeed unique. - */ + * Used to ensure the action ids sent to the concurrency enforcer are indeed unique. + */ private val actorCounter = new AtomicLong(0) } -final class EventSourcedEntity(configuration: EventSourcedEntity.Configuration, entityId: String, relay: ActorRef, - concurrencyEnforcer: ActorRef, statsCollector: ActorRef) extends PersistentActor with ActorLogging { +final class EventSourcedEntity(configuration: EventSourcedEntity.Configuration, + entityId: String, + relay: ActorRef, + concurrencyEnforcer: ActorRef, + statsCollector: ActorRef) + extends PersistentActor + with ActorLogging { override final def persistenceId: String = configuration.userFunctionName + entityId private val actorId = EventSourcedEntity.actorCounter.incrementAndGet() @@ -127,12 +152,12 @@ final class EventSourcedEntity(configuration: EventSourcedEntity.Configuration, private[this] final var stashedCommands = Queue.empty[(EntityCommand, ActorRef)] // PERFORMANCE: look at options for data structures private[this] final var currentCommand: EventSourcedEntity.OutstandingCommand = null private[this] final var stopped = false - private[this] final var idCounter = 0l + private[this] final var idCounter = 0L private[this] final var inited = false private[this] final var currentActionId: String = null private[this] final var reportedDatabaseOperationStarted = false - private[this] final var databaseOperationStartTime = 0l - private[this] final var commandStartTime = 0l + private[this] final var databaseOperationStartTime = 0L + private[this] final var commandStartTime = 0L // Set up passivation timer context.setReceiveTimeout(configuration.passivationTimeout.duration) @@ -200,18 +225,16 @@ final class EventSourcedEntity(configuration: EventSourcedEntity.Configuration, }) } - private final def esReplyToUfReply(reply: EventSourcedReply) = { + private final def esReplyToUfReply(reply: EventSourcedReply) = UserFunctionReply( clientAction = reply.clientAction, sideEffects = reply.sideEffects ) - } - private final def createFailure(message: String) = { + private final def createFailure(message: String) = UserFunctionReply( clientAction = 
Some(ClientAction(ClientAction.Action.Failure(Failure(description = message)))) ) - } override final def receiveCommand: PartialFunction[Any, Unit] = { @@ -222,7 +245,6 @@ final class EventSourcedEntity(configuration: EventSourcedEntity.Configuration, handleCommand(command, sender()) case EventSourcedStreamOut(m) => - import EventSourcedStreamOut.{Message => ESOMsg} m match { @@ -301,19 +323,23 @@ final class EventSourcedEntity(configuration: EventSourcedEntity.Configuration, } } - private[this] final def maybeInit(snapshot: Option[SnapshotOffer]): Unit = { + private[this] final def maybeInit(snapshot: Option[SnapshotOffer]): Unit = if (!inited) { - relay ! EventSourcedStreamIn(EventSourcedStreamIn.Message.Init(EventSourcedInit( - serviceName = configuration.serviceName, - entityId = entityId, - snapshot = snapshot.map { - case SnapshotOffer(metadata, offeredSnapshot: pbAny) => EventSourcedSnapshot(metadata.sequenceNr, Some(offeredSnapshot)) - case other => throw new IllegalStateException(s"Unexpected snapshot type received: ${other.getClass}") - } - ))) + relay ! EventSourcedStreamIn( + EventSourcedStreamIn.Message.Init( + EventSourcedInit( + serviceName = configuration.serviceName, + entityId = entityId, + snapshot = snapshot.map { + case SnapshotOffer(metadata, offeredSnapshot: pbAny) => + EventSourcedSnapshot(metadata.sequenceNr, Some(offeredSnapshot)) + case other => throw new IllegalStateException(s"Unexpected snapshot type received: ${other.getClass}") + } + ) + ) + ) inited = true } - } override final def receiveRecover: PartialFunction[Any, Unit] = { case offer: SnapshotOffer => @@ -328,7 +354,7 @@ final class EventSourcedEntity(configuration: EventSourcedEntity.Configuration, relay ! EventSourcedStreamIn(EventSourcedStreamIn.Message.Event(EventSourcedEvent(lastSequenceNr, Some(event)))) } - private def reportDatabaseOperationStarted(): Unit = { + private def reportDatabaseOperationStarted(): Unit = if (reportedDatabaseOperationStarted) { log.warning("Already reported database operation started") } else { @@ -336,14 +362,12 @@ final class EventSourcedEntity(configuration: EventSourcedEntity.Configuration, reportedDatabaseOperationStarted = true statsCollector ! StatsCollector.DatabaseOperationStarted } - } - private def reportDatabaseOperationFinished(): Unit = { + private def reportDatabaseOperationFinished(): Unit = if (!reportedDatabaseOperationStarted) { log.warning("Hadn't reported database operation started") } else { reportedDatabaseOperationStarted = false statsCollector ! 
StatsCollector.DatabaseOperationFinished(System.nanoTime() - databaseOperationStartTime) } - } -} \ No newline at end of file +} diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/eventsourced/EventSourcedSupportFactory.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/eventsourced/EventSourcedSupportFactory.scala index d4cbd37cb..439974a03 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/eventsourced/EventSourcedSupportFactory.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/eventsourced/EventSourcedSupportFactory.scala @@ -18,22 +18,30 @@ import io.cloudstate.proxy.entity.{EntityCommand, UserFunctionReply} import scala.concurrent.{ExecutionContext, Future} import scala.collection.JavaConverters._ -class EventSourcedSupportFactory(system: ActorSystem, config: EntityDiscoveryManager.Configuration, - grpcClientSettings: GrpcClientSettings, concurrencyEnforcer: ActorRef, statsCollector: ActorRef)(implicit ec: ExecutionContext, mat: Materializer) extends EntityTypeSupportFactory { +class EventSourcedSupportFactory(system: ActorSystem, + config: EntityDiscoveryManager.Configuration, + grpcClientSettings: GrpcClientSettings, + concurrencyEnforcer: ActorRef, + statsCollector: ActorRef)(implicit ec: ExecutionContext, mat: Materializer) + extends EntityTypeSupportFactory { private final val log = Logging.getLogger(system, this.getClass) private val eventSourcedClient = EventSourcedClient(grpcClientSettings) override def buildEntityTypeSupport(entity: Entity, serviceDescriptor: ServiceDescriptor): EntityTypeSupport = { - val stateManagerConfig = EventSourcedEntity.Configuration(entity.serviceName, entity.persistenceId, config.passivationTimeout, config.relayOutputBufferSize) + val stateManagerConfig = EventSourcedEntity.Configuration(entity.serviceName, + entity.persistenceId, + config.passivationTimeout, + config.relayOutputBufferSize) log.debug("Starting EventSourcedEntity for {}", entity.persistenceId) val clusterSharding = ClusterSharding(system) val clusterShardingSettings = ClusterShardingSettings(system) val eventSourcedEntity = clusterSharding.start( typeName = entity.persistenceId, - entityProps = EventSourcedEntitySupervisor.props(eventSourcedClient, stateManagerConfig, concurrencyEnforcer, statsCollector), + entityProps = + EventSourcedEntitySupervisor.props(eventSourcedClient, stateManagerConfig, concurrencyEnforcer, statsCollector), settings = clusterShardingSettings, messageExtractor = new EntityIdExtractor(config.numberOfShards), allocationStrategy = new DynamicLeastShardAllocationStrategy(1, 10, 2, 0.0), @@ -44,14 +52,20 @@ class EventSourcedSupportFactory(system: ActorSystem, config: EntityDiscoveryMan } private def validate(serviceDescriptor: ServiceDescriptor): Unit = { - val streamedMethods = serviceDescriptor.getMethods.asScala.filter(m => m.toProto.getClientStreaming || m.toProto.getServerStreaming) + val streamedMethods = + serviceDescriptor.getMethods.asScala.filter(m => m.toProto.getClientStreaming || m.toProto.getServerStreaming) if (streamedMethods.nonEmpty) { - throw EntityDiscoveryException(s"Event sourced entities do not support streamed methods, but ${serviceDescriptor.getFullName} has the following streamed methods: ${streamedMethods.map(_.getName).mkString(",")}") + throw EntityDiscoveryException( + s"Event sourced entities do not support streamed methods, but ${serviceDescriptor.getFullName} has the following streamed methods: ${streamedMethods.map(_.getName).mkString(",")}" + ) } } } -private class 
EventSourcedSupport(eventSourcedEntity: ActorRef, parallelism: Int, private implicit val relayTimeout: Timeout) extends EntityTypeSupport { +private class EventSourcedSupport(eventSourcedEntity: ActorRef, + parallelism: Int, + private implicit val relayTimeout: Timeout) + extends EntityTypeSupport { import akka.pattern.ask override def handler(method: EntityMethodDescriptor): Flow[EntityCommand, UserFunctionReply, NotUsed] = @@ -66,4 +80,3 @@ private final class EntityIdExtractor(shards: Int) extends HashCodeMessageExtrac case command: EntityCommand => command.entityId } } - diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/eventsourced/InMemSnapshotStore.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/eventsourced/InMemSnapshotStore.scala index 2a260def4..19a7c9c01 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/eventsourced/InMemSnapshotStore.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/eventsourced/InMemSnapshotStore.scala @@ -25,15 +25,18 @@ class InMemSnapshotStore extends SnapshotStore { private[this] final var snapshots = Map.empty[String, SelectedSnapshot] - override def loadAsync(persistenceId: String, criteria: SnapshotSelectionCriteria): Future[Option[SelectedSnapshot]] = { - Future.successful(snapshots.get(persistenceId) - .filter(s => s.metadata.sequenceNr >= criteria.minSequenceNr && - s.metadata.sequenceNr <= criteria.maxSequenceNr && - s.metadata.timestamp >= criteria.minTimestamp && - s.metadata.timestamp <= criteria.maxTimestamp - ) + override def loadAsync(persistenceId: String, criteria: SnapshotSelectionCriteria): Future[Option[SelectedSnapshot]] = + Future.successful( + snapshots + .get(persistenceId) + .filter( + s => + s.metadata.sequenceNr >= criteria.minSequenceNr && + s.metadata.sequenceNr <= criteria.maxSequenceNr && + s.metadata.timestamp >= criteria.minTimestamp && + s.metadata.timestamp <= criteria.maxTimestamp + ) ) - } override def saveAsync(metadata: SnapshotMetadata, snapshot: Any): Future[Unit] = { snapshots += metadata.persistenceId -> SelectedSnapshot(metadata, snapshot) diff --git a/proxy/core/src/test/scala/io/cloudstate/proxy/HttpApiSpec.scala b/proxy/core/src/test/scala/io/cloudstate/proxy/HttpApiSpec.scala index 9c2d83835..dab5d7870 100644 --- a/proxy/core/src/test/scala/io/cloudstate/proxy/HttpApiSpec.scala +++ b/proxy/core/src/test/scala/io/cloudstate/proxy/HttpApiSpec.scala @@ -43,46 +43,53 @@ class HttpApiSpec extends WordSpec with MustMatchers with ScalatestRouteTest { override def reportError(in: UserFunctionError): Future[Empty] = ??? } - def assertConfigurationFailure(d: FileDescriptor, n: String, msg: String): Assertion = { + def assertConfigurationFailure(d: FileDescriptor, n: String, msg: String): Assertion = intercept[ConfigurationException] { val service = d.findServiceByName(n) - service must not be(null) + service must not be (null) val probe = TestProbe().ref - val entity = ServableEntity(service.getFullName, service, new UserFunctionTypeSupport { - override def handler(command: String): Flow[UserFunctionCommand, UserFunctionReply, NotUsed] = - Flow[UserFunctionCommand].mapAsync(1)(handleUnary) - override def handleUnary(command: UserFunctionCommand): Future[UserFunctionReply] = - (probe ? 
command).mapTo[UserFunctionReply] - }) + val entity = ServableEntity( + service.getFullName, + service, + new UserFunctionTypeSupport { + override def handler(command: String): Flow[UserFunctionCommand, UserFunctionReply, NotUsed] = + Flow[UserFunctionCommand].mapAsync(1)(handleUnary) + override def handleUnary(command: UserFunctionCommand): Future[UserFunctionReply] = + (probe ? command).mapTo[UserFunctionReply] + } + ) HttpApi.serve(new UserFunctionRouter(Seq(entity), mockEntityDiscovery), Seq(entity), mockEntityDiscovery) }.getMessage must equal(msg) - } "HTTP API" must { "not allow empty patterns" in { assertConfigurationFailure( - IllegalHttpConfig0.IllegalHttpConfig0Proto.javaDescriptor, "IllegalHttpConfig0", + IllegalHttpConfig0.IllegalHttpConfig0Proto.javaDescriptor, + "IllegalHttpConfig0", "HTTP API Config: Pattern missing for rule [HttpRule(,,,Vector(),Empty)]!" ) } "not allow selectors which do not exist as service methods" in { assertConfigurationFailure( - IllegalHttpConfig1.IllegalHttpConfig1Proto.javaDescriptor, "IllegalHttpConfig1", + IllegalHttpConfig1.IllegalHttpConfig1Proto.javaDescriptor, + "IllegalHttpConfig1", "HTTP API Config: Rule selector [wrongSelector] must be empty or [cloudstate.proxy.test.IllegalHttpConfig1.fail]" ) } "not allow patterns which do not start with slash" in { assertConfigurationFailure( - IllegalHttpConfig2.IllegalHttpConfig2Proto.javaDescriptor, "IllegalHttpConfig2", + IllegalHttpConfig2.IllegalHttpConfig2Proto.javaDescriptor, + "IllegalHttpConfig2", "HTTP API Config: Configured pattern [no/initial/slash] does not start with slash" ) } "not allow path extractors which refer to repeated fields" in { assertConfigurationFailure( - IllegalHttpConfig3.IllegalHttpConfig3Proto.javaDescriptor, "IllegalHttpConfig3", + IllegalHttpConfig3.IllegalHttpConfig3Proto.javaDescriptor, + "IllegalHttpConfig3", "HTTP API Config: Repeated parameters [cloudstate.proxy.test.IllegalHttpConfig3Message.illegal_repeated] are not allowed as path variables" ) } @@ -91,42 +98,48 @@ class HttpApiSpec extends WordSpec with MustMatchers with ScalatestRouteTest { "not allow path extractors to be duplicated in the same rule" in { assertConfigurationFailure( - IllegalHttpConfig4.IllegalHttpConfig4Proto.javaDescriptor, "IllegalHttpConfig4", + IllegalHttpConfig4.IllegalHttpConfig4Proto.javaDescriptor, + "IllegalHttpConfig4", "HTTP API Config: Path parameter [duplicated] occurs more than once" ) } "not allow custom non-* custom kinds" in { assertConfigurationFailure( - IllegalHttpConfig5.IllegalHttpConfig5Proto.javaDescriptor, "IllegalHttpConfig5", + IllegalHttpConfig5.IllegalHttpConfig5Proto.javaDescriptor, + "IllegalHttpConfig5", "HTTP API Config: Only Custom patterns with [*] kind supported but [not currently supported] found!" ) } "not allow fieldName body-selector which does not exist on request type" in { assertConfigurationFailure( - IllegalHttpConfig6.IllegalHttpConfig6Proto.javaDescriptor, "IllegalHttpConfig6", + IllegalHttpConfig6.IllegalHttpConfig6Proto.javaDescriptor, + "IllegalHttpConfig6", "HTTP API Config: Body configured to [not-available] but that field does not exist on input type." ) } "not allow repeated fields in body-selector" in { assertConfigurationFailure( - IllegalHttpConfig7.IllegalHttpConfig7Proto.javaDescriptor, "IllegalHttpConfig7", + IllegalHttpConfig7.IllegalHttpConfig7Proto.javaDescriptor, + "IllegalHttpConfig7", "HTTP API Config: Body configured to [not_allowed] but that field does not exist on input type." 
) } "not allow fieldName responseBody-selector which does not exist on response type" in { assertConfigurationFailure( - IllegalHttpConfig8.IllegalHttpConfig8Proto.javaDescriptor, "IllegalHttpConfig8", + IllegalHttpConfig8.IllegalHttpConfig8Proto.javaDescriptor, + "IllegalHttpConfig8", "HTTP API Config: Response body field [not-available] does not exist on type [google.protobuf.Empty]" ) } "not allow more than one level of additionalBindings" in { assertConfigurationFailure( - IllegalHttpConfig9.IllegalHttpConfig9Proto.javaDescriptor, "IllegalHttpConfig9", + IllegalHttpConfig9.IllegalHttpConfig9Proto.javaDescriptor, + "IllegalHttpConfig9", "HTTP API Config: Only one level of additionalBindings supported, but [HttpRule(,,,Vector(HttpRule(,,,Vector(HttpRule(,,,Vector(),Get(/baz))),Get(/bar))),Get(/foo))] has more than one!" ) } diff --git a/proxy/core/src/test/scala/io/cloudstate/proxy/WarmupSpec.scala b/proxy/core/src/test/scala/io/cloudstate/proxy/WarmupSpec.scala index b1126fad2..2f390d54f 100644 --- a/proxy/core/src/test/scala/io/cloudstate/proxy/WarmupSpec.scala +++ b/proxy/core/src/test/scala/io/cloudstate/proxy/WarmupSpec.scala @@ -5,8 +5,11 @@ import akka.testkit.{ImplicitSender, TestKit} import com.typesafe.config.ConfigFactory import org.scalatest.{BeforeAndAfterAll, WordSpecLike} -class WarmupSpec extends TestKit(ActorSystem("WarmupSpec", ConfigFactory.load("test-in-memory"))) - with WordSpecLike with BeforeAndAfterAll with ImplicitSender { +class WarmupSpec + extends TestKit(ActorSystem("WarmupSpec", ConfigFactory.load("test-in-memory"))) + with WordSpecLike + with BeforeAndAfterAll + with ImplicitSender { override protected def afterAll(): Unit = TestKit.shutdownActorSystem(system) diff --git a/proxy/core/src/test/scala/io/cloudstate/proxy/autoscaler/AutoscalerSpec.scala b/proxy/core/src/test/scala/io/cloudstate/proxy/autoscaler/AutoscalerSpec.scala index b815780dd..457848042 100644 --- a/proxy/core/src/test/scala/io/cloudstate/proxy/autoscaler/AutoscalerSpec.scala +++ b/proxy/core/src/test/scala/io/cloudstate/proxy/autoscaler/AutoscalerSpec.scala @@ -11,8 +11,12 @@ import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike} import scala.concurrent.duration._ -class AutoscalerSpec extends TestKit(ActorSystem("AutoscalerSpec", ConfigFactory.load("test-in-memory"))) - with WordSpecLike with Matchers with BeforeAndAfterAll with ImplicitSender { +class AutoscalerSpec + extends TestKit(ActorSystem("AutoscalerSpec", ConfigFactory.load("test-in-memory"))) + with WordSpecLike + with Matchers + with BeforeAndAfterAll + with ImplicitSender { // Start cluster with ourselves, necessary for ddata val cluster = Cluster(system) @@ -26,25 +30,24 @@ class AutoscalerSpec extends TestKit(ActorSystem("AutoscalerSpec", ConfigFactory override protected def afterAll(): Unit = TestKit.shutdownActorSystem(system) - private def uniqueAddress(name: String): UniqueAddress = { - UniqueAddress(Address("akka", "system", Some(name), Some(2552)), 1l) - } + private def uniqueAddress(name: String): UniqueAddress = + UniqueAddress(Address("akka", "system", Some(name), Some(2552)), 1L) private def addressString(name: String) = uniqueAddress(name).address.toString private def withAutoscaler( - initialState: AutoscalerState = Stable(), - members: Seq[String] = Seq("a"), - targetUserFunctionConcurrency: Int = 1, - targetRequestConcurrency: Int = 10, - targetConcurrencyWindow: FiniteDuration = 60.seconds, - scaleUpStableDeadline: FiniteDuration = 2.minutes, - scaleDownStableDeadline: FiniteDuration = 30.seconds, - 
requestRateThresholdFactor: Double = 1.5, - requestRateThresholdWindow: FiniteDuration = 6.seconds, - maxScaleFactor: Double = 0, - maxScaleAbsolute: Int = 4, - maxMembers: Int = 10 + initialState: AutoscalerState = Stable(), + members: Seq[String] = Seq("a"), + targetUserFunctionConcurrency: Int = 1, + targetRequestConcurrency: Int = 10, + targetConcurrencyWindow: FiniteDuration = 60.seconds, + scaleUpStableDeadline: FiniteDuration = 2.minutes, + scaleDownStableDeadline: FiniteDuration = 30.seconds, + requestRateThresholdFactor: Double = 1.5, + requestRateThresholdWindow: FiniteDuration = 6.seconds, + maxScaleFactor: Double = 0, + maxScaleAbsolute: Int = 4, + maxMembers: Int = 10 )(block: ActorRef => Unit): Unit = { ensureScalerStateIs(initialState) @@ -84,7 +87,7 @@ class AutoscalerSpec extends TestKit(ActorSystem("AutoscalerSpec", ConfigFactory expectMsg(UpdateSuccess(StateKey, None)) } - def expectScalerStateToBe(pf: PartialFunction[Any, Unit]) = { + def expectScalerStateToBe(pf: PartialFunction[Any, Unit]) = within(3.seconds) { replicator ! Get(StateKey, ReadLocal) expectMsgPF() { @@ -93,26 +96,25 @@ class AutoscalerSpec extends TestKit(ActorSystem("AutoscalerSpec", ConfigFactory } } - } private def aLongTimeFromNowMillis = System.currentTimeMillis() + 1.hour.toMillis private def metrics( - address: String = "a", - metricInterval: FiniteDuration = 1.second, - requestConcurrency: Double = 5.0, - requestTime: FiniteDuration = 10.millis, - requestCount: Int = 200, - userFunctionConcurrency: Double = 0.5, - userFunctionTime: FiniteDuration = 5.millis, - userFunctionCount: Int = 200, - databaseConcurrency: Double = 0.0, - databaseTime: FiniteDuration = Duration.Zero, - databaseCount: Int = 0 - ): AutoscalerMetrics = { + address: String = "a", + metricInterval: FiniteDuration = 1.second, + requestConcurrency: Double = 5.0, + requestTime: FiniteDuration = 10.millis, + requestCount: Int = 200, + userFunctionConcurrency: Double = 0.5, + userFunctionTime: FiniteDuration = 5.millis, + userFunctionCount: Int = 200, + databaseConcurrency: Double = 0.0, + databaseTime: FiniteDuration = Duration.Zero, + databaseCount: Int = 0 + ): AutoscalerMetrics = AutoscalerMetrics( address = addressString(address), - uniqueAddressLongId = 1l, + uniqueAddressLongId = 1L, metricIntervalNanos = metricInterval.toNanos, requestConcurrency = requestConcurrency, requestTimeNanos = requestTime.toNanos, @@ -124,7 +126,6 @@ class AutoscalerSpec extends TestKit(ActorSystem("AutoscalerSpec", ConfigFactory databaseTimeNanos = databaseTime.toNanos, databaseCount = databaseCount ) - } "The Autoscaler" should { "do nothing when there is one node and metrics are below thresholds" in withAutoscaler() { autoscaler => @@ -236,7 +237,6 @@ class AutoscalerSpec extends TestKit(ActorSystem("AutoscalerSpec", ConfigFactory expectNoMessage(300.millis) } - } -} \ No newline at end of file +} diff --git a/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/AbstractCrdtEntitySpec.scala b/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/AbstractCrdtEntitySpec.scala index 3cdb3aeaf..1dc86901d 100644 --- a/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/AbstractCrdtEntitySpec.scala +++ b/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/AbstractCrdtEntitySpec.scala @@ -24,8 +24,7 @@ import scala.concurrent.{Await, Future} import scala.concurrent.duration._ object AbstractCrdtEntitySpec { - def config: Config = ConfigFactory.parseString( - """ + def config: Config = ConfigFactory.parseString(""" |akka.actor.provider = cluster |# Make the 
tests run faster |akka.cluster.distributed-data.notify-subscribers-interval = 50ms @@ -49,15 +48,16 @@ object AbstractCrdtEntitySpec { final val element3 = ProtoAny("element3", ByteString.copyFromUtf8("3")) } -abstract class AbstractCrdtEntitySpec extends TestKit(ActorSystem("test", AbstractCrdtEntitySpec.config)) - with WordSpecLike - with Matchers - with Inside - with Eventually - with BeforeAndAfter - with BeforeAndAfterAll - with ImplicitSender - with OptionValues { +abstract class AbstractCrdtEntitySpec + extends TestKit(ActorSystem("test", AbstractCrdtEntitySpec.config)) + with WordSpecLike + with Matchers + with Inside + with Eventually + with BeforeAndAfter + with BeforeAndAfterAll + with ImplicitSender + with OptionValues { import AbstractCrdtEntitySpec._ @@ -116,50 +116,71 @@ abstract class AbstractCrdtEntitySpec extends TestKit(ActorSystem("test", Abstra (expectCommand(name, payload, true), streamProbe) } - protected def expectState(): S = { + protected def expectState(): S = inside(toUserFunction.expectMsgType[CrdtStreamIn].message) { case CrdtStreamIn.Message.State(state) => extractState(state.state) } - } - protected def expectDelta(): D = { + protected def expectDelta(): D = inside(toUserFunction.expectMsgType[CrdtStreamIn].message) { case CrdtStreamIn.Message.Changed(delta) => extractDelta(delta.delta) } - } - protected def sendAndExpectReply(commandId: Long, action: CrdtStateAction.Action = CrdtStateAction.Action.Empty, - writeConsistency: CrdtWriteConsistency = CrdtWriteConsistency.LOCAL): UserFunctionReply = { + protected def sendAndExpectReply( + commandId: Long, + action: CrdtStateAction.Action = CrdtStateAction.Action.Empty, + writeConsistency: CrdtWriteConsistency = CrdtWriteConsistency.LOCAL + ): UserFunctionReply = { val reply = doSendAndExpectReply(commandId, action, writeConsistency) reply.clientAction shouldBe None reply } - protected def sendStreamedMessage(commandId: Long, payload: Option[ProtoAny] = None, endStream: Boolean = false): Unit = { - fromUserFunction ! CrdtStreamOut(CrdtStreamOut.Message.StreamedMessage(CrdtStreamedMessage(commandId = commandId, - sideEffects = Nil, clientAction = payload.map(p => ClientAction(ClientAction.Action.Reply(Reply(Some(p))))), endStream = endStream))) - } - - protected def sendAndExpectFailure(commandId: Long, action: CrdtStateAction.Action = CrdtStateAction.Action.Empty, - writeConsistency: CrdtWriteConsistency = CrdtWriteConsistency.LOCAL): Failure = { + protected def sendStreamedMessage(commandId: Long, + payload: Option[ProtoAny] = None, + endStream: Boolean = false): Unit = + fromUserFunction ! CrdtStreamOut( + CrdtStreamOut.Message.StreamedMessage( + CrdtStreamedMessage(commandId = commandId, + sideEffects = Nil, + clientAction = payload.map(p => ClientAction(ClientAction.Action.Reply(Reply(Some(p))))), + endStream = endStream) + ) + ) + + protected def sendAndExpectFailure(commandId: Long, + action: CrdtStateAction.Action = CrdtStateAction.Action.Empty, + writeConsistency: CrdtWriteConsistency = CrdtWriteConsistency.LOCAL): Failure = { val reply = doSendAndExpectReply(commandId, action, writeConsistency) inside(reply.clientAction) { case Some(ClientAction(ClientAction.Action.Failure(failure))) => failure } } - protected def sendReply(commandId: Long, action: CrdtStateAction.Action = CrdtStateAction.Action.Empty, - writeConsistency: CrdtWriteConsistency = CrdtWriteConsistency.LOCAL, streamed: Boolean = false) = { - fromUserFunction ! 
CrdtStreamOut(CrdtStreamOut.Message.Reply(CrdtReply(commandId = commandId, sideEffects = Nil, - clientAction = None, stateAction = Some(CrdtStateAction(action = action, writeConsistency = writeConsistency)), streamed = streamed))) - } - - protected def doSendAndExpectReply(commandId: Long, action: CrdtStateAction.Action, writeConsistency: CrdtWriteConsistency): UserFunctionReply = { + protected def sendReply(commandId: Long, + action: CrdtStateAction.Action = CrdtStateAction.Action.Empty, + writeConsistency: CrdtWriteConsistency = CrdtWriteConsistency.LOCAL, + streamed: Boolean = false) = + fromUserFunction ! CrdtStreamOut( + CrdtStreamOut.Message.Reply( + CrdtReply( + commandId = commandId, + sideEffects = Nil, + clientAction = None, + stateAction = Some(CrdtStateAction(action = action, writeConsistency = writeConsistency)), + streamed = streamed + ) + ) + ) + + protected def doSendAndExpectReply(commandId: Long, + action: CrdtStateAction.Action, + writeConsistency: CrdtWriteConsistency): UserFunctionReply = { sendReply(commandId, action, writeConsistency) expectMsgType[UserFunctionReply] } - protected def expectCommand(name: String, payload: ProtoAny, streamed: Boolean = false): Long = { + protected def expectCommand(name: String, payload: ProtoAny, streamed: Boolean = false): Long = inside(toUserFunction.expectMsgType[CrdtStreamIn].message) { case CrdtStreamIn.Message.Command(Command(eid, cid, n, p, s)) => eid should ===(entityId) @@ -168,30 +189,35 @@ abstract class AbstractCrdtEntitySpec extends TestKit(ActorSystem("test", Abstra s shouldBe streamed cid } - } protected def createAndExpectInit(): Option[S] = { toUserFunction = TestProbe() entityDiscovery = TestProbe() - entity = system.actorOf(CrdtEntity.props(new Crdt() { - override def handle(in: Source[CrdtStreamIn, NotUsed]): Source[CrdtStreamOut, NotUsed] = { - in.runWith(Sink.actorRef(toUserFunction.testActor, StreamClosed)) - Source.actorRef(100, OverflowStrategy.fail).mapMaterializedValue { actor => - fromUserFunction = actor - NotUsed - } - } - }, CrdtEntity.Configuration(ServiceName, UserFunctionName, Timeout(10.minutes), 100, 10.minutes, 10.minutes), - new EntityDiscovery { - override def discover(in: ProxyInfo): Future[EntitySpec] = { - entityDiscovery.testActor ! in - Future.failed(new Exception("Not expecting discover")) - } - override def reportError(in: UserFunctionError): Future[Empty] = { - entityDiscovery.testActor ! in - Future.successful(Empty()) + entity = system.actorOf( + CrdtEntity.props( + new Crdt() { + override def handle(in: Source[CrdtStreamIn, NotUsed]): Source[CrdtStreamOut, NotUsed] = { + in.runWith(Sink.actorRef(toUserFunction.testActor, StreamClosed)) + Source.actorRef(100, OverflowStrategy.fail).mapMaterializedValue { actor => + fromUserFunction = actor + NotUsed + } + } + }, + CrdtEntity.Configuration(ServiceName, UserFunctionName, Timeout(10.minutes), 100, 10.minutes, 10.minutes), + new EntityDiscovery { + override def discover(in: ProxyInfo): Future[EntitySpec] = { + entityDiscovery.testActor ! in + Future.failed(new Exception("Not expecting discover")) + } + override def reportError(in: UserFunctionError): Future[Empty] = { + entityDiscovery.testActor ! 
in + Future.successful(Empty()) + } } - }), entityId) + ), + entityId + ) val init = toUserFunction.expectMsgType[CrdtStreamIn] inside(init.message) { @@ -218,13 +244,10 @@ abstract class AbstractCrdtEntitySpec extends TestKit(ActorSystem("test", Abstra } } - override protected def beforeAll(): Unit = { + override protected def beforeAll(): Unit = cluster.join(cluster.selfAddress) - } - override protected def afterAll(): Unit = { + override protected def afterAll(): Unit = Await.ready(system.terminate(), 10.seconds) - } - } diff --git a/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/CrdtEntitySpec.scala b/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/CrdtEntitySpec.scala index bae986524..70a2e5e22 100644 --- a/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/CrdtEntitySpec.scala +++ b/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/CrdtEntitySpec.scala @@ -7,7 +7,6 @@ import io.cloudstate.proxy.entity.UserFunctionReply import scala.concurrent.duration._ - class CrdtEntitySpec extends AbstractCrdtEntitySpec { import AbstractCrdtEntitySpec._ @@ -25,9 +24,8 @@ class CrdtEntitySpec extends AbstractCrdtEntitySpec { override protected def extractDelta(delta: CrdtDelta.Delta) = delta.pncounter.value - def updateCounter(update: Long) = { + def updateCounter(update: Long) = CrdtStateAction.Action.Update(CrdtDelta(CrdtDelta.Delta.Pncounter(PNCounterDelta(update)))) - } "The CrdtEntity" should { @@ -138,8 +136,12 @@ class CrdtEntitySpec extends AbstractCrdtEntitySpec { update(_ :+ 6) toUserFunction.expectNoMessage(200.millis) - fromUserFunction ! CrdtStreamOut(CrdtStreamOut.Message.StreamCancelledResponse( - CrdtStreamCancelledResponse(cid, stateAction = Some(CrdtStateAction(CrdtWriteConsistency.LOCAL, updateCounter(3)))))) + fromUserFunction ! CrdtStreamOut( + CrdtStreamOut.Message.StreamCancelledResponse( + CrdtStreamCancelledResponse(cid, + stateAction = Some(CrdtStateAction(CrdtWriteConsistency.LOCAL, updateCounter(3)))) + ) + ) expectDelta().change should be(8) eventually { @@ -166,8 +168,9 @@ class CrdtEntitySpec extends AbstractCrdtEntitySpec { update(_ :+ 6) toUserFunction.expectNoMessage(200.millis) - fromUserFunction ! CrdtStreamOut(CrdtStreamOut.Message.StreamCancelledResponse( - CrdtStreamCancelledResponse(cid, stateAction = None))) + fromUserFunction ! 
CrdtStreamOut( + CrdtStreamOut.Message.StreamCancelledResponse(CrdtStreamCancelledResponse(cid, stateAction = None)) + ) expectDelta().change should be(8) eventually { diff --git a/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/FlagCrdtEntitySpec.scala b/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/FlagCrdtEntitySpec.scala index 72c721f50..166dab2f5 100644 --- a/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/FlagCrdtEntitySpec.scala +++ b/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/FlagCrdtEntitySpec.scala @@ -6,7 +6,6 @@ import io.cloudstate.protocol.entity.UserFunctionError import scala.concurrent.duration._ - class FlagCrdtEntitySpec extends AbstractCrdtEntitySpec { import AbstractCrdtEntitySpec._ @@ -23,13 +22,11 @@ class FlagCrdtEntitySpec extends AbstractCrdtEntitySpec { override protected def extractDelta(delta: CrdtDelta.Delta) = delta.flag.value - def create(value: Boolean) = { + def create(value: Boolean) = CrdtStateAction.Action.Create(CrdtState(CrdtState.State.Flag(FlagState(value)))) - } - def enable(value: Boolean = true) = { + def enable(value: Boolean = true) = CrdtStateAction.Action.Update(CrdtDelta(CrdtDelta.Delta.Flag(FlagDelta(value)))) - } "The Flag CrdtEntity" should { diff --git a/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/GCounterCrdtEntitySpec.scala b/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/GCounterCrdtEntitySpec.scala index 8099a750f..db1e329bf 100644 --- a/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/GCounterCrdtEntitySpec.scala +++ b/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/GCounterCrdtEntitySpec.scala @@ -6,7 +6,6 @@ import io.cloudstate.protocol.entity.UserFunctionError import scala.concurrent.duration._ - class GCounterCrdtEntitySpec extends AbstractCrdtEntitySpec { import AbstractCrdtEntitySpec._ @@ -23,13 +22,11 @@ class GCounterCrdtEntitySpec extends AbstractCrdtEntitySpec { override protected def extractDelta(delta: CrdtDelta.Delta) = delta.gcounter.value - def create(value: Long) = { + def create(value: Long) = CrdtStateAction.Action.Create(CrdtState(CrdtState.State.Gcounter(GCounterState(value)))) - } - def updateCounter(increment: Long) = { + def updateCounter(increment: Long) = CrdtStateAction.Action.Update(CrdtDelta(CrdtDelta.Delta.Gcounter(GCounterDelta(increment)))) - } "The GCounter CrdtEntity" should { diff --git a/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/GSetCrdtEntitySpec.scala b/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/GSetCrdtEntitySpec.scala index 58982d672..11954ba4b 100644 --- a/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/GSetCrdtEntitySpec.scala +++ b/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/GSetCrdtEntitySpec.scala @@ -6,7 +6,6 @@ import io.cloudstate.protocol.crdt._ import scala.concurrent.duration._ - class GSetCrdtEntitySpec extends AbstractCrdtEntitySpec { import AbstractCrdtEntitySpec._ @@ -23,13 +22,11 @@ class GSetCrdtEntitySpec extends AbstractCrdtEntitySpec { override protected def extractDelta(delta: CrdtDelta.Delta) = delta.gset.value - def createSet(elements: ProtoAny*) = { + def createSet(elements: ProtoAny*) = CrdtStateAction.Action.Create(CrdtState(CrdtState.State.Gset(GSetState(elements)))) - } - def updateSet(elements: ProtoAny*) = { + def updateSet(elements: ProtoAny*) = CrdtStateAction.Action.Update(CrdtDelta(CrdtDelta.Delta.Gset(GSetDelta(elements)))) - } "The GSet CrdtEntity" should { diff --git a/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/LWWRegisterCrdtEntitySpec.scala 
b/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/LWWRegisterCrdtEntitySpec.scala index 0e1e895c7..288cf285a 100644 --- a/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/LWWRegisterCrdtEntitySpec.scala +++ b/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/LWWRegisterCrdtEntitySpec.scala @@ -6,7 +6,6 @@ import io.cloudstate.protocol.crdt._ import scala.concurrent.duration._ - class LWWRegisterCrdtEntitySpec extends AbstractCrdtEntitySpec { import AbstractCrdtEntitySpec._ @@ -23,13 +22,16 @@ class LWWRegisterCrdtEntitySpec extends AbstractCrdtEntitySpec { override protected def extractDelta(delta: CrdtDelta.Delta) = delta.lwwregister.value - def create(element: ProtoAny) = { + def create(element: ProtoAny) = CrdtStateAction.Action.Create(CrdtState(CrdtState.State.Lwwregister(LWWRegisterState(value = Some(element))))) - } - def updateRegister(element: ProtoAny, clock: CrdtClock = CrdtClock.DEFAULT, customClockValue: Long = 0) = { - CrdtStateAction.Action.Update(CrdtDelta(CrdtDelta.Delta.Lwwregister(LWWRegisterDelta(value = Some(element), clock = clock, customClockValue = customClockValue)))) - } + def updateRegister(element: ProtoAny, clock: CrdtClock = CrdtClock.DEFAULT, customClockValue: Long = 0) = + CrdtStateAction.Action.Update( + CrdtDelta( + CrdtDelta.Delta + .Lwwregister(LWWRegisterDelta(value = Some(element), clock = clock, customClockValue = customClockValue)) + ) + ) "The LWWRegister CrdtEntity" should { @@ -107,12 +109,16 @@ class LWWRegisterCrdtEntitySpec extends AbstractCrdtEntitySpec { createAndExpectInit() val cid1 = sendAndExpectCommand("cmd", command) - sendAndExpectReply(cid1, updateRegister(element2, clock = CrdtClock.CUSTOM_AUTO_INCREMENT, customClockValue = start + 1000)) + sendAndExpectReply( + cid1, + updateRegister(element2, clock = CrdtClock.CUSTOM_AUTO_INCREMENT, customClockValue = start + 1000) + ) get().value shouldBe element2 expectNoMessage(200.millis) val cid2 = sendAndExpectCommand("cmd", command) - sendAndExpectReply(cid2, updateRegister(element3, clock = CrdtClock.CUSTOM_AUTO_INCREMENT, customClockValue = start)) + sendAndExpectReply(cid2, + updateRegister(element3, clock = CrdtClock.CUSTOM_AUTO_INCREMENT, customClockValue = start)) get().value shouldBe element3 expectNoMessage(200.millis) } diff --git a/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/ORMapCrdtEntitySpec.scala b/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/ORMapCrdtEntitySpec.scala index 2ef384e05..de128c450 100644 --- a/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/ORMapCrdtEntitySpec.scala +++ b/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/ORMapCrdtEntitySpec.scala @@ -6,7 +6,6 @@ import io.cloudstate.protocol.crdt._ import scala.concurrent.duration._ - class ORMapCrdtEntitySpec extends AbstractCrdtEntitySpec { import AbstractCrdtEntitySpec._ @@ -23,38 +22,41 @@ class ORMapCrdtEntitySpec extends AbstractCrdtEntitySpec { override protected def extractDelta(delta: CrdtDelta.Delta) = delta.ormap.value - def mapCounterEntries(elements: (ProtoAny, Int)*) = { + def mapCounterEntries(elements: (ProtoAny, Int)*) = elements.map { case (k, v) => ORMapEntry(Some(k), Some(createCounter(v))) } - } - def mapCounterDeltas(elements: (ProtoAny, Int)*) = { + def mapCounterDeltas(elements: (ProtoAny, Int)*) = elements.map { case (k, v) => ORMapEntryDelta(Some(k), Some(updateCounter(v))) } - } - def createMap(elements: Seq[ORMapEntry]) = { + def createMap(elements: Seq[ORMapEntry]) = CrdtStateAction.Action.Create(CrdtState(CrdtState.State.Ormap(ORMapState(elements)))) - } - def 
updateMap(added: Seq[(ProtoAny, CrdtState)] = Nil, removed: Seq[ProtoAny] = Nil, updated: Seq[(ProtoAny, CrdtDelta)] = Nil, cleared: Boolean = false) = { - CrdtStateAction.Action.Update(CrdtDelta(CrdtDelta.Delta.Ormap(ORMapDelta( - added = added.map(e => ORMapEntry(Some(e._1), Some(e._2))), - updated = updated.map(e => ORMapEntryDelta(Some(e._1), Some(e._2))), - removed = removed, - cleared = cleared - )))) - } - - def createCounter(value : Int) = { + def updateMap(added: Seq[(ProtoAny, CrdtState)] = Nil, + removed: Seq[ProtoAny] = Nil, + updated: Seq[(ProtoAny, CrdtDelta)] = Nil, + cleared: Boolean = false) = + CrdtStateAction.Action.Update( + CrdtDelta( + CrdtDelta.Delta.Ormap( + ORMapDelta( + added = added.map(e => ORMapEntry(Some(e._1), Some(e._2))), + updated = updated.map(e => ORMapEntryDelta(Some(e._1), Some(e._2))), + removed = removed, + cleared = cleared + ) + ) + ) + ) + + def createCounter(value: Int) = CrdtState(CrdtState.State.Gcounter(GCounterState(value))) - } - def updateCounter(increment: Int) = { + def updateCounter(increment: Int) = CrdtDelta(CrdtDelta.Delta.Gcounter(GCounterDelta(increment))) - } def verifyMapHasCounters(entries: (ProtoAny, Int)*) = { val map = get() @@ -100,7 +102,8 @@ class ORMapCrdtEntitySpec extends AbstractCrdtEntitySpec { update { s => s :+ (element1 -> (GCounter.empty :+ 2)) :+ (element2 -> (GCounter.empty :+ 5)) } - createAndExpectInit().value.entries should contain theSameElementsAs mapCounterEntries(element1 -> 2, element2 -> 5) + createAndExpectInit().value.entries should contain theSameElementsAs mapCounterEntries(element1 -> 2, + element2 -> 5) } "push the full state when no entity exists" in { @@ -203,9 +206,12 @@ class ORMapCrdtEntitySpec extends AbstractCrdtEntitySpec { val delta = expectDelta() delta.removed should contain theSameElementsAs Seq(element1) // Usually we should only get one delta, but there's a race condition that means we could get the readded elements in another delta - val added = if (delta.added.isEmpty) expectDelta().added - else delta.added - added should contain theSameElementsAs Seq(ORMapEntry(Some(element1), Some(CrdtState(CrdtState.State.Pncounter(PNCounterState(5)))))) + val added = + if (delta.added.isEmpty) expectDelta().added + else delta.added + added should contain theSameElementsAs Seq( + ORMapEntry(Some(element1), Some(CrdtState(CrdtState.State.Pncounter(PNCounterState(5))))) + ) } "detect and handle incompatible CRDT value changes" in { @@ -216,8 +222,9 @@ class ORMapCrdtEntitySpec extends AbstractCrdtEntitySpec { val delta = expectDelta() delta.removed should contain theSameElementsAs Seq(element1) // Usually we should only get one delta, but there's a race condition that means we could get the readded elements in another delta - val added = if (delta.added.isEmpty) expectDelta().added - else delta.added + val added = + if (delta.added.isEmpty) expectDelta().added + else delta.added added should contain theSameElementsAs mapCounterEntries(element1 -> 3) } @@ -279,13 +286,15 @@ class ORMapCrdtEntitySpec extends AbstractCrdtEntitySpec { } createAndExpectInit() val cid = sendAndExpectCommand("cmd", command) - sendAndExpectReply(cid, updateMap(updated = Seq(element1 -> updateCounter(4)), removed = Seq(element2), added = Seq(element3 -> createCounter(3)))) + sendAndExpectReply(cid, + updateMap(updated = Seq(element1 -> updateCounter(4)), + removed = Seq(element2), + added = Seq(element3 -> createCounter(3)))) eventually { verifyMapHasCounters(element1 -> 6, element3 -> 3) } 
toUserFunction.expectNoMessage(200.millis) } - } } diff --git a/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/ORSetCrdtEntitySpec.scala b/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/ORSetCrdtEntitySpec.scala index 476a6d436..fc221afb0 100644 --- a/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/ORSetCrdtEntitySpec.scala +++ b/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/ORSetCrdtEntitySpec.scala @@ -6,7 +6,6 @@ import com.google.protobuf.any.{Any => ProtoAny} import scala.concurrent.duration._ - class ORSetCrdtEntitySpec extends AbstractCrdtEntitySpec { import AbstractCrdtEntitySpec._ @@ -23,13 +22,13 @@ class ORSetCrdtEntitySpec extends AbstractCrdtEntitySpec { override protected def extractDelta(delta: CrdtDelta.Delta) = delta.orset.value - def createSet(elements: ProtoAny*) = { + def createSet(elements: ProtoAny*) = CrdtStateAction.Action.Create(CrdtState(CrdtState.State.Orset(ORSetState(elements)))) - } - def updateSet(added: Seq[ProtoAny] = Nil, removed: Seq[ProtoAny] = Nil, cleared: Boolean = false) = { - CrdtStateAction.Action.Update(CrdtDelta(CrdtDelta.Delta.Orset(ORSetDelta(added = added, removed = removed, cleared = cleared)))) - } + def updateSet(added: Seq[ProtoAny] = Nil, removed: Seq[ProtoAny] = Nil, cleared: Boolean = false) = + CrdtStateAction.Action.Update( + CrdtDelta(CrdtDelta.Delta.Orset(ORSetDelta(added = added, removed = removed, cleared = cleared))) + ) "The ORSet CrdtEntity" should { diff --git a/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/PNCounterCrdtEntitySpec.scala b/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/PNCounterCrdtEntitySpec.scala index 8b5f8deeb..1dd328371 100644 --- a/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/PNCounterCrdtEntitySpec.scala +++ b/proxy/core/src/test/scala/io/cloudstate/proxy/crdt/PNCounterCrdtEntitySpec.scala @@ -5,7 +5,6 @@ import io.cloudstate.protocol.crdt._ import scala.concurrent.duration._ - class PNCounterCrdtEntitySpec extends AbstractCrdtEntitySpec { import AbstractCrdtEntitySpec._ @@ -22,13 +21,11 @@ class PNCounterCrdtEntitySpec extends AbstractCrdtEntitySpec { override protected def extractDelta(delta: CrdtDelta.Delta) = delta.pncounter.value - def create(value: Long) = { + def create(value: Long) = CrdtStateAction.Action.Create(CrdtState(CrdtState.State.Pncounter(PNCounterState(value)))) - } - def updateCounter(update: Long) = { + def updateCounter(update: Long) = CrdtStateAction.Action.Update(CrdtDelta(CrdtDelta.Delta.Pncounter(PNCounterDelta(update)))) - } "The PNCounter CrdtEntity" should { diff --git a/proxy/jdbc/src/main/scala/io/cloudstate/proxy/jdbc/SlickEnsureTablesExistReadyCheck.scala b/proxy/jdbc/src/main/scala/io/cloudstate/proxy/jdbc/SlickEnsureTablesExistReadyCheck.scala index ded595f22..91d0a825a 100644 --- a/proxy/jdbc/src/main/scala/io/cloudstate/proxy/jdbc/SlickEnsureTablesExistReadyCheck.scala +++ b/proxy/jdbc/src/main/scala/io/cloudstate/proxy/jdbc/SlickEnsureTablesExistReadyCheck.scala @@ -1,6 +1,5 @@ package io.cloudstate.proxy.jdbc - import java.sql.Connection import akka.Done @@ -34,21 +33,25 @@ class SlickEnsureTablesExistReadyCheck(system: ActorSystem) extends (() => Futur // Get a hold of the akka-jdbc slick database instance val db = SlickExtension(system).database(ConfigFactory.parseMap(Map(ConfigKeys.useSharedDb -> "slick").asJava)) - val actor = system.actorOf(BackoffSupervisor.props( - BackoffOpts.onFailure( - childProps = Props(new EnsureTablesExistsActor(db)), - childName = "jdbc-table-creator", - minBackoff = 3.seconds, - maxBackoff = 30.seconds, 
- randomFactor = 0.2 - )), "jdbc-table-creator-supervisor") + val actor = system.actorOf( + BackoffSupervisor.props( + BackoffOpts.onFailure( + childProps = Props(new EnsureTablesExistsActor(db)), + childName = "jdbc-table-creator", + minBackoff = 3.seconds, + maxBackoff = 30.seconds, + randomFactor = 0.2 + ) + ), + "jdbc-table-creator-supervisor" + ) implicit val timeout = Timeout(10.seconds) // TODO make configurable? import akka.pattern.ask () => (actor ? EnsureTablesExistsActor.Ready).mapTo[Boolean] - } else { - () => Future.successful(true) + } else { () => + Future.successful(true) } override def apply(): Future[Boolean] = check() @@ -61,8 +64,8 @@ private object EnsureTablesExistsActor { } /** - * Copied/adapted from https://github.com/lagom/lagom/blob/60897ef752ddbfc28553d3726b8fdb830a3ebdc4/persistence-jdbc/core/src/main/scala/com/lightbend/lagom/internal/persistence/jdbc/SlickProvider.scala - */ + * Copied/adapted from https://github.com/lagom/lagom/blob/60897ef752ddbfc28553d3726b8fdb830a3ebdc4/persistence-jdbc/core/src/main/scala/com/lightbend/lagom/internal/persistence/jdbc/SlickProvider.scala + */ private class EnsureTablesExistsActor(db: SlickDatabase) extends Actor with ActorLogging { import EnsureTablesExistsActor._ @@ -74,7 +77,9 @@ private class EnsureTablesExistsActor(db: SlickDatabase) extends Actor with Acto import context.dispatcher private val journalCfg = new JournalTableConfiguration(context.system.settings.config.getConfig("jdbc-read-journal")) - private val snapshotCfg = new SnapshotTableConfiguration(context.system.settings.config.getConfig("jdbc-snapshot-store")) + private val snapshotCfg = new SnapshotTableConfiguration( + context.system.settings.config.getConfig("jdbc-snapshot-store") + ) private val journalTables = new JournalTables { override val journalTableCfg: JournalTableConfiguration = journalCfg @@ -122,21 +127,19 @@ private class EnsureTablesExistsActor(db: SlickDatabase) extends Actor with Acto case Ready => sender() ! 
true } - private def createTable(schemaStatements: Seq[String], tableExists: (Vector[MTable], Option[String]) => Boolean) = { + private def createTable(schemaStatements: Seq[String], tableExists: (Vector[MTable], Option[String]) => Boolean) = for { currentSchema <- getCurrentSchema tables <- getTables(currentSchema) _ <- createTableInternal(tables, currentSchema, schemaStatements, tableExists) } yield Done.getInstance() - } private def createTableInternal( - tables: Vector[MTable], - currentSchema: Option[String], - schemaStatements: Seq[String], - tableExists: (Vector[MTable], Option[String]) => Boolean - ) = { - + tables: Vector[MTable], + currentSchema: Option[String], + schemaStatements: Seq[String], + tableExists: (Vector[MTable], Option[String]) => Boolean + ) = if (tableExists(tables, currentSchema)) { DBIO.successful(()) } else { @@ -169,9 +172,8 @@ private class EnsureTablesExistsActor(db: SlickDatabase) extends Actor with Acto } } } - } - private def getTables(currentSchema: Option[String]) = { + private def getTables(currentSchema: Option[String]) = // Calling MTable.getTables without parameters fails on MySQL // See https://github.com/lagom/lagom/issues/446 // and https://github.com/slick/slick/issues/1692 @@ -181,9 +183,8 @@ private class EnsureTablesExistsActor(db: SlickDatabase) extends Actor with Acto case _ => MTable.getTables(None, currentSchema, Option("%"), None) } - } - private def getCurrentSchema: DBIO[Option[String]] = { + private def getCurrentSchema: DBIO[Option[String]] = SimpleDBIO(ctx => tryGetSchema(ctx.connection).getOrElse(null)).flatMap { schema => if (schema == null) { // Not all JDBC drivers support the getSchema method: @@ -201,7 +202,6 @@ private class EnsureTablesExistsActor(db: SlickDatabase) extends Actor with Acto } } else DBIO.successful(Some(schema)) } - } // Some older JDBC drivers don't implement Connection.getSchema // (including some builds of H2). 
This causes them to throw an @@ -211,13 +211,14 @@ private class EnsureTablesExistsActor(db: SlickDatabase) extends Actor with Acto private def tryGetSchema(connection: Connection): Try[String] = try Success(connection.getSchema) catch { - case e: AbstractMethodError => Failure(new IllegalStateException("Database driver does not support Connection.getSchema", e)) + case e: AbstractMethodError => + Failure(new IllegalStateException("Database driver does not support Connection.getSchema", e)) } private def tableExists( - schemaName: Option[String], - tableName: String - )(tables: Vector[MTable], currentSchema: Option[String]): Boolean = { + schemaName: Option[String], + tableName: String + )(tables: Vector[MTable], currentSchema: Option[String]): Boolean = tables.exists { t => profile match { case _: MySQLProfile => @@ -226,6 +227,5 @@ private class EnsureTablesExistsActor(db: SlickDatabase) extends Actor with Acto t.name.schema.orElse(currentSchema) == schemaName.orElse(currentSchema) && t.name.name == tableName } } - } -} \ No newline at end of file +} diff --git a/samples/akka-client/src/main/scala/io/cloudstate/samples/CrdtsClient.scala b/samples/akka-client/src/main/scala/io/cloudstate/samples/CrdtsClient.scala index 024cf156e..5dec1dc4b 100644 --- a/samples/akka-client/src/main/scala/io/cloudstate/samples/CrdtsClient.scala +++ b/samples/akka-client/src/main/scala/io/cloudstate/samples/CrdtsClient.scala @@ -9,14 +9,14 @@ import com.example.crdts.crdt_example._ import scala.concurrent.duration._ import scala.concurrent.{Await, Future} - /** - * Designed for use in the REPL, run sbt console and then new io.cloudstate.samples.CrdtsClient("localhost", 9000) - * @param hostname - * @param port - */ + * Designed for use in the REPL, run sbt console and then new io.cloudstate.samples.CrdtsClient("localhost", 9000) + * @param hostname + * @param port + */ class CrdtsClient(hostname: String, port: Int, hostnameOverride: Option[String], sys: ActorSystem) { - def this(hostname: String, port: Int, hostnameOverride: Option[String] = None) = this(hostname, port, hostnameOverride, ActorSystem()) + def this(hostname: String, port: Int, hostnameOverride: Option[String] = None) = + this(hostname, port, hostnameOverride, ActorSystem()) private implicit val system = sys private implicit val materializer = ActorMaterializer() import sys.dispatcher @@ -52,12 +52,21 @@ class CrdtsClient(hostname: String, port: Int, hostnameOverride: Option[String], def mutateORSet(id: String, add: Seq[SomeValue] = Nil, remove: Seq[SomeValue] = Nil, clear: Boolean = false) = await(service.mutateORSet(MutateSet(key = id, add = add, remove = remove, clear = clear))).size - def connect(id: String) = { + def connect(id: String) = service.connect(User(id)).viaMat(KillSwitches.single)(Keep.right).to(Sink.ignore).run() - } - def monitor(monitorId: String, id: String) = { - service.monitor(User(id)).viaMat(KillSwitches.single)(Keep.right) - .to(Sink.foreach(status => println(s"Monitor $monitorId saw user $id go " + (if (status.online) "online" else "offline")))).run() - } -} \ No newline at end of file + def monitor(monitorId: String, id: String) = + service + .monitor(User(id)) + .viaMat(KillSwitches.single)(Keep.right) + .to( + Sink.foreach( + status => + println( + s"Monitor $monitorId saw user $id go " + (if (status.online) "online" + else "offline") + ) + ) + ) + .run() +} diff --git a/samples/akka-client/src/main/scala/io/cloudstate/samples/ShoppingCartClient.scala 
b/samples/akka-client/src/main/scala/io/cloudstate/samples/ShoppingCartClient.scala index 7fee0b55b..b751fe04b 100644 --- a/samples/akka-client/src/main/scala/io/cloudstate/samples/ShoppingCartClient.scala +++ b/samples/akka-client/src/main/scala/io/cloudstate/samples/ShoppingCartClient.scala @@ -39,12 +39,13 @@ object ShoppingCartClient { } /** - * Designed for use in the REPL, run sbt console and then new io.cloudstate.samples.ShoppingCartClient("localhost", 9000) - * @param hostname - * @param port - */ + * Designed for use in the REPL, run sbt console and then new io.cloudstate.samples.ShoppingCartClient("localhost", 9000) + * @param hostname + * @param port + */ class ShoppingCartClient(hostname: String, port: Int, hostnameOverride: Option[String], sys: ActorSystem) { - def this(hostname: String, port: Int, hostnameOverride: Option[String] = None) = this(hostname, port, hostnameOverride, ActorSystem()) + def this(hostname: String, port: Int, hostnameOverride: Option[String] = None) = + this(hostname, port, hostnameOverride, ActorSystem()) private implicit val system = sys private implicit val materializer = ActorMaterializer() import sys.dispatcher @@ -67,4 +68,4 @@ class ShoppingCartClient(hostname: String, port: Int, hostnameOverride: Option[S def addItem(userId: String, productId: String, name: String, quantity: Int) = await(service.addItem(AddLineItem(userId, productId, name, quantity))) def removeItem(userId: String, productId: String) = await(service.removeItem(RemoveLineItem(userId, productId))) -} \ No newline at end of file +} diff --git a/samples/java-shopping-cart/src/main/java/io/cloudstate/samples/shoppingcart/Main.java b/samples/java-shopping-cart/src/main/java/io/cloudstate/samples/shoppingcart/Main.java index 7a25205a4..b5725b6a6 100644 --- a/samples/java-shopping-cart/src/main/java/io/cloudstate/samples/shoppingcart/Main.java +++ b/samples/java-shopping-cart/src/main/java/io/cloudstate/samples/shoppingcart/Main.java @@ -5,12 +5,14 @@ import static java.util.Collections.singletonMap; public final class Main { - public final static void main(String[] args) throws Exception { - new CloudState(). - registerEventSourcedEntity( - ShoppingCartEntity.class, - Shoppingcart.getDescriptor().findServiceByName("ShoppingCart"), - com.example.shoppingcart.persistence.Domain.getDescriptor() - ).start().toCompletableFuture().get(); + public static final void main(String[] args) throws Exception { + new CloudState() + .registerEventSourcedEntity( + ShoppingCartEntity.class, + Shoppingcart.getDescriptor().findServiceByName("ShoppingCart"), + com.example.shoppingcart.persistence.Domain.getDescriptor()) + .start() + .toCompletableFuture() + .get(); } -} \ No newline at end of file +} diff --git a/samples/java-shopping-cart/src/main/java/io/cloudstate/samples/shoppingcart/ShoppingCartEntity.java b/samples/java-shopping-cart/src/main/java/io/cloudstate/samples/shoppingcart/ShoppingCartEntity.java index a915c2397..9c3c3412f 100644 --- a/samples/java-shopping-cart/src/main/java/io/cloudstate/samples/shoppingcart/ShoppingCartEntity.java +++ b/samples/java-shopping-cart/src/main/java/io/cloudstate/samples/shoppingcart/ShoppingCartEntity.java @@ -9,99 +9,94 @@ import java.util.*; import java.util.stream.Collectors; -/** - * An event sourced entity. - */ +/** An event sourced entity. 
*/ @EventSourcedEntity public class ShoppingCartEntity { - private final String entityId; - private final Map cart = new LinkedHashMap<>(); + private final String entityId; + private final Map cart = new LinkedHashMap<>(); - public ShoppingCartEntity(@EntityId String entityId) { - this.entityId = entityId; - } + public ShoppingCartEntity(@EntityId String entityId) { + this.entityId = entityId; + } - @Snapshot - public Domain.Cart snapshot() { - return Domain.Cart.newBuilder() - .addAllItems( - cart.values().stream() - .map(this::convert) - .collect(Collectors.toList()) - ).build(); - } + @Snapshot + public Domain.Cart snapshot() { + return Domain.Cart.newBuilder() + .addAllItems(cart.values().stream().map(this::convert).collect(Collectors.toList())) + .build(); + } - @SnapshotHandler - public void handleSnapshot(Domain.Cart cart) { - this.cart.clear(); - for (Domain.LineItem item : cart.getItemsList()) { - this.cart.put(item.getProductId(), convert(item)); - } + @SnapshotHandler + public void handleSnapshot(Domain.Cart cart) { + this.cart.clear(); + for (Domain.LineItem item : cart.getItemsList()) { + this.cart.put(item.getProductId(), convert(item)); } + } - @EventHandler - public void itemAdded(Domain.ItemAdded itemAdded) { - Shoppingcart.LineItem item = cart.get(itemAdded.getItem().getProductId()); - if (item == null) { - item = convert(itemAdded.getItem()); - } else { - item = item.toBuilder() - .setQuantity(item.getQuantity() + itemAdded.getItem().getQuantity()) - .build(); - } - cart.put(item.getProductId(), item); + @EventHandler + public void itemAdded(Domain.ItemAdded itemAdded) { + Shoppingcart.LineItem item = cart.get(itemAdded.getItem().getProductId()); + if (item == null) { + item = convert(itemAdded.getItem()); + } else { + item = + item.toBuilder() + .setQuantity(item.getQuantity() + itemAdded.getItem().getQuantity()) + .build(); } + cart.put(item.getProductId(), item); + } - @EventHandler - public void itemRemoved(Domain.ItemRemoved itemRemoved) { - cart.remove(itemRemoved.getProductId()); - } + @EventHandler + public void itemRemoved(Domain.ItemRemoved itemRemoved) { + cart.remove(itemRemoved.getProductId()); + } - @CommandHandler - public Shoppingcart.Cart getCart() { - return Shoppingcart.Cart.newBuilder() - .addAllItems(cart.values()) - .build(); - } + @CommandHandler + public Shoppingcart.Cart getCart() { + return Shoppingcart.Cart.newBuilder().addAllItems(cart.values()).build(); + } - @CommandHandler - public Empty addItem(Shoppingcart.AddLineItem item, CommandContext ctx) { - if (item.getQuantity() <= 0) { - ctx.fail("Cannot add negative quantity of to item" + item.getProductId()); - } - ctx.emit(Domain.ItemAdded.newBuilder().setItem( - Domain.LineItem.newBuilder() - .setProductId(item.getProductId()) - .setName(item.getName()) - .setQuantity(item.getQuantity()) - .build() - ).build()); - return Empty.getDefaultInstance(); - } - - @CommandHandler - public Empty removeItem(Shoppingcart.RemoveLineItem item, CommandContext ctx) { - if (!cart.containsKey(item.getProductId())) { - ctx.fail("Cannot remove item " + item.getProductId() + " because it is not in the cart."); - } - ctx.emit(Domain.ItemRemoved.newBuilder().setProductId(item.getProductId()).build()); - return Empty.getDefaultInstance(); + @CommandHandler + public Empty addItem(Shoppingcart.AddLineItem item, CommandContext ctx) { + if (item.getQuantity() <= 0) { + ctx.fail("Cannot add negative quantity of to item" + item.getProductId()); } + ctx.emit( + Domain.ItemAdded.newBuilder() + .setItem( + 
Domain.LineItem.newBuilder() + .setProductId(item.getProductId()) + .setName(item.getName()) + .setQuantity(item.getQuantity()) + .build()) + .build()); + return Empty.getDefaultInstance(); + } - private Shoppingcart.LineItem convert(Domain.LineItem item) { - return Shoppingcart.LineItem.newBuilder() - .setProductId(item.getProductId()) - .setName(item.getName()) - .setQuantity(item.getQuantity()) - .build(); + @CommandHandler + public Empty removeItem(Shoppingcart.RemoveLineItem item, CommandContext ctx) { + if (!cart.containsKey(item.getProductId())) { + ctx.fail("Cannot remove item " + item.getProductId() + " because it is not in the cart."); } + ctx.emit(Domain.ItemRemoved.newBuilder().setProductId(item.getProductId()).build()); + return Empty.getDefaultInstance(); + } - private Domain.LineItem convert(Shoppingcart.LineItem item) { - return Domain.LineItem.newBuilder() - .setProductId(item.getProductId()) - .setName(item.getName()) - .setQuantity(item.getQuantity()) - .build(); - } + private Shoppingcart.LineItem convert(Domain.LineItem item) { + return Shoppingcart.LineItem.newBuilder() + .setProductId(item.getProductId()) + .setName(item.getName()) + .setQuantity(item.getQuantity()) + .build(); + } + private Domain.LineItem convert(Shoppingcart.LineItem item) { + return Domain.LineItem.newBuilder() + .setProductId(item.getProductId()) + .setName(item.getName()) + .setQuantity(item.getQuantity()) + .build(); + } } diff --git a/tck/src/main/scala/io/cloudstate/tck/CloudStateTCK.scala b/tck/src/main/scala/io/cloudstate/tck/CloudStateTCK.scala index c0551ea58..ae1fc6ad0 100644 --- a/tck/src/main/scala/io/cloudstate/tck/CloudStateTCK.scala +++ b/tck/src/main/scala/io/cloudstate/tck/CloudStateTCK.scala @@ -36,37 +36,53 @@ import java.net.InetAddress import akka.http.scaladsl.{Http, HttpConnectionContext, UseHttp2} import akka.http.scaladsl.Http.ServerBinding -import akka.http.scaladsl.model.{ContentTypes, HttpEntity, HttpMethods, HttpProtocols, HttpRequest, HttpResponse, StatusCodes} +import akka.http.scaladsl.model.{ + ContentTypes, + HttpEntity, + HttpMethods, + HttpProtocols, + HttpRequest, + HttpResponse, + StatusCodes +} import akka.http.scaladsl.unmarshalling._ import io.cloudstate.protocol.entity._ import com.example.shoppingcart.shoppingcart._ import akka.testkit.TestProbe import com.google.protobuf.empty.Empty -import io.cloudstate.protocol.event_sourced.{EventSourced, EventSourcedClient, EventSourcedHandler, EventSourcedInit, EventSourcedReply, EventSourcedStreamIn, EventSourcedStreamOut} +import io.cloudstate.protocol.event_sourced.{ + EventSourced, + EventSourcedClient, + EventSourcedHandler, + EventSourcedInit, + EventSourcedReply, + EventSourcedStreamIn, + EventSourcedStreamOut +} object CloudStateTCK { - private[this] final val PROXY = "proxy" - private[this] final val FRONTEND = "frontend" - private[this] final val TCK = "tck" - private[this] final val HOSTNAME = "hostname" - private[this] final val PORT = "port" - private[this] final val NAME = "name" - - final case class ProcSpec private( - hostname: String, - port: Int, - directory: File, - command: Array[String], - stopCommand: Option[Array[String]], - envVars: JMap[String, Object] + private[this] final val PROXY = "proxy" + private[this] final val FRONTEND = "frontend" + private[this] final val TCK = "tck" + private[this] final val HOSTNAME = "hostname" + private[this] final val PORT = "port" + private[this] final val NAME = "name" + + final case class ProcSpec private ( + hostname: String, + port: Int, + directory: 
File, + command: Array[String], + stopCommand: Option[Array[String]], + envVars: JMap[String, Object] ) { def this(config: Config) = this( - hostname = config.getString(HOSTNAME ), - port = config.getInt( PORT ), - directory = new File(config.getString("directory")), - command = config.getList( "command").unwrapped.toArray.map(_.toString), - stopCommand = Some(config.getList("stop-command").unwrapped().toArray.map(_.toString)).filter(_.nonEmpty), - envVars = config.getConfig("env-vars").root.unwrapped, + hostname = config.getString(HOSTNAME), + port = config.getInt(PORT), + directory = new File(config.getString("directory")), + command = config.getList("command").unwrapped.toArray.map(_.toString), + stopCommand = Some(config.getList("stop-command").unwrapped().toArray.map(_.toString)).filter(_.nonEmpty), + envVars = config.getConfig("env-vars").root.unwrapped ) def validate(): Unit = { require(directory.exists, s"Configured directory (${directory}) does not exist") @@ -75,12 +91,11 @@ object CloudStateTCK { } } - final case class Configuration private( - name: String, - proxy: ProcSpec, - frontend: ProcSpec, - tckHostname: String, - tckPort: Int) { + final case class Configuration private (name: String, + proxy: ProcSpec, + frontend: ProcSpec, + tckHostname: String, + tckPort: Int) { def validate(): Unit = { proxy.validate() @@ -94,11 +109,11 @@ object CloudStateTCK { val reference = ConfigFactory.defaultReference().getConfig("cloudstate-tck") val c = config.withFallback(reference) Configuration( - name = c.getString(NAME), - proxy = new ProcSpec(c.getConfig(PROXY)), - frontend = new ProcSpec(c.getConfig(FRONTEND)), + name = c.getString(NAME), + proxy = new ProcSpec(c.getConfig(PROXY)), + frontend = new ProcSpec(c.getConfig(FRONTEND)), tckHostname = c.getString(TCK + "." + HOSTNAME), - tckPort = c.getInt( TCK + "." + PORT) + tckPort = c.getInt(TCK + "." + PORT) ) } } @@ -106,7 +121,10 @@ object CloudStateTCK { final val noWait = 0.seconds // FIXME add interception to enable asserting exchanges - final class EventSourcedInterceptor(val client: EventSourcedClient, val fromBackend: TestProbe, val fromFrontend: TestProbe)(implicit ec: ExecutionContext) extends EventSourced { + final class EventSourcedInterceptor(val client: EventSourcedClient, + val fromBackend: TestProbe, + val fromFrontend: TestProbe)(implicit ec: ExecutionContext) + extends EventSourced { private final val fromBackendInterceptor = Sink.actorRef[AnyRef](fromBackend.ref, "BACKEND_TERMINATED") private final val fromFrontendInterceptor = Sink.actorRef[AnyRef](fromFrontend.ref, "FRONTEND_TERMINATED") @@ -116,14 +134,17 @@ object CloudStateTCK { } // FIXME add interception to enable asserting exchanges - final class EntityDiscoveryInterceptor(val client: EntityDiscoveryClient, val fromBackend: TestProbe, val fromFrontend: TestProbe)(implicit ec: ExecutionContext) extends EntityDiscovery { - import scala.util.{Success, Failure} + final class EntityDiscoveryInterceptor(val client: EntityDiscoveryClient, + val fromBackend: TestProbe, + val fromFrontend: TestProbe)(implicit ec: ExecutionContext) + extends EntityDiscovery { + import scala.util.{Failure, Success} override def discover(info: ProxyInfo): Future[EntitySpec] = { fromBackend.ref ! info client.discover(info).andThen { case Success(es) => fromFrontend.ref ! es - case Failure(f) => fromFrontend.ref ! f + case Failure(f) => fromFrontend.ref ! 
f } } @@ -136,8 +157,11 @@ object CloudStateTCK { } } - def attempt[T](op: => Future[T], delay: FiniteDuration, retries: Int)(implicit ec: ExecutionContext, s: Scheduler): Future[T] = - Future.unit.flatMap(_ => op) recoverWith { case _ if retries > 0 => after(delay, s)(attempt(op, delay, retries - 1)) } + def attempt[T](op: => Future[T], delay: FiniteDuration, retries: Int)(implicit ec: ExecutionContext, + s: Scheduler): Future[T] = + Future.unit.flatMap(_ => op) recoverWith { + case _ if retries > 0 => after(delay, s)(attempt(op, delay, retries - 1)) + } final val proxyInfo = ProxyInfo( protocolMajorVersion = 0, @@ -148,28 +172,29 @@ object CloudStateTCK { ) } -class CloudStateTCK(private[this] final val config: CloudStateTCK.Configuration) extends AsyncWordSpec with MustMatchers with BeforeAndAfterAll { +class CloudStateTCK(private[this] final val config: CloudStateTCK.Configuration) + extends AsyncWordSpec + with MustMatchers + with BeforeAndAfterAll { import CloudStateTCK._ - private[this] final val system = ActorSystem("CloudStateTCK") - private[this] final val mat = ActorMaterializer()(system) - private[this] final val discoveryFromBackend = TestProbe("discoveryFromBackend")(system) - private[this] final val discoveryFromFrontend = TestProbe("discoveryFromFrontend")(system) - private[this] final val eventSourcedFromBackend = TestProbe("eventSourcedFromBackend")(system) - private[this] final val eventSourcedFromFrontend = TestProbe("eventSourcedFromFrontend")(system) - @volatile private[this] final var shoppingClient: ShoppingCartClient = _ - @volatile private[this] final var entityDiscoveryClient: EntityDiscoveryClient = _ - @volatile private[this] final var eventSourcedClient: EventSourcedClient = _ - @volatile private[this] final var backendProcess: Process = _ - @volatile private[this] final var frontendProcess: Process = _ - @volatile private[this] final var tckProxy: ServerBinding = _ + private[this] final val system = ActorSystem("CloudStateTCK") + private[this] final val mat = ActorMaterializer()(system) + private[this] final val discoveryFromBackend = TestProbe("discoveryFromBackend")(system) + private[this] final val discoveryFromFrontend = TestProbe("discoveryFromFrontend")(system) + private[this] final val eventSourcedFromBackend = TestProbe("eventSourcedFromBackend")(system) + private[this] final val eventSourcedFromFrontend = TestProbe("eventSourcedFromFrontend")(system) + @volatile private[this] final var shoppingClient: ShoppingCartClient = _ + @volatile private[this] final var entityDiscoveryClient: EntityDiscoveryClient = _ + @volatile private[this] final var eventSourcedClient: EventSourcedClient = _ + @volatile private[this] final var backendProcess: Process = _ + @volatile private[this] final var frontendProcess: Process = _ + @volatile private[this] final var tckProxy: ServerBinding = _ def process(ps: ProcSpec): ProcessBuilder = { val localhost = InetAddress.getLocalHost.getHostAddress val pb = - new ProcessBuilder(ps.command.map(_.replace("%LOCALHOST%", localhost)):_*). - inheritIO(). 
- directory(ps.directory) + new ProcessBuilder(ps.command.map(_.replace("%LOCALHOST%", localhost)): _*).inheritIO().directory(ps.directory) val env = pb.environment @@ -186,25 +211,25 @@ class CloudStateTCK(private[this] final val config: CloudStateTCK.Configuration) implicit val s = system implicit val m = mat Http().bindAndHandleAsync( - handler = EntityDiscoveryHandler.partial(entityDiscovery) orElse EventSourcedHandler.partial(eventSourced), - interface = config.tckHostname, - port = config.tckPort, - connectionContext = HttpConnectionContext(http2 = UseHttp2.Always) - ) - } + handler = EntityDiscoveryHandler.partial(entityDiscovery) orElse EventSourcedHandler.partial(eventSourced), + interface = config.tckHostname, + port = config.tckPort, + connectionContext = HttpConnectionContext(http2 = UseHttp2.Always) + ) + } override def beforeAll(): Unit = { config.validate() - val fp = process(config.frontend). - start() + val fp = process(config.frontend).start() require(fp.isAlive()) frontendProcess = fp - val clientSettings = GrpcClientSettings.connectToServiceAt(config.frontend.hostname, config.frontend.port)(system).withTls(false) + val clientSettings = + GrpcClientSettings.connectToServiceAt(config.frontend.hostname, config.frontend.port)(system).withTls(false) val edc = EntityDiscoveryClient(clientSettings)(mat, mat.executionContext) @@ -214,29 +239,36 @@ class CloudStateTCK(private[this] final val config: CloudStateTCK.Configuration) eventSourcedClient = esc - val tp = Await.result(buildTCKProxy(new EntityDiscoveryInterceptor(edc, discoveryFromBackend, discoveryFromFrontend), - new EventSourcedInterceptor(esc, eventSourcedFromBackend, eventSourcedFromFrontend)(system.dispatcher)), 10.seconds) + val tp = Await.result( + buildTCKProxy( + new EntityDiscoveryInterceptor(edc, discoveryFromBackend, discoveryFromFrontend), + new EventSourcedInterceptor(esc, eventSourcedFromBackend, eventSourcedFromFrontend)(system.dispatcher) + ), + 10.seconds + ) tckProxy = tp // Wait for the backend to come up before starting the frontend, otherwise the discovery call from the backend, // if it happens before the frontend starts, will cause the proxy probes to have failures in them - Await.ready(attempt(entityDiscoveryClient.discover(proxyInfo), 4.seconds, 10)(system.dispatcher, system.scheduler), 1.minute) + Await.ready(attempt(entityDiscoveryClient.discover(proxyInfo), 4.seconds, 10)(system.dispatcher, system.scheduler), + 1.minute) - val bp = process(config.proxy). 
- start() + val bp = process(config.proxy).start() require(bp.isAlive()) backendProcess = bp - val sc = ShoppingCartClient(GrpcClientSettings.connectToServiceAt(config.proxy.hostname, config.proxy.port)(system).withTls(false))(mat, mat.executionContext) + val sc = ShoppingCartClient( + GrpcClientSettings.connectToServiceAt(config.proxy.hostname, config.proxy.port)(system).withTls(false) + )(mat, mat.executionContext) shoppingClient = sc } override final def afterAll(): Unit = { - def destroy(spec: ProcSpec)(p: Process): Unit = while(p.isAlive) { + def destroy(spec: ProcSpec)(p: Process): Unit = while (p.isAlive) { spec.stopCommand match { case Some(stopCommand) => new ProcessBuilder(stopCommand: _*).inheritIO().directory(spec.directory).start() case None => p.destroy() @@ -244,74 +276,81 @@ class CloudStateTCK(private[this] final val config: CloudStateTCK.Configuration) p.waitFor(5, TimeUnit.SECONDS) || { p.destroyForcibly() true // todo revisit this - }// todo make configurable + } // todo make configurable } - try - Option(shoppingClient).foreach(c => Await.result(c.close(), 10.seconds)) - finally - try Option(backendProcess).foreach(destroy(config.proxy)) - finally Seq(entityDiscoveryClient, eventSourcedClient).foreach(c => Await.result(c.close(), 10.seconds)) - try Option(frontendProcess).foreach(destroy(config.frontend)) - finally Await.ready(tckProxy.unbind().transformWith(_ => system.terminate())(system.dispatcher), 30.seconds) + try Option(shoppingClient).foreach(c => Await.result(c.close(), 10.seconds)) + finally try Option(backendProcess).foreach(destroy(config.proxy)) + finally Seq(entityDiscoveryClient, eventSourcedClient).foreach(c => Await.result(c.close(), 10.seconds)) + try Option(frontendProcess).foreach(destroy(config.frontend)) + finally Await.ready(tckProxy.unbind().transformWith(_ => system.terminate())(system.dispatcher), 30.seconds) } - final def fromFrontend_expectEntitySpec(within: FiniteDuration): EntitySpec = withClue("EntitySpec was not received, or not well-formed: ") { - val spec = discoveryFromFrontend.expectMsgType[EntitySpec](within) - spec.proto must not be ProtobufByteString.EMPTY - spec.entities must not be empty - spec.entities.head.serviceName must not be empty - spec.entities.head.persistenceId must not be empty - // fixme event sourced? - spec.entities.head.entityType must not be empty - spec - } + final def fromFrontend_expectEntitySpec(within: FiniteDuration): EntitySpec = + withClue("EntitySpec was not received, or not well-formed: ") { + val spec = discoveryFromFrontend.expectMsgType[EntitySpec](within) + spec.proto must not be ProtobufByteString.EMPTY + spec.entities must not be empty + spec.entities.head.serviceName must not be empty + spec.entities.head.persistenceId must not be empty + // fixme event sourced? 
+ spec.entities.head.entityType must not be empty + spec + } - final def fromBackend_expectInit(within: FiniteDuration): EventSourcedInit = withClue("Init message was not received, or not well-formed: ") { - val init = eventSourcedFromBackend.expectMsgType[EventSourcedStreamIn](noWait) - init must not be(null) - init.message must be('init) - init.message.init must be(defined) - init.message.init.get - } + final def fromBackend_expectInit(within: FiniteDuration): EventSourcedInit = + withClue("Init message was not received, or not well-formed: ") { + val init = eventSourcedFromBackend.expectMsgType[EventSourcedStreamIn](noWait) + init must not be (null) + init.message must be('init) + init.message.init must be(defined) + init.message.init.get + } - final def fromBackend_expectCommand(within: FiniteDuration): Command = withClue("Command was not received, or not well-formed: ") { - val command = eventSourcedFromBackend.expectMsgType[EventSourcedStreamIn](noWait) - command must not be(null) // FIXME validate Command - command.message must be('command) - command.message.command must be(defined) - val c = command.message.command.get - c.entityId must not be(empty) - c - } + final def fromBackend_expectCommand(within: FiniteDuration): Command = + withClue("Command was not received, or not well-formed: ") { + val command = eventSourcedFromBackend.expectMsgType[EventSourcedStreamIn](noWait) + command must not be (null) // FIXME validate Command + command.message must be('command) + command.message.command must be(defined) + val c = command.message.command.get + c.entityId must not be (empty) + c + } - final def fromFrontend_expectReply(events: Int, within: FiniteDuration): EventSourcedReply = withClue("Reply was not received, or not well-formed: ") { - val reply = eventSourcedFromFrontend.expectMsgType[EventSourcedStreamOut](noWait) - reply must not be(null) - reply.message must be('reply) - reply.message.reply must be(defined) - val r = reply.message.reply.get - r.clientAction must be(defined) - val clientAction = r.clientAction.get - clientAction.action must be('reply) - clientAction.action.reply must be('defined) - withClue("Reply did not have the expected number of events: ") { r.events.size must be (events) } - r - } + final def fromFrontend_expectReply(events: Int, within: FiniteDuration): EventSourcedReply = + withClue("Reply was not received, or not well-formed: ") { + val reply = eventSourcedFromFrontend.expectMsgType[EventSourcedStreamOut](noWait) + reply must not be (null) + reply.message must be('reply) + reply.message.reply must be(defined) + val r = reply.message.reply.get + r.clientAction must be(defined) + val clientAction = r.clientAction.get + clientAction.action must be('reply) + clientAction.action.reply must be('defined) + withClue("Reply did not have the expected number of events: ") { r.events.size must be(events) } + r + } - final def fromFrontend_expectFailure(within: FiniteDuration): Failure = withClue("Failure was not received, or not well-formed: ") { - val failure = eventSourcedFromFrontend.expectMsgType[EventSourcedStreamOut](noWait) // FIXME Expects entity.Failure, but gets lientAction.Action.Failure(Failure(commandId, msg))) - failure must not be(null) - failure.message must be('reply) - failure.message.reply must be(defined) - failure.message.reply.get.clientAction must be(defined) - val clientAction = failure.message.reply.get.clientAction.get - clientAction.action must be('failure) - clientAction.action.failure must be('defined) - clientAction.action.failure.get - 
} + final def fromFrontend_expectFailure(within: FiniteDuration): Failure = + withClue("Failure was not received, or not well-formed: ") { + val failure = eventSourcedFromFrontend.expectMsgType[EventSourcedStreamOut](noWait) // FIXME Expects entity.Failure, but gets lientAction.Action.Failure(Failure(commandId, msg))) + failure must not be (null) + failure.message must be('reply) + failure.message.reply must be(defined) + failure.message.reply.get.clientAction must be(defined) + val clientAction = failure.message.reply.get.clientAction.get + clientAction.action must be('failure) + clientAction.action.failure must be('defined) + clientAction.action.failure.get + } - final def correlate(cmd: Command, commandId: Long) = withClue("Command had the wrong id: ") { cmd.id must be(commandId) } - final def unrelated(cmd: Command, commandId: Long) = withClue("Command had the wrong id: ") { cmd.id must not be commandId } + final def correlate(cmd: Command, commandId: Long) = withClue("Command had the wrong id: ") { + cmd.id must be(commandId) + } + final def unrelated(cmd: Command, commandId: Long) = withClue("Command had the wrong id: ") { + cmd.id must not be commandId + } ("The TCK for" + config.name) must { implicit val scheduler = system.scheduler @@ -327,69 +366,79 @@ class CloudStateTCK(private[this] final val config: CloudStateTCK.Configuration) "verify that an initial GetShoppingCart request succeeds" in { val userId = "testuser:1" - attempt(shoppingClient.getCart(GetShoppingCart(userId)), 4.seconds, 10) map { - cart => - // Interaction test - val proxyInfo = discoveryFromBackend.expectMsgType[ProxyInfo] - proxyInfo.supportedEntityTypes must contain(EventSourced.name) - proxyInfo.protocolMajorVersion must be >= 0 - proxyInfo.protocolMinorVersion must be >= 0 + attempt(shoppingClient.getCart(GetShoppingCart(userId)), 4.seconds, 10) map { cart => + // Interaction test + val proxyInfo = discoveryFromBackend.expectMsgType[ProxyInfo] + proxyInfo.supportedEntityTypes must contain(EventSourced.name) + proxyInfo.protocolMajorVersion must be >= 0 + proxyInfo.protocolMinorVersion must be >= 0 - fromFrontend_expectEntitySpec(noWait) + fromFrontend_expectEntitySpec(noWait) - fromBackend_expectInit(noWait) + fromBackend_expectInit(noWait) - correlate(fromBackend_expectCommand(noWait), fromFrontend_expectReply(events = 0, noWait).commandId) + correlate(fromBackend_expectCommand(noWait), fromFrontend_expectReply(events = 0, noWait).commandId) - eventSourcedFromBackend.expectNoMsg(noWait) - eventSourcedFromFrontend.expectNoMsg(noWait) + eventSourcedFromBackend.expectNoMsg(noWait) + eventSourcedFromFrontend.expectNoMsg(noWait) - // Semantical test - cart must not be(null) - cart.items must be(empty) + // Semantical test + cart must not be (null) + cart.items must be(empty) } } // TODO convert this into a ScalaCheck generated test case "verify that items can be added to, and removed from, a shopping cart" in { - val sc = shoppingClient - import sc.{getCart, addItem, removeItem} + val sc = shoppingClient + import sc.{addItem, getCart, removeItem} - val userId = "testuser:2" - val productId1 = "testproduct:1" - val productId2 = "testproduct:2" + val userId = "testuser:2" + val productId1 = "testproduct:1" + val productId2 = "testproduct:2" val productName1 = "Test Product 1" val productName2 = "Test Product 2" for { - Cart(Nil) <- getCart(GetShoppingCart(userId)) // Test initial state - Empty() <- addItem(AddLineItem(userId, productId1, productName1, 1)) // Test add the first product - Empty() <- 
addItem(AddLineItem(userId, productId2, productName2, 2)) // Test add the second product - Empty() <- addItem(AddLineItem(userId, productId1, productName1, 11)) // Test increase quantity - Empty() <- addItem(AddLineItem(userId, productId2, productName2, 31)) // Test increase quantity - Cart(items1) <- getCart(GetShoppingCart(userId)) // Test intermediate state - Empty() <- removeItem(RemoveLineItem(userId, productId1)) // Test removal of first product - addNeg <- addItem(AddLineItem(userId, productId2, productName2, -7)).transform(scala.util.Success(_)) // Test decrement quantity of second product - add0 <- addItem(AddLineItem(userId, productId1, productName1, 0)).transform(scala.util.Success(_)) // Test add 0 of new product - removeNone <- removeItem(RemoveLineItem(userId, productId1)).transform(scala.util.Success(_)) // Test remove non-exiting product - Cart(items2) <- getCart(GetShoppingCart(userId)) // Test end state + Cart(Nil) <- getCart(GetShoppingCart(userId)) // Test initial state + Empty() <- addItem(AddLineItem(userId, productId1, productName1, 1)) // Test add the first product + Empty() <- addItem(AddLineItem(userId, productId2, productName2, 2)) // Test add the second product + Empty() <- addItem(AddLineItem(userId, productId1, productName1, 11)) // Test increase quantity + Empty() <- addItem(AddLineItem(userId, productId2, productName2, 31)) // Test increase quantity + Cart(items1) <- getCart(GetShoppingCart(userId)) // Test intermediate state + Empty() <- removeItem(RemoveLineItem(userId, productId1)) // Test removal of first product + addNeg <- addItem(AddLineItem(userId, productId2, productName2, -7)) + .transform(scala.util.Success(_)) // Test decrement quantity of second product + add0 <- addItem(AddLineItem(userId, productId1, productName1, 0)) + .transform(scala.util.Success(_)) // Test add 0 of new product + removeNone <- removeItem(RemoveLineItem(userId, productId1)) + .transform(scala.util.Success(_)) // Test remove non-exiting product + Cart(items2) <- getCart(GetShoppingCart(userId)) // Test end state } yield { val init = fromBackend_expectInit(noWait) - init.entityId must not be(empty) - - val commands = Seq( - (true,0),(true,1),(true, 1),(true,1),(true,1),(true,0), - (true,1),(false,0),(false,0),(false,0),(true,0)). 
- foldLeft(Set.empty[Long]){ case (set, (isReply, eventCount)) => - val cmd = fromBackend_expectCommand(noWait) - correlate( - cmd, - if (isReply) fromFrontend_expectReply(events = eventCount, noWait).commandId - else fromFrontend_expectFailure(noWait).commandId - ) - init.entityId must be(cmd.entityId) - set must not contain(cmd.id) - set + cmd.id + init.entityId must not be (empty) + + val commands = Seq((true, 0), + (true, 1), + (true, 1), + (true, 1), + (true, 1), + (true, 0), + (true, 1), + (false, 0), + (false, 0), + (false, 0), + (true, 0)).foldLeft(Set.empty[Long]) { + case (set, (isReply, eventCount)) => + val cmd = fromBackend_expectCommand(noWait) + correlate( + cmd, + if (isReply) fromFrontend_expectReply(events = eventCount, noWait).commandId + else fromFrontend_expectFailure(noWait).commandId + ) + init.entityId must be(cmd.entityId) + set must not contain (cmd.id) + set + cmd.id } eventSourcedFromBackend.expectNoMsg(noWait) @@ -409,24 +458,27 @@ class CloudStateTCK(private[this] final val config: CloudStateTCK.Configuration) "verify that the backend supports the ServerReflection API" in { import grpc.reflection.v1alpha.reflection._ - import ServerReflectionRequest.{ MessageRequest => In} - import ServerReflectionResponse.{ MessageResponse => Out} + import ServerReflectionRequest.{MessageRequest => In} + import ServerReflectionResponse.{MessageResponse => Out} - val reflectionClient = ServerReflectionClient(GrpcClientSettings.connectToServiceAt(config.proxy.hostname, config.proxy.port)(system).withTls(false))(mat, mat.executionContext) + val reflectionClient = ServerReflectionClient( + GrpcClientSettings.connectToServiceAt(config.proxy.hostname, config.proxy.port)(system).withTls(false) + )(mat, mat.executionContext) - val Host = config.proxy.hostname + val Host = config.proxy.hostname val ShoppingCart = "com.example.shoppingcart.ShoppingCart" val testData = List[(In, Out)]( - (In.ListServices(""), Out.ListServicesResponse(ListServiceResponse(Vector(ServiceResponse(ShoppingCart))))), - (In.ListServices("nonsense.blabla."), Out.ListServicesResponse(ListServiceResponse(Vector(ServiceResponse(ShoppingCart))))), - (In.FileContainingSymbol("nonsense.blabla.Void"), Out.FileDescriptorResponse(FileDescriptorResponse(Nil))), - ) map { - case (in, out) => - val req = ServerReflectionRequest(Host, in) - val res = ServerReflectionResponse(Host, Some(req), out) - (req, res) - } + (In.ListServices(""), Out.ListServicesResponse(ListServiceResponse(Vector(ServiceResponse(ShoppingCart))))), + (In.ListServices("nonsense.blabla."), + Out.ListServicesResponse(ListServiceResponse(Vector(ServiceResponse(ShoppingCart))))), + (In.FileContainingSymbol("nonsense.blabla.Void"), Out.FileDescriptorResponse(FileDescriptorResponse(Nil))) + ) map { + case (in, out) => + val req = ServerReflectionRequest(Host, in) + val res = ServerReflectionResponse(Host, Some(req), out) + (req, res) + } val input = testData.map(_._1) val expected = testData.map(_._2) val test = for { @@ -454,56 +506,65 @@ class CloudStateTCK(private[this] final val config: CloudStateTCK.Configuration) } def getCart(userId: String): Future[String] = - Http().singleRequest( - HttpRequest( - method = HttpMethods.GET, - headers = Nil, - uri = s"http://${config.proxy.hostname}:${config.proxy.port}/carts/${userId}", - entity = HttpEntity.Empty, - protocol = HttpProtocols.`HTTP/1.1` + Http() + .singleRequest( + HttpRequest( + method = HttpMethods.GET, + headers = Nil, + uri = 
s"http://${config.proxy.hostname}:${config.proxy.port}/carts/${userId}", + entity = HttpEntity.Empty, + protocol = HttpProtocols.`HTTP/1.1` + ) ) - ).flatMap(validateResponse) + .flatMap(validateResponse) def getItems(userId: String): Future[String] = - Http().singleRequest( - HttpRequest( - method = HttpMethods.GET, - headers = Nil, - uri = s"http://${config.proxy.hostname}:${config.proxy.port}/carts/${userId}/items", - entity = HttpEntity.Empty, - protocol = HttpProtocols.`HTTP/1.1` + Http() + .singleRequest( + HttpRequest( + method = HttpMethods.GET, + headers = Nil, + uri = s"http://${config.proxy.hostname}:${config.proxy.port}/carts/${userId}/items", + entity = HttpEntity.Empty, + protocol = HttpProtocols.`HTTP/1.1` + ) ) - ).flatMap(validateResponse) + .flatMap(validateResponse) def addItem(userId: String, productId: String, productName: String, quantity: Int): Future[String] = - Http().singleRequest( - HttpRequest( - method = HttpMethods.POST, - headers = Nil, - uri = s"http://${config.proxy.hostname}:${config.proxy.port}/cart/${userId}/items/add", - entity = HttpEntity(ContentTypes.`application/json`, - s""" + Http() + .singleRequest( + HttpRequest( + method = HttpMethods.POST, + headers = Nil, + uri = s"http://${config.proxy.hostname}:${config.proxy.port}/cart/${userId}/items/add", + entity = HttpEntity( + ContentTypes.`application/json`, + s""" { "product_id": "${productId}", "name": "${productName}", "quantity": ${quantity} } """.trim - ), - protocol = HttpProtocols.`HTTP/1.1` + ), + protocol = HttpProtocols.`HTTP/1.1` + ) ) - ).flatMap(validateResponse) + .flatMap(validateResponse) def removeItem(userId: String, productId: String): Future[String] = - Http().singleRequest( - HttpRequest( - method = HttpMethods.POST, - headers = Nil, - uri = s"http://${config.proxy.hostname}:${config.proxy.port}/cart/${userId}/items/${productId}/remove", - entity = HttpEntity.Empty, - protocol = HttpProtocols.`HTTP/1.1` + Http() + .singleRequest( + HttpRequest( + method = HttpMethods.POST, + headers = Nil, + uri = s"http://${config.proxy.hostname}:${config.proxy.port}/cart/${userId}/items/${productId}/remove", + entity = HttpEntity.Empty, + protocol = HttpProtocols.`HTTP/1.1` + ) ) - ).flatMap(validateResponse) + .flatMap(validateResponse) val userId = "foo" for { @@ -537,4 +598,4 @@ class CloudStateTCK(private[this] final val config: CloudStateTCK.Configuration) } } } -} \ No newline at end of file +}