diff --git a/.scalafmt.conf b/.scalafmt.conf index 5d45e749f66..0b30c8b06c3 100644 --- a/.scalafmt.conf +++ b/.scalafmt.conf @@ -1,52 +1,105 @@ -version = 2.1.0 - -style = defaultWithAlign - -docstrings = JavaDoc -indentOperator = spray -maxColumn = 120 -lineEndings = preserve -rewrite.rules = [RedundantParens, SortImports, AvoidInfix] -unindentTopLevelOperators = true -align.tokens = [{code = "=>", owner = "Case"}] -align.openParenDefnSite = false -align.openParenCallSite = false -optIn.breakChainOnFirstMethodDot = false -optIn.configStyleArguments = false -danglingParentheses = false +version = 3.7.4 + +runner.dialect = scala213 + +fileOverride { + "glob:**/scala-3*/**" { + runner.dialect = scala3 + } +} + +# Only format files tracked by git +project.git = true + +# f/k/a style = +preset = defaultWithAlign + +# f/k/a JavaDoc +docstrings { + style = Asterisk + oneline = fold + wrap = no +} + +indentOperator { + preset = akka + + # f/f/k/a unindentTopLevelOperators = true + # f/k/a topLevelOnly=false + exemptScope = all +} + +maxColumn = 120 +lineEndings = preserve + +rewrite { + rules = [ + AvoidInfix, + RedundantParens, + Imports + ] + + neverInfix.excludeFilters = [ + and + min + max + until + to + by + eq + ne + "should.*" + "contain.*" + "must.*" + in + ignore + be + taggedAs + thrownBy + synchronized + have + when + size + only + noneOf + oneElementOf + noElementsOf + atLeastOneElementOf + atMostOneElementOf + allElementsOf + inOrderElementsOf + theSameElementsAs + theSameElementsInOrderAs + ] + + imports { + sort = original + } +} + spaces.inImportCurlyBraces = true -rewrite.neverInfix.excludeFilters = [ - and - min - max - until - to - by - eq - ne - "should.*" - "contain.*" - "must.*" - in - ignore - be - taggedAs - thrownBy - synchronized - have - when - size - only - noneOf - oneElementOf - noElementsOf - atLeastOneElementOf - atMostOneElementOf - allElementsOf - inOrderElementsOf - theSameElementsAs - theSameElementsInOrderAs -] + +align { + tokens = [ 
+ { code = "=>", owner = "Case" } + ] + openParenCallSite = false + openParenDefnSite = false +} + +optIn { + breakChainOnFirstMethodDot = false + encloseClassicChains = true + configStyleArguments = false +} + +danglingParentheses { + defnSite = false + callSite = false + ctrlSite = false + tupleSite = false +} + rewriteTokens = { "⇒": "=>" "→": "->" diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/CapturedLogEvent.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/CapturedLogEvent.scala index bd20460c5cd..3d9a379046e 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/CapturedLogEvent.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/CapturedLogEvent.scala @@ -21,9 +21,7 @@ import akka.util.OptionVal */ final case class CapturedLogEvent(level: Level, message: String, cause: Option[Throwable], marker: Option[Marker]) { - /** - * Constructor for Java API - */ + /** Constructor for Java API */ def this( level: Level, message: String, @@ -32,27 +30,19 @@ final case class CapturedLogEvent(level: Level, message: String, cause: Option[T mdc: java.util.Map[String, Any]) = this(level, message, errorCause.asScala, marker.asScala) - /** - * Constructor for Java API - */ + /** Constructor for Java API */ def this(level: Level, message: String) = this(level, message, Option.empty, Option.empty) - /** - * Constructor for Java API - */ + /** Constructor for Java API */ def this(level: Level, message: String, errorCause: Throwable) = this(level, message, Some(errorCause), Option.empty[Marker]) - /** - * Constructor for Java API - */ + /** Constructor for Java API */ def this(level: Level, message: String, marker: Marker) = this(level, message, Option.empty[Throwable], Some(marker)) - /** - * Constructor for Java API - */ + /** Constructor for Java API */ def this(level: Level, message: String, errorCause: Throwable, marker: Marker) = this(level, message, Some(errorCause), 
Some(marker)) @@ -63,9 +53,7 @@ final case class CapturedLogEvent(level: Level, message: String, cause: Option[T object CapturedLogEvent { - /** - * Helper method to convert [[OptionVal]] to [[Option]] - */ + /** Helper method to convert [[OptionVal]] to [[Option]] */ private def toOption[A](optionVal: OptionVal[A]): Option[A] = optionVal match { case OptionVal.Some(x) => Some(x) case _ => None diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/Effect.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/Effect.scala index e129bd3c9d1..b4b3d071ef4 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/Effect.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/Effect.scala @@ -55,9 +55,7 @@ object Effect { def adaptTimeout(msg: String): T = mapResponse(timeoutTry(msg)) def adaptTimeout: T = adaptTimeout(timeoutMsg) - /** - * Java API - */ + /** Java API */ def getResponseTimeout: java.time.Duration = responseTimeout.asJava private var sentResponse: Boolean = false @@ -82,9 +80,7 @@ object Effect { } } - /** - * The behavior spawned a named child with the given behavior (and optionally specific props) - */ + /** The behavior spawned a named child with the given behavior (and optionally specific props) */ final class Spawned[T](val behavior: Behavior[T], val childName: String, val props: Props, val ref: ActorRef[T]) extends Effect with Product3[Behavior[T], String, Props] @@ -113,9 +109,7 @@ object Effect { def unapply[T](s: Spawned[T]): Option[(Behavior[T], String, Props)] = Some((s.behavior, s.childName, s.props)) } - /** - * The behavior spawned an anonymous child with the given behavior (and optionally specific props) - */ + /** The behavior spawned an anonymous child with the given behavior (and optionally specific props) */ final class SpawnedAnonymous[T](val behavior: Behavior[T], val props: Props, val ref: ActorRef[T]) extends Effect with Product2[Behavior[T], Props] 
@@ -196,54 +190,36 @@ object Effect { override def canEqual(o: Any): Boolean = o.isInstanceOf[SpawnedAnonymousAdapter[_]] } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] object SpawnedAnonymousAdapter { def apply[T]() = new SpawnedAnonymousAdapter[T](null) def unapply[T](@unused s: SpawnedAnonymousAdapter[T]): Boolean = true } - /** - * The behavior create a message adapter for the messages of type clazz - */ + /** The behavior create a message adapter for the messages of type clazz */ final case class MessageAdapter[A, T](messageClass: Class[A], adapt: A => T) extends Effect { - /** - * JAVA API - */ + /** JAVA API */ def adaptFunction: java.util.function.Function[A, T] = adapt.asJava } - /** - * The behavior stopped `childName` - */ + /** The behavior stopped `childName` */ final case class Stopped(childName: String) extends Effect - /** - * The behavior started watching `other`, through `context.watch(other)` - */ + /** The behavior started watching `other`, through `context.watch(other)` */ final case class Watched[T](other: ActorRef[T]) extends Effect - /** - * The behavior started watching `other`, through `context.watchWith(other, message)` - */ + /** The behavior started watching `other`, through `context.watchWith(other, message)` */ final case class WatchedWith[U, T](other: ActorRef[U], message: T) extends Effect - /** - * The behavior stopped watching `other`, through `context.unwatch(other)` - */ + /** The behavior stopped watching `other`, through `context.unwatch(other)` */ final case class Unwatched[T](other: ActorRef[T]) extends Effect - /** - * The behavior set a new receive timeout, with `message` as timeout notification - */ + /** The behavior set a new receive timeout, with `message` as timeout notification */ final case class ReceiveTimeoutSet[T](d: FiniteDuration, message: T) extends Effect { - /** - * Java API - */ + /** Java API */ def duration(): java.time.Duration = d.asJava } @@ -292,13 +268,9 @@ object 
Effect { final case class TimerCancelled(key: Any) extends Effect - /** - * Used to represent an empty list of effects - in other words, the behavior didn't do anything observable - */ + /** Used to represent an empty list of effects - in other words, the behavior didn't do anything observable */ case object NoEffects extends NoEffects - /** - * Used for NoEffects expectations by type - */ + /** Used for NoEffects expectations by type */ sealed abstract class NoEffects extends Effect } diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/LoggingEvent.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/LoggingEvent.scala index 0019a46edc2..a703f27e6d3 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/LoggingEvent.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/LoggingEvent.scala @@ -15,21 +15,15 @@ import akka.util.ccompat.JavaConverters._ object LoggingEvent { - /** - * Scala API - */ + /** Scala API */ def apply(level: Level, loggerName: String, threadName: String, message: String, timeStamp: Long): LoggingEvent = new LoggingEvent(level, loggerName, threadName, message, timeStamp, None, None, Map.empty) - /** - * Java API - */ + /** Java API */ def create(level: Level, loggerName: String, threadName: String, message: String, timeStamp: Long): LoggingEvent = apply(level, loggerName, threadName, message, timeStamp) - /** - * Java API - */ + /** Java API */ def create( level: Level, loggerName: String, @@ -56,21 +50,15 @@ final case class LoggingEvent( throwable: Option[Throwable], mdc: Map[String, String]) { - /** - * Java API - */ + /** Java API */ def getMarker: Optional[Marker] = marker.asJava - /** - * Java API - */ + /** Java API */ def getThrowable: Optional[Throwable] = throwable.asJava - /** - * Java API - */ + /** Java API */ def getMdc: java.util.Map[String, String] = { import akka.util.ccompat.JavaConverters._ mdc.asJava diff --git 
a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/TestException.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/TestException.scala index c6feecd9126..77b5d4ee5ce 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/TestException.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/TestException.scala @@ -6,7 +6,5 @@ package akka.actor.testkit.typed import scala.util.control.NoStackTrace -/** - * A predefined exception that can be used in tests. It doesn't include a stack trace. - */ +/** A predefined exception that can be used in tests. It doesn't include a stack trace. */ final case class TestException(message: String) extends RuntimeException(message) with NoStackTrace diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/TestKitSettings.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/TestKitSettings.scala index 19c0c724310..e2f3f3ca534 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/TestKitSettings.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/TestKitSettings.scala @@ -16,9 +16,7 @@ import akka.util.Timeout object TestKitSettings { - /** - * Reads configuration settings from `akka.actor.testkit.typed` section. - */ + /** Reads configuration settings from `akka.actor.testkit.typed` section. */ def apply(system: ActorSystem[_]): TestKitSettings = Ext(system).settings @@ -29,9 +27,7 @@ object TestKitSettings { def apply(config: Config): TestKitSettings = new TestKitSettings(config) - /** - * Java API: Reads configuration settings from `akka.actor.testkit.typed` section. - */ + /** Java API: Reads configuration settings from `akka.actor.testkit.typed` section. */ def create(system: ActorSystem[_]): TestKitSettings = apply(system) @@ -77,15 +73,11 @@ final class TestKitSettings(val config: Config) { /** Dilated with `TestTimeFactor`. 
*/ val FilterLeeway: FiniteDuration = dilated(config.getMillisDuration("filter-leeway")) - /** - * Scala API: Scale the `duration` with the configured `TestTimeFactor` - */ + /** Scala API: Scale the `duration` with the configured `TestTimeFactor` */ def dilated(duration: FiniteDuration): FiniteDuration = Duration.fromNanos((duration.toNanos * TestTimeFactor + 0.5).toLong) - /** - * Java API: Scale the `duration` with the configured `TestTimeFactor` - */ + /** Java API: Scale the `duration` with the configured `TestTimeFactor` */ def dilated(duration: java.time.Duration): java.time.Duration = dilated(duration.asScala).asJava } diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/ActorSystemStub.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/ActorSystemStub.scala index 28014371db8..dd81e87c978 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/ActorSystemStub.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/ActorSystemStub.scala @@ -32,9 +32,7 @@ import akka.actor.typed.internal.InternalRecipientRef import akka.actor.typed.receptionist.Receptionist import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @nowarn @InternalApi private[akka] final class ActorSystemStub( val name: String, @@ -133,7 +131,7 @@ import akka.annotation.InternalApi // this is backward compatible with the old behaviour, hence it uses the loader used to load the test-kit // which is not necessarily the one used to load the tests... 
// hence this might not include reference config related to the actually executing test - //todo: might be better NOT to pass any class loader and let typesafeConfig rely on the contextClassLoader + // todo: might be better NOT to pass any class loader and let typesafeConfig rely on the contextClassLoader // (which is usually the system class loader) def defaultReference: Config = ConfigFactory.defaultReference(getClass.getClassLoader) } diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/BehaviorTestKitImpl.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/BehaviorTestKitImpl.scala index 037807811ad..076ed760956 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/BehaviorTestKitImpl.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/BehaviorTestKitImpl.scala @@ -25,9 +25,7 @@ import akka.pattern.StatusReply import akka.util.OptionVal import akka.util.ccompat.JavaConverters._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class BehaviorTestKitImpl[T]( system: ActorSystemStub, @@ -120,7 +118,7 @@ private[akka] final class BehaviorTestKitImpl[T]( case null => throw new AssertionError(s"expected: effect type ${effectClass.getName} but no effects were recorded") case effect if effectClass.isAssignableFrom(effect.getClass) => effect.asInstanceOf[E] - case other => throw new AssertionError(s"expected: effect class ${effectClass.getName} but found $other") + case other => throw new AssertionError(s"expected: effect class ${effectClass.getName} but found $other") } } @@ -142,13 +140,17 @@ private[akka] final class BehaviorTestKitImpl[T]( def currentBehavior: Behavior[T] = current def isAlive: Boolean = Behavior.isAlive(current) - private def handleException: Catcher[Unit] = { - case NonFatal(e) => - try Behavior.canonicalize(Behavior.interpretSignal(current, context, PostStop), current, context) // TODO 
why canonicalize here? - catch { - case NonFatal(_) => /* ignore, real is logging */ - } - throw e + private def handleException: Catcher[Unit] = { case NonFatal(e) => + try + Behavior.canonicalize( + Behavior.interpretSignal(current, context, PostStop), + current, + context + ) // TODO why canonicalize here? + catch { + case NonFatal(_) => /* ignore, real is logging */ + } + throw e } private def runAllTasks(): Unit = { @@ -162,11 +164,11 @@ private[akka] final class BehaviorTestKitImpl[T]( try { context.setCurrentActorThread() try { - //we need this to handle message adapters related messages + // we need this to handle message adapters related messages val intercepted = BehaviorTestKitImpl.Interceptor.inteceptBehaviour(current, context) currentUncanonical = Behavior.interpretMessage(intercepted, context, message) - //notice we pass current and not intercepted, this way Behaviors.same will be resolved to current which will be intercepted again on the next message - //otherwise we would have risked intercepting an already intercepted behavior (or would have had to explicitly check if the current behavior is already intercepted by us) + // notice we pass current and not intercepted, this way Behaviors.same will be resolved to current which will be intercepted again on the next message + // otherwise we would have risked intercepting an already intercepted behavior (or would have had to explicitly check if the current behavior is already intercepted by us) current = Behavior.canonicalize(currentUncanonical, current, context) } finally { context.clearCurrentActorThread() @@ -230,9 +232,11 @@ private[akka] object BehaviorTestKitImpl { def inteceptBehaviour[T](behavior: Behavior[T], ctx: TypedActorContext[T]): Behavior[T] = Behavior - .start(Behaviors.intercept { () => - this.asInstanceOf[BehaviorInterceptor[Any, T]] - }(behavior), ctx.asInstanceOf[TypedActorContext[Any]]) + .start( + Behaviors.intercept { () => + this.asInstanceOf[BehaviorInterceptor[Any, T]] + 
}(behavior), + ctx.asInstanceOf[TypedActorContext[Any]]) .unsafeCast[T] } } diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/CapturingAppender.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/CapturingAppender.scala index 3b5ebdda568..f609841c603 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/CapturingAppender.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/CapturingAppender.scala @@ -9,9 +9,7 @@ import ch.qos.logback.core.AppenderBase import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object CapturingAppender { import LogbackUtil._ @@ -80,9 +78,7 @@ import akka.annotation.InternalApi clear() } - /** - * Discards the buffered logging events without output. - */ + /** Discards the buffered logging events without output. */ def clear(): Unit = synchronized { buffer = Vector.empty } diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/ControlledExecutor.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/ControlledExecutor.scala index e19f3301095..df22f47d14f 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/ControlledExecutor.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/ControlledExecutor.scala @@ -10,9 +10,7 @@ import scala.concurrent.ExecutionContextExecutor import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class ControlledExecutor extends ExecutionContextExecutor { private val tasks = new LinkedList[Runnable] diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/DebugRef.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/DebugRef.scala index 9e52a74af67..5f74f225b18 100644 --- 
a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/DebugRef.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/DebugRef.scala @@ -15,9 +15,7 @@ import akka.actor.typed.internal.{ ActorRefImpl, SystemMessage } import akka.actor.typed.internal.InternalRecipientRef import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class DebugRef[T](override val path: classic.ActorPath, override val isLocal: Boolean) extends ActorRef[T] with ActorRefImpl[T] diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/EffectfulActorContext.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/EffectfulActorContext.scala index bceb1754290..0c59f6d5332 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/EffectfulActorContext.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/EffectfulActorContext.scala @@ -22,9 +22,7 @@ import akka.annotation.InternalApi import akka.util.JavaDurationConverters._ import akka.util.Timeout -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class EffectfulActorContext[T]( system: ActorSystemStub, path: ActorPath, @@ -60,8 +58,8 @@ import akka.util.Timeout val scalaMapResponse = { (result: Try[Res]) => result .map(applyToResponse(_, null)) - .recover { - case NonFatal(ex) => applyToResponse(null.asInstanceOf[Res], ex) + .recover { case NonFatal(ex) => + applyToResponse(null.asInstanceOf[Res], ex) } .get } @@ -182,14 +180,13 @@ import akka.util.Timeout override def cancelAll(): Unit = activeTimers.foreach(cancel) private def sendAction(key: Any): () => Unit = () => { - activeTimers.get(key).foreach { - case Effect.TimerScheduled(_, msg, _, mode, _) => - mode match { - case Effect.TimerScheduled.SingleMode => - activeTimers -= key - case _ => - } - self ! 
msg + activeTimers.get(key).foreach { case Effect.TimerScheduled(_, msg, _, mode, _) => + mode match { + case Effect.TimerScheduled.SingleMode => + activeTimers -= key + case _ => + } + self ! msg } } diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/LogbackUtil.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/LogbackUtil.scala index 942535bc090..5c3bc1aa4e3 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/LogbackUtil.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/LogbackUtil.scala @@ -11,9 +11,7 @@ import org.slf4j.event.Level import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object LogbackUtil { def loggerNameOrRoot(loggerName: String): String = if (loggerName == "") org.slf4j.Logger.ROOT_LOGGER_NAME else loggerName diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/LoggingTestKitImpl.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/LoggingTestKitImpl.scala index eeea0e70895..b7bbdd782a2 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/LoggingTestKitImpl.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/LoggingTestKitImpl.scala @@ -20,17 +20,13 @@ import akka.actor.typed.ActorSystem import akka.annotation.InternalApi import akka.testkit.TestKit -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object LoggingTestKitImpl { def empty: LoggingTestKitImpl = new LoggingTestKitImpl(1, None, None, None, None, None, None, Map.empty, checkExcess = true, None) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class LoggingTestKitImpl( occurrences: Int, logLevel: Option[Level], diff --git 
a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/StubbedActorContext.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/StubbedActorContext.scala index 079e32c2f3e..778f86622eb 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/StubbedActorContext.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/StubbedActorContext.scala @@ -76,9 +76,7 @@ private[akka] final class FunctionRef[-T](override val path: ActorPath, send: (T this(new ActorSystemStub("StubbedActorContext"), name, currentBehaviorProvider) } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] val selfInbox = new TestInboxImpl[T](path) override val self = selfInbox.ref @@ -160,9 +158,7 @@ private[akka] final class FunctionRef[-T](override val path: ActorPath, send: (T // TODO allow overriding of this override def executionContext: ExecutionContextExecutor = system.executionContext - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def internalSpawnMessageAdapter[U](f: U => T, name: String): ActorRef[U] = { val n = if (name != "") s"${childName.next()}-$name" else childName.next() @@ -170,12 +166,14 @@ private[akka] final class FunctionRef[-T](override val path: ActorPath, send: (T val i = new BehaviorTestKitImpl[U](system, p, BehaviorImpl.ignore) _children += p.name -> i - new FunctionRef[U](p, (message, _) => { - val m = f(message); - if (m != null) { - selfInbox.ref ! m; i.selfInbox().ref ! message - } - }) + new FunctionRef[U]( + p, + (message, _) => { + val m = f(message); + if (m != null) { + selfInbox.ref ! m; i.selfInbox().ref ! message + } + }) } /** @@ -198,9 +196,7 @@ private[akka] final class FunctionRef[-T](override val path: ActorPath, send: (T btk.as } - /** - * Retrieve the inbox representing the child actor with the given name. - */ + /** Retrieve the inbox representing the child actor with the given name. 
*/ def childInbox[U](name: String): Option[TestInboxImpl[U]] = _children.get(name).map(_.context.selfInbox.as[U]) /** @@ -264,23 +260,17 @@ private[akka] final class FunctionRef[-T](override val path: ActorPath, send: (T } } - /** - * Clear the log entries. - */ + /** Clear the log entries. */ def clearLog(): Unit = substituteLoggerFactory.getEventQueue.clear() override private[akka] def onUnhandled(msg: T): Unit = unhandled = msg :: unhandled - /** - * Messages that are marked as unhandled. - */ + /** Messages that are marked as unhandled. */ def unhandledMessages: List[T] = unhandled.reverse - /** - * Clear the list of captured unhandled messages. - */ + /** Clear the list of captured unhandled messages. */ def clearUnhandled(): Unit = unhandled = Nil override private[akka] def currentBehavior: Behavior[T] = currentBehaviorProvider() diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestAppender.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestAppender.scala index 5a410485c00..9108ecb9d10 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestAppender.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestAppender.scala @@ -60,9 +60,7 @@ import akka.annotation.InternalApi } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class TestAppender extends AppenderBase[ILoggingEvent] { import LogbackUtil._ diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestInboxImpl.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestInboxImpl.scala index b8f0a81f1b9..bbba6d88fe9 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestInboxImpl.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestInboxImpl.scala @@ -15,9 +15,7 @@ import akka.annotation.InternalApi import 
akka.pattern.StatusReply import akka.util.OptionVal -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class TestInboxImpl[T](path: ActorPath) extends akka.actor.testkit.typed.javadsl.TestInbox[T] @@ -56,9 +54,7 @@ private[akka] final class TestInboxImpl[T](path: ActorPath) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi object TestInboxImpl { def apply[T](name: String): TestInboxImpl[T] = { @@ -68,9 +64,7 @@ object TestInboxImpl { private[akka] val address = RootActorPath(Address("akka.actor.typed.inbox", "anonymous")) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class ReplyInboxImpl[T](private var underlying: OptionVal[TestInboxImpl[T]]) extends akka.actor.testkit.typed.javadsl.ReplyInbox[T] @@ -87,14 +81,14 @@ private[akka] final class ReplyInboxImpl[T](private var underlying: OptionVal[Te def expectReply(expectedReply: T): Unit = receiveReply() match { - case matches if (matches == expectedReply) => () + case matches if matches == expectedReply => () case doesntMatch => throw new AssertionError(s"Expected $expectedReply but received $doesntMatch") } def expectNoReply(): ReplyInboxImpl[T] = underlying match { - case OptionVal.Some(testInbox) if (testInbox.hasMessages) => + case OptionVal.Some(testInbox) if testInbox.hasMessages => throw new AssertionError(s"Expected no reply, but ${receiveReply()} was received") case OptionVal.Some(_) => this @@ -111,9 +105,7 @@ private[akka] final class ReplyInboxImpl[T](private var underlying: OptionVal[Te } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class StatusReplyInboxImpl[T](private var underlying: OptionVal[TestInboxImpl[StatusReply[T]]]) extends akka.actor.testkit.typed.javadsl.StatusReplyInbox[T] @@ -142,21 +134,21 @@ private[akka] final class StatusReplyInboxImpl[T](private var underlying: Option def expectValue(expectedValue: T): Unit = receiveValue() match { - case matches if (matches == 
expectedValue) => () + case matches if matches == expectedValue => () case doesntMatch => throw new AssertionError(s"Expected $expectedValue but received $doesntMatch") } def expectErrorMessage(errorMessage: String): Unit = receiveError() match { - case matches if (matches.getMessage == errorMessage) => () + case matches if matches.getMessage == errorMessage => () case doesntMatch => throw new AssertionError(s"Expected a throwable with message $errorMessage, but got ${doesntMatch.getMessage}") } def expectNoReply(): StatusReplyInboxImpl[T] = underlying match { - case OptionVal.Some(testInbox) if (testInbox.hasMessages) => + case OptionVal.Some(testInbox) if testInbox.hasMessages => throw new AssertionError(s"Expected no reply, but ${receiveStatusReply()} was received") case OptionVal.Some(_) => this diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestKitUtils.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestKitUtils.scala index b5034176e5a..44823326ac2 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestKitUtils.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestKitUtils.scala @@ -14,9 +14,7 @@ import akka.actor.typed.scaladsl.ActorContext import akka.actor.typed.scaladsl.Behaviors import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ActorTestKitGuardian { sealed trait TestKitCommand @@ -52,17 +50,14 @@ private[akka] object ActorTestKitGuardian { private def handleSpawnException[T]( context: ActorContext[ActorTestKitGuardian.TestKitCommand], reply: ActorRef[ActorRef[T]], - props: Props): Catcher[Behavior[TestKitCommand]] = { - case NonFatal(e) => - context.log.error(s"Spawn failed, props [$props]", e) - reply ! 
context.spawnAnonymous(Behaviors.stopped) - Behaviors.same + props: Props): Catcher[Behavior[TestKitCommand]] = { case NonFatal(e) => + context.log.error(s"Spawn failed, props [$props]", e) + reply ! context.spawnAnonymous(Behaviors.stopped) + Behaviors.same } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object TestKitUtils { diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestProbeImpl.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestProbeImpl.scala index 5790046bcac..f316d0608b4 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestProbeImpl.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestProbeImpl.scala @@ -56,10 +56,9 @@ private[akka] object TestProbeImpl { Behaviors.same } } - .receiveSignal { - case (_, t: Terminated) => - terminations.offerLast(t) - Behaviors.same + .receiveSignal { case (_, t: Terminated) => + terminations.offerLast(t) + Behaviors.same } } @@ -134,8 +133,9 @@ private[akka] final class TestProbeImpl[M](name: String, system: ActorSystem[_]) val prevEnd = end end = start + maxDiff - val ret = try f - finally end = prevEnd + val ret = + try f + finally end = prevEnd val diff = now - start assert(min <= diff, s"block took ${diff.pretty}, should at least have been $min") @@ -168,7 +168,7 @@ private[akka] final class TestProbeImpl[M](name: String, system: ActorSystem[_]) o match { case Some(m) if obj == m => m.asInstanceOf[T] case Some(m) => assertFail(s"expected $obj, found $m$hintOrEmptyString") - case None => assertFail(s"timeout ($max) during expectMessage while waiting for $obj$hintOrEmptyString") + case None => assertFail(s"timeout ($max) during expectMessage while waiting for $obj$hintOrEmptyString") } } @@ -391,9 +391,7 @@ private[akka] final class TestProbeImpl[M](name: String, system: ActorSystem[_]) poll(max min interval) } - /** - * Obtain current time 
(`System.nanoTime`) as Duration. - */ + /** Obtain current time (`System.nanoTime`) as Duration. */ private def now: FiniteDuration = System.nanoTime.nanos private def assertFail(msg: String): Nothing = throw new AssertionError(msg) diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/ActorTestKit.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/ActorTestKit.scala index 6f715073737..87a7f3e10c6 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/ActorTestKit.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/ActorTestKit.scala @@ -136,9 +136,7 @@ object ActorTestKit { shutdown(system, settings.DefaultActorSystemShutdownTimeout.asJava, settings.ThrowOnShutdownTimeout) } - /** - * Config loaded from `application-test.conf`, which is used if no specific config is given. - */ + /** Config loaded from `application-test.conf`, which is used if no specific config is given. 
*/ def applicationTestConfig: Config = scaladsl.ActorTestKit.ApplicationTestConfig } @@ -159,9 +157,7 @@ object ActorTestKit { */ final class ActorTestKit private[akka] (delegate: akka.actor.testkit.typed.scaladsl.ActorTestKit) { - /** - * The default timeout as specified with the config/[[akka.actor.testkit.typed.TestKitSettings]] - */ + /** The default timeout as specified with the config/[[akka.actor.testkit.typed.TestKitSettings]] */ def timeout: Timeout = delegate.timeout /** @@ -172,14 +168,10 @@ final class ActorTestKit private[akka] (delegate: akka.actor.testkit.typed.scala def testKitSettings: TestKitSettings = delegate.testKitSettings - /** - * The scheduler of the testkit actor system - */ + /** The scheduler of the testkit actor system */ def scheduler: Scheduler = delegate.scheduler - /** - * Spawn a new auto-named actor under the testkit user guardian and return the ActorRef for the spawned actor - */ + /** Spawn a new auto-named actor under the testkit user guardian and return the ActorRef for the spawned actor */ def spawn[T](behavior: Behavior[T]): ActorRef[T] = delegate.spawn(behavior) /** @@ -259,14 +251,10 @@ final class ActorTestKit private[akka] (delegate: akka.actor.testkit.typed.scala // Note that if more methods are added here they should also be added to TestKitJunitResource - /** - * Terminate the actor system and the testkit - */ + /** Terminate the actor system and the testkit */ def shutdownTestKit(): Unit = delegate.shutdownTestKit() - /** - * Additional testing utilities for serialization. - */ + /** Additional testing utilities for serialization. 
*/ val serializationTestKit: SerializationTestKit = new SerializationTestKit(delegate.internalSystem) } diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/BehaviorTestKit.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/BehaviorTestKit.scala index 76702febdbc..6c38a194462 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/BehaviorTestKit.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/BehaviorTestKit.scala @@ -20,15 +20,11 @@ import akka.pattern.StatusReply object BehaviorTestKit { - /** - * JAVA API - */ + /** JAVA API */ @ApiMayChange def applicationTestConfig: Config = akka.actor.testkit.typed.scaladsl.BehaviorTestKit.ApplicationTestConfig - /** - * JAVA API - */ + /** JAVA API */ @ApiMayChange def create[T](initialBehavior: Behavior[T], name: String, config: Config): BehaviorTestKit[T] = { val system = new ActorSystemStub("StubbedActorContext", config) @@ -36,17 +32,13 @@ object BehaviorTestKit { new BehaviorTestKitImpl(system, (system.path / name).withUid(uid), initialBehavior) } - /** - * JAVA API - */ + /** JAVA API */ @ApiMayChange def create[T](initialBehavior: Behavior[T], name: String): BehaviorTestKit[T] = { create(initialBehavior, name, ActorSystemStub.config.defaultReference) } - /** - * JAVA API - */ + /** JAVA API */ @ApiMayChange def create[T](initialBehavior: Behavior[T]): BehaviorTestKit[T] = create(initialBehavior, "testkit") @@ -87,9 +79,7 @@ abstract class BehaviorTestKit[T] { def runAsk[Res](responseClass: Class[Res], messageFactory: JFunction[ActorRef[Res], T]): ReplyInbox[Res] = runAsk(messageFactory) - /** - * The same as [[runAsk]] but only for requests that result in a response of type [[akka.pattern.StatusReply]]. - */ + /** The same as [[runAsk]] but only for requests that result in a response of type [[akka.pattern.StatusReply]]. 
*/ def runAskWithStatus[Res](messageFactory: JFunction[ActorRef[StatusReply[Res]], T]): StatusReplyInbox[Res] /** @@ -127,19 +117,13 @@ abstract class BehaviorTestKit[T] { */ def childInbox[U](child: ActorRef[U]): TestInbox[U] - /** - * Get the [[akka.actor.typed.Behavior]] testkit for the given child [[akka.actor.typed.ActorRef]]. - */ + /** Get the [[akka.actor.typed.Behavior]] testkit for the given child [[akka.actor.typed.ActorRef]]. */ def childTestKit[U](child: ActorRef[U]): BehaviorTestKit[U] - /** - * The self inbox contains messages the behavior sent to `context.self` - */ + /** The self inbox contains messages the behavior sent to `context.self` */ def selfInbox(): TestInbox[T] - /** - * The self reference of the actor living inside this testkit. - */ + /** The self reference of the actor living inside this testkit. */ def getRef(): ActorRef[T] = selfInbox().getRef() /** @@ -148,9 +132,7 @@ abstract class BehaviorTestKit[T] { */ def getAllEffects(): java.util.List[Effect] - /** - * Returns if there have been any effects. - */ + /** Returns if there have been any effects. */ def hasEffects(): Boolean /** @@ -165,9 +147,7 @@ abstract class BehaviorTestKit[T] { */ def expectEffectClass[U <: Effect](effectClass: Class[U]): U - /** - * The current behavior, can change any time `run` is called - */ + /** The current behavior, can change any time `run` is called */ def currentBehavior: Behavior[T] /** @@ -177,38 +157,24 @@ abstract class BehaviorTestKit[T] { */ def returnedBehavior: Behavior[T] - /** - * Is the current behavior alive or stopped - */ + /** Is the current behavior alive or stopped */ def isAlive: Boolean - /** - * Send the message to the behavior and record any [[Effect]]s - */ + /** Send the message to the behavior and record any [[Effect]]s */ def run(message: T): Unit - /** - * Send the first message in the selfInbox to the behavior and run it, recording [[Effect]]s. 
- */ + /** Send the first message in the selfInbox to the behavior and run it, recording [[Effect]]s. */ def runOne(): Unit - /** - * Send the signal to the beheavior and record any [[Effect]]s - */ + /** Send the signal to the behavior and record any [[Effect]]s */ def signal(signal: Signal): Unit - /** - * Returns all the [[CapturedLogEvent]] issued by this behavior(s) - */ + /** Returns all the [[CapturedLogEvent]] issued by this behavior(s) */ def getAllLogEntries(): java.util.List[CapturedLogEvent] - /** - * Clear the log entries - */ + /** Clear the log entries */ def clearLog(): Unit - /** - * The receptionist inbox contains messages sent to `system.receptionist` - */ + /** The receptionist inbox contains messages sent to `system.receptionist` */ def receptionistInbox(): TestInbox[Receptionist.Command] } diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/Effects.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/Effects.scala index f89d63ba3b0..4b5219388ac 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/Effects.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/Effects.scala @@ -28,74 +28,48 @@ object Effects { messageClass: Class[T]): AskInitiated[Req, Res, T] = AskInitiated(target, responseTimeout.asScala, responseClass)(null.asInstanceOf[Req], null, null) - /** - * The behavior spawned a named child with the given behavior with no specific props - */ + /** The behavior spawned a named child with the given behavior with no specific props */ def spawned[T](behavior: Behavior[T], childName: String): Spawned[T] = Spawned(behavior, childName) - /** - * The behavior spawned a named child with the given behavior with no specific props - */ + /** The behavior spawned a named child with the given behavior with no specific props */ def spawned[T](behavior: Behavior[T], childName: String, ref: ActorRef[T]): Spawned[T] = new 
Spawned(behavior, childName, Props.empty, ref) - /** - * The behavior spawned a named child with the given behavior and specific props - */ + /** The behavior spawned a named child with the given behavior and specific props */ def spawned[T](behavior: Behavior[T], childName: String, props: Props): Spawned[T] = Spawned(behavior, childName, props) - /** - * The behavior spawned a named child with the given behavior and specific props - */ + /** The behavior spawned a named child with the given behavior and specific props */ def spawned[T](behavior: Behavior[T], childName: String, props: Props, ref: ActorRef[T]): Spawned[T] = new Spawned(behavior, childName, props, ref) - /** - * The behavior spawned an anonymous child with the given behavior with no specific props - */ + /** The behavior spawned an anonymous child with the given behavior with no specific props */ def spawnedAnonymous[T](behavior: Behavior[T]): SpawnedAnonymous[T] = SpawnedAnonymous(behavior) - /** - * The behavior spawned an anonymous child with the given behavior with no specific props - */ + /** The behavior spawned an anonymous child with the given behavior with no specific props */ def spawnedAnonymous[T](behavior: Behavior[T], ref: ActorRef[T]): SpawnedAnonymous[T] = new SpawnedAnonymous(behavior, Props.empty, ref) - /** - * The behavior spawned an anonymous child with the given behavior with specific props - */ + /** The behavior spawned an anonymous child with the given behavior with specific props */ def spawnedAnonymous[T](behavior: Behavior[T], props: Props): SpawnedAnonymous[T] = SpawnedAnonymous(behavior, props) - /** - * The behavior spawned an anonymous child with the given behavior with specific props - */ + /** The behavior spawned an anonymous child with the given behavior with specific props */ def spawnedAnonymous[T](behavior: Behavior[T], props: Props, ref: ActorRef[T]): SpawnedAnonymous[T] = new SpawnedAnonymous(behavior, props, ref) - /** - * The behavior stopped `childName` - 
*/ + /** The behavior stopped `childName` */ def stopped(childName: String): Stopped = Stopped(childName) - /** - * The behavior started watching `other`, through `context.watch(other)` - */ + /** The behavior started watching `other`, through `context.watch(other)` */ def watched[T](other: ActorRef[T]): Watched[T] = Watched(other) - /** - * The behavior started watching `other`, through `context.watchWith(other, message)` - */ + /** The behavior started watching `other`, through `context.watchWith(other, message)` */ def watchedWith[U, T](other: ActorRef[U], message: T): WatchedWith[U, T] = WatchedWith(other, message) - /** - * The behavior stopped watching `other`, through `context.unwatch(other)` - */ + /** The behavior stopped watching `other`, through `context.unwatch(other)` */ def unwatched[T](other: ActorRef[T]): Unwatched[T] = Unwatched(other) - /** - * The behavior set a new receive timeout, with `message` as timeout notification - */ + /** The behavior set a new receive timeout, with `message` as timeout notification */ def receiveTimeoutSet[T](d: Duration, message: T): ReceiveTimeoutSet[T] = ReceiveTimeoutSet(d.asScala, message) /** @@ -114,9 +88,7 @@ object Effects { send: akka.japi.function.Effect): TimerScheduled[U] = TimerScheduled(key, msg, delay.asScala, mode, overriding)(send.apply _) - /** - * Used to represent an empty list of effects - in other words, the behavior didn't do anything observable - */ + /** Used to represent an empty list of effects - in other words, the behavior didn't do anything observable */ def noEffects(): NoEffects = NoEffects } diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/LoggingTestKit.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/LoggingTestKit.scala index d6023d36bab..246d6fd4ca4 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/LoggingTestKit.scala +++ 
b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/LoggingTestKit.scala @@ -36,9 +36,7 @@ import akka.annotation.DoNotInherit */ def withOccurrences(newOccurrences: Int): LoggingTestKit - /** - * Matching events with the given log level. - */ + /** Matching events with the given log level. */ def withLogLevel(newLogLevel: Level): LoggingTestKit /** @@ -54,14 +52,10 @@ import akka.annotation.DoNotInherit */ def withSource(newSource: String): LoggingTestKit - /** - * Matching events with a message that contains the given value. - */ + /** Matching events with a message that contains the given value. */ def withMessageContains(newMessageContains: String): LoggingTestKit - /** - * Matching events with a message that matches the given regular expression. - */ + /** Matching events with a message that matches the given regular expression. */ def withMessageRegex(newMessageRegex: String): LoggingTestKit /** @@ -76,20 +70,14 @@ import akka.annotation.DoNotInherit */ def withMdc(newMdc: java.util.Map[String, String]): LoggingTestKit - /** - * After matching the expected number of hits, check for excess messages - */ + /** After matching the expected number of hits, check for excess messages */ def withCheckExcess(check: Boolean): LoggingTestKit - /** - * Matching events for which the supplied function returns `true`. - */ + /** Matching events for which the supplied function returns `true`. */ def withCustom(newCustom: Function[LoggingEvent, Boolean]): LoggingTestKit // this is a Scala Function, ^ but that can be used with lambda from Java - /** - * @return `true` if the event matches the conditions of the filter. - */ + /** @return `true` if the event matches the conditions of the filter. 
*/ def matches(event: LoggingEvent): Boolean /** @@ -198,9 +186,7 @@ object LoggingTestKit { def custom(test: Function[LoggingEvent, Boolean]): LoggingTestKit = empty.withCustom(test) // this is a Scala Function, but that can be used with lambda from Java - /** - * Filter for the logging of dead letters. - */ + /** Filter for the logging of dead letters. */ def deadLetters(): LoggingTestKit = empty.withLogLevel(Level.INFO).withMessageRegex(".*was not delivered.*dead letters encountered.*") } diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/ManualTime.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/ManualTime.scala index 2422ccb5d80..8296ee8b9b3 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/ManualTime.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/ManualTime.scala @@ -22,9 +22,7 @@ import akka.util.JavaDurationConverters._ */ object ManualTime { - /** - * Config that needs to be in place for the actor system to use the manual - */ + /** Config that needs to be in place for the actor system to use the manual */ def config(): Config = akka.actor.testkit.typed.scaladsl.ManualTime.config /** @@ -48,9 +46,7 @@ object ManualTime { } -/** - * Not for user instantiation, see [[ManualTime#get]] - */ +/** Not for user instantiation, see [[ManualTime#get]] */ final class ManualTime(delegate: akka.testkit.ExplicitlyTriggeredScheduler) { /** diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/SerializationTestKit.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/SerializationTestKit.scala index cbd93153d74..c1a87979ad7 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/SerializationTestKit.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/SerializationTestKit.scala @@ -7,9 +7,7 @@ package 
akka.actor.testkit.typed.javadsl import akka.actor.testkit.typed.scaladsl import akka.actor.typed.ActorSystem -/** - * Utilities to test serialization. - */ +/** Utilities to test serialization. */ class SerializationTestKit(system: ActorSystem[_]) { private val delegate = new scaladsl.SerializationTestKit(system) diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestInbox.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestInbox.scala index e9b0adf5fc7..0648c9db3af 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestInbox.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestInbox.scala @@ -31,24 +31,16 @@ object TestInbox { @DoNotInherit abstract class TestInbox[T] { - /** - * The actor ref of the inbox - */ + /** The actor ref of the inbox */ def getRef(): ActorRef[T] - /** - * Get and remove the oldest message - */ + /** Get and remove the oldest message */ def receiveMessage(): T - /** - * Assert and remove the the oldest message. - */ + /** Assert and remove the oldest message. */ def expectMessage(expectedMessage: T): TestInbox[T] - /** - * Collect all messages in the inbox and clear it out - */ + /** Collect all messages in the inbox and clear it out */ def getAllReceived(): java.util.List[T] = internalReceiveAll().asJava protected def internalReceiveAll(): immutable.Seq[T] @@ -129,9 +121,7 @@ trait StatusReplyInbox[T] { */ def expectErrorMessage(errorMessage: String): Unit - /** - * Assert that this inbox has *never* received a reply. 
*/ def expectNoReply(): StatusReplyInbox[T] def hasReply: Boolean diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestKitJunitResource.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestKitJunitResource.scala index 87ca2f26954..b27491146fa 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestKitJunitResource.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestKitJunitResource.scala @@ -38,8 +38,8 @@ import akka.util.Timeout * * @Test * public void testBlah() throws Exception { - * // spawn actors etc using the testKit - * ActorRef ref = testKit.spawn(behavior); + * // spawn actors etc using the testKit + * ActorRef ref = testKit.spawn(behavior); * } * } * }}} @@ -58,29 +58,21 @@ final class TestKitJunitResource(_kit: ActorTestKit) extends ExternalResource { */ def this() = this(ActorTestKit.create(TestKitUtils.testNameFromCallStack(classOf[TestKitJunitResource]))) - /** - * Use a custom [[akka.actor.typed.ActorSystem]] for the actor system. - */ + /** Use a custom [[akka.actor.typed.ActorSystem]] for the actor system. */ def this(system: ActorSystem[_]) = this(ActorTestKit.create(system)) - /** - * Use a custom config for the actor system. - */ + /** Use a custom config for the actor system. */ def this(customConfig: String) = this( ActorTestKit.create( TestKitUtils.testNameFromCallStack(classOf[TestKitJunitResource]), ConfigFactory.parseString(customConfig))) - /** - * Use a custom config for the actor system. - */ + /** Use a custom config for the actor system. */ def this(customConfig: Config) = this(ActorTestKit.create(TestKitUtils.testNameFromCallStack(classOf[TestKitJunitResource]), customConfig)) - /** - * Use a custom config for the actor system, and a custom [[akka.actor.testkit.typed.TestKitSettings]]. - */ + /** Use a custom config for the actor system, and a custom [[akka.actor.testkit.typed.TestKitSettings]]. 
*/ def this(customConfig: Config, settings: TestKitSettings) = this(ActorTestKit.create(TestKitUtils.testNameFromCallStack(classOf[TestKitJunitResource]), customConfig, settings)) @@ -88,94 +80,58 @@ final class TestKitJunitResource(_kit: ActorTestKit) extends ExternalResource { val testKit: ActorTestKit = _kit // delegates of the TestKit api for minimum fuss - /** - * See corresponding method on [[ActorTestKit]] - */ + /** See corresponding method on [[ActorTestKit]] */ def system: ActorSystem[Void] = testKit.system - /** - * See corresponding method on [[ActorTestKit]] - */ + /** See corresponding method on [[ActorTestKit]] */ def testKitSettings: TestKitSettings = testKit.testKitSettings - /** - * See corresponding method on [[ActorTestKit]] - */ + /** See corresponding method on [[ActorTestKit]] */ def timeout: Timeout = testKit.timeout - /** - * See corresponding method on [[ActorTestKit]] - */ + /** See corresponding method on [[ActorTestKit]] */ def scheduler: Scheduler = testKit.scheduler - /** - * See corresponding method on [[ActorTestKit]] - */ + /** See corresponding method on [[ActorTestKit]] */ def spawn[T](behavior: Behavior[T]): ActorRef[T] = testKit.spawn(behavior) - /** - * See corresponding method on [[ActorTestKit]] - */ + /** See corresponding method on [[ActorTestKit]] */ def spawn[T](behavior: Behavior[T], name: String): ActorRef[T] = testKit.spawn(behavior, name) - /** - * See corresponding method on [[ActorTestKit]] - */ + /** See corresponding method on [[ActorTestKit]] */ def spawn[T](behavior: Behavior[T], props: Props): ActorRef[T] = testKit.spawn(behavior, props) - /** - * See corresponding method on [[ActorTestKit]] - */ + /** See corresponding method on [[ActorTestKit]] */ def spawn[T](behavior: Behavior[T], name: String, props: Props): ActorRef[T] = testKit.spawn(behavior, name, props) - /** - * See corresponding method on [[ActorTestKit]] - */ + /** See corresponding method on [[ActorTestKit]] */ def createTestProbe[M](): 
TestProbe[M] = testKit.createTestProbe[M]() - /** - * See corresponding method on [[ActorTestKit]] - */ + /** See corresponding method on [[ActorTestKit]] */ def createTestProbe[M](clazz: Class[M]): TestProbe[M] = testKit.createTestProbe(clazz) - /** - * See corresponding method on [[ActorTestKit]] - */ + /** See corresponding method on [[ActorTestKit]] */ def createTestProbe[M](name: String, clazz: Class[M]): TestProbe[M] = testKit.createTestProbe(name, clazz) - /** - * See corresponding method on [[ActorTestKit]] - */ + /** See corresponding method on [[ActorTestKit]] */ def createTestProbe[M](name: String): TestProbe[M] = testKit.createTestProbe(name) - /** - * See corresponding method on [[ActorTestKit]] - */ + /** See corresponding method on [[ActorTestKit]] */ def stop[T](ref: ActorRef[T], max: Duration): Unit = testKit.stop(ref, max) - /** - * See corresponding method on [[ActorTestKit]] - */ + /** See corresponding method on [[ActorTestKit]] */ def createUnhandledMessageProbe(): TestProbe[UnhandledMessage] = testKit.createUnhandledMessageProbe() - /** - * See corresponding method on [[ActorTestKit]] - */ + /** See corresponding method on [[ActorTestKit]] */ def createDeadLetterProbe(): TestProbe[DeadLetter] = testKit.createDeadLetterProbe() - /** - * See corresponding method on [[ActorTestKit]] - */ + /** See corresponding method on [[ActorTestKit]] */ def createDroppedMessageProbe(): TestProbe[Dropped] = testKit.createDroppedMessageProbe() - /** - * See corresponding method on [[ActorTestKit]] - */ + /** See corresponding method on [[ActorTestKit]] */ def stop[T](ref: ActorRef[T]): Unit = testKit.stop(ref) - /** - * Additional testing utilities for serialization. - */ + /** Additional testing utilities for serialization. 
*/ def serializationTestKit: SerializationTestKit = testKit.serializationTestKit override def after(): Unit = { diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestProbe.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestProbe.scala index da95a36d5cf..a0d68ba0199 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestProbe.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestProbe.scala @@ -21,24 +21,16 @@ import akka.util.unused object FishingOutcomes { - /** - * Consume this message and continue with the next - */ + /** Consume this message and continue with the next */ def continueAndCollect(): FishingOutcome = FishingOutcome.Continue - /** - * Consume this message and continue with the next - */ + /** Consume this message and continue with the next */ def continueAndIgnore(): FishingOutcome = akka.actor.testkit.typed.FishingOutcome.ContinueAndIgnore - /** - * Complete fishing and return this message - */ + /** Complete fishing and return this message */ def complete(): FishingOutcome = akka.actor.testkit.typed.FishingOutcome.Complete - /** - * Fail fishing with a custom error message - */ + /** Fail fishing with a custom error message */ def fail(error: String): FishingOutcome = akka.actor.testkit.typed.FishingOutcome.Fail(error) } @@ -71,14 +63,10 @@ abstract class TestProbe[M] extends RecipientRef[M] { this: InternalRecipientRef implicit protected def settings: TestKitSettings - /** - * ActorRef for this TestProbe - */ + /** ActorRef for this TestProbe */ def ref: ActorRef[M] - /** - * ActorRef for this TestProbe - */ + /** ActorRef for this TestProbe */ def getRef(): ActorRef[M] = ref /** @@ -119,9 +107,7 @@ abstract class TestProbe[M] extends RecipientRef[M] { this: InternalRecipientRef */ def within[T](min: Duration, max: Duration)(f: Supplier[T]): T - /** - * Same as calling `within(0 seconds, max)(f)`. 
- */ + /** Same as calling `within(0 seconds, max)(f)`. */ def within[T](max: Duration)(f: Supplier[T]): T /** @@ -173,9 +159,7 @@ abstract class TestProbe[M] extends RecipientRef[M] { this: InternalRecipientRef */ def expectMessageClass[T <: M](clazz: Class[T], max: Duration): T - /** - * Receive one message of type `M` within the default timeout as deadline. - */ + /** Receive one message of type `M` within the default timeout as deadline. */ def receiveMessage(): M /** @@ -184,9 +168,7 @@ abstract class TestProbe[M] extends RecipientRef[M] { this: InternalRecipientRef */ def receiveMessage(max: Duration): M - /** - * Same as `receiveSeveralMessages(n, remaining)` but using the default timeout as deadline. - */ + /** Same as `receiveSeveralMessages(n, remaining)` but using the default timeout as deadline. */ def receiveSeveralMessages(n: Int): JList[M] /** @@ -215,9 +197,7 @@ abstract class TestProbe[M] extends RecipientRef[M] { this: InternalRecipientRef */ def fishForMessage(max: Duration, fisher: java.util.function.Function[M, FishingOutcome]): java.util.List[M] - /** - * Same as the other `fishForMessage` but includes the provided hint in all error messages - */ + /** Same as the other `fishForMessage` but includes the provided hint in all error messages */ def fishForMessage( max: Duration, hint: String, @@ -231,9 +211,7 @@ abstract class TestProbe[M] extends RecipientRef[M] { this: InternalRecipientRef */ def expectTerminated[U](actorRef: ActorRef[U], max: Duration): Unit - /** - * Expect the given actor to be stopped or stop within the default timeout. - */ + /** Expect the given actor to be stopped or stop within the default timeout. 
*/ def expectTerminated[U](actorRef: ActorRef[U]): Unit /** @@ -264,9 +242,7 @@ abstract class TestProbe[M] extends RecipientRef[M] { this: InternalRecipientRef // FIXME awaitAssert(Procedure): Unit would be nice for java people to not have to return null - /** - * Stops the [[TestProbe.getRef]], which is useful when testing watch and termination. - */ + /** Stops the [[TestProbe.getRef]], which is useful when testing watch and termination. */ def stop(): Unit } diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ActorTestKit.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ActorTestKit.scala index 49e109bbed3..105c765bfda 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ActorTestKit.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ActorTestKit.scala @@ -161,9 +161,7 @@ object ActorTestKit { def shutdown(system: ActorSystem[_], timeout: Duration, throwIfShutdownFails: Boolean = false): Unit = TestKitUtils.shutdown(system, timeout, throwIfShutdownFails) - /** - * Config loaded from `application-test.conf`, which is used if no specific config is given. - */ + /** Config loaded from `application-test.conf`, which is used if no specific config is given. 
*/ val ApplicationTestConfig: Config = ConfigFactory.load("application-test") private val dummyMessage = new DeadLetterSuppression {} @@ -196,9 +194,7 @@ final class ActorTestKit private[akka] ( implicit def testKitSettings: TestKitSettings = settings.getOrElse(TestKitSettings(system)) - /** - * INTERNAL API - */ + /** INTERNAL API */ implicit def system: ActorSystem[Nothing] = internalSystem private val childName: Iterator[String] = Iterator.from(0).map(_.toString) @@ -253,9 +249,11 @@ final class ActorTestKit private[akka] ( */ def stop[T](ref: ActorRef[T], max: FiniteDuration = timeout.duration): Unit = try { - Await.result(internalTestKitGuardian.ask { (x: ActorRef[ActorTestKitGuardian.Ack.type]) => - ActorTestKitGuardian.StopActor(ref, x) - }(Timeout(max), scheduler), max) + Await.result( + internalTestKitGuardian.ask { (x: ActorRef[ActorTestKitGuardian.Ack.type]) => + ActorTestKitGuardian.StopActor(ref, x) + }(Timeout(max), scheduler), + max) } catch { case _: TimeoutException => assert(false, s"timeout ($max) during stop() waiting for actor [${ref.path}] to stop") @@ -311,9 +309,7 @@ final class ActorTestKit private[akka] ( probe } - /** - * Additional testing utilities for serialization. - */ + /** Additional testing utilities for serialization. */ val serializationTestKit: SerializationTestKit = new SerializationTestKit(internalSystem) // FIXME needed for Akka internal tests but, users shouldn't spawn system actors? 
diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ActorTestKitBase.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ActorTestKitBase.scala index 2bc2fb68622..5054802b344 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ActorTestKitBase.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ActorTestKitBase.scala @@ -35,87 +35,55 @@ abstract class ActorTestKitBase(val testKit: ActorTestKit) { def this() = this(ActorTestKit(ActorTestKitBase.testNameFromCallStack())) - /** - * Use a custom config for the actor system. - */ + /** Use a custom config for the actor system. */ def this(config: String) = this(ActorTestKit(ActorTestKitBase.testNameFromCallStack(), ConfigFactory.parseString(config))) - /** - * Use a custom config for the actor system. - */ + /** Use a custom config for the actor system. */ def this(config: Config) = this(ActorTestKit(ActorTestKitBase.testNameFromCallStack(), config)) - /** - * Use a custom config for the actor system, and a custom [[akka.actor.testkit.typed.TestKitSettings]]. - */ + /** Use a custom config for the actor system, and a custom [[akka.actor.testkit.typed.TestKitSettings]]. 
*/ def this(config: Config, settings: TestKitSettings) = this(ActorTestKit(ActorTestKitBase.testNameFromCallStack(), config, settings)) // delegates of the TestKit api for minimum fuss - /** - * See corresponding method on [[ActorTestKit]] - */ + /** See corresponding method on [[ActorTestKit]] */ implicit def system: ActorSystem[Nothing] = testKit.system - /** - * See corresponding method on [[ActorTestKit]] - */ + /** See corresponding method on [[ActorTestKit]] */ implicit def testKitSettings: TestKitSettings = testKit.testKitSettings - /** - * See corresponding method on [[ActorTestKit]] - */ + /** See corresponding method on [[ActorTestKit]] */ implicit def timeout: Timeout = testKit.timeout - /** - * See corresponding method on [[ActorTestKit]] - */ + /** See corresponding method on [[ActorTestKit]] */ def spawn[T](behavior: Behavior[T]): ActorRef[T] = testKit.spawn(behavior) - /** - * See corresponding method on [[ActorTestKit]] - */ + /** See corresponding method on [[ActorTestKit]] */ def spawn[T](behavior: Behavior[T], name: String): ActorRef[T] = testKit.spawn(behavior, name) - /** - * See corresponding method on [[ActorTestKit]] - */ + /** See corresponding method on [[ActorTestKit]] */ def spawn[T](behavior: Behavior[T], props: Props): ActorRef[T] = testKit.spawn(behavior, props) - /** - * See corresponding method on [[ActorTestKit]] - */ + /** See corresponding method on [[ActorTestKit]] */ def spawn[T](behavior: Behavior[T], name: String, props: Props): ActorRef[T] = testKit.spawn(behavior, name, props) - /** - * See corresponding method on [[ActorTestKit]] - */ + /** See corresponding method on [[ActorTestKit]] */ def createTestProbe[M](): TestProbe[M] = testKit.createTestProbe[M]() - /** - * See corresponding method on [[ActorTestKit]] - */ + /** See corresponding method on [[ActorTestKit]] */ def createTestProbe[M](name: String): TestProbe[M] = testKit.createTestProbe(name) - /** - * See corresponding method on [[ActorTestKit]] - */ + /** See 
corresponding method on [[ActorTestKit]] */ def createDroppedMessageProbe(): TestProbe[Dropped] = testKit.createDroppedMessageProbe() - /** - * See corresponding method on [[ActorTestKit]] - */ + /** See corresponding method on [[ActorTestKit]] */ def createDeadLetterProbe(): TestProbe[DeadLetter] = testKit.createDeadLetterProbe() - /** - * See corresponding method on [[ActorTestKit]] - */ + /** See corresponding method on [[ActorTestKit]] */ def createUnhandledMessageProbe(): TestProbe[UnhandledMessage] = testKit.createUnhandledMessageProbe() - /** - * Additional testing utilities for serialization. - */ + /** Additional testing utilities for serialization. */ def serializationTestKit: SerializationTestKit = testKit.serializationTestKit /** diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/BehaviorTestKit.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/BehaviorTestKit.scala index 063599eef51..21c80decc2d 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/BehaviorTestKit.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/BehaviorTestKit.scala @@ -56,9 +56,7 @@ trait BehaviorTestKit[T] { */ def runAsk[Res](f: ActorRef[Res] => T): ReplyInbox[Res] - /** - * The same as [[runAsk]] but only for requests that result in a response of type [[akka.pattern.StatusReply]]. - */ + /** The same as [[runAsk]] but only for requests that result in a response of type [[akka.pattern.StatusReply]]. */ def runAskWithStatus[Res](f: ActorRef[StatusReply[Res]] => T): StatusReplyInbox[Res] // FIXME it is weird that this is public but it is used in BehaviorSpec, could we avoid that? @@ -77,24 +75,16 @@ trait BehaviorTestKit[T] { */ def childInbox[U](name: String): TestInbox[U] - /** - * Get the child inbox for the child ActorRef, or fail if there is no such child. 
- */ + /** Get the child inbox for the child ActorRef, or fail if there is no such child. */ def childInbox[U](child: ActorRef[U]): TestInbox[U] - /** - * Get the [[akka.actor.typed.Behavior]] testkit for the given child [[akka.actor.typed.ActorRef]]. - */ + /** Get the [[akka.actor.typed.Behavior]] testkit for the given child [[akka.actor.typed.ActorRef]]. */ def childTestKit[U](child: ActorRef[U]): BehaviorTestKit[U] - /** - * The self inbox contains messages the behavior sent to `context.self` - */ + /** The self inbox contains messages the behavior sent to `context.self` */ def selfInbox(): TestInbox[T] - /** - * The self reference of the actor living inside this testkit. - */ + /** The self reference of the actor living inside this testkit. */ def ref: ActorRef[T] = selfInbox().ref /** @@ -103,9 +93,7 @@ trait BehaviorTestKit[T] { */ def retrieveAllEffects(): immutable.Seq[Effect] - /** - * Returns if there have been any effects. - */ + /** Returns if there have been any effects. */ def hasEffects(): Boolean /** @@ -120,14 +108,10 @@ trait BehaviorTestKit[T] { */ def expectEffectType[E <: Effect](implicit classTag: ClassTag[E]): E - /** - * Asserts that the oldest effect matches the given partial function. - */ + /** Asserts that the oldest effect matches the given partial function. 
*/ def expectEffectPF[R](f: PartialFunction[Effect, R]): R - /** - * The current behavior, can change any time `run` is called - */ + /** The current behavior, can change any time `run` is called */ def currentBehavior: Behavior[T] /** @@ -137,38 +121,24 @@ trait BehaviorTestKit[T] { */ def returnedBehavior: Behavior[T] - /** - * Is the current behavior alive or stopped - */ + /** Is the current behavior alive or stopped */ def isAlive: Boolean - /** - * Send the message to the behavior and record any [[Effect]]s - */ + /** Send the message to the behavior and record any [[Effect]]s */ def run(message: T): Unit - /** - * Send the first message in the selfInbox to the behavior and run it, recording [[Effect]]s. - */ + /** Send the first message in the selfInbox to the behavior and run it, recording [[Effect]]s. */ def runOne(): Unit - /** - * Send the signal to the behavior and record any [[Effect]]s - */ + /** Send the signal to the behavior and record any [[Effect]]s */ def signal(signal: Signal): Unit - /** - * Returns all the [[CapturedLogEvent]] issued by this behavior(s) - */ + /** Returns all the [[CapturedLogEvent]] issued by this behavior(s) */ def logEntries(): immutable.Seq[CapturedLogEvent] - /** - * Clear the log entries - */ + /** Clear the log entries */ def clearLog(): Unit - /** - * The receptionist inbox contains messages sent to `system.receptionist` - */ + /** The receptionist inbox contains messages sent to `system.receptionist` */ def receptionistInbox(): TestInbox[Receptionist.Command] } diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/Effects.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/Effects.scala index cbac6c33861..f51b4d1af2b 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/Effects.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/Effects.scala @@ -26,74 +26,48 @@ object Effects { 
responseClass: Class[Res]): AskInitiated[Req, Res, T] = AskInitiated(target, responseTimeout, responseClass)(null.asInstanceOf[Req], null, null) - /** - * The behavior spawned a named child with the given behavior with no specific props - */ + /** The behavior spawned a named child with the given behavior with no specific props */ def spawned[T](behavior: Behavior[T], childName: String): Spawned[T] = Spawned(behavior, childName) - /** - * The behavior spawned a named child with the given behavior with no specific props - */ + /** The behavior spawned a named child with the given behavior with no specific props */ def spawned[T](behavior: Behavior[T], childName: String, ref: ActorRef[T]): Spawned[T] = new Spawned(behavior, childName, Props.empty, ref) - /** - * The behavior spawned a named child with the given behavior and specific props - */ + /** The behavior spawned a named child with the given behavior and specific props */ def spawned[T](behavior: Behavior[T], childName: String, props: Props): Spawned[T] = Spawned(behavior, childName, props) - /** - * The behavior spawned a named child with the given behavior and specific props - */ + /** The behavior spawned a named child with the given behavior and specific props */ def spawned[T](behavior: Behavior[T], childName: String, props: Props, ref: ActorRef[T]): Spawned[T] = new Spawned(behavior, childName, props, ref) - /** - * The behavior spawned an anonymous child with the given behavior with no specific props - */ + /** The behavior spawned an anonymous child with the given behavior with no specific props */ def spawnedAnonymous[T](behavior: Behavior[T]): SpawnedAnonymous[T] = SpawnedAnonymous(behavior) - /** - * The behavior spawned an anonymous child with the given behavior with no specific props - */ + /** The behavior spawned an anonymous child with the given behavior with no specific props */ def spawnedAnonymous[T](behavior: Behavior[T], ref: ActorRef[T]): SpawnedAnonymous[T] = new 
SpawnedAnonymous(behavior, Props.empty, ref) - /** - * The behavior spawned an anonymous child with the given behavior with specific props - */ + /** The behavior spawned an anonymous child with the given behavior with specific props */ def spawnedAnonymous[T](behavior: Behavior[T], props: Props): SpawnedAnonymous[T] = SpawnedAnonymous(behavior, props) - /** - * The behavior spawned an anonymous child with the given behavior with specific props - */ + /** The behavior spawned an anonymous child with the given behavior with specific props */ def spawnedAnonymous[T](behavior: Behavior[T], props: Props, ref: ActorRef[T]): SpawnedAnonymous[T] = new SpawnedAnonymous(behavior, props, ref) - /** - * The behavior stopped `childName` - */ + /** The behavior stopped `childName` */ def stopped(childName: String): Stopped = Stopped(childName) - /** - * The behavior started watching `other`, through `context.watch(other)` - */ + /** The behavior started watching `other`, through `context.watch(other)` */ def watched[T](other: ActorRef[T]): Watched[T] = Watched(other) - /** - * The behavior started watching `other`, through `context.watchWith(other, message)` - */ + /** The behavior started watching `other`, through `context.watchWith(other, message)` */ def watchedWith[U, T](other: ActorRef[U], message: T): WatchedWith[U, T] = WatchedWith(other, message) - /** - * The behavior stopped watching `other`, through `context.unwatch(other)` - */ + /** The behavior stopped watching `other`, through `context.unwatch(other)` */ def unwatched[T](other: ActorRef[T]): Unwatched[T] = Unwatched(other) - /** - * The behavior set a new receive timeout, with `message` as timeout notification - */ + /** The behavior set a new receive timeout, with `message` as timeout notification */ def receiveTimeoutSet[T](d: FiniteDuration, message: T): ReceiveTimeoutSet[T] = ReceiveTimeoutSet(d, message) /** @@ -103,9 +77,7 @@ object Effects { def scheduled[U](delay: FiniteDuration, target: ActorRef[U], 
message: U): Scheduled[U] = Scheduled(delay, target, message) - /** - * Used to represent an empty list of effects - in other words, the behavior didn't do anything observable - */ + /** Used to represent an empty list of effects - in other words, the behavior didn't do anything observable */ def noEffects(): NoEffects = NoEffects } diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/LoggingTestKit.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/LoggingTestKit.scala index c938a4f2865..af915818d41 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/LoggingTestKit.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/LoggingTestKit.scala @@ -36,9 +36,7 @@ import akka.annotation.DoNotInherit */ def withOccurrences(newOccurrences: Int): LoggingTestKit - /** - * Matching events with the given log level. - */ + /** Matching events with the given log level. */ def withLogLevel(newLogLevel: Level): LoggingTestKit /** @@ -54,14 +52,10 @@ import akka.annotation.DoNotInherit */ def withSource(newSource: String): LoggingTestKit - /** - * Matching events with a message that contains the given value. - */ + /** Matching events with a message that contains the given value. */ def withMessageContains(newMessageContains: String): LoggingTestKit - /** - * Matching events with a message that matches the given regular expression. - */ + /** Matching events with a message that matches the given regular expression. 
*/ def withMessageRegex(newMessageRegex: String): LoggingTestKit /** @@ -76,19 +70,13 @@ import akka.annotation.DoNotInherit */ def withMdc(newMdc: Map[String, String]): LoggingTestKit - /** - * After matching the expected number of hits, check for excess messages - */ + /** After matching the expected number of hits, check for excess messages */ def withCheckExcess(checkExcess: Boolean): LoggingTestKit - /** - * Matching events for which the supplied function returns`true`. - */ + /** Matching events for which the supplied function returns`true`. */ def withCustom(newCustom: Function[LoggingEvent, Boolean]): LoggingTestKit - /** - * @return `true` if the event matches the conditions of the filter. - */ + /** @return `true` if the event matches the conditions of the filter. */ def matches(event: LoggingEvent): Boolean /** @@ -207,9 +195,7 @@ object LoggingTestKit { def custom(test: Function[LoggingEvent, Boolean]): LoggingTestKit = empty.withCustom(test) - /** - * Filter for the logging of dead letters. - */ + /** Filter for the logging of dead letters. 
*/ def deadLetters(): LoggingTestKit = empty.withLogLevel(Level.INFO).withMessageRegex(".*was not delivered.*dead letters encountered.*") } diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ManualTime.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ManualTime.scala index 4cbeaf5f67d..d8d8a60d58d 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ManualTime.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ManualTime.scala @@ -20,9 +20,7 @@ import akka.actor.typed.internal.adapter.SchedulerAdapter */ object ManualTime { - /** - * Config needed to use the `ExplicitlyTriggeredScheduler` - */ + /** Config needed to use the `ExplicitlyTriggeredScheduler` */ val config: Config = ConfigFactory.parseString("""akka.scheduler.implementation = "akka.testkit.ExplicitlyTriggeredScheduler"""") @@ -47,9 +45,7 @@ object ManualTime { } -/** - * Not for user instantiation, see [[ManualTime#apply]] - */ +/** Not for user instantiation, see [[ManualTime#apply]] */ final class ManualTime(delegate: akka.testkit.ExplicitlyTriggeredScheduler) { /** diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ScalaTestWithActorTestKit.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ScalaTestWithActorTestKit.scala index 99b3f560396..2dec6760b23 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ScalaTestWithActorTestKit.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ScalaTestWithActorTestKit.scala @@ -43,25 +43,17 @@ abstract class ScalaTestWithActorTestKit(testKit: ActorTestKit) */ def this() = this(ActorTestKit(ActorTestKitBase.testNameFromCallStack())) - /** - * Use a custom [[akka.actor.typed.ActorSystem]] for the actor system. - */ + /** Use a custom [[akka.actor.typed.ActorSystem]] for the actor system. 
*/ def this(system: ActorSystem[_]) = this(ActorTestKit(system)) - /** - * Use a custom config for the actor system. - */ + /** Use a custom config for the actor system. */ def this(config: String) = this(ActorTestKit(ActorTestKitBase.testNameFromCallStack(), ConfigFactory.parseString(config))) - /** - * Use a custom config for the actor system. - */ + /** Use a custom config for the actor system. */ def this(config: Config) = this(ActorTestKit(ActorTestKitBase.testNameFromCallStack(), config)) - /** - * Use a custom config for the actor system, and a custom [[akka.actor.testkit.typed.TestKitSettings]]. - */ + /** Use a custom config for the actor system, and a custom [[akka.actor.testkit.typed.TestKitSettings]]. */ def this(config: Config, settings: TestKitSettings) = this(ActorTestKit(ActorTestKitBase.testNameFromCallStack(), config, settings)) diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/SerializationTestKit.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/SerializationTestKit.scala index 569365c8ec0..1bc58ab5196 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/SerializationTestKit.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/SerializationTestKit.scala @@ -9,9 +9,7 @@ import akka.actor.typed.scaladsl.adapter._ import akka.serialization.SerializationExtension import akka.serialization.Serializers -/** - * Utilities to test serialization. - */ +/** Utilities to test serialization. 
*/ class SerializationTestKit(system: ActorSystem[_]) { private val serialization = SerializationExtension(system.toClassic) diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/TestInbox.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/TestInbox.scala index 62fbed93a99..751fba5d5de 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/TestInbox.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/TestInbox.scala @@ -34,24 +34,16 @@ object TestInbox { @ApiMayChange trait TestInbox[T] { - /** - * The actor ref of the inbox - */ + /** The actor ref of the inbox */ def ref: ActorRef[T] - /** - * Get and remove the oldest message - */ + /** Get and remove the oldest message */ def receiveMessage(): T - /** - * Assert and remove the the oldest message. - */ + /** Assert and remove the the oldest message. */ def expectMessage(expectedMessage: T): TestInbox[T] - /** - * Collect all messages in the inbox and clear it out - */ + /** Collect all messages in the inbox and clear it out */ def receiveAll(): immutable.Seq[T] = internalReceiveAll() protected def internalReceiveAll(): immutable.Seq[T] @@ -83,9 +75,7 @@ trait ReplyInbox[T] { */ def expectReply(expectedReply: T): Unit - /** - * Assert that this inbox has *never* received a reply. - */ + /** Assert that this inbox has *never* received a reply. */ def expectNoReply(): ReplyInbox[T] def hasReply: Boolean @@ -143,9 +133,7 @@ trait StatusReplyInbox[T] { @annotation.nowarn("msg=never used") def expectDone()(implicit ev: T =:= Done): Unit = expectValue(Done.asInstanceOf[T]) - /** - * Assert that this inbox has *never* received a reply. - */ + /** Assert that this inbox has *never* received a reply. 
*/ def expectNoReply(): StatusReplyInbox[T] def hasReply: Boolean diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/TestProbe.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/TestProbe.scala index 22b8dabf15f..7c74fa5eb6b 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/TestProbe.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/TestProbe.scala @@ -20,24 +20,16 @@ import akka.annotation.InternalApi object FishingOutcomes { - /** - * Complete fishing and return all messages up until this - */ + /** Complete fishing and return all messages up until this */ val complete: FishingOutcome = FishingOutcome.Complete - /** - * Consume this message, collect it into the result, and continue with the next message - */ + /** Consume this message, collect it into the result, and continue with the next message */ val continue: FishingOutcome = FishingOutcome.Continue - /** - * Consume this message, but do not collect it into the result, and continue with the next message - */ + /** Consume this message, but do not collect it into the result, and continue with the next message */ val continueAndIgnore: FishingOutcome = FishingOutcome.ContinueAndIgnore - /** - * Fail fishing with a custom error message - */ + /** Fail fishing with a custom error message */ def fail(message: String): FishingOutcome = FishingOutcome.Fail(message) } @@ -62,9 +54,7 @@ object TestProbe { implicit protected def settings: TestKitSettings - /** - * ActorRef for this TestProbe - */ + /** ActorRef for this TestProbe */ def ref: ActorRef[M] /** @@ -105,14 +95,10 @@ object TestProbe { */ def within[T](min: FiniteDuration, max: FiniteDuration)(f: => T): T - /** - * Same as calling `within(0 seconds, max)(f)`. - */ + /** Same as calling `within(0 seconds, max)(f)`. 
*/ def within[T](max: FiniteDuration)(f: => T): T - /** - * Same as `expectMessage(remainingOrDefault, obj)`, but using the default timeout as deadline. - */ + /** Same as `expectMessage(remainingOrDefault, obj)`, but using the default timeout as deadline. */ def expectMessage[T <: M](obj: T): T /** @@ -145,19 +131,13 @@ object TestProbe { */ def expectNoMessage(): Unit - /** - * Same as `expectMessageType[T](remainingOrDefault)`, but using the default timeout as deadline. - */ + /** Same as `expectMessageType[T](remainingOrDefault)`, but using the default timeout as deadline. */ def expectMessageType[T <: M](implicit t: ClassTag[T]): T - /** - * Expect a message of type T to arrive within `max` or fail. `max` is dilated. - */ + /** Expect a message of type T to arrive within `max` or fail. `max` is dilated. */ def expectMessageType[T <: M](max: FiniteDuration)(implicit t: ClassTag[T]): T - /** - * Receive one message of type `M` within the default timeout as deadline. - */ + /** Receive one message of type `M` within the default timeout as deadline. */ def receiveMessage(): M /** @@ -166,9 +146,7 @@ object TestProbe { */ def receiveMessage(max: FiniteDuration): M - /** - * Same as `receiveMessages(n, remaining)` but using the default timeout as deadline. - */ + /** Same as `receiveMessages(n, remaining)` but using the default timeout as deadline. 
*/ def receiveMessages(n: Int): immutable.Seq[M] /** @@ -198,19 +176,13 @@ object TestProbe { */ def fishForMessage(max: FiniteDuration, hint: String)(fisher: M => FishingOutcome): immutable.Seq[M] - /** - * Same as `fishForMessage` but accepting a partial function and failing for non-matches - */ + /** Same as `fishForMessage` but accepting a partial function and failing for non-matches */ def fishForMessagePF(max: FiniteDuration, hint: String)(fisher: PartialFunction[M, FishingOutcome]): immutable.Seq[M] - /** - * Same as the other `fishForMessage` but with no hint - */ + /** Same as the other `fishForMessage` but with no hint */ def fishForMessage(max: FiniteDuration)(fisher: M => FishingOutcome): immutable.Seq[M] - /** - * Same as `fishForMessage` but with no hint, accepting a partial function and failing for non-matches - */ + /** Same as `fishForMessage` but with no hint, accepting a partial function and failing for non-matches */ def fishForMessagePF(max: FiniteDuration)(fisher: PartialFunction[M, FishingOutcome]): immutable.Seq[M] /** @@ -219,9 +191,7 @@ object TestProbe { */ def expectTerminated[U](actorRef: ActorRef[U], max: FiniteDuration): Unit - /** - * Expect the given actor to be stopped or stop within the default timeout. - */ + /** Expect the given actor to be stopped or stop within the default timeout. */ def expectTerminated[U](actorRef: ActorRef[U]): Unit /** @@ -250,14 +220,10 @@ object TestProbe { */ def awaitAssert[A](a: => A): A - /** - * Stops the [[TestProbe.ref]], which is useful when testing watch and termination. - */ + /** Stops the [[TestProbe.ref]], which is useful when testing watch and termination. 
*/ def stop(): Unit - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def asJava: akka.actor.testkit.typed.javadsl.TestProbe[M] } diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/package.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/package.scala index 14dafa6a717..805f1a096c5 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/package.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/package.scala @@ -21,7 +21,6 @@ package object scaladsl { * * Uses the scaling factor from the `TestTimeFactor` in the [[TestKitSettings]] * (in implicit scope). - * */ implicit class TestDuration(val duration: FiniteDuration) extends AnyVal { def dilated(implicit settings: TestKitSettings): FiniteDuration = settings.dilated(duration) diff --git a/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/ActorTestKitSpec.scala b/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/ActorTestKitSpec.scala index 304d0d1e523..cf237a5799c 100644 --- a/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/ActorTestKitSpec.scala +++ b/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/ActorTestKitSpec.scala @@ -81,10 +81,12 @@ class ActorTestKitSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike wi "spawn a named actor" in { val spawnedWithName = Promise[String]() - spawn(Behaviors.setup[AnyRef] { context => - spawnedWithName.trySuccess(context.self.path.name) - Behaviors.empty - }, "name") + spawn( + Behaviors.setup[AnyRef] { context => + spawnedWithName.trySuccess(context.self.path.name) + Behaviors.empty + }, + "name") spawnedWithName.future.futureValue should ===("name") } diff --git a/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/BehaviorTestKitSpec.scala 
b/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/BehaviorTestKitSpec.scala index e088610c0a5..95bced21faf 100644 --- a/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/BehaviorTestKitSpec.scala +++ b/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/BehaviorTestKitSpec.scala @@ -92,9 +92,11 @@ object BehaviorTestKitSpec { } Behaviors.same case SpawnAdapterWithName(name) => - context.spawnMessageAdapter({ (r: Reproduce) => - SpawnAnonymous(r.times) - }, name) + context.spawnMessageAdapter( + { (r: Reproduce) => + SpawnAnonymous(r.times) + }, + name) Behaviors.same case SpawnAndWatchUnwatch(name) => val c = context.spawn(Child.initial, name) @@ -165,10 +167,9 @@ object BehaviorTestKitSpec { throw new RuntimeException(s"Unexpected command: $unexpected") } } - .receiveSignal { - case (context, Terminated(_)) => - context.log.debug("Terminated") - Behaviors.same + .receiveSignal { case (context, Terminated(_)) => + context.log.debug("Terminated") + Behaviors.same } } } @@ -231,8 +232,7 @@ class BehaviorTestKitSpec extends AnyWordSpec with Matchers with LogCapturing { val testkit = BehaviorTestKit[Parent.Command](Parent.init) testkit.run(SpawnChildren(1)) val ae = intercept[AssertionError] { - testkit.expectEffectPF { - case SpawnedAnonymous(_, _) => + testkit.expectEffectPF { case SpawnedAnonymous(_, _) => } } ae.getMessage should startWith("expected matching effect but got: ") @@ -241,16 +241,16 @@ class BehaviorTestKitSpec extends AnyWordSpec with Matchers with LogCapturing { "allow assertions using partial functions - match" in { val testkit = BehaviorTestKit[Parent.Command](Parent.init) testkit.run(SpawnChildren(1)) - val childName = testkit.expectEffectPF { - case Spawned(_, name, _) => name + val childName = testkit.expectEffectPF { case Spawned(_, name, _) => + name } childName should ===("child0") } "allow assertions using partial functions - match on NoEffect" in { val testkit = 
BehaviorTestKit[Parent.Command](Parent.init) - val hasEffects = testkit.expectEffectPF { - case NoEffects => false + val hasEffects = testkit.expectEffectPF { case NoEffects => + false } hasEffects should ===(false) } @@ -350,11 +350,11 @@ class BehaviorTestKitSpec extends AnyWordSpec with Matchers with LogCapturing { adaptorRef ! 2 testkit.selfInbox().hasMessages should be(true) testkit.runOne() - testkit.expectEffectPF { - case Spawned(_, childName, _) => childName should equal("child0") + testkit.expectEffectPF { case Spawned(_, childName, _) => + childName should equal("child0") } - testkit.expectEffectPF { - case Spawned(_, childName, _) => childName should equal("child1") + testkit.expectEffectPF { case Spawned(_, childName, _) => + childName should equal("child1") } } } @@ -455,20 +455,19 @@ class BehaviorTestKitSpec extends AnyWordSpec with Matchers with LogCapturing { testkit.run(ScheduleCommand("abc", 42.seconds, Effect.TimerScheduled.SingleMode, SpawnChild)) testkit.expectEffectPF { case Effect.TimerScheduled( - "abc", - SpawnChild, - finiteDuration, - Effect.TimerScheduled.SingleMode, - false /*not overriding*/ ) => + "abc", + SpawnChild, + finiteDuration, + Effect.TimerScheduled.SingleMode, + false /*not overriding*/ ) => finiteDuration should equal(42.seconds) } testkit.runAsk(IsTimerActive("abc", _)).expectReply(true) testkit.run(CancelScheduleCommand("abc")) - testkit.expectEffectPF { - case Effect.TimerCancelled(key) => - key should equal("abc") + testkit.expectEffectPF { case Effect.TimerCancelled(key) => + key should equal("abc") } testkit.runAsk(IsTimerActive("abc", _)).expectReply(false) @@ -489,10 +488,9 @@ class BehaviorTestKitSpec extends AnyWordSpec with Matchers with LogCapturing { } send() testkit.runOne() - testkit.expectEffectPF { - case Effect.Spawned(_, "child", _) => + testkit.expectEffectPF { case Effect.Spawned(_, "child", _) => } - //no effect since the timer's mode was single, hence removed after fired + // no effect since the 
timer's mode was single, hence removed after fired send() testkit.selfInbox().hasMessages should be(false) } @@ -513,19 +511,18 @@ class BehaviorTestKitSpec extends AnyWordSpec with Matchers with LogCapturing { } send() testkit.runOne() - val child: ActorRef[String] = testkit.expectEffectPF { - case spawned @ Effect.Spawned(_, "child", _) => spawned.asInstanceOf[Effect.Spawned[String]].ref + val child: ActorRef[String] = testkit.expectEffectPF { case spawned @ Effect.Spawned(_, "child", _) => + spawned.asInstanceOf[Effect.Spawned[String]].ref } testkit.run(StopChild(child)) testkit.expectEffect { Effect.Stopped("child") } - //when scheduling with fixed rate the timer remains scheduled + // when scheduling with fixed rate the timer remains scheduled send() testkit.runOne() - testkit.expectEffectPF { - case Effect.Spawned(_, "child", _) => + testkit.expectEffectPF { case Effect.Spawned(_, "child", _) => } testkit.run(CancelScheduleCommand("abc")) diff --git a/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/TestAppenderSpec.scala b/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/TestAppenderSpec.scala index 3d455747c3f..6390c8fdf30 100644 --- a/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/TestAppenderSpec.scala +++ b/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/TestAppenderSpec.scala @@ -14,8 +14,7 @@ import org.slf4j.LoggerFactory import akka.actor.testkit.typed.TestException class TestAppenderSpec - extends ScalaTestWithActorTestKit( - """ + extends ScalaTestWithActorTestKit(""" # increase to avoid spurious failures in "find unexpected async events withOccurrences(0)" akka.actor.testkit.typed.expect-no-message-default = 1000 ms """) @@ -67,11 +66,10 @@ class TestAppenderSpec "only filter events for given logger name" in { val count = new AtomicInteger LoggingTestKit - .custom({ - case logEvent => - count.incrementAndGet() - logEvent.message == "Hello from 
right logger" && logEvent.loggerName == classOf[AnotherLoggerClass].getName - }) + .custom { case logEvent => + count.incrementAndGet() + logEvent.message == "Hello from right logger" && logEvent.loggerName == classOf[AnotherLoggerClass].getName + } .withOccurrences(2) .withLoggerName(classOf[AnotherLoggerClass].getName) .expect { diff --git a/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/TestProbeSpec.scala b/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/TestProbeSpec.scala index 3dc3db2f4a3..f5b0a2b1348 100644 --- a/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/TestProbeSpec.scala +++ b/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/TestProbeSpec.scala @@ -19,12 +19,12 @@ class TestProbeSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with def compileOnlyApiTest(): Unit = { val probe = TestProbe[AnyRef]() - probe.fishForMessage(shortDuration) { - case _ => FishingOutcomes.complete + probe.fishForMessage(shortDuration) { case _ => + FishingOutcomes.complete } - probe.awaitAssert({ + probe.awaitAssert { "result" - }) + } probe.expectMessageType[String] probe.expectMessage("whoa") probe.expectNoMessage() @@ -105,8 +105,8 @@ class TestProbeSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with probe.ref ! "two" intercept[AssertionError] { - probe.fishForMessagePF(shortDuration) { - case "one" => FishingOutcomes.continue + probe.fishForMessagePF(shortDuration) { case "one" => + FishingOutcomes.continue } } } @@ -117,8 +117,8 @@ class TestProbeSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with probe.ref ! 
"one" intercept[AssertionError] { - probe.fishForMessagePF(shortDuration) { - case "one" => FishingOutcomes.continue + probe.fishForMessagePF(shortDuration) { case "one" => + FishingOutcomes.continue } } } diff --git a/akka-actor-testkit-typed/src/test/scala/docs/akka/actor/testkit/typed/scaladsl/AsyncTestingExampleSpec.scala b/akka-actor-testkit-typed/src/test/scala/docs/akka/actor/testkit/typed/scaladsl/AsyncTestingExampleSpec.scala index 44805417d61..99902e0b2a2 100644 --- a/akka-actor-testkit-typed/src/test/scala/docs/akka/actor/testkit/typed/scaladsl/AsyncTestingExampleSpec.scala +++ b/akka-actor-testkit-typed/src/test/scala/docs/akka/actor/testkit/typed/scaladsl/AsyncTestingExampleSpec.scala @@ -27,20 +27,19 @@ import scala.util.Success import scala.util.Try object AsyncTestingExampleSpec { - //#under-test + // #under-test object Echo { case class Ping(message: String, response: ActorRef[Pong]) case class Pong(message: String) - def apply(): Behavior[Ping] = Behaviors.receiveMessage { - case Ping(m, replyTo) => - replyTo ! Pong(m) - Behaviors.same + def apply(): Behavior[Ping] = Behaviors.receiveMessage { case Ping(m, replyTo) => + replyTo ! 
Pong(m) + Behaviors.same } } - //#under-test + // #under-test - //#under-test-2 + // #under-test-2 case class Message(i: Int, replyTo: ActorRef[Try[Int]]) class Producer(publisher: ActorRef[Message])(implicit scheduler: Scheduler) { @@ -54,7 +53,7 @@ object AsyncTestingExampleSpec { } } - //#under-test-2 + // #under-test-2 } @@ -62,29 +61,29 @@ object AsyncTestingExampleSpec { class AsyncTestingExampleSpec extends AnyWordSpec with BeforeAndAfterAll - //#test-header + // #test-header with LogCapturing - //#test-header + // #test-header with Matchers { val testKit = ActorTestKit() - //#test-header + // #test-header import AsyncTestingExampleSpec._ "A testkit" must { "support verifying a response" in { - //#test-spawn + // #test-spawn val pinger = testKit.spawn(Echo(), "ping") val probe = testKit.createTestProbe[Echo.Pong]() pinger ! Echo.Ping("hello", probe.ref) probe.expectMessage(Echo.Pong("hello")) - //#test-spawn + // #test-spawn } "support verifying a response - anonymous" in { - //#test-spawn-anonymous + // #test-spawn-anonymous val pinger = testKit.spawn(Echo()) - //#test-spawn-anonymous + // #test-spawn-anonymous val probe = testKit.createTestProbe[Echo.Pong]() pinger ! Echo.Ping("hello", probe.ref) probe.expectMessage(Echo.Pong("hello")) @@ -93,7 +92,7 @@ class AsyncTestingExampleSpec "be able to stop actors under test" in { // Will fail with 'name not unique' exception if the first actor is not fully stopped val probe = testKit.createTestProbe[Echo.Pong]() - //#test-stop-actors + // #test-stop-actors val pinger1 = testKit.spawn(Echo(), "pinger") pinger1 ! Echo.Ping("hello", probe.ref) probe.expectMessage(Echo.Pong("hello")) @@ -104,12 +103,12 @@ class AsyncTestingExampleSpec pinger2 ! 
Echo.Ping("hello", probe.ref) probe.expectMessage(Echo.Pong("hello")) testKit.stop(pinger2, 10.seconds) // Custom timeout - //#test-stop-actors + // #test-stop-actors } "support observing mocked behavior" in { - //#test-observe-mocked-behavior + // #test-observe-mocked-behavior import testKit._ // simulate the happy path @@ -130,13 +129,13 @@ class AsyncTestingExampleSpec val msg = probe.expectMessageType[Message] msg.i shouldBe i } - //#test-observe-mocked-behavior + // #test-observe-mocked-behavior } } - //#test-shutdown + // #test-shutdown override def afterAll(): Unit = testKit.shutdownTestKit() - //#test-shutdown + // #test-shutdown //#test-header } //#test-header diff --git a/akka-actor-testkit-typed/src/test/scala/docs/akka/actor/testkit/typed/scaladsl/ManualTimerExampleSpec.scala b/akka-actor-testkit-typed/src/test/scala/docs/akka/actor/testkit/typed/scaladsl/ManualTimerExampleSpec.scala index 97279b1234c..000877cc044 100644 --- a/akka-actor-testkit-typed/src/test/scala/docs/akka/actor/testkit/typed/scaladsl/ManualTimerExampleSpec.scala +++ b/akka-actor-testkit-typed/src/test/scala/docs/akka/actor/testkit/typed/scaladsl/ManualTimerExampleSpec.scala @@ -43,7 +43,7 @@ class ManualTimerExampleSpec manualTime.expectNoMessageFor(10.seconds, probe) } - //#manual-scheduling-simple + // #manual-scheduling-simple "schedule repeated ticks" in { case object Tick @@ -113,7 +113,7 @@ class ManualTimerExampleSpec probe.expectMessage(Tock(2)) } - //#manual-scheduling-simple + // #manual-scheduling-simple } } //#manual-scheduling-simple diff --git a/akka-actor-testkit-typed/src/test/scala/docs/akka/actor/testkit/typed/scaladsl/SyncTestingExampleSpec.scala b/akka-actor-testkit-typed/src/test/scala/docs/akka/actor/testkit/typed/scaladsl/SyncTestingExampleSpec.scala index 92442b6aeb1..abb474475c6 100644 --- a/akka-actor-testkit-typed/src/test/scala/docs/akka/actor/testkit/typed/scaladsl/SyncTestingExampleSpec.scala +++ 
b/akka-actor-testkit-typed/src/test/scala/docs/akka/actor/testkit/typed/scaladsl/SyncTestingExampleSpec.scala @@ -22,13 +22,13 @@ import scala.concurrent.duration._ import scala.util.{ Failure, Success } object SyncTestingExampleSpec { - //#child + // #child val childActor = Behaviors.receiveMessage[String] { _ => Behaviors.same[String] } - //#child + // #child - //#under-test + // #under-test object Hello { sealed trait Command case object CreateAnonymousChild extends Command @@ -82,7 +82,7 @@ object SyncTestingExampleSpec { case class Question(q: String, replyTo: ActorRef[Answer]) case class Answer(a: String) } - //#under-test + // #under-test object ConfigAware { sealed trait Command @@ -112,61 +112,61 @@ class SyncTestingExampleSpec extends AnyWordSpec with Matchers { "Typed actor synchronous testing" must { "record spawning" in { - //#test-child + // #test-child val testKit = BehaviorTestKit(Hello()) testKit.run(Hello.CreateChild("child")) testKit.expectEffect(Spawned(childActor, "child")) - //#test-child + // #test-child } "record spawning anonymous" in { - //#test-anonymous-child + // #test-anonymous-child val testKit = BehaviorTestKit(Hello()) testKit.run(Hello.CreateAnonymousChild) testKit.expectEffect(SpawnedAnonymous(childActor)) - //#test-anonymous-child + // #test-anonymous-child } "record message sends" in { - //#test-message + // #test-message val testKit = BehaviorTestKit(Hello()) val inbox = TestInbox[String]() testKit.run(Hello.SayHello(inbox.ref)) inbox.expectMessage("hello") - //#test-message + // #test-message } "send a message to a spawned child" in { - //#test-child-message + // #test-child-message val testKit = BehaviorTestKit(Hello()) testKit.run(Hello.SayHelloToChild("child")) val childInbox = testKit.childInbox[String]("child") childInbox.expectMessage("hello") - //#test-child-message + // #test-child-message } "send a message to an anonymous spawned child" in { - //#test-child-message-anonymous + // #test-child-message-anonymous val 
testKit = BehaviorTestKit(Hello()) testKit.run(Hello.SayHelloToAnonymousChild) val child = testKit.expectEffectType[SpawnedAnonymous[String]] val childInbox = testKit.childInbox(child.ref) childInbox.expectMessage("hello stranger") - //#test-child-message-anonymous + // #test-child-message-anonymous } "log a message to the logger" in { - //#test-check-logging + // #test-check-logging val testKit = BehaviorTestKit(Hello()) val inbox = TestInbox[String]("Inboxer") testKit.run(Hello.LogAndSayHello(inbox.ref)) testKit.logEntries() shouldBe Seq(CapturedLogEvent(Level.INFO, "Saying hello to Inboxer")) - //#test-check-logging + // #test-check-logging } "support the contextual ask pattern" in { - //#test-contextual-ask + // #test-contextual-ask val testKit = BehaviorTestKit(Hello()) val askee = TestInbox[Hello.Question]() testKit.run(Hello.AskAQuestion(askee.ref)) @@ -200,7 +200,7 @@ class SyncTestingExampleSpec extends AnyWordSpec with Matchers { // The response timeout can be inspected val responseTimeout = effect.responseTimeout - //#test-contextual-ask + // #test-contextual-ask // pro-forma assertions to satisfy warn-unused while following the pattern in this spec of not // using ScalaTest matchers in code exposed through paradox diff --git a/akka-actor-testkit-typed/src/test/scala/docs/akka/actor/testkit/typed/scaladsl/TestConfigExample.scala b/akka-actor-testkit-typed/src/test/scala/docs/akka/actor/testkit/typed/scaladsl/TestConfigExample.scala index 8e79aeb36d6..9fd83790031 100644 --- a/akka-actor-testkit-typed/src/test/scala/docs/akka/actor/testkit/typed/scaladsl/TestConfigExample.scala +++ b/akka-actor-testkit-typed/src/test/scala/docs/akka/actor/testkit/typed/scaladsl/TestConfigExample.scala @@ -8,24 +8,26 @@ object TestConfigExample { def illustrateApplicationConfig(): Unit = { - //#default-application-conf + // #default-application-conf import com.typesafe.config.ConfigFactory ConfigFactory.load() - //#default-application-conf + // #default-application-conf - 
//#parse-string + // #parse-string ConfigFactory.parseString(""" akka.loglevel = DEBUG akka.log-config-on-start = on """) - //#parse-string + // #parse-string - //#fallback-application-conf - ConfigFactory.parseString(""" + // #fallback-application-conf + ConfigFactory + .parseString(""" akka.loglevel = DEBUG akka.log-config-on-start = on - """).withFallback(ConfigFactory.load()) - //#fallback-application-conf + """) + .withFallback(ConfigFactory.load()) + // #fallback-application-conf } } diff --git a/akka-actor-tests/src/test/scala/akka/AkkaExceptionSpec.scala b/akka-actor-tests/src/test/scala/akka/AkkaExceptionSpec.scala index dc7efec7463..22c593992a8 100644 --- a/akka-actor-tests/src/test/scala/akka/AkkaExceptionSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/AkkaExceptionSpec.scala @@ -18,11 +18,11 @@ class AkkaExceptionSpec extends AnyWordSpec with Matchers { "AkkaException" must { "have a AkkaException(String msg) constructor to be serialization friendly" in { - //if the call to this method completes, we know what there is at least a single constructor which has - //the expected argument type. + // if the call to this method completes, we know what there is at least a single constructor which has + // the expected argument type. verify(classOf[AkkaException]) - //lets also try it for the exception that triggered this bug to be discovered. + // lets also try it for the exception that triggered this bug to be discovered. 
verify(classOf[ActorKilledException]) } } diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorConfigurationVerificationSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorConfigurationVerificationSpec.scala index 6737692f929..4895bdd0a77 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorConfigurationVerificationSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorConfigurationVerificationSpec.scala @@ -18,8 +18,7 @@ import akka.testkit.TestEvent._ object ActorConfigurationVerificationSpec { class TestActor extends Actor { - def receive: Receive = { - case _ => + def receive: Receive = { case _ => } } diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorCreationPerfSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorCreationPerfSpec.scala index d4b0e1adfb2..fd96cbea1e9 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorCreationPerfSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorCreationPerfSpec.scala @@ -41,14 +41,14 @@ object ActorCreationPerfSpec { case object Waited class EmptyActor extends Actor { - def receive = { - case IsAlive => sender() ! Alive + def receive = { case IsAlive => + sender() ! Alive } } class EmptyArgsActor(val foo: Int, val bar: Int) extends Actor { - def receive = { - case IsAlive => sender() ! Alive + def receive = { case IsAlive => + sender() ! Alive } } @@ -75,13 +75,12 @@ object ActorCreationPerfSpec { def waiting(number: Int, replyTo: ActorRef): Receive = { var current = number - { - case Alive => - current -= 1 - if (current == 0) { - replyTo ! Waited - context.unbecome() - } + { case Alive => + current -= 1 + if (current == 0) { + replyTo ! Waited + context.unbecome() + } } } } @@ -104,13 +103,12 @@ object ActorCreationPerfSpec { def waiting(number: Int, replyTo: ActorRef): Receive = { var current = number - { - case Alive => - current -= 1 - if (current == 0) { - replyTo ! 
Waited - context.unbecome() - } + { case Alive => + current -= 1 + if (current == 0) { + replyTo ! Waited + context.unbecome() + } } } } diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorLifeCycleSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorLifeCycleSpec.scala index 780cb067e7c..42dbce76823 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorLifeCycleSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorLifeCycleSpec.scala @@ -158,24 +158,23 @@ class ActorLifeCycleSpec extends AkkaSpec with BeforeAndAfterEach with ImplicitS import akka.pattern._ - override def receive: Receive = { - case "ping" => - val replyTo = sender() + override def receive: Receive = { case "ping" => + val replyTo = sender() - context.stop(self) + context.stop(self) - Future { - latch.await() - Thread.sleep(50) - "po" - } + Future { + latch.await() + Thread.sleep(50) + "po" + } // Here, we implicitly close over the actor instance and access the context // when the flatMap thunk is run. Previously, the context was nulled when the actor // was terminated. This isn't done any more. Still, the pattern of `import context.dispatcher` // is discouraged as closing over `context` is unsafe in general. 
- .flatMap(x => Future { x + "ng" } /* implicitly: (this.context.dispatcher) */ ) - .recover { case _: NullPointerException => "npe" } - .pipeTo(replyTo) + .flatMap(x => Future { x + "ng" } /* implicitly: (this.context.dispatcher) */ ) + .recover { case _: NullPointerException => "npe" } + .pipeTo(replyTo) } } diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorMailboxSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorMailboxSpec.scala index dbf4eb8e246..f921df7e33e 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorMailboxSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorMailboxSpec.scala @@ -171,8 +171,8 @@ object ActorMailboxSpec { """) class QueueReportingActor extends Actor { - def receive = { - case _ => sender() ! context.asInstanceOf[ActorCell].mailbox.messageQueue + def receive = { case _ => + sender() ! context.asInstanceOf[ActorCell].mailbox.messageQueue } } diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala index df51bbf3e26..4d09c68fbd1 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala @@ -111,10 +111,12 @@ object ActorRefSpec { } } -class ActorRefSpec extends AkkaSpec(""" +class ActorRefSpec + extends AkkaSpec(""" # testing Java serialization of ActorRef akka.actor.allow-java-serialization = on - """) with DefaultTimeout { + """) + with DefaultTimeout { import akka.actor.ActorRefSpec._ def promiseIntercept(f: => Actor)(to: Promise[Actor]): Actor = @@ -159,8 +161,8 @@ class ActorRefSpec extends AkkaSpec(""" EventFilter[ActorInitializationException](occurrences = 1).intercept { intercept[akka.actor.ActorInitializationException] { - wrap( - result => actorOf(Props(promiseIntercept(new FailingOuterActor(actorOf(Props(new InnerActor))))(result)))) + wrap(result => + actorOf(Props(promiseIntercept(new FailingOuterActor(actorOf(Props(new 
InnerActor))))(result)))) } contextStackMustBeEmpty() @@ -168,8 +170,8 @@ class ActorRefSpec extends AkkaSpec(""" EventFilter[ActorInitializationException](occurrences = 1).intercept { intercept[akka.actor.ActorInitializationException] { - wrap( - result => actorOf(Props(new OuterActor(actorOf(Props(promiseIntercept(new FailingInnerActor)(result))))))) + wrap(result => + actorOf(Props(new OuterActor(actorOf(Props(promiseIntercept(new FailingInnerActor)(result))))))) } contextStackMustBeEmpty() @@ -196,9 +198,9 @@ class ActorRefSpec extends AkkaSpec(""" EventFilter[ActorInitializationException](occurrences = 2).intercept { intercept[akka.actor.ActorInitializationException] { - wrap( - result => - actorOf(Props(new FailingInheritingOuterActor( + wrap(result => + actorOf( + Props(new FailingInheritingOuterActor( actorOf(Props(promiseIntercept(new FailingInheritingInnerActor)(result))))))) } @@ -247,22 +249,20 @@ class ActorRefSpec extends AkkaSpec(""" EventFilter[ActorInitializationException](occurrences = 1).intercept { intercept[akka.actor.ActorInitializationException] { - wrap( - result => - actorOf( - Props(new OuterActor(actorOf(Props(promiseIntercept({ new InnerActor; new InnerActor })(result))))))) + wrap(result => + actorOf(Props(new OuterActor(actorOf(Props(promiseIntercept { new InnerActor; new InnerActor }(result))))))) } contextStackMustBeEmpty() } EventFilter[ActorInitializationException](occurrences = 1).intercept { - (intercept[java.lang.IllegalStateException] { + intercept[java.lang.IllegalStateException] { wrap(result => - actorOf(Props(new OuterActor(actorOf(Props(promiseIntercept({ + actorOf(Props(new OuterActor(actorOf(Props(promiseIntercept { throw new IllegalStateException("Ur state be b0rked") - })(result))))))) - }).getMessage should ===("Ur state be b0rked") + }(result))))))) + }.getMessage should ===("Ur state be b0rked") contextStackMustBeEmpty() } @@ -272,9 +272,11 @@ class ActorRefSpec extends AkkaSpec(""" 
EventFilter[ActorInitializationException](occurrences = 1, pattern = "/user/failingActor:").intercept { intercept[java.lang.IllegalStateException] { wrap(result => - system.actorOf(Props(promiseIntercept({ - throw new IllegalStateException - })(result)), "failingActor")) + system.actorOf( + Props(promiseIntercept { + throw new IllegalStateException + }(result)), + "failingActor")) } } } @@ -325,9 +327,9 @@ class ActorRefSpec extends AkkaSpec(""" val in = new ObjectInputStream(new ByteArrayInputStream(baos.toByteArray)) - (intercept[java.lang.IllegalStateException] { + intercept[java.lang.IllegalStateException] { in.readObject - }).getMessage should ===( + }.getMessage should ===( "Trying to deserialize a serialized ActorRef without an ActorSystem in scope." + " Use 'akka.serialization.JavaSerializer.currentSystem.withValue(system) { ... }'") } @@ -422,8 +424,8 @@ class ActorRefSpec extends AkkaSpec(""" } })) - val ffive = (ref.ask(5)(timeout)).mapTo[String] - val fnull = (ref.ask(0)(timeout)).mapTo[String] + val ffive = ref.ask(5)(timeout).mapTo[String] + val fnull = ref.ask(0)(timeout).mapTo[String] ref ! PoisonPill Await.result(ffive, timeout.duration) should ===("five") @@ -457,17 +459,21 @@ class ActorRefSpec extends AkkaSpec(""" } "be able to check for existence of children" in { - val parent = system.actorOf(Props(new Actor { + val parent = system.actorOf( + Props(new Actor { - val child = context.actorOf(Props(new Actor { - def receive = { case _ => } - }), "child") + val child = context.actorOf( + Props(new Actor { + def receive = { case _ => } + }), + "child") - def receive = { case name: String => sender() ! context.child(name).isDefined } - }), "parent") + def receive = { case name: String => sender() ! context.child(name).isDefined } + }), + "parent") - assert(Await.result((parent ? "child"), timeout.duration) === true) - assert(Await.result((parent ? "whatnot"), timeout.duration) === false) + assert(Await.result(parent ? 
"child", timeout.duration) === true) + assert(Await.result(parent ? "whatnot", timeout.duration) === false) } } } diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorSelectionSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorSelectionSpec.scala index d9fcc8c1ca4..4d0d4ccd67e 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorSelectionSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorSelectionSpec.scala @@ -49,17 +49,20 @@ class ActorSelectionSpec extends AkkaSpec with DefaultTimeout { val root = sysImpl.lookupRoot def empty(path: String) = - new EmptyLocalActorRef(sysImpl.provider, path match { - case RelativeActorPath(elems) => sysImpl.lookupRoot.path / elems - case _ => throw new RuntimeException() - }, system.eventStream) + new EmptyLocalActorRef( + sysImpl.provider, + path match { + case RelativeActorPath(elems) => sysImpl.lookupRoot.path / elems + case _ => throw new RuntimeException() + }, + system.eventStream) val idProbe = TestProbe() def identify(selection: ActorSelection): Option[ActorRef] = { selection.tell(Identify(selection), idProbe.ref) - val result = idProbe.expectMsgPF() { - case ActorIdentity(`selection`, ref) => ref + val result = idProbe.expectMsgPF() { case ActorIdentity(`selection`, ref) => + ref } val asked = Await.result((selection ? 
Identify(selection)).mapTo[ActorIdentity], timeout.duration) asked.ref should ===(result) @@ -128,7 +131,7 @@ class ActorSelectionSpec extends AkkaSpec with DefaultTimeout { val a2 = system.actorOf(p, name) a2.path should ===(a1.path) a2.path.toString should ===(a1.path.toString) - a2 should not be (a1) + a2 should not be a1 a2.toString should not be (a1.toString) watch(a2) @@ -253,12 +256,13 @@ class ActorSelectionSpec extends AkkaSpec with DefaultTimeout { } def check(looker: ActorRef): Unit = { for ((l, r) <- Seq( - SelectString("a/b/c") -> None, - SelectString("akka://all-systems/Nobody") -> None, - SelectPath(system / "hallo") -> None, - SelectPath(looker.path.child("hallo")) -> None, // test Java API - SelectPath(looker.path.descendant(Seq("a", "b").asJava)) -> None) // test Java API - ) checkOne(looker, l, r) + SelectString("a/b/c") -> None, + SelectString("akka://all-systems/Nobody") -> None, + SelectPath(system / "hallo") -> None, + SelectPath(looker.path.child("hallo")) -> None, // test Java API + SelectPath(looker.path.descendant(Seq("a", "b").asJava)) -> None + ) // test Java API + ) checkOne(looker, l, r) } for (looker <- all) check(looker) } @@ -288,9 +292,9 @@ class ActorSelectionSpec extends AkkaSpec with DefaultTimeout { "send messages with correct sender" in { implicit val sender = c1 ActorSelection(c21, "../../*") ! GetSender(testActor) - val actors = Set() ++ receiveWhile(messages = 2) { - case `c1` => lastSender - } + val actors = Set() ++ receiveWhile(messages = 2) { case `c1` => + lastSender + } actors should ===(Set(c1, c2)) expectNoMessage() } @@ -298,8 +302,8 @@ class ActorSelectionSpec extends AkkaSpec with DefaultTimeout { "drop messages which cannot be delivered" in { implicit val sender = c2 ActorSelection(c21, "../../*/c21") ! 
GetSender(testActor) - val actors = receiveWhile(messages = 2) { - case `c2` => lastSender + val actors = receiveWhile(messages = 2) { case `c2` => + lastSender } actors should ===(Seq(c21)) expectNoMessage() diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorSystemDispatcherSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorSystemDispatcherSpec.scala index db3eff4effe..75d1ce2b53b 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorSystemDispatcherSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorSystemDispatcherSpec.scala @@ -31,10 +31,12 @@ object ActorSystemDispatchersSpec { } -class ActorSystemDispatchersSpec extends AkkaSpec(ConfigFactory.parseString(""" +class ActorSystemDispatchersSpec + extends AkkaSpec(ConfigFactory.parseString(""" dispatcher-loop-1 = "dispatcher-loop-2" dispatcher-loop-2 = "dispatcher-loop-1" - """)) with ImplicitSender { + """)) + with ImplicitSender { import ActorSystemDispatchersSpec._ @@ -48,8 +50,8 @@ class ActorSystemDispatchersSpec extends AkkaSpec(ConfigFactory.parseString(""" try { val ref = system2.actorOf(Props(new Actor { - def receive = { - case "ping" => sender() ! "pong" + def receive = { case "ping" => + sender() ! "pong" } })) @@ -123,8 +125,8 @@ class ActorSystemDispatchersSpec extends AkkaSpec(ConfigFactory.parseString(""" try { val ref = system2.actorOf(Props(new Actor { - def receive = { - case "ping" => sender() ! "pong" + def receive = { case "ping" => + sender() ! 
"pong" } }).withDispatcher(Dispatchers.InternalDispatcherId)) diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala index cfceb950102..151425b4741 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala @@ -34,10 +34,10 @@ object ActorSystemSpec { case n: Int => master = sender() terminaters = Set() ++ (for (_ <- 1 to n) yield { - val man = context.watch(context.system.actorOf(Props[Terminater]())) - man ! "run" - man - }) + val man = context.watch(context.system.actorOf(Props[Terminater]())) + man ! "run" + man + }) case Terminated(child) if terminaters contains child => terminaters -= child if (terminaters.isEmpty) { @@ -55,14 +55,14 @@ object ActorSystemSpec { } class Terminater extends Actor { - def receive = { - case "run" => context.stop(self) + def receive = { case "run" => + context.stop(self) } } class Strategy extends SupervisorStrategyConfigurator { - def create() = OneForOneStrategy() { - case _ => SupervisorStrategy.Escalate + def create() = OneForOneStrategy() { case _ => + SupervisorStrategy.Escalate } } @@ -72,8 +72,7 @@ object ActorSystemSpec { context.actorSelection(ref1.path.toString).tell(Identify(ref1), testActor) latch.countDown() - def receive = { - case _ => + def receive = { case _ => } } @@ -103,9 +102,7 @@ object ActorSystemSpec { } } - /** - * Returns the same dispatcher instance for each invocation - */ + /** Returns the same dispatcher instance for each invocation */ override def dispatcher(): MessageDispatcher = instance } @@ -125,15 +122,15 @@ class ActorSystemSpec extends AkkaSpec(ActorSystemSpec.config) with ImplicitSend "reject common invalid names" in { for (n <- Seq( - "-hallowelt", - "_hallowelt", - "hallo welt", - "hallo*welt", - "hallo@welt", - "hallo#welt", - "hallo$welt", - "hallo%welt", - "hallo/welt")) intercept[IllegalArgumentException] { + 
"-hallowelt", + "_hallowelt", + "hallo welt", + "hallo*welt", + "hallo@welt", + "hallo#welt", + "hallo$welt", + "hallo%welt", + "hallo/welt")) intercept[IllegalArgumentException] { ActorSystem(n) } } @@ -330,8 +327,8 @@ class ActorSystemSpec extends AkkaSpec(ActorSystemSpec.config) with ImplicitSend .parseString("akka.actor.guardian-supervisor-strategy=akka.actor.StoppingSupervisorStrategy") .withFallback(AkkaSpec.testConf)) val a = system.actorOf(Props(new Actor { - def receive = { - case "die" => throw new Exception("hello") + def receive = { case "die" => + throw new Exception("hello") } })) val probe = TestProbe() @@ -353,8 +350,8 @@ class ActorSystemSpec extends AkkaSpec(ActorSystemSpec.config) with ImplicitSend .parseString("akka.actor.guardian-supervisor-strategy=\"akka.actor.ActorSystemSpec$Strategy\"") .withFallback(AkkaSpec.testConf)) val a = system.actorOf(Props(new Actor { - def receive = { - case "die" => throw new Exception("hello") + def receive = { case "die" => + throw new Exception("hello") } })) EventFilter[Exception]("hello").intercept { diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorTimeoutSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorTimeoutSpec.scala index 3a02d666fe6..a70f4fa3e2d 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorTimeoutSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorTimeoutSpec.scala @@ -24,7 +24,7 @@ class ActorTimeoutSpec extends AkkaSpec { "use implicitly supplied timeout" in { implicit val timeout = Timeout(testTimeout) val echo = system.actorOf(Props.empty) - val f = (echo ? "hallo") + val f = echo ? 
"hallo" intercept[AskTimeoutException] { Await.result(f, testTimeout + leeway) } } diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorWithBoundedStashSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorWithBoundedStashSpec.scala index 169107f2708..60d88963bd3 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorWithBoundedStashSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorWithBoundedStashSpec.scala @@ -31,8 +31,8 @@ object ActorWithBoundedStashSpec { } - def afterWorldBehavior: Receive = { - case _ => stash() + def afterWorldBehavior: Receive = { case _ => + stash() } } diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorWithStashSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorWithStashSpec.scala index b3e074601f8..38192d02e26 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorWithStashSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorWithStashSpec.scala @@ -77,14 +77,13 @@ object ActorWithStashSpec { context.stop(watched) - def receive = { - case Terminated(`watched`) => - if (!stashed) { - stash() - stashed = true - unstashAll() - } - probe ! "terminated" + def receive = { case Terminated(`watched`) => + if (!stashed) { + stash() + stashed = true + unstashAll() + } + probe ! 
"terminated" } } diff --git a/akka-actor-tests/src/test/scala/akka/actor/ConsistencySpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ConsistencySpec.scala index 2e3b87258ea..b1f0930b9b4 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ConsistencySpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ConsistencySpec.scala @@ -15,7 +15,8 @@ object ConsistencySpec { val minThreads = 1 val maxThreads = 2000 val factor = 1.5d - val threads = ThreadPoolConfig.scaledPoolSize(minThreads, factor, maxThreads) // Make sure we have more threads than cores + val threads = + ThreadPoolConfig.scaledPoolSize(minThreads, factor, maxThreads) // Make sure we have more threads than cores val config = s""" consistency-dispatcher { @@ -28,11 +29,15 @@ object ConsistencySpec { } } """ - class CacheMisaligned(var value: Long, var padding1: Long, var padding2: Long, var padding3: Int) //Vars, no final fences + class CacheMisaligned( + var value: Long, + var padding1: Long, + var padding2: Long, + var padding3: Int) // Vars, no final fences class ConsistencyCheckingActor extends Actor { - var left = new CacheMisaligned(42, 0, 0, 0) //var - var right = new CacheMisaligned(0, 0, 0, 0) //var + var left = new CacheMisaligned(42, 0, 0, 0) // var + var right = new CacheMisaligned(0, 0, 0, 0) // var var lastStep = -1L def receive = { case step: Long => diff --git a/akka-actor-tests/src/test/scala/akka/actor/CoordinatedShutdownSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/CoordinatedShutdownSpec.scala index f92f1cb6afb..2b3b0ba207f 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/CoordinatedShutdownSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/CoordinatedShutdownSpec.scala @@ -26,8 +26,7 @@ import akka.testkit.EventFilter import akka.testkit.TestKit import akka.testkit.TestProbe -class CoordinatedShutdownSpec - extends AkkaSpec(ConfigFactory.parseString(""" +class CoordinatedShutdownSpec extends AkkaSpec(ConfigFactory.parseString(""" 
akka.loglevel=INFO akka.loggers = ["akka.testkit.TestEventListener"] """)) { @@ -40,18 +39,17 @@ class CoordinatedShutdownSpec private def checkTopologicalSort(phases: Map[String, Phase]): List[String] = { val result = CoordinatedShutdown.topologicalSort(phases) - result.zipWithIndex.foreach { - case (phase, i) => - phases.get(phase) match { - case Some(Phase(dependsOn, _, _, _)) => - dependsOn.foreach { depPhase => - withClue( - s"phase [$phase] depends on [$depPhase] but was ordered before it in topological sort result $result") { - i should be > result.indexOf(depPhase) - } + result.zipWithIndex.foreach { case (phase, i) => + phases.get(phase) match { + case Some(Phase(dependsOn, _, _, _)) => + dependsOn.foreach { depPhase => + withClue( + s"phase [$phase] depends on [$depPhase] but was ordered before it in topological sort result $result") { + i should be > result.indexOf(depPhase) } - case None => // ok - } + } + case None => // ok + } } result } @@ -81,7 +79,8 @@ class CoordinatedShutdownSpec // a, b can be in any order result2.toSet should ===(Set("a", "b", "c")) - checkTopologicalSort(Map("b" -> phase("a"), "c" -> phase("b"), "d" -> phase("b", "c"), "e" -> phase("d"))) should ===( + checkTopologicalSort( + Map("b" -> phase("a"), "c" -> phase("b"), "d" -> phase("b", "c"), "e" -> phase("d"))) should ===( List("a", "b", "c", "d", "e")) val result3 = @@ -333,9 +332,8 @@ class CoordinatedShutdownSpec c.cancel() shouldBe true Done } - cancelFutures.foldLeft(Future.successful(Done)) { - case (acc, fut) => - acc.flatMap(_ => fut) + cancelFutures.foldLeft(Future.successful(Done)) { case (acc, fut) => + acc.flatMap(_ => fut) } } @@ -787,7 +785,7 @@ class CoordinatedShutdownSpec withSystemRunning(newSystem, cs) TestKit.shutdownActorSystem(newSystem) - shutdownHooks should have size (0) + shutdownHooks should have size 0 protected def myHooksCount: Int = synchronized(shutdownHooks.size) } diff --git 
a/akka-actor-tests/src/test/scala/akka/actor/DeadLetterSuspensionSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/DeadLetterSuspensionSpec.scala index 3378f12f3c3..d60a28416fa 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/DeadLetterSuspensionSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/DeadLetterSuspensionSpec.scala @@ -16,9 +16,8 @@ object DeadLetterSuspensionSpec { } class Dropping extends Actor { - override def receive: Receive = { - case n: Int => - context.system.eventStream.publish(Dropped(n, "Don't like numbers", self)) + override def receive: Receive = { case n: Int => + context.system.eventStream.publish(Dropped(n, "Don't like numbers", self)) } } @@ -27,17 +26,19 @@ object DeadLetterSuspensionSpec { } class Unandled extends Actor { - override def receive: Receive = { - case n: Int => unhandled(n) + override def receive: Receive = { case n: Int => + unhandled(n) } } } -class DeadLetterSuspensionSpec extends AkkaSpec(""" +class DeadLetterSuspensionSpec + extends AkkaSpec(""" akka.loglevel = INFO akka.log-dead-letters = 4 akka.log-dead-letters-suspend-duration = 2s - """) with ImplicitSender { + """) + with ImplicitSender { import DeadLetterSuspensionSpec._ private val deadActor = system.actorOf(TestActors.echoActorProps) diff --git a/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala index 5e6bbae101c..e8295385b17 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala @@ -36,17 +36,18 @@ object DeathWatchSpec { } class NKOTBWatcher(testActor: ActorRef) extends Actor { - def receive = { - case "NKOTB" => - val currentKid = context.watch(context.actorOf(Props(new Actor { - def receive = { case "NKOTB" => context.stop(self) } - }), "kid")) - currentKid.forward("NKOTB") - context.become { - case Terminated(`currentKid`) => - testActor ! 
"GREEN" - context.unbecome() - } + def receive = { case "NKOTB" => + val currentKid = context.watch( + context.actorOf( + Props(new Actor { + def receive = { case "NKOTB" => context.stop(self) } + }), + "kid")) + currentKid.forward("NKOTB") + context.become { case Terminated(`currentKid`) => + testActor ! "GREEN" + context.unbecome() + } } } @@ -171,7 +172,7 @@ trait DeathWatchSpec { this: AkkaSpec with ImplicitSender with DefaultTimeout => monitor2 ! "ping" - expectMsg("pong") //Needs to be here since watch and unwatch are asynchronous + expectMsg("pong") // Needs to be here since watch and unwatch are asynchronous terminal ! PoisonPill diff --git a/akka-actor-tests/src/test/scala/akka/actor/DynamicAccessSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/DynamicAccessSpec.scala index 28c76f9d413..0757db06af4 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/DynamicAccessSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/DynamicAccessSpec.scala @@ -47,7 +47,8 @@ class DynamicAccessSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll } "try different constructors with recoverWith" in { - instantiateWithDefaultOrStringCtor("akka.actor.TestClassWithStringConstructor").get.name shouldBe "string ctor argument" + instantiateWithDefaultOrStringCtor( + "akka.actor.TestClassWithStringConstructor").get.name shouldBe "string ctor argument" instantiateWithDefaultOrStringCtor("akka.actor.TestClassWithDefaultConstructor").get.name shouldBe "default" instantiateWithDefaultOrStringCtor("akka.actor.foo.NonExistingClass") match { case Failure(t) => diff --git a/akka-actor-tests/src/test/scala/akka/actor/ExtensionSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ExtensionSpec.scala index 2744291f26f..a34bfca5626 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ExtensionSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ExtensionSpec.scala @@ -127,8 +127,7 @@ class ExtensionSpec extends AnyWordSpec with Matchers { val countBefore = 
InstanceCountingExtension.createCount.get() val system = ActorSystem( "extensions", - ConfigFactory.parseString( - """ + ConfigFactory.parseString(""" akka.library-extensions = ["akka.actor.InstanceCountingExtension", "akka.actor.InstanceCountingExtension", "akka.actor.InstanceCountingExtension$"] """)) val listedExtensions = system.settings.config.getStringList("akka.library-extensions").asScala @@ -143,9 +142,12 @@ class ExtensionSpec extends AnyWordSpec with Matchers { intercept[FailingTestExtension.TestException] { ActorSystem( "failing", - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" akka.library-extensions += "akka.actor.FailingTestExtension" - """).withFallback(ConfigFactory.load()).resolve()) + """) + .withFallback(ConfigFactory.load()) + .resolve()) } } @@ -154,9 +156,11 @@ class ExtensionSpec extends AnyWordSpec with Matchers { intercept[RuntimeException] { ActorSystem( "failing", - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" akka.library-extensions += "akka.actor.MissingExtension" - """).withFallback(ConfigFactory.load())) + """) + .withFallback(ConfigFactory.load())) } } diff --git a/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala index 2f6b0a930e1..8f96b58af76 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala @@ -44,7 +44,7 @@ object FSMActorSpec { soFar + digit match { case incomplete if incomplete.length < code.length => stay().using(CodeState(incomplete, code)) - case codeTry if (codeTry == code) => { + case codeTry if codeTry == code => { doUnlock() goto(Open).using(CodeState("", code)).forMax(timeout) } @@ -72,8 +72,8 @@ object FSMActorSpec { } } - onTransition { - case Locked -> Open => transitionLatch.open() + onTransition { case Locked -> Open => + transitionLatch.open() } // verify that old-style does still compile @@ -83,10 +83,9 @@ 
object FSMActorSpec { // dummy } - onTermination { - case StopEvent(FSM.Shutdown, Locked, _) => - // stop is called from lockstate with shutdown as reason... - terminatedLatch.open() + onTermination { case StopEvent(FSM.Shutdown, Locked, _) => + // stop is called from lockstate with shutdown as reason... + terminatedLatch.open() } // initialize the lock @@ -161,8 +160,8 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im "log termination" in { val fsm = TestActorRef(new Actor with FSM[Int, Null] { startWith(1, null) - when(1) { - case Event("go", _) => goto(2) + when(1) { case Event("go", _) => + goto(2) } }) val name = fsm.path.toString @@ -189,8 +188,8 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im when(1) { FSM.NullFunction } - onTermination { - case x => testActor ! x + onTermination { case x => + testActor ! x } } @@ -210,11 +209,11 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im val expected = "pigdog" val actor = system.actorOf(Props(new Actor with FSM[Int, String] { startWith(1, null) - when(1) { - case Event(2, null) => stop(FSM.Normal, expected) + when(1) { case Event(2, null) => + stop(FSM.Normal, expected) } - onTermination { - case StopEvent(FSM.Normal, 1, `expected`) => testActor ! "green" + onTermination { case StopEvent(FSM.Normal, 1, `expected`) => + testActor ! "green" } })) actor ! 
2 @@ -227,15 +226,14 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im // can't be anonymous class due to https://github.com/akka/akka/issues/32128 class FsmActor extends Actor with FSM[String, Null] { startWith("not-started", null) - when("not-started") { - case Event("start", _) => goto("started").replying("starting") + when("not-started") { case Event("start", _) => + goto("started").replying("starting") } - when("started", stateTimeout = 10 seconds) { - case Event("stop", _) => stop() + when("started", stateTimeout = 10 seconds) { case Event("stop", _) => + stop() } - onTransition { - case "not-started" -> "started" => - for (timerName <- timerNames) startSingleTimer(timerName, (), 10 seconds) + onTransition { case "not-started" -> "started" => + for (timerName <- timerNames) startSingleTimer(timerName, (), 10 seconds) } onTermination { case _ => { @@ -274,18 +272,16 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im EventFilter.debug(occurrences = 5).intercept { val fsm = TestActorRef(new Actor with LoggingFSM[Int, Null] { startWith(1, null) - when(1) { - case Event("go", _) => - startSingleTimer("t", FSM.Shutdown, 1.5 seconds) - goto(2) + when(1) { case Event("go", _) => + startSingleTimer("t", FSM.Shutdown, 1.5 seconds) + goto(2) } - when(2) { - case Event("stop", _) => - cancelTimer("t") - stop() + when(2) { case Event("stop", _) => + cancelTimer("t") + stop() } - onTermination { - case StopEvent(r, _, _) => testActor ! r + onTermination { case StopEvent(r, _, _) => + testActor ! 
r } }) val name = fsm.path.toString @@ -339,13 +335,13 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im import akka.actor.FSM._ val fsmref = system.actorOf(Props(new Actor with FSM[Int, Int] { startWith(0, 0) - when(0)(transform { - case Event("go", _) => stay() - }.using { - case _ => goto(1) + when(0)(transform { case Event("go", _) => + stay() + }.using { case _ => + goto(1) }) - when(1) { - case _ => stay() + when(1) { case _ => + stay() } })) fsmref ! SubscribeTransitionCallBack(testActor) diff --git a/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala index 04d4bf1c0b8..5021f3b51e4 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala @@ -121,8 +121,8 @@ class FSMTimingSpec extends AkkaSpec with ImplicitSender { "receive and cancel a repeated timer" taggedAs TimingTest in { fsm ! TestRepeatedTimer - val seq = receiveWhile(2 seconds) { - case Tick => Tick + val seq = receiveWhile(2 seconds) { case Tick => + Tick } (seq should have).length(5) within(500 millis) { @@ -208,13 +208,12 @@ object FSMTimingSpec { case Event(StateTimeout, _) => goto(Initial) case Event(Cancel, _) => goto(Initial).replying(Cancel) } - when(TestSingleTimer) { - case Event(Tick, _) => - tester ! Tick - goto(Initial) + when(TestSingleTimer) { case Event(Tick, _) => + tester ! Tick + goto(Initial) } - onTransition { - case Initial -> TestSingleTimerResubmit => startSingleTimer("blah", Tick, 500.millis.dilated) + onTransition { case Initial -> TestSingleTimerResubmit => + startSingleTimer("blah", Tick, 500.millis.dilated) } when(TestSingleTimerResubmit) { case Event(Tick, _) => @@ -240,15 +239,14 @@ object FSMTimingSpec { cancelTimer("hallo") goto(Initial) } - when(TestRepeatedTimer) { - case Event(Tick, remaining) => - tester ! 
Tick - if (remaining == 0) { - cancelTimer("tester") - goto(Initial) - } else { - stay().using(remaining - 1) - } + when(TestRepeatedTimer) { case Event(Tick, remaining) => + tester ! Tick + if (remaining == 0) { + cancelTimer("tester") + goto(Initial) + } else { + stay().using(remaining - 1) + } } when(TestCancelStateTimerInNamedTimerMessage) { // FSM is suspended after processing this message and resumed 500ms later @@ -268,10 +266,9 @@ object FSMTimingSpec { } when(TestUnhandled) { case Event(SetHandler, _) => - whenUnhandled { - case Event(Tick, _) => - tester ! Unhandled(Tick) - stay() + whenUnhandled { case Event(Tick, _) => + tester ! Unhandled(Tick) + stay() } stay() case Event(Cancel, _) => @@ -283,10 +280,9 @@ object FSMTimingSpec { class StoppingActor extends Actor with FSM[State, Int] { startWith(Initial, 0) - when(Initial, 200 millis) { - case Event(TestStoppingActorStateTimeout, _) => - context.stop(self) - stay() + when(Initial, 200 millis) { case Event(TestStoppingActorStateTimeout, _) => + context.stop(self) + stay() } } diff --git a/akka-actor-tests/src/test/scala/akka/actor/FSMTransitionSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/FSMTransitionSpec.scala index bf2f67a7ecb..f1596a97f89 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/FSMTransitionSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/FSMTransitionSpec.scala @@ -28,14 +28,14 @@ object FSMTransitionSpec { class MyFSM(target: ActorRef) extends Actor with FSM[Int, Unit] { startWith(0, ()) - when(0) { - case Event("tick", _) => goto(1) + when(0) { case Event("tick", _) => + goto(1) } - when(1) { - case Event("tick", _) => goto(0) + when(1) { case Event("tick", _) => + goto(0) } - whenUnhandled { - case Event("reply", _) => stay().replying("reply") + whenUnhandled { case Event("reply", _) => + stay().replying("reply") } initialize() override def preRestart(reason: Throwable, msg: Option[Any]): Unit = { target ! 
"restarted" } @@ -47,8 +47,8 @@ object FSMTransitionSpec { case Event("tick", _) => goto(1).using(1) case Event("stay", _) => stay() } - when(1) { - case _ => goto(1) + when(1) { case _ => + goto(1) } onTransition { case 0 -> 1 => target ! ((stateData, nextStateData)) @@ -147,20 +147,19 @@ class FSMTransitionSpec extends AkkaSpec with ImplicitSender { "not leak memory in nextState" in { val fsmref = system.actorOf(Props(new Actor with FSM[Int, ActorRef] { startWith(0, null) - when(0) { - case Event("switch", _) => goto(1).using(sender()) + when(0) { case Event("switch", _) => + goto(1).using(sender()) } - onTransition { - case x -> y => nextStateData ! (x -> y) + onTransition { case x -> y => + nextStateData ! (x -> y) } - when(1) { - case Event("test", _) => - try { - sender() ! s"failed: $nextStateData" - } catch { - case _: IllegalStateException => sender() ! "ok" - } - stay() + when(1) { case Event("test", _) => + try { + sender() ! s"failed: $nextStateData" + } catch { + case _: IllegalStateException => sender() ! "ok" + } + stay() } })) fsmref ! "switch" diff --git a/akka-actor-tests/src/test/scala/akka/actor/FunctionRefSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/FunctionRefSpec.scala index 498d436c5d4..615f229a41e 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/FunctionRefSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/FunctionRefSpec.scala @@ -29,20 +29,22 @@ object FunctionRefSpec { class SupSuper extends Actor { val s = context.actorOf(Props[Super](), "super") - def receive = { - case msg => s ! msg + def receive = { case msg => + s ! 
msg } } } -class FunctionRefSpec extends AkkaSpec(""" +class FunctionRefSpec + extends AkkaSpec(""" # test is using Java serialization and relies on serialize-messages=on akka.actor.allow-java-serialization = on akka.actor.warn-about-java-serializer-usage = off akka.actor.serialize-messages = on akka.actor.no-serialization-verification-needed-class-prefix = [] - """) with ImplicitSender { + """) + with ImplicitSender { import FunctionRefSpec._ def commonTests(s: ActorRef) = { diff --git a/akka-actor-tests/src/test/scala/akka/actor/HotSwapSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/HotSwapSpec.scala index ff316f4a755..8cc37108439 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/HotSwapSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/HotSwapSpec.scala @@ -17,7 +17,7 @@ class HotSwapSpec extends AkkaSpec with ImplicitSender { "be able to become in its constructor" in { val a = system.actorOf(Props(new Becomer { context.become { case always => sender() ! always } - def receive = { case _ => sender() ! "FAILURE" } + def receive = { case _ => sender() ! "FAILURE" } })) a ! "pigdog" expectMsg("pigdog") @@ -25,7 +25,7 @@ class HotSwapSpec extends AkkaSpec with ImplicitSender { "be able to become multiple times in its constructor" in { val a = system.actorOf(Props(new Becomer { - for (i <- 1 to 4) context.become({ case always => sender() ! s"$i:$always" }) + for (i <- 1 to 4) context.become { case always => sender() ! s"$i:$always" } def receive = { case _ => sender() ! "FAILURE" } })) a ! "pigdog" @@ -35,7 +35,7 @@ class HotSwapSpec extends AkkaSpec with ImplicitSender { "be able to become with stacking in its constructor" in { val a = system.actorOf(Props(new Becomer { context.become({ case always => sender() ! "pigdog:" + always; context.unbecome() }, false) - def receive = { case always => sender() ! "badass:" + always } + def receive = { case always => sender() ! "badass:" + always } })) a ! 
"pigdog" expectMsg("pigdog:pigdog") @@ -62,7 +62,7 @@ class HotSwapSpec extends AkkaSpec with ImplicitSender { val a = system.actorOf(Props(new Actor { def receive = { case "init" => sender() ! "init" - case "swap" => context.become({ case x: String => context.sender() ! x }) + case "swap" => context.become { case x: String => context.sender() ! x } } })) @@ -78,10 +78,10 @@ class HotSwapSpec extends AkkaSpec with ImplicitSender { def receive = { case "init" => sender() ! "init" case "swap" => - context.become({ + context.become { case "swapped" => sender() ! "swapped" case "revert" => context.unbecome() - }) + } } })) @@ -103,11 +103,11 @@ class HotSwapSpec extends AkkaSpec with ImplicitSender { def receive = { case "state" => sender() ! "0" case "swap" => - context.become({ + context.become { case "state" => sender() ! "1" case "swapped" => sender() ! "swapped" case "crash" => throw new Exception("Crash (expected)!") - }) + } sender() ! "swapped" } })) diff --git a/akka-actor-tests/src/test/scala/akka/actor/LocalActorRefProviderSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/LocalActorRefProviderSpec.scala index 0cfad910d1f..b9ac21fba2f 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/LocalActorRefProviderSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/LocalActorRefProviderSpec.scala @@ -41,12 +41,11 @@ class LocalActorRefProviderSpec extends AkkaSpec(LocalActorRefProviderSpec.confi val childName = "akka%3A%2F%2FClusterSystem%40127.0.0.1%3A2552" val a = system.actorOf(Props(new Actor { val child = context.actorOf(Props.empty, name = childName) - def receive = { - case "lookup" => - if (childName == child.path.name) { - val resolved = system.asInstanceOf[ExtendedActorSystem].provider.resolveActorRef(child.path) - sender() ! resolved - } else sender() ! s"$childName is not ${child.path.name}!" 
+ def receive = { case "lookup" => + if (childName == child.path.name) { + val resolved = system.asInstanceOf[ExtendedActorSystem].provider.resolveActorRef(child.path) + sender() ! resolved + } else sender() ! s"$childName is not ${child.path.name}!" } })) a.tell("lookup", testActor) @@ -112,11 +111,13 @@ class LocalActorRefProviderSpec extends AkkaSpec(LocalActorRefProviderSpec.confi expectTerminated(a) // the fields are cleared after the Terminated message has been sent, // so we need to check for a reasonable time after we receive it - awaitAssert({ - val childProps2 = child.asInstanceOf[LocalActorRef].underlying.props - childProps2 should not be theSameInstanceAs(childProps1) - (childProps2 should be).theSameInstanceAs(ActorCell.terminatedProps) - }, 1 second) + awaitAssert( + { + val childProps2 = child.asInstanceOf[LocalActorRef].underlying.props + childProps2 should not be theSameInstanceAs(childProps1) + (childProps2 should be).theSameInstanceAs(ActorCell.terminatedProps) + }, + 1 second) } } @@ -135,20 +136,19 @@ class LocalActorRefProviderSpec extends AkkaSpec(LocalActorRefProviderSpec.confi for (_ <- 1 to 4) yield Future(system.actorOf(Props(new Actor { def receive = { case _ => } }), address)) val set: Set[Any] = Set() ++ actors.map(a => - Await.ready(a, timeout.duration).value match { - case Some(Success(_: ActorRef)) => 1 - case Some(Failure(_: InvalidActorNameException)) => 2 - case x => x - }) + Await.ready(a, timeout.duration).value match { + case Some(Success(_: ActorRef)) => 1 + case Some(Failure(_: InvalidActorNameException)) => 2 + case x => x + }) set should ===(Set[Any](1, 2)) } } "only create one instance of an actor from within the same message invocation" in { val supervisor = system.actorOf(Props(new Actor { - def receive = { - case "" => - val a, b = context.actorOf(Props.empty, "duplicate") + def receive = { case "" => + val a, b = context.actorOf(Props.empty, "duplicate") } })) EventFilter[InvalidActorNameException](occurrences = 
1).intercept { diff --git a/akka-actor-tests/src/test/scala/akka/actor/ProviderSelectionSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ProviderSelectionSpec.scala index d53e87c206e..58595bf488c 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ProviderSelectionSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ProviderSelectionSpec.scala @@ -50,7 +50,7 @@ class ProviderSelectionSpec extends AbstractSpec { "create a Custom ProviderSelection and set custom provider fqcn in Settings" in { val other = "other.ActorRefProvider" - val ps = ProviderSelection.Custom(other) //checked by dynamicAccess + val ps = ProviderSelection.Custom(other) // checked by dynamicAccess ps.fqcn shouldEqual "other.ActorRefProvider" ps.hasCluster shouldBe false settingsWith(other).ProviderClass shouldEqual ps.fqcn diff --git a/akka-actor-tests/src/test/scala/akka/actor/ReceiveTimeoutSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ReceiveTimeoutSpec.scala index 950d81836b6..b164f171320 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ReceiveTimeoutSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ReceiveTimeoutSpec.scala @@ -22,9 +22,8 @@ object ReceiveTimeoutSpec { class RestartingParent(probe: ActorRef) extends Actor { val restarting = new AtomicBoolean(false) val child = context.actorOf(Props(new RestartingChild(probe, restarting))) - def receive = { - case msg => - child.forward(msg) + def receive = { case msg => + child.forward(msg) } } class RestartingChild(probe: ActorRef, restarting: AtomicBoolean) extends Actor { @@ -55,10 +54,9 @@ object ReceiveTimeoutSpec { } class StoppingSelfActor(probe: ActorRef) extends Actor { - override def receive: Receive = { - case "Stop" => - context.setReceiveTimeout(200.millis) - context.stop(self) + override def receive: Receive = { case "Stop" => + context.setReceiveTimeout(200.millis) + context.stop(self) } override def postStop(): Unit = { @@ -78,8 +76,8 @@ class ReceiveTimeoutSpec extends AkkaSpec() { 
val timeoutActor = system.actorOf(Props(new Actor { context.setReceiveTimeout(500 milliseconds) - def receive = { - case ReceiveTimeout => timeoutLatch.open() + def receive = { case ReceiveTimeout => + timeoutLatch.open() } })) @@ -132,8 +130,8 @@ class ReceiveTimeoutSpec extends AkkaSpec() { val timeoutLatch = TestLatch() val timeoutActor = system.actorOf(Props(new Actor { - def receive = { - case ReceiveTimeout => timeoutLatch.open() + def receive = { case ReceiveTimeout => + timeoutLatch.open() } })) diff --git a/akka-actor-tests/src/test/scala/akka/actor/RestartStrategySpec.scala b/akka-actor-tests/src/test/scala/akka/actor/RestartStrategySpec.scala index 543e8190063..26e6a31dfc9 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/RestartStrategySpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/RestartStrategySpec.scala @@ -86,8 +86,8 @@ class RestartStrategySpec extends AkkaSpec with DefaultTimeout { val employeeProps = Props(new Actor { - def receive = { - case Crash => throw new Exception("Crashing...") + def receive = { case Crash => + throw new Exception("Crashing...") } override def postRestart(reason: Throwable) = { diff --git a/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala index bd1999dde0e..7517dd46423 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala @@ -25,7 +25,8 @@ import akka.testkit._ object SchedulerSpec { val testConfRevolver = - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" akka.scheduler.implementation = akka.actor.LightArrayRevolverScheduler akka.scheduler.ticks-per-wheel = 32 """).withFallback(AkkaSpec.testConf) @@ -38,21 +39,19 @@ trait SchedulerSpec extends BeforeAndAfterEach with DefaultTimeout with Implicit def collectCancellable(c: Cancellable): Cancellable abstract class ScheduleAdapter { - def schedule(initialDelay: 
FiniteDuration, delay: FiniteDuration, runnable: Runnable)( - implicit executor: ExecutionContext): Cancellable + def schedule(initialDelay: FiniteDuration, delay: FiniteDuration, runnable: Runnable)(implicit + executor: ExecutionContext): Cancellable - def schedule(initialDelay: FiniteDuration, delay: FiniteDuration, receiver: ActorRef, message: Any)( - implicit + def schedule(initialDelay: FiniteDuration, delay: FiniteDuration, receiver: ActorRef, message: Any)(implicit executor: ExecutionContext): Cancellable } class ScheduleWithFixedDelayAdapter extends ScheduleAdapter { - def schedule(initialDelay: FiniteDuration, delay: FiniteDuration, runnable: Runnable)( - implicit executor: ExecutionContext): Cancellable = + def schedule(initialDelay: FiniteDuration, delay: FiniteDuration, runnable: Runnable)(implicit + executor: ExecutionContext): Cancellable = system.scheduler.scheduleWithFixedDelay(initialDelay, delay)(runnable) - def schedule(initialDelay: FiniteDuration, delay: FiniteDuration, receiver: ActorRef, message: Any)( - implicit + def schedule(initialDelay: FiniteDuration, delay: FiniteDuration, receiver: ActorRef, message: Any)(implicit executor: ExecutionContext): Cancellable = system.scheduler.scheduleWithFixedDelay(initialDelay, delay, receiver, message) @@ -60,12 +59,11 @@ trait SchedulerSpec extends BeforeAndAfterEach with DefaultTimeout with Implicit } class ScheduleAtFixedRateAdapter extends ScheduleAdapter { - def schedule(initialDelay: FiniteDuration, delay: FiniteDuration, runnable: Runnable)( - implicit executor: ExecutionContext): Cancellable = + def schedule(initialDelay: FiniteDuration, delay: FiniteDuration, runnable: Runnable)(implicit + executor: ExecutionContext): Cancellable = system.scheduler.scheduleAtFixedRate(initialDelay, delay)(runnable) - def schedule(initialDelay: FiniteDuration, delay: FiniteDuration, receiver: ActorRef, message: Any)( - implicit + def schedule(initialDelay: FiniteDuration, delay: FiniteDuration, receiver: 
ActorRef, message: Any)(implicit executor: ExecutionContext): Cancellable = system.scheduler.scheduleAtFixedRate(initialDelay, delay, receiver, message) @@ -96,9 +94,7 @@ trait SchedulerSpec extends BeforeAndAfterEach with DefaultTimeout with Implicit countDownLatch.getCount should ===(1L) } - /** - * ticket #372 - */ + /** ticket #372 */ "be cancellable" taggedAs TimingTest in { for (_ <- 1 to 10) system.scheduler.scheduleOnce(1 second, testActor, "fail").cancel() @@ -129,12 +125,11 @@ trait SchedulerSpec extends BeforeAndAfterEach with DefaultTimeout with Implicit final case class Msg(ts: Long) val actor = system.actorOf(Props(new Actor { - def receive = { - case Msg(ts) => - val now = System.nanoTime - // Make sure that no message has been dispatched before the scheduled time (10ms) has occurred - if (now < ts) throw new RuntimeException("Interval is too small: " + (now - ts)) - ticks.countDown() + def receive = { case Msg(ts) => + val now = System.nanoTime + // Make sure that no message has been dispatched before the scheduled time (10ms) has occurred + if (now < ts) throw new RuntimeException("Interval is too small: " + (now - ts)) + ticks.countDown() } })) @@ -194,12 +189,11 @@ trait SchedulerSpec extends BeforeAndAfterEach with DefaultTimeout with Implicit val tickActor, tickActor2 = system.actorOf(Props(new Actor { var ticks = 0 - def receive = { - case Tick => - if (ticks < 3) { - sender() ! Tock - ticks += 1 - } + def receive = { case Tick => + if (ticks < 3) { + sender() ! Tock + ticks += 1 + } } })) // run every 50 milliseconds @@ -222,8 +216,8 @@ trait SchedulerSpec extends BeforeAndAfterEach with DefaultTimeout with Implicit "stop continuous scheduling if the receiving actor has been terminated" taggedAs TimingTest in { val actor = system.actorOf(Props(new Actor { - def receive = { - case x => sender() ! x + def receive = { case x => + sender() ! 
x } })) @@ -240,11 +234,15 @@ trait SchedulerSpec extends BeforeAndAfterEach with DefaultTimeout with Implicit "stop continuous scheduling if the task throws exception" taggedAs TimingTest in { EventFilter[Exception]("TEST", occurrences = 1).intercept { val count = new AtomicInteger(0) - collectCancellable(scheduleAdapter.schedule(Duration.Zero, 20.millis, () => { - val c = count.incrementAndGet() - testActor ! c - if (c == 3) throw new RuntimeException("TEST") with NoStackTrace - })) + collectCancellable( + scheduleAdapter.schedule( + Duration.Zero, + 20.millis, + () => { + val c = count.incrementAndGet() + testActor ! c + if (c == 3) throw new RuntimeException("TEST") with NoStackTrace + })) expectMsg(1) expectMsg(2) expectMsg(3) @@ -256,24 +254,32 @@ trait SchedulerSpec extends BeforeAndAfterEach with DefaultTimeout with Implicit // when first throws EventFilter[Exception]("TEST-1", occurrences = 1).intercept { val count1 = new AtomicInteger(0) - collectCancellable(scheduleAdapter.schedule(Duration.Zero, 20.millis, () => { - val c = count1.incrementAndGet() - if (c == 1) - throw new IllegalStateException("TEST-1") with NoStackTrace - else - testActor ! c - })) + collectCancellable( + scheduleAdapter.schedule( + Duration.Zero, + 20.millis, + () => { + val c = count1.incrementAndGet() + if (c == 1) + throw new IllegalStateException("TEST-1") with NoStackTrace + else + testActor ! c + })) expectNoMessage(200.millis) } // when later EventFilter[Exception]("TEST-3", occurrences = 1).intercept { val count2 = new AtomicInteger(0) - collectCancellable(scheduleAdapter.schedule(Duration.Zero, 20.millis, () => { - val c = count2.incrementAndGet() - testActor ! c - if (c == 3) throw new IllegalStateException("TEST-3") with NoStackTrace - })) + collectCancellable( + scheduleAdapter.schedule( + Duration.Zero, + 20.millis, + () => { + val c = count2.incrementAndGet() + testActor ! 
c + if (c == 3) throw new IllegalStateException("TEST-3") with NoStackTrace + })) expectMsg(1) expectMsg(2) expectMsg(3) @@ -286,9 +292,13 @@ trait SchedulerSpec extends BeforeAndAfterEach with DefaultTimeout with Implicit val initialDelay = 200.millis.dilated val delay = 10.millis.dilated - val timeout = collectCancellable(scheduleAdapter.schedule(initialDelay, delay, () => { - ticks.incrementAndGet() - })) + val timeout = collectCancellable( + scheduleAdapter.schedule( + initialDelay, + delay, + () => { + ticks.incrementAndGet() + })) Thread.sleep(10.millis.dilated.toMillis) timeout.cancel() Thread.sleep((initialDelay + 100.millis.dilated).toMillis) @@ -301,9 +311,13 @@ trait SchedulerSpec extends BeforeAndAfterEach with DefaultTimeout with Implicit val initialDelay = 90.millis.dilated val delay = 500.millis.dilated - val timeout = collectCancellable(scheduleAdapter.schedule(initialDelay, delay, () => { - ticks.incrementAndGet() - })) + val timeout = collectCancellable( + scheduleAdapter.schedule( + initialDelay, + delay, + () => { + ticks.incrementAndGet() + })) Thread.sleep((initialDelay + 200.millis.dilated).toMillis) timeout.cancel() Thread.sleep((delay + 100.millis.dilated).toMillis) @@ -311,9 +325,7 @@ trait SchedulerSpec extends BeforeAndAfterEach with DefaultTimeout with Implicit ticks.get should ===(1) } - /** - * ticket #307 - */ + /** ticket #307 */ "pick up schedule after actor restart" taggedAs TimingTest in { object Ping @@ -473,9 +485,11 @@ class LightArrayRevolverSchedulerSpec extends AkkaSpec(SchedulerSpec.testConfRev val counter = new AtomicInteger val terminated = Future { var rounds = 0 - while (Try(sched.scheduleOnce(Duration.Zero, new Scheduler.TaskRunOnClose { - override def run(): Unit = () - })(localEC)).isSuccess) { + while (Try(sched.scheduleOnce( + Duration.Zero, + new Scheduler.TaskRunOnClose { + override def run(): Unit = () + })(localEC)).isSuccess) { Thread.sleep(1) driver.wakeUp(step) rounds += 1 @@ -485,9 +499,11 @@ class 
LightArrayRevolverSchedulerSpec extends AkkaSpec(SchedulerSpec.testConfRev def delay = if (ThreadLocalRandom.current.nextBoolean) step * 2 else step val N = 1000000 (1 to N).foreach(_ => - sched.scheduleOnce(delay, new Scheduler.TaskRunOnClose { - override def run(): Unit = counter.incrementAndGet() - })) + sched.scheduleOnce( + delay, + new Scheduler.TaskRunOnClose { + override def run(): Unit = counter.incrementAndGet() + })) sched.close() Await.result(terminated, 3.seconds.dilated) should be > 10 awaitAssert(counter.get should ===(N)) @@ -614,9 +630,12 @@ class LightArrayRevolverSchedulerSpec extends AkkaSpec(SchedulerSpec.testConfRev var overrun = headroom val cap = 1000000 val (success, failure) = Iterator - .continually(Try(sched.scheduleOnce(100.millis, new Scheduler.TaskRunOnClose { - override def run(): Unit = counter.incrementAndGet() - }))) + .continually( + Try(sched.scheduleOnce( + 100.millis, + new Scheduler.TaskRunOnClose { + override def run(): Unit = counter.incrementAndGet() + }))) .take(cap) .takeWhile(_.isSuccess || { overrun -= 1; overrun >= 0 }) .partition(_.isSuccess) @@ -632,9 +651,11 @@ class LightArrayRevolverSchedulerSpec extends AkkaSpec(SchedulerSpec.testConfRev import system.dispatcher val counter = new AtomicInteger() sched.scheduleOnce(10.seconds)(counter.incrementAndGet()) - sched.scheduleOnce(10.seconds, new Scheduler.TaskRunOnClose { - override def run(): Unit = counter.incrementAndGet() - }) + sched.scheduleOnce( + 10.seconds, + new Scheduler.TaskRunOnClose { + override def run(): Unit = counter.incrementAndGet() + }) driver.close() sched.close() counter.get should ===(1) @@ -703,7 +724,8 @@ class LightArrayRevolverSchedulerSpec extends AkkaSpec(SchedulerSpec.testConfRev override protected def waitNanos(ns: Long): Unit = { // println(s"waiting $ns") prb.ref ! 
ns - try time += (lbq.get match { + try + time += (lbq.get match { case q: LinkedBlockingQueue[Long] => q.take() case null => 0L }) diff --git a/akka-actor-tests/src/test/scala/akka/actor/Supervisor.scala b/akka-actor-tests/src/test/scala/akka/actor/Supervisor.scala index 413dba81080..566eee16297 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/Supervisor.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/Supervisor.scala @@ -10,8 +10,8 @@ package akka.actor */ class Supervisor(override val supervisorStrategy: SupervisorStrategy) extends Actor { - def receive = { - case x: Props => sender() ! context.actorOf(x) + def receive = { case x: Props => + sender() ! context.actorOf(x) } // need to override the default of stopping all children upon restart, tests rely on keeping them around override def preRestart(cause: Throwable, msg: Option[Any]): Unit = {} diff --git a/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala index eb4497ed5c9..cf6e3c90b5d 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala @@ -40,8 +40,8 @@ object SupervisorHierarchySpec { */ class CountDownActor(countDown: CountDownLatch, override val supervisorStrategy: SupervisorStrategy) extends Actor { - def receive = { - case p: Props => sender() ! context.actorOf(p) + def receive = { case p: Props => + sender() ! 
context.actorOf(p) } // test relies on keeping children around during restart override def preRestart(cause: Throwable, msg: Option[Any]): Unit = {} @@ -170,7 +170,7 @@ object SupervisorHierarchySpec { val sizes = s / kids var rest = s % kids val propsTemplate = Props.empty.withDispatcher("hierarchy") - (1 to kids).iterator.map { (id) => + (1 to kids).iterator.map { id => val kidSize = if (rest > 0) { rest -= 1; sizes + 1 } else sizes @@ -238,15 +238,14 @@ object SupervisorHierarchySpec { val state = stateCache.get(self.path) log = state.log log :+= Event("restarted " + suspendCount + " " + cause, identityHashCode(this)) - state.kids.foreach { - case (childPath, kidSize) => - val name = childPath.name - if (context.child(name).isEmpty) { - listener ! Died(childPath) - val props = - Props(new Hierarchy(kidSize, breadth, listener, myLevel + 1, random)).withDispatcher("hierarchy") - context.watch(context.actorOf(props, name)) - } + state.kids.foreach { case (childPath, kidSize) => + val name = childPath.name + if (context.child(name).isEmpty) { + listener ! 
Died(childPath) + val props = + Props(new Hierarchy(kidSize, breadth, listener, myLevel + 1, random)).withDispatcher("hierarchy") + context.watch(context.actorOf(props, name)) + } } if (context.children.size != state.kids.size) { abort("invariant violated: " + state.kids.size + " != " + context.children.size) @@ -463,12 +462,11 @@ object SupervisorHierarchySpec { // number of Work packages to execute for the test startWith(Idle, size * 1000) - when(Idle) { - case this.Event(Init, _) => - hierarchy = context.watch( - context.actorOf(Props(new Hierarchy(size, breadth, self, 0, random)).withDispatcher("hierarchy"), "head")) - startSingleTimer("phase", StateTimeout, 5 seconds) - goto(Init) + when(Idle) { case this.Event(Init, _) => + hierarchy = context.watch( + context.actorOf(Props(new Hierarchy(size, breadth, self, 0, random)).withDispatcher("hierarchy"), "head")) + startSingleTimer("phase", StateTimeout, 5 seconds) + goto(Init) } when(Init) { @@ -486,13 +484,12 @@ object SupervisorHierarchySpec { stop() } - onTransition { - case Init -> Stress => - self ! Work - idleChildren = children - activeChildren = children - // set timeout for completion of the whole test (i.e. including Finishing and Stopping) - startSingleTimer("phase", StateTimeout, 90.seconds.dilated) + onTransition { case Init -> Stress => + self ! Work + idleChildren = children + activeChildren = children + // set timeout for completion of the whole test (i.e. including Finishing and Stopping) + startSingleTimer("phase", StateTimeout, 90.seconds.dilated) } val workSchedule = 50.millis @@ -558,8 +555,8 @@ object SupervisorHierarchySpec { goto(Failed) } - onTransition { - case Stress -> Finishing => ignoreFailConstr = true + onTransition { case Stress -> Finishing => + ignoreFailConstr = true } when(Finishing) { @@ -572,11 +569,10 @@ object SupervisorHierarchySpec { if (pingChildren.isEmpty) goto(LastPing) else stay() } - onTransition { - case _ -> LastPing => - idleChildren.foreach(_ ! 
"ping") - pingChildren ++= idleChildren - idleChildren = Vector.empty + onTransition { case _ -> LastPing => + idleChildren.foreach(_ ! "ping") + pingChildren ++= idleChildren + idleChildren = Vector.empty } when(LastPing) { @@ -589,10 +585,9 @@ object SupervisorHierarchySpec { if (pingChildren.isEmpty) goto(Stopping) else stay() } - onTransition { - case _ -> Stopping => - ignoreNotResumedLogs = false - hierarchy ! PingOfDeath + onTransition { case _ -> Stopping => + ignoreNotResumedLogs = false + hierarchy ! PingOfDeath } when(Stopping, stateTimeout = 5.seconds.dilated) { @@ -704,10 +699,9 @@ object SupervisorHierarchySpec { case (origin, ErrorLog("dump", _)) => getErrors(origin, 1) case (origin, ErrorLog(msg, _)) if msg.startsWith("not resumed") => getErrorsUp(origin) } - val merged = errors.sortBy(_._1.toString).flatMap { - case (ref, ErrorLog(msg, log)) => - println("Error: " + ref + " " + msg) - log.map(l => (l.time, ref, l.identity, l.msg.toString)) + val merged = errors.sortBy(_._1.toString).flatMap { case (ref, ErrorLog(msg, log)) => + println("Error: " + ref + " " + msg) + log.map(l => (l.time, ref, l.identity, l.msg.toString)) } println("random seed: " + randomSeed) merged.sorted.distinct.foreach(println) @@ -821,14 +815,16 @@ class SupervisorHierarchySpec extends AkkaSpec(SupervisorHierarchySpec.config) w "suspend children while failing" taggedAs LongRunningTest in { val latch = TestLatch() - val slowResumer = system.actorOf(Props(new Actor { - override def supervisorStrategy = OneForOneStrategy() { - case _ => Await.ready(latch, 4.seconds.dilated); SupervisorStrategy.Resume - } - def receive = { - case "spawn" => sender() ! context.actorOf(Props[Resumer]()) - } - }), "slowResumer") + val slowResumer = system.actorOf( + Props(new Actor { + override def supervisorStrategy = OneForOneStrategy() { case _ => + Await.ready(latch, 4.seconds.dilated); SupervisorStrategy.Resume + } + def receive = { case "spawn" => + sender() ! 
context.actorOf(Props[Resumer]()) + } + }), + "slowResumer") slowResumer ! "spawn" val boss = expectMsgType[ActorRef] boss ! "spawn" @@ -861,33 +857,34 @@ class SupervisorHierarchySpec extends AkkaSpec(SupervisorHierarchySpec.config) w val failResumer = system.actorOf( Props(new Actor { - override def supervisorStrategy = OneForOneStrategy() { - case _: ActorInitializationException => - if (createAttempt.get % 2 == 0) SupervisorStrategy.Resume else SupervisorStrategy.Restart + override def supervisorStrategy = OneForOneStrategy() { case _: ActorInitializationException => + if (createAttempt.get % 2 == 0) SupervisorStrategy.Resume else SupervisorStrategy.Restart } - val child = context.actorOf(Props(new Actor { - val ca = createAttempt.incrementAndGet() - - if (ca <= 6 && ca % 3 == 0) - context.actorOf(Props(new Actor { override def receive = { case _ => } }), "workingChild") - - if (ca < 6) { - throw new IllegalArgumentException("OH NO!") - } - override def preStart() = { - preStartCalled.incrementAndGet() - } - override def postRestart(reason: Throwable) = { - postRestartCalled.incrementAndGet() - } - override def receive = { - case m => sender() ! m - } - }), "failChild") - - override def receive = { - case m => child.forward(m) + val child = context.actorOf( + Props(new Actor { + val ca = createAttempt.incrementAndGet() + + if (ca <= 6 && ca % 3 == 0) + context.actorOf(Props(new Actor { override def receive = { case _ => } }), "workingChild") + + if (ca < 6) { + throw new IllegalArgumentException("OH NO!") + } + override def preStart() = { + preStartCalled.incrementAndGet() + } + override def postRestart(reason: Throwable) = { + postRestartCalled.incrementAndGet() + } + override def receive = { case m => + sender() ! 
m + } + }), + "failChild") + + override def receive = { case m => + child.forward(m) } }), "failResumer") diff --git a/akka-actor-tests/src/test/scala/akka/actor/SupervisorMiscSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SupervisorMiscSpec.scala index 2109c2c7b52..a555e7c1a7b 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/SupervisorMiscSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SupervisorMiscSpec.scala @@ -66,11 +66,11 @@ class SupervisorMiscSpec extends AkkaSpec(SupervisorMiscSpec.config) with Defaul countDownLatch.await(10, TimeUnit.SECONDS) Seq("actor1" -> actor1, "actor2" -> actor2, "actor3" -> actor3, "actor4" -> actor4) - .map { - case (id, ref) => (id, ref ? "status") + .map { case (id, ref) => + (id, ref ? "status") } - .foreach { - case (id, f) => (id, Await.result(f, timeout.duration)) should ===((id, "OK")) + .foreach { case (id, f) => + (id, Await.result(f, timeout.duration)) should ===((id, "OK")) } } } @@ -116,16 +116,15 @@ class SupervisorMiscSpec extends AkkaSpec(SupervisorMiscSpec.config) with Defaul "not be able to recreate child when old child is alive" in { val parent = system.actorOf(Props(new Actor { - def receive = { - case "engage" => - try { - val kid = context.actorOf(Props.empty, "foo") - context.stop(kid) - context.actorOf(Props.empty, "foo") - testActor ! "red" - } catch { - case _: InvalidActorNameException => testActor ! "green" - } + def receive = { case "engage" => + try { + val kid = context.actorOf(Props.empty, "foo") + context.stop(kid) + context.actorOf(Props.empty, "foo") + testActor ! "red" + } catch { + case _: InvalidActorNameException => testActor ! "green" + } } })) parent ! 
"engage" @@ -155,11 +154,11 @@ class SupervisorMiscSpec extends AkkaSpec(SupervisorMiscSpec.config) with Defaul "have access to the failing child’s reference in supervisorStrategy" in { val parent = system.actorOf(Props(new Actor { - override val supervisorStrategy = OneForOneStrategy() { - case _: Exception => testActor ! sender(); SupervisorStrategy.Stop + override val supervisorStrategy = OneForOneStrategy() { case _: Exception => + testActor ! sender(); SupervisorStrategy.Stop } - def receive = { - case "doit" => context.actorOf(Props.empty, "child") ! Kill + def receive = { case "doit" => + context.actorOf(Props.empty, "child") ! Kill } })) EventFilter[ActorKilledException](occurrences = 1).intercept { diff --git a/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala index 953fe1ba9a4..941e0132bfc 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala @@ -75,13 +75,12 @@ object SupervisorSpec { } class Creator(target: ActorRef) extends Actor { - override val supervisorStrategy = OneForOneStrategy() { - case ex => - target ! ((self, sender(), ex)) - SupervisorStrategy.Stop + override val supervisorStrategy = OneForOneStrategy() { case ex => + target ! ((self, sender(), ex)) + SupervisorStrategy.Stop } - def receive = { - case p: Props => sender() ! context.actorOf(p) + def receive = { case p: Props => + sender() ! 
context.actorOf(p) } } @@ -197,7 +196,7 @@ class SupervisorSpec def kill(pingPongActor: ActorRef) = { val result = pingPongActor.?(DieReply)(DilatedTimeout) - expectMsg(Timeout, ExceptionMessage) //this is sent from PingPongActor's postRestart() + expectMsg(Timeout, ExceptionMessage) // this is sent from PingPongActor's postRestart() intercept[RuntimeException] { Await.result(result, DilatedTimeout) } } @@ -218,7 +217,7 @@ class SupervisorSpec } "restart properly when same instance is returned" in { - val restarts = 3 //max number of restarts + val restarts = 3 // max number of restarts // can't be anonymous class due to https://github.com/akka/akka/issues/32128 class ChildActor extends Actor { @@ -259,8 +258,8 @@ class SupervisorSpec override val supervisorStrategy = OneForOneStrategy(maxNrOfRetries = restarts)(List(classOf[Exception])) val child = context.actorOf(Props(childInstance)) - def receive = { - case msg => child.forward(msg) + def receive = { case msg => + child.forward(msg) } })) @@ -458,17 +457,20 @@ class SupervisorSpec "not lose system messages when a NonFatal exception occurs when processing a system message" in { val parent = system.actorOf(Props(new Actor { - override val supervisorStrategy = OneForOneStrategy()({ + override val supervisorStrategy = OneForOneStrategy() { case e: IllegalStateException if e.getMessage == "OHNOES" => throw e case _ => SupervisorStrategy.Restart - }) - val child = context.watch(context.actorOf(Props(new Actor { - override def postRestart(reason: Throwable): Unit = testActor ! "child restarted" - def receive = { - case l: TestLatch => { Await.ready(l, 5 seconds); throw new IllegalStateException("OHNOES") } - case "test" => sender() ! "child green" - } - }), "child")) + } + val child = context.watch( + context.actorOf( + Props(new Actor { + override def postRestart(reason: Throwable): Unit = testActor ! 
"child restarted" + def receive = { + case l: TestLatch => { Await.ready(l, 5 seconds); throw new IllegalStateException("OHNOES") } + case "test" => sender() ! "child green" + } + }), + "child")) override def postRestart(reason: Throwable): Unit = testActor ! "parent restarted" @@ -517,9 +519,8 @@ class SupervisorSpec top ! creator(testActor) val middle = expectMsgType[ActorRef] middle ! creator(testActor, fail = true) - expectMsgPF(hint = "ConfigurationException") { - case (_, _, ex: ConfigurationException) => - ex.getCause should ===(failure) + expectMsgPF(hint = "ConfigurationException") { case (_, _, ex: ConfigurationException) => + ex.getCause should ===(failure) } } @@ -534,9 +535,8 @@ class SupervisorSpec top ! creator(testActor) val middle = expectMsgType[ActorRef] middle ! creator(testActor, fail = true).withRouter(RoundRobinPool(1)) - expectMsgPF(hint = "ConfigurationException") { - case (_, _, ex: ConfigurationException) => - ex.getCause should ===(failure) + expectMsgPF(hint = "ConfigurationException") { case (_, _, ex: ConfigurationException) => + ex.getCause should ===(failure) } } @@ -578,7 +578,7 @@ class SupervisorSpec val pingpong = child(supervisor, Props(new PingPongActor(testActor))) - //impossible to confirm if the restart window is infinite, so making sure maxNrOfRetries is respected correctly + // impossible to confirm if the restart window is infinite, so making sure maxNrOfRetries is respected correctly kill(pingpong) kill(pingpong) kill(pingpong) diff --git a/akka-actor-tests/src/test/scala/akka/actor/SupervisorTreeSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SupervisorTreeSpec.scala index cbd32b84a9c..22520e96a6e 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/SupervisorTreeSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SupervisorTreeSpec.scala @@ -22,8 +22,8 @@ class SupervisorTreeSpec extends AkkaSpec with ImplicitSender with DefaultTimeou val p = Props(new Actor { override val supervisorStrategy = 
OneForOneStrategy(maxNrOfRetries = 3, withinTimeRange = 1 second)(List(classOf[Exception])) - def receive = { - case p: Props => sender() ! context.actorOf(p) + def receive = { case p: Props => + sender() ! context.actorOf(p) } override def preRestart(cause: Throwable, msg: Option[Any]): Unit = { testActor ! self.path } }) diff --git a/akka-actor-tests/src/test/scala/akka/actor/Ticket669Spec.scala b/akka-actor-tests/src/test/scala/akka/actor/Ticket669Spec.scala index 2a8bb297778..136f3770495 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/Ticket669Spec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/Ticket669Spec.scala @@ -21,7 +21,7 @@ class Ticket669Spec extends AkkaSpec with BeforeAndAfterAll with ImplicitSender // TODO: does this really make sense? override def atStartup(): Unit = { - Thread.interrupted() //remove interrupted status. + Thread.interrupted() // remove interrupted status. } "A supervised actor with lifecycle PERMANENT" should { @@ -53,8 +53,8 @@ class Ticket669Spec extends AkkaSpec with BeforeAndAfterAll with ImplicitSender object Ticket669Spec { class Supervised extends Actor { - def receive = { - case _ => throw new Exception("test") + def receive = { case _ => + throw new Exception("test") } override def preRestart(reason: scala.Throwable, msg: Option[Any]): Unit = { diff --git a/akka-actor-tests/src/test/scala/akka/actor/TimerSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/TimerSpec.scala index 3876436092a..67b7448c928 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/TimerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/TimerSpec.scala @@ -318,8 +318,8 @@ class TimersAndStashSpec extends AkkaSpec { class ActorWithTimerAndStash(probe: ActorRef) extends Actor with Timers with Stash { timers.startSingleTimer("key", "scheduled", 50.millis) def receive: Receive = stashing - def notStashing: Receive = { - case msg => probe ! msg + def notStashing: Receive = { case msg => + probe ! 
msg } def stashing: Receive = { diff --git a/akka-actor-tests/src/test/scala/akka/actor/UidClashTest.scala b/akka-actor-tests/src/test/scala/akka/actor/UidClashTest.scala index 136724cad9e..f8baacf8659 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/UidClashTest.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/UidClashTest.scala @@ -25,7 +25,7 @@ object UidClashTest { val eventStream: EventStream) extends MinimalActorRef { - //Ignore everything + // Ignore everything override def isTerminated: Boolean = true override def sendSystemMessage(message: SystemMessage): Unit = () override def !(message: Any)(implicit sender: ActorRef = Actor.noSender): Unit = () diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala index 1dd5f8e4bfe..f3bd735cd1b 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala @@ -94,7 +94,7 @@ object ActorModelSpec { ack(); sender() ! Status.Failure(new ActorInterruptedException(new InterruptedException("Ping!"))); busy.switchOff(()); throw new InterruptedException("Ping!") } - case InterruptNicely(msg) => { ack(); sender() ! msg; busy.switchOff(()); Thread.currentThread().interrupt() } + case InterruptNicely(msg) => { ack(); sender() ! 
msg; busy.switchOff(()); Thread.currentThread().interrupt() } case ThrowException(e: Throwable) => { ack(); busy.switchOff(()); throw e } case DoubleStop => { ack(); context.stop(self); context.stop(self); busy.switchOff } } @@ -163,8 +163,8 @@ object ActorModelSpec { } } - def assertDispatcher(dispatcher: MessageDispatcherInterceptor)(stops: Long = dispatcher.stops.get())( - implicit system: ActorSystem): Unit = { + def assertDispatcher(dispatcher: MessageDispatcherInterceptor)(stops: Long = dispatcher.stops.get())(implicit + system: ActorSystem): Unit = { val deadline = System.currentTimeMillis + dispatcher.shutdownTimeout.toMillis * 5 try { await(deadline)(stops == dispatcher.stops.get) @@ -260,10 +260,13 @@ abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with Defa def newTestActor(dispatcher: String) = system.actorOf(Props[DispatcherActor]().withDispatcher(dispatcher)) def awaitStarted(ref: ActorRef): Unit = { - awaitCond(ref match { - case r: RepointableRef => r.isStarted - case _ => true - }, 1 second, 10 millis) + awaitCond( + ref match { + case r: RepointableRef => r.isStarted + case _ => true + }, + 1 second, + 10 millis) } protected def interceptedDispatcher(): MessageDispatcherInterceptor @@ -428,10 +431,9 @@ abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with Defa val c = system.scheduler.scheduleOnce(2.seconds) { import akka.util.ccompat.JavaConverters._ - Thread.getAllStackTraces().asScala.foreach { - case (thread, stack) => - println(s"$thread:") - stack.foreach(s => println(s"\t$s")) + Thread.getAllStackTraces().asScala.foreach { case (thread, stack) => + println(s"$thread:") + stack.foreach(s => println(s"\t$s")) } } assert(Await.result(f1, timeout.duration) === "foo") @@ -509,7 +511,8 @@ object DispatcherModelSpec { } """ + // use unique dispatcher id for each test, since MessageDispatcherInterceptor holds state - (for (n <- 1 to 30) yield """ + (for (n <- 1 to 30) + yield """ test-dispatcher-%s { 
type = "akka.actor.dispatch.DispatcherModelSpec$MessageDispatcherInterceptorConfigurator" }""".format(n)).mkString @@ -647,7 +650,7 @@ class BalancingDispatcherModelSpec extends ActorModelSpec(BalancingDispatcherMod system.stop(a) system.stop(b) - while (!a.isTerminated && !b.isTerminated) {} //Busy wait for termination + while (!a.isTerminated && !b.isTerminated) {} // Busy wait for termination assertRefDefaultZero(a)(registers = 1, unregisters = 1, msgsReceived = 1, msgsProcessed = 1) assertRefDefaultZero(b)(registers = 1, unregisters = 1, msgsReceived = 1, msgsProcessed = 1) diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatcherActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatcherActorSpec.scala index d1e72a06ae4..e4b30c389cd 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatcherActorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatcherActorSpec.scala @@ -49,8 +49,8 @@ object DispatcherActorSpec { val oneWay = new CountDownLatch(1) } class OneWayTestActor extends Actor { - def receive = { - case "OneWay" => OneWayTestActor.oneWay.countDown() + def receive = { case "OneWay" => + OneWayTestActor.oneWay.countDown() } } } @@ -111,8 +111,8 @@ class DispatcherActorSpec extends AkkaSpec(DispatcherActorSpec.config) with Defa val ready = new CountDownLatch(1) val fastOne = system.actorOf(Props(new Actor { - def receive = { - case "ping" => if (works.get) latch.countDown(); context.stop(self) + def receive = { case "ping" => + if (works.get) latch.countDown(); context.stop(self) } }).withDispatcher(throughputDispatcher)) diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatcherActorsSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatcherActorsSpec.scala index 81dfaa6211c..c1f1f168e1e 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatcherActorsSpec.scala +++ 
b/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatcherActorsSpec.scala @@ -9,9 +9,7 @@ import java.util.concurrent.CountDownLatch import akka.actor._ import akka.testkit.AkkaSpec -/** - * Tests the behavior of the executor based event driven dispatcher when multiple actors are being dispatched on it. - */ +/** Tests the behavior of the executor based event driven dispatcher when multiple actors are being dispatched on it. */ class DispatcherActorsSpec extends AkkaSpec { class SlowActor(finishedCounter: CountDownLatch) extends Actor { diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatchersSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatchersSpec.scala index 8c8b1c5f237..3f07e618bf5 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatchersSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatchersSpec.scala @@ -69,8 +69,8 @@ object DispatchersSpec { """ class ThreadNameEcho extends Actor { - def receive = { - case _ => sender() ! Thread.currentThread.getName + def receive = { case _ => + sender() ! Thread.currentThread.getName } } @@ -135,8 +135,7 @@ class DispatchersSpec extends AkkaSpec(DispatchersSpec.config) with ImplicitSend def assertMyDispatcherIsUsed(actor: ActorRef): Unit = { actor ! "what's the name?" 
val Expected = R("(DispatchersSpec-myapp.mydispatcher-[1-9][0-9]*)") - expectMsgPF() { - case Expected(_) => + expectMsgPF() { case Expected(_) => } } @@ -175,7 +174,7 @@ class DispatchersSpec extends AkkaSpec(DispatchersSpec.config) with ImplicitSend } "get the correct types of dispatchers" in { - //All created/obtained dispatchers are of the expected type/instance + // All created/obtained dispatchers are of the expected type/instance assert(typesAndValidators.forall(tuple => tuple._2(allDispatchers(tuple._1)))) } @@ -224,32 +223,28 @@ class DispatchersSpec extends AkkaSpec(DispatchersSpec.config) with ImplicitSend "include system name and dispatcher id in thread names for thread-pool-executor" in { system.actorOf(Props[ThreadNameEcho]().withDispatcher("myapp.thread-pool-dispatcher")) ! "what's the name?" val Expected = R("(DispatchersSpec-myapp.thread-pool-dispatcher-[1-9][0-9]*)") - expectMsgPF() { - case Expected(_) => + expectMsgPF() { case Expected(_) => } } "include system name and dispatcher id in thread names for default-dispatcher" in { system.actorOf(Props[ThreadNameEcho]()) ! "what's the name?" val Expected = R("(DispatchersSpec-akka.actor.default-dispatcher-[1-9][0-9]*)") - expectMsgPF() { - case Expected(_) => + expectMsgPF() { case Expected(_) => } } "include system name and dispatcher id in thread names for pinned dispatcher" in { system.actorOf(Props[ThreadNameEcho]().withDispatcher("myapp.my-pinned-dispatcher")) ! "what's the name?" val Expected = R("(DispatchersSpec-myapp.my-pinned-dispatcher-[1-9][0-9]*)") - expectMsgPF() { - case Expected(_) => + expectMsgPF() { case Expected(_) => } } "include system name and dispatcher id in thread names for balancing dispatcher" in { system.actorOf(Props[ThreadNameEcho]().withDispatcher("myapp.balancing-dispatcher")) ! "what's the name?" 
val Expected = R("(DispatchersSpec-myapp.balancing-dispatcher-[1-9][0-9]*)") - expectMsgPF() { - case Expected(_) => + expectMsgPF() { case Expected(_) => } } @@ -268,19 +263,16 @@ class DispatchersSpec extends AkkaSpec(DispatchersSpec.config) with ImplicitSend val routee = expectMsgType[ActorIdentity].ref.get routee ! "what's the name?" val Expected = R("""(DispatchersSpec-akka\.actor\.deployment\./pool1\.pool-dispatcher-[1-9][0-9]*)""") - expectMsgPF() { - case Expected(_) => + expectMsgPF() { case Expected(_) => } } "use balancing-pool router with special routees mailbox of deployment config" in { system.actorOf(FromConfig.props(Props[ThreadNameEcho]()), name = "balanced") ! "what's the name?" val Expected = R("""(DispatchersSpec-BalancingPool-/balanced-[1-9][0-9]*)""") - expectMsgPF() { - case Expected(_) => + expectMsgPF() { case Expected(_) => } - expectMsgPF() { - case Expected(_) => + expectMsgPF() { case Expected(_) => } } } diff --git a/akka-actor-tests/src/test/scala/akka/actor/dungeon/DispatchSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dungeon/DispatchSpec.scala index 89077f2b566..89b82ddadbf 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dungeon/DispatchSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dungeon/DispatchSpec.scala @@ -11,15 +11,16 @@ import akka.testkit._ object DispatchSpec { class UnserializableMessageClass class EmptyActor extends Actor { - override def receive = { - case _: UnserializableMessageClass => // OK + override def receive = { case _: UnserializableMessageClass => // OK } } } -class DispatchSpec extends AkkaSpec(""" +class DispatchSpec + extends AkkaSpec(""" akka.actor.serialize-messages = on akka.actor.no-serialization-verification-needed-class-prefix = [] - """) with DefaultTimeout { + """) + with DefaultTimeout { import DispatchSpec._ "The dispatcher" should { diff --git a/akka-actor-tests/src/test/scala/akka/actor/routing/ListenerSpec.scala 
b/akka-actor-tests/src/test/scala/akka/actor/routing/ListenerSpec.scala index 9fc132f11c2..58047c1972d 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/routing/ListenerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/routing/ListenerSpec.scala @@ -22,8 +22,8 @@ class ListenerSpec extends AkkaSpec { val barCount = new AtomicInteger(0) val broadcast = system.actorOf(Props(new Actor with Listeners { - def receive = listenerManagement.orElse { - case "foo" => gossip("bar") + def receive = listenerManagement.orElse { case "foo" => + gossip("bar") } })) diff --git a/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala b/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala index 08f94c6b41b..707cba86eae 100644 --- a/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala @@ -89,7 +89,7 @@ class ConfigSpec extends AkkaSpec(ConfigFactory.defaultReference(ActorSystem.fin { val c = config.getConfig("akka.actor.default-dispatcher") - //General dispatcher config + // General dispatcher config { c.getString("type") should ===("Dispatcher") @@ -100,13 +100,13 @@ class ConfigSpec extends AkkaSpec(ConfigFactory.defaultReference(ActorSystem.fin c.getBoolean("attempt-teamwork") should ===(true) } - //Default executor config + // Default executor config { val pool = c.getConfig("default-executor") pool.getString("fallback") should ===("fork-join-executor") } - //Fork join executor config + // Fork join executor config { val pool = c.getConfig("fork-join-executor") @@ -116,7 +116,7 @@ class ConfigSpec extends AkkaSpec(ConfigFactory.defaultReference(ActorSystem.fin pool.getString("task-peeking-mode") should be("FIFO") } - //Thread pool executor config + // Thread pool executor config { val pool = c.getConfig("thread-pool-executor") diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/ControlAwareDispatcherSpec.scala 
b/akka-actor-tests/src/test/scala/akka/dispatch/ControlAwareDispatcherSpec.scala index c5a8e4ce507..94d6a9278df 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/ControlAwareDispatcherSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/ControlAwareDispatcherSpec.scala @@ -47,8 +47,8 @@ class ControlAwareDispatcherSpec extends AkkaSpec(ControlAwareDispatcherSpec.con self ! "test2" self ! ImportantMessage - def receive = { - case x => testActor ! x + def receive = { case x => + testActor ! x } }).withDispatcher(dispatcherKey)) diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/DispatcherShutdownSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/DispatcherShutdownSpec.scala index 58a40b81708..5b822d77d7f 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/DispatcherShutdownSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/DispatcherShutdownSpec.scala @@ -29,7 +29,8 @@ class DispatcherShutdownSpec extends AnyWordSpec with Matchers { .map(_.getThreadName) .filter(name => name.startsWith("DispatcherShutdownSpec-akka.actor.default") || name.startsWith( - "DispatcherShutdownSpec-akka.actor.internal")) // nothing is run on default without any user actors started + "DispatcherShutdownSpec-akka.actor.internal" + )) // nothing is run on default without any user actors started .size val system = ActorSystem("DispatcherShutdownSpec") diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/ExecutionContextSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/ExecutionContextSpec.scala index 17becb512cd..0f9d2b2320e 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/ExecutionContextSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/ExecutionContextSpec.scala @@ -27,16 +27,16 @@ class ExecutionContextSpec extends AkkaSpec with DefaultTimeout { val es = Executors.newCachedThreadPool() try { val executor: Executor with ExecutionContext = ExecutionContext.fromExecutor(es) - executor should not be 
(null) + executor should not be null val executorService: ExecutorService with ExecutionContext = ExecutionContext.fromExecutorService(es) - executorService should not be (null) + executorService should not be null val jExecutor: ExecutionContextExecutor = ExecutionContext.fromExecutor(es) - jExecutor should not be (null) + jExecutor should not be null val jExecutorService: ExecutionContextExecutorService = ExecutionContexts.fromExecutorService(es) - jExecutorService should not be (null) + jExecutorService should not be null } finally { es.shutdown } @@ -60,7 +60,7 @@ class ExecutionContextSpec extends AkkaSpec with DefaultTimeout { (1 to 100).foreach { _ => batchable { if (callingThreadLock.get != 0) p.tryFailure(new IllegalStateException("Batch was executed inline!")) - else if (count.incrementAndGet == 100) p.trySuccess(()) //Done + else if (count.incrementAndGet == 100) p.trySuccess(()) // Done else if (lock.compareAndSet(0, 1)) { try Thread.sleep(10) finally lock.compareAndSet(1, 0) @@ -157,16 +157,15 @@ class ExecutionContextSpec extends AkkaSpec with DefaultTimeout { "work with same-thread dispatcher plus blocking" in { val a = TestActorRef(Props(new Actor { - def receive = { - case msg => - blocking { - sender() ! msg - } + def receive = { case msg => + blocking { + sender() ! msg + } } })) val b = TestActorRef(Props(new Actor { - def receive = { - case msg => a.forward(msg) + def receive = { case msg => + a.forward(msg) } })) val p = TestProbe() diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/ForkJoinPoolStarvationSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/ForkJoinPoolStarvationSpec.scala index 673698681b9..fbae7880e71 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/ForkJoinPoolStarvationSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/ForkJoinPoolStarvationSpec.scala @@ -27,17 +27,15 @@ object ForkJoinPoolStarvationSpec { class SelfBusyActor extends Actor { self ! 
"tick" - override def receive = { - case "tick" => - self ! "tick" + override def receive = { case "tick" => + self ! "tick" } } class InnocentActor extends Actor { - override def receive = { - case "ping" => - sender() ! "All fine" + override def receive = { case "ping" => + sender() ! "All fine" } } diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala index 65f38fd3970..381395c2533 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala @@ -82,7 +82,7 @@ abstract class MailboxSpec extends AkkaSpec with BeforeAndAfterAll with BeforeAn } } - //CANDIDATE FOR TESTKIT + // CANDIDATE FOR TESTKIT def spawn[T <: AnyRef](fun: => T): Future[T] = Future(fun)(ExecutionContext.global) def createMessageInvocation(msg: Any): Envelope = Envelope(msg, system.deadLetters, system) @@ -138,7 +138,7 @@ abstract class MailboxSpec extends AkkaSpec with BeforeAndAfterAll with BeforeAn val q = factory(config) ensureInitialMailboxState(config, q) - EventFilter.warning(pattern = "received dead letter", occurrences = (enqueueN - dequeueN)).intercept { + EventFilter.warning(pattern = "received dead letter", occurrences = enqueueN - dequeueN).intercept { def createProducer(fromNum: Int, toNum: Int): Future[Vector[Envelope]] = spawn { val messages = Vector() ++ (for (i <- fromNum to toNum) yield createMessageInvocation(i)) @@ -171,13 +171,13 @@ abstract class MailboxSpec extends AkkaSpec with BeforeAndAfterAll with BeforeAn val ps = producers.map(Await.result(_, remainingOrDefault)) val cs = consumers.map(Await.result(_, remainingOrDefault)) - ps.map(_.size).sum should ===(enqueueN) //Must have produced 1000 messages - cs.map(_.size).sum should ===(dequeueN) //Must have consumed all produced messages - //No message is allowed to be consumed by more than one consumer + ps.map(_.size).sum 
should ===(enqueueN) // Must have produced 1000 messages + cs.map(_.size).sum should ===(dequeueN) // Must have consumed all produced messages + // No message is allowed to be consumed by more than one consumer cs.flatten.distinct.size should ===(dequeueN) - //All consumed messages should have been produced + // All consumed messages should have been produced cs.flatten.diff(ps.flatten).size should ===(0) - //The ones that were produced and not consumed + // The ones that were produced and not consumed ps.flatten.diff(cs.flatten).size should ===(enqueueN - dequeueN) } } @@ -247,10 +247,13 @@ class CustomMailboxSpec extends AkkaSpec(CustomMailboxSpec.config) { "Dispatcher configuration" must { "support custom mailboxType" in { val actor = system.actorOf(Props.empty.withDispatcher("my-dispatcher")) - awaitCond(actor match { - case r: RepointableRef => r.isStarted - case _ => true - }, 1 second, 10 millis) + awaitCond( + actor match { + case r: RepointableRef => r.isStarted + case _ => true + }, + 1 second, + 10 millis) val queue = actor.asInstanceOf[ActorRefWithCell].underlying.asInstanceOf[ActorCell].mailbox.messageQueue queue.getClass should ===(classOf[CustomMailboxSpec.MyMailbox]) } @@ -290,12 +293,11 @@ class SingleConsumerOnlyMailboxVerificationSpec val runner = system.actorOf(Props(new Actor { val a, b = context.watch(context.actorOf(Props(new Actor { var n = total / 2 - def receive = { - case Ping => - n -= 1 - sender() ! Ping - if (n == 0) - context.stop(self) + def receive = { case Ping => + n -= 1 + sender() ! 
Ping + if (n == 0) + context.stop(self) } }).withDispatcher(dispatcherId))) def receive = { diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/PriorityDispatcherSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/PriorityDispatcherSpec.scala index a2370ada6e1..48eafa42676 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/PriorityDispatcherSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/PriorityDispatcherSpec.scala @@ -28,17 +28,20 @@ object PriorityDispatcherSpec { class Unbounded(@unused settings: ActorSystem.Settings, @unused config: Config) extends UnboundedPriorityMailbox(PriorityGenerator({ - case i: Int => i //Reverse order + case i: Int => i // Reverse order case Result => Int.MaxValue case _ => throw new RuntimeException() // compiler exhaustiveness check pleaser }: Any => Int)) class Bounded(@unused settings: ActorSystem.Settings, @unused config: Config) - extends BoundedPriorityMailbox(PriorityGenerator({ - case i: Int => i //Reverse order - case Result => Int.MaxValue - case _ => throw new RuntimeException() // compiler exhaustiveness check pleaser - }: Any => Int), 1000, 10 seconds) + extends BoundedPriorityMailbox( + PriorityGenerator({ + case i: Int => i // Reverse order + case Result => Int.MaxValue + case _ => throw new RuntimeException() // compiler exhaustiveness check pleaser + }: Any => Int), + 1000, + 10 seconds) } diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/StablePriorityDispatcherSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/StablePriorityDispatcherSpec.scala index e77c3fed038..748d9461e2a 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/StablePriorityDispatcherSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/StablePriorityDispatcherSpec.scala @@ -34,12 +34,15 @@ object StablePriorityDispatcherSpec { }: Any => Int)) class Bounded(@unused settings: ActorSystem.Settings, @unused config: Config) - extends BoundedStablePriorityMailbox(PriorityGenerator({ - 
case i: Int if i <= 100 => i // Small integers have high priority - case _: Int => 101 // Don't care for other integers - case Result => Int.MaxValue - case _ => throw new RuntimeException() // compiler exhaustiveness check pleaser - }: Any => Int), 1000, 10 seconds) + extends BoundedStablePriorityMailbox( + PriorityGenerator({ + case i: Int if i <= 100 => i // Small integers have high priority + case _: Int => 101 // Don't care for other integers + case Result => Int.MaxValue + case _ => throw new RuntimeException() // compiler exhaustiveness check pleaser + }: Any => Int), + 1000, + 10 seconds) } diff --git a/akka-actor-tests/src/test/scala/akka/event/EventBusSpec.scala b/akka-actor-tests/src/test/scala/akka/event/EventBusSpec.scala index 2af496ae758..8536098713a 100644 --- a/akka-actor-tests/src/test/scala/akka/event/EventBusSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/event/EventBusSpec.scala @@ -13,8 +13,8 @@ import akka.testkit._ object EventBusSpec { class TestActorWrapperActor(testActor: ActorRef) extends Actor { - def receive = { - case x => testActor.forward(x) + def receive = { case x => + testActor.forward(x) } } } @@ -308,7 +308,7 @@ class ScanningEventBusSpec extends EventBusSpec("ScanningEventBus") { def createNewEventBus(): BusType = new MyScanningEventBus - def createEvents(numberOfEvents: Int) = (0 until numberOfEvents) + def createEvents(numberOfEvents: Int) = 0 until numberOfEvents def createSubscriber(pipeTo: ActorRef) = new Procedure[Int] { def apply(i: Int) = pipeTo ! i } @@ -339,7 +339,7 @@ class LookupEventBusSpec extends EventBusSpec("LookupEventBus") { def createNewEventBus(): BusType = new MyLookupEventBus - def createEvents(numberOfEvents: Int) = (0 until numberOfEvents) + def createEvents(numberOfEvents: Int) = 0 until numberOfEvents def createSubscriber(pipeTo: ActorRef) = new Procedure[Int] { def apply(i: Int) = pipeTo ! 
i } diff --git a/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala b/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala index d5b3d3f9c0e..8ffec14fc2f 100644 --- a/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala @@ -74,10 +74,10 @@ class EventStreamSpec extends AkkaSpec(EventStreamSpec.config) { "An EventStream" must { "manage subscriptions" in { - //#event-bus-start-unsubscriber-scala + // #event-bus-start-unsubscriber-scala val bus = new EventStream(system, true) bus.startUnsubscriber() - //#event-bus-start-unsubscriber-scala + // #event-bus-start-unsubscriber-scala bus.subscribe(testActor, classOf[M]) bus.publish(M(42)) @@ -293,9 +293,11 @@ class EventStreamSpec extends AkkaSpec(EventStreamSpec.config) { val a1, a2 = TestProbe() val tm = new A - val target = sys.actorOf(Props(new Actor { - def receive = { case in => a1.ref.forward(in) } - }), "to-be-killed") + val target = sys.actorOf( + Props(new Actor { + def receive = { case in => a1.ref.forward(in) } + }), + "to-be-killed") es.subscribe(a2.ref, classOf[Any]) es.subscribe(target, classOf[A]) should ===(true) @@ -321,9 +323,11 @@ class EventStreamSpec extends AkkaSpec(EventStreamSpec.config) { val es = sys.eventStream val probe = TestProbe() - val terminated = system.actorOf(Props(new Actor { - def receive = { case _ => } - }), "to-be-killed") + val terminated = system.actorOf( + Props(new Actor { + def receive = { case _ => } + }), + "to-be-killed") watch(terminated) terminated ! 
PoisonPill @@ -394,7 +398,10 @@ class EventStreamSpec extends AkkaSpec(EventStreamSpec.config) { es.subscribe(a2.ref, classOf[A]) es.subscribe(a2.ref, classOf[T]) fishForDebugMessage(a1, s"watching ${a2.ref}") - fishForDebugMessage(a1, s"watching ${a2.ref}") // the unsubscriber "starts to watch" each time, as watching is idempotent + fishForDebugMessage( + a1, + s"watching ${a2.ref}" + ) // the unsubscriber "starts to watch" each time, as watching is idempotent es.unsubscribe(a2.ref, classOf[A]) should equal(true) fishForDebugMessage(a1, s"unsubscribing ${a2.ref} from channel class akka.event.EventStreamSpec$$A") diff --git a/akka-actor-tests/src/test/scala/akka/event/LoggerSpec.scala b/akka-actor-tests/src/test/scala/akka/event/LoggerSpec.scala index 3e43db11974..d7bbb8bee88 100644 --- a/akka-actor-tests/src/test/scala/akka/event/LoggerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/event/LoggerSpec.scala @@ -27,40 +27,49 @@ import akka.util.Helpers object LoggerSpec { - val defaultConfig = ConfigFactory.parseString(""" + val defaultConfig = ConfigFactory + .parseString(""" akka { stdout-loglevel = "WARNING" loglevel = "DEBUG" # test verifies debug loggers = ["akka.event.LoggerSpec$TestLogger1"] } - """).withFallback(AkkaSpec.testConf) + """) + .withFallback(AkkaSpec.testConf) - val slowConfig = ConfigFactory.parseString(""" + val slowConfig = ConfigFactory + .parseString(""" akka { stdout-loglevel = "ERROR" loglevel = "ERROR" loggers = ["akka.event.LoggerSpec$SlowLogger"] } - """).withFallback(AkkaSpec.testConf) + """) + .withFallback(AkkaSpec.testConf) - val noLoggingConfig = ConfigFactory.parseString(""" + val noLoggingConfig = ConfigFactory + .parseString(""" akka { stdout-loglevel = "OFF" loglevel = "OFF" loggers = ["akka.event.LoggerSpec$TestLogger1"] } - """).withFallback(AkkaSpec.testConf) + """) + .withFallback(AkkaSpec.testConf) val multipleConfig = - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" akka { stdout-loglevel = "OFF" 
loglevel = "WARNING" loggers = ["akka.event.LoggerSpec$TestLogger1", "akka.event.LoggerSpec$TestLogger2"] } - """).withFallback(AkkaSpec.testConf) + """) + .withFallback(AkkaSpec.testConf) - val ticket3165Config = ConfigFactory.parseString(s""" + val ticket3165Config = ConfigFactory + .parseString(s""" akka { stdout-loglevel = "WARNING" loglevel = "DEBUG" # test verifies debug @@ -74,15 +83,18 @@ object LoggerSpec { } } } - """).withFallback(AkkaSpec.testConf) + """) + .withFallback(AkkaSpec.testConf) - val ticket3671Config = ConfigFactory.parseString(""" + val ticket3671Config = ConfigFactory + .parseString(""" akka { stdout-loglevel = "WARNING" loglevel = "WARNING" loggers = ["akka.event.LoggerSpec$TestLogger1"] } - """).withFallback(AkkaSpec.testConf) + """) + .withFallback(AkkaSpec.testConf) final case class SetTarget(ref: ActorRef, qualifier: Int) @@ -96,7 +108,7 @@ object LoggerSpec { sender() ! LoggerInitialized case SetTarget(ref, `qualifier`) => target = Some(ref) - ref ! ("OK") + ref ! "OK" case event: LogEvent if !event.mdc.isEmpty => print(event) target.foreach { _ ! 
event } @@ -133,8 +145,8 @@ object LoggerSpec { always ++ perMessage } - def receive: Receive = { - case m: String => log.warning(m) + def receive: Receive = { case m: String => + log.warning(m) } } @@ -173,7 +185,7 @@ class LoggerSpec extends AnyWordSpec with Matchers { "log messages to standard output" in { val out = createSystemAndLogToBuffer("defaultLogger", defaultConfig, true) - out.size should be > (0) + out.size should be > 0 } "drain logger queue on system.terminate" in { diff --git a/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala b/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala index f7fbd8bfd5f..9180ac3c4e2 100644 --- a/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala @@ -27,9 +27,11 @@ object LoggingReceiveSpec { class LoggingReceiveSpec extends AnyWordSpec with BeforeAndAfterAll { import LoggingReceiveSpec._ - val config = ConfigFactory.parseString(""" + val config = ConfigFactory + .parseString(""" akka.loglevel=DEBUG # test verifies debug - """).withFallback(AkkaSpec.testConf) + """) + .withFallback(AkkaSpec.testConf) val appLogging = ActorSystem("logging", ConfigFactory.parseMap(Map("akka.actor.debug.receive" -> true).asJava).withFallback(config)) val appAuto = ActorSystem( @@ -49,8 +51,8 @@ class LoggingReceiveSpec extends AnyWordSpec with BeforeAndAfterAll { appLifecycle.eventStream.publish(filter) def ignoreMute(t: TestKit): Unit = { - t.ignoreMsg { - case (_: TestEvent.Mute | _: TestEvent.UnMute) => true + t.ignoreMsg { case (_: TestEvent.Mute | _: TestEvent.UnMute) => + true } } @@ -68,9 +70,10 @@ class LoggingReceiveSpec extends AnyWordSpec with BeforeAndAfterAll { system.eventStream.subscribe(testActor, classOf[UnhandledMessage]) val a = system.actorOf(Props(new Actor { def receive = - new LoggingReceive(Some("funky"), { - case null => - }) + new LoggingReceive( + Some("funky"), + { case null => + }) })) a ! 
"hallo" expectMsg( @@ -89,15 +92,14 @@ class LoggingReceiveSpec extends AnyWordSpec with BeforeAndAfterAll { system.eventStream.subscribe(testActor, classOf[Logging.Debug]) system.eventStream.subscribe(testActor, classOf[UnhandledMessage]) - val r: Actor.Receive = { - case null => + val r: Actor.Receive = { case null => } val actor = TestActorRef(new Actor { def switch: Actor.Receive = { case "becomenull" => context.become(r, false) } def receive = - switch.orElse(LoggingReceive { - case _ => sender() ! "x" + switch.orElse(LoggingReceive { case _ => + sender() ! "x" }) }) @@ -110,8 +112,8 @@ class LoggingReceiveSpec extends AnyWordSpec with BeforeAndAfterAll { actor ! "becomenull" actor ! "bah" - expectMsgPF() { - case UnhandledMessage("bah", _, `actor`) => true + expectMsgPF() { case UnhandledMessage("bah", _, `actor`) => + true } } } @@ -121,8 +123,8 @@ class LoggingReceiveSpec extends AnyWordSpec with BeforeAndAfterAll { system.eventStream.subscribe(testActor, classOf[Logging.Debug]) val actor = TestActorRef(new Actor { def receive = - LoggingReceive(LoggingReceive { - case _ => sender() ! "x" + LoggingReceive(LoggingReceive { case _ => + sender() ! "x" }) }) actor ! "buh" @@ -139,8 +141,7 @@ class LoggingReceiveSpec extends AnyWordSpec with BeforeAndAfterAll { val myMDC = Map("hello" -> "mdc") val a = system.actorOf(Props(new Actor with DiagnosticActorLogging { override def mdc(currentMessage: Any) = myMDC - def receive = LoggingReceive { - case "hello" => + def receive = LoggingReceive { case "hello" => } })) a ! "hello" @@ -154,8 +155,8 @@ class LoggingReceiveSpec extends AnyWordSpec with BeforeAndAfterAll { new TestKit(appLogging) with ImplicitSender { system.eventStream.subscribe(testActor, classOf[Logging.Info]) val actor = TestActorRef(new Actor { - def receive = LoggingReceive(Logging.InfoLevel) { - case _ => sender() ! "x" + def receive = LoggingReceive(Logging.InfoLevel) { case _ => + sender() ! "x" } }) actor ! 
"buh" @@ -176,8 +177,7 @@ class LoggingReceiveSpec extends AnyWordSpec with BeforeAndAfterAll { new TestKit(appAuto) { system.eventStream.subscribe(testActor, classOf[Logging.Debug]) val actor = TestActorRef(new Actor { - def receive = { - case _ => + def receive = { case _ => } }) val name = actor.path.toString diff --git a/akka-actor-tests/src/test/scala/akka/event/jul/JavaLoggerSpec.scala b/akka-actor-tests/src/test/scala/akka/event/jul/JavaLoggerSpec.scala index bdd0755f6d9..fac9766e1da 100644 --- a/akka-actor-tests/src/test/scala/akka/event/jul/JavaLoggerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/event/jul/JavaLoggerSpec.scala @@ -58,9 +58,9 @@ class JavaLoggerSpec extends AkkaSpec(JavaLoggerSpec.config) { val record = expectMsgType[logging.LogRecord] - record should not be (null) - record.getMillis should not be (0) - record.getThreadID should not be (0) + record should not be null + record.getMillis should not be 0 + record.getThreadID should not be 0 record.getLevel should ===(logging.Level.SEVERE) record.getMessage should ===("Simulated error") record.getThrown.getClass should ===(classOf[JavaLoggerSpec.SimulatedExc]) @@ -73,9 +73,9 @@ class JavaLoggerSpec extends AkkaSpec(JavaLoggerSpec.config) { val record = expectMsgType[logging.LogRecord] - record should not be (null) - record.getMillis should not be (0) - record.getThreadID should not be (0) + record should not be null + record.getMillis should not be 0 + record.getThreadID should not be 0 record.getLevel should ===(logging.Level.INFO) record.getMessage should ===("3 is the magic number") record.getThrown should ===(null) diff --git a/akka-actor-tests/src/test/scala/akka/io/CapacityLimitSpec.scala b/akka-actor-tests/src/test/scala/akka/io/CapacityLimitSpec.scala index 82a08477838..16af34f813c 100644 --- a/akka-actor-tests/src/test/scala/akka/io/CapacityLimitSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/io/CapacityLimitSpec.scala @@ -9,10 +9,12 @@ import Tcp._ import akka.testkit.{ 
AkkaSpec, TestProbe } import akka.testkit.SocketUtil.temporaryServerAddresses -class CapacityLimitSpec extends AkkaSpec(""" +class CapacityLimitSpec + extends AkkaSpec(""" akka.loglevel = ERROR akka.io.tcp.max-channels = 4 - """) with TcpIntegrationSpecSupport { + """) + with TcpIntegrationSpecSupport { "The TCP transport implementation" should { diff --git a/akka-actor-tests/src/test/scala/akka/io/TcpConnectionSpec.scala b/akka-actor-tests/src/test/scala/akka/io/TcpConnectionSpec.scala index 7e5331136e4..a5a7afba996 100644 --- a/akka-actor-tests/src/test/scala/akka/io/TcpConnectionSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/io/TcpConnectionSpec.scala @@ -39,12 +39,14 @@ object TcpConnectionSpec { final case class Registration(channel: SelectableChannel, initialOps: Int) extends NoSerializationVerificationNeeded } -class TcpConnectionSpec extends AkkaSpec(""" +class TcpConnectionSpec + extends AkkaSpec(""" akka.loglevel = DEBUG akka.loggers = ["akka.testkit.SilenceAllTestEventListener"] akka.io.tcp.trace-logging = on akka.io.tcp.register-timeout = 500ms - """) with WithLogCapturing { thisSpecs => + """) + with WithLogCapturing { thisSpecs => import TcpConnectionSpec._ // Helper to avoid Windows localization specific differences @@ -379,33 +381,35 @@ class TcpConnectionSpec extends AkkaSpec(""" ConfigFactory.parseString("akka.io.tcp.direct-buffer-size = 1k").withFallback(AkkaSpec.testConf) override implicit lazy val system: ActorSystem = ActorSystem("respectPullModeTest", config) - try run { - val maxBufferSize = 1 * 1024 - val ts = "t" * maxBufferSize - val us = "u" * (maxBufferSize / 2) + try + run { + val maxBufferSize = 1 * 1024 + val ts = "t" * maxBufferSize + val us = "u" * (maxBufferSize / 2) - // send a batch that is bigger than the default buffer to make sure we don't recurse and - // send more than one Received messages - serverSideChannel.write(ByteBuffer.wrap((ts ++ us).getBytes("ASCII"))) - connectionHandler.expectNoMessage(100.millis) + // 
send a batch that is bigger than the default buffer to make sure we don't recurse and + // send more than one Received messages + serverSideChannel.write(ByteBuffer.wrap((ts ++ us).getBytes("ASCII"))) + connectionHandler.expectNoMessage(100.millis) - connectionActor ! ResumeReading - connectionHandler.expectMsgType[Received].data.decodeString("ASCII") should ===(ts) + connectionActor ! ResumeReading + connectionHandler.expectMsgType[Received].data.decodeString("ASCII") should ===(ts) - connectionHandler.expectNoMessage(100.millis) + connectionHandler.expectNoMessage(100.millis) - connectionActor ! ResumeReading - connectionHandler.expectMsgType[Received].data.decodeString("ASCII") should ===(us) + connectionActor ! ResumeReading + connectionHandler.expectMsgType[Received].data.decodeString("ASCII") should ===(us) - connectionHandler.expectNoMessage(100.millis) + connectionHandler.expectNoMessage(100.millis) - val vs = "v" * (maxBufferSize / 2) - serverSideChannel.write(ByteBuffer.wrap(vs.getBytes("ASCII"))) + val vs = "v" * (maxBufferSize / 2) + serverSideChannel.write(ByteBuffer.wrap(vs.getBytes("ASCII"))) - connectionActor ! ResumeReading + connectionActor ! 
ResumeReading - connectionHandler.expectMsgType[Received].data.decodeString("ASCII") should ===(vs) - } finally shutdown(system) + connectionHandler.expectMsgType[Received].data.decodeString("ASCII") should ===(vs) + } + finally shutdown(system) } "close the connection and reply with `Closed` upon reception of a `Close` command" in @@ -653,7 +657,7 @@ class TcpConnectionSpec extends AkkaSpec(""" override lazy val connectionActor = createConnectionActor(serverAddress = UnboundAddress, timeout = Option(100.millis)) run { - connectionActor.toString should not be ("") + connectionActor.toString should not be "" userHandler.expectMsg(CommandFailed(Connect(UnboundAddress, timeout = Option(100.millis)))) watch(connectionActor) expectTerminated(connectionActor) @@ -708,8 +712,8 @@ class TcpConnectionSpec extends AkkaSpec(""" written += 1 } // dump the NACKs - writer.receiveWhile(1.second) { - case CommandFailed(_) => written -= 1 + writer.receiveWhile(1.second) { case CommandFailed(_) => + written -= 1 } writer.msgAvailable should ===(false) @@ -746,8 +750,8 @@ class TcpConnectionSpec extends AkkaSpec(""" written += 1 } // dump the NACKs - writer.receiveWhile(1.second) { - case CommandFailed(_) => written -= 1 + writer.receiveWhile(1.second) { case CommandFailed(_) => + written -= 1 } // drain the queue until it works again @@ -782,8 +786,8 @@ class TcpConnectionSpec extends AkkaSpec(""" written += 1 } // dump the NACKs - writer.receiveWhile(1.second) { - case CommandFailed(_) => written -= 1 + writer.receiveWhile(1.second) { case CommandFailed(_) => + written -= 1 } writer.msgAvailable should ===(false) @@ -815,8 +819,8 @@ class TcpConnectionSpec extends AkkaSpec(""" written += 1 } // dump the NACKs - writer.receiveWhile(1.second) { - case CommandFailed(_) => written -= 1 + writer.receiveWhile(1.second) { case CommandFailed(_) => + written -= 1 } // drain the queue until it works again @@ -982,7 +986,7 @@ class TcpConnectionSpec extends AkkaSpec(""" override def run(body: 
=> Unit): Unit = super.run { try { serverSideChannel.configureBlocking(false) - serverSideChannel should not be (null) + serverSideChannel should not be null interestCallReceiver.expectMsg(OP_CONNECT) selector.send(connectionActor, ChannelConnectable) @@ -1044,9 +1048,7 @@ class TcpConnectionSpec extends AkkaSpec(""" } } finally Try(andThen()) - /** - * Tries to simultaneously act on client and server side to read from the server all pending data from the client. - */ + /** Tries to simultaneously act on client and server side to read from the server all pending data from the client. */ @tailrec final def pullFromServerSide( remaining: Int, remainingTries: Int = 1000, diff --git a/akka-actor-tests/src/test/scala/akka/io/TcpIntegrationSpec.scala b/akka-actor-tests/src/test/scala/akka/io/TcpIntegrationSpec.scala index cf83a1849a9..053d6b8912a 100644 --- a/akka-actor-tests/src/test/scala/akka/io/TcpIntegrationSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/io/TcpIntegrationSpec.scala @@ -18,11 +18,15 @@ import akka.testkit.{ AkkaSpec, TestProbe } import akka.testkit.WithLogCapturing import akka.util.ByteString -class TcpIntegrationSpec extends AkkaSpec(""" +class TcpIntegrationSpec + extends AkkaSpec(""" akka.loglevel = debug akka.loggers = ["akka.testkit.SilenceAllTestEventListener"] akka.io.tcp.trace-logging = on - """) with TcpIntegrationSpecSupport with TimeLimits with WithLogCapturing { + """) + with TcpIntegrationSpecSupport + with TimeLimits + with WithLogCapturing { def verifyActorTermination(actor: ActorRef): Unit = { watch(actor) diff --git a/akka-actor-tests/src/test/scala/akka/io/TcpIntegrationSpecSupport.scala b/akka-actor-tests/src/test/scala/akka/io/TcpIntegrationSpecSupport.scala index 9388ad52b8a..1b70d8724f4 100644 --- a/akka-actor-tests/src/test/scala/akka/io/TcpIntegrationSpecSupport.scala +++ b/akka-actor-tests/src/test/scala/akka/io/TcpIntegrationSpecSupport.scala @@ -50,7 +50,7 @@ trait TcpIntegrationSpecSupport { this: AkkaSpec => 
connectCommander.sender() ! Register(clientHandler.ref) bindHandler.expectMsgType[Connected] match { - case Connected(`localAddress`, `endpoint`) => //ok + case Connected(`localAddress`, `endpoint`) => // ok case other => fail(s"No match: ${other}") } val serverHandler = TestProbe() diff --git a/akka-actor-tests/src/test/scala/akka/io/TcpListenerSpec.scala b/akka-actor-tests/src/test/scala/akka/io/TcpListenerSpec.scala index 5a07f18988c..fdcfe4f6cff 100644 --- a/akka-actor-tests/src/test/scala/akka/io/TcpListenerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/io/TcpListenerSpec.scala @@ -167,11 +167,10 @@ class TcpListenerSpec extends AkkaSpec(""" def listener = parentRef.underlyingActor.listener def expectWorkerForCommand: SocketChannel = - selectorRouter.expectMsgPF() { - case WorkerForCommand(RegisterIncoming(chan), commander, _) => - chan.isOpen should ===(true) - commander should ===(listener) - chan + selectorRouter.expectMsgPF() { case WorkerForCommand(RegisterIncoming(chan), commander, _) => + chan.isOpen should ===(true) + commander should ===(listener) + chan } private class ListenerParent(pullMode: Boolean) extends Actor with ChannelRegistry { @@ -185,8 +184,8 @@ class TcpListenerSpec extends AkkaSpec(""" Bind(handler.ref, endpoint, 100, Nil, pullMode)).withDeploy(Deploy.local), name = "test-listener-" + counter.next()) parent.watch(listener) - def receive: Receive = { - case msg => parent.ref.forward(msg) + def receive: Receive = { case msg => + parent.ref.forward(msg) } override def supervisorStrategy = SupervisorStrategy.stoppingStrategy diff --git a/akka-actor-tests/src/test/scala/akka/io/UdpConnectedIntegrationSpec.scala b/akka-actor-tests/src/test/scala/akka/io/UdpConnectedIntegrationSpec.scala index 64751a2592e..d91c79ba528 100644 --- a/akka-actor-tests/src/test/scala/akka/io/UdpConnectedIntegrationSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/io/UdpConnectedIntegrationSpec.scala @@ -16,7 +16,8 @@ import akka.testkit.TestProbe import 
akka.testkit.WithLogCapturing import akka.util.ByteString -class UdpConnectedIntegrationSpec extends AkkaSpec(""" +class UdpConnectedIntegrationSpec + extends AkkaSpec(""" akka.loglevel = DEBUG akka.actor.debug.lifecycle = on akka.actor.debug.autoreceive = on @@ -25,7 +26,9 @@ class UdpConnectedIntegrationSpec extends AkkaSpec(""" # Java native host resolution akka.io.dns.resolver = async-dns akka.loggers = ["akka.testkit.SilenceAllTestEventListener"] - """) with ImplicitSender with WithLogCapturing { + """) + with ImplicitSender + with WithLogCapturing { val addresses = temporaryServerAddresses(5, udp = true) @@ -73,10 +76,9 @@ class UdpConnectedIntegrationSpec extends AkkaSpec(""" val data2 = ByteString("All your datagram belong to us") connectUdp(localAddress = None, serverAddress, testActor) ! UdpConnected.Send(data1) - val clientAddress = expectMsgPF() { - case Udp.Received(d, a) => - d should ===(data1) - a + val clientAddress = expectMsgPF() { case Udp.Received(d, a) => + d should ===(data1) + a } server ! Udp.Send(data2, clientAddress) @@ -92,10 +94,9 @@ class UdpConnectedIntegrationSpec extends AkkaSpec(""" val data2 = ByteString("All your datagram belong to us") connectUdp(Some(clientAddress), serverAddress, testActor) ! UdpConnected.Send(data1) - expectMsgPF() { - case Udp.Received(d, a) => - d should ===(data1) - a should ===(clientAddress) + expectMsgPF() { case Udp.Received(d, a) => + d should ===(data1) + a should ===(clientAddress) } server ! 
Udp.Send(data2, clientAddress) diff --git a/akka-actor-tests/src/test/scala/akka/io/UdpIntegrationSpec.scala b/akka-actor-tests/src/test/scala/akka/io/UdpIntegrationSpec.scala index 5d02ca5a01b..7bae3d1bbfe 100644 --- a/akka-actor-tests/src/test/scala/akka/io/UdpIntegrationSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/io/UdpIntegrationSpec.scala @@ -14,10 +14,12 @@ import akka.testkit.{ AkkaSpec, ImplicitSender, TestProbe } import akka.testkit.SocketUtil.temporaryServerAddresses import akka.util.ByteString -class UdpIntegrationSpec extends AkkaSpec(""" +class UdpIntegrationSpec + extends AkkaSpec(""" akka.loglevel = INFO # tests expect to be able to mutate messages - """) with ImplicitSender { + """) + with ImplicitSender { def bindUdp(handler: ActorRef): InetSocketAddress = { val commander = TestProbe() @@ -73,18 +75,16 @@ class UdpIntegrationSpec extends AkkaSpec(""" def checkSendingToClient(): Unit = { server ! Send(data, clientAddress) - expectMsgPF() { - case Received(d, a) => - d should ===(data) - a should ===(serverAddress) + expectMsgPF() { case Received(d, a) => + d should ===(data) + a should ===(serverAddress) } } def checkSendingToServer(): Unit = { client ! Send(data, serverAddress) - expectMsgPF() { - case Received(d, a) => - d should ===(data) - a should ===(clientAddress) + expectMsgPF() { case Received(d, a) => + d should ===(data) + a should ===(clientAddress) } } diff --git a/akka-actor-tests/src/test/scala/akka/io/dns/AsyncDnsResolverIntegrationSpec.scala b/akka-actor-tests/src/test/scala/akka/io/dns/AsyncDnsResolverIntegrationSpec.scala index 444aa7c6bf2..053f94db2f0 100644 --- a/akka-actor-tests/src/test/scala/akka/io/dns/AsyncDnsResolverIntegrationSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/io/dns/AsyncDnsResolverIntegrationSpec.scala @@ -116,7 +116,7 @@ class AsyncDnsResolverIntegrationSpec val answer = (IO(Dns) ? 
DnsProtocol.Resolve(name)).mapTo[DnsProtocol.Resolved].futureValue answer.name shouldEqual name answer.records.collect { case r: CNameRecord => r.canonicalName }.toSet shouldEqual Set("a-single.bar.example") - answer.records.collect { case r: ARecord => r.ip }.toSet shouldEqual Set(InetAddress.getByName("192.168.2.20")) + answer.records.collect { case r: ARecord => r.ip }.toSet shouldEqual Set(InetAddress.getByName("192.168.2.20")) } "resolve internal CNAME record" in { @@ -124,7 +124,7 @@ class AsyncDnsResolverIntegrationSpec val answer = resolve(name) answer.name shouldEqual name answer.records.collect { case r: CNameRecord => r.canonicalName }.toSet shouldEqual Set("a-double.foo.test") - answer.records.collect { case r: ARecord => r.ip }.toSet shouldEqual Set( + answer.records.collect { case r: ARecord => r.ip }.toSet shouldEqual Set( InetAddress.getByName("192.168.1.21"), InetAddress.getByName("192.168.1.22")) } diff --git a/akka-actor-tests/src/test/scala/akka/io/dns/DnsSettingsSpec.scala b/akka-actor-tests/src/test/scala/akka/io/dns/DnsSettingsSpec.scala index 146a6b0aa0d..5525a9365ba 100644 --- a/akka-actor-tests/src/test/scala/akka/io/dns/DnsSettingsSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/io/dns/DnsSettingsSpec.scala @@ -48,9 +48,8 @@ class DnsSettingsSpec extends AkkaSpec { eas, ConfigFactory.parseString("nameservers = [\"127.0.0.1\", \"127.0.0.2\"]").withFallback(defaultConfig)) - dnsSettings.NameServers.map(_.getAddress) shouldEqual List( - InetAddress.getByName("127.0.0.1"), - InetAddress.getByName("127.0.0.2")) + dnsSettings.NameServers + .map(_.getAddress) shouldEqual List(InetAddress.getByName("127.0.0.1"), InetAddress.getByName("127.0.0.2")) } "use host search domains if set to default" in { @@ -68,10 +67,12 @@ class DnsSettingsSpec extends AkkaSpec { "parse a single search domain" in { val dnsSettings = new DnsSettings( eas, - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" nameservers = "127.0.0.1" search-domains = 
"example.com" - """).withFallback(defaultConfig)) + """) + .withFallback(defaultConfig)) dnsSettings.SearchDomains shouldEqual List("example.com") } @@ -79,10 +80,12 @@ class DnsSettingsSpec extends AkkaSpec { "parse a single list of search domains" in { val dnsSettings = new DnsSettings( eas, - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" nameservers = "127.0.0.1" search-domains = [ "example.com", "example.net" ] - """).withFallback(defaultConfig)) + """) + .withFallback(defaultConfig)) dnsSettings.SearchDomains shouldEqual List("example.com", "example.net") } @@ -90,11 +93,13 @@ class DnsSettingsSpec extends AkkaSpec { "use host ndots if set to default" in { val dnsSettings = new DnsSettings( eas, - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" nameservers = "127.0.0.1" search-domains = "example.com" ndots = "default" - """).withFallback(defaultConfig)) + """) + .withFallback(defaultConfig)) // Will differ based on name OS DNS servers so just validating it does not throw dnsSettings.NDots @@ -103,11 +108,13 @@ class DnsSettingsSpec extends AkkaSpec { "parse ndots" in { val dnsSettings = new DnsSettings( eas, - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" nameservers = "127.0.0.1" search-domains = "example.com" ndots = 5 - """).withFallback(defaultConfig)) + """) + .withFallback(defaultConfig)) dnsSettings.NDots shouldEqual 5 } @@ -120,10 +127,12 @@ class DnsSettingsSpec extends AkkaSpec { val dnsSettingsDuration = new DnsSettings( eas, - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" positive-ttl = 10 s negative-ttl = 10 d - """).withFallback(defaultConfig)) + """) + .withFallback(defaultConfig)) dnsSettingsDuration.PositiveCachePolicy shouldEqual CachePolicy.Ttl.fromPositive(10.seconds) dnsSettingsDuration.NegativeCachePolicy shouldEqual CachePolicy.Ttl.fromPositive(10.days) diff --git a/akka-actor-tests/src/test/scala/akka/io/dns/NameserverAddressParserSpec.scala 
b/akka-actor-tests/src/test/scala/akka/io/dns/NameserverAddressParserSpec.scala index e831faddcce..852afe84e2b 100644 --- a/akka-actor-tests/src/test/scala/akka/io/dns/NameserverAddressParserSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/io/dns/NameserverAddressParserSpec.scala @@ -23,9 +23,8 @@ class NameserverAddressParserSpec extends AnyWordSpec with Matchers { DnsSettings.parseNameserverAddress("8.8.8.8") shouldEqual new InetSocketAddress("8.8.8.8", 53) } "handle default port in IPv6 address" in { - DnsSettings.parseNameserverAddress("[2001:4860:4860::8888]") shouldEqual new InetSocketAddress( - "2001:4860:4860::8888", - 53) + DnsSettings + .parseNameserverAddress("[2001:4860:4860::8888]") shouldEqual new InetSocketAddress("2001:4860:4860::8888", 53) } } } diff --git a/akka-actor-tests/src/test/scala/akka/io/dns/internal/AsyncDnsManagerSpec.scala b/akka-actor-tests/src/test/scala/akka/io/dns/internal/AsyncDnsManagerSpec.scala index de659dc34f3..65c67cdd032 100644 --- a/akka-actor-tests/src/test/scala/akka/io/dns/internal/AsyncDnsManagerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/io/dns/internal/AsyncDnsManagerSpec.scala @@ -18,12 +18,15 @@ import akka.testkit.WithLogCapturing // tests deprecated DNS API @nowarn("msg=deprecated") -class AsyncDnsManagerSpec extends AkkaSpec(""" +class AsyncDnsManagerSpec + extends AkkaSpec(""" akka.loglevel = DEBUG akka.loggers = ["akka.testkit.SilenceAllTestEventListener"] akka.io.dns.resolver = async-dns akka.io.dns.async-dns.nameservers = default - """) with ImplicitSender with WithLogCapturing { + """) + with ImplicitSender + with WithLogCapturing { val dns = Dns(system).manager diff --git a/akka-actor-tests/src/test/scala/akka/io/dns/internal/AsyncDnsResolverSpec.scala b/akka-actor-tests/src/test/scala/akka/io/dns/internal/AsyncDnsResolverSpec.scala index 9cd5c245506..467755dd559 100644 --- a/akka-actor-tests/src/test/scala/akka/io/dns/internal/AsyncDnsResolverSpec.scala +++ 
b/akka-actor-tests/src/test/scala/akka/io/dns/internal/AsyncDnsResolverSpec.scala @@ -21,10 +21,12 @@ import akka.io.dns.internal.AsyncDnsResolver.ResolveFailedException import akka.io.dns.internal.DnsClient.{ Answer, Question4, Question6, SrvQuestion } import akka.testkit.{ AkkaSpec, TestActor, TestProbe, WithLogCapturing } -class AsyncDnsResolverSpec extends AkkaSpec(""" +class AsyncDnsResolverSpec + extends AkkaSpec(""" akka.loglevel = DEBUG akka.loggers = ["akka.testkit.SilenceAllTestEventListener"] - """) with WithLogCapturing { + """) + with WithLogCapturing { val defaultConfig = ConfigFactory.parseString(""" nameservers = ["one","two"] @@ -89,8 +91,7 @@ class AsyncDnsResolverSpec extends AkkaSpec(""" "fails if all dns clients timeout" in new Setup { r ! Resolve("cats.com", Ip(ipv4 = true, ipv6 = false)) - senderProbe.expectMsgPF(remainingOrDefault) { - case Failure(ResolveFailedException(_)) => + senderProbe.expectMsgPF(remainingOrDefault) { case Failure(ResolveFailedException(_)) => } } @@ -100,8 +101,7 @@ class AsyncDnsResolverSpec extends AkkaSpec(""" dnsClient1.reply(Failure(new RuntimeException("Fail"))) dnsClient2.expectMsg(Question4(2, "cats.com")) dnsClient2.reply(Failure(new RuntimeException("Yet another fail"))) - senderProbe.expectMsgPF(remainingOrDefault) { - case Failure(ResolveFailedException(_)) => + senderProbe.expectMsgPF(remainingOrDefault) { case Failure(ResolveFailedException(_)) => } } @@ -265,23 +265,29 @@ class AsyncDnsResolverSpec extends AkkaSpec(""" // want to send up to 100 (or barring that, as many Resolves as we can) in 100 millis try { - awaitCond(p = { - if (resolvesSent < 100) { - r ! Resolve(s"{$resolvesSent}outof${resolvesSent + 1}cats.com", Ip(ipv4 = true, ipv6 = false)) - resolvesSent += 1 - false // not done yet - } else true - }, max = 100.millis, interval = 100.micros) + awaitCond( + p = { + if (resolvesSent < 100) { + r ! 
Resolve(s"{$resolvesSent}outof${resolvesSent + 1}cats.com", Ip(ipv4 = true, ipv6 = false)) + resolvesSent += 1 + false // not done yet + } else true + }, + max = 100.millis, + interval = 100.micros) } catch { case _: AssertionError => // not actually a problem if we didn't send 100 messages, but not being able to send multiple renders the test invalid resolvesSent shouldBe >(1) } - awaitAssert(a = { - usedIds.size shouldBe resolvesSent - qCount shouldBe resolvesSent - }, max = 300.millis, interval = 1.milli) + awaitAssert( + a = { + usedIds.size shouldBe resolvesSent + qCount shouldBe resolvesSent + }, + max = 300.millis, + interval = 1.milli) // Since we don't reply, the resolves may timeout, but that will still leave them pending } @@ -342,9 +348,12 @@ class AsyncDnsResolverSpec extends AkkaSpec(""" requestFrom(firstSender) requestFrom(secondSender) - awaitAssert(a = { - qCount shouldBe 1 - }, max = 100.millis, interval = 1.milli) + awaitAssert( + a = { + qCount shouldBe 1 + }, + max = 100.millis, + interval = 1.milli) firstSender.expectMsgType[Failure] secondSender.expectMsgType[Failure] @@ -359,8 +368,13 @@ class AsyncDnsResolverSpec extends AkkaSpec(""" def resolver(clients: List[ActorRef], config: Config): ActorRef = { val settings = new DnsSettings(system.asInstanceOf[ExtendedActorSystem], config) - system.actorOf(Props(new AsyncDnsResolver(settings, new SimpleDnsCache(), (_, _) => { - clients - }))) + system.actorOf( + Props( + new AsyncDnsResolver( + settings, + new SimpleDnsCache(), + (_, _) => { + clients + }))) } } diff --git a/akka-actor-tests/src/test/scala/akka/pattern/AskSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/AskSpec.scala index 13a1fe0b6cb..216c102abd7 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/AskSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/AskSpec.scala @@ -17,10 +17,12 @@ import akka.testkit.WithLogCapturing import akka.util.Timeout @nowarn -class AskSpec extends AkkaSpec(""" +class AskSpec + 
extends AkkaSpec(""" akka.loglevel = DEBUG akka.loggers = ["akka.testkit.SilenceAllTestEventListener"] - """) with WithLogCapturing { + """) + with WithLogCapturing { "The “ask” pattern" must { "send request to actor and wrap the answer in Future" in { @@ -132,9 +134,11 @@ class AskSpec extends AkkaSpec(""" val deadListener = TestProbe() system.eventStream.subscribe(deadListener.ref, classOf[DeadLetter]) - val echo = system.actorOf(Props(new Actor { - def receive = { case x => context.actorSelection(sender().path) ! x } - }), "select-echo2") + val echo = system.actorOf( + Props(new Actor { + def receive = { case x => context.actorSelection(sender().path) ! x } + }), + "select-echo2") val f = echo ? "hi" Await.result(f, 1 seconds) should ===("hi") @@ -163,14 +167,15 @@ class AskSpec extends AkkaSpec(""" val deadListener = TestProbe() system.eventStream.subscribe(deadListener.ref, classOf[DeadLetter]) - val echo = system.actorOf(Props(new Actor { - def receive = { - case x => + val echo = system.actorOf( + Props(new Actor { + def receive = { case x => val name = sender().path.name val parent = sender().path.parent context.actorSelection(parent / ".." / "temp" / name) ! x - } - }), "select-echo4") + } + }), + "select-echo4") val f = echo ? "hi" intercept[AskTimeoutException] { Await.result(f, 1 seconds) should ===("hi") @@ -184,13 +189,14 @@ class AskSpec extends AkkaSpec(""" val deadListener = TestProbe() system.eventStream.subscribe(deadListener.ref, classOf[DeadLetter]) - val echo = system.actorOf(Props(new Actor { - def receive = { - case x => + val echo = system.actorOf( + Props(new Actor { + def receive = { case x => val parent = sender().path.parent context.actorSelection(parent / "missing") ! x - } - }), "select-echo5") + } + }), + "select-echo5") val f = echo ? 
"hi" intercept[AskTimeoutException] { Await.result(f, 1 seconds) should ===("hi") @@ -203,12 +209,13 @@ class AskSpec extends AkkaSpec(""" val deadListener = TestProbe() system.eventStream.subscribe(deadListener.ref, classOf[DeadLetter]) - val echo = system.actorOf(Props(new Actor { - def receive = { - case x => + val echo = system.actorOf( + Props(new Actor { + def receive = { case x => context.actorSelection(sender().path / "missing") ! x - } - }), "select-echo6") + } + }), + "select-echo6") val f = echo ? "hi" intercept[AskTimeoutException] { Await.result(f, 1 seconds) should ===(ActorSelectionMessage("hi", Vector(SelectChildName("missing")), false)) @@ -221,8 +228,8 @@ class AskSpec extends AkkaSpec(""" val p = TestProbe() val act = system.actorOf(Props(new Actor { - def receive = { - case msg => p.ref ! sender() -> msg + def receive = { case msg => + p.ref ! sender() -> msg } })) @@ -241,11 +248,13 @@ class AskSpec extends AkkaSpec(""" implicit val timeout: Timeout = Timeout(300 millis) val p = TestProbe() - val act = system.actorOf(Props(new Actor { - def receive = { - case msg => p.ref ! sender() -> msg - } - }), "myName") + val act = system.actorOf( + Props(new Actor { + def receive = { case msg => + p.ref ! sender() -> msg + } + }), + "myName") (act ? 
"ask").mapTo[String] val (promiseActorRef, _) = p.expectMsgType[(ActorRef, String)] diff --git a/akka-actor-tests/src/test/scala/akka/pattern/BackoffOnRestartSupervisorSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/BackoffOnRestartSupervisorSpec.scala index 5fcc795b40e..b2d89770bd4 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/BackoffOnRestartSupervisorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/BackoffOnRestartSupervisorSpec.scala @@ -47,15 +47,18 @@ object TestParentActor { class TestParentActor(probe: ActorRef, supervisorProps: Props) extends Actor { val supervisor: ActorRef = context.actorOf(supervisorProps) - def receive: Receive = { - case other => probe.forward(other) + def receive: Receive = { case other => + probe.forward(other) } } -class BackoffOnRestartSupervisorSpec extends AkkaSpec(""" +class BackoffOnRestartSupervisorSpec + extends AkkaSpec(""" akka.loglevel = DEBUG akka.loggers = ["akka.testkit.SilenceAllTestEventListener"] - """) with WithLogCapturing with ImplicitSender { + """) + with WithLogCapturing + with ImplicitSender { def supervisorProps(probeRef: ActorRef) = { val options = BackoffOpts @@ -157,8 +160,8 @@ class BackoffOnRestartSupervisorSpec extends AkkaSpec(""" val options = BackoffOpts .onFailure(Props(new SlowlyFailingActor(postStopLatch)), "someChildName", 1 nanos, 1 nanos, 0.0) .withMaxNrOfRetries(-1) - .withSupervisorStrategy(OneForOneStrategy(loggingEnabled = false) { - case _: TestActor.StoppingException => SupervisorStrategy.Stop + .withSupervisorStrategy(OneForOneStrategy(loggingEnabled = false) { case _: TestActor.StoppingException => + SupervisorStrategy.Stop }) val supervisor = system.actorOf(BackoffSupervisor.props(options)) @@ -271,10 +274,12 @@ class BackoffOnRestartSupervisorSpec extends AkkaSpec(""" supervisor ! "THROW" // note that the message could be lost (dead letters) because ended up with previous crashed child - probe.awaitAssert({ - supervisor ! 
"PING" - probe.expectMsg("PING") - }, 1.second) + probe.awaitAssert( + { + supervisor ! "PING" + probe.expectMsg("PING") + }, + 1.second) } } } diff --git a/akka-actor-tests/src/test/scala/akka/pattern/BackoffSupervisorSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/BackoffSupervisorSpec.scala index e336c0b3a6c..ffb0d573205 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/BackoffSupervisorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/BackoffSupervisorSpec.scala @@ -93,11 +93,11 @@ class BackoffSupervisorSpec extends AkkaSpec with ImplicitSender with Eventually } } filterException[TestException] { - val stoppingStrategy = OneForOneStrategy() { - case _: TestException => SupervisorStrategy.Stop + val stoppingStrategy = OneForOneStrategy() { case _: TestException => + SupervisorStrategy.Stop } - val restartingStrategy = OneForOneStrategy() { - case _: TestException => SupervisorStrategy.Restart + val restartingStrategy = OneForOneStrategy() { case _: TestException => + SupervisorStrategy.Restart } assertCustomStrategy(create(onStopOptions().withSupervisorStrategy(stoppingStrategy))) @@ -159,11 +159,11 @@ class BackoffSupervisorSpec extends AkkaSpec with ImplicitSender with Eventually expectMsg(BackoffSupervisor.RestartCount(0)) } - val stoppingStrategy = OneForOneStrategy() { - case _: TestException => SupervisorStrategy.Stop + val stoppingStrategy = OneForOneStrategy() { case _: TestException => + SupervisorStrategy.Stop } - val restartingStrategy = OneForOneStrategy() { - case _: TestException => SupervisorStrategy.Restart + val restartingStrategy = OneForOneStrategy() { case _: TestException => + SupervisorStrategy.Restart } assertManualReset( @@ -203,9 +203,8 @@ class BackoffSupervisorSpec extends AkkaSpec with ImplicitSender with Eventually "use provided actor while stopped and withHandlerWhileStopped is specified" in { val handler = system.actorOf(Props(new Actor { - override def receive: Receive = { - case "still there?" 
=> - sender() ! "not here!" + override def receive: Receive = { case "still there?" => + sender() ! "not here!" } })) filterException[TestException] { @@ -253,7 +252,7 @@ class BackoffSupervisorSpec extends AkkaSpec with ImplicitSender with Eventually } EventFilter.warning(pattern = ".*boom.*", occurrences = 1).intercept { - supervisor ! "boom" //this will be sent to deadLetters + supervisor ! "boom" // this will be sent to deadLetters expectNoMessage(500.milliseconds) } } diff --git a/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerMTSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerMTSpec.scala index bc0ca631b97..3791be4b4ac 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerMTSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerMTSpec.scala @@ -23,10 +23,12 @@ class CircuitBreakerMTSpec extends AkkaSpec { def openBreaker(breaker: CircuitBreaker): Unit = { // returns true if the breaker is open def failingCall(): Boolean = - Await.result(breaker.withCircuitBreaker(Future.failed(new RuntimeException("FAIL"))).recover { - case _: CircuitBreakerOpenException => true - case _ => false - }, remainingOrDefault) + Await.result( + breaker.withCircuitBreaker(Future.failed(new RuntimeException("FAIL"))).recover { + case _: CircuitBreakerOpenException => true + case _ => false + }, + remainingOrDefault) // fire some failing calls (1 to (maxFailures + 1)).foreach { _ => @@ -45,10 +47,9 @@ class CircuitBreakerMTSpec extends AkkaSpec { Await.ready(aFewActive, 5.seconds.dilated) "succeed" }) - .recoverWith { - case _: CircuitBreakerOpenException => - aFewActive.countDown() - Future.successful("CBO") + .recoverWith { case _: CircuitBreakerOpenException => + aFewActive.countDown() + Future.successful("CBO") } } diff --git a/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerSpec.scala index 985d2e06c68..c6ab2d89c29 100644 --- 
a/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerSpec.scala @@ -350,7 +350,7 @@ class CircuitBreakerSpec extends AkkaSpec(""" breaker().currentFailureCount should ===(0) intercept[TestException] { val ct = Thread.currentThread() // Ensure that the thunk is executed in the tests thread - breaker().withSyncCircuitBreaker({ if (Thread.currentThread() eq ct) throwException else "fail" }) + breaker().withSyncCircuitBreaker { if (Thread.currentThread() eq ct) throwException else "fail" } } breaker().currentFailureCount should ===(1) breaker().withSyncCircuitBreaker(sayHi) @@ -363,7 +363,7 @@ class CircuitBreakerSpec extends AkkaSpec(""" breaker().currentFailureCount should ===(0) intercept[TestException] { val ct = Thread.currentThread() // Ensure that the thunk is executed in the tests thread - breaker().withSyncCircuitBreaker({ if (Thread.currentThread() eq ct) throwException else "fail" }) + breaker().withSyncCircuitBreaker { if (Thread.currentThread() eq ct) throwException else "fail" } } breaker().currentFailureCount should ===(1) @@ -386,7 +386,7 @@ class CircuitBreakerSpec extends AkkaSpec(""" breaker().currentFailureCount should ===(0) intercept[TestException] { val ct = Thread.currentThread() // Ensure that the thunk is executed in the tests thread - breaker().withSyncCircuitBreaker({ if (Thread.currentThread() eq ct) throwException else "fail" }) + breaker().withSyncCircuitBreaker { if (Thread.currentThread() eq ct) throwException else "fail" } } breaker().currentFailureCount should ===(1) breaker().succeed() diff --git a/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala index 6fcf5538cc0..7eaf50ebd83 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala @@ -16,9 +16,8 @@ import akka.testkit.{ AkkaSpec, 
TestLatch } object PatternSpec { final case class Work(duration: Duration) class TargetActor extends Actor { - def receive = { - case (testLatch: TestLatch, duration: FiniteDuration) => - Await.ready(testLatch, duration) + def receive = { case (testLatch: TestLatch, duration: FiniteDuration) => + Await.ready(testLatch, duration) } } } diff --git a/akka-actor-tests/src/test/scala/akka/pattern/RetrySpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/RetrySpec.scala index de23c178924..64244289bce 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/RetrySpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/RetrySpec.scala @@ -29,10 +29,10 @@ class RetrySpec extends AkkaSpec with RetrySupport { @volatile var counter = 0 val retried = retry( () => - Future.successful({ + Future.successful { counter += 1 counter - }), + }, 5, 1 second) @@ -94,10 +94,13 @@ class RetrySpec extends AkkaSpec with RetrySupport { } else Future.successful(5) } - val retried = retry(() => attempt(), 5, attempted => { - attemptedCount = attempted - Some(100.milliseconds * attempted) - }) + val retried = retry( + () => attempt(), + 5, + attempted => { + attemptedCount = attempted + Some(100.milliseconds * attempted) + }) within(30000000 seconds) { intercept[IllegalStateException] { Await.result(retried, remaining) }.getMessage should ===("6") attemptedCount shouldBe 5 diff --git a/akka-actor-tests/src/test/scala/akka/pattern/StatusReplySpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/StatusReplySpec.scala index b758e0d03c5..a642f2c53f8 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/StatusReplySpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/StatusReplySpec.scala @@ -59,8 +59,7 @@ class StatusReplySpec extends AkkaSpec with ScalaFutures { } "transform scala.util.Try" in { - StatusReply.fromTry(scala.util.Success("woho")) should matchPattern { - case StatusReply.Success("woho") => + StatusReply.fromTry(scala.util.Success("woho")) should 
matchPattern { case StatusReply.Success("woho") => } StatusReply.fromTry(scala.util.Failure(TestException("boho"))) should matchPattern { case StatusReply.Error(StatusReply.ErrorMessage("boho")) => diff --git a/akka-actor-tests/src/test/scala/akka/pattern/extended/ExplicitAskSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/extended/ExplicitAskSpec.scala index 64888a9153e..1b3882a10a1 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/extended/ExplicitAskSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/extended/ExplicitAskSpec.scala @@ -24,8 +24,8 @@ class ExplicitAskSpec extends AkkaSpec { implicit val timeout: Timeout = Timeout(5.seconds) val target = system.actorOf(Props(new Actor { - def receive = { - case Request(respondTo) => respondTo ! Response(self) + def receive = { case Request(respondTo) => + respondTo ! Response(self) } })) @@ -36,11 +36,13 @@ class ExplicitAskSpec extends AkkaSpec { "work for ActorSelection" in { implicit val timeout: Timeout = Timeout(5.seconds) - val target = system.actorOf(Props(new Actor { - def receive = { - case Request(respondTo) => respondTo ! Response(self) - } - }), "select-echo") + val target = system.actorOf( + Props(new Actor { + def receive = { case Request(respondTo) => + respondTo ! Response(self) + } + }), + "select-echo") val selection = system.actorSelection("/user/select-echo") val f = selection ? 
(respondTo => Request(respondTo)) diff --git a/akka-actor-tests/src/test/scala/akka/routing/BalancingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/BalancingSpec.scala index 3cd1332707b..6de16a3036c 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/BalancingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/BalancingSpec.scala @@ -25,41 +25,41 @@ object BalancingSpec { lazy val id = counter.getAndIncrement() log.debug("Worker started") - def receive = { - case _: Int => - latch.countDown() - if (id == 1) { - if (!latch.isOpen) { - log.debug("Waiting for all routees to receieve a message") - // wait for all routees to receive a message before processing - Await.result(latch, 1.minute) - log.debug("All routees receieved a message, continuing") - } - } else { - if (!startOthers.isCompleted) { - log.debug("Waiting for startOthers toggle") - // wait for the first worker to process messages before also processing - Await.result(startOthers, 1.minute) - log.debug("Continuing after wait for startOthers toggle") - } + def receive = { case _: Int => + latch.countDown() + if (id == 1) { + if (!latch.isOpen) { + log.debug("Waiting for all routees to receieve a message") + // wait for all routees to receive a message before processing + Await.result(latch, 1.minute) + log.debug("All routees receieved a message, continuing") } - sender() ! id + } else { + if (!startOthers.isCompleted) { + log.debug("Waiting for startOthers toggle") + // wait for the first worker to process messages before also processing + Await.result(startOthers, 1.minute) + log.debug("Continuing after wait for startOthers toggle") + } + } + sender() ! 
id } } class Parent extends Actor { val pool = context.actorOf( - BalancingPool(2).props( - routeeProps = Props(classOf[Worker], TestLatch(0)(context.system), Future.successful(())))) + BalancingPool(2).props(routeeProps = + Props(classOf[Worker], TestLatch(0)(context.system), Future.successful(())))) - def receive = { - case msg => pool.forward(msg) + def receive = { case msg => + pool.forward(msg) } } } -class BalancingSpec extends AkkaSpec(""" +class BalancingSpec + extends AkkaSpec(""" akka.loglevel=debug akka.actor.deployment { /balancingPool-2 { @@ -77,7 +77,9 @@ class BalancingSpec extends AkkaSpec(""" } } } - """) with ImplicitSender with BeforeAndAfterEach { + """) + with ImplicitSender + with BeforeAndAfterEach { import BalancingSpec._ val poolSize = 5 // must be less than fork-join parallelism-min, which is 8 in AkkaSpec diff --git a/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala index 75961ee3b0a..21d6c0ee8f7 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala @@ -83,8 +83,8 @@ object ConfiguredLocalRoutingSpec { } class EchoProps extends Actor { - def receive = { - case "get" => sender() ! context.props + def receive = { case "get" => + sender() ! context.props } } @@ -94,9 +94,8 @@ object ConfiguredLocalRoutingSpec { } class Parent extends Actor { - def receive = { - case (p: Props, name: String) => - sender() ! context.actorOf(p, name) + def receive = { case (p: Props, name: String) => + sender() ! 
context.actorOf(p, name) } } diff --git a/akka-actor-tests/src/test/scala/akka/routing/ConsistentHashingRouterSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ConsistentHashingRouterSpec.scala index ec7e0218637..80e9312fa6f 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ConsistentHashingRouterSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/ConsistentHashingRouterSpec.scala @@ -86,8 +86,8 @@ class ConsistentHashingRouterSpec } "select destination with defined hashMapping" in { - def hashMapping: ConsistentHashMapping = { - case Msg2(key, _) => key + def hashMapping: ConsistentHashMapping = { case Msg2(key, _) => + key } val router2 = system.actorOf( diff --git a/akka-actor-tests/src/test/scala/akka/routing/MetricsBasedResizerSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/MetricsBasedResizerSpec.scala index a6ed797f41a..6da8076158f 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/MetricsBasedResizerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/MetricsBasedResizerSpec.scala @@ -29,10 +29,9 @@ object MetricsBasedResizerSpec { */ class TestLatchingActor(implicit timeout: Timeout) extends Actor { - def receive = { - case Latches(first, second) => - first.countDown() - Try(Await.ready(second, timeout.duration)) + def receive = { case Latches(first, second) => + first.countDown() + Try(Await.ready(second, timeout.duration)) } } @@ -134,9 +133,8 @@ class MetricsBasedResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultT "stop an underutilizationStreak when fully utilized" in { val resizer = DefaultOptimalSizeExploringResizer() - resizer.record = ResizeRecord( - underutilizationStreak = - Some(UnderUtilizationStreak(start = LocalDateTime.now.minusHours(1), highestUtilization = 1))) + resizer.record = ResizeRecord(underutilizationStreak = + Some(UnderUtilizationStreak(start = LocalDateTime.now.minusHours(1), highestUtilization = 1))) val router = TestRouter(routees(2)) router.sendToAll(await = true) 
@@ -159,8 +157,8 @@ class MetricsBasedResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultT "leave the underutilizationStreak highestUtilization unchanged if current utilization is lower" in { val resizer = DefaultOptimalSizeExploringResizer() - resizer.record = ResizeRecord( - underutilizationStreak = Some(UnderUtilizationStreak(start = LocalDateTime.now, highestUtilization = 2))) + resizer.record = ResizeRecord(underutilizationStreak = + Some(UnderUtilizationStreak(start = LocalDateTime.now, highestUtilization = 2))) val router = TestRouter(routees(2)) router.mockSend(await = true) @@ -173,8 +171,8 @@ class MetricsBasedResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultT "update the underutilizationStreak highestUtilization if current utilization is higher" in { val resizer = DefaultOptimalSizeExploringResizer() - resizer.record = ResizeRecord( - underutilizationStreak = Some(UnderUtilizationStreak(start = LocalDateTime.now, highestUtilization = 1))) + resizer.record = ResizeRecord(underutilizationStreak = + Some(UnderUtilizationStreak(start = LocalDateTime.now, highestUtilization = 1))) val router = TestRouter(routees(3)) router.mockSend(await = true, routeeIdx = 0) @@ -232,12 +230,13 @@ class MetricsBasedResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultT val resizer = DefaultOptimalSizeExploringResizer() val router = TestRouter(routees(2)) val msgs1 = router.sendToAll(await = true) - val msgs2 = router.sendToAll(await = false) //make sure the routees are still busy after the first batch of messages get processed. + val msgs2 = router.sendToAll(await = + false) // make sure the routees are still busy after the first batch of messages get processed. 
val before = System.nanoTime() - resizer.reportMessageCount(router.routees, router.msgs.size) //updates the records + resizer.reportMessageCount(router.routees, router.msgs.size) // updates the records - msgs1.foreach(_.second.open()) //process two messages + msgs1.foreach(_.second.open()) // process two messages // make sure some time passes in-between Thread.sleep(300) @@ -263,12 +262,13 @@ class MetricsBasedResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultT val router = TestRouter(routees(2)) val msgs1 = router.sendToAll(await = true) - val msgs2 = router.sendToAll(await = false) //make sure the routees are still busy after the first batch of messages get processed. + val msgs2 = router.sendToAll(await = + false) // make sure the routees are still busy after the first batch of messages get processed. val before = System.nanoTime() - resizer.reportMessageCount(router.routees, router.msgs.size) //updates the records + resizer.reportMessageCount(router.routees, router.msgs.size) // updates the records - msgs1.foreach(_.second.open()) //process two messages + msgs1.foreach(_.second.open()) // process two messages // make sure some time passes in-between Thread.sleep(300) @@ -294,9 +294,8 @@ class MetricsBasedResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultT "downsize to close to the highest retention when a streak of underutilization started downsizeAfterUnderutilizedFor" in { val resizer = DefaultOptimalSizeExploringResizer(downsizeAfterUnderutilizedFor = 72.hours, downsizeRatio = 0.5) - resizer.record = ResizeRecord( - underutilizationStreak = - Some(UnderUtilizationStreak(start = LocalDateTime.now.minusHours(73), highestUtilization = 8))) + resizer.record = ResizeRecord(underutilizationStreak = + Some(UnderUtilizationStreak(start = LocalDateTime.now.minusHours(73), highestUtilization = 8))) resizer.resize(routees(20)) should be(4 - 20) } diff --git a/akka-actor-tests/src/test/scala/akka/routing/RandomSpec.scala 
b/akka-actor-tests/src/test/scala/akka/routing/RandomSpec.scala index 3530f844ae8..185eba4ffbd 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RandomSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RandomSpec.scala @@ -22,15 +22,17 @@ class RandomSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { "be able to shut down its instance" in { val stopLatch = new TestLatch(7) - val actor = system.actorOf(RandomPool(7).props(Props(new Actor { - def receive = { - case "hello" => sender() ! "world" - } - - override def postStop(): Unit = { - stopLatch.countDown() - } - })), "random-shutdown") + val actor = system.actorOf( + RandomPool(7).props(Props(new Actor { + def receive = { case "hello" => + sender() ! "world" + } + + override def postStop(): Unit = { + stopLatch.countDown() + } + })), + "random-shutdown") actor ! "hello" actor ! "hello" @@ -57,13 +59,15 @@ class RandomSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { replies = replies + (i -> 0) } - val actor = system.actorOf(RandomPool(connectionCount).props(routeeProps = Props(new Actor { - lazy val id = counter.getAndIncrement() - def receive = { - case "hit" => sender() ! id - case "end" => doneLatch.countDown() - } - })), name = "random") + val actor = system.actorOf( + RandomPool(connectionCount).props(routeeProps = Props(new Actor { + lazy val id = counter.getAndIncrement() + def receive = { + case "hit" => sender() ! id + case "end" => doneLatch.countDown() + } + })), + name = "random") for (_ <- 0 until iterationCount) { for (_ <- 0 until connectionCount) { @@ -77,7 +81,7 @@ class RandomSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { actor ! 
akka.routing.Broadcast("end") Await.ready(doneLatch, 5 seconds) - replies.values.foreach { _ should be > (0) } + replies.values.foreach { _ should be > 0 } replies.values.sum should ===(iterationCount * connectionCount) } @@ -85,15 +89,17 @@ class RandomSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { val helloLatch = new TestLatch(6) val stopLatch = new TestLatch(6) - val actor = system.actorOf(RandomPool(6).props(routeeProps = Props(new Actor { - def receive = { - case "hello" => helloLatch.countDown() - } - - override def postStop(): Unit = { - stopLatch.countDown() - } - })), "random-broadcast") + val actor = system.actorOf( + RandomPool(6).props(routeeProps = Props(new Actor { + def receive = { case "hello" => + helloLatch.countDown() + } + + override def postStop(): Unit = { + stopLatch.countDown() + } + })), + "random-broadcast") actor ! akka.routing.Broadcast("hello") Await.ready(helloLatch, 5 seconds) diff --git a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala index 8714e67dca2..3f228e37498 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala @@ -30,8 +30,8 @@ object ResizerSpec { """ class TestActor extends Actor { - def receive = { - case latch: TestLatch => latch.countDown() + def receive = { case latch: TestLatch => + latch.countDown() } } @@ -227,16 +227,18 @@ class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with } val z = routeeSize(router) - z should be > (2) + z should be > 2 Thread.sleep((300 millis).dilated.toMillis) // let it cool down - awaitCond({ - router ! 0 // trigger resize - Thread.sleep((20 millis).dilated.toMillis) - routeeSize(router) < z - }, interval = 500.millis.dilated) + awaitCond( + { + router ! 
0 // trigger resize + Thread.sleep((20 millis).dilated.toMillis) + routeeSize(router) < z + }, + interval = 500.millis.dilated) } diff --git a/akka-actor-tests/src/test/scala/akka/routing/RoundRobinSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RoundRobinSpec.scala index d53b040e5dc..b17449236f0 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RoundRobinSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RoundRobinSpec.scala @@ -29,15 +29,17 @@ class RoundRobinSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { val helloLatch = new TestLatch(5) val stopLatch = new TestLatch(5) - val actor = system.actorOf(RoundRobinPool(5).props(routeeProps = Props(new Actor { - def receive = { - case "hello" => helloLatch.countDown() - } + val actor = system.actorOf( + RoundRobinPool(5).props(routeeProps = Props(new Actor { + def receive = { case "hello" => + helloLatch.countDown() + } - override def postStop(): Unit = { - stopLatch.countDown() - } - })), "round-robin-shutdown") + override def postStop(): Unit = { + stopLatch.countDown() + } + })), + "round-robin-shutdown") actor ! "hello" actor ! "hello" @@ -58,13 +60,15 @@ class RoundRobinSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { val counter = new AtomicInteger var replies: Map[Int, Int] = Map.empty.withDefaultValue(0) - val actor = system.actorOf(RoundRobinPool(connectionCount).props(routeeProps = Props(new Actor { - lazy val id = counter.getAndIncrement() - def receive = { - case "hit" => sender() ! id - case "end" => doneLatch.countDown() - } - })), "round-robin") + val actor = system.actorOf( + RoundRobinPool(connectionCount).props(routeeProps = Props(new Actor { + lazy val id = counter.getAndIncrement() + def receive = { + case "hit" => sender() ! id + case "end" => doneLatch.countDown() + } + })), + "round-robin") for (_ <- 1 to iterationCount; _ <- 1 to connectionCount) { val id = Await.result((actor ? 
"hit").mapTo[Int], timeout.duration) @@ -83,15 +87,17 @@ class RoundRobinSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { val helloLatch = new TestLatch(5) val stopLatch = new TestLatch(5) - val actor = system.actorOf(RoundRobinPool(5).props(routeeProps = Props(new Actor { - def receive = { - case "hello" => helloLatch.countDown() - } + val actor = system.actorOf( + RoundRobinPool(5).props(routeeProps = Props(new Actor { + def receive = { case "hello" => + helloLatch.countDown() + } - override def postStop(): Unit = { - stopLatch.countDown() - } - })), "round-robin-broadcast") + override def postStop(): Unit = { + stopLatch.countDown() + } + })), + "round-robin-broadcast") actor ! akka.routing.Broadcast("hello") Await.ready(helloLatch, 5 seconds) @@ -101,9 +107,11 @@ class RoundRobinSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { } "be controlled with management messages" in { - val actor = system.actorOf(RoundRobinPool(3).props(routeeProps = Props(new Actor { - def receive = Actor.emptyBehavior - })), "round-robin-managed") + val actor = system.actorOf( + RoundRobinPool(3).props(routeeProps = Props(new Actor { + def receive = Actor.emptyBehavior + })), + "round-robin-managed") routeeSize(actor) should ===(3) actor ! AdjustPoolSize(+4) @@ -129,12 +137,14 @@ class RoundRobinSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { var replies: Map[String, Int] = Map.empty.withDefaultValue(0) val paths = (1 to connectionCount).map { n => - val ref = system.actorOf(Props(new Actor { - def receive = { - case "hit" => sender() ! self.path.name - case "end" => doneLatch.countDown() - } - }), name = "target-" + n) + val ref = system.actorOf( + Props(new Actor { + def receive = { + case "hit" => sender() ! 
self.path.name + case "end" => doneLatch.countDown() + } + }), + name = "target-" + n) ref.path.toStringWithoutAddress } diff --git a/akka-actor-tests/src/test/scala/akka/routing/RouteeCreationSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RouteeCreationSpec.scala index 52715c41509..ad7cec57452 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RouteeCreationSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RouteeCreationSpec.scala @@ -34,12 +34,12 @@ class RouteeCreationSpec extends AkkaSpec { val N = 100 system.actorOf(RoundRobinPool(N).props(Props(new Actor { context.parent ! "one" - def receive = { - case "one" => testActor.forward("two") + def receive = { case "one" => + testActor.forward("two") } }))) - val gotit = receiveWhile(messages = N) { - case "two" => lastSender.toString + val gotit = receiveWhile(messages = N) { case "two" => + lastSender.toString } expectNoMessage(100.millis) if (gotit.size != N) { diff --git a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala index cd1bf84163a..e96195d05a1 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala @@ -41,8 +41,8 @@ object RoutingSpec { } class Echo extends Actor { - def receive = { - case _ => sender() ! self + def receive = { case _ => + sender() ! self } } @@ -69,8 +69,8 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with awaitCond { router ! "" router ! 
"" - val res = receiveWhile(100 millis, messages = 2) { - case x: ActorRef => x + val res = receiveWhile(100 millis, messages = 2) { case x: ActorRef => + x } res == Seq(c1, c1) } @@ -136,15 +136,15 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with } "set supplied supervisorStrategy" in { - //#supervision + // #supervision val escalator = OneForOneStrategy() { - //#custom-strategy + // #custom-strategy case e => testActor ! e; SupervisorStrategy.Escalate - //#custom-strategy + // #custom-strategy } val router = system.actorOf(RoundRobinPool(1, supervisorStrategy = escalator).props(routeeProps = Props[TestActor]())) - //#supervision + // #supervision router ! GetRoutees EventFilter[ActorKilledException](occurrences = 1).intercept { expectMsgType[Routees].routees.head.send(Kill, testActor) @@ -161,8 +161,8 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with } "set supplied supervisorStrategy for FromConfig" in { - val escalator = OneForOneStrategy() { - case e => testActor ! e; SupervisorStrategy.Escalate + val escalator = OneForOneStrategy() { case e => + testActor ! e; SupervisorStrategy.Escalate } val router = system.actorOf(FromConfig.withSupervisorStrategy(escalator).props(routeeProps = Props[TestActor]()), "router1") @@ -174,13 +174,13 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with } "default to all-for-one-always-escalate strategy" in { - val restarter = OneForOneStrategy() { - case e => testActor ! e; SupervisorStrategy.Restart + val restarter = OneForOneStrategy() { case e => + testActor ! e; SupervisorStrategy.Restart } val supervisor = system.actorOf(Props(new Supervisor(restarter))) supervisor ! RoundRobinPool(3).props(routeeProps = Props(new Actor { - def receive = { - case x: String => throw new Exception(x) + def receive = { case x: String => + throw new Exception(x) } override def postRestart(reason: Throwable): Unit = testActor ! 
"restarted" })) @@ -196,11 +196,10 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with "start in-line for context.actorOf()" in { system.actorOf(Props(new Actor { - def receive = { - case "start" => - (context.actorOf(RoundRobinPool(2).props(routeeProps = Props(new Actor { - def receive = { case x => sender() ! x } - }))) ? "hello").pipeTo(sender()) + def receive = { case "start" => + (context.actorOf(RoundRobinPool(2).props(routeeProps = Props(new Actor { + def receive = { case x => sender() ! x } + }))) ? "hello").pipeTo(sender()) } })) ! "start" expectMsg("hello") @@ -212,8 +211,8 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with "send message to connection" in { class Actor1 extends Actor { - def receive = { - case msg => testActor.forward(msg) + def receive = { case msg => + testActor.forward(msg) } } diff --git a/akka-actor-tests/src/test/scala/akka/routing/ScatterGatherFirstCompletedSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ScatterGatherFirstCompletedSpec.scala index 1c77616815c..09e749230f0 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ScatterGatherFirstCompletedSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/ScatterGatherFirstCompletedSpec.scala @@ -28,9 +28,9 @@ object ScatterGatherFirstCompletedSpec { system.actorOf( Props(new Actor { def receive = { - case Stop(None) => context.stop(self) - case Stop(Some(_id)) if (_id == id) => context.stop(self) - case _id: Int if (_id == id) => + case Stop(None) => context.stop(self) + case Stop(Some(_id)) if _id == id => context.stop(self) + case _id: Int if _id == id => case _ => { Thread.sleep(100 * id) sender() ! 
id diff --git a/akka-actor-tests/src/test/scala/akka/routing/SmallestMailboxSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/SmallestMailboxSpec.scala index 3cc32eef8c7..244907290c1 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/SmallestMailboxSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/SmallestMailboxSpec.scala @@ -51,15 +51,15 @@ class SmallestMailboxSpec extends AkkaSpec with DefaultTimeout with ImplicitSend busy.countDown() val busyPath = usedActors.get(0) - busyPath should not be (null) + busyPath should not be null val path1 = usedActors.get(1) val path2 = usedActors.get(2) val path3 = usedActors.get(3) - path1 should not be (busyPath) - path2 should not be (busyPath) - path3 should not be (busyPath) + path1 should not be busyPath + path2 should not be busyPath + path3 should not be busyPath } } diff --git a/akka-actor-tests/src/test/scala/akka/routing/TailChoppingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/TailChoppingSpec.scala index ca5ea129f39..9759f97b64a 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/TailChoppingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/TailChoppingSpec.scala @@ -16,18 +16,20 @@ import akka.testkit._ object TailChoppingSpec { def newActor(id: Int, sleepTime: Duration)(implicit system: ActorSystem) = - system.actorOf(Props(new Actor { - var times: Int = _ - - def receive = { - case "stop" => context.stop(self) - case "times" => sender() ! times - case _ => - times += 1 - Thread.sleep(sleepTime.toMillis) - sender() ! "ack" - } - }), "Actor:" + id) + system.actorOf( + Props(new Actor { + var times: Int = _ + + def receive = { + case "stop" => context.stop(self) + case "times" => sender() ! times + case _ => + times += 1 + Thread.sleep(sleepTime.toMillis) + sender() ! 
"ack" + } + }), + "Actor:" + id) } class TailChoppingSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { @@ -100,8 +102,7 @@ class TailChoppingSpec extends AkkaSpec with DefaultTimeout with ImplicitSender system.actorOf(TailChoppingGroup(paths, within = 300.milliseconds, interval = 50.milliseconds).props()) probe.send(routedActor, "") - probe.expectMsgPF() { - case Failure(_: AskTimeoutException) => + probe.expectMsgPF() { case Failure(_: AskTimeoutException) => } allShouldEqual(1, actor1, actor2)(ref => Await.result(ref ? "times", timeout.duration)) diff --git a/akka-actor-tests/src/test/scala/akka/serialization/DisabledJavaSerializerWarningSpec.scala b/akka-actor-tests/src/test/scala/akka/serialization/DisabledJavaSerializerWarningSpec.scala index 93d4172cbd2..a2c67d8cb86 100644 --- a/akka-actor-tests/src/test/scala/akka/serialization/DisabledJavaSerializerWarningSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/serialization/DisabledJavaSerializerWarningSpec.scala @@ -15,7 +15,8 @@ object DisabledJavaSerializerWarningSpec { final case class Msg(s: String) } -class DisabledJavaSerializerWarningSpec extends AkkaSpec(""" +class DisabledJavaSerializerWarningSpec + extends AkkaSpec(""" akka.actor { allow-java-serialization = off serialize-messages = on @@ -23,7 +24,8 @@ class DisabledJavaSerializerWarningSpec extends AkkaSpec(""" # this is by default on, but tests are running with off warn-about-java-serializer-usage = on } - """) with ImplicitSender { + """) + with ImplicitSender { import DisabledJavaSerializerWarningSpec._ diff --git a/akka-actor-tests/src/test/scala/akka/serialization/PrimitivesSerializationSpec.scala b/akka-actor-tests/src/test/scala/akka/serialization/PrimitivesSerializationSpec.scala index 47b117ef013..8e6449f93e8 100644 --- a/akka-actor-tests/src/test/scala/akka/serialization/PrimitivesSerializationSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/serialization/PrimitivesSerializationSpec.scala @@ -122,19 +122,18 @@ class 
PrimitivesSerializationSpec extends AkkaSpec(PrimitivesSerializationSpec.t "StringSerializer" must { val random = Random.nextString(256) Seq("empty string" -> "", "hello" -> "hello", "árvíztűrőütvefúrógép" -> "árvíztűrőütvefúrógép", "random" -> random) - .foreach { - case (scenario, item) => - s"resolve serializer for [$scenario]" in { - serialization.serializerFor(item.getClass).getClass should ===(classOf[StringSerializer]) - } - - s"serialize and de-serialize [$scenario]" in { - verifySerialization(item) - } - - s"serialize and de-serialize value [$scenario] using ByteBuffers" in { - verifySerializationByteBuffer(item) - } + .foreach { case (scenario, item) => + s"resolve serializer for [$scenario]" in { + serialization.serializerFor(item.getClass).getClass should ===(classOf[StringSerializer]) + } + + s"serialize and de-serialize [$scenario]" in { + verifySerialization(item) + } + + s"serialize and de-serialize value [$scenario] using ByteBuffers" in { + verifySerializationByteBuffer(item) + } } "have right serializer id" in { diff --git a/akka-actor-tests/src/test/scala/akka/serialization/SerializationSetupSpec.scala b/akka-actor-tests/src/test/scala/akka/serialization/SerializationSetupSpec.scala index 191a271750c..62ed3bc1931 100644 --- a/akka-actor-tests/src/test/scala/akka/serialization/SerializationSetupSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/serialization/SerializationSetupSpec.scala @@ -112,11 +112,13 @@ class SerializationSetupSpec "fail during ActorSystem creation when misconfigured" in { val config = - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" akka.loglevel = OFF akka.stdout-loglevel = OFF akka.actor.serializers.doe = "john.is.not.here" - """).withFallback(ConfigFactory.load()) + """) + .withFallback(ConfigFactory.load()) a[ClassNotFoundException] should be thrownBy { val system = ActorSystem("SerializationSetupSpec-FailingSystem", config) diff --git 
a/akka-actor-tests/src/test/scala/akka/serialization/SerializeSpec.scala b/akka-actor-tests/src/test/scala/akka/serialization/SerializeSpec.scala index 5610e3d7f5b..6b029af1dd5 100644 --- a/akka-actor-tests/src/test/scala/akka/serialization/SerializeSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/serialization/SerializeSpec.scala @@ -117,8 +117,8 @@ object SerializationTests { """ class FooActor extends Actor { - def receive = { - case msg => sender() ! msg + def receive = { case msg => + sender() ! msg } } @@ -128,8 +128,8 @@ object SerializationTests { } class NonSerializableActor(@unused arg: AnyRef) extends Actor { - def receive = { - case s: String => sender() ! s + def receive = { case s: String => + sender() ! s } } @@ -184,10 +184,9 @@ class SerializeSpec extends AkkaSpec(SerializationTests.serializeConf) { "not serialize ActorCell" in { val a = system.actorOf(Props(new Actor { - def receive = { - case o: ObjectOutputStream => - try o.writeObject(this) - catch { case _: NotSerializableException => testActor ! "pass" } + def receive = { case o: ObjectOutputStream => + try o.writeObject(this) + catch { case _: NotSerializableException => testActor ! "pass" } } })) a ! new ObjectOutputStream(new ByteArrayOutputStream()) @@ -494,10 +493,9 @@ class AllowJavaSerializationSpec extends AkkaSpec(SerializationTests.allowJavaSe "not serialize ActorCell" in { val a = system.actorOf(Props(new Actor { - def receive = { - case o: ObjectOutputStream => - try o.writeObject(this) - catch { case _: NotSerializableException => testActor ! "pass" } + def receive = { case o: ObjectOutputStream => + try o.writeObject(this) + catch { case _: NotSerializableException => testActor ! "pass" } } })) a ! 
new ObjectOutputStream(new ByteArrayOutputStream()) @@ -527,8 +525,7 @@ class AllowJavaSerializationSpec extends AkkaSpec(SerializationTests.allowJavaSe } } -class NoVerificationWarningSpec - extends AkkaSpec(ConfigFactory.parseString(""" +class NoVerificationWarningSpec extends AkkaSpec(ConfigFactory.parseString(""" akka.actor.allow-java-serialization = on akka.actor.warn-about-java-serializer-usage = on akka.actor.warn-on-no-serialization-verification = on @@ -553,8 +550,7 @@ class NoVerificationWarningSpec } } -class NoVerificationWarningOffSpec - extends AkkaSpec(ConfigFactory.parseString(""" +class NoVerificationWarningOffSpec extends AkkaSpec(ConfigFactory.parseString(""" akka.actor.allow-java-serialization = on akka.actor.warn-about-java-serializer-usage = on akka.actor.warn-on-no-serialization-verification = off diff --git a/akka-actor-tests/src/test/scala/akka/util/BoundedBlockingQueueSpec.scala b/akka-actor-tests/src/test/scala/akka/util/BoundedBlockingQueueSpec.scala index 91389c04c2d..e3ab9b52abd 100644 --- a/akka-actor-tests/src/test/scala/akka/util/BoundedBlockingQueueSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/util/BoundedBlockingQueueSpec.scala @@ -183,7 +183,7 @@ class BoundedBlockingQueueSpec // queue.take() must happen first Thread.sleep(50) // this is why this test is tagged as TimingTest events should contain(awaitNotEmpty) - events should not contain (poll) + events should not contain poll } "block until the backing queue is non-empty" taggedAs TimingTest in { @@ -557,7 +557,7 @@ class BoundedBlockingQueueSpec val target = mutable.Buffer[String]() elems.foreach(queue.put) queue.drainTo(target.asJava) - elems should contain theSameElementsAs (target) + elems should contain theSameElementsAs target } } @@ -617,7 +617,7 @@ class BoundedBlockingQueueSpec queue.retainAll(elems.asJava) should equal(true) queue.remainingCapacity() should equal(1) queue.toArray() shouldNot contain("Akka") - queue.toArray() should contain theSameElementsAs 
(elems) + queue.toArray() should contain theSameElementsAs elems } "return false if no elements were removed" in { @@ -718,18 +718,14 @@ trait BlockingHelpers { action } - /** - * Check that a call does not return within a set timespan. - */ + /** Check that a call does not return within a set timespan. */ def mustBlockFor(timeout: Span)(action: => Unit)(implicit pos: Position): Unit = Exception.ignoring(classOf[TestFailedDueToTimeoutException]) { failAfter(timeout)(action) fail("Expected action to block for at least " + timeout.prettyString + " but it completed.") } - /** - * Check that a Future does not complete within a set timespan. - */ + /** Check that a Future does not complete within a set timespan. */ def mustBlockFor(timeout: Span, action: Future[_])(implicit pos: Position): Unit = Exception.ignoring(classOf[TimeoutException]) { Await.ready(action, timeout) @@ -738,9 +734,7 @@ trait BlockingHelpers { } -/** - * All events that can be recorded and asserted during a test. - */ +/** All events that can be recorded and asserted during a test. */ object QueueTestEvents { sealed abstract class QueueEvent case class Poll() extends QueueEvent @@ -760,9 +754,7 @@ object QueueTestEvents { val awaitNotFull = AwaitNotFull() } -/** - * Helper for setting up a queue under test with injected lock, conditions and backing queue. - */ +/** Helper for setting up a queue under test with injected lock, conditions and backing queue. 
*/ trait QueueSetupHelper { import java.util.Date @@ -776,9 +768,7 @@ trait QueueSetupHelper { lock: ReentrantLock, backingQueue: util.Queue[String]) - /** - * Backing queue that records all poll and offer calls in `events` - */ + /** Backing queue that records all poll and offer calls in `events` */ class TestBackingQueue(events: mutable.Buffer[QueueEvent]) extends util.LinkedList[String] { override def poll(): String = { @@ -797,9 +787,7 @@ trait QueueSetupHelper { } } - /** - * Reentrant lock condition that records when the condition is signaled or `await`ed. - */ + /** Reentrant lock condition that records when the condition is signaled or `await`ed. */ class TestCondition( events: mutable.Buffer[QueueEvent], condition: Condition, @@ -876,9 +864,7 @@ trait QueueSetupHelper { val backingQueue = new TestBackingQueue(events) - /** - * Class under test with the necessary backing queue, lock and conditions injected. - */ + /** Class under test with the necessary backing queue, lock and conditions injected. 
*/ class TestBoundedBlockingQueue() extends BoundedBlockingQueue[String](maxCapacity, backingQueue) { override def createLock(): ReentrantLock = realLock diff --git a/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala b/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala index 9f3c2ebfbbd..907a67ea6d8 100644 --- a/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala @@ -312,12 +312,10 @@ class ByteStringSpec extends AnyWordSpec with Matchers with Checkers { val builder = ByteString.newBuilder for (i <- 0 until data.length) builder.putLongPart(data(i), nBytes)(byteOrder) - reference.zipWithIndex - .collect({ // Since there is no partial put on LongBuffer, we need to collect only the interesting bytes - case (r, i) if byteOrder == ByteOrder.LITTLE_ENDIAN && i % elemSize < nBytes => r - case (r, i) if byteOrder == ByteOrder.BIG_ENDIAN && i % elemSize >= (elemSize - nBytes) => r - }) - .toSeq == builder.result() + reference.zipWithIndex.collect { // Since there is no partial put on LongBuffer, we need to collect only the interesting bytes + case (r, i) if byteOrder == ByteOrder.LITTLE_ENDIAN && i % elemSize < nBytes => r + case (r, i) if byteOrder == ByteOrder.BIG_ENDIAN && i % elemSize >= (elemSize - nBytes) => r + }.toSeq == builder.result() } def testFloatEncoding(slice: ArraySlice[Float], byteOrder: ByteOrder): Boolean = { @@ -886,13 +884,13 @@ class ByteStringSpec extends AnyWordSpec with Matchers with Checkers { "calling span" in { check { (a: ByteString, b: Byte) => - likeVector(a)({ _.span(_ != b) match { case (a, b) => (a, b) } }) + likeVector(a) { _.span(_ != b) match { case (a, b) => (a, b) } } } } "calling takeWhile" in { check { (a: ByteString, b: Byte) => - likeVector(a)({ _.takeWhile(_ != b) }) + likeVector(a) { _.takeWhile(_ != b) } } } "calling dropWhile" in { @@ -940,9 +938,9 @@ class ByteStringSpec extends AnyWordSpec with Matchers with Checkers { 
check { (slice: ByteStringSlice) => slice match { case (xs, from, until) => - likeVector(xs)({ + likeVector(xs) { _.slice(from, until) - }) + } } } } @@ -951,9 +949,9 @@ class ByteStringSpec extends AnyWordSpec with Matchers with Checkers { check { (slice: ByteStringSlice) => slice match { case (xs, from, until) => - likeVector(xs)({ + likeVector(xs) { _.drop(from).take(until - from) - }) + } } } } @@ -970,11 +968,11 @@ class ByteStringSpec extends AnyWordSpec with Matchers with Checkers { check { (slice: ByteStringSlice) => slice match { case (xs, from, until) => - likeVector(xs)({ it => + likeVector(xs) { it => val array = new Array[Byte](xs.length) it.copyToArray(array, from, until) array.toSeq - }) + } } } } @@ -1113,9 +1111,11 @@ class ByteStringSpec extends AnyWordSpec with Matchers with Checkers { check { (slice: ByteStringSlice) => slice match { case (xs, from, until) => - likeVecIt(xs)({ - _.slice(from, until).toSeq - }, strict = false) + likeVecIt(xs)( + { + _.slice(from, until).toSeq + }, + strict = false) } } } @@ -1124,9 +1124,11 @@ class ByteStringSpec extends AnyWordSpec with Matchers with Checkers { check { (slice: ByteStringSlice) => slice match { case (xs, from, until) => - likeVecIt(xs)({ - _.drop(from).take(until - from).toSeq - }, strict = false) + likeVecIt(xs)( + { + _.drop(from).take(until - from).toSeq + }, + strict = false) } } } @@ -1135,11 +1137,13 @@ class ByteStringSpec extends AnyWordSpec with Matchers with Checkers { check { (slice: ByteStringSlice) => slice match { case (xs, from, until) => - likeVecIt(xs)({ it => - val array = new Array[Byte](xs.length) - it.slice(from, until).copyToArray(array, from, until) - array.toSeq - }, strict = false) + likeVecIt(xs)( + { it => + val array = new Array[Byte](xs.length) + it.slice(from, until).copyToArray(array, from, until) + array.toSeq + }, + strict = false) } } } diff --git a/akka-actor-tests/src/test/scala/akka/util/FrequencySketchSpec.scala 
b/akka-actor-tests/src/test/scala/akka/util/FrequencySketchSpec.scala index 162ea98f4c2..ab24c197d00 100644 --- a/akka-actor-tests/src/test/scala/akka/util/FrequencySketchSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/util/FrequencySketchSpec.scala @@ -51,8 +51,8 @@ class FrequencySketchSpec extends AnyWordSpec with Matchers { // frequencies should be halved now (ignore value 500, the reset trigger) val frequencies2 = (1 to 499).map(i => i -> sketch.frequency(i.toString)) - val halved1 = frequencies1.zip(frequencies2).foldLeft(0) { - case (correct, ((_, f1), (_, f2))) => if (f2 == (f1 / 2)) correct + 1 else correct + val halved1 = frequencies1.zip(frequencies2).foldLeft(0) { case (correct, ((_, f1), (_, f2))) => + if (f2 == (f1 / 2)) correct + 1 else correct } // note: it's possible that the value that triggers the reset has a hash collision and this ends up // bumping the minimum value for another counter, so that the expected halved frequency is off-by-one @@ -71,8 +71,8 @@ class FrequencySketchSpec extends AnyWordSpec with Matchers { // frequencies should be halved now (ignore value 1000, the reset trigger) val frequencies4 = (1 to 999).map(i => i -> sketch.frequency(i.toString)) - val halved2 = frequencies3.zip(frequencies4).foldLeft(0) { - case (correct, ((_, f3), (_, f4))) => if (f4 == (f3 / 2)) correct + 1 else correct + val halved2 = frequencies3.zip(frequencies4).foldLeft(0) { case (correct, ((_, f3), (_, f4))) => + if (f4 == (f3 / 2)) correct + 1 else correct } // note: it's possible that the value that triggers the reset has a hash collision and this ends up // bumping the minimum value for another counter, so that the expected halved frequency is off-by-one @@ -296,8 +296,8 @@ class FrequencySketchSpec extends AnyWordSpec with Matchers { // frequencies should be halved now (ignore value 500, the reset trigger) val frequencies2 = (1 to 499).map(i => i -> sketch.frequency(i.toString)) - val halved1 = frequencies1.zip(frequencies2).foldLeft(0) { - 
case (correct, ((_, f1), (_, f2))) => if (f2 == (f1 / 2)) correct + 1 else correct + val halved1 = frequencies1.zip(frequencies2).foldLeft(0) { case (correct, ((_, f1), (_, f2))) => + if (f2 == (f1 / 2)) correct + 1 else correct } // note: it's possible that the value that triggers the reset has a hash collision and this ends up // bumping the minimum value for another counter, so that the expected halved frequency is off-by-one @@ -316,8 +316,8 @@ class FrequencySketchSpec extends AnyWordSpec with Matchers { // frequencies should be halved now (ignore value 1000, the reset trigger) val frequencies4 = (1 to 999).map(i => i -> sketch.frequency(i.toString)) - val halved2 = frequencies3.zip(frequencies4).foldLeft(0) { - case (correct, ((_, f3), (_, f4))) => if (f4 == (f3 / 2)) correct + 1 else correct + val halved2 = frequencies3.zip(frequencies4).foldLeft(0) { case (correct, ((_, f3), (_, f4))) => + if (f4 == (f3 / 2)) correct + 1 else correct } // note: it's possible that the value that triggers the reset has a hash collision and this ends up // bumping the minimum value for another counter, so that the expected halved frequency is off-by-one diff --git a/akka-actor-tests/src/test/scala/akka/util/IndexSpec.scala b/akka-actor-tests/src/test/scala/akka/util/IndexSpec.scala index 264e36e85df..f353e074ee0 100644 --- a/akka-actor-tests/src/test/scala/akka/util/IndexSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/util/IndexSpec.scala @@ -19,9 +19,11 @@ import akka.testkit.DefaultTimeout class IndexSpec extends AkkaSpec with Matchers with DefaultTimeout { implicit val ec: ExecutionContextExecutor = system.dispatcher private def emptyIndex = - new Index[String, Int](100, new Comparator[Int] { - override def compare(a: Int, b: Int): Int = Integer.compare(a, b) - }) + new Index[String, Int]( + 100, + new Comparator[Int] { + override def compare(a: Int, b: Int): Int = Integer.compare(a, b) + }) private def indexWithValues = { val index = emptyIndex @@ -58,11 +60,11 @@ 
class IndexSpec extends AkkaSpec with Matchers with DefaultTimeout { index.put("s1", 2) index.put("s2", 1) index.put("s2", 2) - //Remove value + // Remove value index.remove("s1", 1) should ===(true) index.remove("s1", 1) should ===(false) index.valueIterator("s1").toSet should ===(Set(2)) - //Remove key + // Remove key index.remove("s2") match { case Some(iter) => iter.toSet should ===(Set(1, 2)) case None => fail() @@ -101,16 +103,18 @@ class IndexSpec extends AkkaSpec with Matchers with DefaultTimeout { index.isEmpty should ===(true) } "be able to be accessed in parallel" in { - val index = new Index[Int, Int](100, new Comparator[Int] { - override def compare(a: Int, b: Int): Int = Integer.compare(a, b) - }) + val index = new Index[Int, Int]( + 100, + new Comparator[Int] { + override def compare(a: Int, b: Int): Int = Integer.compare(a, b) + }) val nrOfTasks = 10000 val nrOfKeys = 10 val nrOfValues = 10 - //Fill index + // Fill index for (key <- 0 until nrOfKeys; value <- 0 until nrOfValues) index.put(key, value) - //Tasks to be executed in parallel + // Tasks to be executed in parallel def putTask() = Future { index.put(Random.nextInt(nrOfKeys), Random.nextInt(nrOfValues)) } diff --git a/akka-actor-tests/src/test/scala/akka/util/PrettyDurationSpec.scala b/akka-actor-tests/src/test/scala/akka/util/PrettyDurationSpec.scala index 5c040f7cee0..350baab010e 100644 --- a/akka-actor-tests/src/test/scala/akka/util/PrettyDurationSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/util/PrettyDurationSpec.scala @@ -28,11 +28,10 @@ class PrettyDurationSpec extends AnyWordSpec with Matchers { "PrettyDuration" should { - cases.foreach { - case (d, expectedValue) => - s"print $d nanos as $expectedValue" in { - d.pretty should ===(expectedValue) - } + cases.foreach { case (d, expectedValue) => + s"print $d nanos as $expectedValue" in { + d.pretty should ===(expectedValue) + } } "work with infinity" in { diff --git 
a/akka-actor-tests/src/test/scala/akka/util/SegmentedRecencyListSpec.scala b/akka-actor-tests/src/test/scala/akka/util/SegmentedRecencyListSpec.scala index 88e3581a5f8..4fbecc30b2f 100644 --- a/akka-actor-tests/src/test/scala/akka/util/SegmentedRecencyListSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/util/SegmentedRecencyListSpec.scala @@ -13,11 +13,10 @@ class SegmentedRecencyListSpec extends AnyWordSpec with Matchers { private def check(recency: SegmentedRecencyList[String], expectedSegments: List[List[String]]): Unit = { recency.size shouldBe expectedSegments.map(_.size).sum - expectedSegments.zipWithIndex.foreach { - case (expectedSegment, level) => - expectedSegment.forall(recency.contains) - recency.leastToMostRecentOf(level).toList shouldBe expectedSegment - recency.sizeOf(level) shouldBe expectedSegment.size + expectedSegments.zipWithIndex.foreach { case (expectedSegment, level) => + expectedSegment.forall(recency.contains) + recency.leastToMostRecentOf(level).toList shouldBe expectedSegment + recency.sizeOf(level) shouldBe expectedSegment.size } } diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/ActorContextSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/ActorContextSpec.scala index 1dafbdf4e9d..bcb93a73a64 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/ActorContextSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/ActorContextSpec.scala @@ -124,14 +124,12 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit with AnyWordSp val probe = TestProbe[Event]() val internal = Behaviors - .receivePartial[Command] { - case (_, Fail) => - throw new TestException("Boom") + .receivePartial[Command] { case (_, Fail) => + throw new TestException("Boom") } - .receiveSignal { - case (_, signal) => - probe.ref ! ReceivedSignal(signal) - Behaviors.same + .receiveSignal { case (_, signal) => + probe.ref ! 
ReceivedSignal(signal) + Behaviors.same } .decorate @@ -148,13 +146,12 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit with AnyWordSp val behavior: Behavior[Command] = Behaviors - .receivePartial[Command] { - case (_, Stop) => Behaviors.stopped + .receivePartial[Command] { case (_, Stop) => + Behaviors.stopped } - .receiveSignal { - case (_, signal) => - probe.ref ! ReceivedSignal(signal) - Behaviors.same + .receiveSignal { case (_, signal) => + probe.ref ! ReceivedSignal(signal) + Behaviors.same } .decorate @@ -173,10 +170,9 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit with AnyWordSp probe.ref ! Pong Behaviors.same } - .receiveSignal { - case (_, signal) => - probe.ref ! GotChildSignal(signal) - Behaviors.stopped + .receiveSignal { case (_, signal) => + probe.ref ! GotChildSignal(signal) + Behaviors.stopped } .decorate @@ -186,15 +182,13 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit with AnyWordSp probe.ref ! ChildMade(childRef) Behaviors - .receivePartial[Command] { - case (context, StopRef(ref)) => - context.stop(ref) - Behaviors.same + .receivePartial[Command] { case (context, StopRef(ref)) => + context.stop(ref) + Behaviors.same } - .receiveSignal { - case (_, signal) => - probe.ref ! ReceivedSignal(signal) - Behaviors.stopped + .receiveSignal { case (_, signal) => + probe.ref ! ReceivedSignal(signal) + Behaviors.stopped } .decorate }) @@ -221,15 +215,13 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit with AnyWordSp context.watch(childRef) probe.ref ! ChildMade(childRef) Behaviors - .receivePartial[Command] { - case (context, StopRef(ref)) => - context.stop(ref) - Behaviors.same + .receivePartial[Command] { case (context, StopRef(ref)) => + context.stop(ref) + Behaviors.same } - .receiveSignal { - case (_, signal) => - probe.ref ! ReceivedSignal(signal) - Behaviors.stopped + .receiveSignal { case (_, signal) => + probe.ref ! 
ReceivedSignal(signal) + Behaviors.stopped } }) .decorate @@ -301,25 +293,22 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit with AnyWordSp case (_, Fail) => throw new TestException("boom") } - .receiveSignal { - case (_, PostStop) => - probe.ref ! ReceivedSignal(PostStop) - Behaviors.same + .receiveSignal { case (_, PostStop) => + probe.ref ! ReceivedSignal(PostStop) + Behaviors.same } .decorate val actorToWatch = spawn(behavior) val watcher: ActorRef[Command] = spawn( Behaviors - .receivePartial[Any] { - case (context, Ping) => - context.watch(actorToWatch) - probe.ref ! Pong - Behaviors.same + .receivePartial[Any] { case (context, Ping) => + context.watch(actorToWatch) + probe.ref ! Pong + Behaviors.same } - .receiveSignal { - case (_, signal) => - probe.ref ! ReceivedSignal(signal) - Behaviors.same + .receiveSignal { case (_, signal) => + probe.ref ! ReceivedSignal(signal) + Behaviors.same } .decorate) actorToWatch ! Ping @@ -362,9 +351,8 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit with AnyWordSp "watch a child actor before its termination" in { val probe = TestProbe[Event]() val child = Behaviors - .receivePartial[Command] { - case (_, Stop) => - Behaviors.stopped + .receivePartial[Command] { case (_, Stop) => + Behaviors.stopped } .decorate spawn( @@ -374,15 +362,13 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit with AnyWordSp context.watch(childRef) probe.ref ! ChildMade(childRef) Behaviors - .receivePartial[Command] { - case (_, Ping) => - probe.ref ! Pong - Behaviors.same + .receivePartial[Command] { case (_, Ping) => + probe.ref ! Pong + Behaviors.same } - .receiveSignal { - case (_, signal) => - probe.ref ! ReceivedSignal(signal) - Behaviors.same + .receiveSignal { case (_, signal) => + probe.ref ! 
ReceivedSignal(signal) + Behaviors.same } }) .decorate) @@ -394,9 +380,8 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit with AnyWordSp "watch a child actor after its termination" in { val probe = TestProbe[Event]() val child = Behaviors - .receivePartial[Command] { - case (_, Stop) => - Behaviors.stopped + .receivePartial[Command] { case (_, Stop) => + Behaviors.stopped } .decorate val actor = spawn( @@ -405,16 +390,14 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit with AnyWordSp val childRef = context.spawn(child, "A") probe.ref ! ChildMade(childRef) Behaviors - .receivePartial[Command] { - case (context, Watch(ref)) => - context.watch(ref) - probe.ref ! Pong - Behaviors.same + .receivePartial[Command] { case (context, Watch(ref)) => + context.watch(ref) + probe.ref ! Pong + Behaviors.same } - .receiveSignal { - case (_, signal) => - probe.ref ! ReceivedSignal(signal) - Behaviors.same + .receiveSignal { case (_, signal) => + probe.ref ! ReceivedSignal(signal) + Behaviors.same } }) .decorate) @@ -430,9 +413,8 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit with AnyWordSp "unwatch a child actor before its termination" in { val probe = TestProbe[Event]() val child = Behaviors - .receivePartial[Command] { - case (_, Stop) => - Behaviors.stopped + .receivePartial[Command] { case (_, Stop) => + Behaviors.stopped } .decorate val actor = spawn( @@ -451,10 +433,9 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit with AnyWordSp probe.ref ! Pong Behaviors.same } - .receiveSignal { - case (_, signal) => - probe.ref ! ReceivedSignal(signal) - Behaviors.same + .receiveSignal { case (_, signal) => + probe.ref ! 
ReceivedSignal(signal) + Behaviors.same } }) .decorate) @@ -470,14 +451,12 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit with AnyWordSp "terminate upon not handling Terminated" in { val probe = TestProbe[Event]() val child = Behaviors - .receivePartial[Command] { - case (_, Stop) => - Behaviors.stopped + .receivePartial[Command] { case (_, Stop) => + Behaviors.stopped } - .receiveSignal { - case (_, signal) => - probe.ref ! GotChildSignal(signal) - Behaviors.same + .receiveSignal { case (_, signal) => + probe.ref ! GotChildSignal(signal) + Behaviors.same } .decorate val actor = spawn( @@ -487,24 +466,22 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit with AnyWordSp context.watch(childRef) probe.ref ! ChildMade(childRef) Behaviors - .receivePartial[Command] { - case (_, Inert) => - probe.ref ! InertEvent - Behaviors - .receive[Command] { - case (_, _) => Behaviors.unhandled - } - .receiveSignal { - case (_, Terminated(_)) => Behaviors.unhandled - case (_, signal) => - probe.ref ! ReceivedSignal(signal) - Behaviors.same - } + .receivePartial[Command] { case (_, Inert) => + probe.ref ! InertEvent + Behaviors + .receive[Command] { case (_, _) => + Behaviors.unhandled + } + .receiveSignal { + case (_, Terminated(_)) => Behaviors.unhandled + case (_, signal) => + probe.ref ! ReceivedSignal(signal) + Behaviors.same + } } - .receiveSignal { - case (_, signal) => - probe.ref ! ReceivedSignal(signal) - Behaviors.same + .receiveSignal { case (_, signal) => + probe.ref ! ReceivedSignal(signal) + Behaviors.same } }) .decorate) @@ -524,10 +501,9 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit with AnyWordSp val probe = TestProbe[Info]() val actor = spawn( Behaviors - .receivePartial[String] { - case (context, "info") => - probe.ref ! (context.system -> context.self) - Behaviors.same + .receivePartial[String] { case (context, "info") => + probe.ref ! 
(context.system -> context.self) + Behaviors.same } .decorate) actor ! "info" @@ -613,10 +589,9 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit with AnyWordSp val probe = TestProbe[Event]() val actor = spawn( Behaviors - .receivePartial[Command] { - case (context, Ping) => - context.scheduleOnce(1.nano, probe.ref, Pong) - Behaviors.same + .receivePartial[Command] { case (context, Ping) => + context.scheduleOnce(1.nano, probe.ref, Pong) + Behaviors.same } .decorate) actor ! Ping @@ -659,11 +634,10 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit with AnyWordSp val actor = spawn(Behaviors.setup[Command](context => { val child = context.spawnAnonymous(Behaviors.empty[Command]) probe.ref ! ChildMade(child) - Behaviors.receivePartial[Command] { - case (context, StopRef(ref)) => - context.stop(ref) - probe.ref ! Pong - Behaviors.same + Behaviors.receivePartial[Command] { case (context, StopRef(ref)) => + context.stop(ref) + probe.ref ! Pong + Behaviors.same } })) val child = probe.expectMessageType[ChildMade].ref diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/ActorRefIgnoreSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/ActorRefIgnoreSpec.scala index c2a17173895..6b7a6a75dea 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/ActorRefIgnoreSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/ActorRefIgnoreSpec.scala @@ -26,11 +26,10 @@ class ActorRefIgnoreSpec extends ScalaTestWithActorTestKit() with AnyWordSpecLik // messages it received so far val askMeActorBehavior: Behavior[Request] = { def internalBehavior(counter: Int): Behavior[Request] = - Behaviors.receiveMessage[Request] { - case Request(replyTo) => - val newCounter = counter + 1 - replyTo ! newCounter - internalBehavior(newCounter) + Behaviors.receiveMessage[Request] { case Request(replyTo) => + val newCounter = counter + 1 + replyTo ! 
newCounter + internalBehavior(newCounter) } internalBehavior(0) diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/AskSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/AskSpec.scala index e4ec2ef6ec3..787f388d9ac 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/AskSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/AskSpec.scala @@ -35,10 +35,13 @@ object AskSpec { final case class ProxyReply(s: String) extends Proxy } -class AskSpec extends ScalaTestWithActorTestKit(""" +class AskSpec + extends ScalaTestWithActorTestKit(""" akka.loglevel=DEBUG akka.actor.debug.event-stream = on - """) with AnyWordSpecLike with LogCapturing { + """) + with AnyWordSpecLike + with LogCapturing { import AskSpec._ @@ -199,8 +202,8 @@ class AskSpec extends ScalaTestWithActorTestKit(""" val ex = new RuntimeException("not good!") class LegacyActor extends akka.actor.Actor { - def receive = { - case Ping(respondTo) => respondTo ! akka.actor.Status.Failure(ex) + def receive = { case Ping(respondTo) => + respondTo ! akka.actor.Status.Failure(ex) } } diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/BehaviorSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/BehaviorSpec.scala index 26cd1c1aad3..c820e793248 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/BehaviorSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/BehaviorSpec.scala @@ -172,10 +172,9 @@ object BehaviorSpec { case (_, Stop) => SBehaviors.stopped case (_, _) => SBehaviors.unhandled } - .receiveSignal { - case (_, signal) => - monitor ! ReceivedSignal(signal) - SBehaviors.same + .receiveSignal { case (_, signal) => + monitor ! 
ReceivedSignal(signal) + SBehaviors.same } } /* @@ -361,10 +360,9 @@ class ReceiveBehaviorSpec extends Messages with BecomeWithLifecycle with Stoppab case (_, Stop) => SBehaviors.stopped case (_, _: AuxPing) => SBehaviors.unhandled } - .receiveSignal { - case (_, signal) => - monitor ! ReceivedSignal(signal) - SBehaviors.same + .receiveSignal { case (_, signal) => + monitor ! ReceivedSignal(signal) + SBehaviors.same } } } @@ -399,10 +397,9 @@ class ImmutableWithSignalScalaBehaviorSpec extends Messages with BecomeWithLifec case _: AuxPing => SBehaviors.unhandled } } - .receiveSignal { - case (_, sig) => - monitor ! ReceivedSignal(sig) - SBehaviors.same + .receiveSignal { case (_, sig) => + monitor ! ReceivedSignal(sig) + SBehaviors.same } } @@ -488,10 +485,12 @@ class DeferredScalaBehaviorSpec extends ImmutableWithSignalScalaBehaviorSpec { override def behavior(monitor: ActorRef[Event]): (Behavior[Command], Aux) = { val inbox = TestInbox[Done]("deferredListener") - (SBehaviors.setup(_ => { - inbox.ref ! Done - super.behavior(monitor)._1 - }), inbox) + ( + SBehaviors.setup(_ => { + inbox.ref ! Done + super.behavior(monitor)._1 + }), + inbox) } override def checkAux(signal: Signal, aux: Aux): Unit = @@ -595,10 +594,16 @@ class ImmutableJavaBehaviorSpec extends Messages with Become with Stoppable { class TransformMessagesJavaBehaviorSpec extends ImmutableWithSignalJavaBehaviorSpec with Reuse with Siphon { override def behavior(monitor: ActorRef[Event]): (Behavior[Command], Aux) = { val inbox = TestInbox[Command]("transformMessagesListener") - JBehaviors.transformMessages(classOf[Command], super.behavior(monitor)._1, pf(_.`match`(classOf[Command], fi(x => { - inbox.ref ! x - x - })))) -> inbox + JBehaviors.transformMessages( + classOf[Command], + super.behavior(monitor)._1, + pf( + _.`match`( + classOf[Command], + fi(x => { + inbox.ref ! 
x + x + })))) -> inbox } } @@ -607,10 +612,12 @@ class DeferredJavaBehaviorSpec extends ImmutableWithSignalJavaBehaviorSpec { override def behavior(monitor: ActorRef[Event]): (Behavior[Command], Aux) = { val inbox = TestInbox[Done]("deferredListener") - (JBehaviors.setup(_ => { - inbox.ref ! Done - super.behavior(monitor)._1 - }), inbox) + ( + JBehaviors.setup(_ => { + inbox.ref ! Done + super.behavior(monitor)._1 + }), + inbox) } override def checkAux(signal: Signal, aux: Aux): Unit = diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/DeferredSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/DeferredSpec.scala index 0e1decdbdae..823d9a6db25 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/DeferredSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/DeferredSpec.scala @@ -58,10 +58,9 @@ class DeferredSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with L throw new RuntimeException("simulated exc from factory") with NoStackTrace }) context.watch(child) - Behaviors.receive[Command]((_, _) => Behaviors.same).receiveSignal { - case (_, Terminated(`child`)) => - probe.ref ! Pong - Behaviors.stopped + Behaviors.receive[Command]((_, _) => Behaviors.same).receiveSignal { case (_, Terminated(`child`)) => + probe.ref ! Pong + Behaviors.stopped } } LoggingTestKit.error[ActorInitializationException].expect { @@ -76,10 +75,9 @@ class DeferredSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with L val behv = Behaviors.setup[Command] { context => val child = context.spawnAnonymous(Behaviors.setup[Command](_ => Behaviors.stopped)) context.watch(child) - Behaviors.receive[Command]((_, _) => Behaviors.same).receiveSignal { - case (_, Terminated(`child`)) => - probe.ref ! Pong - Behaviors.stopped + Behaviors.receive[Command]((_, _) => Behaviors.same).receiveSignal { case (_, Terminated(`child`)) => + probe.ref ! 
Pong + Behaviors.stopped } } spawn(behv) @@ -105,8 +103,8 @@ class DeferredSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with L probe.ref ! Started target(probe.ref) } - .transformMessages[Command] { - case m => m + .transformMessages[Command] { case m => + m } probe.expectNoMessage() // not yet val ref = spawn(behv) @@ -120,10 +118,12 @@ class DeferredSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with L // monitor is implemented with tap, so this is testing both val probe = TestProbe[Event]("evt") val monitorProbe = TestProbe[Command]("monitor") - val behv = Behaviors.monitor(monitorProbe.ref, Behaviors.setup[Command] { _ => - probe.ref ! Started - target(probe.ref) - }) + val behv = Behaviors.monitor( + monitorProbe.ref, + Behaviors.setup[Command] { _ => + probe.ref ! Started + target(probe.ref) + }) probe.expectNoMessage() // not yet val ref = spawn(behv) // it's supposed to be created immediately (not waiting for first message) diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/ExtensionsSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/ExtensionsSpec.scala index 2299f793c89..09b03e4a972 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/ExtensionsSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/ExtensionsSpec.scala @@ -63,15 +63,20 @@ object AccessSystemFromConstructorExtensionId extends ExtensionId[AccessSystemFr } class AccessSystemFromConstructor(system: ActorSystem[_]) extends Extension { system.log.info("I log from the constructor") - system.receptionist ! Receptionist.Find(ServiceKey[String]("i-just-made-it-up"), system.deadLetters) // or touch the receptionist! + system.receptionist ! Receptionist.Find( + ServiceKey[String]("i-just-made-it-up"), + system.deadLetters + ) // or touch the receptionist! 
} object ExtensionsSpec { - val config = ConfigFactory.parseString(""" + val config = ConfigFactory + .parseString(""" akka.actor.typed { library-extensions += "akka.actor.typed.InstanceCountingExtension" } - """).resolve() + """) + .resolve() } class ExtensionsSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with LogCapturing { @@ -111,9 +116,7 @@ class ExtensionsSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with "load extensions from the configuration" in withEmptyActorSystem( "ExtensionsSpec03", - Some( - ConfigFactory.parseString( - """ + Some(ConfigFactory.parseString(""" akka.actor.typed.extensions = ["akka.actor.typed.DummyExtension1$", "akka.actor.typed.SlowExtension$"] """))) { sys => sys.hasExtension(DummyExtension1) should ===(true) @@ -175,8 +178,9 @@ class ExtensionsSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with intercept[RuntimeException] { withEmptyActorSystem( "ExtensionsSpec08", - Some(ConfigFactory.parseString( - """akka.actor.typed.library-extensions += "akka.actor.typed.MissingExtension""""))) { _ => + Some( + ConfigFactory.parseString( + """akka.actor.typed.library-extensions += "akka.actor.typed.MissingExtension""""))) { _ => () } } @@ -225,9 +229,7 @@ class ExtensionsSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with "override extensions via ActorSystemSetup" in withEmptyActorSystem( "ExtensionsSpec10", - Some( - ConfigFactory.parseString( - """ + Some(ConfigFactory.parseString(""" akka.actor.typed.extensions = ["akka.actor.typed.DummyExtension1$", "akka.actor.typed.SlowExtension$"] """)), Some(ActorSystemSetup(new DummyExtension1Setup(_ => new DummyExtension1ViaSetup)))) { sys => @@ -243,9 +245,7 @@ class ExtensionsSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with "allow for interaction with log from extension constructor" in { withEmptyActorSystem( "ExtensionsSpec11", - Some( - ConfigFactory.parseString( - """ + Some(ConfigFactory.parseString(""" 
akka.actor.typed.extensions = ["akka.actor.typed.AccessSystemFromConstructorExtensionId$"] """)), None) { sys => diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/InterceptSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/InterceptSpec.scala index 2eaf67d955b..0c2763b6ac8 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/InterceptSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/InterceptSpec.scala @@ -295,10 +295,9 @@ class InterceptSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with "be useful for implementing signal based PoisonPill" in { - def inner(count: Int): Behavior[Msg] = Behaviors.receiveMessage { - case Msg(hello, replyTo) => - replyTo ! s"$hello-$count" - inner(count + 1) + def inner(count: Int): Behavior[Msg] = Behaviors.receiveMessage { case Msg(hello, replyTo) => + replyTo ! s"$hello-$count" + inner(count + 1) } val decorated: Behavior[Msg] = @@ -318,10 +317,9 @@ class InterceptSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with "be useful for implementing custom message based PoisonPill" in { - def inner(count: Int): Behavior[Msg] = Behaviors.receiveMessage { - case Msg(hello, replyTo) => - replyTo ! s"$hello-$count" - inner(count + 1) + def inner(count: Int): Behavior[Msg] = Behaviors.receiveMessage { case Msg(hello, replyTo) => + replyTo ! s"$hello-$count" + inner(count + 1) } val poisonInterceptor = new BehaviorInterceptor[Any, Msg] { @@ -470,8 +468,8 @@ class InterceptSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with "be possible to combine with transformMessages" in { val probe = createTestProbe[String]() - val ref = spawn(MultiProtocol(probe.ref).transformMessages[String] { - case s => Command(s.toUpperCase()) + val ref = spawn(MultiProtocol(probe.ref).transformMessages[String] { case s => + Command(s.toUpperCase()) }) ref ! 
"a" @@ -483,10 +481,12 @@ class InterceptSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with "be possible to combine with MDC" in { val probe = createTestProbe[String]() val ref = spawn(Behaviors.setup[Command] { _ => - Behaviors.withMdc(staticMdc = Map("x" -> "y"), mdcForMessage = (msg: Command) => { - probe.ref ! s"mdc:${msg.s.toUpperCase()}" - Map("msg" -> msg.s.toUpperCase()) - }) { + Behaviors.withMdc( + staticMdc = Map("x" -> "y"), + mdcForMessage = (msg: Command) => { + probe.ref ! s"mdc:${msg.s.toUpperCase()}" + Map("msg" -> msg.s.toUpperCase()) + }) { MultiProtocol(probe.ref) } }) diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/LogMessagesSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/LogMessagesSpec.scala index b9f4edb59b1..756e6eba6e1 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/LogMessagesSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/LogMessagesSpec.scala @@ -14,9 +14,12 @@ import akka.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit import akka.actor.typed.scaladsl.Behaviors import akka.actor.typed.scaladsl.adapter._ -class LogMessagesSpec extends ScalaTestWithActorTestKit(""" +class LogMessagesSpec + extends ScalaTestWithActorTestKit(""" akka.loglevel = DEBUG # test verifies debug - """) with AnyWordSpecLike with LogCapturing { + """) + with AnyWordSpecLike + with LogCapturing { implicit val classic: actor.ActorSystem = system.toClassic diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/MailboxSelectorSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/MailboxSelectorSpec.scala index d2025f2772b..e2c1576d951 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/MailboxSelectorSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/MailboxSelectorSpec.scala @@ -21,30 +21,32 @@ import akka.dispatch.BoundedNodeMessageQueue import akka.dispatch.MessageQueue import 
akka.dispatch.UnboundedMessageQueueSemantics -class MailboxSelectorSpec extends ScalaTestWithActorTestKit(""" +class MailboxSelectorSpec + extends ScalaTestWithActorTestKit(""" specific-mailbox { mailbox-type = "akka.dispatch.NonBlockingBoundedMailbox" mailbox-capacity = 4 } - """) with AnyWordSpecLike with LogCapturing { + """) + with AnyWordSpecLike + with LogCapturing { case class WhatsYourMailbox(replyTo: ActorRef[MessageQueue]) private def behavior: Behavior[WhatsYourMailbox] = Behaviors.setup { context => - Behaviors.receiveMessage[WhatsYourMailbox] { - case WhatsYourMailbox(replyTo) => - val mailbox = context match { - case adapter: ActorContextAdapter[_] => - adapter.classicActorContext match { - case cell: ActorCell => - cell.mailbox.messageQueue - case unexpected => throw new RuntimeException(s"Unexpected: $unexpected") - } - case unexpected => throw new RuntimeException(s"Unexpected: $unexpected") - } + Behaviors.receiveMessage[WhatsYourMailbox] { case WhatsYourMailbox(replyTo) => + val mailbox = context match { + case adapter: ActorContextAdapter[_] => + adapter.classicActorContext match { + case cell: ActorCell => + cell.mailbox.messageQueue + case unexpected => throw new RuntimeException(s"Unexpected: $unexpected") + } + case unexpected => throw new RuntimeException(s"Unexpected: $unexpected") + } - replyTo ! mailbox - Behaviors.stopped + replyTo ! mailbox + Behaviors.stopped } } @@ -65,15 +67,17 @@ class MailboxSelectorSpec extends ScalaTestWithActorTestKit(""" "set capacity on a bounded mailbox" in { val latch = new CountDownLatch(1) val probe = testKit.createTestProbe[String]() - val actor = spawn(Behaviors.receiveMessage[String] { - case "one" => - // block here so we can fill mailbox up - probe ! 
"blocking-on-one" - latch.await(10, TimeUnit.SECONDS) - Behaviors.same - case _ => - Behaviors.same - }, MailboxSelector.bounded(2)) + val actor = spawn( + Behaviors.receiveMessage[String] { + case "one" => + // block here so we can fill mailbox up + probe ! "blocking-on-one" + latch.await(10, TimeUnit.SECONDS) + Behaviors.same + case _ => + Behaviors.same + }, + MailboxSelector.bounded(2)) actor ! "one" // actor will block here probe.expectMessage("blocking-on-one") actor ! "two" diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/OrElseSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/OrElseSpec.scala index 9789233f590..a61d92f2016 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/OrElseSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/OrElseSpec.scala @@ -76,25 +76,22 @@ object OrElseSpec { def ping(counters: Map[String, Int]): Behavior[Ping] = { - val ping1: PartialFunction[Ping, Behavior[Ping]] = { - case Ping1(replyTo: ActorRef[Pong]) => - val newCounters = counters.updated("ping1", counters.getOrElse("ping1", 0) + 1) - replyTo ! Pong(newCounters("ping1")) - ping(newCounters) + val ping1: PartialFunction[Ping, Behavior[Ping]] = { case Ping1(replyTo: ActorRef[Pong]) => + val newCounters = counters.updated("ping1", counters.getOrElse("ping1", 0) + 1) + replyTo ! Pong(newCounters("ping1")) + ping(newCounters) } - val ping2: PartialFunction[Ping, Behavior[Ping]] = { - case Ping2(replyTo: ActorRef[Pong]) => - val newCounters = counters.updated("ping2", counters.getOrElse("ping2", 0) + 1) - replyTo ! Pong(newCounters("ping2")) - ping(newCounters) + val ping2: PartialFunction[Ping, Behavior[Ping]] = { case Ping2(replyTo: ActorRef[Pong]) => + val newCounters = counters.updated("ping2", counters.getOrElse("ping2", 0) + 1) + replyTo ! 
Pong(newCounters("ping2")) + ping(newCounters) } - val ping3: PartialFunction[Ping, Behavior[Ping]] = { - case Ping3(replyTo: ActorRef[Pong]) => - val newCounters = counters.updated("ping3", counters.getOrElse("ping3", 0) + 1) - replyTo ! Pong(newCounters("ping3")) - ping(newCounters) + val ping3: PartialFunction[Ping, Behavior[Ping]] = { case Ping3(replyTo: ActorRef[Pong]) => + val newCounters = counters.updated("ping3", counters.getOrElse("ping3", 0) + 1) + replyTo ! Pong(newCounters("ping3")) + ping(newCounters) } val pingHandlers: List[PartialFunction[Ping, Behavior[Ping]]] = ping1 :: ping2 :: ping3 :: Nil @@ -150,14 +147,13 @@ object OrElseSpec { } } - def ping1(count: Int): Behavior[Ping] = Behaviors.receiveMessagePartial { - case Ping1(replyTo: ActorRef[Pong]) => - val newCount = count + 1 - replyTo ! Pong(newCount) - // note that this is nice since it doesn't have to know anything about the shared - // state (counters Map) as in the other examples, and it can switch to it's own - // new behavior - ping1(newCount) + def ping1(count: Int): Behavior[Ping] = Behaviors.receiveMessagePartial { case Ping1(replyTo: ActorRef[Pong]) => + val newCount = count + 1 + replyTo ! Pong(newCount) + // note that this is nice since it doesn't have to know anything about the shared + // state (counters Map) as in the other examples, and it can switch to it's own + // new behavior + ping1(newCount) } def ping2(count: Int): Behavior[Ping] = Behaviors.receiveMessage { @@ -168,11 +164,10 @@ object OrElseSpec { case _ => Behaviors.unhandled } - def ping3(count: Int): Behavior[Ping] = Behaviors.receiveMessagePartial { - case Ping3(replyTo: ActorRef[Pong]) => - val newCount = count + 1 - replyTo ! Pong(newCount) - ping3(newCount) + def ping3(count: Int): Behavior[Ping] = Behaviors.receiveMessagePartial { case Ping3(replyTo: ActorRef[Pong]) => + val newCount = count + 1 + replyTo ! 
Pong(newCount) + ping3(newCount) } def ping(): Behavior[Ping] = { diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/SpawnProtocolSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/SpawnProtocolSpec.scala index 2ed43834c2a..1f14e01c741 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/SpawnProtocolSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/SpawnProtocolSpec.scala @@ -21,10 +21,9 @@ object SpawnProtocolSpec { case object Pong val target: Behavior[Message] = - Behaviors.receiveMessage { - case Ping(replyTo) => - replyTo ! Pong - Behaviors.same + Behaviors.receiveMessage { case Ping(replyTo) => + replyTo ! Pong + Behaviors.same } } diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/SupervisionSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/SupervisionSpec.scala index 33f93560868..73957c34b6f 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/SupervisionSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/SupervisionSpec.scala @@ -82,14 +82,13 @@ object SupervisionSpec { case Throw(e) => throw e } - }.receiveSignal { - case (_, sig) => - if (sig == PostStop) - slowStop.foreach(latch => latch.await(10, TimeUnit.SECONDS)) - else if (sig == PreRestart) - slowRestart.foreach(latch => latch.await(10, TimeUnit.SECONDS)) - monitor ! ReceivedSignal(sig) - Behaviors.same + }.receiveSignal { case (_, sig) => + if (sig == PostStop) + slowStop.foreach(latch => latch.await(10, TimeUnit.SECONDS)) + else if (sig == PreRestart) + slowRestart.foreach(latch => latch.await(10, TimeUnit.SECONDS)) + monitor ! 
ReceivedSignal(sig) + Behaviors.same } class FailingConstructor(context: ActorContext[Command], monitor: ActorRef[Event]) @@ -263,9 +262,12 @@ class StubbedSupervisionSpec extends AnyWordSpec with Matchers with LogCapturing } } -class SupervisionSpec extends ScalaTestWithActorTestKit(""" +class SupervisionSpec + extends ScalaTestWithActorTestKit(""" akka.log-dead-letters = off - """) with AnyWordSpecLike with LogCapturing { + """) + with AnyWordSpecLike + with LogCapturing { import BehaviorInterceptor._ import SupervisionSpec._ @@ -647,8 +649,8 @@ class SupervisionSpec extends ScalaTestWithActorTestKit(""" } "optionally NOT stop children when backoff" in { - testNotStopChildren( - strategy = SupervisorStrategy.restartWithBackoff(10.millis, 10.millis, 0).withStopChildren(enabled = false)) + testNotStopChildren(strategy = + SupervisorStrategy.restartWithBackoff(10.millis, 10.millis, 0).withStopChildren(enabled = false)) } def testNotStopChildren(strategy: SupervisorStrategy): Unit = { @@ -960,8 +962,8 @@ class SupervisionSpec extends ScalaTestWithActorTestKit(""" alreadyStarted.set(true) startedProbe.ref ! Started - Behaviors.receiveMessagePartial { - case Throw(boom) => throw boom + Behaviors.receiveMessagePartial { case Throw(boom) => + throw boom } }) .onFailure[Exception](strategy) @@ -1073,8 +1075,8 @@ class SupervisionSpec extends ScalaTestWithActorTestKit(""" } } - "fail to restart when deferred factory throws unhandled" in new FailingUnhandledTestSetup( - strategy = SupervisorStrategy.restart) { + "fail to restart when deferred factory throws unhandled" in new FailingUnhandledTestSetup(strategy = + SupervisorStrategy.restart) { LoggingTestKit.error[ActorInitializationException].expect { spawn(behv) @@ -1475,10 +1477,9 @@ class SupervisionSpec extends ScalaTestWithActorTestKit(""" Behaviors.same case _ => throw new Exc1 } - .receiveSignal { - case (_, PreRestart) => - probe.ref ! 
ReceivedSignal(PreRestart) - Behaviors.same + .receiveSignal { case (_, PreRestart) => + probe.ref ! ReceivedSignal(PreRestart) + Behaviors.same } .narrow @@ -1566,10 +1567,9 @@ class SupervisionSpec extends ScalaTestWithActorTestKit(""" probe.ref ! other Behaviors.same } - .receiveSignal { - case (_, PreRestart) => - probe.ref ! "PreRestart" - Behaviors.same + .receiveSignal { case (_, PreRestart) => + probe.ref ! "PreRestart" + Behaviors.same } }) .onFailure[TestException](SupervisorStrategy.restart)) @@ -1594,10 +1594,9 @@ class SupervisionSpec extends ScalaTestWithActorTestKit(""" case (_, "throw-test-exception") => throw TestException("boom") } - .receiveSignal { - case (_, signal @ (PreRestart | PostStop)) => - signalProbe.ref ! signal.toString - Behaviors.same + .receiveSignal { case (_, signal @ (PreRestart | PostStop)) => + signalProbe.ref ! signal.toString + Behaviors.same } // restart on all exceptions, stop on specific exception subtype @@ -1642,8 +1641,8 @@ class SupervisionSpec extends ScalaTestWithActorTestKit(""" Behaviors.stopped } else { stopInSetup.set(true) - Behaviors.receiveMessagePartial { - case "boom" => throw TestException("boom") + Behaviors.receiveMessagePartial { case "boom" => + throw TestException("boom") } } }) diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/TimerSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/TimerSpec.scala index f60e17b4066..24b796ef7fb 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/TimerSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/TimerSpec.scala @@ -260,11 +260,10 @@ class TimerSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with LogC }) var seen = Set.empty[String] - probe.fishForMessage(500.millis) { - case message => - seen += message - if (seen.size == 2) FishingOutcomes.complete - else FishingOutcomes.continue + probe.fishForMessage(500.millis) { case message => + seen += message + if (seen.size == 2) 
FishingOutcomes.complete + else FishingOutcomes.continue } ref ! "stop" @@ -285,11 +284,10 @@ class TimerSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with LogC val ref = spawn(newBehavior(1)) var seen = Set.empty[String] - probe.fishForMessage(500.millis) { - case message => - seen += message - if (seen.size == 2) FishingOutcomes.complete - else FishingOutcomes.continue + probe.fishForMessage(500.millis) { case message => + seen += message + if (seen.size == 2) FishingOutcomes.complete + else FishingOutcomes.continue } ref ! "stop" diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/TransformMessagesSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/TransformMessagesSpec.scala index 44b54ea3758..de32c0defa1 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/TransformMessagesSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/TransformMessagesSpec.scala @@ -76,10 +76,9 @@ class TransformMessagesSpec extends ScalaTestWithActorTestKit with AnyWordSpecLi val transformCount = new AtomicInteger(0) // sadly the only "same" we can know is if it is the same PF - val transformPF: PartialFunction[String, String] = { - case s => - transformCount.incrementAndGet() - s + val transformPF: PartialFunction[String, String] = { case s => + transformCount.incrementAndGet() + s } def transform(behavior: Behavior[String]): Behavior[String] = behavior.transformMessages(transformPF) @@ -102,10 +101,9 @@ class TransformMessagesSpec extends ScalaTestWithActorTestKit with AnyWordSpecLi val transformCount = new AtomicInteger(0) // sadly the only "same" we can know is if it is the same PF - val transformPF: PartialFunction[String, String] = { - case s => - transformCount.incrementAndGet() - s + val transformPF: PartialFunction[String, String] = { case s => + transformCount.incrementAndGet() + s } def transform(behavior: Behavior[String]): Behavior[String] = behavior.transformMessages(transformPF) @@ -132,8 
+130,8 @@ class TransformMessagesSpec extends ScalaTestWithActorTestKit with AnyWordSpecLi val probe = TestProbe[String]() def transform(behavior: Behavior[String]): Behavior[String] = - behavior.transformMessages[String] { - case s => s.toLowerCase + behavior.transformMessages[String] { case s => + s.toLowerCase } LoggingTestKit.error[ActorInitializationException].expect { @@ -156,8 +154,8 @@ class TransformMessagesSpec extends ScalaTestWithActorTestKit with AnyWordSpecLi Behaviors.same } } - .transformMessages[String] { - case msg => msg.toUpperCase() + .transformMessages[String] { case msg => + msg.toUpperCase() } val ref = spawn(behv) @@ -177,8 +175,8 @@ class TransformMessagesSpec extends ScalaTestWithActorTestKit with AnyWordSpecLi probe.ref ! msg Behaviors.same } - .transformMessages[String] { - case msg => msg.toUpperCase() + .transformMessages[String] { case msg => + msg.toUpperCase() } } diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/WatchSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/WatchSpec.scala index 907d1af3058..503783acfe5 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/WatchSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/WatchSpec.scala @@ -25,8 +25,8 @@ object WatchSpec { case object Stop val terminatorBehavior = - Behaviors.receive[Stop.type] { - case (_, Stop) => Behaviors.stopped + Behaviors.receive[Stop.type] { case (_, Stop) => + Behaviors.stopped } val mutableTerminatorBehavior = Behaviors.setup[Stop.type] { context => @@ -58,18 +58,17 @@ class WatchSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with LogC val watcher = spawn( Behaviors - .supervise(Behaviors - .receive[StartWatching] { - case (context, StartWatching(watchee)) => + .supervise( + Behaviors + .receive[StartWatching] { case (context, StartWatching(watchee)) => context.watch(watchee) watchProbe.ref ! 
Done Behaviors.same - } - .receiveSignal { - case (_, t: Terminated) => + } + .receiveSignal { case (_, t: Terminated) => receivedTerminationSignal.success(t) Behaviors.stopped - }) + }) .onFailure[Throwable](SupervisorStrategy.stop)) } @@ -159,15 +158,17 @@ class WatchSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with LogC val grossoBosso = spawn( Behaviors.setup[Any] { context => - val middleManagement = context.spawn(Behaviors.setup[Any] { context => - val sixPackJoe = context.spawn(Behaviors.receive[Any]((_, _) => throw ex), "joe") - context.watch(sixPackJoe) - - Behaviors.receive[Any] { (_, message) => - sixPackJoe ! message - Behaviors.same - } // no handling of terminated, even though we watched!!! - }, "middle-management") + val middleManagement = context.spawn( + Behaviors.setup[Any] { context => + val sixPackJoe = context.spawn(Behaviors.receive[Any]((_, _) => throw ex), "joe") + context.watch(sixPackJoe) + + Behaviors.receive[Any] { (_, message) => + sixPackJoe ! message + Behaviors.same + } // no handling of terminated, even though we watched!!! + }, + "middle-management") context.watch(middleManagement) @@ -176,10 +177,9 @@ class WatchSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with LogC middleManagement ! message Behaviors.same } - .receiveSignal { - case (_, t: Terminated) => - probe.ref ! Failed(t) - Behaviors.stopped + .receiveSignal { case (_, t: Terminated) => + probe.ref ! 
Failed(t) + Behaviors.stopped } }, @@ -303,21 +303,21 @@ class WatchSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with LogC val watcher = spawn( Behaviors - .supervise(Behaviors - .receive[Message] { - case (context, StartWatchingWith(watchee, message)) => - context.watchWith(watchee, message) - Behaviors.same - case (context, StartWatching(watchee)) => - context.watch(watchee) - Behaviors.same - case (_, _) => - Behaviors.stopped - } - .receiveSignal { - case (_, PostStop) => + .supervise( + Behaviors + .receive[Message] { + case (context, StartWatchingWith(watchee, message)) => + context.watchWith(watchee, message) + Behaviors.same + case (context, StartWatching(watchee)) => + context.watch(watchee) + Behaviors.same + case (_, _) => + Behaviors.stopped + } + .receiveSignal { case (_, PostStop) => Behaviors.stopped - }) + }) .onFailure[Throwable](SupervisorStrategy.stop)) def expectStopped(): Unit = stopProbe.expectTerminated(watcher, 1.second) diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/coexistence/ClassicSupervisingTypedSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/coexistence/ClassicSupervisingTypedSpec.scala index 0e1d30edda5..7970a13397f 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/coexistence/ClassicSupervisingTypedSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/coexistence/ClassicSupervisingTypedSpec.scala @@ -19,13 +19,12 @@ import akka.testkit.TestProbe object ProbedBehavior { def behavior(probe: u.ActorRef): Behavior[String] = { Behaviors - .receiveMessagePartial[String] { - case "throw" => throw TestException("oh dear") + .receiveMessagePartial[String] { case "throw" => + throw TestException("oh dear") } - .receiveSignal { - case (_, s) => - probe ! s - Behaviors.same + .receiveSignal { case (_, s) => + probe ! 
s + Behaviors.same } } } diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/coexistence/TypedSupervisingClassicSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/coexistence/TypedSupervisingClassicSpec.scala index b2665c5c24c..5b9fc75c6b2 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/coexistence/TypedSupervisingClassicSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/coexistence/TypedSupervisingClassicSpec.scala @@ -20,15 +20,14 @@ object TypedSupervisingClassicSpec { final case class SpawnClassicActor(props: classic.Props, replyTo: ActorRef[SpawnedClassicActor]) extends Protocol final case class SpawnedClassicActor(ref: classic.ActorRef) - def classicActorOf() = Behaviors.receive[Protocol] { - case (ctx, SpawnClassicActor(props, replyTo)) => - replyTo ! SpawnedClassicActor(ctx.actorOf(props)) - Behaviors.same + def classicActorOf() = Behaviors.receive[Protocol] { case (ctx, SpawnClassicActor(props, replyTo)) => + replyTo ! 
SpawnedClassicActor(ctx.actorOf(props)) + Behaviors.same } class CLassicActor(lifecycleProbe: ActorRef[String]) extends Actor { - override def receive: Receive = { - case "throw" => throw TestException("oh dear") + override def receive: Receive = { case "throw" => + throw TestException("oh dear") } override def postStop(): Unit = { @@ -42,9 +41,12 @@ object TypedSupervisingClassicSpec { } -class TypedSupervisingClassicSpec extends ScalaTestWithActorTestKit(""" +class TypedSupervisingClassicSpec + extends ScalaTestWithActorTestKit(""" akka.loglevel = INFO - """.stripMargin) with AnyWordSpecLike with LogCapturing { + """.stripMargin) + with AnyWordSpecLike + with LogCapturing { import TypedSupervisingClassicSpec._ "Typed supervising classic" should { diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/delivery/ConsumerControllerSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/delivery/ConsumerControllerSpec.scala index 2d789220d5c..b8c2f2ca5bf 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/delivery/ConsumerControllerSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/delivery/ConsumerControllerSpec.scala @@ -17,7 +17,9 @@ import akka.actor.typed.delivery.internal.ProducerControllerImpl import akka.serialization.SerializationExtension class ConsumerControllerSpec - extends ScalaTestWithActorTestKit(ConfigFactory.parseString(""" + extends ScalaTestWithActorTestKit( + ConfigFactory + .parseString(""" akka.reliable-delivery.consumer-controller { flow-control-window = 20 resend-interval-min = 1s @@ -563,15 +565,14 @@ class ConsumerControllerSpec // one chunk for each letter, "123" => 3 chunks val chunks1 = ProducerControllerImpl.createChunks(TestConsumer.Job(s"123"), chunkSize = 1, serialization) - val seqMessages1 = chunks1.zipWithIndex.map { - case (chunk, i) => - ConsumerController.SequencedMessage.fromChunked( - producerId, - 1 + i, - chunk, - first = i == 0, - ack = false, - 
producerControllerProbe.ref) + val seqMessages1 = chunks1.zipWithIndex.map { case (chunk, i) => + ConsumerController.SequencedMessage.fromChunked( + producerId, + 1 + i, + chunk, + first = i == 0, + ack = false, + producerControllerProbe.ref) } consumerController ! seqMessages1.head @@ -584,15 +585,14 @@ class ConsumerControllerSpec producerControllerProbe.expectMessage(ProducerControllerImpl.Request(3, 22, true, false)) val chunks2 = ProducerControllerImpl.createChunks(TestConsumer.Job(s"45"), chunkSize = 1, serialization) - val seqMessages2 = chunks2.zipWithIndex.map { - case (chunk, i) => - ConsumerController.SequencedMessage.fromChunked( - producerId, - 4 + i, - chunk, - first = false, - ack = true, - producerControllerProbe.ref) + val seqMessages2 = chunks2.zipWithIndex.map { case (chunk, i) => + ConsumerController.SequencedMessage.fromChunked( + producerId, + 4 + i, + chunk, + first = false, + ack = true, + producerControllerProbe.ref) } consumerController ! seqMessages2.head @@ -620,15 +620,14 @@ class ConsumerControllerSpec TestConsumer.Job(s"1234567890123456789012345"), chunkSize = 1, serialization) - val seqMessages1 = chunks1.zipWithIndex.map { - case (chunk, i) => - ConsumerController.SequencedMessage.fromChunked( - producerId, - 1 + i, - chunk, - first = i == 0, - ack = false, - producerControllerProbe.ref) + val seqMessages1 = chunks1.zipWithIndex.map { case (chunk, i) => + ConsumerController.SequencedMessage.fromChunked( + producerId, + 1 + i, + chunk, + first = i == 0, + ack = false, + producerControllerProbe.ref) } consumerController ! 
seqMessages1.head diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/delivery/DurableProducerControllerSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/delivery/DurableProducerControllerSpec.scala index 74434ef28d4..cf7dd88df64 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/delivery/DurableProducerControllerSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/delivery/DurableProducerControllerSpec.scala @@ -21,7 +21,8 @@ import akka.util.ByteString class DurableProducerControllerSpec extends ScalaTestWithActorTestKit( - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" akka.reliable-delivery.consumer-controller.flow-control-window = 20 akka.reliable-delivery.consumer-controller.resend-interval-min = 1s """).withFallback(TestSerializer.config)) diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/delivery/ProducerControllerSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/delivery/ProducerControllerSpec.scala index 5989982a8d2..9b9e166f283 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/delivery/ProducerControllerSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/delivery/ProducerControllerSpec.scala @@ -17,7 +17,8 @@ import akka.actor.typed.delivery.internal.ProducerControllerImpl class ProducerControllerSpec extends ScalaTestWithActorTestKit( - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" akka.reliable-delivery.consumer-controller.flow-control-window = 20 """).withFallback(TestSerializer.config)) with AnyWordSpecLike diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/delivery/ReliableDeliveryRandomSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/delivery/ReliableDeliveryRandomSpec.scala index 611d9856f03..b3dc1710592 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/delivery/ReliableDeliveryRandomSpec.scala +++ 
b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/delivery/ReliableDeliveryRandomSpec.scala @@ -213,6 +213,9 @@ class ReliableDeliveryRandomSpec(config: Config) // same tests but with chunked messages class ReliableDeliveryRandomChunkedSpec extends ReliableDeliveryRandomSpec( - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" akka.reliable-delivery.producer-controller.chunk-large-messages = 1b - """).withFallback(TestSerializer.config).withFallback(ReliableDeliveryRandomSpec.config)) + """) + .withFallback(TestSerializer.config) + .withFallback(ReliableDeliveryRandomSpec.config)) diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/delivery/ReliableDeliverySpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/delivery/ReliableDeliverySpec.scala index c46515fb75e..b4c438143e7 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/delivery/ReliableDeliverySpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/delivery/ReliableDeliverySpec.scala @@ -207,6 +207,8 @@ class ReliableDeliverySpec(config: Config) // Same tests but with chunked messages class ReliableDeliveryChunkedSpec extends ReliableDeliverySpec( - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" akka.reliable-delivery.producer-controller.chunk-large-messages = 1b - """).withFallback(TestSerializer.config).withFallback(ReliableDeliverySpec.config)) + """).withFallback(TestSerializer.config) + .withFallback(ReliableDeliverySpec.config)) diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/delivery/TestConsumer.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/delivery/TestConsumer.scala index 6672bd0d35d..42963b0914b 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/delivery/TestConsumer.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/delivery/TestConsumer.scala @@ -105,7 +105,8 @@ class TestConsumer( case job @ SomeAsyncJob(_, confirmTo, 
producerId, seqNr) => // when replacing producer the seqNr may start from 1 again val cleanProcessed = - if (seqNr == 1L) processed.filterNot { case (pid, _) => pid == producerId } else processed + if (seqNr == 1L) processed.filterNot { case (pid, _) => pid == producerId } + else processed if (cleanProcessed((producerId, seqNr))) throw new RuntimeException(s"Received duplicate [($producerId,$seqNr)]") diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/delivery/TestProducer.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/delivery/TestProducer.scala index ecfc18ce714..6b9d3bbcbcd 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/delivery/TestProducer.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/delivery/TestProducer.scala @@ -62,10 +62,9 @@ object TestProducer { } private def activeNoDelay(n: Int): Behavior[Command] = { - Behaviors.receivePartial { - case (ctx, RequestNext(sendTo)) => - sendMessage(n, sendTo, ctx) - activeNoDelay(n + 1) + Behaviors.receivePartial { case (ctx, RequestNext(sendTo)) => + sendMessage(n, sendTo, ctx) + activeNoDelay(n + 1) } } diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/eventstream/LoggingDocSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/eventstream/LoggingDocSpec.scala index db093f6dc8e..23b05730b27 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/eventstream/LoggingDocSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/eventstream/LoggingDocSpec.scala @@ -23,7 +23,7 @@ import scala.concurrent.Future object LoggingDocSpec { - //#deadletters + // #deadletters import akka.actor.typed.Behavior import akka.actor.typed.eventstream.EventStream.Subscribe import akka.actor.typed.scaladsl.Behaviors @@ -35,16 +35,15 @@ object LoggingDocSpec { val adapter = context.messageAdapter[DeadLetter](d => d.message.toString) context.system.eventStream ! 
Subscribe(adapter) - Behaviors.receiveMessage { - case msg: String => - println(msg) - Behaviors.same + Behaviors.receiveMessage { case msg: String => + println(msg) + Behaviors.same } } } - //#deadletters + // #deadletters - //#superclass-subscription-eventstream + // #superclass-subscription-eventstream object ListenerActor { abstract class AllKindsOfMusic { def artist: String } case class Jazz(artist: String) extends AllKindsOfMusic @@ -63,7 +62,7 @@ object LoggingDocSpec { } } } - //#superclass-subscription-eventstream + // #superclass-subscription-eventstream } @@ -73,16 +72,18 @@ class LoggingDocSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with "allow registration to dead letters" in { // #deadletters - ActorSystem(Behaviors.setup[Void] { context => - context.spawn(DeadLetterListener(), "DeadLetterListener", Props.empty) - Behaviors.empty - }, "System") + ActorSystem( + Behaviors.setup[Void] { context => + context.spawn(DeadLetterListener(), "DeadLetterListener", Props.empty) + Behaviors.empty + }, + "System") // #deadletters } "demonstrate superclass subscriptions on typed eventStream" in { import LoggingDocSpec.ListenerActor._ - //#superclass-subscription-eventstream + // #superclass-subscription-eventstream implicit val system: ActorSystem[SpawnProtocol.Command] = ActorSystem(SpawnProtocol(), "SpawnProtocol") implicit val ec: ExecutionContext = system.executionContext @@ -102,21 +103,21 @@ class LoggingDocSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with // jazzListener and musicListener will be notified about Jazz: system.eventStream ! Publish(Jazz("Sonny Rollins")) - //#superclass-subscription-eventstream + // #superclass-subscription-eventstream } "allow registration to suppressed dead letters" in { val listener: ActorRef[Any] = TestProbe().ref - //#suppressed-deadletters + // #suppressed-deadletters import akka.actor.SuppressedDeadLetter system.eventStream ! 
Subscribe[SuppressedDeadLetter](listener) - //#suppressed-deadletters + // #suppressed-deadletters - //#all-deadletters + // #all-deadletters import akka.actor.AllDeadLetters system.eventStream ! Subscribe[AllDeadLetters](listener) - //#all-deadletters + // #all-deadletters } } diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/internal/ActorSystemSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/internal/ActorSystemSpec.scala index ce2cb6eeaad..6f75bd143b8 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/internal/ActorSystemSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/internal/ActorSystemSpec.scala @@ -61,10 +61,13 @@ class ActorSystemSpec "An ActorSystem" must { "start the guardian actor and terminate when it terminates" in { - withSystem("a", Behaviors.receiveMessage[Probe] { p => - p.replyTo ! p.message - Behaviors.stopped - }, doTerminate = false) { sys => + withSystem( + "a", + Behaviors.receiveMessage[Probe] { p => + p.replyTo ! p.message + Behaviors.stopped + }, + doTerminate = false) { sys => val inbox = TestInbox[String]("a") sys ! Probe("hello", inbox.ref) eventually { @@ -90,14 +93,15 @@ class ActorSystemSpec "terminate the guardian actor" in { val inbox = TestInbox[String]("terminate") - val sys = system(Behaviors.setup[Any] { _ => - inbox.ref ! "started" - Behaviors.receiveSignal { - case (_, PostStop) => + val sys = system( + Behaviors.setup[Any] { _ => + inbox.ref ! "started" + Behaviors.receiveSignal { case (_, PostStop) => inbox.ref ! 
"done" Behaviors.same - } - }, "terminate") + } + }, + "terminate") eventually { inbox.hasMessages should ===(true) @@ -112,9 +116,11 @@ class ActorSystemSpec } "be able to terminate immediately" in { - val sys = system(Behaviors.receiveMessage[Probe] { _ => - Behaviors.unhandled - }, "terminate") + val sys = system( + Behaviors.receiveMessage[Probe] { _ => + Behaviors.unhandled + }, + "terminate") // for this case the guardian might not have been started before // the system terminates and then it will not receive PostStop, which // is OK since it wasn't really started yet @@ -177,17 +183,16 @@ class ActorSystemSpec "use a custom mailbox type for the user guardian" in { withSystem( "guardian-mailbox", - Behaviors.receive[WhatsYourMailbox] { - case (context, WhatsYourMailbox(replyTo)) => - replyTo ! context - .asInstanceOf[ActorContextImpl[_]] - .classicActorContext - .asInstanceOf[Dispatch] - .mailbox - .messageQueue - .getClass - .getName - Behaviors.same + Behaviors.receive[WhatsYourMailbox] { case (context, WhatsYourMailbox(replyTo)) => + replyTo ! context + .asInstanceOf[ActorContextImpl[_]] + .classicActorContext + .asInstanceOf[Dispatch] + .mailbox + .messageQueue + .getClass + .getName + Behaviors.same }, props = MailboxSelector.bounded(5)) { implicit sys => val probe = TestProbe[String]() diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/internal/routing/PoolRouterSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/internal/routing/PoolRouterSpec.scala index 4eecf6df3bc..f8d5e2e4bb8 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/internal/routing/PoolRouterSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/internal/routing/PoolRouterSpec.scala @@ -16,10 +16,9 @@ object PoolRouterSpec { final case class WhichDispatcher(replyTo: ActorRef[String]) - def apply(): Behavior[WhichDispatcher] = Behaviors.receiveMessage { - case WhichDispatcher(replyTo) => - replyTo ! 
Thread.currentThread.getName - Behaviors.same + def apply(): Behavior[WhichDispatcher] = Behaviors.receiveMessage { case WhichDispatcher(replyTo) => + replyTo ! Thread.currentThread.getName + Behaviors.same } } } diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/javadsl/AdaptationFailureSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/javadsl/AdaptationFailureSpec.scala index a5b6fa28ad4..e40385cc819 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/javadsl/AdaptationFailureSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/javadsl/AdaptationFailureSpec.scala @@ -52,46 +52,46 @@ class AdaptationFailureSpec extends ScalaTestWithActorTestKit with AnyWordSpecLi "Failure in an adapter" must { - crashingBehaviors.foreach { - case (name, behavior) => - s"default to crash the actor or $name" in { - val probe = createTestProbe() - val ref = spawn(Behaviors.setup[Any] { ctx => - val adapter = ctx.messageAdapter[Any](classOf[Any], _ => throw TestException("boom")) - adapter ! "go boom" - - behavior - }) - probe.expectTerminated(ref) - } + crashingBehaviors.foreach { case (name, behavior) => + s"default to crash the actor or $name" in { + val probe = createTestProbe() + val ref = spawn(Behaviors.setup[Any] { ctx => + val adapter = ctx.messageAdapter[Any](classOf[Any], _ => throw TestException("boom")) + adapter ! 
"go boom" + + behavior + }) + probe.expectTerminated(ref) + } } - nonCrashingBehaviors.foreach { - case (name, behavior) => - s"ignore the failure for $name" in { - val probe = createTestProbe[Any]() - val threw = Promise[Done]() - val ref = spawn(Behaviors.setup[Any] { ctx => - val adapter = ctx.messageAdapter[Any](classOf[Any], { _ => + nonCrashingBehaviors.foreach { case (name, behavior) => + s"ignore the failure for $name" in { + val probe = createTestProbe[Any]() + val threw = Promise[Done]() + val ref = spawn(Behaviors.setup[Any] { ctx => + val adapter = ctx.messageAdapter[Any]( + classOf[Any], + { _ => threw.success(Done) throw TestException("boom") }) - adapter ! "go boom" - behavior - }) - spawn(Behaviors.setup[Any] { ctx => - ctx.watch(ref) - - Behaviors.receiveSignal { - case (_, Terminated(`ref`)) => - probe.ref ! "actor-stopped" - Behaviors.same - case _ => Behaviors.unhandled - } - }) - - probe.expectNoMessage() - } + adapter ! "go boom" + behavior + }) + spawn(Behaviors.setup[Any] { ctx => + ctx.watch(ref) + + Behaviors.receiveSignal { + case (_, Terminated(`ref`)) => + probe.ref ! "actor-stopped" + Behaviors.same + case _ => Behaviors.unhandled + } + }) + + probe.expectNoMessage() + } } } diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorContextAskSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorContextAskSpec.scala index cec2903978c..5c86c4afcb1 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorContextAskSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorContextAskSpec.scala @@ -44,10 +44,13 @@ class ActorContextAskSpec case class Ping(sender: ActorRef[Pong]) case class Pong(selfName: String, threadName: String) - val pingPong = spawn(Behaviors.receive[Ping] { (context, message) => - message.sender ! 
Pong(context.self.path.name, Thread.currentThread().getName) - Behaviors.same - }, "ping-pong", Props.empty.withDispatcherFromConfig("ping-pong-dispatcher")) + val pingPong = spawn( + Behaviors.receive[Ping] { (context, message) => + message.sender ! Pong(context.self.path.name, Thread.currentThread().getName) + Behaviors.same + }, + "ping-pong", + Props.empty.withDispatcherFromConfig("ping-pong-dispatcher")) val probe = TestProbe[Pong]() @@ -80,10 +83,9 @@ class ActorContextAskSpec case class Ping(respondTo: ActorRef[Pong.type]) extends Protocol case object Pong extends Protocol - val pingPong = spawn(Behaviors.receiveMessagePartial[Protocol] { - case Ping(respondTo) => - respondTo ! Pong - Behaviors.same + val pingPong = spawn(Behaviors.receiveMessagePartial[Protocol] { case Ping(respondTo) => + respondTo ! Pong + Behaviors.same }) val snitch = Behaviors.setup[AnyRef] { context => @@ -93,16 +95,13 @@ class ActorContextAskSpec } Behaviors - .receivePartial[AnyRef] { - case (_, message) => - probe.ref ! message - Behaviors.same + .receivePartial[AnyRef] { case (_, message) => + probe.ref ! message + Behaviors.same } - .receiveSignal { - - case (_, PostStop) => - probe.ref ! "stopped" - Behaviors.same + .receiveSignal { case (_, PostStop) => + probe.ref ! "stopped" + Behaviors.same } } @@ -200,10 +199,9 @@ class ActorContextAskSpec case Failure(ex) => throw ex } - Behaviors.receiveMessage { - case Pong => - probe.ref ! "got pong" - Behaviors.same + Behaviors.receiveMessage { case Pong => + probe.ref ! "got pong" + Behaviors.same } }) @@ -223,10 +221,9 @@ class ActorContextAskSpec case wat => throw new IllegalArgumentException(s"Unexpected response $wat") } - Behaviors.receiveMessage { - case ex: Throwable => - probe.ref ! s"got error: ${ex.getClass.getName}, ${ex.getMessage}" - Behaviors.same + Behaviors.receiveMessage { case ex: Throwable => + probe.ref ! 
s"got error: ${ex.getClass.getName}, ${ex.getMessage}" + Behaviors.same } }) @@ -247,10 +244,9 @@ class ActorContextAskSpec case wat => throw new IllegalArgumentException(s"Unexpected response $wat") } - Behaviors.receiveMessage { - case ex: Throwable => - probe.ref ! s"got error: ${ex.getClass.getName}, ${ex.getMessage}" - Behaviors.same + Behaviors.receiveMessage { case ex: Throwable => + probe.ref ! s"got error: ${ex.getClass.getName}, ${ex.getMessage}" + Behaviors.same } }) diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorContextPipeToSelfSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorContextPipeToSelfSpec.scala index 929ebc26ad6..2e1279cba16 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorContextPipeToSelfSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorContextPipeToSelfSpec.scala @@ -45,10 +45,9 @@ final class ActorContextPipeToSelfSpec case Failure(ex) => throw ex } - Behaviors.receiveMessage { - case msg => - probe.ref ! msg - Behaviors.same + Behaviors.receiveMessage { case msg => + probe.ref ! 
msg + Behaviors.same } }) diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorLoggingSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorLoggingSpec.scala index 7b16d1afd87..6c78e45833b 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorLoggingSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorLoggingSpec.scala @@ -52,9 +52,12 @@ class BehaviorWhereTheLoggerIsUsed(context: ActorContext[String]) extends Abstra } } -class ActorLoggingSpec extends ScalaTestWithActorTestKit(""" +class ActorLoggingSpec + extends ScalaTestWithActorTestKit(""" akka.loglevel = DEBUG # test verifies debug - """) with AnyWordSpecLike with LogCapturing { + """) + with AnyWordSpecLike + with LogCapturing { val marker = new BasicMarkerFactory().getMarker("marker") val cause = TestException("böö") @@ -119,43 +122,46 @@ class ActorLoggingSpec extends ScalaTestWithActorTestKit(""" } "contain the class name where the first log was called" in { - val eventFilter = LoggingTestKit.custom({ + val eventFilter = LoggingTestKit.custom { case event if event.loggerName == classOf[ActorLoggingSpec].getName => true case event => println(event.loggerName) false - }) + } - eventFilter.expect(spawn(Behaviors.setup[String] { context => - context.log.info("Started") + eventFilter.expect( + spawn( + Behaviors.setup[String] { context => + context.log.info("Started") - Behaviors.receive { (context, message) => - context.log.info("got message {}", message) - Behaviors.same - } - }, "the-actor-with-class")) + Behaviors.receive { (context, message) => + context.log.info("got message {}", message) + Behaviors.same + } + }, + "the-actor-with-class")) } "contain the object class name where the first log was called" in { - val eventFilter = LoggingTestKit.custom({ + val eventFilter = LoggingTestKit.custom { case event if event.loggerName == WhereTheBehaviorIsDefined.getClass.getName => true case 
other => println(other.loggerName) false - }) + } eventFilter.expect(spawn(WhereTheBehaviorIsDefined.behavior, "the-actor-with-object")) } "contain the abstract behavior class name where the first log was called" in { - val eventFilter = LoggingTestKit.custom({ + val eventFilter = LoggingTestKit.custom { case event if event.loggerName == classOf[BehaviorWhereTheLoggerIsUsed].getName => true case other => println(other.loggerName) false - }) + } eventFilter.expect { spawn(Behaviors.setup[String](context => new BehaviorWhereTheLoggerIsUsed(context)), "the-actor-with-behavior") @@ -200,54 +206,53 @@ class ActorLoggingSpec extends ScalaTestWithActorTestKit(""" true // any is fine, we're just after the right count of statements reaching the listener } .withOccurrences(36) - .expect({ - spawn(Behaviors.setup[String] { - context => - context.log.debug("message") - context.log.debug("{}", "arg1") - // using `: Any` to avoid "ambiguous reference to overloaded definition", see also LoggerOpsSpec - context.log.debug("{} {}", "arg1", "arg2": Any) - context.log.debug("{} {} {}", "arg1", "arg2", "arg3") - context.log.debug(marker, "message") - context.log.debug(marker, "{}", "arg1") - context.log.debug(marker, "{} {}", "arg1", "arg2": Any) - context.log.debug(marker, "{} {} {}", "arg1", "arg2", "arg3") - - context.log.info("message") - context.log.info("{}", "arg1") - context.log.info("{} {}", "arg1", "arg2": Any) - context.log.info("{} {} {}", "arg1", "arg2", "arg3") - context.log.info(marker, "message") - context.log.info(marker, "{}", "arg1") - context.log.info(marker, "{} {}", "arg1", "arg2": Any) - context.log.info(marker, "{} {} {}", "arg1", "arg2", "arg3") - - context.log.warn("message") - context.log.warn("{}", "arg1") - context.log.warn("{} {}", "arg1", "arg2": Any) - context.log.warn("{} {} {}", "arg1", "arg2", "arg3") - context.log.warn(marker, "message") - context.log.warn(marker, "{}", "arg1") - context.log.warn(marker, "{} {}", "arg1", "arg2": Any) - 
context.log.warn(marker, "{} {} {}", "arg1", "arg2", "arg3") - context.log.warn("message", cause) - - context.log.error("message") - context.log.error("{}", "arg1") - context.log.error("{} {}", "arg1", "arg2": Any) - context.log.error("{} {} {}", "arg1", "arg2", "arg3") - // using to avoid vararg problem for primitive type, see also LoggerOpsSpec - context.log.error("{} {} {}", "arg1", "arg2", 3.asInstanceOf[AnyRef]) - context.log.error(marker, "message") - context.log.error(marker, "{}", "arg1") - context.log.error(marker, "{} {}", "arg1", "arg2": Any) - context.log.error(marker, "{} {} {}", "arg1", "arg2", "arg3") - context.log.error(marker, "{} {} {}", "arg1", "arg2", 3.asInstanceOf[AnyRef]) - context.log.error("message", cause) - - Behaviors.stopped + .expect { + spawn(Behaviors.setup[String] { context => + context.log.debug("message") + context.log.debug("{}", "arg1") + // using `: Any` to avoid "ambiguous reference to overloaded definition", see also LoggerOpsSpec + context.log.debug("{} {}", "arg1", "arg2": Any) + context.log.debug("{} {} {}", "arg1", "arg2", "arg3") + context.log.debug(marker, "message") + context.log.debug(marker, "{}", "arg1") + context.log.debug(marker, "{} {}", "arg1", "arg2": Any) + context.log.debug(marker, "{} {} {}", "arg1", "arg2", "arg3") + + context.log.info("message") + context.log.info("{}", "arg1") + context.log.info("{} {}", "arg1", "arg2": Any) + context.log.info("{} {} {}", "arg1", "arg2", "arg3") + context.log.info(marker, "message") + context.log.info(marker, "{}", "arg1") + context.log.info(marker, "{} {}", "arg1", "arg2": Any) + context.log.info(marker, "{} {} {}", "arg1", "arg2", "arg3") + + context.log.warn("message") + context.log.warn("{}", "arg1") + context.log.warn("{} {}", "arg1", "arg2": Any) + context.log.warn("{} {} {}", "arg1", "arg2", "arg3") + context.log.warn(marker, "message") + context.log.warn(marker, "{}", "arg1") + context.log.warn(marker, "{} {}", "arg1", "arg2": Any) + context.log.warn(marker, "{} 
{} {}", "arg1", "arg2", "arg3") + context.log.warn("message", cause) + + context.log.error("message") + context.log.error("{}", "arg1") + context.log.error("{} {}", "arg1", "arg2": Any) + context.log.error("{} {} {}", "arg1", "arg2", "arg3") + // using to avoid vararg problem for primitive type, see also LoggerOpsSpec + context.log.error("{} {} {}", "arg1", "arg2", 3.asInstanceOf[AnyRef]) + context.log.error(marker, "message") + context.log.error(marker, "{}", "arg1") + context.log.error(marker, "{} {}", "arg1", "arg2": Any) + context.log.error(marker, "{} {} {}", "arg1", "arg2", "arg3") + context.log.error(marker, "{} {} {}", "arg1", "arg2", 3.asInstanceOf[AnyRef]) + context.log.error("message", cause) + + Behaviors.stopped }) - }) + } } "use Slf4jLogger from akka-slf4j automatically" in { @@ -261,10 +266,9 @@ class ActorLoggingSpec extends ScalaTestWithActorTestKit(""" val behavior = Behaviors.setup[String] { ctx => ctx.log.info("Starting up") - Behaviors.receiveMessage { - case msg => - ctx.log.info("Got message {}", msg) - Behaviors.same + Behaviors.receiveMessage { case msg => + ctx.log.info("Got message {}", msg) + Behaviors.same } } val actor = diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorThreadSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorThreadSpec.scala index e13be19b1f2..6105bc8e3a6 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorThreadSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorThreadSpec.scala @@ -25,10 +25,9 @@ object ActorThreadSpec { final case class Msg(i: Int, replyTo: ActorRef[Int]) def apply(): Behavior[Msg] = - Behaviors.receiveMessage { - case Msg(i, replyTo) => - replyTo ! i - Behaviors.same + Behaviors.receiveMessage { case Msg(i, replyTo) => + replyTo ! 
i + Behaviors.same } } @@ -60,18 +59,17 @@ class ActorThreadSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike wit "detect illegal access to ActorContext from other thread when processing message" in { val probe = createTestProbe[UnsupportedOperationException]() - val ref = spawn(Behaviors.receive[CountDownLatch] { - case (context, latch) => - Future { - try { - context.children - } catch { - case e: UnsupportedOperationException => - probe.ref ! e - } - }(context.executionContext) - latch.await(5, TimeUnit.SECONDS) - Behaviors.same + val ref = spawn(Behaviors.receive[CountDownLatch] { case (context, latch) => + Future { + try { + context.children + } catch { + case e: UnsupportedOperationException => + probe.ref ! e + } + }(context.executionContext) + latch.await(5, TimeUnit.SECONDS) + Behaviors.same }) val l = new CountDownLatch(1) @@ -86,19 +84,18 @@ class ActorThreadSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike wit "detect illegal access to ActorContext from other thread after processing message" in { val probe = createTestProbe[UnsupportedOperationException]() - val ref = spawn(Behaviors.receive[CountDownLatch] { - case (context, latch) => - Future { - try { - latch.await(5, TimeUnit.SECONDS) - context.children - } catch { - case e: UnsupportedOperationException => - probe.ref ! e - } - }(context.executionContext) - - Behaviors.stopped + val ref = spawn(Behaviors.receive[CountDownLatch] { case (context, latch) => + Future { + try { + latch.await(5, TimeUnit.SECONDS) + context.children + } catch { + case e: UnsupportedOperationException => + probe.ref ! 
e + } + }(context.executionContext) + + Behaviors.stopped }) val l = new CountDownLatch(1) @@ -114,19 +111,18 @@ class ActorThreadSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike wit "detect illegal access from child" in { val probe = createTestProbe[UnsupportedOperationException]() - val ref = spawn(Behaviors.receive[String] { - case (context, _) => - // really bad idea to define a child actor like this - context.spawnAnonymous(Behaviors.setup[String] { _ => - try { - context.children - } catch { - case e: UnsupportedOperationException => - probe.ref ! e - } - Behaviors.empty - }) - Behaviors.same + val ref = spawn(Behaviors.receive[String] { case (context, _) => + // really bad idea to define a child actor like this + context.spawnAnonymous(Behaviors.setup[String] { _ => + try { + context.children + } catch { + case e: UnsupportedOperationException => + probe.ref ! e + } + Behaviors.empty + }) + Behaviors.same }) ref ! "hello" diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/AdaptationFailureSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/AdaptationFailureSpec.scala index 14d6f7273ac..d33f62b317d 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/AdaptationFailureSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/AdaptationFailureSpec.scala @@ -30,8 +30,8 @@ object AdaptationFailureSpec { def onMessage(msg: Any): Behavior[Any] = this - override def onSignal: PartialFunction[Signal, Behavior[Any]] = { - case PreRestart => Behaviors.same + override def onSignal: PartialFunction[Signal, Behavior[Any]] = { case PreRestart => + Behaviors.same } } @@ -41,8 +41,8 @@ object AdaptationFailureSpec { def onMessage(msg: Any): Behavior[Any] = this - override def onSignal: PartialFunction[Signal, Behavior[Any]] = { - case MessageAdaptionFailure(_) => Behaviors.same + override def onSignal: PartialFunction[Signal, Behavior[Any]] = { case 
MessageAdaptionFailure(_) => + Behaviors.same } } @@ -57,8 +57,8 @@ class AdaptationFailureSpec extends ScalaTestWithActorTestKit with AnyWordSpecLi "receivePartial" -> Behaviors.receivePartial[Any](PartialFunction.empty) :: "receiveSignal" -> Behaviors.receiveSignal[Any](PartialFunction.empty) :: "receiveSignal not catching adaption failure" -> - Behaviors.receiveSignal[Any] { - case (_, PreRestart) => Behaviors.same + Behaviors.receiveSignal[Any] { case (_, PreRestart) => + Behaviors.same } :: "AbstractBehavior" -> emptyAbstractBehavior :: "AbstractBehavior handling other signals" -> abstractBehaviorHandlingOtherSignals :: @@ -68,53 +68,50 @@ class AdaptationFailureSpec extends ScalaTestWithActorTestKit with AnyWordSpecLi "empty" -> Behaviors.empty[Any] :: "ignore" -> Behaviors.ignore[Any] :: "receiveSignal catching adaption failure" -> - Behaviors.receiveSignal[Any] { - case (_, MessageAdaptionFailure(_)) => Behaviors.same + Behaviors.receiveSignal[Any] { case (_, MessageAdaptionFailure(_)) => + Behaviors.same } :: "AbstractBehavior handling MessageAdaptionFailure" -> abstractBehaviorHandlingMessageAdaptionFailure :: Nil "Failure in an adapter" must { - crashingBehaviors.foreach { - case (name, behavior) => - s"default to crash the actor or $name" in { - val probe = createTestProbe() - val ref = spawn(Behaviors.setup[Any] { ctx => - val adapter = ctx.messageAdapter[Any](_ => throw TestException("boom")) - adapter ! "go boom" - - behavior - }) - probe.expectTerminated(ref) - } + crashingBehaviors.foreach { case (name, behavior) => + s"default to crash the actor or $name" in { + val probe = createTestProbe() + val ref = spawn(Behaviors.setup[Any] { ctx => + val adapter = ctx.messageAdapter[Any](_ => throw TestException("boom")) + adapter ! 
"go boom" + + behavior + }) + probe.expectTerminated(ref) + } } - nonCrashingBehaviors.foreach { - case (name, behavior) => - s"ignore the failure for $name" in { - val probe = createTestProbe[Any]() - val threw = Promise[Done]() - val ref = spawn(Behaviors.setup[Any] { ctx => - val adapter = ctx.messageAdapter[Any] { _ => - threw.success(Done) - throw TestException("boom") - } - adapter ! "go boom" - behavior - }) - spawn(Behaviors.setup[Any] { ctx => - ctx.watch(ref) - - Behaviors.receiveSignal { - case (_, Terminated(`ref`)) => - probe.ref ! "actor-stopped" - Behaviors.same - } - }) - - probe.expectNoMessage() - } + nonCrashingBehaviors.foreach { case (name, behavior) => + s"ignore the failure for $name" in { + val probe = createTestProbe[Any]() + val threw = Promise[Done]() + val ref = spawn(Behaviors.setup[Any] { ctx => + val adapter = ctx.messageAdapter[Any] { _ => + threw.success(Done) + throw TestException("boom") + } + adapter ! "go boom" + behavior + }) + spawn(Behaviors.setup[Any] { ctx => + ctx.watch(ref) + + Behaviors.receiveSignal { case (_, Terminated(`ref`)) => + probe.ref ! 
"actor-stopped" + Behaviors.same + } + }) + + probe.expectNoMessage() + } } } diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/DispatcherSelectorSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/DispatcherSelectorSpec.scala index 328f3f5bf29..96dca1b29d1 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/DispatcherSelectorSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/DispatcherSelectorSpec.scala @@ -58,20 +58,19 @@ class DispatcherSelectorSpec(config: Config) private def behavior: Behavior[WhatsYourDispatcherAndMailbox] = Behaviors.setup { context => - Behaviors.receiveMessage[WhatsYourDispatcherAndMailbox] { - case WhatsYourDispatcherAndMailbox(replyTo) => - val result = context match { - case adapter: ActorContextAdapter[_] => - adapter.classicActorContext match { - case cell: ActorCell => - (cell.dispatcher.id, cell.mailbox.messageQueue) - case unexpected => throw new RuntimeException(s"Unexpected: $unexpected") - } - case unexpected => throw new RuntimeException(s"Unexpected: $unexpected") - } - - replyTo ! result - Behaviors.stopped + Behaviors.receiveMessage[WhatsYourDispatcherAndMailbox] { case WhatsYourDispatcherAndMailbox(replyTo) => + val result = context match { + case adapter: ActorContextAdapter[_] => + adapter.classicActorContext match { + case cell: ActorCell => + (cell.dispatcher.id, cell.mailbox.messageQueue) + case unexpected => throw new RuntimeException(s"Unexpected: $unexpected") + } + case unexpected => throw new RuntimeException(s"Unexpected: $unexpected") + } + + replyTo ! 
result + Behaviors.stopped } } diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/GracefulStopSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/GracefulStopSpec.scala index 1c485a88b08..43324014b48 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/GracefulStopSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/GracefulStopSpec.scala @@ -22,17 +22,19 @@ final class GracefulStopSpec extends ScalaTestWithActorTestKit with AnyWordSpecL val behavior = Behaviors.setup[akka.NotUsed] { context => - context.spawn[NotUsed](Behaviors.receiveSignal { - case (_, PostStop) => + context.spawn[NotUsed]( + Behaviors.receiveSignal { case (_, PostStop) => probe.ref ! "child-done" Behaviors.stopped - }, "child1") + }, + "child1") - context.spawn[NotUsed](Behaviors.receiveSignal { - case (_, PostStop) => + context.spawn[NotUsed]( + Behaviors.receiveSignal { case (_, PostStop) => probe.ref ! "child-done" Behaviors.stopped - }, "child2") + }, + "child2") Behaviors.stopped { () => // cleanup function body diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/MessageAdapterSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/MessageAdapterSpec.scala index 96a96570740..f7ed20963ee 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/MessageAdapterSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/MessageAdapterSpec.scala @@ -69,10 +69,13 @@ class MessageAdapterSpec case class AnotherPong(selfName: String, threadName: String) - val pingPong = spawn(Behaviors.receive[Ping] { (context, message) => - message.sender ! Pong(context.self.path.name, Thread.currentThread().getName) - Behaviors.same - }, "ping-pong", Props.empty.withDispatcherFromConfig("ping-pong-dispatcher")) + val pingPong = spawn( + Behaviors.receive[Ping] { (context, message) => + message.sender ! 
Pong(context.self.path.name, Thread.currentThread().getName) + Behaviors.same + }, + "ping-pong", + Props.empty.withDispatcherFromConfig("ping-pong-dispatcher")) val probe = TestProbe[AnotherPong]() @@ -226,10 +229,9 @@ class MessageAdapterSpec probe.ref ! wrapped Behaviors.same } - .receiveSignal { - case (_, PostStop) => - probe.ref ! "stopped" - Behaviors.same + .receiveSignal { case (_, PostStop) => + probe.ref ! "stopped" + Behaviors.same } } @@ -272,10 +274,9 @@ class MessageAdapterSpec } behv(count + 1) } - .receiveSignal { - case (_, PostStop) => - probe.ref ! "stopped" - Behaviors.same + .receiveSignal { case (_, PostStop) => + probe.ref ! "stopped" + Behaviors.same } behv(count = 1) diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/OnSignalSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/OnSignalSpec.scala index ee687648396..604e7d065cb 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/OnSignalSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/OnSignalSpec.scala @@ -22,10 +22,9 @@ final class OnSignalSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike Behaviors.setup[Nothing] { context => val stoppedChild = context.spawn(Behaviors.stopped, "stopped-child") context.watch(stoppedChild) - Behaviors.receiveSignal[Nothing] { - case (_, Terminated(`stoppedChild`)) => - probe.ref ! Done - Behaviors.stopped + Behaviors.receiveSignal[Nothing] { case (_, Terminated(`stoppedChild`)) => + probe.ref ! 
Done + Behaviors.stopped } } spawn[Nothing](behavior) @@ -34,19 +33,19 @@ final class OnSignalSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike def stopper(probe: TestProbe[Done], children: Int) = Behaviors.setup[String] { ctx => (0 until children).foreach { i => - ctx.spawn(Behaviors.receiveMessage[String] { _ => - Behaviors.same - }, s"$i") + ctx.spawn( + Behaviors.receiveMessage[String] { _ => + Behaviors.same + }, + s"$i") } Behaviors - .receiveMessagePartial[String] { - case "stop" => - Behaviors.stopped + .receiveMessagePartial[String] { case "stop" => + Behaviors.stopped } - .receiveSignal { - case (_, PostStop) => - probe.ref ! Done - Behaviors.same + .receiveSignal { case (_, PostStop) => + probe.ref ! Done + Behaviors.same } } diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ReceivePartialSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ReceivePartialSpec.scala index 3e9fad44cbc..80819898d13 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ReceivePartialSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ReceivePartialSpec.scala @@ -21,10 +21,9 @@ class ReceivePartialSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike "correctly install the receiveMessage handler" in { val probe = TestProbe[Command]("probe") val behavior = - Behaviors.receiveMessagePartial[Command] { - case Command2 => - probe.ref ! Command2 - Behaviors.same + Behaviors.receiveMessagePartial[Command] { case Command2 => + probe.ref ! Command2 + Behaviors.same } val actor = spawn(behavior) @@ -38,10 +37,9 @@ class ReceivePartialSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike "correctly install the receive handler" in { val probe = TestProbe[Command]("probe") val behavior = - Behaviors.receivePartial[Command] { - case (_, Command2) => - probe.ref ! Command2 - Behaviors.same + Behaviors.receivePartial[Command] { case (_, Command2) => + probe.ref ! 
Command2 + Behaviors.same } val actor = spawn(behavior) diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/RoutersSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/RoutersSpec.scala index b6dc77ec67d..98977014b3e 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/RoutersSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/RoutersSpec.scala @@ -20,9 +20,13 @@ import akka.actor.typed.receptionist.Receptionist import akka.actor.typed.receptionist.ServiceKey import akka.actor.typed.scaladsl.adapter._ -class RoutersSpec extends ScalaTestWithActorTestKit(""" +class RoutersSpec + extends ScalaTestWithActorTestKit(""" akka.loglevel=debug - """) with AnyWordSpecLike with Matchers with LogCapturing { + """) + with AnyWordSpecLike + with Matchers + with LogCapturing { // needed for the event filter implicit val classicSystem: ActorSystem = system.toClassic @@ -121,11 +125,10 @@ class RoutersSpec extends ScalaTestWithActorTestKit(""" case object BCast extends Cmd def behavior(replyTo: ActorRef[AnyRef]) = Behaviors.setup[Cmd] { ctx => - Behaviors.receiveMessagePartial[Cmd] { - case ReplyWithAck | BCast => - val reply = ctx.self.path - replyTo ! reply - Behaviors.same + Behaviors.receiveMessagePartial[Cmd] { case ReplyWithAck | BCast => + val reply = ctx.self.path + replyTo ! 
reply + Behaviors.same } } diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/StashSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/StashSpec.scala index b7e0a162946..99df901634e 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/StashSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/StashSpec.scala @@ -258,10 +258,9 @@ abstract class AbstractStashSpec extends ScalaTestWithActorTestKit with AnyWordS class UnstashingSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with LogCapturing { private def slowStoppingChild(latch: CountDownLatch): Behavior[String] = - Behaviors.receiveSignal { - case (_, PostStop) => - latch.await(10, TimeUnit.SECONDS) - Behaviors.same + Behaviors.receiveSignal { case (_, PostStop) => + latch.await(10, TimeUnit.SECONDS) + Behaviors.same } private def stashingBehavior( @@ -339,16 +338,15 @@ class UnstashingSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with "work with intermediate Behaviors.same" in { val probe = TestProbe[String]() // unstashing is inside setup - val ref = spawn(Behaviors.receiveMessagePartial[String] { - case "unstash" => - Behaviors.withStash(10) { stash => - stash.stash("one") - stash.stash("two") - stash.unstashAll(Behaviors.receiveMessage { msg => - probe.ref ! msg - Behaviors.same - }) - } + val ref = spawn(Behaviors.receiveMessagePartial[String] { case "unstash" => + Behaviors.withStash(10) { stash => + stash.stash("one") + stash.stash("two") + stash.unstashAll(Behaviors.receiveMessage { msg => + probe.ref ! msg + Behaviors.same + }) + } }) ref ! 
"unstash" @@ -386,16 +384,15 @@ class UnstashingSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with // unstashing is inside setup val ref = spawn( Behaviors - .supervise(Behaviors.receiveMessagePartial[String] { - case "unstash" => - Behaviors.withStash(10) { stash => - stash.stash("one") - stash.stash("two") - stash.unstashAll(Behaviors.receiveMessage { msg => - probe.ref ! msg - Behaviors.same - }) - } + .supervise(Behaviors.receiveMessagePartial[String] { case "unstash" => + Behaviors.withStash(10) { stash => + stash.stash("one") + stash.stash("two") + stash.unstashAll(Behaviors.receiveMessage { msg => + probe.ref ! msg + Behaviors.same + }) + } }) .onFailure[TestException](SupervisorStrategy.stop)) @@ -411,30 +408,29 @@ class UnstashingSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with val ref = spawn(Behaviors.receiveMessage[String] { case "unstash" => - Behaviors.withStash(10) { - stash => - def expectingA: Behaviors.Receive[String] = Behaviors.receiveMessage { - case "a" => - probe.ref ! "a" - stash.unstash(expectingB, 1, identity) - case other => - probe.ref ! s"unexpected [$other] when expecting [a]" - Behaviors.stopped - } + Behaviors.withStash(10) { stash => + def expectingA: Behaviors.Receive[String] = Behaviors.receiveMessage { + case "a" => + probe.ref ! "a" + stash.unstash(expectingB, 1, identity) + case other => + probe.ref ! s"unexpected [$other] when expecting [a]" + Behaviors.stopped + } - def expectingB: Behaviors.Receive[String] = Behaviors.receiveMessage { - case b @ ("b1" | "b2") => - probe.ref ! b - stash.unstash(Behaviors.same, 1, identity) - case other => - probe.ref ! s"unexpected [$other] when expecting [b]" - Behaviors.stopped - } + def expectingB: Behaviors.Receive[String] = Behaviors.receiveMessage { + case b @ ("b1" | "b2") => + probe.ref ! b + stash.unstash(Behaviors.same, 1, identity) + case other => + probe.ref ! 
s"unexpected [$other] when expecting [b]" + Behaviors.stopped + } - stash.stash("a") - stash.stash("b1") - stash.stash("b2") - stash.unstash(expectingA, 1, identity) + stash.stash("a") + stash.stash("b1") + stash.stash("b2") + stash.unstash(expectingA, 1, identity) } case other => probe.ref ! s"unexpected [$other] in first Behavior" @@ -642,9 +638,8 @@ class UnstashingSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with unstashing(n + 1) } - Behaviors.receiveMessagePartial { - case "unstash" => - stash.unstashAll(unstashing(1)) + Behaviors.receiveMessagePartial { case "unstash" => + stash.unstashAll(unstashing(1)) } }) @@ -665,9 +660,8 @@ class UnstashingSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with val ref = spawn(Behaviors.withStash[String](10) { stash => stash.stash("one") - Behaviors.receiveMessagePartial { - case "unstash" => - stash.unstashAll(Behaviors.stopped) + Behaviors.receiveMessagePartial { case "unstash" => + stash.unstashAll(Behaviors.stopped) } }) diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/StopSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/StopSpec.scala index d0525a26540..41fc4916e76 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/StopSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/StopSpec.scala @@ -32,11 +32,10 @@ class StopSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with LogCa "execute the post stop" in { val probe = TestProbe[Done]() - val ref = spawn(Behaviors.receiveMessagePartial[String] { - case "stop" => - Behaviors.stopped { () => - probe.ref ! Done - } + val ref = spawn(Behaviors.receiveMessagePartial[String] { case "stop" => + Behaviors.stopped { () => + probe.ref ! Done + } }) ref ! 
"stop" probe.expectMessage(Done) @@ -46,16 +45,14 @@ class StopSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with LogCa val probe = TestProbe[String]() val ref = spawn( Behaviors - .receiveMessagePartial[String] { - case "stop" => - Behaviors.stopped { () => - probe.ref ! "callback" - } + .receiveMessagePartial[String] { case "stop" => + Behaviors.stopped { () => + probe.ref ! "callback" + } } - .receiveSignal { - case (_, PostStop) => - probe.ref ! "signal" - Behaviors.same + .receiveSignal { case (_, PostStop) => + probe.ref ! "signal" + Behaviors.same }) ref ! "stop" probe.expectMessage("signal") diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/adapter/AdapterSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/adapter/AdapterSpec.scala index cd629fe4d9b..f9f1e1fdd09 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/adapter/AdapterSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/adapter/AdapterSpec.scala @@ -41,16 +41,16 @@ object AdapterSpec { class ClassicFailInConstructor extends classic.Actor { throw new TestException("Exception in constructor") - def receive = { - case "ping" => sender() ! "pong" + def receive = { case "ping" => + sender() ! "pong" } } def classicForwarder(ref: classic.ActorRef): classic.Props = classic.Props(new ClassicForwarder(ref)) class ClassicForwarder(ref: classic.ActorRef) extends classic.Actor { - def receive = { - case a: String => ref ! a + def receive = { case a: String => + ref ! a } } @@ -90,10 +90,9 @@ object AdapterSpec { Behaviors.same } } - .receiveSignal { - case (_, Terminated(_)) => - probe ! "terminated" - Behaviors.same + .receiveSignal { case (_, Terminated(_)) => + probe ! 
"terminated" + Behaviors.same } def unhappyTyped(msg: String): Behavior[String] = Behaviors.setup[String] { ctx => @@ -212,13 +211,15 @@ class AdapterSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with for { _ <- 0 to 10 } { var systemN: akka.actor.typed.ActorSystem[Done] = null try { - systemN = ActorSystem.create(Behaviors.receive[Done] { (context, message) => - context.self ! Done - message match { - case Done => Behaviors.stopped - } + systemN = ActorSystem.create( + Behaviors.receive[Done] { (context, message) => + context.self ! Done + message match { + case Done => Behaviors.stopped + } - }, "AdapterSpec-stopping-guardian-2") + }, + "AdapterSpec-stopping-guardian-2") } finally if (system != null) TestKit.shutdownActorSystem(systemN.toClassic) } @@ -231,7 +232,7 @@ class AdapterSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with } "allow seamless access to untyped extensions" in { - SerializationExtension(typedSystem) should not be (null) + SerializationExtension(typedSystem) should not be null } } diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/AggregatorSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/AggregatorSpec.scala index d227d1964ab..1fc0cd8d321 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/AggregatorSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/AggregatorSpec.scala @@ -14,7 +14,7 @@ import org.scalatest.wordspec.AnyWordSpecLike object AggregatorSpec { object IllustrateUsage { - //#usage + // #usage object Hotel1 { final case class RequestQuote(replyTo: ActorRef[Quote]) final case class Quote(hotel: String, price: BigDecimal) @@ -57,15 +57,14 @@ object AggregatorSpec { .toList), timeout = 5.seconds)) - Behaviors.receiveMessage { - case AggregatedQuotes(quotes) => - context.log.info("Best {}", quotes.headOption.getOrElse("Quote N/A")) - Behaviors.same + Behaviors.receiveMessage { case AggregatedQuotes(quotes) => + context.log.info("Best 
{}", quotes.headOption.getOrElse("Quote N/A")) + Behaviors.same } } } } - //#usage + // #usage } } diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/DispatchersDocSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/DispatchersDocSpec.scala index be87bf918c8..25ec1f74bf7 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/DispatchersDocSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/DispatchersDocSpec.scala @@ -42,7 +42,7 @@ object DispatchersDocSpec { val yourBehavior: Behavior[String] = Behaviors.same val example = Behaviors.receive[Any] { (context, _) => - //#spawn-dispatcher + // #spawn-dispatcher import akka.actor.typed.DispatcherSelector context.spawn(yourBehavior, "DefaultDispatcher") @@ -50,7 +50,7 @@ object DispatchersDocSpec { context.spawn(yourBehavior, "BlockingDispatcher", DispatcherSelector.blocking()) context.spawn(yourBehavior, "ParentDispatcher", DispatcherSelector.sameAsParent()) context.spawn(yourBehavior, "DispatcherFromConfig", DispatcherSelector.fromConfig("your-dispatcher")) - //#spawn-dispatcher + // #spawn-dispatcher Behaviors.same } diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/FSMDocSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/FSMDocSpec.scala index e93135a0f12..56989842393 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/FSMDocSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/FSMDocSpec.scala @@ -16,10 +16,10 @@ import org.scalatest.wordspec.AnyWordSpecLike object FSMDocSpec { - //#simple-state - //#simple-events + // #simple-state + // #simple-events object Buncher { - //#simple-state + // #simple-state // FSM event becomes the type of the message Actor supports sealed trait Event @@ -27,17 +27,17 @@ object FSMDocSpec { final case class Queue(obj: Any) extends Event case object Flush extends Event private case object Timeout extends Event - //#simple-events + // #simple-events - 
//#storing-state + // #storing-state sealed trait Data case object Uninitialized extends Data final case class Todo(target: ActorRef[Batch], queue: immutable.Seq[Any]) extends Data final case class Batch(obj: immutable.Seq[Any]) - //#storing-state + // #storing-state - //#simple-state + // #simple-state // states of the FSM represented as behaviors // initial state @@ -67,10 +67,10 @@ object FSMDocSpec { } } - //#simple-events + // #simple-events } - //#simple-events - //#simple-state + // #simple-events + // #simple-state } class FSMDocSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with LogCapturing { diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/FaultToleranceDocSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/FaultToleranceDocSpec.scala index b7ea4b66b37..841afe9fb72 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/FaultToleranceDocSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/FaultToleranceDocSpec.scala @@ -78,10 +78,12 @@ object FaultToleranceDocSpec { } @nowarn("msg=never used") -class FaultToleranceDocSpec extends ScalaTestWithActorTestKit(""" +class FaultToleranceDocSpec + extends ScalaTestWithActorTestKit(""" # silenced to not put noise in test logs akka.loglevel = off - """) with AnyWordSpecLike { + """) + with AnyWordSpecLike { import FaultToleranceDocSpec._ "Bubbling of failures" must { diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/GracefulStopDocSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/GracefulStopDocSpec.scala index 40217ecc5df..66e9fd5264d 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/GracefulStopDocSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/GracefulStopDocSpec.scala @@ -22,7 +22,7 @@ import akka.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit object GracefulStopDocSpec { - //#master-actor + // #master-actor object MasterControlProgram { sealed trait 
Command @@ -44,32 +44,30 @@ object GracefulStopDocSpec { Behaviors.stopped } } - .receiveSignal { - case (context, PostStop) => - context.log.info("Master Control Program stopped") - Behaviors.same + .receiveSignal { case (context, PostStop) => + context.log.info("Master Control Program stopped") + Behaviors.same } } } - //#master-actor + // #master-actor - //#worker-actor + // #worker-actor object Job { sealed trait Command def apply(name: String): Behavior[Command] = { - Behaviors.receiveSignal[Command] { - case (context, PostStop) => - context.log.info("Worker {} stopped", name) - Behaviors.same + Behaviors.receiveSignal[Command] { case (context, PostStop) => + context.log.info("Worker {} stopped", name) + Behaviors.same } } } - //#worker-actor + // #worker-actor object IllustrateWatch { - //#master-actor-watch + // #master-actor-watch object MasterControlProgram { sealed trait Command @@ -86,18 +84,17 @@ object GracefulStopDocSpec { Behaviors.same } } - .receiveSignal { - case (context, Terminated(ref)) => - context.log.info("Job stopped: {}", ref.path.name) - Behaviors.same + .receiveSignal { case (context, Terminated(ref)) => + context.log.info("Job stopped: {}", ref.path.name) + Behaviors.same } } } - //#master-actor-watch + // #master-actor-watch } object IllustrateWatchWith { - //#master-actor-watchWith + // #master-actor-watchWith object MasterControlProgram { sealed trait Command @@ -121,7 +118,7 @@ object GracefulStopDocSpec { } } } - //#master-actor-watchWith + // #master-actor-watchWith } } @@ -133,7 +130,7 @@ class GracefulStopDocSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike "Graceful stop example" must { "start some workers" in { - //#start-workers + // #start-workers import MasterControlProgram._ val system: ActorSystem[Command] = ActorSystem(MasterControlProgram(), "B6700") @@ -148,7 +145,7 @@ class GracefulStopDocSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike system.terminate() Await.result(system.whenTerminated, 
3.seconds) - //#start-workers + // #start-workers } "gracefully stop workers and master" in { diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala index 046b06ccbb8..9062a8c7902 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala @@ -42,10 +42,9 @@ class InteractionPatternsSpec extends ScalaTestWithActorTestKit with AnyWordSpec case class PrintMe(message: String) def apply(): Behavior[PrintMe] = - Behaviors.receive { - case (context, PrintMe(message)) => - context.log.info(message) - Behaviors.same + Behaviors.receive { case (context, PrintMe(message)) => + context.log.info(message) + Behaviors.same } } // #fire-and-forget-definition @@ -75,11 +74,10 @@ class InteractionPatternsSpec extends ScalaTestWithActorTestKit with AnyWordSpec // #request-response-respond def apply(): Behaviors.Receive[Request] = - Behaviors.receiveMessage[Request] { - case Request(query, replyTo) => - // ... process query ... - replyTo ! Response(s"Here are the cookies for [$query]!") - Behaviors.same + Behaviors.receiveMessage[Request] { case Request(query, replyTo) => + // ... process query ... + replyTo ! 
Response(s"Here are the cookies for [$query]!") + Behaviors.same } // #request-response-respond } @@ -175,7 +173,7 @@ class InteractionPatternsSpec extends ScalaTestWithActorTestKit with AnyWordSpec "contain a sample for scheduling messages to self" in { - //#timer + // #timer object Buncher { sealed trait Command @@ -219,7 +217,7 @@ class InteractionPatternsSpec extends ScalaTestWithActorTestKit with AnyWordSpec } } } - //#timer + // #timer val probe = createTestProbe[Buncher.Batch]() val buncher: ActorRef[Buncher.Command] = spawn(Buncher(probe.ref, 1.second, 10)) @@ -241,10 +239,9 @@ class InteractionPatternsSpec extends ScalaTestWithActorTestKit with AnyWordSpec case class Response(message: String) def apply(): Behaviors.Receive[Hal.Command] = - Behaviors.receiveMessage[Command] { - case OpenThePodBayDoorsPlease(replyTo) => - replyTo ! Response("I'm sorry, Dave. I'm afraid I can't do that.") - Behaviors.same + Behaviors.receiveMessage[Command] { case OpenThePodBayDoorsPlease(replyTo) => + replyTo ! Response("I'm sorry, Dave. I'm afraid I can't do that.") + Behaviors.same } } @@ -307,11 +304,10 @@ class InteractionPatternsSpec extends ScalaTestWithActorTestKit with AnyWordSpec case class OpenThePodBayDoorsPlease(replyTo: ActorRef[StatusReply[String]]) extends Command def apply(): Behaviors.Receive[Hal.Command] = - Behaviors.receiveMessage[Command] { - case OpenThePodBayDoorsPlease(replyTo) => - // reply with a validation error description - replyTo ! StatusReply.Error("I'm sorry, Dave. I'm afraid I can't do that.") - Behaviors.same + Behaviors.receiveMessage[Command] { case OpenThePodBayDoorsPlease(replyTo) => + // reply with a validation error description + replyTo ! StatusReply.Error("I'm sorry, Dave. 
I'm afraid I can't do that.") + Behaviors.same } } @@ -359,10 +355,9 @@ class InteractionPatternsSpec extends ScalaTestWithActorTestKit with AnyWordSpec case class GetKeys(whoseKeys: String, replyTo: ActorRef[Keys]) def apply(): Behavior[GetKeys] = - Behaviors.receiveMessage { - case GetKeys(_, replyTo) => - replyTo ! Keys() - Behaviors.same + Behaviors.receiveMessage { case GetKeys(_, replyTo) => + replyTo ! Keys() + Behaviors.same } } @@ -370,10 +365,9 @@ class InteractionPatternsSpec extends ScalaTestWithActorTestKit with AnyWordSpec case class GetWallet(whoseWallet: String, replyTo: ActorRef[Wallet]) def apply(): Behavior[GetWallet] = - Behaviors.receiveMessage { - case GetWallet(_, replyTo) => - replyTo ! Wallet() - Behaviors.same + Behaviors.receiveMessage { case GetWallet(_, replyTo) => + replyTo ! Wallet() + Behaviors.same } } @@ -389,10 +383,9 @@ class InteractionPatternsSpec extends ScalaTestWithActorTestKit with AnyWordSpec val keyCabinet: ActorRef[KeyCabinet.GetKeys] = context.spawn(KeyCabinet(), "key-cabinet") val drawer: ActorRef[Drawer.GetWallet] = context.spawn(Drawer(), "drawer") - Behaviors.receiveMessage[Command] { - case LeaveHome(who, replyTo) => - context.spawn(prepareToLeaveHome(who, replyTo, keyCabinet, drawer), s"leaving-$who") - Behaviors.same + Behaviors.receiveMessage[Command] { case LeaveHome(who, replyTo) => + context.spawn(prepareToLeaveHome(who, replyTo, keyCabinet, drawer), s"leaving-$who") + Behaviors.same } } } @@ -580,7 +573,7 @@ class InteractionPatternsSpec extends ScalaTestWithActorTestKit with AnyWordSpec } "contain a sample for pipeToSelf" in { - //#pipeToSelf + // #pipeToSelf trait CustomerDataAccess { def update(value: Customer): Future[Done] @@ -631,7 +624,7 @@ class InteractionPatternsSpec extends ScalaTestWithActorTestKit with AnyWordSpec } } } - //#pipeToSelf + // #pipeToSelf val dataAccess = new CustomerDataAccess { override def update(value: Customer): Future[Done] = Future.successful(Done) diff --git 
a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala index 8854cc732d1..aaabf1ba80e 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala @@ -23,23 +23,23 @@ import java.nio.charset.StandardCharsets object IntroSpec { - //#hello-world-actor + // #hello-world-actor object HelloWorld { final case class Greet(whom: String, replyTo: ActorRef[Greeted]) final case class Greeted(whom: String, from: ActorRef[Greet]) def apply(): Behavior[Greet] = Behaviors.receive { (context, message) => context.log.info("Hello {}!", message.whom) - //#hello-world-actor + // #hello-world-actor println(s"Hello ${message.whom}!") - //#hello-world-actor + // #hello-world-actor message.replyTo ! Greeted(message.whom, context.self) Behaviors.same } } - //#hello-world-actor + // #hello-world-actor - //#hello-world-bot + // #hello-world-bot object HelloWorldBot { def apply(max: Int): Behavior[HelloWorld.Greeted] = { @@ -50,9 +50,9 @@ object IntroSpec { Behaviors.receive { (context, message) => val n = greetingCounter + 1 context.log.info2("Greeting {} for {}", n, message.whom) - //#hello-world-bot + // #hello-world-bot println(s"Greeting $n for ${message.whom}") - //#hello-world-bot + // #hello-world-bot if (n == max) { Behaviors.stopped } else { @@ -61,9 +61,9 @@ object IntroSpec { } } } - //#hello-world-bot + // #hello-world-bot - //#hello-world-main + // #hello-world-main object HelloWorldMain { final case class SayHello(name: String) @@ -79,7 +79,7 @@ object IntroSpec { } } - //#hello-world-main + // #hello-world-main def main(args: Array[String]): Unit = { val system: ActorSystem[HelloWorldMain.SayHello] = ActorSystem(HelloWorldMain(), "hello") @@ -87,16 +87,16 @@ object IntroSpec { system ! HelloWorldMain.SayHello("World") system ! 
HelloWorldMain.SayHello("Akka") } - //#hello-world-main + // #hello-world-main } - //#hello-world-main + // #hello-world-main object CustomDispatchersExample { object HelloWorldMain { final case class SayHello(name: String) - //#hello-world-main-with-dispatchers + // #hello-world-main-with-dispatchers def apply(): Behavior[SayHello] = Behaviors.setup { context => val dispatcherPath = "akka.actor.default-blocking-io-dispatcher" @@ -111,21 +111,21 @@ object IntroSpec { Behaviors.same } } - //#hello-world-main-with-dispatchers + // #hello-world-main-with-dispatchers } } - //#chatroom-protocol - //#chatroom-behavior + // #chatroom-protocol + // #chatroom-behavior object ChatRoom { - //#chatroom-behavior + // #chatroom-behavior sealed trait RoomCommand final case class GetSession(screenName: String, replyTo: ActorRef[SessionEvent]) extends RoomCommand - //#chatroom-protocol - //#chatroom-behavior + // #chatroom-protocol + // #chatroom-behavior private final case class PublishSessionMessage(screenName: String, message: String) extends RoomCommand - //#chatroom-behavior - //#chatroom-protocol + // #chatroom-behavior + // #chatroom-protocol sealed trait SessionEvent final case class SessionGranted(handle: ActorRef[PostMessage]) extends SessionEvent @@ -135,8 +135,8 @@ object IntroSpec { sealed trait SessionCommand final case class PostMessage(message: String) extends SessionCommand private final case class NotifyClient(message: MessagePosted) extends SessionCommand - //#chatroom-protocol - //#chatroom-behavior + // #chatroom-protocol + // #chatroom-behavior def apply(): Behavior[RoomCommand] = chatRoom(List.empty) @@ -172,24 +172,24 @@ object IntroSpec { client ! 
message Behaviors.same } - //#chatroom-protocol + // #chatroom-protocol } - //#chatroom-behavior - //#chatroom-protocol + // #chatroom-behavior + // #chatroom-protocol - //#chatroom-gabbler + // #chatroom-gabbler object Gabbler { import ChatRoom._ def apply(): Behavior[SessionEvent] = Behaviors.setup { context => Behaviors.receiveMessage { - //#chatroom-gabbler + // #chatroom-gabbler // We document that the compiler warns about the missing handler for `SessionDenied` case SessionDenied(reason) => context.log.info("cannot start chat room session: {}", reason) Behaviors.stopped - //#chatroom-gabbler + // #chatroom-gabbler case SessionGranted(handle) => handle ! PostMessage("Hello World!") Behaviors.same @@ -199,9 +199,9 @@ object IntroSpec { } } } - //#chatroom-gabbler + // #chatroom-gabbler - //#chatroom-main + // #chatroom-main object Main { def apply(): Behavior[NotUsed] = Behaviors.setup { context => @@ -210,9 +210,8 @@ object IntroSpec { context.watch(gabblerRef) chatRoom ! ChatRoom.GetSession("ol’ Gabbler", gabblerRef) - Behaviors.receiveSignal { - case (_, Terminated(_)) => - Behaviors.stopped + Behaviors.receiveSignal { case (_, Terminated(_)) => + Behaviors.stopped } } @@ -221,7 +220,7 @@ object IntroSpec { } } - //#chatroom-main + // #chatroom-main } @@ -231,7 +230,7 @@ class IntroSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with LogC "Intro sample" must { "say hello" in { - //#hello-world + // #hello-world val system: ActorSystem[HelloWorldMain.SayHello] = ActorSystem(HelloWorldMain(), "hello") @@ -239,7 +238,7 @@ class IntroSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with LogC system ! HelloWorldMain.SayHello("World") system ! 
HelloWorldMain.SayHello("Akka") - //#hello-world + // #hello-world Thread.sleep(500) // it will not fail if too short ActorTestKit.shutdown(system) diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/LoggingDocExamples.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/LoggingDocExamples.scala index 00ced001ee9..0246a60bca1 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/LoggingDocExamples.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/LoggingDocExamples.scala @@ -26,14 +26,14 @@ object LoggingDocExamples { def howToUse(): Unit = { - //#context-log + // #context-log Behaviors.receive[String] { (context, message) => context.log.info("Received message: {}", message) Behaviors.same } - //#context-log + // #context-log - //#logger-name + // #logger-name Behaviors.setup[String] { context => context.setLoggerName("com.myservice.BackendManager") context.log.info("Starting up") @@ -43,9 +43,9 @@ object LoggingDocExamples { Behaviors.same } } - //#logger-name + // #logger-name - //#logger-factory + // #logger-factory val log = LoggerFactory.getLogger("com.myservice.BackendTask") Future { @@ -55,21 +55,21 @@ object LoggingDocExamples { case Success(result) => log.info("Task completed: {}", result) case Failure(exc) => log.error("Task failed", exc) } - //#logger-factory + // #logger-factory } def placeholders(): Unit = { - //#info2 + // #info2 import akka.actor.typed.scaladsl.LoggerOps Behaviors.receive[String] { (context, message) => context.log.info2("{} received message: {}", context.self.path.name, message) Behaviors.same } - //#info2 + // #info2 - //#infoN + // #infoN import akka.actor.typed.scaladsl.LoggerOps Behaviors.receive[String] { (context, message) => @@ -80,23 +80,23 @@ object LoggingDocExamples { message.take(10)) Behaviors.same } - //#infoN + // #infoN } def logMessages(): Unit = { - //#logMessages + // #logMessages import akka.actor.typed.LogOptions import org.slf4j.event.Level 
Behaviors.logMessages(LogOptions().withLevel(Level.TRACE), BackendManager()) - //#logMessages + // #logMessages } def withMdc(): Unit = { val system: ActorSystem[_] = ??? - //#withMdc + // #withMdc val staticMdc = Map("startTime" -> system.startTime.toString) Behaviors.withMdc[BackendManager.Command]( staticMdc, @@ -104,7 +104,7 @@ object LoggingDocExamples { (msg: BackendManager.Command) => Map("identifier" -> msg.identifier, "upTime" -> system.uptime.toString)) { BackendManager() } - //#withMdc + // #withMdc } def logging(): Unit = { @@ -112,18 +112,18 @@ object LoggingDocExamples { final case class Message(s: String) val ref: ActorRef[Message] = ??? - //#test-logging + // #test-logging import akka.actor.testkit.typed.scaladsl.LoggingTestKit // implicit ActorSystem is needed, but that is given by ScalaTestWithActorTestKit - //implicit val system: ActorSystem[_] + // implicit val system: ActorSystem[_] LoggingTestKit.info("Received message").expect { ref ! Message("hello") } - //#test-logging + // #test-logging - //#test-logging-criteria + // #test-logging-criteria LoggingTestKit .error[IllegalArgumentException] .withMessageRegex(".*was rejected.*expecting ascii input.*") @@ -138,15 +138,15 @@ object LoggingDocExamples { ref ! Message("hellö") ref ! 
Message("hejdå") } - //#test-logging-criteria + // #test-logging-criteria } def tagsExample(): Unit = { Behaviors.setup[AnyRef] { context => val myBehavior = Behaviors.empty[AnyRef] - //#tags + // #tags context.spawn(myBehavior, "MyActor", ActorTags("processing")) - //#tags + // #tags Behaviors.stopped } } diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/OOIntroSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/OOIntroSpec.scala index 1b4ab1ab4c9..d418b303a65 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/OOIntroSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/OOIntroSpec.scala @@ -22,17 +22,17 @@ import org.scalatest.wordspec.AnyWordSpecLike object OOIntroSpec { - //#chatroom-protocol - //#chatroom-behavior + // #chatroom-protocol + // #chatroom-behavior object ChatRoom { - //#chatroom-behavior + // #chatroom-behavior sealed trait RoomCommand final case class GetSession(screenName: String, replyTo: ActorRef[SessionEvent]) extends RoomCommand - //#chatroom-protocol - //#chatroom-behavior + // #chatroom-protocol + // #chatroom-behavior private final case class PublishSessionMessage(screenName: String, message: String) extends RoomCommand - //#chatroom-behavior - //#chatroom-protocol + // #chatroom-behavior + // #chatroom-protocol sealed trait SessionEvent final case class SessionGranted(handle: ActorRef[PostMessage]) extends SessionEvent @@ -42,8 +42,8 @@ object OOIntroSpec { sealed trait SessionCommand final case class PostMessage(message: String) extends SessionCommand private final case class NotifyClient(message: MessagePosted) extends SessionCommand - //#chatroom-protocol - //#chatroom-behavior + // #chatroom-protocol + // #chatroom-behavior def apply(): Behavior[RoomCommand] = Behaviors.setup(context => new ChatRoomBehavior(context)) @@ -96,12 +96,12 @@ object OOIntroSpec { Behaviors.same } } - //#chatroom-protocol + // #chatroom-protocol } - //#chatroom-protocol - //#chatroom-behavior 
+ // #chatroom-protocol + // #chatroom-behavior - //#chatroom-gabbler + // #chatroom-gabbler object Gabbler { import ChatRoom._ @@ -119,10 +119,10 @@ object OOIntroSpec { Behaviors.stopped } } - //#chatroom-gabbler + // #chatroom-gabbler } - //#chatroom-main + // #chatroom-main object Main { def apply(): Behavior[NotUsed] = Behaviors.setup { context => @@ -131,9 +131,8 @@ object OOIntroSpec { context.watch(gabblerRef) chatRoom ! ChatRoom.GetSession("ol’ Gabbler", gabblerRef) - Behaviors.receiveSignal { - case (_, Terminated(_)) => - Behaviors.stopped + Behaviors.receiveSignal { case (_, Terminated(_)) => + Behaviors.stopped } } @@ -142,7 +141,7 @@ object OOIntroSpec { } } - //#chatroom-main + // #chatroom-main } diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/RouterSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/RouterSpec.scala index f0cfd8d4800..9f90ec76818 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/RouterSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/RouterSpec.scala @@ -23,17 +23,16 @@ object RouterSpec { def apply(): Behavior[Command] = Behaviors.setup { context => context.log.info("Starting worker") - Behaviors.receiveMessage { - case DoLog(text) => - context.log.info("Got message {}", text) - Behaviors.same + Behaviors.receiveMessage { case DoLog(text) => + context.log.info("Got message {}", text) + Behaviors.same } } } // #routee - //intentionally out of the routee section + // intentionally out of the routee section class DoBroadcastLog(text: String) extends Worker.DoLog(text) object DoBroadcastLog { def apply(text: String) = new DoBroadcastLog(text) @@ -93,17 +92,17 @@ class RouterSpec extends ScalaTestWithActorTestKit("akka.loglevel=warning") with val alternativeRouter = ctx.spawn(alternativePool, "alternative-pool") alternativeRouter ! 
Worker.DoLog("msg") - //#pool + // #pool // #broadcast val poolWithBroadcast = pool.withBroadcastPredicate(_.isInstanceOf[DoBroadcastLog]) val routerWithBroadcast = ctx.spawn(poolWithBroadcast, "pool-with-broadcast") - //this will be sent to all 4 routees + // this will be sent to all 4 routees routerWithBroadcast ! DoBroadcastLog("msg") Behaviors.empty // #broadcast } - //#pool + // #pool ) probe.receiveMessages(15) @@ -157,24 +156,23 @@ class RouterSpec extends ScalaTestWithActorTestKit("akka.loglevel=warning") with case class Message(id: String, content: String) def apply(monitor: ActorRef[String]): Behavior[Message] = - Behaviors.receiveMessage { - case Message(id, _) => - monitor ! id - Behaviors.same + Behaviors.receiveMessage { case Message(id, _) => + monitor ! id + Behaviors.same } } - //registering proxies + // registering proxies val proxy1 = spawn(Proxy(probe1.ref)) val proxy2 = spawn(Proxy(probe2.ref)) val waiterProbe = createTestProbe[Receptionist.Registered]() system.receptionist ! Receptionist.Register(Proxy.RegisteringKey, proxy1, waiterProbe.ref) system.receptionist ! Receptionist.Register(Proxy.RegisteringKey, proxy2, waiterProbe.ref) - //wait until both registrations get Receptionist.Registered + // wait until both registrations get Receptionist.Registered waiterProbe.receiveMessages(2) - //messages sent to a router with consistent hashing + // messages sent to a router with consistent hashing // #consistent-hashing val router = spawn(Routers.group(Proxy.RegisteringKey).withConsistentHashingRouting(10, Proxy.mapping)) @@ -185,8 +183,8 @@ class RouterSpec extends ScalaTestWithActorTestKit("akka.loglevel=warning") with router ! 
Proxy.Message("zh3", "Text4") // the hash is calculated over the Proxy.Message first parameter obtained through the Proxy.mapping function // #consistent-hashing - //Then messages with equal Message.id reach the same actor - //so the first message in each probe queue is equal to its second + // Then messages with equal Message.id reach the same actor + // so the first message in each probe queue is equal to its second probe1.receiveMessage() shouldBe probe1.receiveMessage() probe2.receiveMessage() shouldBe probe2.receiveMessage() diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/SpawnProtocolDocSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/SpawnProtocolDocSpec.scala index 32498b6098f..dd83e6c7b3b 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/SpawnProtocolDocSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/SpawnProtocolDocSpec.scala @@ -34,7 +34,7 @@ object SpawnProtocolDocSpec { // Silent because we want to name the unused 'context' parameter @nowarn("msg=never used") - //#main + // #main object HelloWorldMain { def apply(): Behavior[SpawnProtocol.Command] = Behaviors.setup { context => @@ -44,7 +44,7 @@ object SpawnProtocolDocSpec { SpawnProtocol() } } - //#main + // #main } class SpawnProtocolDocSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with LogCapturing { @@ -53,7 +53,7 @@ class SpawnProtocolDocSpec extends ScalaTestWithActorTestKit with AnyWordSpecLik "ActorSystem with SpawnProtocol" must { "be able to spawn actors" in { - //#system-spawn + // #system-spawn implicit val system: ActorSystem[SpawnProtocol.Command] = ActorSystem(HelloWorldMain(), "hello") @@ -78,7 +78,7 @@ class SpawnProtocolDocSpec extends ScalaTestWithActorTestKit with AnyWordSpecLik greeterRef ! 
HelloWorld.Greet("Akka", replyToRef) } - //#system-spawn + // #system-spawn Thread.sleep(500) // it will not fail if too short ActorTestKit.shutdown(system) diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/StyleGuideDocExamples.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/StyleGuideDocExamples.scala index 307a941b495..df9f7ab81bb 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/StyleGuideDocExamples.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/StyleGuideDocExamples.scala @@ -31,15 +31,15 @@ object StyleGuideDocExamples { object FunctionalStyle { - //#fun-style + // #fun-style - //#messages + // #messages object Counter { sealed trait Command case object Increment extends Command final case class GetValue(replyTo: ActorRef[Value]) extends Command final case class Value(n: Int) - //#messages + // #messages def apply(): Behavior[Command] = counter(0) @@ -56,16 +56,16 @@ object StyleGuideDocExamples { Behaviors.same } } - //#messages + // #messages } - //#messages - //#fun-style + // #messages + // #fun-style } object OOStyle { - //#oo-style + // #oo-style object Counter { sealed trait Command @@ -95,7 +95,7 @@ object StyleGuideDocExamples { } } } - //#oo-style + // #oo-style } @@ -270,7 +270,7 @@ object StyleGuideDocExamples { } object FactoryMethod { - //#behavior-factory-method + // #behavior-factory-method object CountDown { sealed trait Command case object Down extends Command @@ -284,37 +284,36 @@ object StyleGuideDocExamples { import CountDown._ private def counter(remaining: Int): Behavior[Command] = { - //#exhastivness-check - Behaviors.receiveMessage { - case Down => - if (remaining == 1) { - notifyWhenZero.tell(Done) - Behaviors.stopped - } else - counter(remaining - 1) + // #exhastivness-check + Behaviors.receiveMessage { case Down => + if (remaining == 1) { + notifyWhenZero.tell(Done) + Behaviors.stopped + } else + counter(remaining - 1) } - //#exhastivness-check + // 
#exhastivness-check } } - //#behavior-factory-method + // #behavior-factory-method object Usage { val context: ActorContext[_] = ??? val doneRef: ActorRef[Done] = ??? - //#behavior-factory-method-spawn + // #behavior-factory-method-spawn val countDown = context.spawn(CountDown(100, doneRef), "countDown") - //#behavior-factory-method-spawn + // #behavior-factory-method-spawn - //#message-prefix-in-tell + // #message-prefix-in-tell countDown ! CountDown.Down - //#message-prefix-in-tell + // #message-prefix-in-tell } } object Messages { - //#message-protocol + // #message-protocol object CounterProtocol { sealed trait Command @@ -325,11 +324,11 @@ object StyleGuideDocExamples { case object Confirmed extends OperationResult final case class Rejected(reason: String) extends OperationResult } - //#message-protocol + // #message-protocol } object PublicVsPrivateMessages1 { - //#public-private-messages-1 + // #public-private-messages-1 object Counter { sealed trait Command case object Increment extends Command @@ -366,11 +365,11 @@ object StyleGuideDocExamples { Behaviors.same } } - //#public-private-messages-1 + // #public-private-messages-1 } object PublicVsPrivateMessages2 { - //#public-private-messages-2 + // #public-private-messages-2 // above example is preferred, but this is possible and not wrong object Counter { // The type of all public and private messages the Counter actor handles @@ -417,7 +416,7 @@ object StyleGuideDocExamples { Behaviors.same } } - //#public-private-messages-2 + // #public-private-messages-2 } object Ask { @@ -425,7 +424,7 @@ object StyleGuideDocExamples { implicit val system: ActorSystem[Nothing] = ??? - //#ask-1 + // #ask-1 import akka.actor.typed.scaladsl.AskPattern._ import akka.util.Timeout @@ -433,11 +432,11 @@ object StyleGuideDocExamples { val counter: ActorRef[Command] = ??? 
val result: Future[OperationResult] = counter.ask(replyTo => Increment(delta = 2, replyTo)) - //#ask-1 + // #ask-1 - //#ask-2 + // #ask-2 val result2: Future[OperationResult] = counter.ask(Increment(delta = 2, _)) - //#ask-2 + // #ask-2 /* //#ask-3 @@ -446,26 +445,26 @@ object StyleGuideDocExamples { //#ask-3 */ - //#ask-4 + // #ask-4 val result3: Future[OperationResult] = counter ? (Increment(delta = 2, _)) - //#ask-4 + // #ask-4 } object ExhaustivenessCheck { object CountDown { - //#messages-sealed + // #messages-sealed sealed trait Command case object Down extends Command final case class GetValue(replyTo: ActorRef[Value]) extends Command final case class Value(n: Int) - //#messages-sealed + // #messages-sealed } class CountDown() { import CountDown._ - //#pattern-match-unhandled + // #pattern-match-unhandled val zero: Behavior[Command] = { Behaviors.receiveMessage { case GetValue(replyTo) => @@ -475,19 +474,18 @@ object StyleGuideDocExamples { Behaviors.unhandled } } - //#pattern-match-unhandled + // #pattern-match-unhandled @nowarn object partial { - //#pattern-match-partial + // #pattern-match-partial val zero: Behavior[Command] = { - Behaviors.receiveMessagePartial { - case GetValue(replyTo) => - replyTo ! Value(0) - Behaviors.same + Behaviors.receiveMessagePartial { case GetValue(replyTo) => + replyTo ! Value(0) + Behaviors.same } } - //#pattern-match-partial + // #pattern-match-partial } } @@ -495,40 +493,37 @@ object StyleGuideDocExamples { object BehaviorCompositionWithPartialFunction { - //#messages-sealed-composition + // #messages-sealed-composition sealed trait Command case object Down extends Command final case class GetValue(replyTo: ActorRef[Value]) extends Command final case class Value(n: Int) - //#messages-sealed-composition + // #messages-sealed-composition - //#get-handler-partial - def getHandler(value: Int): PartialFunction[Command, Behavior[Command]] = { - case GetValue(replyTo) => - replyTo ! 
Value(value) - Behaviors.same + // #get-handler-partial + def getHandler(value: Int): PartialFunction[Command, Behavior[Command]] = { case GetValue(replyTo) => + replyTo ! Value(value) + Behaviors.same } - //#get-handler-partial - - //#set-handler-non-zero-partial - def setHandlerNotZero(value: Int): PartialFunction[Command, Behavior[Command]] = { - case Down => - if (value == 1) - zero - else - nonZero(value - 1) + // #get-handler-partial + + // #set-handler-non-zero-partial + def setHandlerNotZero(value: Int): PartialFunction[Command, Behavior[Command]] = { case Down => + if (value == 1) + zero + else + nonZero(value - 1) } - //#set-handler-non-zero-partial + // #set-handler-non-zero-partial - //#set-handler-zero-partial - def setHandlerZero(log: Logger): PartialFunction[Command, Behavior[Command]] = { - case Down => - log.error("Counter is already at zero!") - Behaviors.same + // #set-handler-zero-partial + def setHandlerZero(log: Logger): PartialFunction[Command, Behavior[Command]] = { case Down => + log.error("Counter is already at zero!") + Behaviors.same } - //#set-handler-zero-partial + // #set-handler-zero-partial - //#top-level-behaviors-partial + // #top-level-behaviors-partial val zero: Behavior[Command] = Behaviors.setup { context => Behaviors.receiveMessagePartial(getHandler(0).orElse(setHandlerZero(context.log))) } @@ -538,13 +533,13 @@ object StyleGuideDocExamples { // Default Initial Behavior for this actor def apply(initialCapacity: Int): Behavior[Command] = nonZero(initialCapacity) - //#top-level-behaviors-partial + // #top-level-behaviors-partial } object NestingSample1 { sealed trait Command - //#nesting + // #nesting def apply(): Behavior[Command] = Behaviors.setup[Command](context => Behaviors.withStash(100)(stash => @@ -552,19 +547,19 @@ object StyleGuideDocExamples { context.log.debug("Starting up") // behavior using context, stash and timers ... 
- //#nesting + // #nesting timers.isTimerActive("aa") stash.isEmpty Behaviors.empty - //#nesting + // #nesting })) - //#nesting + // #nesting } object NestingSample2 { sealed trait Command - //#nesting-supervise + // #nesting-supervise def apply(): Behavior[Command] = Behaviors.setup { context => // only run on initial actor start, not on crash-restart @@ -575,13 +570,13 @@ object StyleGuideDocExamples { // every time the actor crashes and restarts a new stash is created (previous stash is lost) context.log.debug("Starting up with stash") // Behaviors.receiveMessage { ... } - //#nesting-supervise + // #nesting-supervise stash.isEmpty Behaviors.empty - //#nesting-supervise + // #nesting-supervise }) .onFailure[RuntimeException](SupervisorStrategy.restart) } - //#nesting-supervise + // #nesting-supervise } } diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/ClassicWatchingTypedSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/ClassicWatchingTypedSpec.scala index 0058ed6119e..d3555b29e17 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/ClassicWatchingTypedSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/ClassicWatchingTypedSpec.scala @@ -26,7 +26,7 @@ object ClassicWatchingTypedSpec { def props() = classic.Props(new Classic) } - //#classic-watch + // #classic-watch class Classic extends classic.Actor with ActorLogging { // context.spawn is an implicit extension method val second: ActorRef[Typed.Command] = @@ -51,9 +51,9 @@ object ClassicWatchingTypedSpec { context.stop(self) } } - //#classic-watch + // #classic-watch - //#typed + // #typed object Typed { sealed trait Command final case class Ping(replyTo: ActorRef[Pong.type]) extends Command @@ -70,7 +70,7 @@ object ClassicWatchingTypedSpec { } } } - //#typed + // #typed } class ClassicWatchingTypedSpec extends AnyWordSpec with LogCapturing { @@ -80,9 +80,9 @@ class ClassicWatchingTypedSpec extends 
AnyWordSpec with LogCapturing { "Classic -> Typed" must { "support creating, watching and messaging" in { val system = classic.ActorSystem("Coexistence") - //#create-classic + // #create-classic val classicActor = system.actorOf(Classic.props()) - //#create-classic + // #create-classic val probe = TestProbe()(system) probe.watch(classicActor) probe.expectTerminated(classicActor, 200.millis) @@ -90,11 +90,11 @@ class ClassicWatchingTypedSpec extends AnyWordSpec with LogCapturing { } "support converting a classic actor system to an actor system" in { - //#convert-classic + // #convert-classic val system = akka.actor.ActorSystem("ClassicToTypedSystem") val typedSystem: ActorSystem[Nothing] = system.toTyped - //#convert-classic + // #convert-classic typedSystem.scheduler // remove compile warning TestKit.shutdownActorSystem(system) } diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/TypedWatchingClassicSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/TypedWatchingClassicSpec.scala index b96ff015ac7..606bf0eb652 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/TypedWatchingClassicSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/TypedWatchingClassicSpec.scala @@ -21,7 +21,7 @@ import scala.concurrent.duration._ object TypedWatchingClassicSpec { - //#typed + // #typed object Typed { final case class Ping(replyTo: akka.actor.typed.ActorRef[Pong.type]) sealed trait Command @@ -39,32 +39,29 @@ object TypedWatchingClassicSpec { classic.tell(Typed.Ping(context.self), context.self.toClassic) Behaviors - .receivePartial[Command] { - case (context, Pong) => - // it's not possible to get the sender, that must be sent in message - // context.stop is an implicit extension method - context.stop(classic) - Behaviors.same + .receivePartial[Command] { case (context, Pong) => + // it's not possible to get the sender, that must be sent in message + // context.stop is 
an implicit extension method + context.stop(classic) + Behaviors.same } - .receiveSignal { - case (_, akka.actor.typed.Terminated(_)) => - Behaviors.stopped + .receiveSignal { case (_, akka.actor.typed.Terminated(_)) => + Behaviors.stopped } } } - //#typed + // #typed - //#classic + // #classic object Classic { def props(): classic.Props = classic.Props(new Classic) } class Classic extends classic.Actor { - override def receive = { - case Typed.Ping(replyTo) => - replyTo ! Typed.Pong + override def receive = { case Typed.Ping(replyTo) => + replyTo ! Typed.Pong } } - //#classic + // #classic } class TypedWatchingClassicSpec extends AnyWordSpec with LogCapturing { @@ -73,10 +70,10 @@ class TypedWatchingClassicSpec extends AnyWordSpec with LogCapturing { "Typed -> Classic" must { "support creating, watching and messaging" in { - //#create + // #create val system = classic.ActorSystem("TypedWatchingClassic") val typed = system.spawn(Typed.behavior, "Typed") - //#create + // #create val probe = TestProbe()(system) probe.watch(typed.toClassic) probe.expectTerminated(typed.toClassic, 200.millis) diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/extensions/ExtensionDocSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/extensions/ExtensionDocSpec.scala index c533603d971..0894a2845a7 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/extensions/ExtensionDocSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/extensions/ExtensionDocSpec.scala @@ -51,10 +51,10 @@ object ExtensionDocSpec { val initialBehavior: Behavior[Any] = Behaviors.empty[Any] - //#usage + // #usage Behaviors.setup[Any] { ctx => DatabasePool(ctx.system).connection().executeQuery("insert into...") initialBehavior } - //#usage + // #usage } diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/fromclassic/ClassicSample.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/fromclassic/ClassicSample.scala index 
7a927de5e3b..a962611aa17 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/fromclassic/ClassicSample.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/fromclassic/ClassicSample.scala @@ -13,7 +13,7 @@ import akka.actor.Props object ClassicSample { - //#hello-world-actor + // #hello-world-actor object HelloWorld { final case class Greet(whom: String) final case class Greeted(whom: String) @@ -25,12 +25,11 @@ object ClassicSample { class HelloWorld extends Actor with ActorLogging { import HelloWorld._ - override def receive: Receive = { - case Greet(whom) => - log.info("Hello {}!", whom) - sender() ! Greeted(whom) + override def receive: Receive = { case Greet(whom) => + log.info("Hello {}!", whom) + sender() ! Greeted(whom) } } - //#hello-world-actor + // #hello-world-actor } diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/fromclassic/TypedSample.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/fromclassic/TypedSample.scala index a8c9f4a0c1e..9ecc989001c 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/fromclassic/TypedSample.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/fromclassic/TypedSample.scala @@ -15,7 +15,7 @@ import akka.actor.typed.scaladsl.Behaviors object TypedSample { - //#hello-world-actor + // #hello-world-actor object HelloWorld { final case class Greet(whom: String, replyTo: ActorRef[Greeted]) final case class Greeted(whom: String, from: ActorRef[Greet]) @@ -33,9 +33,9 @@ object TypedSample { this } } - //#hello-world-actor + // #hello-world-actor - //#children + // #children object Parent { sealed trait Command case class DelegateToChild(name: String, message: Child.Command) extends Command @@ -66,7 +66,7 @@ object TypedSample { updated(Map.empty) } } - //#children + // #children object Child { sealed trait Command diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/supervision/SupervisionCompileOnly.scala 
b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/supervision/SupervisionCompileOnly.scala index 54341c5f2d0..c60da3946be 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/supervision/SupervisionCompileOnly.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/supervision/SupervisionCompileOnly.scala @@ -17,37 +17,37 @@ object SupervisionCompileOnly { val behavior = Behaviors.empty[String] - //#restart + // #restart Behaviors.supervise(behavior).onFailure[IllegalStateException](SupervisorStrategy.restart) - //#restart + // #restart - //#resume + // #resume Behaviors.supervise(behavior).onFailure[IllegalStateException](SupervisorStrategy.resume) - //#resume + // #resume - //#restart-limit + // #restart-limit Behaviors .supervise(behavior) .onFailure[IllegalStateException]( SupervisorStrategy.restart.withLimit(maxNrOfRetries = 10, withinTimeRange = 10.seconds)) - //#restart-limit + // #restart-limit - //#multiple + // #multiple Behaviors .supervise(Behaviors.supervise(behavior).onFailure[IllegalStateException](SupervisorStrategy.restart)) .onFailure[IllegalArgumentException](SupervisorStrategy.stop) - //#multiple + // #multiple - //#wrap + // #wrap object Counter { sealed trait Command case class Increment(nr: Int) extends Command case class GetCount(replyTo: ActorRef[Int]) extends Command - //#top-level + // #top-level def apply(): Behavior[Command] = Behaviors.supervise(counter(1)).onFailure(SupervisorStrategy.restart) - //#top-level + // #top-level private def counter(count: Int): Behavior[Command] = Behaviors.receiveMessage[Command] { @@ -58,9 +58,9 @@ object SupervisionCompileOnly { Behaviors.same } } - //#wrap + // #wrap - //#restart-stop-children + // #restart-stop-children def child(size: Long): Behavior[String] = Behaviors.receiveMessage(msg => child(size + msg.length)) @@ -82,9 +82,9 @@ object SupervisionCompileOnly { } .onFailure(SupervisorStrategy.restart) } - //#restart-stop-children + // #restart-stop-children - 
//#restart-keep-children + // #restart-keep-children def parent2: Behavior[String] = { Behaviors.setup { ctx => val child1 = ctx.spawn(child(0), "child1") @@ -104,7 +104,7 @@ object SupervisionCompileOnly { .onFailure(SupervisorStrategy.restart.withStopChildren(false)) } } - //#restart-keep-children + // #restart-keep-children trait Resource { def close(): Unit @@ -113,7 +113,7 @@ object SupervisionCompileOnly { def claimResource(): Resource = ??? @nowarn("msg=never used") - //#restart-PreRestart-signal + // #restart-PreRestart-signal def withPreRestart: Behavior[String] = { Behaviors .supervise[String] { @@ -138,5 +138,5 @@ object SupervisionCompileOnly { .onFailure[Exception](SupervisorStrategy.restart) } - //#restart-PreRestart-signal + // #restart-PreRestart-signal } diff --git a/akka-actor-typed/src/main/scala-2.13/akka/actor/typed/internal/receptionist/Platform.scala b/akka-actor-typed/src/main/scala-2.13/akka/actor/typed/internal/receptionist/Platform.scala index dfd0b4a1410..2f15b91b9c9 100644 --- a/akka-actor-typed/src/main/scala-2.13/akka/actor/typed/internal/receptionist/Platform.scala +++ b/akka-actor-typed/src/main/scala-2.13/akka/actor/typed/internal/receptionist/Platform.scala @@ -7,9 +7,7 @@ package akka.actor.typed.internal.receptionist import akka.actor.typed.ActorRef import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[receptionist] object Platform { type Service[K <: AbstractServiceKey] = ActorRef[K#Protocol] type Subscriber[K <: AbstractServiceKey] = ActorRef[ReceptionistMessages.Listing[K#Protocol]] diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/ActorRef.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/ActorRef.scala index 93b9c372ad5..03f5873ddde 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/ActorRef.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/ActorRef.scala @@ -31,9 +31,7 @@ trait ActorRef[-T] extends RecipientRef[T] with 
java.lang.Comparable[ActorRef[_] */ def tell(msg: T): Unit - /** - * Narrow the type of this `ActorRef`, which is always a safe operation. - */ + /** Narrow the type of this `ActorRef`, which is always a safe operation. */ def narrow[U <: T]: ActorRef[U] /** @@ -68,9 +66,7 @@ object ActorRef { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] object SerializedActorRef { def apply[T](actorRef: ActorRef[T]): SerializedActorRef[T] = { new SerializedActorRef(actorRef) diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/ActorRefResolver.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/ActorRefResolver.scala index 5a72c76caf3..e19f4edef0c 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/ActorRefResolver.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/ActorRefResolver.scala @@ -33,16 +33,12 @@ abstract class ActorRefResolver extends Extension { */ def toSerializationFormat[T](ref: ActorRef[T]): String - /** - * Deserialize an `ActorRef` in the [[ActorRefResolver#toSerializationFormat]]. - */ + /** Deserialize an `ActorRef` in the [[ActorRefResolver#toSerializationFormat]]. 
*/ def resolveActorRef[T](serializedActorRef: String): ActorRef[T] } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class ActorRefResolverImpl(system: ActorSystem[_]) extends ActorRefResolver { import akka.actor.typed.scaladsl.adapter._ diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/ActorSystem.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/ActorSystem.scala index 419e8ae8645..13c2a045524 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/ActorSystem.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/ActorSystem.scala @@ -11,7 +11,7 @@ import scala.concurrent.{ ExecutionContextExecutor, Future } import com.typesafe.config.{ Config, ConfigFactory } import org.slf4j.Logger -import akka.{ Done, actor => classic } +import akka.{ actor => classic, Done } import akka.actor.{ Address, BootstrapSetup, ClassicActorSystemProvider } import akka.actor.setup.ActorSystemSetup import akka.actor.typed.eventstream.EventStream @@ -40,14 +40,10 @@ abstract class ActorSystem[-T] extends ActorRef[T] with Extensions with ClassicA */ def name: String - /** - * The core settings extracted from the supplied configuration. - */ + /** The core settings extracted from the supplied configuration. */ def settings: Settings - /** - * Log the configuration. - */ + /** Log the configuration. */ def logConfiguration(): Unit /** @@ -58,19 +54,13 @@ abstract class ActorSystem[-T] extends ActorRef[T] with Extensions with ClassicA */ def log: Logger - /** - * Start-up time in milliseconds since the epoch. - */ + /** Start-up time in milliseconds since the epoch. */ def startTime: Long - /** - * Up-time of this actor system in seconds. - */ + /** Up-time of this actor system in seconds. 
*/ def uptime: Long - /** - * A ThreadFactory that can be used if the transport needs to create any Threads - */ + /** A ThreadFactory that can be used if the transport needs to create any Threads */ def threadFactory: ThreadFactory /** @@ -89,14 +79,10 @@ abstract class ActorSystem[-T] extends ActorRef[T] with Extensions with ClassicA */ def scheduler: Scheduler - /** - * Facilities for lookup up thread-pools from configuration. - */ + /** Facilities for lookup up thread-pools from configuration. */ def dispatchers: Dispatchers - /** - * The default thread pool of this ActorSystem, configured with settings in `akka.actor.default-dispatcher`. - */ + /** The default thread pool of this ActorSystem, configured with settings in `akka.actor.default-dispatcher`. */ implicit def executionContext: ExecutionContextExecutor /** @@ -142,9 +128,7 @@ abstract class ActorSystem[-T] extends ActorRef[T] with Extensions with ClassicA */ def deadLetters[U]: ActorRef[U] - /** - * An ActorRef that ignores any incoming messages. - */ + /** An ActorRef that ignores any incoming messages. */ def ignoreRef[U]: ActorRef[U] /** @@ -164,9 +148,7 @@ abstract class ActorSystem[-T] extends ActorRef[T] with Extensions with ClassicA */ def systemActorOf[U](behavior: Behavior[U], name: String, props: Props = Props.empty): ActorRef[U] - /** - * Return a reference to this system’s [[akka.actor.typed.receptionist.Receptionist]]. - */ + /** Return a reference to this system’s [[akka.actor.typed.receptionist.Receptionist]]. 
*/ def receptionist: ActorRef[Receptionist.Command] = Receptionist(this).ref @@ -191,21 +173,15 @@ abstract class ActorSystem[-T] extends ActorRef[T] with Extensions with ClassicA object ActorSystem { - /** - * Scala API: Create an ActorSystem - */ + /** Scala API: Create an ActorSystem */ def apply[T](guardianBehavior: Behavior[T], name: String): ActorSystem[T] = createInternal(name, guardianBehavior, Props.empty, ActorSystemSetup.create(BootstrapSetup())) - /** - * Scala API: Create an ActorSystem - */ + /** Scala API: Create an ActorSystem */ def apply[T](guardianBehavior: Behavior[T], name: String, config: Config): ActorSystem[T] = createInternal(name, guardianBehavior, Props.empty, ActorSystemSetup.create(BootstrapSetup(config))) - /** - * Scala API: Create an ActorSystem - */ + /** Scala API: Create an ActorSystem */ def apply[T](guardianBehavior: Behavior[T], name: String, config: Config, guardianProps: Props): ActorSystem[T] = createInternal(name, guardianBehavior, guardianProps, ActorSystemSetup.create(BootstrapSetup(config))) @@ -228,21 +204,15 @@ object ActorSystem { def apply[T](guardianBehavior: Behavior[T], name: String, bootstrapSetup: BootstrapSetup): ActorSystem[T] = apply(guardianBehavior, name, ActorSystemSetup.create(bootstrapSetup)) - /** - * Java API: Create an ActorSystem - */ + /** Java API: Create an ActorSystem */ def create[T](guardianBehavior: Behavior[T], name: String): ActorSystem[T] = apply(guardianBehavior, name) - /** - * Java API: Create an ActorSystem - */ + /** Java API: Create an ActorSystem */ def create[T](guardianBehavior: Behavior[T], name: String, config: Config): ActorSystem[T] = apply(guardianBehavior, name, config) - /** - * Java API: Create an ActorSystem - */ + /** Java API: Create an ActorSystem */ def create[T](guardianBehavior: Behavior[T], name: String, config: Config, guardianProps: Props): ActorSystem[T] = createInternal(name, guardianBehavior, guardianProps, ActorSystemSetup.create(BootstrapSetup(config))) @@ 
-311,9 +281,7 @@ final class Settings(val config: Config, val classicSettings: classic.ActorSyste def setup: ActorSystemSetup = classicSettings.setup - /** - * Returns the String representation of the Config that this Settings is backed by - */ + /** Returns the String representation of the Config that this Settings is backed by */ override def toString: String = config.root.render private val typedConfig = config.getConfig("akka.actor.typed") diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/Behavior.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/Behavior.scala index 6ed9db91a19..9f297c60cac 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/Behavior.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/Behavior.scala @@ -42,9 +42,7 @@ import akka.util.OptionVal @DoNotInherit abstract class Behavior[T](private[akka] val _tag: Int) { behavior => - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi @volatile private[akka] var _internalClassicPropsCache: OptionVal[CachedProps] = OptionVal.None @@ -141,7 +139,6 @@ object Behavior { * The `ClassTag` for `Outer` ensures that only messages of this class or a subclass thereof will be * intercepted. Other message types (e.g. a private protocol) will bypass * the interceptor and be continue to the inner behavior untouched. - * */ def transformMessages[Outer: ClassTag](matcher: PartialFunction[Outer, Inner]): Behavior[Outer] = BehaviorImpl.transformMessages(behavior, matcher) @@ -215,31 +212,21 @@ object Behavior { throw new IllegalArgumentException(s"cannot use $behavior as initial behavior") else behavior - /** - * Returns true if the given behavior is not stopped. - */ + /** Returns true if the given behavior is not stopped. */ def isAlive[T](behavior: Behavior[T]): Boolean = !(behavior._tag == BehaviorTags.StoppedBehavior || behavior._tag == BehaviorTags.FailedBehavior) - /** - * Returns true if the given behavior is the special `unhandled` marker. 
- */ + /** Returns true if the given behavior is the special `unhandled` marker. */ def isUnhandled[T](behavior: Behavior[T]): Boolean = behavior eq BehaviorImpl.UnhandledBehavior - /** - * Returns true if the given behavior is deferred. - */ + /** Returns true if the given behavior is deferred. */ def isDeferred[T](behavior: Behavior[T]): Boolean = behavior._tag == BehaviorTags.DeferredBehavior - /** - * Execute the behavior with the given message. - */ + /** Execute the behavior with the given message. */ def interpretMessage[T](behavior: Behavior[T], ctx: TypedActorContext[T], msg: T): Behavior[T] = interpret(behavior, ctx, msg, isSignal = false) - /** - * Execute the behavior with the given signal. - */ + /** Execute the behavior with the given signal. */ def interpretSignal[T](behavior: Behavior[T], ctx: TypedActorContext[T], signal: Signal): Behavior[T] = { val result = interpret(behavior, ctx, signal, isSignal = true) // we need to throw here to allow supervision of deathpact exception diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/BehaviorInterceptor.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/BehaviorInterceptor.scala index dc596f5cdba..fa0a736862a 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/BehaviorInterceptor.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/BehaviorInterceptor.scala @@ -147,9 +147,7 @@ object BehaviorInterceptor { abstract class BehaviorSignalInterceptor[Inner] extends BehaviorInterceptor[Inner, Inner](null) { import BehaviorInterceptor._ - /** - * Only signals and not messages are intercepted by `BehaviorSignalInterceptor`. - */ + /** Only signals and not messages are intercepted by `BehaviorSignalInterceptor`. 
*/ final override def aroundReceive( ctx: TypedActorContext[Inner], msg: Inner, diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/Dispatchers.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/Dispatchers.scala index 083a2937cd8..b0a49a9e3f8 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/Dispatchers.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/Dispatchers.scala @@ -16,15 +16,11 @@ object Dispatchers { */ final val DefaultDispatcherId = "akka.actor.default-dispatcher" - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi final val InternalDispatcherId = "akka.actor.internal-dispatcher" } -/** - * An [[ActorSystem]] looks up all its thread pools via a Dispatchers instance. - */ +/** An [[ActorSystem]] looks up all its thread pools via a Dispatchers instance. */ abstract class Dispatchers { def lookup(selector: DispatcherSelector): ExecutionContextExecutor def shutdown(): Unit diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/Extensions.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/Extensions.scala index 14abbfcfed7..7eb6e4f4dcf 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/Extensions.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/Extensions.scala @@ -107,22 +107,16 @@ trait Extension */ abstract class ExtensionId[T <: Extension] { - /** - * Create the extension, will be invoked at most one time per actor system where the extension is registered. - */ + /** Create the extension, will be invoked at most one time per actor system where the extension is registered. */ def createExtension(system: ActorSystem[_]): T - /** - * Lookup or create an instance of the extension identified by this id. - */ + /** Lookup or create an instance of the extension identified by this id. 
*/ final def apply(system: ActorSystem[_]): T = system.registerExtension(this) override final def hashCode: Int = System.identityHashCode(this) override final def equals(other: Any): Boolean = this eq other.asInstanceOf[AnyRef] - /** - * Java API: The identifier of the extension - */ + /** Java API: The identifier of the extension */ def id: ExtensionId[T] = this } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/LogOptions.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/LogOptions.scala index 816bc85a6cf..bf1f0bbf864 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/LogOptions.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/LogOptions.scala @@ -11,9 +11,7 @@ import org.slf4j.event.Level import akka.annotation.{ DoNotInherit, InternalApi } -/** - * Logging options when using `Behaviors.logMessages`. - */ +/** Logging options when using `Behaviors.logMessages`. */ @DoNotInherit abstract sealed class LogOptions { @@ -23,14 +21,10 @@ abstract sealed class LogOptions { */ def withEnabled(enabled: Boolean): LogOptions - /** - * The [[org.slf4j.event.Level]] to use when logging messages. - */ + /** The [[org.slf4j.event.Level]] to use when logging messages. */ def withLevel(level: Level): LogOptions - /** - * A [[org.slf4j.Logger]] to use when logging messages. - */ + /** A [[org.slf4j.Logger]] to use when logging messages. 
*/ def withLogger(logger: Logger): LogOptions def enabled: Boolean @@ -41,14 +35,10 @@ abstract sealed class LogOptions { def getLogger: Optional[Logger] } -/** - * Factories for log options - */ +/** Factories for log options */ object LogOptions { - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] final case class LogOptionsImpl(enabled: Boolean, level: Level, logger: Option[Logger]) extends LogOptions { @@ -59,27 +49,19 @@ object LogOptions { */ override def withEnabled(enabled: Boolean): LogOptions = this.copy(enabled = enabled) - /** - * The [[org.slf4j.event.Level]] to use when logging messages. - */ + /** The [[org.slf4j.event.Level]] to use when logging messages. */ override def withLevel(level: Level): LogOptions = this.copy(level = level) - /** - * A [[org.slf4j.Logger]] to use when logging messages. - */ + /** A [[org.slf4j.Logger]] to use when logging messages. */ override def withLogger(logger: Logger): LogOptions = this.copy(logger = Option(logger)) /** Java API */ override def getLogger: Optional[Logger] = Optional.ofNullable(logger.orNull) } - /** - * Scala API: Create a new log options with defaults. - */ + /** Scala API: Create a new log options with defaults. */ def apply(): LogOptions = LogOptionsImpl(enabled = true, Level.DEBUG, None) - /** - * Java API: Create a new log options. - */ + /** Java API: Create a new log options. */ def create(): LogOptions = apply() } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/MessageAndSignals.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/MessageAndSignals.scala index ee3d1977572..27304d44f4b 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/MessageAndSignals.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/MessageAndSignals.scala @@ -6,9 +6,7 @@ package akka.actor.typed import akka.annotation.DoNotInherit -/** - * Exception that an actor fails with if it does not handle a Terminated message. 
- */ +/** Exception that an actor fails with if it does not handle a Terminated message. */ final case class DeathPactException(ref: ActorRef[Nothing]) extends RuntimeException(s"death pact with $ref was triggered") { @@ -88,14 +86,10 @@ object ChildFailed { def unapply(t: ChildFailed): Option[(ActorRef[Nothing], Throwable)] = Some((t.ref, t.cause)) } -/** - * Child has failed due an uncaught exception - */ +/** Child has failed due an uncaught exception */ final class ChildFailed(ref: ActorRef[Nothing], val cause: Throwable) extends Terminated(ref) { - /** - * Java API - */ + /** Java API */ def getCause(): Throwable = cause override def toString: String = s"ChildFailed($ref,${cause.getClass.getName})" diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/Props.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/Props.scala index 39623608be5..4ca024cc95e 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/Props.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/Props.scala @@ -59,9 +59,7 @@ abstract class Props private[akka] () extends Product with Serializable { @InternalApi private[akka] def withNext(next: Props): Props - /** - * Prepend a selection of the [[ActorSystem]] default executor to this Props. - */ + /** Prepend a selection of the [[ActorSystem]] default executor to this Props. */ def withDispatcherDefault: Props = DispatcherDefault(this) /** @@ -71,9 +69,7 @@ abstract class Props private[akka] () extends Product with Serializable { */ def withDispatcherFromConfig(path: String): Props = DispatcherFromConfig(path, this) - /** - * Prepend a selection of the same executor as the parent actor to this Props. - */ + /** Prepend a selection of the same executor as the parent actor to this Props. */ def withDispatcherSameAsParent: Props = DispatcherSameAsParent(this) /** @@ -140,9 +136,7 @@ abstract class Props private[akka] () extends Product with Serializable { } } -/** - * Not for user extension. 
- */ +/** Not for user extension. */ @DoNotInherit abstract class DispatcherSelector extends Props @@ -188,22 +182,16 @@ object DispatcherSelector { def sameAsParent(): DispatcherSelector = DispatcherSameAsParent.empty } -/** - * Not for user extension. - */ +/** Not for user extension. */ @DoNotInherit abstract class MailboxSelector extends Props object MailboxSelector { - /** - * Scala API: The default mailbox is SingleConsumerOnlyUnboundedMailbox - */ + /** Scala API: The default mailbox is SingleConsumerOnlyUnboundedMailbox */ def default(): MailboxSelector = fromConfig("akka.actor.typed.default-mailbox") - /** - * Java API: The default mailbox is SingleConsumerOnlyUnboundedMailbox - */ + /** Java API: The default mailbox is SingleConsumerOnlyUnboundedMailbox */ def defaultMailbox(): MailboxSelector = default() /** @@ -235,17 +223,13 @@ abstract class ActorTags extends Props { */ def tags: Set[String] - /** - * Java API: one or more tags defined for the actor - */ + /** Java API: one or more tags defined for the actor */ def getTags(): java.util.Set[String] = tags.asJava } object ActorTags { - /** - * Java API: create a tag props with one or more tags - */ + /** Java API: create a tag props with one or more tags */ @varargs def create(tags: String*): ActorTags = apply(tags.toSet) @@ -256,9 +240,7 @@ object ActorTags { */ def create(tags: java.util.Set[String]): ActorTags = ActorTagsImpl(tags.asScala.toSet) - /** - * Scala API: create a tag props with one or more tags - */ + /** Scala API: create a tag props with one or more tags */ def apply(tag: String, additionalTags: String*): ActorTags = { val tags = if (additionalTags.isEmpty) Set(tag) diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/Scheduler.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/Scheduler.scala index 9e7046f9ad0..e8e0e555e64 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/Scheduler.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/Scheduler.scala 
@@ -39,7 +39,6 @@ trait Scheduler { * reach (calculated as: `delay / tickNanos > Int.MaxValue`). * * Note: For scheduling within actors `Behaviors.withTimers` or `ActorContext.scheduleOnce` should be preferred. - * */ def scheduleOnce(delay: java.time.Duration, runnable: Runnable, executor: ExecutionContext): Cancellable @@ -62,10 +61,9 @@ trait Scheduler { * reach (calculated as: `delay / tickNanos > Int.MaxValue`). * * Note: For scheduling within actors `Behaviors.withTimers` should be preferred. - * */ - def scheduleWithFixedDelay(initialDelay: FiniteDuration, delay: FiniteDuration)(runnable: Runnable)( - implicit executor: ExecutionContext): Cancellable + def scheduleWithFixedDelay(initialDelay: FiniteDuration, delay: FiniteDuration)(runnable: Runnable)(implicit + executor: ExecutionContext): Cancellable /** * Java API: Schedules a `Runnable` to be run repeatedly with an initial delay and @@ -124,10 +122,9 @@ trait Scheduler { * reach (calculated as: `delay / tickNanos > Int.MaxValue`). * * Note: For scheduling within actors `Behaviors.withTimers` should be preferred. 
- * */ - def scheduleAtFixedRate(initialDelay: FiniteDuration, interval: FiniteDuration)(runnable: Runnable)( - implicit executor: ExecutionContext): Cancellable + def scheduleAtFixedRate(initialDelay: FiniteDuration, interval: FiniteDuration)(runnable: Runnable)(implicit + executor: ExecutionContext): Cancellable /** * Java API: Schedules a `Runnable` to be run repeatedly with an initial delay and diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/SpawnProtocol.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/SpawnProtocol.scala index cb77e0f0565..36886d5886e 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/SpawnProtocol.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/SpawnProtocol.scala @@ -25,9 +25,7 @@ import akka.annotation.DoNotInherit */ object SpawnProtocol { - /** - * Not for user extension - */ + /** Not for user extension */ @DoNotInherit sealed trait Command /** @@ -44,14 +42,10 @@ object SpawnProtocol { final case class Spawn[T](behavior: Behavior[T], name: String, props: Props, replyTo: ActorRef[ActorRef[T]]) extends Command - /** - * Java API: returns a behavior that can be commanded to spawn arbitrary children. - */ + /** Java API: returns a behavior that can be commanded to spawn arbitrary children. */ def create(): Behavior[Command] = apply() - /** - * Scala API: returns a behavior that can be commanded to spawn arbitrary children. - */ + /** Scala API: returns a behavior that can be commanded to spawn arbitrary children. 
*/ def apply(): Behavior[Command] = Behaviors.receive { (ctx, msg) => msg match { diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/SupervisorStrategy.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/SupervisorStrategy.scala index 9908a7cf77a..86ae8c84095 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/SupervisorStrategy.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/SupervisorStrategy.scala @@ -34,9 +34,7 @@ object SupervisorStrategy { val restart: RestartSupervisorStrategy = Restart(maxRestarts = -1, withinTimeRange = Duration.Zero) - /** - * Stop the actor - */ + /** Stop the actor */ val stop: SupervisorStrategy = Stop(loggingEnabled = true, logLevel = Level.ERROR) /** @@ -109,9 +107,7 @@ object SupervisorStrategy { randomFactor: Double): BackoffSupervisorStrategy = restartWithBackoff(minBackoff.asScala, maxBackoff.asScala, randomFactor) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] case class Resume(loggingEnabled: Boolean, logLevel: Level) extends SupervisorStrategy { override def withLoggingEnabled(enabled: Boolean): SupervisorStrategy = copy(loggingEnabled = enabled) @@ -119,9 +115,7 @@ object SupervisorStrategy { copy(logLevel = level) } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] case class Stop(loggingEnabled: Boolean, logLevel: Level) extends SupervisorStrategy { override def withLoggingEnabled(enabled: Boolean) = copy(loggingEnabled = enabled) @@ -129,9 +123,7 @@ object SupervisorStrategy { copy(logLevel = level) } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] sealed trait RestartOrBackoff extends SupervisorStrategy { def maxRestarts: Int def stopChildren: Boolean @@ -141,9 +133,7 @@ object SupervisorStrategy { def unlimitedRestarts(): Boolean = maxRestarts == -1 } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] final case class Restart( maxRestarts: Int, withinTimeRange: 
FiniteDuration, @@ -173,9 +163,7 @@ object SupervisorStrategy { copy(logLevel = level) } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] final case class Backoff( minBackoff: FiniteDuration, maxBackoff: FiniteDuration, @@ -220,9 +208,7 @@ object SupervisorStrategy { } } -/** - * Not for user extension - */ +/** Not for user extension */ @DoNotInherit sealed abstract class SupervisorStrategy { def loggingEnabled: Boolean @@ -234,9 +220,7 @@ sealed abstract class SupervisorStrategy { } -/** - * Not for user extension - */ +/** Not for user extension */ @DoNotInherit sealed abstract class RestartSupervisorStrategy extends SupervisorStrategy { @@ -293,9 +277,7 @@ sealed abstract class RestartSupervisorStrategy extends SupervisorStrategy { } -/** - * Not for user extension - */ +/** Not for user extension */ @DoNotInherit sealed abstract class BackoffSupervisorStrategy extends SupervisorStrategy { def resetBackoffAfter: FiniteDuration diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/TypedActorContext.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/TypedActorContext.scala index d590af97d6d..2f827e16e82 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/TypedActorContext.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/TypedActorContext.scala @@ -16,13 +16,9 @@ import akka.annotation.DoNotInherit trait TypedActorContext[T] { // this should be a pure interface, i.e. only abstract methods - /** - * Get the `javadsl` of this `ActorContext`. - */ + /** Get the `javadsl` of this `ActorContext`. */ def asJava: javadsl.ActorContext[T] - /** - * Get the `scaladsl` of this `ActorContext`. - */ + /** Get the `scaladsl` of this `ActorContext`. 
*/ def asScala: scaladsl.ActorContext[T] } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/ConsumerController.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/ConsumerController.scala index 3cd0f064870..e881891ddda 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/ConsumerController.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/ConsumerController.scala @@ -127,14 +127,10 @@ object ConsumerController { object SequencedMessage { - /** - * SequencedMessage.message can be `A` or `ChunkedMessage`. - */ + /** SequencedMessage.message can be `A` or `ChunkedMessage`. */ type MessageOrChunk = Any - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def fromChunked[A]( producerId: String, seqNr: SeqNr, @@ -232,42 +228,30 @@ object ConsumerController { def withFlowControlWindow(newFlowControlWindow: Int): Settings = copy(flowControlWindow = newFlowControlWindow) - /** - * Scala API - */ + /** Scala API */ def withResendIntervalMin(newResendIntervalMin: FiniteDuration): Settings = copy(resendIntervalMin = newResendIntervalMin) - /** - * Scala API - */ + /** Scala API */ def withResendIntervalMax(newResendIntervalMax: FiniteDuration): Settings = copy(resendIntervalMax = newResendIntervalMax) - /** - * Java API - */ + /** Java API */ def withResendIntervalMin(newResendIntervalMin: JavaDuration): Settings = copy(resendIntervalMin = newResendIntervalMin.asScala) - /** - * Java API - */ + /** Java API */ def withResendIntervalMax(newResendIntervalMax: JavaDuration): Settings = copy(resendIntervalMax = newResendIntervalMax.asScala) - /** - * Java API - */ + /** Java API */ def getResendIntervalMax(): JavaDuration = resendIntervalMax.asJava def withOnlyFlowControl(newOnlyFlowControl: Boolean): Settings = copy(onlyFlowControl = newOnlyFlowControl) - /** - * Private copy method for internal use only. - */ + /** Private copy method for internal use only. 
*/ private def copy( flowControlWindow: Int = flowControlWindow, resendIntervalMin: FiniteDuration = resendIntervalMin, @@ -300,24 +284,18 @@ object ConsumerController { def apply[A](serviceKey: ServiceKey[Command[A]], settings: Settings): Behavior[Command[A]] = apply(Some(serviceKey), settings) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def apply[A]( serviceKey: Option[ServiceKey[Command[A]]], settings: Settings): Behavior[Command[A]] = { ConsumerControllerImpl(serviceKey, settings) } - /** - * Java API - */ + /** Java API */ def create[A](): Behavior[Command[A]] = apply() - /** - * Java API - */ + /** Java API */ def create[A](settings: Settings): Behavior[Command[A]] = apply(settings) @@ -329,9 +307,7 @@ object ConsumerController { def create[A](serviceKey: ServiceKey[Command[A]]): Behavior[Command[A]] = apply(serviceKey) - /** - * Java API - */ + /** Java API */ def create[A](serviceKey: ServiceKey[Command[A]], settings: Settings): Behavior[Command[A]] = apply(Some(serviceKey), settings) diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/DurableProducerQueue.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/DurableProducerQueue.scala index b8b734d81fd..79e823fee78 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/DurableProducerQueue.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/DurableProducerQueue.scala @@ -32,9 +32,7 @@ object DurableProducerQueue { trait Command[A] - /** - * Request that is used at startup to retrieve the unconfirmed messages and current sequence number. - */ + /** Request that is used at startup to retrieve the unconfirmed messages and current sequence number. 
*/ final case class LoadState[A](replyTo: ActorRef[State[A]]) extends Command[A] /** @@ -91,9 +89,7 @@ object DurableProducerQueue { copy(confirmedSeqNr = confirmedSeqNr -- confirmationQualifiers) } - /** - * If not all chunked messages were stored before crash those partial chunked messages should not be resent. - */ + /** If not all chunked messages were stored before crash those partial chunked messages should not be resent. */ def cleanupPartialChunkedMessages(): State[A] = { if (unconfirmed.isEmpty || unconfirmed.forall(u => u.isFirstChunk && u.isLastChunk)) { this @@ -123,14 +119,10 @@ object DurableProducerQueue { } } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] sealed trait Event extends DeliverySerializable - /** - * The fact (event) that a message has been sent. - */ + /** The fact (event) that a message has been sent. */ final class MessageSent[A]( val seqNr: SeqNr, val message: MessageSent.MessageOrChunk, @@ -178,9 +170,7 @@ object DurableProducerQueue { object MessageSent { - /** - * SequencedMessage.message can be `A` or `ChunkedMessage`. - */ + /** SequencedMessage.message can be `A` or `ChunkedMessage`. 
*/ type MessageOrChunk = Any def apply[A]( @@ -191,9 +181,7 @@ object DurableProducerQueue { timestampMillis: TimestampMillis): MessageSent[A] = new MessageSent(seqNr, message, ack, confirmationQualifier, timestampMillis) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def fromChunked[A]( seqNr: SeqNr, chunkedMessage: ChunkedMessage, @@ -202,9 +190,7 @@ object DurableProducerQueue { timestampMillis: TimestampMillis): MessageSent[A] = new MessageSent(seqNr, chunkedMessage, ack, confirmationQualifier, timestampMillis) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def fromMessageOrChunked[A]( seqNr: SeqNr, message: MessageOrChunk, @@ -218,18 +204,14 @@ object DurableProducerQueue { Some((sent.seqNr, sent.message, sent.ack, sent.confirmationQualifier, sent.timestampMillis)) } - /** - * INTERNAL API: The fact (event) that a message has been confirmed to be delivered and processed. - */ + /** INTERNAL API: The fact (event) that a message has been confirmed to be delivered and processed. */ @InternalApi private[akka] final case class Confirmed( seqNr: SeqNr, confirmationQualifier: ConfirmationQualifier, timestampMillis: TimestampMillis) extends Event - /** - * INTERNAL API: Remove entries related to the confirmationQualifiers that haven't been used for a while. - */ + /** INTERNAL API: Remove entries related to the confirmationQualifiers that haven't been used for a while. 
*/ @InternalApi private[akka] final case class Cleanup(confirmationQualifiers: Set[String]) extends Event } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/ProducerController.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/ProducerController.scala index e2307d1e9d5..e7a9df53ff0 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/ProducerController.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/ProducerController.scala @@ -191,42 +191,30 @@ object ProducerController { def withDurableQueueRetryAttempts(newDurableQueueRetryAttempts: Int): Settings = copy(durableQueueRetryAttempts = newDurableQueueRetryAttempts) - /** - * Scala API - */ + /** Scala API */ def withDurableQueueRequestTimeout(newDurableQueueRequestTimeout: FiniteDuration): Settings = copy(durableQueueRequestTimeout = newDurableQueueRequestTimeout) - /** - * Scala API - */ + /** Scala API */ def withDurableQueueResendFirstInterval(newDurableQueueResendFirstInterval: FiniteDuration): Settings = copy(durableQueueResendFirstInterval = newDurableQueueResendFirstInterval) - /** - * Java API - */ + /** Java API */ def withDurableQueueRequestTimeout(newDurableQueueRequestTimeout: JavaDuration): Settings = copy(durableQueueRequestTimeout = newDurableQueueRequestTimeout.asScala) - /** - * Java API - */ + /** Java API */ def withDurableQueueResendFirstInterval(newDurableQueueResendFirstInterval: JavaDuration): Settings = copy(durableQueueResendFirstInterval = newDurableQueueResendFirstInterval.asScala) - /** - * Java API - */ + /** Java API */ def getDurableQueueRequestTimeout(): JavaDuration = durableQueueRequestTimeout.asJava def withChunkLargeMessagesBytes(newChunkLargeMessagesBytes: Int): Settings = copy(chunkLargeMessagesBytes = newChunkLargeMessagesBytes) - /** - * Private copy method for internal use only. - */ + /** Private copy method for internal use only. 
*/ private def copy( durableQueueRequestTimeout: FiniteDuration = durableQueueRequestTimeout, durableQueueRetryAttempts: Int = durableQueueRetryAttempts, @@ -277,9 +265,7 @@ object ProducerController { ProducerControllerImpl(producerId, durableQueueBehavior, settings, send) } - /** - * Java API - */ + /** Java API */ def create[A]( messageClass: Class[A], producerId: String, @@ -287,9 +273,7 @@ object ProducerController { apply(producerId, durableQueueBehavior.asScala)(ClassTag(messageClass)) } - /** - * Java API - */ + /** Java API */ def create[A]( messageClass: Class[A], producerId: String, diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/WorkPullingProducerController.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/WorkPullingProducerController.scala index 2e5d38d5786..ee29d7f5198 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/WorkPullingProducerController.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/WorkPullingProducerController.scala @@ -131,9 +131,7 @@ object WorkPullingProducerController { */ final case class MessageWithConfirmation[A](message: A, replyTo: ActorRef[Done]) extends UnsealedInternalCommand - /** - * Retrieve information about registered workers. - */ + /** Retrieve information about registered workers. */ final case class GetWorkerStats[A](replyTo: ActorRef[WorkerStats]) extends Command[A] final case class WorkerStats(numberOfWorkers: Int) @@ -193,9 +191,7 @@ object WorkPullingProducerController { def withProducerControllerSettings(newProducerControllerSettings: ProducerController.Settings): Settings = copy(producerControllerSettings = newProducerControllerSettings) - /** - * Private copy method for internal use only. - */ + /** Private copy method for internal use only. 
*/ private def copy( bufferSize: Int = bufferSize, internalAskTimeout: FiniteDuration = internalAskTimeout, @@ -223,9 +219,7 @@ object WorkPullingProducerController { WorkPullingProducerControllerImpl(producerId, workerServiceKey, durableQueueBehavior, settings) } - /** - * Java API - */ + /** Java API */ def create[A]( messageClass: Class[A], producerId: String, @@ -234,9 +228,7 @@ object WorkPullingProducerController { apply(producerId, workerServiceKey, durableQueueBehavior.asScala)(ClassTag(messageClass)) } - /** - * Java API - */ + /** Java API */ def apply[A]( messageClass: Class[A], producerId: String, diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/internal/ChunkedMessage.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/internal/ChunkedMessage.scala index ed1b8905218..d8afc4dc9c8 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/internal/ChunkedMessage.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/internal/ChunkedMessage.scala @@ -7,9 +7,7 @@ package akka.actor.typed.delivery.internal import akka.annotation.InternalApi import akka.util.ByteString -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class ChunkedMessage( serialized: ByteString, firstChunk: Boolean, diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/internal/ConsumerControllerImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/internal/ConsumerControllerImpl.scala index 5a0cdcbe2f3..7577054bb26 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/internal/ConsumerControllerImpl.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/internal/ConsumerControllerImpl.scala @@ -355,8 +355,8 @@ private class ConsumerControllerImpl[A] private ( case _: UnsealedInternalCommand => Behaviors.unhandled } - .receiveSignal { - case (_, PostStop) => postStop(s) + .receiveSignal { case (_, PostStop) => + postStop(s) } } 
@@ -480,8 +480,8 @@ private class ConsumerControllerImpl[A] private ( case _: UnsealedInternalCommand => Behaviors.unhandled } - .receiveSignal { - case (_, PostStop) => postStop(s) + .receiveSignal { case (_, PostStop) => + postStop(s) } } @@ -638,8 +638,8 @@ private class ConsumerControllerImpl[A] private ( case _: UnsealedInternalCommand => Behaviors.unhandled } - .receiveSignal { - case (_, PostStop) => postStop(s) + .receiveSignal { case (_, PostStop) => + postStop(s) } } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/internal/DeliverySerializable.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/internal/DeliverySerializable.scala index a8233a5ba3d..c157167f457 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/internal/DeliverySerializable.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/internal/DeliverySerializable.scala @@ -6,7 +6,5 @@ package akka.actor.typed.delivery.internal import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] trait DeliverySerializable diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/internal/ProducerControllerImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/internal/ProducerControllerImpl.scala index 9fbe85a5cf6..88435d6a3e5 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/internal/ProducerControllerImpl.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/internal/ProducerControllerImpl.scala @@ -232,8 +232,8 @@ object ProducerControllerImpl { send: SequencedMessage[A] => Unit, producer: ActorRef[RequestNext[A]], loadedState: DurableProducerQueue.State[A]): State[A] = { - val unconfirmed = loadedState.unconfirmed.toVector.zipWithIndex.map { - case (u, i) => SequencedMessage[A](producerId, u.seqNr, u.message, i == 0, u.ack)(self) + val unconfirmed = loadedState.unconfirmed.toVector.zipWithIndex.map { case (u, i) => + 
SequencedMessage[A](producerId, u.seqNr, u.message, i == 0, u.ack)(self) } State( requested = false, @@ -346,7 +346,12 @@ object ProducerControllerImpl { val manifest = Serializers.manifestFor(ser, mAnyRef) val serializerId = ser.identifier if (bytes.length <= chunkSize) { - ChunkedMessage(ByteString.fromArrayUnsafe(bytes), firstChunk = true, lastChunk = true, serializerId, manifest) :: Nil + ChunkedMessage( + ByteString.fromArrayUnsafe(bytes), + firstChunk = true, + lastChunk = true, + serializerId, + manifest) :: Nil } else { val builder = Vector.newBuilder[ChunkedMessage] val chunksIter = ByteString.fromArrayUnsafe(bytes).grouped(chunkSize) @@ -525,8 +530,8 @@ private class ProducerControllerImpl[A: ClassTag]( val (replies, newReplyAfterStore) = s.replyAfterStore.partition { case (seqNr, _) => seqNr <= newConfirmedSeqNr } if (replies.nonEmpty && traceEnabled) context.log.trace("Sending confirmation replies from [{}] to [{}].", replies.head._1, replies.last._1) - replies.foreach { - case (seqNr, replyTo) => replyTo ! seqNr + replies.foreach { case (seqNr, replyTo) => + replyTo ! 
seqNr } val newUnconfirmed = diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/internal/WorkPullingProducerControllerImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/internal/WorkPullingProducerControllerImpl.scala index ae5d4d518c4..622f7c01770 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/internal/WorkPullingProducerControllerImpl.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/delivery/internal/WorkPullingProducerControllerImpl.scala @@ -31,9 +31,7 @@ import akka.actor.typed.scaladsl.StashBuffer import akka.annotation.InternalApi import akka.util.Timeout -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object WorkPullingProducerControllerImpl { import WorkPullingProducerController.Command @@ -372,8 +370,9 @@ private class WorkPullingProducerControllerImpl[A: ClassTag]( // msg ResendDurableMsg, and stashed before storage false } else { - throw new IllegalStateException(s"Invalid combination of hasRequested [${s.requested}], " + - s"wasStashed [$wasStashed], hasMoreDemand [$hasMoreDemand], stashBuffer.isEmpty [${stashBuffer.isEmpty}]") + throw new IllegalStateException( + s"Invalid combination of hasRequested [${s.requested}], " + + s"wasStashed [$wasStashed], hasMoreDemand [$hasMoreDemand], stashBuffer.isEmpty [${stashBuffer.isEmpty}]") } s.copy(out = newOut, requested = newRequested, preselectedWorkers = s.preselectedWorkers - totalSeqNr) @@ -388,8 +387,8 @@ private class WorkPullingProducerControllerImpl[A: ClassTag]( def selectWorker(): Option[(OutKey, OutState[A])] = { val preselected = s.preselectedWorkers.valuesIterator.map(_.outKey).toSet - val workers = workersWithDemand.filterNot { - case (outKey, _) => preselected(outKey) + val workers = workersWithDemand.filterNot { case (outKey, _) => + preselected(outKey) } if (workers.isEmpty) { None @@ -443,7 +442,8 @@ private class WorkPullingProducerControllerImpl[A: ClassTag]( currentSeqNr = s.currentSeqNr 
+ 1, preselectedWorkers = s.preselectedWorkers.updated(s.currentSeqNr, PreselectedWorker(outKey, out.confirmationQualifier)), - handOver = s.handOver.updated(s.currentSeqNr, HandOver(resend.oldConfirmationQualifier, resend.oldSeqNr)))) + handOver = + s.handOver.updated(s.currentSeqNr, HandOver(resend.oldConfirmationQualifier, resend.oldSeqNr)))) case None => checkStashFull(stashBuffer) // no demand from any workers, or all already preselected @@ -488,8 +488,8 @@ private class WorkPullingProducerControllerImpl[A: ClassTag]( } def onAck(outState: OutState[A], confirmedSeqNr: OutSeqNr): Vector[Unconfirmed[A]] = { - val (confirmed, newUnconfirmed) = outState.unconfirmed.partition { - case Unconfirmed(_, seqNr, _, _) => seqNr <= confirmedSeqNr + val (confirmed, newUnconfirmed) = outState.unconfirmed.partition { case Unconfirmed(_, seqNr, _, _) => + seqNr <= confirmedSeqNr } if (confirmed.nonEmpty) { @@ -582,12 +582,11 @@ private class WorkPullingProducerControllerImpl[A: ClassTag]( key, outState.unconfirmed.head.outSeqNr, outState.unconfirmed.last.outSeqNr) - outState.unconfirmed.foreach { - case Unconfirmed(totalSeqNr, _, msg, replyTo) => - if (durableQueue.isEmpty) - context.self ! Msg(msg, wasStashed = true, replyTo) - else - context.self ! ResendDurableMsg(msg, outState.confirmationQualifier, totalSeqNr) + outState.unconfirmed.foreach { case Unconfirmed(totalSeqNr, _, msg, replyTo) => + if (durableQueue.isEmpty) + context.self ! Msg(msg, wasStashed = true, replyTo) + else + context.self ! 
ResendDurableMsg(msg, outState.confirmationQualifier, totalSeqNr) } acc.copy(out = acc.out - key) diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/eventstream/EventStream.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/eventstream/EventStream.scala index fc40c165a7f..ee3d180e218 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/eventstream/EventStream.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/eventstream/EventStream.scala @@ -44,18 +44,13 @@ object EventStream { * def subscribe(actorSystem: ActorSystem[_], actorRef: ActorRef[A]) = * actorSystem.eventStream ! EventStream.Subscribe[A1](actorRef) * }}} - * */ final case class Subscribe[E](subscriber: ActorRef[E])(implicit classTag: ClassTag[E]) extends Command { - /** - * Java API. - */ + /** Java API. */ def this(clazz: Class[E], subscriber: ActorRef[E]) = this(subscriber)(ClassTag(clazz)) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def topic: Class[_] = classTag.runtimeClass } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ActorContextImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ActorContextImpl.scala index 61e6ff576fa..5e6709712d6 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ActorContextImpl.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ActorContextImpl.scala @@ -30,9 +30,7 @@ import akka.util.JavaDurationConverters._ import akka.util.OptionVal import akka.util.Timeout -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ActorContextImpl { // single context for logging as there are a few things that are initialized @@ -81,9 +79,7 @@ import akka.util.Timeout } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] trait ActorContextImpl[T] extends TypedActorContext[T] with javadsl.ActorContext[T] @@ -220,7 +216,7 @@ import akka.util.Timeout override def ask[Req, Res](target: 
RecipientRef[Req], createRequest: ActorRef[Res] => Req)( mapResponse: Try[Res] => T)(implicit responseTimeout: Timeout, classTag: ClassTag[Res]): Unit = { import akka.actor.typed.scaladsl.AskPattern._ - pipeToSelf((target.ask(createRequest))(responseTimeout, system.scheduler))(mapResponse) + pipeToSelf(target.ask(createRequest)(responseTimeout, system.scheduler))(mapResponse) } override def askWithStatus[Req, Res](target: RecipientRef[Req], createRequest: ActorRef[StatusReply[Res]] => Req)( @@ -229,7 +225,7 @@ import akka.util.Timeout case Success(StatusReply.Success(t: Res)) => mapResponse(Success(t)) case Success(StatusReply.Error(why)) => mapResponse(Failure(why)) case fail: Failure[_] => mapResponse(fail.asInstanceOf[Failure[Res]]) - case _ => throw new RuntimeException() // won't happen, compiler exhaustiveness check pleaser + case _ => throw new RuntimeException() // won't happen, compiler exhaustiveness check pleaser } // Java API impl @@ -263,7 +259,7 @@ import akka.util.Timeout case StatusReply.Success(value: Res) => applyToResponse(value, null) case StatusReply.Error(why) => applyToResponse(null.asInstanceOf[Res], why) case null => applyToResponse(null.asInstanceOf[Res], failure) - case _ => throw new RuntimeException() // won't happen, compiler exhaustiveness check pleaser + case _ => throw new RuntimeException() // won't happen, compiler exhaustiveness check pleaser }) } @@ -322,14 +318,10 @@ import akka.util.Timeout ref.asInstanceOf[ActorRef[U]] } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def messageAdapters: List[(Class[_], Any => T)] = _messageAdapters - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def setCurrentActorThread(): Unit = { _currentActorThread match { case OptionVal.Some(t) => @@ -341,16 +333,12 @@ import akka.util.Timeout } } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def clearCurrentActorThread(): Unit = { _currentActorThread = 
OptionVal.None } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def checkCurrentActorThread(): Unit = { val callerThread = Thread.currentThread() _currentActorThread match { diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ActorFlightRecorder.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ActorFlightRecorder.scala index 3096568fd5c..8662304372d 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ActorFlightRecorder.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ActorFlightRecorder.scala @@ -9,9 +9,7 @@ import akka.actor.typed.{ ActorSystem, Extension, ExtensionId } import akka.annotation.InternalApi import akka.util.FlightRecorderLoader -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi object ActorFlightRecorder extends ExtensionId[ActorFlightRecorder] { @@ -22,17 +20,13 @@ object ActorFlightRecorder extends ExtensionId[ActorFlightRecorder] { NoOpActorFlightRecorder) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] trait ActorFlightRecorder extends Extension { val delivery: DeliveryFlightRecorder } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] trait DeliveryFlightRecorder { def producerCreated(producerId: String, path: ActorPath): Unit @@ -70,9 +64,7 @@ private[akka] case object NoOpActorFlightRecorder extends ActorFlightRecorder { override val delivery: DeliveryFlightRecorder = NoOpDeliveryFlightRecorder } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object NoOpDeliveryFlightRecorder extends DeliveryFlightRecorder { override def producerCreated(producerId: String, path: ActorPath): Unit = () diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ActorMdc.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ActorMdc.scala index f48773d2bea..f499bb7f6cd 100644 --- 
a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ActorMdc.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ActorMdc.scala @@ -8,9 +8,7 @@ import org.slf4j.MDC import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ActorMdc { val SourceActorSystemKey = "sourceActorSystem" val AkkaSourceKey = "akkaSource" diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ActorRefImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ActorRefImpl.scala index d312f7bd45a..9bffdc3c7fc 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ActorRefImpl.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ActorRefImpl.scala @@ -21,9 +21,7 @@ private[akka] trait ActorRefImpl[-T] extends ActorRef[T] { this: InternalRecipie final override def unsafeUpcast[U >: T @uncheckedVariance]: ActorRef[U] = this.asInstanceOf[ActorRef[U]] - /** - * Comparison takes path and the unique id of the actor cell into account. - */ + /** Comparison takes path and the unique id of the actor cell into account. */ final override def compareTo(other: ActorRef[_]) = { val x = this.path.compareTo(other.path) if (x == 0) if (this.path.uid < other.path.uid) -1 else if (this.path.uid == other.path.uid) 0 else 1 @@ -32,9 +30,7 @@ private[akka] trait ActorRefImpl[-T] extends ActorRef[T] { this: InternalRecipie final override def hashCode: Int = path.uid - /** - * Equals takes path and the unique id of the actor cell into account. - */ + /** Equals takes path and the unique id of the actor cell into account. 
*/ final override def equals(that: Any): Boolean = that match { case other: ActorRef[_] => path.uid == other.path.uid && path == other.path case _ => false diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/BehaviorImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/BehaviorImpl.scala index c848ec344d6..967eb59bb50 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/BehaviorImpl.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/BehaviorImpl.scala @@ -12,9 +12,7 @@ import akka.actor.typed.scaladsl.{ ActorContext => SAC } import akka.annotation.InternalApi import akka.util.{ LineNumbers, OptionVal } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object BehaviorTags { @@ -32,9 +30,7 @@ private[akka] object BehaviorTags { } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object BehaviorImpl { implicit class ContextAs[T](val ctx: AC[T]) extends AnyVal { diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/CachedProps.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/CachedProps.scala index 55daff288e9..32267153467 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/CachedProps.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/CachedProps.scala @@ -7,9 +7,7 @@ package akka.actor.typed.internal import akka.actor.typed.Props import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi final private[akka] case class CachedProps( typedProps: Props, diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ExtensionsImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ExtensionsImpl.scala index d627e3a11e3..415bf7f1e64 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ExtensionsImpl.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ExtensionsImpl.scala @@ -24,9 +24,7 
@@ private[akka] trait ExtensionsImpl extends Extensions { self: ActorSystem[_] wit private val extensions = new ConcurrentHashMap[ExtensionId[_], AnyRef] - /** - * Hook for ActorSystem to load extensions on startup - */ + /** Hook for ActorSystem to load extensions on startup */ def loadExtensions(): Unit = { /* @@ -36,8 +34,8 @@ private[akka] trait ExtensionsImpl extends Extensions { self: ActorSystem[_] wit settings.config.getStringList(key).asScala.foreach { extensionIdFQCN => // it is either a Scala object or it is a Java class with a static singleton accessor - val idTry = dynamicAccess.getObjectFor[AnyRef](extensionIdFQCN).recoverWith { - case _ => idFromJavaSingletonAccessor(extensionIdFQCN) + val idTry = dynamicAccess.getObjectFor[AnyRef](extensionIdFQCN).recoverWith { case _ => + idFromJavaSingletonAccessor(extensionIdFQCN) } idTry match { @@ -100,29 +98,27 @@ private[akka] trait ExtensionsImpl extends Extensions { self: ActorSystem[_] wit } } catch { case t: Throwable => - //In case shit hits the fan, remove the inProcess signal and escalate to caller + // In case shit hits the fan, remove the inProcess signal and escalate to caller extensions.replace(ext, inProcessOfRegistration, t) throw t } finally { - //Always notify listeners of the inProcess signal + // Always notify listeners of the inProcess signal inProcessOfRegistration.countDown() } case _ => - //Someone else is in process of registering an extension for this Extension, retry + // Someone else is in process of registering an extension for this Extension, retry registerExtension(ext) } } - /** - * Returns any extension registered to the specified Extension or returns null if not registered - */ + /** Returns any extension registered to the specified Extension or returns null if not registered */ @tailrec private def findExtension[T <: Extension](ext: ExtensionId[T]): T = extensions.get(ext) match { case c: CountDownLatch => - //Registration in process, await completion and retry + // 
Registration in process, await completion and retry c.await() findExtension(ext) - case t: Throwable => throw t //Initialization failed, throw same again - case other => other.asInstanceOf[T] //could be a T or null, in which case we return the null as T + case t: Throwable => throw t // Initialization failed, throw same again + case other => other.asInstanceOf[T] // could be a T or null, in which case we return the null as T } } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/InterceptorImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/InterceptorImpl.scala index 58926ff514a..19d880f1288 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/InterceptorImpl.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/InterceptorImpl.scala @@ -95,7 +95,8 @@ private[akka] final class InterceptorImpl[O, I]( private def deduplicate(interceptedResult: Behavior[I], ctx: TypedActorContext[O]): Behavior[O] = { val started = Behavior.start(interceptedResult, ctx.asInstanceOf[TypedActorContext[I]]) - if (started == BehaviorImpl.UnhandledBehavior || started == BehaviorImpl.SameBehavior || !Behavior.isAlive(started)) { + if (started == BehaviorImpl.UnhandledBehavior || started == BehaviorImpl.SameBehavior || !Behavior.isAlive( + started)) { started.unsafeCast[O] } else { // returned behavior could be nested in setups, so we need to start before we deduplicate @@ -137,9 +138,7 @@ private[akka] final case class MonitorInterceptor[T: ClassTag](actorRef: ActorRe } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object LogMessagesInterceptor { def apply[T](opts: LogOptions): BehaviorInterceptor[T, T] = { new LogMessagesInterceptor(opts).asInstanceOf[BehaviorInterceptor[T, T]] @@ -192,9 +191,7 @@ private[akka] final class LogMessagesInterceptor(val opts: LogOptions) extends B } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object TransformMessagesInterceptor 
{ @@ -203,9 +200,7 @@ private[akka] object TransformMessagesInterceptor { private final def any2NotMatchIndicator[T] = _any2NotMatchIndicator.asInstanceOf[Any => T] } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class TransformMessagesInterceptor[O: ClassTag, I](matcher: PartialFunction[O, I]) extends BehaviorInterceptor[O, I] { diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/InternalMessage.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/InternalMessage.scala index 4d1c87a268f..87bf2ac9adf 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/InternalMessage.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/InternalMessage.scala @@ -7,9 +7,7 @@ package akka.actor.typed.internal import akka.actor.WrappedMessage import akka.annotation.InternalApi -/** - * A marker trait for internal messages. - */ +/** A marker trait for internal messages. */ @InternalApi private[akka] sealed trait InternalMessage /** diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/InternalRecipientRef.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/InternalRecipientRef.scala index a2be8457f29..2b297f03baa 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/InternalRecipientRef.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/InternalRecipientRef.scala @@ -8,19 +8,13 @@ import akka.actor.ActorRefProvider import akka.actor.typed.RecipientRef import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] trait InternalRecipientRef[-T] extends RecipientRef[T] { - /** - * Get a reference to the actor ref provider which created this ref. - */ + /** Get a reference to the actor ref provider which created this ref. */ def provider: ActorRefProvider - /** - * @return `true` if the actor is locally known to be terminated, `false` if alive or uncertain. 
- */ + /** @return `true` if the actor is locally known to be terminated, `false` if alive or uncertain. */ def isTerminated: Boolean def refPrefix: String = toString diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/LoggerClass.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/LoggerClass.scala index c52e5927abf..91a616ac00b 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/LoggerClass.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/LoggerClass.scala @@ -9,9 +9,7 @@ import scala.util.control.NonFatal import akka.annotation.InternalApi import akka.util.OptionVal -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object LoggerClass { @@ -22,9 +20,7 @@ private[akka] object LoggerClass { private val defaultPrefixesToSkip = List("scala.runtime", "akka.actor.typed.internal") - /** - * Try to extract a logger class from the call stack, if not possible the provided default is used - */ + /** Try to extract a logger class from the call stack, if not possible the provided default is used */ def detectLoggerClassFromStack(default: Class[_], additionalPrefixesToSkip: List[String] = Nil): Class[_] = { // TODO use stack walker API when we no longer need to support Java 8 try { diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/MiscMessageSerializer.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/MiscMessageSerializer.scala index 09ba692ef57..32cf82ae9f9 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/MiscMessageSerializer.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/MiscMessageSerializer.scala @@ -12,9 +12,7 @@ import akka.actor.typed.scaladsl.adapter._ import akka.annotation.InternalApi import akka.serialization.{ BaseSerializer, SerializerWithStringManifest } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class MiscMessageSerializer(val system: 
akka.actor.ExtendedActorSystem) extends SerializerWithStringManifest with BaseSerializer { diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/PoisonPill.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/PoisonPill.scala index 7fdb40b350c..cf0e85afefd 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/PoisonPill.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/PoisonPill.scala @@ -19,9 +19,7 @@ import akka.annotation.InternalApi */ @InternalApi private[akka] sealed abstract class PoisonPill extends Signal -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] case object PoisonPill extends PoisonPill { def instance: PoisonPill = this } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/PropsImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/PropsImpl.scala index 381a280673e..6bee9773ec2 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/PropsImpl.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/PropsImpl.scala @@ -8,9 +8,7 @@ import akka.actor.typed.{ DispatcherSelector, MailboxSelector, Props } import akka.actor.typed.ActorTags import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object PropsImpl { /** diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/StashBufferImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/StashBufferImpl.scala index 374782589cd..9b6ec7b9298 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/StashBufferImpl.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/StashBufferImpl.scala @@ -22,9 +22,7 @@ import akka.japi.function.Procedure import akka.util.{ unused, ConstantFun } import akka.util.OptionVal -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object StashBufferImpl { private[akka] final class Node[T](var 
next: Node[T], val message: T) { def apply(f: T => Unit): Unit = f(message) @@ -34,9 +32,7 @@ import akka.util.OptionVal new StashBufferImpl(ctx, capacity, null, null) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class StashBufferImpl[T] private ( ctx: ActorContext[T], val capacity: Int, @@ -187,14 +183,15 @@ import akka.util.OptionVal else { val node = messages.next() val message = wrap(node.message) - val interpretResult = try { - message match { - case sig: Signal => Behavior.interpretSignal(b2, ctx, sig) - case msg => interpretUnstashedMessage(b2, ctx, msg, node) + val interpretResult = + try { + message match { + case sig: Signal => Behavior.interpretSignal(b2, ctx, sig) + case msg => interpretUnstashedMessage(b2, ctx, msg, node) + } + } catch { + case NonFatal(e) => throw UnstashException(e, b2) } - } catch { - case NonFatal(e) => throw UnstashException(e, b2) - } val actualNext = if (interpretResult == BehaviorImpl.same) b2 @@ -255,9 +252,7 @@ import akka.util.OptionVal } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object UnstashException { def unwrap(t: Throwable): Throwable = t match { case UnstashException(e, _) => e diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/Supervision.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/Supervision.scala index 9621d480137..9e22112e3d9 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/Supervision.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/Supervision.scala @@ -29,13 +29,11 @@ import akka.event.Logging import akka.util.OptionVal import akka.util.unused -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object Supervisor { def apply[T, Thr <: Throwable: ClassTag](initialBehavior: Behavior[T], strategy: SupervisorStrategy): Behavior[T] = { if (initialBehavior.isInstanceOf[scaladsl.AbstractBehavior[_]] || initialBehavior - 
.isInstanceOf[javadsl.AbstractBehavior[_]]) { + .isInstanceOf[javadsl.AbstractBehavior[_]]) { throw new IllegalArgumentException( "The supervised Behavior must not be a AbstractBehavior instance directly," + "because a different instance should be created when it is restarted. Wrap in Behaviors.setup.") @@ -54,9 +52,7 @@ import akka.util.unused } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private abstract class AbstractSupervisor[I, Thr <: Throwable](strategy: SupervisorStrategy)(implicit ev: ClassTag[Thr]) extends BehaviorInterceptor[Any, I] { @@ -121,9 +117,7 @@ private abstract class AbstractSupervisor[I, Thr <: Throwable](strategy: Supervi override def toString: String = Logging.simpleName(getClass) } -/** - * For cases where O == I for BehaviorInterceptor. - */ +/** For cases where O == I for BehaviorInterceptor. */ private abstract class SimpleSupervisor[T, Thr <: Throwable: ClassTag](ss: SupervisorStrategy) extends AbstractSupervisor[T, Thr](ss) { @@ -170,9 +164,7 @@ private class ResumeSupervisor[T, Thr <: Throwable: ClassTag](ss: Resume) extend private object RestartSupervisor { - /** - * Calculates an exponential back off delay. - */ + /** Calculates an exponential back off delay. 
*/ def calculateDelay( restartCount: Int, minBackoff: FiniteDuration, @@ -304,18 +296,22 @@ private class RestartSupervisor[T, Thr <: Throwable: ClassTag](initial: Behavior override protected def handleSignalException( ctx: TypedActorContext[Any], target: SignalTarget[T]): Catcher[Behavior[T]] = { - handleException(ctx, signalRestart = { - case e: UnstashException[Any] @unchecked => Behavior.interpretSignal(e.behavior, ctx, PreRestart) - case _ => target(ctx, PreRestart) - }) + handleException( + ctx, + signalRestart = { + case e: UnstashException[Any] @unchecked => Behavior.interpretSignal(e.behavior, ctx, PreRestart) + case _ => target(ctx, PreRestart) + }) } override protected def handleReceiveException( ctx: TypedActorContext[Any], target: ReceiveTarget[T]): Catcher[Behavior[T]] = { - handleException(ctx, signalRestart = { - case e: UnstashException[Any] @unchecked => Behavior.interpretSignal(e.behavior, ctx, PreRestart) - case _ => target.signalRestart(ctx) - }) + handleException( + ctx, + signalRestart = { + case e: UnstashException[Any] @unchecked => Behavior.interpretSignal(e.behavior, ctx, PreRestart) + case _ => target.signalRestart(ctx) + }) } private def handleException(ctx: TypedActorContext[Any], signalRestart: Throwable => Unit): Catcher[Behavior[T]] = { @@ -357,7 +353,7 @@ private class RestartSupervisor[T, Thr <: Throwable: ClassTag](initial: Behavior // new stash only if there is no already on-going restart with previously stashed messages val stashBufferForRestart = restartingInProgress match { case OptionVal.Some((stashBuffer, _)) => stashBuffer - case _ => StashBuffer[Any](ctx.asScala.asInstanceOf[scaladsl.ActorContext[Any]], stashCapacity) + case _ => StashBuffer[Any](ctx.asScala.asInstanceOf[scaladsl.ActorContext[Any]], stashCapacity) } restartingInProgress = OptionVal.Some((stashBufferForRestart, childrenToStop)) strategy match { @@ -397,10 +393,13 @@ private class RestartSupervisor[T, Thr <: Throwable: ClassTag](initial: Behavior case _ => 
newBehavior } nextBehavior.narrow - } catch handleException(ctx, signalRestart = { - case e: UnstashException[Any] @unchecked => Behavior.interpretSignal(e.behavior, ctx, PreRestart) - case _ => () - }) + } catch + handleException( + ctx, + signalRestart = { + case e: UnstashException[Any] @unchecked => Behavior.interpretSignal(e.behavior, ctx, PreRestart) + case _ => () + }) } private def stopChildren(ctx: TypedActorContext[_], children: Set[ActorRef[Nothing]]): Unit = { diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/SystemMessage.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/SystemMessage.scala index 37420074065..ea24a21ea47 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/SystemMessage.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/SystemMessage.scala @@ -33,7 +33,6 @@ private[typed] object SystemMessageList { } /** - * * INTERNAL API * * Value class supporting list operations on system messages. The `next` field of [[SystemMessage]] @@ -45,24 +44,17 @@ private[typed] object SystemMessageList { * * The type of the list also encodes that the messages contained are in reverse order, i.e. the head of the list is the * latest appended element. - * */ private[typed] class LatestFirstSystemMessageList(val head: SystemMessage) extends AnyVal { import SystemMessageList._ - /** - * Indicates if the list is empty or not. This operation has constant cost. - */ + /** Indicates if the list is empty or not. This operation has constant cost. */ final def isEmpty: Boolean = head eq null - /** - * Indicates if the list has at least one element or not. This operation has constant cost. - */ + /** Indicates if the list has at least one element or not. This operation has constant cost. */ final def nonEmpty: Boolean = head ne null - /** - * Indicates if the list is empty or not. This operation has constant cost. - */ + /** Indicates if the list is empty or not. This operation has constant cost. 
*/ final def size: Int = sizeInner(head, 0) /** @@ -82,9 +74,7 @@ private[typed] class LatestFirstSystemMessageList(val head: SystemMessage) exten */ final def reverse: EarliestFirstSystemMessageList = new EarliestFirstSystemMessageList(reverseInner(head, null)) - /** - * Attaches a message to the current head of the list. This operation has constant cost. - */ + /** Attaches a message to the current head of the list. This operation has constant cost. */ final def ::(msg: SystemMessage): LatestFirstSystemMessageList = { assert(msg ne null) msg.next = head @@ -94,7 +84,6 @@ private[typed] class LatestFirstSystemMessageList(val head: SystemMessage) exten } /** - * * INTERNAL API * * Value class supporting list operations on system messages. The `next` field of [[SystemMessage]] @@ -106,24 +95,17 @@ private[typed] class LatestFirstSystemMessageList(val head: SystemMessage) exten * * This list type also encodes that the messages contained are in reverse order, i.e. the head of the list is the * latest appended element. - * */ private[typed] class EarliestFirstSystemMessageList(val head: SystemMessage) extends AnyVal { import SystemMessageList._ - /** - * Indicates if the list is empty or not. This operation has constant cost. - */ + /** Indicates if the list is empty or not. This operation has constant cost. */ final def isEmpty: Boolean = head eq null - /** - * Indicates if the list has at least one element or not. This operation has constant cost. - */ + /** Indicates if the list has at least one element or not. This operation has constant cost. */ final def nonEmpty: Boolean = head ne null - /** - * Indicates if the list is empty or not. This operation has constant cost. - */ + /** Indicates if the list is empty or not. This operation has constant cost. 
*/ final def size: Int = sizeInner(head, 0) /** @@ -143,9 +125,7 @@ private[typed] class EarliestFirstSystemMessageList(val head: SystemMessage) ext */ final def reverse: LatestFirstSystemMessageList = new LatestFirstSystemMessageList(reverseInner(head, null)) - /** - * Attaches a message to the current head of the list. This operation has constant cost. - */ + /** Attaches a message to the current head of the list. This operation has constant cost. */ final def ::(msg: SystemMessage): EarliestFirstSystemMessageList = { assert(msg ne null) msg.next = head @@ -196,39 +176,27 @@ private[akka] sealed trait SystemMessage extends Serializable { def unlinked: Boolean = next eq null } -/** - * INTERNAL API - */ +/** INTERNAL API */ @SerialVersionUID(1L) private[akka] final case class Create() extends SystemMessage -/** - * INTERNAL API - */ +/** INTERNAL API */ @SerialVersionUID(1L) private[akka] final case class Terminate() extends SystemMessage -/** - * INTERNAL API - */ +/** INTERNAL API */ @SerialVersionUID(1L) private[akka] final case class Watch(watchee: ActorRef[Nothing], watcher: ActorRef[Nothing]) extends SystemMessage -/** - * INTERNAL API - */ +/** INTERNAL API */ @SerialVersionUID(1L) private[akka] final case class Unwatch(watchee: ActorRef[Nothing], watcher: ActorRef[Nothing]) extends SystemMessage -/** - * INTERNAL API - */ +/** INTERNAL API */ @SerialVersionUID(1L) private[akka] final case class DeathWatchNotification(actor: ActorRef[Nothing], failureCause: Throwable) extends SystemMessage -/** - * INTERNAL API - */ +/** INTERNAL API */ @SerialVersionUID(1L) private[akka] case object NoMessage extends SystemMessage // switched into the mailbox to signal termination diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/TimerSchedulerImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/TimerSchedulerImpl.scala index ac64180697a..757a1a9ceab 100644 --- 
a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/TimerSchedulerImpl.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/TimerSchedulerImpl.scala @@ -17,9 +17,7 @@ import akka.annotation.InternalApi import akka.dispatch.ExecutionContexts import akka.util.OptionVal -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object TimerSchedulerImpl { final case class Timer[T](key: Any, msg: T, repeat: Boolean, generation: Int, task: Cancellable) sealed class TimerMsg(val key: Any, val generation: Int, val owner: AnyRef) { @@ -70,7 +68,7 @@ import akka.util.OptionVal startTimerAtFixedRate(key, msg, initialDelay.asScala, interval.asScala) override final def startPeriodicTimer(key: Any, msg: T, interval: Duration): Unit = { - //this follows the deprecation note in the super class + // this follows the deprecation note in the super class startTimerWithFixedDelay(key, msg, interval.asScala) } @@ -78,9 +76,7 @@ import akka.util.OptionVal startSingleTimer(key, msg, delay.asScala) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class TimerSchedulerImpl[T](ctx: ActorContext[T]) extends scaladsl.TimerScheduler[T] with TimerSchedulerCrossDslSupport[T] { diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/WithMdcBehaviorInterceptor.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/WithMdcBehaviorInterceptor.scala index 1b6578ec882..5b9b73af57a 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/WithMdcBehaviorInterceptor.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/WithMdcBehaviorInterceptor.scala @@ -11,9 +11,7 @@ import org.slf4j.MDC import akka.actor.typed.{ Behavior, BehaviorInterceptor, Signal, TypedActorContext } import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object WithMdcBehaviorInterceptor { val noMdcPerMessage = (_: Any) => Map.empty[String, String] @@ 
-96,11 +94,11 @@ import akka.annotation.InternalApi } private def setMdcValues(dynamicMdc: Map[String, String]): Unit = { - if (staticMdc.nonEmpty) staticMdc.foreach { - case (key, value) => MDC.put(key, value) + if (staticMdc.nonEmpty) staticMdc.foreach { case (key, value) => + MDC.put(key, value) } - if (dynamicMdc.nonEmpty) dynamicMdc.foreach { - case (key, value) => MDC.put(key, value) + if (dynamicMdc.nonEmpty) dynamicMdc.foreach { case (key, value) => + MDC.put(key, value) } } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorAdapter.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorAdapter.scala index ec7f2b46b49..c7d1e1bf168 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorAdapter.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorAdapter.scala @@ -23,9 +23,7 @@ import akka.actor.typed.internal.adapter.ActorAdapter.TypedActorFailedException import akka.annotation.InternalApi import akka.util.OptionVal -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[typed] object ActorAdapter { /** @@ -38,8 +36,8 @@ import akka.util.OptionVal */ final case class TypedActorFailedException(cause: Throwable) extends RuntimeException - private val DummyReceive: classic.Actor.Receive = { - case _ => throw new RuntimeException("receive should never be called on the typed ActorAdapter") + private val DummyReceive: classic.Actor.Receive = { case _ => + throw new RuntimeException("receive should never be called on the typed ActorAdapter") } private val classicSupervisorDecider: Throwable => classic.SupervisorStrategy.Directive = { exc => @@ -49,9 +47,7 @@ import akka.util.OptionVal } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[typed] final class ActorAdapter[T](_initialBehavior: Behavior[T], rethrowTypedFailure: Boolean) extends classic.Actor { @@ -120,7 +116,7 @@ import akka.util.OptionVal if (c.hasTimer) { 
msg match { case timerMsg: TimerMsg => - //we can only get this kind of message if the timer is of this concrete class + // we can only get this kind of message if the timer is of this concrete class c.timer.asInstanceOf[TimerSchedulerImpl[T]].interceptTimerMsg(ctx.log, timerMsg) match { case OptionVal.Some(m) => next(Behavior.interpretMessage(behavior, c, m), m) @@ -189,17 +185,18 @@ import akka.util.OptionVal private def withSafelyAdapted[U, V](adapt: () => U)(body: U => V): Unit = { var failed = false - val adapted: U = try { - adapt() - } catch { - case NonFatal(ex) => - // pass it on through the signal handler chain giving supervision a chance to deal with it - handleSignal(MessageAdaptionFailure(ex)) - // Signal handler should actually throw so this is mostly to keep compiler happy (although a user could override - // the MessageAdaptionFailure handling to do something weird) - failed = true - null.asInstanceOf[U] - } + val adapted: U = + try { + adapt() + } catch { + case NonFatal(ex) => + // pass it on through the signal handler chain giving supervision a chance to deal with it + handleSignal(MessageAdaptionFailure(ex)) + // Signal handler should actually throw so this is mostly to keep compiler happy (although a user could override + // the MessageAdaptionFailure handling to do something weird) + failed = true + null.asInstanceOf[U] + } if (!failed) { if (adapted != null) body(adapted) else @@ -220,10 +217,10 @@ import akka.util.OptionVal super.unhandled(other) } - final override def supervisorStrategy = classic.OneForOneStrategy(loggingEnabled = false) { - case ex => - ctx.setCurrentActorThread() - try ex match { + final override def supervisorStrategy = classic.OneForOneStrategy(loggingEnabled = false) { case ex => + ctx.setCurrentActorThread() + try + ex match { case TypedActorFailedException(cause) => // These have already been optionally logged by typed supervision recordChildFailure(cause) @@ -250,9 +247,10 @@ import akka.util.OptionVal 
classic.SupervisorStrategy.Stop else ActorAdapter.classicSupervisorDecider(ex) - } finally { - ctx.clearCurrentActorThread() } + finally { + ctx.clearCurrentActorThread() + } } private def recordChildFailure(ex: Throwable): Unit = { @@ -326,9 +324,7 @@ import akka.util.OptionVal } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[typed] final class ComposedStoppingBehavior[T]( lastBehavior: Behavior[T], stopBehavior: StoppedBehavior[T]) diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorContextAdapter.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorContextAdapter.scala index d1d169862c8..91bcfa5e42e 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorContextAdapter.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorContextAdapter.scala @@ -32,9 +32,7 @@ private[akka] object ActorContextAdapter { toClassicImp(context) } -/** - * INTERNAL API. Wrapping an [[akka.actor.ActorContext]] as an [[TypedActorContext]]. - */ +/** INTERNAL API. Wrapping an [[akka.actor.ActorContext]] as an [[TypedActorContext]]. 
*/ @InternalApi private[akka] final class ActorContextAdapter[T](adapter: ActorAdapter[T]) extends ActorContextImpl[T] { import ActorRefAdapter.toClassic diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorRefAdapter.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorRefAdapter.scala index d08f4f9afee..e905179ccea 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorRefAdapter.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorRefAdapter.scala @@ -12,9 +12,7 @@ import akka.actor.InvalidMessageException import akka.annotation.InternalApi import akka.dispatch.sysmsg -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[typed] class ActorRefAdapter[-T](val classicRef: classic.InternalActorRef) extends ActorRef[T] with internal.ActorRefImpl[T] diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorRefFactoryAdapter.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorRefFactoryAdapter.scala index c2db145fdac..00f52427d9c 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorRefFactoryAdapter.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorRefFactoryAdapter.scala @@ -11,9 +11,7 @@ import akka.annotation.InternalApi import akka.util.ErrorMessages import akka.util.OptionVal -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[typed] object ActorRefFactoryAdapter { private val remoteDeploymentNotAllowed = "Remote deployment not allowed for typed actors" diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/AdapterExtension.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/AdapterExtension.scala index 3ec3dd5f0ac..9d3395c10b2 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/AdapterExtension.scala +++ 
b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/AdapterExtension.scala @@ -16,9 +16,7 @@ import akka.annotation.InternalApi val adapter = ActorSystemAdapter(sys) } -/** - * Internal API - */ +/** Internal API */ @InternalApi object AdapterExtension extends akka.actor.ExtensionId[AdapterExtension] { def createExtension(sys: ExtendedActorSystem): AdapterExtension = new AdapterExtension(sys) } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/PropsAdapter.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/PropsAdapter.scala index 393fdcc5f4c..886a0b849e8 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/PropsAdapter.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/PropsAdapter.scala @@ -16,9 +16,7 @@ import akka.actor.typed.internal.PropsImpl._ import akka.annotation.InternalApi import akka.dispatch.Mailboxes -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object PropsAdapter { private final val TypedCreatorFunctionConsumerClazz = classOf[TypedCreatorFunctionConsumer] diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/SchedulerAdapter.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/SchedulerAdapter.scala index 03255383721..597e105e2e8 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/SchedulerAdapter.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/SchedulerAdapter.scala @@ -13,9 +13,7 @@ import akka.actor.Cancellable import akka.actor.typed.Scheduler import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object SchedulerAdapter { def toClassic(scheduler: Scheduler): akka.actor.Scheduler = scheduler match { @@ -27,20 +25,18 @@ import akka.annotation.InternalApi } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class 
SchedulerAdapter(private[akka] val classicScheduler: akka.actor.Scheduler) extends Scheduler { - override def scheduleOnce(delay: FiniteDuration, runnable: Runnable)( - implicit executor: ExecutionContext): Cancellable = + override def scheduleOnce(delay: FiniteDuration, runnable: Runnable)(implicit + executor: ExecutionContext): Cancellable = classicScheduler.scheduleOnce(delay, runnable) override def scheduleOnce(delay: Duration, runnable: Runnable, executor: ExecutionContext): Cancellable = classicScheduler.scheduleOnce(delay, runnable)(executor) - override def scheduleWithFixedDelay(initialDelay: FiniteDuration, delay: FiniteDuration)(runnable: Runnable)( - implicit executor: ExecutionContext): Cancellable = + override def scheduleWithFixedDelay(initialDelay: FiniteDuration, delay: FiniteDuration)(runnable: Runnable)(implicit + executor: ExecutionContext): Cancellable = classicScheduler.scheduleWithFixedDelay(initialDelay, delay)(runnable) override def scheduleWithFixedDelay( @@ -50,8 +46,8 @@ private[akka] final class SchedulerAdapter(private[akka] val classicScheduler: a executor: ExecutionContext): Cancellable = classicScheduler.scheduleWithFixedDelay(initialDelay, delay, runnable, executor) - override def scheduleAtFixedRate(initialDelay: FiniteDuration, interval: FiniteDuration)(runnable: Runnable)( - implicit executor: ExecutionContext): Cancellable = + override def scheduleAtFixedRate(initialDelay: FiniteDuration, interval: FiniteDuration)(runnable: Runnable)(implicit + executor: ExecutionContext): Cancellable = classicScheduler.scheduleAtFixedRate(initialDelay, interval)(runnable) override def scheduleAtFixedRate( diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/jfr/JFRActorFlightRecorder.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/jfr/JFRActorFlightRecorder.scala index c5650ecc4ed..f6cbfab549b 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/jfr/JFRActorFlightRecorder.scala +++ 
b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/jfr/JFRActorFlightRecorder.scala @@ -9,17 +9,13 @@ import akka.actor.typed.internal.ActorFlightRecorder import akka.actor.typed.internal.DeliveryFlightRecorder import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class JFRActorFlightRecorder() extends ActorFlightRecorder { override val delivery: DeliveryFlightRecorder = new JFRDeliveryFlightRecorder } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class JFRDeliveryFlightRecorder extends DeliveryFlightRecorder { override def producerCreated(producerId: String, path: ActorPath): Unit = diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/pubsub/TopicImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/pubsub/TopicImpl.scala index be04bca3560..e0cebd3b1df 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/pubsub/TopicImpl.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/pubsub/TopicImpl.scala @@ -19,9 +19,7 @@ import akka.actor.typed.scaladsl.LoggerOps import akka.actor.typed.scaladsl.adapter._ import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object TopicImpl { @@ -43,12 +41,10 @@ private[akka] object TopicImpl { final case class SubscriberTerminated[T](subscriber: ActorRef[T]) extends Command[T] } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi -private[akka] final class TopicImpl[T](topicName: String, context: ActorContext[TopicImpl.Command[T]])( - implicit classTag: ClassTag[T]) +private[akka] final class TopicImpl[T](topicName: String, context: ActorContext[TopicImpl.Command[T]])(implicit + classTag: ClassTag[T]) extends AbstractBehavior[TopicImpl.Command[T]](context) { /* @@ -73,7 +69,7 @@ private[akka] final class TopicImpl[T](topicName: String, context: ActorContext[ private val receptionist = 
context.system.receptionist private val receptionistAdapter = context.messageAdapter[Receptionist.Listing] { case topicServiceKey.Listing(topics) => TopicInstancesUpdated(topics) - case _ => throw new IllegalArgumentException() // FIXME exhaustiveness check fails on receptionist listing match + case _ => throw new IllegalArgumentException() // FIXME exhaustiveness check fails on receptionist listing match } receptionist ! Receptionist.Subscribe(topicServiceKey, receptionistAdapter) diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/LocalReceptionist.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/LocalReceptionist.scala index 5f6d2c565e5..902f90e31a8 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/LocalReceptionist.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/LocalReceptionist.scala @@ -197,12 +197,14 @@ private[akka] object LocalReceptionist extends ReceptionistBehaviorProvider { case None => } - updateServices(Set(key), { state => - val newState = state.serviceInstanceRemoved(key)(serviceInstance) - if (state.servicesPerActor.getOrElse(serviceInstance, Set.empty).isEmpty) - ctx.unwatch(serviceInstance) - newState - }) + updateServices( + Set(key), + { state => + val newState = state.serviceInstanceRemoved(key)(serviceInstance) + if (state.servicesPerActor.getOrElse(serviceInstance, Set.empty).isEmpty) + ctx.unwatch(serviceInstance) + newState + }) } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/ReceptionistImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/ReceptionistImpl.scala index b2122e39b34..305fe2f0e2a 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/ReceptionistImpl.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/ReceptionistImpl.scala @@ -11,9 +11,7 @@ import 
akka.actor.typed.Props import akka.actor.typed.receptionist.Receptionist import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class ReceptionistImpl(system: ActorSystem[_]) extends Receptionist { override val ref: ActorRef[Receptionist.Command] = { @@ -21,12 +19,11 @@ import akka.annotation.InternalApi if (system.settings.classicSettings.ProviderSelectionType.hasCluster) { system.dynamicAccess .getObjectFor[ReceptionistBehaviorProvider]("akka.cluster.typed.internal.receptionist.ClusterReceptionist") - .recover { - case e => - throw new RuntimeException( - "ClusterReceptionist could not be loaded dynamically. Make sure you have " + - "'akka-cluster-typed' in the classpath.", - e) + .recover { case e => + throw new RuntimeException( + "ClusterReceptionist could not be loaded dynamically. Make sure you have " + + "'akka-cluster-typed' in the classpath.", + e) } .get } else LocalReceptionist diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/ServiceKeySerializer.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/ServiceKeySerializer.scala index 6ca911986df..699fde64c86 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/ServiceKeySerializer.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/ServiceKeySerializer.scala @@ -10,9 +10,7 @@ import akka.actor.typed.receptionist.ServiceKey import akka.annotation.InternalApi import akka.serialization.{ BaseSerializer, SerializerWithStringManifest } -/** - * Internal API - */ +/** Internal API */ @InternalApi final class ServiceKeySerializer(val system: akka.actor.ExtendedActorSystem) extends SerializerWithStringManifest diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/GroupRouterImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/GroupRouterImpl.scala index d1144efb4b4..1ff1f0b7090 100644 --- 
a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/GroupRouterImpl.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/GroupRouterImpl.scala @@ -86,17 +86,14 @@ private final class InitialGroupRouterImpl[T]( import akka.actor.typed.scaladsl.adapter._ if (!stash.isFull) stash.stash(msg) else - context.system.eventStream ! EventStream.Publish(Dropped( - msg, - s"Stash is full in group router for [$serviceKey]", - context.self.toClassic)) // don't fail on full stash + context.system.eventStream ! EventStream.Publish( + Dropped(msg, s"Stash is full in group router for [$serviceKey]", context.self.toClassic) + ) // don't fail on full stash this } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[routing] object GroupRouterHelper { def routeesToUpdate[T](allRoutees: Set[ActorRef[T]], preferLocalRoutees: Boolean): Set[ActorRef[T]] = { @@ -107,9 +104,7 @@ private[routing] object GroupRouterHelper { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class GroupRouterImpl[T]( ctx: ActorContext[T], diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/PoolRouterImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/PoolRouterImpl.scala index bb8dd1c7017..f9845aa86af 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/PoolRouterImpl.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/PoolRouterImpl.scala @@ -13,9 +13,7 @@ import akka.actor.typed.scaladsl.{ AbstractBehavior, ActorContext, Behaviors } import akka.annotation.InternalApi import akka.util.ConstantFun -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class PoolRouterBuilder[T]( poolSize: Int, @@ -45,8 +43,8 @@ private[akka] final case class PoolRouterBuilder[T]( withConsistentHashingRouting(virtualNodesFactor, mapping.apply(_)) def withConsistentHashingRouting(virtualNodesFactor: Int, 
mapping: T => String): PoolRouterBuilder[T] = { - copy( - logicFactory = system => new RoutingLogics.ConsistentHashingLogic[T](virtualNodesFactor, mapping, system.address)) + copy(logicFactory = system => + new RoutingLogics.ConsistentHashingLogic[T](virtualNodesFactor, mapping, system.address)) } def withPoolSize(poolSize: Int): PoolRouterBuilder[T] = copy(poolSize = poolSize) @@ -59,9 +57,7 @@ private[akka] final case class PoolRouterBuilder[T]( override def withBroadcastPredicate(pred: T => Boolean): scaladsl.PoolRouter[T] = copy(broadcastPredicate = pred) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private final class PoolRouterImpl[T]( ctx: ActorContext[T], @@ -93,20 +89,19 @@ private final class PoolRouterImpl[T]( this } - override def onSignal: PartialFunction[Signal, Behavior[T]] = { - case Terminated(child) => - // Note that if several children are stopping concurrently children may already be empty - // for the `Terminated` we receive for the first child. This means it is not certain that - // there will be a log entry per child in those cases (it does not make sense to keep the - // pool alive just to get the logging right when there are no routees available) - if (context.children.nonEmpty) { - context.log.debug("Pool child stopped [{}]", child.path) - onRouteesChanged() - this - } else { - context.log.info("Last pool child stopped, stopping pool [{}]", context.self.path) - Behaviors.stopped - } + override def onSignal: PartialFunction[Signal, Behavior[T]] = { case Terminated(child) => + // Note that if several children are stopping concurrently children may already be empty + // for the `Terminated` we receive for the first child. 
This means it is not certain that + // there will be a log entry per child in those cases (it does not make sense to keep the + // pool alive just to get the logging right when there are no routees available) + if (context.children.nonEmpty) { + context.log.debug("Pool child stopped [{}]", child.path) + onRouteesChanged() + this + } else { + context.log.info("Last pool child stopped, stopping pool [{}]", context.self.path) + Behaviors.stopped + } } } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/RoutingLogic.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/RoutingLogic.scala index ff3fb01e51d..0a7d2f07234 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/RoutingLogic.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/RoutingLogic.scala @@ -33,9 +33,7 @@ sealed private[akka] trait RoutingLogic[T] { def routeesUpdated(newRoutees: Set[ActorRef[T]]): Unit } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object RoutingLogics { @@ -62,8 +60,8 @@ private[akka] object RoutingLogics { val firstDiffIndex = { var idx = 0 while (idx < currentRoutees.length && - idx < sortedNewRoutees.length && - currentRoutees(idx) == sortedNewRoutees(idx)) { + idx < sortedNewRoutees.length && + currentRoutees(idx) == sortedNewRoutees(idx)) { idx += 1 } idx diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/AskPattern.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/AskPattern.scala index 4caf64bad57..100d5b1fc7f 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/AskPattern.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/AskPattern.scala @@ -28,7 +28,6 @@ import akka.util.JavaDurationConverters._ * message that is sent to the target Actor in order to function as a reply-to * address, therefore the argument to the ask method is not the message itself * but a function that given the 
reply-to address will create the message. - * */ object AskPattern { @@ -41,7 +40,7 @@ object AskPattern { messageFactory: JFunction[ActorRef[Res], Req], timeout: Duration, scheduler: Scheduler): CompletionStage[Res] = - (actor.ask(messageFactory.apply)(timeout.asScala, scheduler)).toJava + actor.ask(messageFactory.apply)(timeout.asScala, scheduler).toJava /** * The same as [[ask]] but only for requests that result in a response of type [[akka.pattern.StatusReply]]. @@ -54,6 +53,6 @@ object AskPattern { messageFactory: JFunction[ActorRef[StatusReply[Res]], Req], timeout: Duration, scheduler: Scheduler): CompletionStage[Res] = - (actor.askWithStatus(messageFactory.apply)(timeout.asScala, scheduler).toJava) + actor.askWithStatus(messageFactory.apply)(timeout.asScala, scheduler).toJava } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/BehaviorBuilder.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/BehaviorBuilder.scala index 54fbb9ceb29..a0137b5c33b 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/BehaviorBuilder.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/BehaviorBuilder.scala @@ -31,9 +31,7 @@ import akka.util.OptionVal */ final class BehaviorBuilder[T] private (messageHandlers: List[Case[T, T]], signalHandlers: List[Case[T, Signal]]) { - /** - * Build a Behavior from the current state of the builder - */ + /** Build a Behavior from the current state of the builder */ def build(): Behavior[T] = { new BuiltBehavior[T](messageHandlers.reverse.toArray, signalHandlers.reverse.toArray) } @@ -167,9 +165,7 @@ object BehaviorBuilder { test: OptionVal[MT => Boolean], handler: JFunction[MT, Behavior[BT]]) - /** - * @return new empty immutable behavior builder. - */ + /** @return new empty immutable behavior builder. 
*/ // Empty param list to work around https://github.com/lampepfl/dotty/issues/10347 def create[T]: BehaviorBuilder[T] = _empty.asInstanceOf[BehaviorBuilder[T]] } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/Behaviors.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/Behaviors.scala index 91d7f99ed88..88ac9addea4 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/Behaviors.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/Behaviors.scala @@ -5,7 +5,7 @@ package akka.actor.typed.javadsl import java.util.Collections -import java.util.function.{ Supplier, Function => JFunction } +import java.util.function.{ Function => JFunction, Supplier } import scala.reflect.ClassTag @@ -22,9 +22,7 @@ import akka.japi.pf.PFBuilder import akka.util.ccompat.JavaConverters._ import akka.util.unused -/** - * Factories for [[akka.actor.typed.Behavior]]. - */ +/** Factories for [[akka.actor.typed.Behavior]]. */ object Behaviors { private[this] val _two2same = new JapiFunction2[ActorContext[Any], Any, Behavior[Any]] { @@ -46,9 +44,7 @@ object Behaviors { def setup[T](factory: akka.japi.function.Function[ActorContext[T], Behavior[T]]): Behavior[T] = BehaviorImpl.DeferredBehavior(ctx => factory.apply(ctx.asJava)) - /** - * Support for stashing messages to unstash at a later time. - */ + /** Support for stashing messages to unstash at a later time. */ def withStash[T](capacity: Int, factory: java.util.function.Function[StashBuffer[T], Behavior[T]]): Behavior[T] = setup(ctx => { factory(StashBufferImpl[T](ctx.asScala, capacity)) @@ -95,14 +91,10 @@ object Behaviors { */ def stopped[T](postStop: Effect): Behavior[T] = BehaviorImpl.stopped(postStop.apply _) - /** - * A behavior that treats every incoming message as unhandled. - */ + /** A behavior that treats every incoming message as unhandled. 
*/ def empty[T]: Behavior[T] = BehaviorImpl.empty - /** - * A behavior that ignores every incoming message and returns “same”. - */ + /** A behavior that ignores every incoming message and returns “same”. */ def ignore[T]: Behavior[T] = BehaviorImpl.ignore /** @@ -154,9 +146,11 @@ object Behaviors { def receive[T]( onMessage: JapiFunction2[ActorContext[T], T, Behavior[T]], onSignal: JapiFunction2[ActorContext[T], Signal, Behavior[T]]): Behavior[T] = { - new BehaviorImpl.ReceiveBehavior((ctx, msg) => onMessage.apply(ctx.asJava, msg), { - case (ctx, sig) => onSignal.apply(ctx.asJava, sig) - }) + new BehaviorImpl.ReceiveBehavior( + (ctx, msg) => onMessage.apply(ctx.asJava, msg), + { case (ctx, sig) => + onSignal.apply(ctx.asJava, sig) + }) } /** @@ -173,9 +167,7 @@ object Behaviors { */ def receive[T](@unused `type`: Class[T]): BehaviorBuilder[T] = BehaviorBuilder.create[T] - /** - * Construct an actor behavior that can react to lifecycle signals only. - */ + /** Construct an actor behavior that can react to lifecycle signals only. */ def receiveSignal[T](handler: JapiFunction2[ActorContext[T], Signal, Behavior[T]]): Behavior[T] = { receive(two2same, handler) } @@ -327,7 +319,6 @@ object Behaviors { * each message processing by the inner behavior is done. * @param behavior The actual behavior handling the messages, the MDC is used for the log entries logged through * `ActorContext.log` - * */ def withMdc[T]( interceptMessageClass: Class[T], @@ -344,7 +335,6 @@ object Behaviors { * @param staticMdc This MDC is setup in the logging context for every message * @param behavior The actual behavior handling the messages, the MDC is used for the log entries logged through * `ActorContext.log` - * */ def withMdc[T]( interceptMessageClass: Class[T], @@ -369,7 +359,6 @@ object Behaviors { * each message processing by the inner behavior is done. 
* @param behavior The actual behavior handling the messages, the MDC is used for the log entries logged through * `ActorContext.log` - * */ def withMdc[T]( interceptMessageClass: Class[T], diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/Receive.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/Receive.scala index c902826f49e..0fcdd725c8b 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/Receive.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/Receive.scala @@ -28,7 +28,6 @@ abstract class Receive[T] extends ExtensibleBehavior[T] { * * returning `stopped` will terminate this Behavior * * returning `same` designates to reuse the current Behavior * * returning `unhandled` keeps the same Behavior and signals that the message was not yet handled - * */ @throws(classOf[Exception]) def receiveMessage(msg: T): Behavior[T] diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/ReceiveBuilder.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/ReceiveBuilder.scala index 866ab06df55..74afe094a66 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/ReceiveBuilder.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/ReceiveBuilder.scala @@ -77,12 +77,15 @@ final class ReceiveBuilder[T] private ( * @return this behavior builder */ def onMessageEquals(msg: T, handler: Creator[Behavior[T]]): ReceiveBuilder[T] = - withMessage(OptionVal.Some(msg.getClass), OptionVal.Some(new JPredicate[T] { - override def test(param: T): Boolean = param == (msg) - }), new JFunction[T, Behavior[T]] { - // invoke creator without the message - override def apply(param: T): Behavior[T] = handler.create() - }) + withMessage( + OptionVal.Some(msg.getClass), + OptionVal.Some(new JPredicate[T] { + override def test(param: T): Boolean = param == msg + }), + new JFunction[T, Behavior[T]] { + // invoke creator without the message + override def apply(param: T): Behavior[T] = 
handler.create() + }) /** * Add a new case to the message handling matching any message. Subsequent `onMessage` clauses will @@ -128,11 +131,14 @@ final class ReceiveBuilder[T] private ( * @return this behavior builder */ def onSignalEquals(signal: Signal, handler: Creator[Behavior[T]]): ReceiveBuilder[T] = - withSignal(signal.getClass, OptionVal.Some(new JPredicate[Signal] { - override def test(param: Signal): Boolean = param == signal - }), new JFunction[Signal, Behavior[T]] { - override def apply(param: Signal): Behavior[T] = handler.create() - }) + withSignal( + signal.getClass, + OptionVal.Some(new JPredicate[Signal] { + override def test(param: Signal): Boolean = param == signal + }), + new JFunction[Signal, Behavior[T]] { + override def apply(param: Signal): Behavior[T] = handler.create() + }) private def withMessage[M <: T]( `type`: OptionVal[Class[M]], diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/Routers.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/Routers.scala index e210c6c4678..f6b59db144d 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/Routers.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/Routers.scala @@ -174,18 +174,12 @@ abstract class PoolRouter[T] extends DeferredBehavior[T] { virtualNodesFactor: Int, mapping: java.util.function.Function[T, String]): PoolRouter[T] - /** - * Set a new pool size from the one set at construction - */ + /** Set a new pool size from the one set at construction */ def withPoolSize(poolSize: Int): PoolRouter[T] - /** - * Set the props used to spawn the pool's routees - */ + /** Set the props used to spawn the pool's routees */ def withRouteeProps(routeeProps: Props): PoolRouter[T] - /** - * Any message that the predicate returns true for will be broadcast to all routees. - */ + /** Any message that the predicate returns true for will be broadcast to all routees. 
*/ def withBroadcastPredicate(pred: Predicate[T]): PoolRouter[T] } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/StashBuffer.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/StashBuffer.scala index 3e0f9e53e86..8ea679562b3 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/StashBuffer.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/StashBuffer.scala @@ -4,7 +4,7 @@ package akka.actor.typed.javadsl -import java.util.function.{ Predicate, Function => JFunction } +import java.util.function.{ Function => JFunction, Predicate } import akka.actor.typed.{ scaladsl, Behavior } import akka.annotation.DoNotInherit @@ -48,9 +48,7 @@ import akka.japi.function.Procedure */ def capacity: Int - /** - * @return `true` if no more messages can be added, i.e. size equals the capacity of the stash buffer - */ + /** @return `true` if no more messages can be added, i.e. size equals the capacity of the stash buffer */ def isFull: Boolean /** @@ -96,9 +94,7 @@ import akka.japi.function.Procedure */ def anyMatch(predicate: Predicate[T]): Boolean - /** - * Removes all messages from the buffer. - */ + /** Removes all messages from the buffer. */ def clear(): Unit /** @@ -144,7 +140,5 @@ import akka.japi.function.Procedure } -/** - * Is thrown when the size of the stash exceeds the capacity of the stash buffer. - */ +/** Is thrown when the size of the stash exceeds the capacity of the stash buffer. 
*/ final class StashOverflowException(message: String) extends scaladsl.StashOverflowException(message) diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/TimerScheduler.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/TimerScheduler.scala index 255f46c40e7..93765a0ca9e 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/TimerScheduler.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/TimerScheduler.scala @@ -210,9 +210,7 @@ trait TimerScheduler[T] { def startTimerAtFixedRate(msg: T, initialDelay: java.time.Duration, interval: java.time.Duration): Unit = startTimerAtFixedRate(msg, msg, initialDelay, interval) - /** - * Deprecated API: See [[TimerScheduler#startTimerWithFixedDelay]] or [[TimerScheduler#startTimerAtFixedRate]]. - */ + /** Deprecated API: See [[TimerScheduler#startTimerWithFixedDelay]] or [[TimerScheduler#startTimerAtFixedRate]]. */ @deprecated( "Use startTimerWithFixedDelay or startTimerAtFixedRate instead. This has the same semantics as " + "startTimerAtFixedRate, but startTimerWithFixedDelay is often preferred.", @@ -243,9 +241,7 @@ trait TimerScheduler[T] { def startSingleTimer(msg: T, delay: Duration): Unit = startSingleTimer(msg, msg, delay) - /** - * Check if a timer with a given `key` is active. - */ + /** Check if a timer with a given `key` is active. */ def isTimerActive(key: Any): Boolean /** @@ -259,8 +255,6 @@ trait TimerScheduler[T] { */ def cancel(key: Any): Unit - /** - * Cancel all timers. - */ + /** Cancel all timers. 
*/ def cancelAll(): Unit } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/pubsub/Topic.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/pubsub/Topic.scala index a1138a14ff5..9d680994f8d 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/pubsub/Topic.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/pubsub/Topic.scala @@ -28,40 +28,28 @@ import akka.annotation.DoNotInherit */ object Topic { - /** - * Not for user extension - */ + /** Not for user extension */ @DoNotInherit trait Command[T] extends TopicImpl.Command[T] - /** - * Scala API: Publish the message to all currently known subscribers. - */ + /** Scala API: Publish the message to all currently known subscribers. */ object Publish { def apply[T](message: T): Command[T] = TopicImpl.Publish(message) } - /** - * Java API: Publish the message to all currently known subscribers. - */ + /** Java API: Publish the message to all currently known subscribers. */ def publish[T](message: T): Command[T] = Publish(message) - /** - * Scala API: Subscribe to this topic. Should only be used for local subscribers. - */ + /** Scala API: Subscribe to this topic. Should only be used for local subscribers. */ object Subscribe { def apply[T](subscriber: ActorRef[T]): Command[T] = TopicImpl.Subscribe(subscriber) } - /** - * Java API: Subscribe to this topic. Should only be used for local subscribers. - */ + /** Java API: Subscribe to this topic. Should only be used for local subscribers. */ def subscribe[T](subscriber: ActorRef[T]): Command[T] = Subscribe(subscriber) - /** - * Scala API: Unsubscribe a previously subscribed actor from this topic. - */ + /** Scala API: Unsubscribe a previously subscribed actor from this topic. 
*/ object Unsubscribe { def apply[T](subscriber: ActorRef[T]): Command[T] = TopicImpl.Unsubscribe(subscriber) } @@ -79,9 +67,7 @@ object Topic { @DoNotInherit trait TopicStats { - /** - * @return The number of local subscribers subscribing to this topic actor instance when the request was handled - */ + /** @return The number of local subscribers subscribing to this topic actor instance when the request was handled */ def localSubscriberCount: Int /** @@ -110,20 +96,14 @@ object Topic { def getTopicStats[T](replyTo: ActorRef[TopicStats]): Command[T] = GetTopicStats(replyTo) - /** - * Java API: Unsubscribe a previously subscribed actor from this topic. - */ + /** Java API: Unsubscribe a previously subscribed actor from this topic. */ def unsubscribe[T](subscriber: ActorRef[T]): Command[T] = Unsubscribe(subscriber) - /** - * Scala API: Create a topic actor behavior for the given topic name and message type. - */ + /** Scala API: Create a topic actor behavior for the given topic name and message type. 
*/ def apply[T](topicName: String)(implicit classTag: ClassTag[T]): Behavior[Command[T]] = Behaviors.setup[TopicImpl.Command[T]](context => new TopicImpl[T](topicName, context)).narrow - /** - * Java API: Create a topic actor behavior for the given topic name and message class - */ + /** Java API: Create a topic actor behavior for the given topic name and message class */ def create[T](messageClass: Class[T], topicName: String): Behavior[Command[T]] = apply[T](topicName)(ClassTag(messageClass)) diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/receptionist/Receptionist.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/receptionist/Receptionist.scala index 32e4d85befe..76209718943 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/receptionist/Receptionist.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/receptionist/Receptionist.scala @@ -25,15 +25,11 @@ abstract class Receptionist extends Extension { object ServiceKey { - /** - * Scala API: Creates a service key. The given ID should uniquely define a service with a given protocol. - */ + /** Scala API: Creates a service key. The given ID should uniquely define a service with a given protocol. */ def apply[T](id: String)(implicit classTag: ClassTag[T]): ServiceKey[T] = DefaultServiceKey(id, classTag.runtimeClass.getName) - /** - * Java API: Creates a service key. The given ID should uniquely define a service with a given protocol. - */ + /** Java API: Creates a service key. The given ID should uniquely define a service with a given protocol. 
*/ def create[T](clazz: Class[T], id: String): ServiceKey[T] = DefaultServiceKey(id, clazz.getName) @@ -74,9 +70,7 @@ abstract class ServiceKey[T] extends AbstractServiceKey { key => else None } - /** - * Scala API: Provides a type safe pattern match for registration acks - */ + /** Scala API: Provides a type safe pattern match for registration acks */ object Registered { def unapply(l: Receptionist.Registered): Option[ActorRef[T]] = if (l.isForKey(key)) Some(l.serviceInstance(key)) @@ -116,15 +110,11 @@ object Receptionist extends ExtensionId[Receptionist] { */ object Register { - /** - * Create a Register without Ack that the service was registered - */ + /** Create a Register without Ack that the service was registered */ def apply[T](key: ServiceKey[T], service: ActorRef[T]): Command = new ReceptionistMessages.Register[T](key, service, None) - /** - * Create a Register with an actor that will get an ack that the service was registered - */ + /** Create a Register with an actor that will get an ack that the service was registered */ def apply[T](key: ServiceKey[T], service: ActorRef[T], replyTo: ActorRef[Registered]): Command = new ReceptionistMessages.Register[T](key, service, Some(replyTo)) } @@ -181,22 +171,16 @@ object Receptionist extends ExtensionId[Receptionist] { def getServiceInstance[T](key: ServiceKey[T]): ActorRef[T] } - /** - * Sent by the receptionist, available here for easier testing - */ + /** Sent by the receptionist, available here for easier testing */ object Registered { - /** - * Scala API - */ + /** Scala API */ def apply[T](key: ServiceKey[T], serviceInstance: ActorRef[T]): Registered = new ReceptionistMessages.Registered(key, serviceInstance) } - /** - * Java API: Sent by the receptionist, available here for easier testing - */ + /** Java API: Sent by the receptionist, available here for easier testing */ def registered[T](key: ServiceKey[T], serviceInstance: ActorRef[T]): Registered = Registered(key, serviceInstance) @@ -212,27 +196,19 
@@ object Receptionist extends ExtensionId[Receptionist] { */ object Deregister { - /** - * Create a Deregister without Ack that the service was deregistered - */ + /** Create a Deregister without Ack that the service was deregistered */ def apply[T](key: ServiceKey[T], service: ActorRef[T]): Command = new ReceptionistMessages.Deregister[T](key, service, None) - /** - * Create a Deregister with an actor that will get an ack that the service was unregistered - */ + /** Create a Deregister with an actor that will get an ack that the service was unregistered */ def apply[T](key: ServiceKey[T], service: ActorRef[T], replyTo: ActorRef[Deregistered]): Command = new ReceptionistMessages.Deregister[T](key, service, Some(replyTo)) } - /** - * Java API: A Deregister message without Ack that the service was unregistered - */ + /** Java API: A Deregister message without Ack that the service was unregistered */ def deregister[T](key: ServiceKey[T], service: ActorRef[T]): Command = Deregister(key, service) - /** - * Java API: A Deregister message with an actor that will get an ack that the service was unregistered - */ + /** Java API: A Deregister message with an actor that will get an ack that the service was unregistered */ def deregister[T](key: ServiceKey[T], service: ActorRef[T], replyTo: ActorRef[Deregistered]): Command = Deregister(key, service, replyTo) @@ -264,21 +240,15 @@ object Receptionist extends ExtensionId[Receptionist] { def getServiceInstance[T](key: ServiceKey[T]): ActorRef[T] } - /** - * Sent by the receptionist, available here for easier testing - */ + /** Sent by the receptionist, available here for easier testing */ object Deregistered { - /** - * Scala API - */ + /** Scala API */ def apply[T](key: ServiceKey[T], serviceInstance: ActorRef[T]): Deregistered = new ReceptionistMessages.Deregistered(key, serviceInstance) } - /** - * Java API: Sent by the receptionist, available here for easier testing - */ + /** Java API: Sent by the receptionist, available 
here for easier testing */ def deregistered[T](key: ServiceKey[T], serviceInstance: ActorRef[T]): Deregistered = Deregistered(key, serviceInstance) @@ -292,9 +262,7 @@ object Receptionist extends ExtensionId[Receptionist] { */ object Subscribe { - /** - * Scala API: - */ + /** Scala API: */ def apply[T](key: ServiceKey[T], subscriber: ActorRef[Listing]): Command = new ReceptionistMessages.Subscribe(key, subscriber) @@ -320,9 +288,7 @@ object Receptionist extends ExtensionId[Receptionist] { def apply[T](key: ServiceKey[T], replyTo: ActorRef[Listing]): Command = new ReceptionistMessages.Find(key, replyTo) - /** - * Special factory to make using Find with ask easier - */ + /** Special factory to make using Find with ask easier */ def apply[T](key: ServiceKey[T]): ActorRef[Listing] => Command = ref => new ReceptionistMessages.Find(key, ref) } @@ -416,9 +382,7 @@ object Receptionist extends ExtensionId[Receptionist] { } - /** - * Sent by the receptionist, available here for easier testing - */ + /** Sent by the receptionist, available here for easier testing */ object Listing { /** Scala API: */ @@ -434,15 +398,11 @@ object Receptionist extends ExtensionId[Receptionist] { new ReceptionistMessages.Listing[T](key, serviceInstances, allServiceInstances, servicesWereAddedOrRemoved) } - /** - * Java API: Sent by the receptionist, available here for easier testing - */ + /** Java API: Sent by the receptionist, available here for easier testing */ def listing[T](key: ServiceKey[T], serviceInstances: java.util.Set[ActorRef[T]]): Listing = Listing(key, serviceInstances.asScala.toSet) - /** - * Java API: Sent by the receptionist, available here for easier testing - */ + /** Java API: Sent by the receptionist, available here for easier testing */ def listing[T]( key: ServiceKey[T], serviceInstances: java.util.Set[ActorRef[T]], diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/AbstractBehavior.scala 
b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/AbstractBehavior.scala index 2e4e9b665ca..7ce3041df02 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/AbstractBehavior.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/AbstractBehavior.scala @@ -49,7 +49,6 @@ abstract class AbstractBehavior[T](protected val context: ActorContext[T]) exten *
  • returning `this` or `same` designates to reuse the current Behavior
  • *
  • returning `unhandled` keeps the same Behavior and signals that the message was not yet handled
  • * - * */ @throws(classOf[Exception]) def onMessage(msg: T): Behavior[T] @@ -87,9 +86,11 @@ abstract class AbstractBehavior[T](protected val context: ActorContext[T]) exten @throws(classOf[Exception]) override final def receiveSignal(ctx: TypedActorContext[T], msg: Signal): Behavior[T] = { checkRightContext(ctx) - onSignal.applyOrElse(msg, { - case MessageAdaptionFailure(ex) => throw ex - case _ => Behaviors.unhandled - }: PartialFunction[Signal, Behavior[T]]) + onSignal.applyOrElse( + msg, + { + case MessageAdaptionFailure(ex) => throw ex + case _ => Behaviors.unhandled + }: PartialFunction[Signal, Behavior[T]]) } } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/ActorContext.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/ActorContext.scala index ee64c638a84..27faf5081f0 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/ActorContext.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/ActorContext.scala @@ -250,9 +250,7 @@ trait ActorContext[T] extends TypedActorContext[T] with ClassicActorContextProvi */ @InternalApi private[akka] def spawnMessageAdapter[U](f: U => T, name: String): ActorRef[U] - /** - * INTERNAL API: See `spawnMessageAdapter` with name parameter - */ + /** INTERNAL API: See `spawnMessageAdapter` with name parameter */ @InternalApi private[akka] def spawnMessageAdapter[U](f: U => T): ActorRef[U] /** @@ -327,49 +325,33 @@ trait ActorContext[T] extends TypedActorContext[T] with ClassicActorContextProvi */ def pipeToSelf[Value](future: Future[Value])(mapResult: Try[Value] => T): Unit - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def onUnhandled(msg: T): Unit - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def currentBehavior: Behavior[T] - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def hasTimer: Boolean - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi 
private[akka] def cancelAllTimers(): Unit - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def clearMdc(): Unit - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def setCurrentActorThread(): Unit - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def clearCurrentActorThread(): Unit - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def checkCurrentActorThread(): Unit } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/Behaviors.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/Behaviors.scala index 4145fb02046..918cae63b8b 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/Behaviors.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/Behaviors.scala @@ -10,9 +10,7 @@ import scala.reflect.ClassTag import akka.actor.typed.internal._ import akka.annotation.{ DoNotInherit, InternalApi } -/** - * Factories for [[akka.actor.typed.Behavior]]. - */ +/** Factories for [[akka.actor.typed.Behavior]]. */ object Behaviors { /** @@ -29,9 +27,7 @@ object Behaviors { def setup[T](factory: ActorContext[T] => Behavior[T]): Behavior[T] = BehaviorImpl.DeferredBehavior(factory) - /** - * Support for stashing messages to unstash at a later time. - */ + /** Support for stashing messages to unstash at a later time. */ def withStash[T](capacity: Int)(factory: StashBuffer[T] => Behavior[T]): Behavior[T] = setup(ctx => { val stash = StashBuffer[T](ctx, capacity) @@ -79,14 +75,10 @@ object Behaviors { */ def stopped[T](postStop: () => Unit): Behavior[T] = BehaviorImpl.stopped(postStop) - /** - * A behavior that treats every incoming message as unhandled. - */ + /** A behavior that treats every incoming message as unhandled. */ def empty[T]: Behavior[T] = BehaviorImpl.empty - /** - * A behavior that ignores every incoming message and returns “same”. 
- */ + /** A behavior that ignores every incoming message and returns “same”. */ def ignore[T]: Behavior[T] = BehaviorImpl.ignore /** @@ -123,25 +115,19 @@ object Behaviors { def receiveMessage[T](onMessage: T => Behavior[T]): Receive[T] = new ReceiveMessageImpl(onMessage) - /** - * Construct an actor `Behavior` from a partial message handler which treats undefined messages as unhandled. - */ + /** Construct an actor `Behavior` from a partial message handler which treats undefined messages as unhandled. */ def receivePartial[T](onMessage: PartialFunction[(ActorContext[T], T), Behavior[T]]): Receive[T] = Behaviors.receive[T] { (ctx, t) => onMessage.applyOrElse((ctx, t), (_: (ActorContext[T], T)) => Behaviors.unhandled[T]) } - /** - * Construct an actor `Behavior` from a partial message handler which treats undefined messages as unhandled. - */ + /** Construct an actor `Behavior` from a partial message handler which treats undefined messages as unhandled. */ def receiveMessagePartial[T](onMessage: PartialFunction[T, Behavior[T]]): Receive[T] = Behaviors.receive[T] { (_, t) => onMessage.applyOrElse(t, (_: T) => Behaviors.unhandled[T]) } - /** - * Construct an actor `Behavior` that can react to lifecycle signals only. - */ + /** Construct an actor `Behavior` that can react to lifecycle signals only. */ def receiveSignal[T](handler: PartialFunction[(ActorContext[T], Signal), Behavior[T]]): Behavior[T] = receive[T]((_, _) => same).receiveSignal(handler) @@ -222,8 +208,8 @@ object Behaviors { final class Supervise[T] private[akka] (val wrapped: Behavior[T]) extends AnyVal { /** Specify the [[SupervisorStrategy]] to be invoked when the wrapped behavior throws. 
*/ - def onFailure[Thr <: Throwable](strategy: SupervisorStrategy)( - implicit tag: ClassTag[Thr] = ThrowableClassTag): Behavior[T] = { + def onFailure[Thr <: Throwable](strategy: SupervisorStrategy)(implicit + tag: ClassTag[Thr] = ThrowableClassTag): Behavior[T] = { val effectiveTag = if (tag == ClassTag.Nothing) ThrowableClassTag else tag Supervisor(Behavior.validateAsInitial(wrapped), strategy)(effectiveTag) } @@ -250,7 +236,6 @@ object Behaviors { * each message processing by the inner behavior is done. * @param behavior The actual behavior handling the messages, the MDC is used for the log entries logged through * `ActorContext.log` - * */ def withMdc[T: ClassTag](mdcForMessage: T => Map[String, String])(behavior: Behavior[T]): Behavior[T] = withMdc[T](Map.empty[String, String], mdcForMessage)(behavior) @@ -265,7 +250,6 @@ object Behaviors { * @param staticMdc This MDC is setup in the logging context for every message * @param behavior The actual behavior handling the messages, the MDC is used for the log entries logged through * `ActorContext.log` - * */ def withMdc[T: ClassTag](staticMdc: Map[String, String])(behavior: Behavior[T]): Behavior[T] = withMdc[T](staticMdc, (_: T) => Map.empty[String, String])(behavior) @@ -288,7 +272,6 @@ object Behaviors { * each message processing by the inner behavior is done. 
* @param behavior The actual behavior handling the messages, the MDC is used for the log entries logged through * `ActorContext.log` - * */ def withMdc[T: ClassTag](staticMdc: Map[String, String], mdcForMessage: T => Map[String, String])( behavior: Behavior[T]): Behavior[T] = diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/Routers.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/Routers.scala index 9f9084b38d5..2555b0fc7e9 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/Routers.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/Routers.scala @@ -46,9 +46,7 @@ object Routers { @DoNotInherit trait GroupRouter[T] extends Behavior[T] { - /** - * Route messages by randomly selecting the routee from the available routees. This is the default for group routers. - */ + /** Route messages by randomly selecting the routee from the available routees. This is the default for group routers. */ def withRandomRouting(): GroupRouter[T] /** @@ -162,18 +160,12 @@ trait PoolRouter[T] extends Behavior[T] { */ def withConsistentHashingRouting(virtualNodesFactor: Int, mapping: T => String): PoolRouter[T] - /** - * Set a new pool size from the one set at construction - */ + /** Set a new pool size from the one set at construction */ def withPoolSize(poolSize: Int): PoolRouter[T] - /** - * Set the props used to spawn the pool's routees - */ + /** Set the props used to spawn the pool's routees */ def withRouteeProps(routeeProps: Props): PoolRouter[T] - /** - * Any message that the predicate returns true for will be broadcast to all routees. - */ + /** Any message that the predicate returns true for will be broadcast to all routees. 
*/ def withBroadcastPredicate(pred: T => Boolean): PoolRouter[T] } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/StashBuffer.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/StashBuffer.scala index 15d78a75105..d88f819fb91 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/StashBuffer.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/StashBuffer.scala @@ -8,9 +8,7 @@ import akka.actor.typed.Behavior import akka.actor.typed.internal.StashBufferImpl import akka.annotation.{ DoNotInherit, InternalApi } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object StashBuffer { /** @@ -62,9 +60,7 @@ import akka.annotation.{ DoNotInherit, InternalApi } */ def capacity: Int - /** - * @return `true` if no more messages can be added, i.e. size equals the capacity of the stash buffer - */ + /** @return `true` if no more messages can be added, i.e. size equals the capacity of the stash buffer */ def isFull: Boolean /** @@ -108,9 +104,7 @@ import akka.annotation.{ DoNotInherit, InternalApi } */ def exists(predicate: T => Boolean): Boolean - /** - * Removes all messages from the buffer. - */ + /** Removes all messages from the buffer. */ def clear(): Unit /** @@ -156,7 +150,5 @@ import akka.annotation.{ DoNotInherit, InternalApi } } -/** - * Is thrown when the size of the stash exceeds the capacity of the stash buffer. - */ +/** Is thrown when the size of the stash exceeds the capacity of the stash buffer. 
*/ class StashOverflowException(message: String) extends RuntimeException(message) diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/TimerScheduler.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/TimerScheduler.scala index 0c4aa9544de..31a1ac39d9b 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/TimerScheduler.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/TimerScheduler.scala @@ -210,9 +210,7 @@ trait TimerScheduler[T] { def startTimerAtFixedRate(msg: T, initialDelay: FiniteDuration, interval: FiniteDuration): Unit = startTimerAtFixedRate(msg, msg, initialDelay, interval) - /** - * Deprecated API: See [[TimerScheduler#startTimerWithFixedDelay]] or [[TimerScheduler#startTimerAtFixedRate]]. - */ + /** Deprecated API: See [[TimerScheduler#startTimerWithFixedDelay]] or [[TimerScheduler#startTimerAtFixedRate]]. */ @deprecated( "Use startTimerWithFixedDelay or startTimerAtFixedRate instead. This has the same semantics as " + "startTimerAtFixedRate, but startTimerWithFixedDelay is often preferred.", @@ -243,9 +241,7 @@ trait TimerScheduler[T] { def startSingleTimer(msg: T, delay: FiniteDuration): Unit = startSingleTimer(msg, msg, delay) - /** - * Check if a timer with a given `key` is active. - */ + /** Check if a timer with a given `key` is active. */ def isTimerActive(key: Any): Boolean /** @@ -259,9 +255,7 @@ trait TimerScheduler[T] { */ def cancel(key: Any): Unit - /** - * Cancel all timers. - */ + /** Cancel all timers. 
*/ def cancelAll(): Unit } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/adapter/package.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/adapter/package.scala index 952e2119bd2..9fec69ab6ae 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/adapter/package.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/adapter/package.scala @@ -33,9 +33,7 @@ package object adapter { import language.implicitConversions - /** - * Extension methods added to [[akka.actor.ActorSystem]]. - */ + /** Extension methods added to [[akka.actor.ActorSystem]]. */ implicit class ClassicActorSystemOps(val sys: akka.actor.ActorSystem) extends AnyVal { /** @@ -70,15 +68,11 @@ package object adapter { def toTyped: ActorSystem[Nothing] = AdapterExtension(sys).adapter } - /** - * Extension methods added to [[akka.actor.typed.ActorSystem]]. - */ + /** Extension methods added to [[akka.actor.typed.ActorSystem]]. */ implicit class TypedActorSystemOps(val sys: ActorSystem[_]) extends AnyVal { def toClassic: akka.actor.ActorSystem = sys.classicSystem - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def internalSystemActorOf[U]( behavior: Behavior[U], name: String, @@ -87,9 +81,7 @@ package object adapter { } } - /** - * Extension methods added to [[akka.actor.ActorContext]]. - */ + /** Extension methods added to [[akka.actor.ActorContext]]. */ implicit class ClassicActorContextOps(val ctx: akka.actor.ActorContext) extends AnyVal { /** @@ -126,9 +118,7 @@ package object adapter { ctx.stop(ActorRefAdapter.toClassic(child)) } - /** - * Extension methods added to [[akka.actor.typed.scaladsl.ActorContext]]. - */ + /** Extension methods added to [[akka.actor.typed.scaladsl.ActorContext]]. 
*/ implicit class TypedActorContextOps(val ctx: scaladsl.ActorContext[_]) extends AnyVal { def actorOf(props: akka.actor.Props): akka.actor.ActorRef = ActorContextAdapter.toClassic(ctx).actorOf(props) @@ -141,16 +131,12 @@ package object adapter { // watch, unwatch and stop not needed here because of the implicit ActorRef conversion } - /** - * Extension methods added to [[akka.actor.typed.ActorRef]]. - */ + /** Extension methods added to [[akka.actor.typed.ActorRef]]. */ implicit class TypedActorRefOps(val ref: ActorRef[_]) extends AnyVal { def toClassic: akka.actor.ActorRef = ActorRefAdapter.toClassic(ref) } - /** - * Extension methods added to [[akka.actor.ActorRef]]. - */ + /** Extension methods added to [[akka.actor.ActorRef]]. */ implicit class ClassicActorRefOps(val ref: akka.actor.ActorRef) extends AnyVal { /** @@ -161,26 +147,18 @@ package object adapter { def toTyped[T]: ActorRef[T] = ActorRefAdapter(ref) } - /** - * Implicit conversion from classic [[akka.actor.ActorRef]] to [[akka.actor.typed.ActorRef]]. - */ + /** Implicit conversion from classic [[akka.actor.ActorRef]] to [[akka.actor.typed.ActorRef]]. */ implicit def actorRefAdapter[T](ref: akka.actor.ActorRef): ActorRef[T] = ActorRefAdapter(ref) - /** - * Extension methods added to [[akka.actor.typed.Scheduler]]. - */ + /** Extension methods added to [[akka.actor.typed.Scheduler]]. */ implicit class TypedSchedulerOps(val scheduler: Scheduler) extends AnyVal { def toClassic: akka.actor.Scheduler = SchedulerAdapter.toClassic(scheduler) } - /** - * Extension methods added to [[akka.actor.Scheduler]]. - */ + /** Extension methods added to [[akka.actor.Scheduler]]. */ implicit class ClassicSchedulerOps(val scheduler: akka.actor.Scheduler) extends AnyVal { - /** - * Adapt the classic `Scheduler` to `akka.actor.typed.Scheduler`. - */ + /** Adapt the classic `Scheduler` to `akka.actor.typed.Scheduler`. 
*/ def toTyped: Scheduler = new SchedulerAdapter(scheduler) } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/package.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/package.scala index e9d8a96c645..6cfa73ae19c 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/package.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/package.scala @@ -21,7 +21,7 @@ package object scaladsl { * }}} * or * {{{ - *import akka.actor.typed.scaladsl._ + * import akka.actor.typed.scaladsl._ * }}} * * @param log the underlying [[org.slf4j.Logger]] diff --git a/akka-actor/src/main/scala-2.13+/akka/compat/Future.scala b/akka-actor/src/main/scala-2.13+/akka/compat/Future.scala index 29faa6e2c1d..4cfd2344b0f 100644 --- a/akka-actor/src/main/scala-2.13+/akka/compat/Future.scala +++ b/akka-actor/src/main/scala-2.13+/akka/compat/Future.scala @@ -18,33 +18,33 @@ import akka.annotation.InternalApi * Remove these classes as soon as support for Scala 2.12 is dropped! 
*/ @InternalApi private[akka] object Future { - def fold[T, R](futures: IterableOnce[SFuture[T]])(zero: R)(op: (R, T) => R)( - implicit executor: ExecutionContext): SFuture[R] = { + def fold[T, R](futures: IterableOnce[SFuture[T]])(zero: R)(op: (R, T) => R)(implicit + executor: ExecutionContext): SFuture[R] = { // This will have performance implications since the elements are copied to a Vector SFuture.foldLeft[T, R](futures.iterator.to(immutable.Iterable))(zero)(op)(executor) } - def fold[T, R](futures: immutable.Iterable[SFuture[T]])(zero: R)(op: (R, T) => R)( - implicit executor: ExecutionContext): SFuture[R] = + def fold[T, R](futures: immutable.Iterable[SFuture[T]])(zero: R)(op: (R, T) => R)(implicit + executor: ExecutionContext): SFuture[R] = SFuture.foldLeft[T, R](futures)(zero)(op)(executor) - def reduce[T, R >: T](futures: IterableOnce[SFuture[T]])(op: (R, T) => R)( - implicit executor: ExecutionContext): SFuture[R] = { + def reduce[T, R >: T](futures: IterableOnce[SFuture[T]])(op: (R, T) => R)(implicit + executor: ExecutionContext): SFuture[R] = { // This will have performance implications since the elements are copied to a Vector SFuture.reduceLeft[T, R](futures.iterator.to(immutable.Iterable))(op)(executor) } - def reduce[T, R >: T](futures: immutable.Iterable[SFuture[T]])(op: (R, T) => R)( - implicit executor: ExecutionContext): SFuture[R] = + def reduce[T, R >: T](futures: immutable.Iterable[SFuture[T]])(op: (R, T) => R)(implicit + executor: ExecutionContext): SFuture[R] = SFuture.reduceLeft[T, R](futures)(op)(executor) - def find[T](futures: IterableOnce[SFuture[T]])(p: T => Boolean)( - implicit executor: ExecutionContext): SFuture[Option[T]] = { + def find[T](futures: IterableOnce[SFuture[T]])(p: T => Boolean)(implicit + executor: ExecutionContext): SFuture[Option[T]] = { // This will have performance implications since the elements are copied to a Vector SFuture.find[T](futures.iterator.to(immutable.Iterable))(p)(executor) } - def find[T](futures: 
immutable.Iterable[SFuture[T]])(p: T => Boolean)( - implicit executor: ExecutionContext): SFuture[Option[T]] = + def find[T](futures: immutable.Iterable[SFuture[T]])(p: T => Boolean)(implicit + executor: ExecutionContext): SFuture[Option[T]] = SFuture.find[T](futures)(p)(executor) } diff --git a/akka-actor/src/main/scala-2.13/akka/dispatch/internal/ScalaBatchable.scala b/akka-actor/src/main/scala-2.13/akka/dispatch/internal/ScalaBatchable.scala index df89bed436b..5aaa71abba5 100644 --- a/akka-actor/src/main/scala-2.13/akka/dispatch/internal/ScalaBatchable.scala +++ b/akka-actor/src/main/scala-2.13/akka/dispatch/internal/ScalaBatchable.scala @@ -6,9 +6,7 @@ package akka.dispatch.internal import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ScalaBatchable { diff --git a/akka-actor/src/main/scala-2.13/akka/util/ByteIterator.scala b/akka-actor/src/main/scala-2.13/akka/util/ByteIterator.scala index b12c79d1f88..3c86abdfe61 100644 --- a/akka-actor/src/main/scala-2.13/akka/util/ByteIterator.scala +++ b/akka-actor/src/main/scala-2.13/akka/util/ByteIterator.scala @@ -73,12 +73,12 @@ object ByteIterator { final override def clone: ByteArrayIterator = new ByteArrayIterator(array, from, until) final override def take(n: Int): this.type = { - if (n < len) until = { if (n > 0) (from + n) else from } + if (n < len) until = { if (n > 0) from + n else from } this } final override def drop(n: Int): this.type = { - if (n > 0) from = { if (n < len) (from + n) else until } + if (n > 0) from = { if (n < len) from + n else until } this } @@ -166,7 +166,7 @@ object ByteIterator { def asInputStream: java.io.InputStream = new java.io.InputStream { override def available: Int = iterator.len - def read: Int = if (hasNext) (next().toInt & 0xff) else -1 + def read: Int = if (hasNext) next().toInt & 0xff else -1 override def read(b: Array[Byte], off: Int, len: Int): Int = { if ((off < 0) || (len < 0) || (off + len > b.length)) throw 
new IndexOutOfBoundsException @@ -387,7 +387,7 @@ object ByteIterator { def asInputStream: java.io.InputStream = new java.io.InputStream { override def available: Int = current.len - def read: Int = if (hasNext) (next().toInt & 0xff) else -1 + def read: Int = if (hasNext) next().toInt & 0xff else -1 override def read(b: Array[Byte], off: Int, len: Int): Int = { val nRead = current.asInputStream.read(b, off, len) @@ -414,9 +414,7 @@ object ByteIterator { } } -/** - * An iterator over a ByteString. - */ +/** An iterator over a ByteString. */ abstract class ByteIterator extends BufferedIterator[Byte] { def len: Int @@ -518,14 +516,10 @@ abstract class ByteIterator extends BufferedIterator[Byte] { target } - /** - * Get a single Byte from this iterator. Identical to next(). - */ + /** Get a single Byte from this iterator. Identical to next(). */ def getByte: Byte = next() - /** - * Get a single Short from this iterator. - */ + /** Get a single Short from this iterator. */ def getShort(implicit byteOrder: ByteOrder): Short = { if (byteOrder == ByteOrder.BIG_ENDIAN) ((next() & 0xff) << 8 | (next() & 0xff) << 0).toShort @@ -534,9 +528,7 @@ abstract class ByteIterator extends BufferedIterator[Byte] { else throw new IllegalArgumentException("Unknown byte order " + byteOrder) } - /** - * Get a single Int from this iterator. - */ + /** Get a single Int from this iterator. */ def getInt(implicit byteOrder: ByteOrder): Int = { if (byteOrder == ByteOrder.BIG_ENDIAN) ((next() & 0xff) << 24 @@ -551,9 +543,7 @@ abstract class ByteIterator extends BufferedIterator[Byte] { else throw new IllegalArgumentException("Unknown byte order " + byteOrder) } - /** - * Get a single Long from this iterator. - */ + /** Get a single Long from this iterator. 
*/ def getLong(implicit byteOrder: ByteOrder): Long = { if (byteOrder == ByteOrder.BIG_ENDIAN) ((next().toLong & 0xff) << 56 @@ -630,59 +620,39 @@ abstract class ByteIterator extends BufferedIterator[Byte] { bs } - /** - * Get a number of Shorts from this iterator. - */ + /** Get a number of Shorts from this iterator. */ def getShorts(xs: Array[Short])(implicit byteOrder: ByteOrder): this.type = getShorts(xs, 0, xs.length)(byteOrder) - /** - * Get a number of Shorts from this iterator. - */ + /** Get a number of Shorts from this iterator. */ def getShorts(xs: Array[Short], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type - /** - * Get a number of Ints from this iterator. - */ + /** Get a number of Ints from this iterator. */ def getInts(xs: Array[Int])(implicit byteOrder: ByteOrder): this.type = getInts(xs, 0, xs.length)(byteOrder) - /** - * Get a number of Ints from this iterator. - */ + /** Get a number of Ints from this iterator. */ def getInts(xs: Array[Int], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type - /** - * Get a number of Longs from this iterator. - */ + /** Get a number of Longs from this iterator. */ def getLongs(xs: Array[Long])(implicit byteOrder: ByteOrder): this.type = getLongs(xs, 0, xs.length)(byteOrder) - /** - * Get a number of Longs from this iterator. - */ + /** Get a number of Longs from this iterator. */ def getLongs(xs: Array[Long], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type - /** - * Get a number of Floats from this iterator. - */ + /** Get a number of Floats from this iterator. */ def getFloats(xs: Array[Float])(implicit byteOrder: ByteOrder): this.type = getFloats(xs, 0, xs.length)(byteOrder) - /** - * Get a number of Floats from this iterator. - */ + /** Get a number of Floats from this iterator. */ def getFloats(xs: Array[Float], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type - /** - * Get a number of Doubles from this iterator. 
- */ + /** Get a number of Doubles from this iterator. */ def getDoubles(xs: Array[Double])(implicit byteOrder: ByteOrder): this.type = getDoubles(xs, 0, xs.length)(byteOrder) - /** - * Get a number of Doubles from this iterator. - */ + /** Get a number of Doubles from this iterator. */ def getDoubles(xs: Array[Double], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type /** diff --git a/akka-actor/src/main/scala-2.13/akka/util/ByteString.scala b/akka-actor/src/main/scala-2.13/akka/util/ByteString.scala index a1f4f01d7ed..b0076c42ecd 100644 --- a/akka-actor/src/main/scala-2.13/akka/util/ByteString.scala +++ b/akka-actor/src/main/scala-2.13/akka/util/ByteString.scala @@ -19,50 +19,32 @@ import scala.reflect.ClassTag object ByteString { - /** - * Creates a new ByteString by copying a byte array. - */ + /** Creates a new ByteString by copying a byte array. */ def apply(bytes: Array[Byte]): ByteString = CompactByteString(bytes) - /** - * Creates a new ByteString by copying bytes. - */ + /** Creates a new ByteString by copying bytes. */ def apply(bytes: Byte*): ByteString = CompactByteString(bytes: _*) - /** - * Creates a new ByteString by iterating over bytes. - */ + /** Creates a new ByteString by iterating over bytes. */ def apply(bytes: IterableOnce[Byte]): ByteString = CompactByteString(bytes) - /** - * Creates a new ByteString by converting from integral numbers to bytes. - */ + /** Creates a new ByteString by converting from integral numbers to bytes. */ def apply[T](bytes: T*)(implicit num: Integral[T]): ByteString = CompactByteString(bytes: _*)(num) - /** - * Creates a new ByteString by copying bytes from a ByteBuffer. - */ + /** Creates a new ByteString by copying bytes from a ByteBuffer. */ def apply(bytes: ByteBuffer): ByteString = CompactByteString(bytes) - /** - * Creates a new ByteString by encoding a String as UTF-8. - */ + /** Creates a new ByteString by encoding a String as UTF-8. 
*/ def apply(string: String): ByteString = apply(string, StandardCharsets.UTF_8) - /** - * Creates a new ByteString by encoding a String with a charset. - */ + /** Creates a new ByteString by encoding a String with a charset. */ def apply(string: String, charset: String): ByteString = CompactByteString(string, charset) - /** - * Creates a new ByteString by encoding a String with a charset. - */ + /** Creates a new ByteString by encoding a String with a charset. */ def apply(string: String, charset: Charset): ByteString = CompactByteString(string, charset) - /** - * Creates a new ByteString by copying a byte array. - */ + /** Creates a new ByteString by copying a byte array. */ def fromArray(array: Array[Byte]): ByteString = apply(array) /** @@ -79,7 +61,6 @@ object ByteString { * want wrap it into an ByteArray, and from there on only use that reference (the ByteString) * to operate on the wrapped data. For all other intents and purposes, please use the usual * apply and create methods - which provide the immutability guarantees by copying the array. - * */ def fromArrayUnsafe(array: Array[Byte]): ByteString = ByteString1C(array) @@ -104,7 +85,6 @@ object ByteString { * want wrap it into an ByteArray, and from there on only use that reference (the ByteString) * to operate on the wrapped data. For all other intents and purposes, please use the usual * apply and create methods - which provide the immutability guarantees by copying the array. 
- * */ def fromArrayUnsafe(array: Array[Byte], offset: Int, length: Int): ByteString = ByteString1(array, offset, length) @@ -116,29 +96,19 @@ object ByteString { def fromInts(array: Int*): ByteString = apply(array: _*)(scala.math.Numeric.IntIsIntegral) - /** - * Creates a new ByteString which will contain the UTF-8 representation of the given String - */ + /** Creates a new ByteString which will contain the UTF-8 representation of the given String */ def fromString(string: String): ByteString = apply(string) - /** - * Creates a new ByteString which will contain the representation of the given String in the given charset - */ + /** Creates a new ByteString which will contain the representation of the given String in the given charset */ def fromString(string: String, charset: String): ByteString = apply(string, charset) - /** - * Creates a new ByteString which will contain the representation of the given String in the given charset - */ + /** Creates a new ByteString which will contain the representation of the given String in the given charset */ def fromString(string: String, charset: Charset): ByteString = apply(string, charset) - /** - * Standard "UTF-8" charset - */ + /** Standard "UTF-8" charset */ val UTF_8: String = StandardCharsets.UTF_8.name() - /** - * Creates a new ByteString by copying bytes out of a ByteBuffer. - */ + /** Creates a new ByteString by copying bytes out of a ByteBuffer. */ def fromByteBuffer(buffer: ByteBuffer): ByteString = apply(buffer) val empty: ByteString = ByteString1C.empty @@ -171,9 +141,7 @@ object ByteString { } } - /** - * A compact (unsliced) and unfragmented ByteString, implementation of ByteString1C. - */ + /** A compact (unsliced) and unfragmented ByteString, implementation of ByteString1C. 
*/ @SerialVersionUID(3956956327691936932L) final class ByteString1C private (private val bytes: Array[Byte]) extends CompactByteString { def apply(idx: Int): Byte = bytes(idx) @@ -289,9 +257,7 @@ object ByteString { ByteString1C.readFromInputStream(is).toByteString1 } - /** - * An unfragmented ByteString. - */ + /** An unfragmented ByteString. */ final class ByteString1 private (private val bytes: Array[Byte], private val startIndex: Int, val length: Int) extends ByteString with Serializable { @@ -314,7 +280,7 @@ object ByteString { os.write(bytes, startIndex, length) } - def isCompact: Boolean = (length == bytes.length) + def isCompact: Boolean = length == bytes.length private[akka] def byteStringCompanion = ByteString1 @@ -505,9 +471,7 @@ object ByteString { } } - /** - * A ByteString with 2 or more fragments. - */ + /** A ByteString with 2 or more fragments. */ final class ByteStrings private (private[akka] val bytestrings: Vector[ByteString1], val length: Int) extends ByteString with Serializable { @@ -874,14 +838,10 @@ sealed abstract class ByteString private[akka] def writeToOutputStream(os: ObjectOutputStream): Unit - /** - * Efficiently concatenate another ByteString. - */ + /** Efficiently concatenate another ByteString. */ def ++(that: ByteString): ByteString - /** - * Java API: efficiently concatenate another ByteString. - */ + /** Java API: efficiently concatenate another ByteString. */ def concat(that: ByteString): ByteString = this ++ that /** @@ -937,9 +897,7 @@ sealed abstract class ByteString */ def toByteBuffer: ByteBuffer = ByteBuffer.wrap(toArray) - /** - * Decodes this ByteString as a UTF-8 encoded String. - */ + /** Decodes this ByteString as a UTF-8 encoded String. 
*/ final def utf8String: String = decodeString(StandardCharsets.UTF_8) /** @@ -960,14 +918,10 @@ sealed abstract class ByteString */ def decodeBase64: ByteString - /** - * Returns a ByteString which is the Base64 representation of this ByteString - */ + /** Returns a ByteString which is the Base64 representation of this ByteString */ def encodeBase64: ByteString - /** - * map method that will automatically cast Int back into Byte. - */ + /** map method that will automatically cast Int back into Byte. */ final def mapI(f: Byte => Int): ByteString = map(f.andThen(_.toByte)) def map[A](f: Byte => Byte): ByteString = fromSpecific(super.map(f)) @@ -975,15 +929,11 @@ sealed abstract class ByteString object CompactByteString { - /** - * Creates a new CompactByteString by copying a byte array. - */ + /** Creates a new CompactByteString by copying a byte array. */ def apply(bytes: Array[Byte]): CompactByteString = if (bytes.isEmpty) empty else ByteString.ByteString1C(bytes.clone) - /** - * Creates a new CompactByteString by copying bytes. - */ + /** Creates a new CompactByteString by copying bytes. */ def apply(bytes: Byte*): CompactByteString = { if (bytes.isEmpty) empty else { @@ -993,26 +943,20 @@ object CompactByteString { } } - /** - * Creates a new CompactByteString by traversing bytes. - */ + /** Creates a new CompactByteString by traversing bytes. */ def apply(bytes: IterableOnce[Byte]): CompactByteString = { val it = bytes.iterator if (it.isEmpty) empty else ByteString.ByteString1C(it.toArray) } - /** - * Creates a new CompactByteString by converting from integral numbers to bytes. - */ + /** Creates a new CompactByteString by converting from integral numbers to bytes. */ def apply[T](bytes: T*)(implicit num: Integral[T]): CompactByteString = { if (bytes.isEmpty) empty else ByteString.ByteString1C(bytes.iterator.map(x => num.toInt(x).toByte).to(Array)) } - /** - * Creates a new CompactByteString by copying bytes from a ByteBuffer. 
- */ + /** Creates a new CompactByteString by copying bytes from a ByteBuffer. */ def apply(bytes: ByteBuffer): CompactByteString = { if (bytes.remaining < 1) empty else { @@ -1022,20 +966,14 @@ object CompactByteString { } } - /** - * Creates a new CompactByteString by encoding a String as UTF-8. - */ + /** Creates a new CompactByteString by encoding a String as UTF-8. */ def apply(string: String): CompactByteString = apply(string, StandardCharsets.UTF_8) - /** - * Creates a new CompactByteString by encoding a String with a charset. - */ + /** Creates a new CompactByteString by encoding a String with a charset. */ def apply(string: String, charset: String): CompactByteString = if (string.isEmpty) empty else ByteString.ByteString1C(string.getBytes(charset)) - /** - * Creates a new CompactByteString by encoding a String with a charset. - */ + /** Creates a new CompactByteString by encoding a String with a charset. */ def apply(string: String, charset: Charset): CompactByteString = if (string.isEmpty) empty else ByteString.ByteString1C(string.getBytes(charset)) @@ -1092,11 +1030,10 @@ final class ByteStringBuilder extends Builder[Byte, ByteString] { } @inline protected final def fillByteBuffer(len: Int, byteOrder: ByteOrder)(fill: ByteBuffer => Unit): this.type = { - fillArray(len) { - case (array, start) => - val buffer = ByteBuffer.wrap(array, start, len) - buffer.order(byteOrder) - fill(buffer) + fillArray(len) { case (array, start) => + val buffer = ByteBuffer.wrap(array, start, len) + buffer.order(byteOrder) + fill(buffer) } } @@ -1196,19 +1133,13 @@ final class ByteStringBuilder extends Builder[Byte, ByteString] { this } - /** - * Java API: append a ByteString to this builder. - */ + /** Java API: append a ByteString to this builder. */ def append(bs: ByteString): this.type = if (bs.isEmpty) this else this ++= bs - /** - * Add a single Byte to this builder. - */ + /** Add a single Byte to this builder. 
*/ def putByte(x: Byte): this.type = this += x - /** - * Add a single Short to this builder. - */ + /** Add a single Short to this builder. */ def putShort(x: Int)(implicit byteOrder: ByteOrder): this.type = { if (byteOrder == ByteOrder.BIG_ENDIAN) { this += (x >>> 8).toByte @@ -1219,9 +1150,7 @@ final class ByteStringBuilder extends Builder[Byte, ByteString] { } else throw new IllegalArgumentException("Unknown byte order " + byteOrder) } - /** - * Add a single Int to this builder. - */ + /** Add a single Int to this builder. */ def putInt(x: Int)(implicit byteOrder: ByteOrder): this.type = { fillArray(4) { (target, offset) => if (byteOrder == ByteOrder.BIG_ENDIAN) { @@ -1239,9 +1168,7 @@ final class ByteStringBuilder extends Builder[Byte, ByteString] { this } - /** - * Add a single Long to this builder. - */ + /** Add a single Long to this builder. */ def putLong(x: Long)(implicit byteOrder: ByteOrder): this.type = { fillArray(8) { (target, offset) => if (byteOrder == ByteOrder.BIG_ENDIAN) { @@ -1267,9 +1194,7 @@ final class ByteStringBuilder extends Builder[Byte, ByteString] { this } - /** - * Add the `n` least significant bytes of the given Long to this builder. - */ + /** Add the `n` least significant bytes of the given Long to this builder. */ def putLongPart(x: Long, n: Int)(implicit byteOrder: ByteOrder): this.type = { fillArray(n) { (target, offset) => if (byteOrder == ByteOrder.BIG_ENDIAN) { @@ -1285,87 +1210,59 @@ final class ByteStringBuilder extends Builder[Byte, ByteString] { } } - /** - * Add a single Float to this builder. - */ + /** Add a single Float to this builder. */ def putFloat(x: Float)(implicit byteOrder: ByteOrder): this.type = putInt(java.lang.Float.floatToRawIntBits(x))(byteOrder) - /** - * Add a single Double to this builder. - */ + /** Add a single Double to this builder. 
*/ def putDouble(x: Double)(implicit byteOrder: ByteOrder): this.type = putLong(java.lang.Double.doubleToRawLongBits(x))(byteOrder) - /** - * Add a number of Bytes from an array to this builder. - */ + /** Add a number of Bytes from an array to this builder. */ def putBytes(array: Array[Byte]): this.type = putBytes(array, 0, array.length) - /** - * Add a number of Bytes from an array to this builder. - */ + /** Add a number of Bytes from an array to this builder. */ def putBytes(array: Array[Byte], start: Int, len: Int): this.type = fillArray(len) { case (target, targetOffset) => Array.copy(array, start, target, targetOffset, len) } - /** - * Add a number of Shorts from an array to this builder. - */ + /** Add a number of Shorts from an array to this builder. */ def putShorts(array: Array[Short])(implicit byteOrder: ByteOrder): this.type = putShorts(array, 0, array.length)(byteOrder) - /** - * Add a number of Shorts from an array to this builder. - */ + /** Add a number of Shorts from an array to this builder. */ def putShorts(array: Array[Short], start: Int, len: Int)(implicit byteOrder: ByteOrder): this.type = fillByteBuffer(len * 2, byteOrder) { _.asShortBuffer.put(array, start, len) } - /** - * Add a number of Ints from an array to this builder. - */ + /** Add a number of Ints from an array to this builder. */ def putInts(array: Array[Int])(implicit byteOrder: ByteOrder): this.type = putInts(array, 0, array.length)(byteOrder) - /** - * Add a number of Ints from an array to this builder. - */ + /** Add a number of Ints from an array to this builder. */ def putInts(array: Array[Int], start: Int, len: Int)(implicit byteOrder: ByteOrder): this.type = fillByteBuffer(len * 4, byteOrder) { _.asIntBuffer.put(array, start, len) } - /** - * Add a number of Longs from an array to this builder. - */ + /** Add a number of Longs from an array to this builder. 
*/ def putLongs(array: Array[Long])(implicit byteOrder: ByteOrder): this.type = putLongs(array, 0, array.length)(byteOrder) - /** - * Add a number of Longs from an array to this builder. - */ + /** Add a number of Longs from an array to this builder. */ def putLongs(array: Array[Long], start: Int, len: Int)(implicit byteOrder: ByteOrder): this.type = fillByteBuffer(len * 8, byteOrder) { _.asLongBuffer.put(array, start, len) } - /** - * Add a number of Floats from an array to this builder. - */ + /** Add a number of Floats from an array to this builder. */ def putFloats(array: Array[Float])(implicit byteOrder: ByteOrder): this.type = putFloats(array, 0, array.length)(byteOrder) - /** - * Add a number of Floats from an array to this builder. - */ + /** Add a number of Floats from an array to this builder. */ def putFloats(array: Array[Float], start: Int, len: Int)(implicit byteOrder: ByteOrder): this.type = fillByteBuffer(len * 4, byteOrder) { _.asFloatBuffer.put(array, start, len) } - /** - * Add a number of Doubles from an array to this builder. - */ + /** Add a number of Doubles from an array to this builder. */ def putDoubles(array: Array[Double])(implicit byteOrder: ByteOrder): this.type = putDoubles(array, 0, array.length)(byteOrder) - /** - * Add a number of Doubles from an array to this builder. - */ + /** Add a number of Doubles from an array to this builder. */ def putDoubles(array: Array[Double], start: Int, len: Int)(implicit byteOrder: ByteOrder): this.type = fillByteBuffer(len * 8, byteOrder) { _.asDoubleBuffer.put(array, start, len) } @@ -1396,13 +1293,9 @@ final class ByteStringBuilder extends Builder[Byte, ByteString] { override def write(b: Array[Byte], off: Int, len: Int): Unit = { builder.putBytes(b, off, len) } } - /** - * Tests whether this ByteStringBuilder is empty. - */ + /** Tests whether this ByteStringBuilder is empty. */ def isEmpty: Boolean = _length == 0 - /** - * Tests whether this ByteStringBuilder is not empty. 
- */ + /** Tests whether this ByteStringBuilder is not empty. */ def nonEmpty: Boolean = _length > 0 } diff --git a/akka-actor/src/main/scala/akka/AkkaException.scala b/akka-actor/src/main/scala/akka/AkkaException.scala index 3f38d01350c..dbbb3c74396 100644 --- a/akka-actor/src/main/scala/akka/AkkaException.scala +++ b/akka-actor/src/main/scala/akka/AkkaException.scala @@ -4,9 +4,7 @@ package akka -/** - * Akka base Exception. - */ +/** Akka base Exception. */ @SerialVersionUID(1L) class AkkaException(message: String, cause: Throwable) extends RuntimeException(message, cause) with Serializable { def this(msg: String) = this(msg, null) @@ -26,9 +24,7 @@ trait OnlyCauseStackTrace { self: Throwable => } } -/** - * This exception is thrown when Akka detects a problem with the provided configuration - */ +/** This exception is thrown when Akka detects a problem with the provided configuration */ class ConfigurationException(message: String, cause: Throwable) extends AkkaException(message, cause) { def this(msg: String) = this(msg, null) } diff --git a/akka-actor/src/main/scala/akka/AkkaVersion.scala b/akka-actor/src/main/scala/akka/AkkaVersion.scala index de1a0673899..14b06eb6315 100644 --- a/akka-actor/src/main/scala/akka/AkkaVersion.scala +++ b/akka-actor/src/main/scala/akka/AkkaVersion.scala @@ -24,9 +24,7 @@ object AkkaVersion { require(libraryName, requiredVersion, Version.current) } - /** - * Internal API: - */ + /** Internal API: */ @InternalApi private[akka] def require(libraryName: String, requiredVersion: String, currentVersion: String): Unit = { if (requiredVersion != currentVersion) { diff --git a/akka-actor/src/main/scala/akka/Done.scala b/akka-actor/src/main/scala/akka/Done.scala index 8b2f017012b..a18c04ab46e 100644 --- a/akka-actor/src/main/scala/akka/Done.scala +++ b/akka-actor/src/main/scala/akka/Done.scala @@ -17,9 +17,7 @@ import akka.annotation.DoNotInherit case object Done extends Done { - /** - * Java API: the singleton instance - */ + /** Java 
API: the singleton instance */ def getInstance(): Done = this /** diff --git a/akka-actor/src/main/scala/akka/NotUsed.scala b/akka-actor/src/main/scala/akka/NotUsed.scala index 58d2590ac73..cce6938beca 100644 --- a/akka-actor/src/main/scala/akka/NotUsed.scala +++ b/akka-actor/src/main/scala/akka/NotUsed.scala @@ -14,9 +14,7 @@ sealed abstract class NotUsed case object NotUsed extends NotUsed { - /** - * Java API: the singleton instance - */ + /** Java API: the singleton instance */ def getInstance(): NotUsed = this /** diff --git a/akka-actor/src/main/scala/akka/actor/AbstractActor.scala b/akka-actor/src/main/scala/akka/actor/AbstractActor.scala index 5a4fc381102..7efe9b1ffc5 100644 --- a/akka-actor/src/main/scala/akka/actor/AbstractActor.scala +++ b/akka-actor/src/main/scala/akka/actor/AbstractActor.scala @@ -15,9 +15,7 @@ import akka.annotation.DoNotInherit import akka.japi.pf.ReceiveBuilder import akka.util.JavaDurationConverters -/** - * Java API: compatible with lambda expressions - */ +/** Java API: compatible with lambda expressions */ object AbstractActor { /** @@ -38,9 +36,7 @@ object AbstractActor { new Receive(onMessage.orElse(other.onMessage)) } - /** - * emptyBehavior is a Receive-expression that matches no messages at all, ever. - */ + /** emptyBehavior is a Receive-expression that matches no messages at all, ever. */ final val emptyBehavior: Receive = new Receive(PartialFunction.empty) /** @@ -183,9 +179,7 @@ object AbstractActor { setReceiveTimeout(timeout.asScala) } - /** - * Cancel the sending of receive timeout notifications. - */ + /** Cancel the sending of receive timeout notifications. 
*/ def cancelReceiveTimeout(): Unit = setReceiveTimeout(Duration.Undefined) } } @@ -214,7 +208,6 @@ object AbstractActor { * } * } * - * */ abstract class AbstractActor extends Actor { @@ -351,7 +344,6 @@ abstract class UntypedAbstractActor extends AbstractActor { * Java API: compatible with lambda expressions * * Actor base class that mixes in logging into the Actor. - * */ abstract class AbstractLoggingActor extends AbstractActor with ActorLogging @@ -397,7 +389,6 @@ abstract class AbstractLoggingActor extends AbstractActor with ActorLogging * For a `Stash` based actor that enforces unbounded deques see [[akka.actor.AbstractActorWithUnboundedStash]]. * There is also an unrestricted version [[akka.actor.AbstractActorWithUnrestrictedStash]] that does not * enforce the mailbox type. - * */ abstract class AbstractActorWithStash extends AbstractActor with Stash @@ -407,7 +398,6 @@ abstract class AbstractActorWithStash extends AbstractActor with Stash * Actor base class with `Stash` that enforces an unbounded deque for the actor. The proper mailbox has to be configured * manually, and the mailbox should extend the [[akka.dispatch.DequeBasedMessageQueueSemantics]] marker trait. * See [[akka.actor.AbstractActorWithStash]] for details on how `Stash` works. - * */ abstract class AbstractActorWithUnboundedStash extends AbstractActor with UnboundedStash @@ -416,6 +406,5 @@ abstract class AbstractActorWithUnboundedStash extends AbstractActor with Unboun * * Actor base class with `Stash` that does not enforce any mailbox type. The mailbox of the actor has to be configured * manually. See [[akka.actor.AbstractActorWithStash]] for details on how `Stash` works. 
- * */ abstract class AbstractActorWithUnrestrictedStash extends AbstractActor with UnrestrictedStash diff --git a/akka-actor/src/main/scala/akka/actor/AbstractFSM.scala b/akka-actor/src/main/scala/akka/actor/AbstractFSM.scala index ba1ff7b7e06..012d9f52c1b 100644 --- a/akka-actor/src/main/scala/akka/actor/AbstractFSM.scala +++ b/akka-actor/src/main/scala/akka/actor/AbstractFSM.scala @@ -8,10 +8,7 @@ import scala.concurrent.duration.FiniteDuration import akka.util.JavaDurationConverters._ -/** - * Java API: compatible with lambda expressions - * - */ +/** Java API: compatible with lambda expressions */ object AbstractFSM { /** @@ -29,7 +26,6 @@ object AbstractFSM { * Java API: compatible with lambda expressions * * Finite State Machine actor abstract base class. - * */ abstract class AbstractFSM[S, D] extends FSM[S, D] { import java.util.{ List => JList } @@ -537,9 +533,7 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] { setTimer(name, msg, timeout.asScala, repeat) } - /** - * Default reason if calling `stop()`. - */ + /** Default reason if calling `stop()`. */ val Normal: FSM.Reason = FSM.Normal /** @@ -553,7 +547,6 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] { * Java API: compatible with lambda expressions * * Finite State Machine actor abstract base class. - * */ abstract class AbstractLoggingFSM[S, D] extends AbstractFSM[S, D] with LoggingFSM[S, D] @@ -561,6 +554,5 @@ abstract class AbstractLoggingFSM[S, D] extends AbstractFSM[S, D] with LoggingFS * Java API: compatible with lambda expressions * * Finite State Machine actor abstract base class with Stash support. 
- * */ abstract class AbstractFSMWithStash[S, D] extends AbstractFSM[S, D] with Stash diff --git a/akka-actor/src/main/scala/akka/actor/AbstractProps.scala b/akka-actor/src/main/scala/akka/actor/AbstractProps.scala index c3a92fbb755..1e3ab84a893 100644 --- a/akka-actor/src/main/scala/akka/actor/AbstractProps.scala +++ b/akka-actor/src/main/scala/akka/actor/AbstractProps.scala @@ -13,28 +13,21 @@ import scala.annotation.varargs import akka.japi.Creator import akka.util.Reflect -/** - * - * Java API: Factory for Props instances. - */ +/** Java API: Factory for Props instances. */ private[akka] trait AbstractProps { - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] def validate(clazz: Class[_]): Unit = { if (Modifier.isAbstract(clazz.getModifiers)) { throw new IllegalArgumentException(s"Actor class [${clazz.getName}] must not be abstract") } else if (!classOf[Actor].isAssignableFrom(clazz) && - !classOf[IndirectActorProducer].isAssignableFrom(clazz)) { + !classOf[IndirectActorProducer].isAssignableFrom(clazz)) { throw new IllegalArgumentException( s"Actor class [${clazz.getName}] must be subClass of akka.actor.Actor or akka.actor.IndirectActorProducer.") } } - /** - * Java API: create a Props given a class and its constructor arguments. - */ + /** Java API: create a Props given a class and its constructor arguments. */ @varargs def create(clazz: Class[_], args: AnyRef*): Props = new Props(deploy = Props.defaultDeploy, clazz = clazz, args = args.toList) @@ -71,9 +64,7 @@ private[akka] trait AbstractProps { create(classOf[CreatorConsumer], actorClass, creator) } - /** - * Create new Props from the given [[akka.japi.Creator]] with the type set to the given actorClass. - */ + /** Create new Props from the given [[akka.japi.Creator]] with the type set to the given actorClass. 
*/ def create[T <: Actor](actorClass: Class[T], creator: Creator[T]): Props = { checkCreatorClosingOver(creator.getClass) create(classOf[CreatorConsumer], actorClass, creator) diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index 04caccc2231..4c1c2aee200 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -28,22 +28,16 @@ private[akka] trait AutoReceivedMessage extends Serializable */ trait PossiblyHarmful -/** - * Marker trait to signal that this class should not be verified for serializability. - */ +/** Marker trait to signal that this class should not be verified for serializability. */ trait NoSerializationVerificationNeeded abstract class PoisonPill extends AutoReceivedMessage with PossiblyHarmful with DeadLetterSuppression -/** - * A message all Actors will understand, that when processed will terminate the Actor permanently. - */ +/** A message all Actors will understand, that when processed will terminate the Actor permanently. */ @SerialVersionUID(1L) case object PoisonPill extends PoisonPill { - /** - * Java API: get the singleton instance - */ + /** Java API: get the singleton instance */ def getInstance = this } @@ -56,9 +50,7 @@ abstract class Kill extends AutoReceivedMessage with PossiblyHarmful @SerialVersionUID(1L) case object Kill extends Kill { - /** - * Java API: get the singleton instance - */ + /** Java API: get the singleton instance */ def getInstance = this } @@ -142,15 +134,11 @@ abstract class ReceiveTimeout extends PossiblyHarmful @SerialVersionUID(1L) case object ReceiveTimeout extends ReceiveTimeout { - /** - * Java API: get the singleton instance - */ + /** Java API: get the singleton instance */ def getInstance = this } -/** - * Marker trait to indicate that a message should not reset the receive timeout. - */ +/** Marker trait to indicate that a message should not reset the receive timeout. 
*/ trait NotInfluenceReceiveTimeout /** @@ -160,9 +148,7 @@ trait NotInfluenceReceiveTimeout @SerialVersionUID(1L) final case class IllegalActorStateException private[akka] (message: String) extends AkkaException(message) -/** - * ActorKilledException is thrown when an Actor receives the [[akka.actor.Kill]] message - */ +/** ActorKilledException is thrown when an Actor receives the [[akka.actor.Kill]] message */ @SerialVersionUID(1L) final case class ActorKilledException private[akka] (message: String) extends AkkaException(message) with NoStackTrace @@ -280,9 +266,7 @@ final case class DeathPactException private[akka] (dead: ActorRef) @SerialVersionUID(1L) class ActorInterruptedException private[akka] (cause: Throwable) extends AkkaException(cause.getMessage, cause) -/** - * This message is published to the EventStream whenever an Actor receives a message it doesn't understand - */ +/** This message is published to the EventStream whenever an Actor receives a message it doesn't understand */ @SerialVersionUID(1L) final case class UnhandledMessage( @BeanProperty message: Any, @@ -301,9 +285,7 @@ final case class UnhandledMessage( object Status { sealed trait Status extends Serializable - /** - * This class/message type is preferably used to indicate success of some operation performed. - */ + /** This class/message type is preferably used to indicate success of some operation performed. */ @SerialVersionUID(1L) final case class Success(status: Any) extends Status @@ -373,26 +355,20 @@ trait DiagnosticActorLogging extends Actor { object Actor { - /** - * Type alias representing a Receive-expression for Akka Actors. - */ - //#receive + /** Type alias representing a Receive-expression for Akka Actors. */ + // #receive type Receive = PartialFunction[Any, Unit] - //#receive + // #receive - /** - * emptyBehavior is a Receive-expression that matches no messages at all, ever. - */ + /** emptyBehavior is a Receive-expression that matches no messages at all, ever. 
*/ @SerialVersionUID(1L) object emptyBehavior extends Receive { def isDefinedAt(x: Any) = false def apply(x: Any) = throw new UnsupportedOperationException("Empty behavior apply()") } - /** - * ignoringBehavior is a Receive-expression that consumes and ignores all messages. - */ + /** ignoringBehavior is a Receive-expression that consumes and ignores all messages. */ @SerialVersionUID(1L) object ignoringBehavior extends Receive { def isDefinedAt(x: Any): Boolean = true @@ -405,14 +381,10 @@ object Actor { */ final val noSender: ActorRef = null - /** - * INTERNAL API - */ + /** INTERNAL API */ private final val NotHandled = new Object - /** - * INTERNAL API - */ + /** INTERNAL API */ private final val notHandledFun = (_: Any) => NotHandled } @@ -503,7 +475,7 @@ trait Actor { * self ! message * */ - implicit final val self: ActorRef = context.self //MUST BE A VAL, TRUST ME + implicit final val self: ActorRef = context.self // MUST BE A VAL, TRUST ME /** * The reference sender Actor of the last received message. @@ -519,9 +491,9 @@ trait Actor { * Scala API: This defines the initial actor behavior, it must return a partial function * with the actor logic. */ - //#receive + // #receive def receive: Actor.Receive - //#receive + // #receive /** * INTERNAL API. @@ -585,10 +557,10 @@ trait Actor { * Empty default implementation. */ @throws(classOf[Exception]) // when changing this you MUST also change ActorDocTest - //#lifecycle-hooks + // #lifecycle-hooks def preStart(): Unit = () - //#lifecycle-hooks + // #lifecycle-hooks /** * User overridable callback. @@ -597,10 +569,10 @@ trait Actor { * Empty default implementation. 
*/ @throws(classOf[Exception]) // when changing this you MUST also change ActorDocTest - //#lifecycle-hooks + // #lifecycle-hooks def postStop(): Unit = () - //#lifecycle-hooks + // #lifecycle-hooks /** * Scala API: User overridable callback: '''By default it disposes of all children and then calls `postStop()`.''' @@ -611,7 +583,7 @@ trait Actor { * up of resources before Actor is terminated. */ @throws(classOf[Exception]) // when changing this you MUST also change ActorDocTest - //#lifecycle-hooks + // #lifecycle-hooks def preRestart(@unused reason: Throwable, @unused message: Option[Any]): Unit = { context.children.foreach { child => context.unwatch(child) @@ -620,7 +592,7 @@ trait Actor { postStop() } - //#lifecycle-hooks + // #lifecycle-hooks /** * User overridable callback: By default it calls `preStart()`. @@ -629,11 +601,11 @@ trait Actor { * Is called right AFTER restart on the newly created Actor to allow reinitialization after an Actor crash. */ @throws(classOf[Exception]) // when changing this you MUST also change ActorDocTest - //#lifecycle-hooks + // #lifecycle-hooks def postRestart(@unused reason: Throwable): Unit = { preStart() } - //#lifecycle-hooks + // #lifecycle-hooks /** * User overridable callback. 
diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 5f302f68579..1f15c235b40 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -227,32 +227,22 @@ trait ActorContext extends ActorRefFactory with ClassicActorContextProvider { */ def unwatch(subject: ActorRef): ActorRef - /** - * ActorContexts shouldn't be Serializable - */ + /** ActorContexts shouldn't be Serializable */ final protected def writeObject(@unused o: ObjectOutputStream): Unit = throw new NotSerializableException("ActorContext is not serializable!") } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] trait Cell { - /** - * The “self” reference which this Cell is attached to. - */ + /** The “self” reference which this Cell is attached to. */ def self: ActorRef - /** - * The system within which this Cell lives. - */ + /** The system within which this Cell lives. */ def system: ActorSystem - /** - * The system internals where this Cell lives. - */ + /** The system internals where this Cell lives. */ def systemImpl: ActorSystemImpl /** @@ -261,24 +251,16 @@ private[akka] trait Cell { */ def start(): this.type - /** - * Recursively suspend this actor and all its children. Is only allowed to throw Fatal Throwables. - */ + /** Recursively suspend this actor and all its children. Is only allowed to throw Fatal Throwables. */ def suspend(): Unit - /** - * Recursively resume this actor and all its children. Is only allowed to throw Fatal Throwables. - */ + /** Recursively resume this actor and all its children. Is only allowed to throw Fatal Throwables. */ def resume(causedByFailure: Throwable): Unit - /** - * Restart this actor (will recursively restart or stop all children). Is only allowed to throw Fatal Throwables. - */ + /** Restart this actor (will recursively restart or stop all children). Is only allowed to throw Fatal Throwables. 
*/ def restart(cause: Throwable): Unit - /** - * Recursively terminate this actor and all its children. Is only allowed to throw Fatal Throwables. - */ + /** Recursively terminate this actor and all its children. Is only allowed to throw Fatal Throwables. */ def stop(): Unit /** @@ -287,19 +269,13 @@ private[akka] trait Cell { */ private[akka] def isTerminated: Boolean - /** - * The supervisor of this actor. - */ + /** The supervisor of this actor. */ def parent: InternalActorRef - /** - * All children of this actor, including only reserved-names. - */ + /** All children of this actor, including only reserved-names. */ def childrenRefs: ChildrenContainer - /** - * Get the stats for the named child, if that exists. - */ + /** Get the stats for the named child, if that exists. */ def getChildByName(name: String): Option[ChildStats] /** @@ -350,9 +326,7 @@ private[akka] trait Cell { */ def numberOfMessages: Int - /** - * The props for this actor cell. - */ + /** The props for this actor cell. */ def props: Props } @@ -467,7 +441,7 @@ private[akka] class ActorCell( /* * MESSAGE PROCESSING */ - //Memory consistency is handled by the Mailbox (reading mailbox status then processing messages, then writing mailbox status + // Memory consistency is handled by the Mailbox (reading mailbox status then processing messages, then writing mailbox status final def systemInvoke(message: SystemMessage): Unit = { /* * When recreate/suspend/resume are received while restarting (i.e. 
between @@ -520,9 +494,10 @@ private[akka] class ActorCell( case Supervise(child, async) => supervise(child, async) case NoMessage => // only here to suppress warning } - } catch handleNonFatalOrInterruptedException { e => - handleInvokeFailure(Nil, e) - } + } catch + handleNonFatalOrInterruptedException { e => + handleInvokeFailure(Nil, e) + } val newState = calculateState // As each state accepts a strict subset of another state, it is enough to unstash if we "walk up" the state // chain @@ -535,7 +510,7 @@ private[akka] class ActorCell( invokeAll(new EarliestFirstSystemMessageList(message), calculateState) } - //Memory consistency is handled by the Mailbox (reading mailbox status then processing messages, then writing mailbox status + // Memory consistency is handled by the Mailbox (reading mailbox status then processing messages, then writing mailbox status final def invoke(messageHandle: Envelope): Unit = { val msg = messageHandle.message val timeoutBeforeReceive = cancelReceiveTimeoutIfNeeded(msg) @@ -547,11 +522,13 @@ private[akka] class ActorCell( receiveMessage(msg) } currentMessage = null // reset current message after successful invocation - } catch handleNonFatalOrInterruptedException { e => - handleInvokeFailure(Nil, e) - } finally - // Schedule or reschedule receive timeout - checkReceiveTimeoutIfNeeded(msg, timeoutBeforeReceive) + } catch + handleNonFatalOrInterruptedException { e => + handleInvokeFailure(Nil, e) + } + finally + // Schedule or reschedule receive timeout + checkReceiveTimeoutIfNeeded(msg, timeoutBeforeReceive) } def autoReceiveMessage(msg: Envelope): Unit = { @@ -566,7 +543,9 @@ private[akka] class ActorCell( case sel: ActorSelectionMessage => receiveSelection(sel) case Identify(messageId) => sender() ! 
ActorIdentity(messageId, Some(self)) case unexpected => - throw new RuntimeException(s"Unexpected message for autoreceive: $unexpected") // for exhaustiveness check, will not happen + throw new RuntimeException( + s"Unexpected message for autoreceive: $unexpected" + ) // for exhaustiveness check, will not happen } } @@ -607,7 +586,7 @@ private[akka] class ActorCell( * ACTOR INSTANCE HANDLING */ - //This method is in charge of setting up the contextStack and create a new instance of the Actor + // This method is in charge of setting up the contextStack and create a new instance of the Actor protected def newActor(): Actor = { contextStack.set(this :: contextStack.get) try { @@ -624,7 +603,9 @@ private[akka] class ActorCell( } finally { val stackAfter = contextStack.get if (stackAfter.nonEmpty) - contextStack.set(if (stackAfter.head eq null) stackAfter.tail.tail else stackAfter.tail) // pop null marker plus our context + contextStack.set( + if (stackAfter.head eq null) stackAfter.tail.tail else stackAfter.tail + ) // pop null marker plus our context } } diff --git a/akka-actor/src/main/scala/akka/actor/ActorPath.scala b/akka-actor/src/main/scala/akka/actor/ActorPath.scala index 862aad5f6b8..21622052400 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorPath.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorPath.scala @@ -12,16 +12,12 @@ import scala.collection.immutable import akka.japi.Util.immutableSeq -/** - * Java API - */ +/** Java API */ object ActorPaths { // static forwarders to `object ActorPath`, since `trait ActorPath` // could not be changed to `abstract ActorPath` in a binary compatible way - /** - * Parse string as actor path; throws java.net.MalformedURLException if unable to do so. - */ + /** Parse string as actor path; throws java.net.MalformedURLException if unable to do so. 
*/ def fromString(s: String): ActorPath = ActorPath.fromString(s) /** @@ -55,9 +51,7 @@ object ActorPaths { object ActorPath { - /** - * Parse string as actor path; throws java.net.MalformedURLException if unable to do so. - */ + /** Parse string as actor path; throws java.net.MalformedURLException if unable to do so. */ def fromString(s: String): ActorPath = s match { case ActorPathExtractor(address, elems) => RootActorPath(address) / elems case _ => throw new MalformedURLException("cannot parse as ActorPath: " + s) @@ -88,17 +82,18 @@ object ActorPath { // If the number of cases increase remember to add a `@switch` annotation e.g.: // (findInvalidPathElementCharPosition(element): @switch) match { - (findInvalidPathElementCharPosition(element)) match { + findInvalidPathElementCharPosition(element) match { case ValidPathCode => // valid case EmptyPathCode => throw InvalidActorNameException(s"Actor path element must not be empty $fullPathMsg") case invalidAt => - throw InvalidActorNameException(s"""Invalid actor path element [$element]$fullPathMsg, illegal character [${element( - invalidAt)}] at position: $invalidAt. """ + - """Actor paths MUST: """ + - """not start with `$`, """ + - s"""include only ASCII letters and can only contain these special characters: ${ActorPath.ValidSymbols}.""") + throw InvalidActorNameException( + s"""Invalid actor path element [$element]$fullPathMsg, illegal character [${element( + invalidAt)}] at position: $invalidAt. 
""" + + """Actor paths MUST: """ + + """not start with `$`, """ + + s"""include only ASCII letters and can only contain these special characters: ${ActorPath.ValidSymbols}.""") } } @@ -129,7 +124,8 @@ object ActorPath { case '%' if pos + 2 < len && isHexChar(s.charAt(pos + 1)) && isHexChar(s.charAt(pos + 2)) => validate(pos + 3) case _ => pos - } else ValidPathCode + } + else ValidPathCode if (len > 0 && s.charAt(0) != '$') validate(0) else 0 } @@ -164,52 +160,34 @@ sealed trait ActorPath extends Comparable[ActorPath] with Serializable { */ def address: Address - /** - * The name of the actor that this path refers to. - */ + /** The name of the actor that this path refers to. */ def name: String - /** - * The path for the parent actor. - */ + /** The path for the parent actor. */ def parent: ActorPath - /** - * Create a new child actor path. - */ + /** Create a new child actor path. */ def /(child: String): ActorPath - /** - * Java API: Create a new child actor path. - */ + /** Java API: Create a new child actor path. */ def child(child: String): ActorPath = /(child) - /** - * Recursively create a descendant’s path by appending all child names. - */ + /** Recursively create a descendant’s path by appending all child names. */ def /(child: Iterable[String]): ActorPath = child.foldLeft(this)((path, elem) => if (elem.isEmpty) path else path / elem) - /** - * Java API: Recursively create a descendant’s path by appending all child names. - */ + /** Java API: Recursively create a descendant’s path by appending all child names. */ def descendant(names: java.lang.Iterable[String]): ActorPath = /(immutableSeq(names)) - /** - * Sequence of names for this path from root to this. Performance implication: has to allocate a list. - */ + /** Sequence of names for this path from root to this. Performance implication: has to allocate a list. */ def elements: immutable.Iterable[String] - /** - * Java API: Sequence of names for this path from root to this. 
Performance implication: has to allocate a list. - */ + /** Java API: Sequence of names for this path from root to this. Performance implication: has to allocate a list. */ @nowarn("msg=deprecated") def getElements: java.lang.Iterable[String] = scala.collection.JavaConverters.asJavaIterableConverter(elements).asJava - /** - * Walk up the tree to obtain and return the RootActorPath. - */ + /** Walk up the tree to obtain and return the RootActorPath. */ def root: RootActorPath /** @@ -297,14 +275,10 @@ final case class RootActorPath(address: Address, name: String = "/") extends Act case _: ChildActorPath => 1 } - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] def uid: Int = ActorCell.undefinedUid - /** - * INTERNAL API - */ + /** INTERNAL API */ override private[akka] def withUid(uid: Int): ActorPath = if (uid == ActorCell.undefinedUid) this else throw new IllegalStateException(s"RootActorPath must have undefinedUid, [$uid != ${ActorCell.undefinedUid}") @@ -345,9 +319,7 @@ final class ChildActorPath private[akka] (val parent: ActorPath, val name: Strin rec(this) } - /** - * INTERNAL API - */ + /** INTERNAL API */ override private[akka] def withUid(uid: Int): ActorPath = if (uid == this.uid) this else new ChildActorPath(parent, name, uid) diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index f99f98d59f7..b27c6a23295 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -108,14 +108,10 @@ object ActorRef { abstract class ActorRef extends java.lang.Comparable[ActorRef] with Serializable { scalaRef: InternalActorRef with ActorRefScope => - /** - * Returns the path for this actor (from this actor up to the root actor). - */ + /** Returns the path for this actor (from this actor up to the root actor). */ def path: ActorPath - /** - * Comparison takes path and the unique id of the actor cell into account. 
- */ + /** Comparison takes path and the unique id of the actor cell into account. */ final def compareTo(other: ActorRef): Int = { val x = this.path.compareTo(other.path) if (x == 0) if (this.path.uid < other.path.uid) -1 else if (this.path.uid == other.path.uid) 0 else 1 @@ -167,9 +163,7 @@ abstract class ActorRef extends java.lang.Comparable[ActorRef] with Serializable else path.uid } - /** - * Equals takes path and the unique id of the actor cell into account. - */ + /** Equals takes path and the unique id of the actor cell into account. */ final override def equals(that: Any): Boolean = that match { case other: ActorRef => path.uid == other.path.uid && path == other.path case _ => false @@ -214,9 +208,7 @@ private[akka] trait ActorRefScope { def isLocal: Boolean } -/** - * Refs which are statically known to be local inherit from this Scope - */ +/** Refs which are statically known to be local inherit from this Scope */ private[akka] trait LocalRef extends ActorRefScope { final def isLocal = true } @@ -232,9 +224,7 @@ private[akka] trait RepointableRef extends ActorRefScope { def isStarted: Boolean } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object InternalActorRef { def isTemporaryRef(ref: ActorRef): Boolean = ref match { @@ -242,7 +232,9 @@ private[akka] trait RepointableRef extends ActorRefScope { (i.isLocal && i.isInstanceOf[PromiseActorRef]) || (!i.isLocal && i.path.elements.head == "temp") case unexpected => - throw new IllegalArgumentException(s"ActorRef is not internal: $unexpected") // will not happen, for exhaustiveness check + throw new IllegalArgumentException( + s"ActorRef is not internal: $unexpected" + ) // will not happen, for exhaustiveness check } } @@ -265,14 +257,10 @@ private[akka] trait RepointableRef extends ActorRefScope { def stop(): Unit def sendSystemMessage(message: SystemMessage): Unit - /** - * Get a reference to the actor ref provider which created this ref. 
- */ + /** Get a reference to the actor ref provider which created this ref. */ def provider: ActorRefProvider - /** - * Obtain parent of this ref; used by getChild for ".." paths. - */ + /** Obtain parent of this ref; used by getChild for ".." paths. */ def getParent: InternalActorRef /** @@ -311,9 +299,7 @@ private[akka] abstract class ActorRefWithCell extends InternalActorRef { this: A def getSingleChild(name: String): InternalActorRef } -/** - * This is an internal look-up failure token, not useful for anything else. - */ +/** This is an internal look-up failure token, not useful for anything else. */ private[akka] case object Nobody extends MinimalActorRef { override val path: RootActorPath = new RootActorPath(Address("akka", "all-systems"), "/Nobody") override def provider = throw new UnsupportedOperationException("Nobody does not provide") @@ -324,9 +310,7 @@ private[akka] case object Nobody extends MinimalActorRef { override protected def writeReplace(): AnyRef = serialized } -/** - * INTERNAL API - */ +/** INTERNAL API */ @SerialVersionUID(1L) private[akka] class SerializedNobody extends Serializable { @throws(classOf[java.io.ObjectStreamException]) private def readResolve(): AnyRef = Nobody @@ -378,9 +362,7 @@ private[akka] class LocalActorRef private[akka] ( @InternalApi override private[akka] def isTerminated: Boolean = actorCell.isTerminated - /** - * Starts the actor after initialization. - */ + /** Starts the actor after initialization. */ override def start(): Unit = actorCell.start() /** @@ -392,14 +374,10 @@ private[akka] class LocalActorRef private[akka] ( */ override def suspend(): Unit = actorCell.suspend() - /** - * Resumes a suspended actor. - */ + /** Resumes a suspended actor. 
*/ override def resume(causedByFailure: Throwable): Unit = actorCell.resume(causedByFailure) - /** - * Shuts down the actor and its message queue - */ + /** Shuts down the actor and its message queue */ override def stop(): Unit = actorCell.stop() override def getParent: InternalActorRef = actorCell.parent @@ -476,9 +454,7 @@ private[akka] final case class SerializedActorRef private (path: String) { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] object SerializedActorRef { def apply(actorRef: ActorRef): SerializedActorRef = { new SerializedActorRef(actorRef) @@ -524,9 +500,7 @@ private[akka] trait MinimalActorRef extends InternalActorRef with LocalRef { override protected def writeReplace(): AnyRef = SerializedIgnore } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object IgnoreActorRef { private val fakeSystemName = "local" @@ -536,23 +510,17 @@ private[akka] trait MinimalActorRef extends InternalActorRef with LocalRef { private val pathString = path.toString - /** - * Check if the passed `otherPath` is the same as IgnoreActorRef.path - */ + /** Check if the passed `otherPath` is the same as IgnoreActorRef.path */ def isIgnoreRefPath(otherPath: String): Boolean = pathString == otherPath - /** - * Check if the passed `otherPath` is the same as IgnoreActorRef.path - */ + /** Check if the passed `otherPath` is the same as IgnoreActorRef.path */ def isIgnoreRefPath(otherPath: ActorPath): Boolean = path == otherPath } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @SerialVersionUID(1L) private[akka] object SerializedIgnore extends Serializable { @throws(classOf[java.io.ObjectStreamException]) private def readResolve(): AnyRef = IgnoreActorRef @@ -612,18 +580,14 @@ final case class Dropped(message: Any, reason: String, sender: ActorRef, recipie object Dropped { - /** - * Convenience for creating `Dropped` without a `sender`. - */ + /** Convenience for creating `Dropped` without a `sender`. 
*/ def apply(message: Any, reason: String, recipient: ActorRef): Dropped = Dropped(message, reason, ActorRef.noSender, recipient) } object WrappedMessage { - /** - * Unwrap [[WrappedMessage]] recursively. - */ + /** Unwrap [[WrappedMessage]] recursively. */ @tailrec def unwrap(message: Any): Any = { message match { case w: WrappedMessage => unwrap(w.message) @@ -643,7 +607,7 @@ trait WrappedMessage { private[akka] object DeadLetterActorRef { @SerialVersionUID(1L) - class SerializedDeadLetterActorRef extends Serializable { //TODO implement as Protobuf for performance? + class SerializedDeadLetterActorRef extends Serializable { // TODO implement as Protobuf for performance? @throws(classOf[java.io.ObjectStreamException]) private def readResolve(): AnyRef = JavaSerializer.currentSystem.value.deadLetters } @@ -807,9 +771,7 @@ private[akka] class VirtualPathContainer( def removeChild(name: String): Unit = if (children.remove(name) eq null) log.warning("{} trying to remove non-child {}", path, name) - /** - * Remove a named child if it matches the ref. - */ + /** Remove a named child if it matches the ref. 
*/ protected def removeChild(name: String, ref: ActorRef): Unit = { val current = getChild(name) if (current eq null) @@ -844,9 +806,7 @@ private[akka] class VirtualPathContainer( } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object FunctionRef { def deadLetterMessageHandler(system: ActorSystem): (ActorRef, Any) => Unit = { (sender, msg) => // avoid infinite loop (StackOverflow) if FunctionRef is used for subscribing to DeadLetter from eventStream @@ -895,7 +855,7 @@ private[akka] class VirtualPathContainer( case u: Unwatch => remWatcher(u.watchee, u.watcher) case DeathWatchNotification(actorRef, _, _) => this.!(Terminated(actorRef)(existenceConfirmed = true, addressTerminated = false))(actorRef) - case _ => //ignore all other messages + case _ => // ignore all other messages } } @@ -906,13 +866,11 @@ private[akka] class VirtualPathContainer( private[this] var watching = ActorCell.emptyActorRefSet private[this] var _watchedBy: OptionVal[Set[ActorRef]] = OptionVal.Some(ActorCell.emptyActorRefSet) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi override private[akka] def isTerminated: Boolean = _watchedBy.isEmpty - //noinspection EmptyCheck + // noinspection EmptyCheck protected def sendTerminated(): Unit = { def unwatchWatched(watched: ActorRef): Unit = watched.asInstanceOf[InternalActorRef].sendSystemMessage(Unwatch(watched, this)) @@ -1078,9 +1036,7 @@ private[akka] class VirtualPathContainer( actorRef.asInstanceOf[InternalActorRef].sendSystemMessage(Unwatch(actorRef.asInstanceOf[InternalActorRef], this)) } - /** - * Query whether this FunctionRef is currently watching the given Actor. - */ + /** Query whether this FunctionRef is currently watching the given Actor. 
*/ def isWatching(actorRef: ActorRef): Boolean = this.synchronized { watching.contains(actorRef) } diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index cc6fd9e1185..9961fd254f0 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -44,32 +44,22 @@ import akka.util.OptionVal */ def rootGuardianAt(address: Address): ActorRef - /** - * Reference to the supervisor used for all top-level user actors. - */ + /** Reference to the supervisor used for all top-level user actors. */ def guardian: LocalActorRef - /** - * Reference to the supervisor used for all top-level system actors. - */ + /** Reference to the supervisor used for all top-level system actors. */ def systemGuardian: LocalActorRef - /** - * Dead letter destination for this provider. - */ + /** Dead letter destination for this provider. */ def deadLetters: ActorRef /** INTERNAL API */ @InternalApi private[akka] def ignoreRef: ActorRef - /** - * The root path for all actors within this actor system, not including any remote address information. - */ + /** The root path for all actors within this actor system, not including any remote address information. */ def rootPath: ActorPath - /** - * The Settings associated with this ActorRefProvider - */ + /** The Settings associated with this ActorRefProvider */ def settings: ActorSystem.Settings /** @@ -80,34 +70,22 @@ import akka.util.OptionVal */ private[akka] def init(system: ActorSystemImpl): Unit - /** - * The Deployer associated with this ActorRefProvider - */ + /** The Deployer associated with this ActorRefProvider */ def deployer: Deployer - /** - * Generates and returns a unique actor path below “/temp”. - */ + /** Generates and returns a unique actor path below “/temp”. */ def tempPath(): ActorPath - /** - * Generates and returns a unique actor path starting with `prefix` below “/temp”. 
- */ + /** Generates and returns a unique actor path starting with `prefix` below “/temp”. */ def tempPath(prefix: String): ActorPath - /** - * Returns the actor reference representing the “/temp” path. - */ + /** Returns the actor reference representing the “/temp” path. */ def tempContainer: InternalActorRef - /** - * INTERNAL API: Registers an actorRef at a path returned by tempPath(); do NOT pass in any other path. - */ + /** INTERNAL API: Registers an actorRef at a path returned by tempPath(); do NOT pass in any other path. */ private[akka] def registerTempActor(actorRef: InternalActorRef, path: ActorPath): Unit - /** - * Unregister a temporary actor from the “/temp” path (i.e. obtained from tempPath()); do NOT pass in any other path. - */ + /** Unregister a temporary actor from the “/temp” path (i.e. obtained from tempPath()); do NOT pass in any other path. */ def unregisterTempActor(path: ActorPath): Unit /** @@ -155,17 +133,13 @@ import akka.util.OptionVal */ def getExternalAddressFor(addr: Address): Option[Address] - /** - * Obtain the external address of the default transport. - */ + /** Obtain the external address of the default transport. 
*/ def getDefaultAddress: Address /** INTERNAL API */ @InternalApi private[akka] def serializationInformation: Serialization.Information - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def addressString: String @@ -181,19 +155,13 @@ import akka.util.OptionVal "implicit ActorRefFactory required: if outside of an Actor you need an implicit ActorSystem, inside of an actor this should be the implicit ActorContext") trait ActorRefFactory { - /** - * INTERNAL API - */ + /** INTERNAL API */ protected def systemImpl: ActorSystemImpl - /** - * INTERNAL API - */ + /** INTERNAL API */ protected def provider: ActorRefProvider - /** - * Returns the default MessageDispatcher associated with this ActorRefFactory - */ + /** Returns the default MessageDispatcher associated with this ActorRefFactory */ implicit def dispatcher: ExecutionContextExecutor /** @@ -203,9 +171,7 @@ trait ActorRefFactory { */ protected def guardian: InternalActorRef - /** - * INTERNAL API - */ + /** INTERNAL API */ protected def lookupRoot: InternalActorRef /** @@ -277,14 +243,10 @@ trait ActorRefFactory { def stop(actor: ActorRef): Unit } -/** - * Internal Akka use only, used in implementation of system.stop(child). - */ +/** Internal Akka use only, used in implementation of system.stop(child). 
*/ private[akka] final case class StopChild(child: ActorRef) -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] object SystemGuardian { /** @@ -318,9 +280,7 @@ private[akka] object LocalActorRefProvider { override def preRestart(cause: Throwable, msg: Option[Any]): Unit = {} } - /** - * System guardian - */ + /** System guardian */ private class SystemGuardian(override val supervisorStrategy: SupervisorStrategy, val guardian: ActorRef) extends Actor with RequiresMessageQueue[UnboundedMessageQueueSemantics] { @@ -449,13 +409,12 @@ private[akka] class LocalActorRefProvider private[akka] ( override def stop(): Unit = { causeOfTermination.trySuccess( - Terminated(provider.rootGuardian)(existenceConfirmed = true, addressTerminated = true)) //Idempotent + Terminated(provider.rootGuardian)(existenceConfirmed = true, addressTerminated = true) + ) // Idempotent terminationPromise.completeWith(causeOfTermination.future) // Signal termination downstream, idempotent } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi override private[akka] def isTerminated: Boolean = !isWalking @@ -472,7 +431,10 @@ private[akka] class LocalActorRefProvider private[akka] ( log.error(ex, s"guardian $child failed, shutting down!") causeOfTermination.tryFailure(ex) child.stop() - case Supervise(_, _) => // TODO register child in some map to keep track of it and enable shutdown after all dead + case Supervise( + _, + _ + ) => // TODO register child in some map to keep track of it and enable shutdown after all dead case _: DeathWatchNotification => stop() case _ => log.error(s"$this received unexpected system message [$message]") } @@ -505,23 +467,16 @@ private[akka] class LocalActorRefProvider private[akka] ( .createInstanceFor[SupervisorStrategyConfigurator](settings.SupervisorStrategyClass, EmptyImmutableSeq) .get - /** - * Overridable supervision strategy to be used by the “/user” guardian. 
- */ - protected def rootGuardianStrategy: SupervisorStrategy = OneForOneStrategy() { - case ex => - log.error(ex, "guardian failed, shutting down system") - SupervisorStrategy.Stop + /** Overridable supervision strategy to be used by the “/user” guardian. */ + protected def rootGuardianStrategy: SupervisorStrategy = OneForOneStrategy() { case ex => + log.error(ex, "guardian failed, shutting down system") + SupervisorStrategy.Stop } - /** - * Overridable supervision strategy to be used by the “/user” guardian. - */ + /** Overridable supervision strategy to be used by the “/user” guardian. */ protected def guardianStrategy: SupervisorStrategy = guardianSupervisorStrategyConfigurator.create() - /** - * Overridable supervision strategy to be used by the “/user” guardian. - */ + /** Overridable supervision strategy to be used by the “/user” guardian. */ protected def systemGuardianStrategy: SupervisorStrategy = SupervisorStrategy.defaultStrategy private def internalDispatcher = system.dispatchers.internalDispatcher @@ -630,9 +585,7 @@ private[akka] class LocalActorRefProvider private[akka] ( } } - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] def resolveActorRef(ref: InternalActorRef, pathElements: Iterable[String]): InternalActorRef = if (pathElements.isEmpty) { logDeser.debug("Resolve (deserialization) of empty path doesn't match an active actor, using deadLetters.") @@ -683,8 +636,8 @@ private[akka] class LocalActorRefProvider private[akka] ( case (Deploy.DispatcherSameAsParent, Deploy.NoMailboxGiven) => props.withDispatcher(parentDispatcher) case (dsp, Deploy.NoMailboxGiven) => props.withDispatcher(dsp) case (Deploy.NoDispatcherGiven, mbx) => props.withMailbox(mbx) - case (Deploy.DispatcherSameAsParent, mbx) => props.withDispatcher(parentDispatcher).withMailbox(mbx) - case (dsp, mbx) => props.withDispatcher(dsp).withMailbox(mbx) + case (Deploy.DispatcherSameAsParent, mbx) => props.withDispatcher(parentDispatcher).withMailbox(mbx) + case (dsp, mbx) 
=> props.withDispatcher(dsp).withMailbox(mbx) } case _ => // no deployment config found diff --git a/akka-actor/src/main/scala/akka/actor/ActorSelection.scala b/akka-actor/src/main/scala/akka/actor/ActorSelection.scala index c75e96ee1dd..eddb34ef2ab 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSelection.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSelection.scala @@ -95,7 +95,6 @@ abstract class ActorSelection extends Serializable { * if such an actor exists. It is completed with failure [[ActorNotFound]] if * no such actor exists or the identification didn't complete within the * supplied `timeout`. - * */ def resolveOne(timeout: java.time.Duration): CompletionStage[ActorRef] = { import JavaDurationConverters._ @@ -112,14 +111,10 @@ abstract class ActorSelection extends Serializable { builder.toString } - /** - * The [[akka.actor.ActorPath]] of the anchor actor. - */ + /** The [[akka.actor.ActorPath]] of the anchor actor. */ def anchorPath: ActorPath = anchor.path - /** - * String representation of the path elements, starting with "/" and separated with "/". - */ + /** String representation of the path elements, starting with "/" and separated with "/". */ def pathString: String = path.mkString("/", "/", "") /** @@ -161,7 +156,7 @@ abstract class ActorSelection extends Serializable { * allowing for broadcasting of messages to that section. 
*/ object ActorSelection { - //This cast is safe because the self-type of ActorSelection requires that it mixes in ScalaActorSelection + // This cast is safe because the self-type of ActorSelection requires that it mixes in ScalaActorSelection implicit def toScala(sel: ActorSelection): ScalaActorSelection = sel.asInstanceOf[ScalaActorSelection] /** @@ -297,33 +292,25 @@ private[akka] final case class ActorSelectionMessage( override def message: Any = msg } -/** - * INTERNAL API - */ +/** INTERNAL API */ @nowarn("msg=@SerialVersionUID has no effect on traits") @SerialVersionUID(1L) private[akka] sealed trait SelectionPathElement -/** - * INTERNAL API - */ +/** INTERNAL API */ @SerialVersionUID(2L) private[akka] final case class SelectChildName(name: String) extends SelectionPathElement { override def toString: String = name } -/** - * INTERNAL API - */ +/** INTERNAL API */ @SerialVersionUID(2L) private[akka] final case class SelectChildPattern(patternStr: String) extends SelectionPathElement { val pattern: Pattern = Helpers.makePattern(patternStr) override def toString: String = patternStr } -/** - * INTERNAL API - */ +/** INTERNAL API */ @SerialVersionUID(2L) private[akka] case object SelectParent extends SelectionPathElement { override def toString: String = ".." 
diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index d88fd65ce31..bc4c18c2e1c 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -56,9 +56,7 @@ object BootstrapSetup { defaultExecutionContext: Option[ExecutionContext]): BootstrapSetup = new BootstrapSetup(classLoader, config, defaultExecutionContext) - /** - * Scala API: Short for using custom config but keeping default classloader and default execution context - */ + /** Scala API: Short for using custom config but keeping default classloader and default execution context */ def apply(config: Config): BootstrapSetup = apply(None, Some(config), None) /** @@ -72,9 +70,7 @@ object BootstrapSetup { defaultExecutionContext: Optional[ExecutionContext]): BootstrapSetup = apply(classLoader.asScala, config.asScala, defaultExecutionContext.asScala) - /** - * Java API: Short for using custom config but keeping default classloader and default execution context - */ + /** Java API: Short for using custom config but keeping default classloader and default execution context */ def create(config: Config): BootstrapSetup = apply(config) /** @@ -106,19 +102,13 @@ object ProviderSelection { case object Cluster extends ProviderSelection("cluster", ClusterActorRefProvider, hasCluster = true) final case class Custom(override val fqcn: String) extends ProviderSelection("custom", fqcn, hasCluster = false) - /** - * JAVA API - */ + /** JAVA API */ def local(): ProviderSelection = Local - /** - * JAVA API - */ + /** JAVA API */ def remote(): ProviderSelection = Remote - /** - * JAVA API - */ + /** JAVA API */ def cluster(): ProviderSelection = Cluster /** INTERNAL API */ @@ -315,9 +305,7 @@ object ActorSystem { defaultExecutionContext: Option[ExecutionContext] = None): ActorSystem = apply(name, ActorSystemSetup(BootstrapSetup(classLoader, config, defaultExecutionContext))) - /** - * 
INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] object Settings { /** @@ -338,7 +326,7 @@ object ActorSystem { val loggingFilterAlreadyConfigured = configuredLoggingFilter == slf4jLoggingFilterClassName || configuredLoggingFilter != classOf[ - DefaultLoggingFilter].getName + DefaultLoggingFilter].getName def newLoggingFilterConfStr = s"""$loggingFilterConfKey = "$slf4jLoggingFilterClassName"""" @@ -351,7 +339,7 @@ object ActorSystem { } else { val confKey = "akka.use-slf4j" if (config.hasPath(confKey) && config.getBoolean(confKey) && dynamicAccess.classIsOnClasspath( - slf4jLoggerClassName)) { + slf4jLoggerClassName)) { val newLoggers = slf4jLoggerClassName +: configuredLoggers.filterNot(_ == classOf[DefaultLogger].getName) val newLoggersConfStr = s"$loggersConfKey = [${newLoggers.mkString("\"", "\", \"", "\"")}]" val newConfStr = @@ -476,9 +464,7 @@ object ActorSystem { throw new akka.ConfigurationException( "Akka JAR version [" + Version + "] does not match the provided config version [" + ConfigVersion + "]") - /** - * Returns the String representation of the Config that this Settings is backed by - */ + /** Returns the String representation of the Config that this Settings is backed by */ override def toString: String = config.root.render } @@ -526,59 +512,37 @@ abstract class ActorSystem extends ActorRefFactory with ClassicActorSystemProvid */ def name: String - /** - * The core settings extracted from the supplied configuration. - */ + /** The core settings extracted from the supplied configuration. */ def settings: Settings - /** - * Log the configuration. - */ + /** Log the configuration. */ def logConfiguration(): Unit - /** - * Construct a path below the application guardian to be used with [[ActorSystem#actorSelection]]. - */ + /** Construct a path below the application guardian to be used with [[ActorSystem#actorSelection]]. */ def /(name: String): ActorPath - /** - * Java API: Create a new child actor path. 
- */ + /** Java API: Create a new child actor path. */ def child(child: String): ActorPath = /(child) - /** - * Construct a path below the application guardian to be used with [[ActorSystem#actorSelection]]. - */ + /** Construct a path below the application guardian to be used with [[ActorSystem#actorSelection]]. */ def /(name: Iterable[String]): ActorPath - /** - * Java API: Recursively create a descendant’s path by appending all child names. - */ + /** Java API: Recursively create a descendant’s path by appending all child names. */ def descendant(names: java.lang.Iterable[String]): ActorPath = /(immutableSeq(names)) - /** - * Start-up time in milliseconds since the epoch. - */ + /** Start-up time in milliseconds since the epoch. */ val startTime: Long = System.currentTimeMillis - /** - * Up-time of this actor system in seconds. - */ + /** Up-time of this actor system in seconds. */ def uptime: Long = (System.currentTimeMillis - startTime) / 1000 - /** - * Main event bus of this actor system, used for example for logging. - */ + /** Main event bus of this actor system, used for example for logging. */ def eventStream: EventStream - /** - * Java API: Main event bus of this actor system, used for example for logging. - */ + /** Java API: Main event bus of this actor system, used for example for logging. */ def getEventStream: EventStream = eventStream - /** - * Convenient logging adapter for logging to the [[ActorSystem#eventStream]]. - */ + /** Convenient logging adapter for logging to the [[ActorSystem#eventStream]]. */ def log: LoggingAdapter /** @@ -600,9 +564,7 @@ abstract class ActorSystem extends ActorRefFactory with ClassicActorSystemProvid */ def getScheduler: Scheduler = scheduler - /** - * Helper object for looking up configured dispatchers. - */ + /** Helper object for looking up configured dispatchers. 
*/ def dispatchers: Dispatchers /** @@ -621,9 +583,7 @@ abstract class ActorSystem extends ActorRefFactory with ClassicActorSystemProvid */ def getDispatcher: ExecutionContextExecutor = dispatcher - /** - * Helper object for looking up configured mailbox types. - */ + /** Helper object for looking up configured mailbox types. */ def mailboxes: Mailboxes /** @@ -724,19 +684,13 @@ abstract class ActorSystem extends ActorRefFactory with ClassicActorSystemProvid @DoNotInherit abstract class ExtendedActorSystem extends ActorSystem { - /** - * The ActorRefProvider is the only entity which creates all actor references within this actor system. - */ + /** The ActorRefProvider is the only entity which creates all actor references within this actor system. */ def provider: ActorRefProvider - /** - * The top-level supervisor of all actors created using system.actorOf(...). - */ + /** The top-level supervisor of all actors created using system.actorOf(...). */ def guardian: InternalActorRef - /** - * The top-level supervisor of all system-internal services like logging. - */ + /** The top-level supervisor of all system-internal services like logging. 
*/ def systemGuardian: InternalActorRef /** @@ -748,9 +702,7 @@ abstract class ExtendedActorSystem extends ActorSystem { */ def systemActorOf(props: Props, name: String): ActorRef - /** - * A ThreadFactory that can be used if the transport needs to create any Threads - */ + /** A ThreadFactory that can be used if the transport needs to create any Threads */ def threadFactory: ThreadFactory /** @@ -782,21 +734,15 @@ abstract class ExtendedActorSystem extends ActorSystem { */ def uid: Long - /** - * INTERNAL API: final step of `terminate()` - */ + /** INTERNAL API: final step of `terminate()` */ @InternalApi private[akka] def finalTerminate(): Unit - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def isTerminating(): Boolean } -/** - * Internal API - */ +/** Internal API */ @InternalApi private[akka] class ActorSystemImpl( val name: String, @@ -969,19 +915,20 @@ private[akka] class ActorSystemImpl( val scheduler: Scheduler = createScheduler() - val provider: ActorRefProvider = try { - val arguments = Vector( - classOf[String] -> name, - classOf[Settings] -> settings, - classOf[EventStream] -> eventStream, - classOf[DynamicAccess] -> dynamicAccess) - - dynamicAccess.createInstanceFor[ActorRefProvider](ProviderClass, arguments).get - } catch { - case NonFatal(e) => - Try(stopScheduler()) - throw e - } + val provider: ActorRefProvider = + try { + val arguments = Vector( + classOf[String] -> name, + classOf[Settings] -> settings, + classOf[EventStream] -> eventStream, + classOf[DynamicAccess] -> dynamicAccess) + + dynamicAccess.createInstanceFor[ActorRefProvider](ProviderClass, arguments).get + } catch { + case NonFatal(e) => + Try(stopScheduler()) + throw e + } def deadLetters: ActorRef = provider.deadLetters @@ -1059,28 +1006,29 @@ private[akka] class ActorSystemImpl( "The calling code expected that the ActorSystem was initialized but it wasn't yet. 
" + "This is probably a bug in the ActorSystem initialization sequence often related to initialization of extensions. " + "Please report at https://github.com/akka/akka/issues.") - private lazy val _start: this.type = try { - - registerOnTermination(stopScheduler()) - // the provider is expected to start default loggers, LocalActorRefProvider does this - provider.init(this) - // at this point it should be initialized "enough" for most extensions that we might want to guard against otherwise - _initialized = true - - if (settings.LogDeadLetters > 0) - logDeadLetterListener = Some(systemActorOf(Props[DeadLetterListener](), "deadLetterListener")) - eventStream.startUnsubscriber() - ManifestInfo(this).checkSameVersion("Akka", allModules, logWarning = true) - if (!terminating) - loadExtensions() - if (LogConfigOnStart) logConfiguration() - this - } catch { - case NonFatal(e) => - try terminate() - catch { case NonFatal(_) => Try(stopScheduler()) } - throw e - } + private lazy val _start: this.type = + try { + + registerOnTermination(stopScheduler()) + // the provider is expected to start default loggers, LocalActorRefProvider does this + provider.init(this) + // at this point it should be initialized "enough" for most extensions that we might want to guard against otherwise + _initialized = true + + if (settings.LogDeadLetters > 0) + logDeadLetterListener = Some(systemActorOf(Props[DeadLetterListener](), "deadLetterListener")) + eventStream.startUnsubscriber() + ManifestInfo(this).checkSameVersion("Akka", allModules, logWarning = true) + if (!terminating) + loadExtensions() + if (LogConfigOnStart) logConfiguration() + this + } catch { + case NonFatal(e) => + try terminate() + catch { case NonFatal(_) => Try(stopScheduler()) } + throw e + } def start(): this.type = _start def registerOnTermination[T](code: => T): Unit = { registerOnTermination(new Runnable { def run() = code }) } @@ -1129,7 +1077,7 @@ private[akka] class ActorSystemImpl( terminate() } - 
//#create-scheduler + // #create-scheduler /** * Create the scheduler service. This one needs one special behavior: if * Closeable, it MUST execute all outstanding tasks upon .close() in order @@ -1148,7 +1096,7 @@ private[akka] class ActorSystemImpl( classOf[LoggingAdapter] -> log, classOf[ThreadFactory] -> threadFactory.withName(threadFactory.name + "-scheduler"))) .get - //#create-scheduler + // #create-scheduler /* * This is called after the last actor has signaled its termination, i.e. @@ -1166,9 +1114,7 @@ private[akka] class ActorSystemImpl( // 3) the registered extension. private val extensions = new ConcurrentHashMap[ExtensionId[_], AnyRef] - /** - * Returns any extension registered to the specified Extension or returns null if not registered - */ + /** Returns any extension registered to the specified Extension or returns null if not registered */ @tailrec private def findExtension[T <: Extension](ext: ExtensionId[T]): T = extensions.get(ext) match { case c: CountDownLatch => @@ -1181,16 +1127,16 @@ private[akka] class ActorSystemImpl( "A serializer must not access the SerializationExtension from its constructor. Use lazy init." 
else "Could be deadlock due to cyclic initialization of extensions.")) } - findExtension(ext) //Registration in process, await completion and retry - case t: Throwable => throw t //Initialization failed, throw same again + findExtension(ext) // Registration in process, await completion and retry + case t: Throwable => throw t // Initialization failed, throw same again case other => - other.asInstanceOf[T] //could be a T or null, in which case we return the null as T + other.asInstanceOf[T] // could be a T or null, in which case we return the null as T } @tailrec final def registerExtension[T <: Extension](ext: ExtensionId[T]): T = { findExtension(ext) match { - case null => //Doesn't already exist, commence registration + case null => // Doesn't already exist, commence registration val inProcessOfRegistration = new CountDownLatch(1) extensions.putIfAbsent(ext, inProcessOfRegistration) match { // Signal that registration is in process case null => @@ -1199,18 +1145,26 @@ private[akka] class ActorSystemImpl( case null => throw new IllegalStateException(s"Extension instance created as 'null' for extension [$ext]") case instance => - extensions.replace(ext, inProcessOfRegistration, instance) //Replace our in process signal with the initialized extension - instance //Profit! + extensions.replace( + ext, + inProcessOfRegistration, + instance + ) // Replace our in process signal with the initialized extension + instance // Profit! 
} } catch { case t: Throwable => - extensions.replace(ext, inProcessOfRegistration, t) //In case shit hits the fan, remove the inProcess signal - throw t //Escalate to caller + extensions.replace( + ext, + inProcessOfRegistration, + t + ) // In case shit hits the fan, remove the inProcess signal + throw t // Escalate to caller } finally { - inProcessOfRegistration.countDown() //Always notify listeners of the inProcess signal + inProcessOfRegistration.countDown() // Always notify listeners of the inProcess signal } case _ => - registerExtension(ext) //Someone else is in process of registering an extension for this Extension, retry + registerExtension(ext) // Someone else is in process of registering an extension for this Extension, retry } case existing => existing.asInstanceOf[T] } @@ -1233,9 +1187,8 @@ private[akka] class ActorSystemImpl( def loadExtensions(key: String, throwOnLoadFail: Boolean): Unit = { immutableSeq(settings.config.getStringList(key)).foreach { fqcn => - dynamicAccess.getObjectFor[AnyRef](fqcn).recoverWith { - case firstProblem => - dynamicAccess.createInstanceFor[AnyRef](fqcn, Nil).recoverWith { case _ => Failure(firstProblem) } + dynamicAccess.getObjectFor[AnyRef](fqcn).recoverWith { case firstProblem => + dynamicAccess.createInstanceFor[AnyRef](fqcn, Nil).recoverWith { case _ => Failure(firstProblem) } } match { case Success(p: ExtensionIdProvider) => registerExtension(p.lookup) @@ -1314,16 +1267,14 @@ private[akka] class ActorSystemImpl( */ final def add(r: Runnable): Unit = { @tailrec def addRec(r: Runnable, p: Promise[T]): Unit = ref.get match { - case null => throw new RejectedExecutionException("ActorSystem already terminated.") + case null => throw new RejectedExecutionException("ActorSystem already terminated.") case some if ref.compareAndSet(some, p) => some.completeWith(p.future.andThen { case _ => r.run() }) case _ => addRec(r, p) } addRec(r, Promise[T]()) } - /** - * Returns a Future which will be completed once all registered 
callbacks have been executed. - */ + /** Returns a Future which will be completed once all registered callbacks have been executed. */ def terminationFuture: Future[T] = done.future } diff --git a/akka-actor/src/main/scala/akka/actor/Address.scala b/akka-actor/src/main/scala/akka/actor/Address.scala index a4a1a78cc62..b17830646c1 100644 --- a/akka-actor/src/main/scala/akka/actor/Address.scala +++ b/akka-actor/src/main/scala/akka/actor/Address.scala @@ -41,14 +41,10 @@ final case class Address private[akka] (protocol: String, system: String, host: Address(protocol, system, host, port) } - /** - * Java API: The hostname if specified or empty optional if not - */ + /** Java API: The hostname if specified or empty optional if not */ def getHost(): Optional[String] = host.asJava - /** - * Java API: The port if specified or empty optional if not - */ + /** Java API: The port if specified or empty optional if not */ def getPort(): Optional[Integer] = port.asJava.asInstanceOf[Optional[Integer]] /** @@ -89,7 +85,8 @@ final case class Address private[akka] (protocol: String, system: String, host: */ def hostPort: String = toString.substring(protocol.length + 3) - /** INTERNAL API + /** + * INTERNAL API * Check if the address is not created through `AddressFromURIString`, if there * are any unusual characters in the host string. 
*/ @@ -109,20 +106,14 @@ object Address { // if underscore and no dot after, then invalid val InvalidHostRegex = "_[^.]*$".r - /** - * Constructs a new Address with the specified protocol and system name - */ + /** Constructs a new Address with the specified protocol and system name */ def apply(protocol: String, system: String) = new Address(protocol, system) - /** - * Constructs a new Address with the specified protocol, system name, host and port - */ + /** Constructs a new Address with the specified protocol, system name, host and port */ def apply(protocol: String, system: String, host: String, port: Int) = new Address(protocol, system, Some(host), Some(port)) - /** - * `Address` ordering type class, sorts addresses by protocol, name, host and port. - */ + /** `Address` ordering type class, sorts addresses by protocol, name, host and port. */ implicit val addressOrdering: Ordering[Address] = Ordering.fromLessThan[Address] { (a, b) => if (a eq b) false else if (a.protocol != b.protocol) a.system.compareTo(b.protocol) < 0 @@ -167,9 +158,7 @@ object RelativeActorPath extends PathUtils { } } -/** - * This object serves as extractor for Scala and as address parser for Java. - */ +/** This object serves as extractor for Scala and as address parser for Java. */ object AddressFromURIString { def unapply(addr: String): Option[Address] = try unapply(new URI(addr)) @@ -189,23 +178,17 @@ object AddressFromURIString { else Address(uri.getScheme, uri.getUserInfo, uri.getHost, uri.getPort)) } - /** - * Try to construct an Address from the given String or throw a java.net.MalformedURLException. - */ + /** Try to construct an Address from the given String or throw a java.net.MalformedURLException. */ def apply(addr: String): Address = addr match { case AddressFromURIString(address) => address case _ => throw new MalformedURLException(addr) } - /** - * Java API: Try to construct an Address from the given String or throw a java.net.MalformedURLException. 
- */ + /** Java API: Try to construct an Address from the given String or throw a java.net.MalformedURLException. */ def parse(addr: String): Address = apply(addr) } -/** - * Given an ActorPath it returns the Address and the path elements if the path is well-formed - */ +/** Given an ActorPath it returns the Address and the path elements if the path is well-formed */ object ActorPathExtractor extends PathUtils { def unapply(addr: String): Option[(Address, immutable.Iterable[String])] = try { diff --git a/akka-actor/src/main/scala/akka/actor/ClassicActorSystemProvider.scala b/akka-actor/src/main/scala/akka/actor/ClassicActorSystemProvider.scala index 4d141b5811a..34577701079 100644 --- a/akka-actor/src/main/scala/akka/actor/ClassicActorSystemProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ClassicActorSystemProvider.scala @@ -15,9 +15,7 @@ import akka.annotation.InternalApi @DoNotInherit trait ClassicActorSystemProvider { - /** - * Allows access to the classic `akka.actor.ActorSystem` even for `akka.actor.typed.ActorSystem[_]`s. - */ + /** Allows access to the classic `akka.actor.ActorSystem` even for `akka.actor.typed.ActorSystem[_]`s. */ def classicSystem: ActorSystem } diff --git a/akka-actor/src/main/scala/akka/actor/CoordinatedShutdown.scala b/akka-actor/src/main/scala/akka/actor/CoordinatedShutdown.scala index 1336f80835b..6d290a8abaa 100644 --- a/akka-actor/src/main/scala/akka/actor/CoordinatedShutdown.scala +++ b/akka-actor/src/main/scala/akka/actor/CoordinatedShutdown.scala @@ -39,19 +39,13 @@ object CoordinatedShutdown extends ExtensionId[CoordinatedShutdown] with Extensi */ val PhaseBeforeServiceUnbind = "before-service-unbind" - /** - * Stop accepting new incoming requests in for example HTTP. - */ + /** Stop accepting new incoming requests in for example HTTP. */ val PhaseServiceUnbind = "service-unbind" - /** - * Wait for requests that are in progress to be completed. - */ + /** Wait for requests that are in progress to be completed. 
*/ val PhaseServiceRequestsDone = "service-requests-done" - /** - * Final shutdown of service endpoints. - */ + /** Final shutdown of service endpoints. */ val PhaseServiceStop = "service-stop" /** @@ -60,29 +54,19 @@ object CoordinatedShutdown extends ExtensionId[CoordinatedShutdown] with Extensi */ val PhaseBeforeClusterShutdown = "before-cluster-shutdown" - /** - * Graceful shutdown of the Cluster Sharding regions. - */ + /** Graceful shutdown of the Cluster Sharding regions. */ val PhaseClusterShardingShutdownRegion = "cluster-sharding-shutdown-region" - /** - * Emit the leave command for the node that is shutting down. - */ + /** Emit the leave command for the node that is shutting down. */ val PhaseClusterLeave = "cluster-leave" - /** - * Shutdown cluster singletons - */ + /** Shutdown cluster singletons */ val PhaseClusterExiting = "cluster-exiting" - /** - * Wait until exiting has been completed - */ + /** Wait until exiting has been completed */ val PhaseClusterExitingDone = "cluster-exiting-done" - /** - * Shutdown the cluster extension - */ + /** Shutdown the cluster extension */ val PhaseClusterShutdown = "cluster-shutdown" /** @@ -106,74 +90,46 @@ object CoordinatedShutdown extends ExtensionId[CoordinatedShutdown] with Extensi */ trait Reason - /** - * Scala API: The reason for the shutdown was unknown. Needed for backwards compatibility. - */ + /** Scala API: The reason for the shutdown was unknown. Needed for backwards compatibility. */ case object UnknownReason extends Reason - /** - * Java API: The reason for the shutdown was unknown. Needed for backwards compatibility. - */ + /** Java API: The reason for the shutdown was unknown. Needed for backwards compatibility. */ def unknownReason: Reason = UnknownReason - /** - * Scala API: The shutdown was initiated by ActorSystem.terminate. - */ + /** Scala API: The shutdown was initiated by ActorSystem.terminate. 
*/ case object ActorSystemTerminateReason extends Reason - /** - * Java API: The shutdown was initiated by ActorSystem.terminate. - */ + /** Java API: The shutdown was initiated by ActorSystem.terminate. */ def actorSystemTerminateReason: Reason = ActorSystemTerminateReason - /** - * Scala API: The shutdown was initiated by a JVM shutdown hook, e.g. triggered by SIGTERM. - */ + /** Scala API: The shutdown was initiated by a JVM shutdown hook, e.g. triggered by SIGTERM. */ case object JvmExitReason extends Reason - /** - * Java API: The shutdown was initiated by a JVM shutdown hook, e.g. triggered by SIGTERM. - */ + /** Java API: The shutdown was initiated by a JVM shutdown hook, e.g. triggered by SIGTERM. */ def jvmExitReason: Reason = JvmExitReason - /** - * Scala API: The shutdown was initiated by Cluster downing. - */ + /** Scala API: The shutdown was initiated by Cluster downing. */ case object ClusterDowningReason extends Reason - /** - * Java API: The shutdown was initiated by Cluster downing. - */ + /** Java API: The shutdown was initiated by Cluster downing. */ def clusterDowningReason: Reason = ClusterDowningReason - /** - * Scala API: The shutdown was initiated by a failure to join a seed node. - */ + /** Scala API: The shutdown was initiated by a failure to join a seed node. */ case object ClusterJoinUnsuccessfulReason extends Reason - /** - * Java API: The shutdown was initiated by a failure to join a seed node. - */ + /** Java API: The shutdown was initiated by a failure to join a seed node. 
*/ def clusterJoinUnsuccessfulReason: Reason = ClusterJoinUnsuccessfulReason - /** - * Scala API: The shutdown was initiated by a configuration clash within the existing cluster and the joining node - */ + /** Scala API: The shutdown was initiated by a configuration clash within the existing cluster and the joining node */ case object IncompatibleConfigurationDetectedReason extends Reason - /** - * Java API: The shutdown was initiated by a configuration clash within the existing cluster and the joining node - */ + /** Java API: The shutdown was initiated by a configuration clash within the existing cluster and the joining node */ def incompatibleConfigurationDetectedReason: Reason = IncompatibleConfigurationDetectedReason - /** - * Scala API: The shutdown was initiated by Cluster leaving. - */ + /** Scala API: The shutdown was initiated by Cluster leaving. */ case object ClusterLeavingReason extends Reason - /** - * Java API: The shutdown was initiated by Cluster leaving. - */ + /** Java API: The shutdown was initiated by Cluster leaving. 
*/ def clusterLeavingReason: Reason = ClusterLeavingReason @volatile private var runningJvmHook = false @@ -286,18 +242,14 @@ object CoordinatedShutdown extends ExtensionId[CoordinatedShutdown] with Extensi } } - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] final case class Phase( dependsOn: Set[String], timeout: FiniteDuration, recover: Boolean, enabled: Boolean) - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] def phasesFromConfig(conf: Config): Map[String, Phase] = { import akka.util.ccompat.JavaConverters._ val defaultPhaseTimeout = conf.getString("default-phase-timeout") @@ -321,9 +273,7 @@ object CoordinatedShutdown extends ExtensionId[CoordinatedShutdown] with Extensi } } - /** - * INTERNAL API: https://en.wikipedia.org/wiki/Topological_sorting - */ + /** INTERNAL API: https://en.wikipedia.org/wiki/Topological_sorting */ private[akka] def topologicalSort(phases: Map[String, Phase]): List[String] = { var result = List.empty[String] var unmarked = phases.keySet ++ phases.values.flatMap(_.dependsOn) // in case phase is not defined as key @@ -355,18 +305,14 @@ object CoordinatedShutdown extends ExtensionId[CoordinatedShutdown] with Extensi } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] trait JVMShutdownHooks { def addHook(t: Thread): Unit def removeHook(t: Thread): Boolean } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object JVMShutdownHooks extends JVMShutdownHooks { override def addHook(t: Thread): Unit = Runtime.getRuntime.addShutdownHook(t) @@ -460,8 +406,10 @@ final class CoordinatedShutdown private[akka] ( } nextTaskState match { case Cancelled => - registeredPhases - .merge(phaseName, StrictPhaseDefinition.empty, (previous, incoming) => previous.merge(incoming)) + registeredPhases.merge( + phaseName, + StrictPhaseDefinition.empty, + (previous, incoming) => previous.merge(incoming)) if (log.isDebugEnabled) { log.debug("Successfully cancelled CoordinatedShutdown 
task [{}] from phase [{}].", name, phaseName) } @@ -504,9 +452,8 @@ final class CoordinatedShutdown private[akka] ( def totalDuration(): FiniteDuration = { import akka.util.ccompat.JavaConverters._ - registeredPhases.keySet.asScala.foldLeft(Duration.Zero) { - case (acc, phase) => - acc + timeout(phase) + registeredPhases.keySet.asScala.foldLeft(Duration.Zero) { case (acc, phase) => + acc + timeout(phase) } } @@ -527,15 +474,11 @@ final class CoordinatedShutdown private[akka] ( private val _jvmHooksLatch = new AtomicReference[CountDownLatch](new CountDownLatch(0)) @volatile private var actorSystemJvmHook: OptionVal[Cancellable] = OptionVal.None - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] lazy val terminationWatcher = system.systemActorOf(CoordinatedShutdownTerminationWatcher.props, "coordinatedShutdownTerminationWatcher") - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] def jvmHooksLatch: CountDownLatch = _jvmHooksLatch.get /** @@ -729,25 +672,26 @@ final class CoordinatedShutdown private[akka] ( val result = phaseDef.run(recoverEnabled) val timeout = phases(phaseName).timeout val deadline = Deadline.now + timeout - val timeoutFut = try { - after(timeout, system.scheduler) { - if (phaseName == CoordinatedShutdown.PhaseActorSystemTerminate && deadline.hasTimeLeft()) { - // too early, i.e. triggered by system termination + val timeoutFut = + try { + after(timeout, system.scheduler) { + if (phaseName == CoordinatedShutdown.PhaseActorSystemTerminate && deadline.hasTimeLeft()) { + // too early, i.e. 
triggered by system termination + result + } else if (result.isCompleted) + Future.successful(Done) + else if (recoverEnabled) { + log.warning("Coordinated shutdown phase [{}] timed out after {}", phaseName, timeout) + Future.successful(Done) + } else + Future.failed( + new TimeoutException(s"Coordinated shutdown phase [$phaseName] timed out after $timeout")) + } + } catch { + case _: IllegalStateException => + // The call to `after` threw IllegalStateException, triggered by system termination result - } else if (result.isCompleted) - Future.successful(Done) - else if (recoverEnabled) { - log.warning("Coordinated shutdown phase [{}] timed out after {}", phaseName, timeout) - Future.successful(Done) - } else - Future.failed( - new TimeoutException(s"Coordinated shutdown phase [$phaseName] timed out after $timeout")) } - } catch { - case _: IllegalStateException => - // The call to `after` threw IllegalStateException, triggered by system termination - result - } Future.firstCompletedOf(List(result, timeoutFut)) } if (remaining.isEmpty) @@ -789,9 +733,7 @@ final class CoordinatedShutdown private[akka] ( throw new IllegalArgumentException(s"Unknown phase [$phase]. All phases must be defined in configuration") } - /** - * Sum of timeouts of all phases that have some task. - */ + /** Sum of timeouts of all phases that have some task. */ def totalTimeout(): FiniteDuration = { tasks.totalDuration() } diff --git a/akka-actor/src/main/scala/akka/actor/Deployer.scala b/akka-actor/src/main/scala/akka/actor/Deployer.scala index 31a3d1f0934..e4e52d7445d 100644 --- a/akka-actor/src/main/scala/akka/actor/Deployer.scala +++ b/akka-actor/src/main/scala/akka/actor/Deployer.scala @@ -20,9 +20,7 @@ object Deploy { final val NoMailboxGiven = "" val local = Deploy(scope = LocalScope) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] final val DispatcherSameAsParent = ".." 
def apply( @@ -76,19 +74,13 @@ final class Deploy( dispatcher: String, mailbox: String) = this(path, config, routerConfig, scope, dispatcher, mailbox, Set.empty) - /** - * Java API to create a Deploy with the given RouterConfig - */ + /** Java API to create a Deploy with the given RouterConfig */ def this(routing: RouterConfig) = this("", ConfigFactory.empty, routing) - /** - * Java API to create a Deploy with the given RouterConfig with Scope - */ + /** Java API to create a Deploy with the given RouterConfig with Scope */ def this(routing: RouterConfig, scope: Scope) = this("", ConfigFactory.empty, routing, scope) - /** - * Java API to create a Deploy with the given Scope - */ + /** Java API to create a Deploy with the given Scope */ def this(scope: Scope) = this("", ConfigFactory.empty, NoRouter, scope) /** @@ -185,17 +177,13 @@ abstract class LocalScope extends Scope @SerialVersionUID(1L) case object LocalScope extends LocalScope { - /** - * Java API: get the singleton instance - */ + /** Java API: get the singleton instance */ def getInstance = this def withFallback(other: Scope): Scope = this } -/** - * This is the default value and as such allows overrides. - */ +/** This is the default value and as such allows overrides. */ @nowarn("msg=@SerialVersionUID has no effect") @SerialVersionUID(1L) abstract class NoScopeGiven extends Scope @@ -203,15 +191,11 @@ abstract class NoScopeGiven extends Scope case object NoScopeGiven extends NoScopeGiven { def withFallback(other: Scope): Scope = other - /** - * Java API: get the singleton instance - */ + /** Java API: get the singleton instance */ def getInstance = this } -/** - * Deployer maps actor paths to actor deployments. - */ +/** Deployer maps actor paths to actor deployments. 
*/ private[akka] class Deployer(val settings: ActorSystem.Settings, val dynamicAccess: DynamicAccess) { import akka.util.ccompat.JavaConverters._ @@ -226,8 +210,8 @@ private[akka] class Deployer(val settings: ActorSystem.Settings, val dynamicAcce .root .unwrapped .asScala - .collect { - case (key, value: String) => (key -> value) + .collect { case (key, value: String) => + key -> value } .toMap diff --git a/akka-actor/src/main/scala/akka/actor/DynamicAccess.scala b/akka-actor/src/main/scala/akka/actor/DynamicAccess.scala index 60ee59673eb..df4befddadc 100644 --- a/akka-actor/src/main/scala/akka/actor/DynamicAccess.scala +++ b/akka-actor/src/main/scala/akka/actor/DynamicAccess.scala @@ -49,9 +49,7 @@ import akka.annotation.DoNotInherit */ def createInstanceFor[T: ClassTag](fqcn: String, args: immutable.Seq[(Class[_], AnyRef)]): Try[T] - /** - * Obtain the Scala “object” instance for the given fully-qualified class name, if there is one. - */ + /** Obtain the Scala “object” instance for the given fully-qualified class name, if there is one. */ def getObjectFor[T: ClassTag](fqcn: String): Try[T] /** diff --git a/akka-actor/src/main/scala/akka/actor/Extension.scala b/akka-actor/src/main/scala/akka/actor/Extension.scala index 09ed0ef964e..95f1d986d5e 100644 --- a/akka-actor/src/main/scala/akka/actor/Extension.scala +++ b/akka-actor/src/main/scala/akka/actor/Extension.scala @@ -71,16 +71,12 @@ trait Extension */ trait ExtensionId[T <: Extension] { - /** - * Returns an instance of the extension identified by this ExtensionId instance. - */ + /** Returns an instance of the extension identified by this ExtensionId instance. */ def apply(system: ActorSystem): T = { java.util.Objects.requireNonNull(system, "system must not be null!").registerExtension(this) } - /** - * Returns an instance of the extension identified by this ExtensionId instance. - */ + /** Returns an instance of the extension identified by this ExtensionId instance. 
*/ def apply(system: ClassicActorSystemProvider): T = apply(system.classicSystem) /** @@ -91,7 +87,6 @@ trait ExtensionId[T <: Extension] { * {{{ * override def get(system: ActorSystem): TheExtension = super.get(system) * }}} - * */ def get(system: ActorSystem): T = apply(system) @@ -103,7 +98,6 @@ trait ExtensionId[T <: Extension] { * {{{ * override def get(system: ClassicActorSystemProvider): TheExtension = super.get(system) * }}} - * */ def get(system: ClassicActorSystemProvider): T = apply(system) @@ -117,9 +111,7 @@ trait ExtensionId[T <: Extension] { override final def equals(other: Any): Boolean = this eq other.asInstanceOf[AnyRef] } -/** - * Java API for ExtensionId - */ +/** Java API for ExtensionId */ abstract class AbstractExtensionId[T <: Extension] extends ExtensionId[T] /** @@ -129,8 +121,6 @@ abstract class AbstractExtensionId[T <: Extension] extends ExtensionId[T] */ trait ExtensionIdProvider { - /** - * Returns the canonical ExtensionId for this Extension - */ + /** Returns the canonical ExtensionId for this Extension */ def lookup: ExtensionId[_ <: Extension] } diff --git a/akka-actor/src/main/scala/akka/actor/FSM.scala b/akka-actor/src/main/scala/akka/actor/FSM.scala index 36904ab27d3..f7fc4df9a0b 100644 --- a/akka-actor/src/main/scala/akka/actor/FSM.scala +++ b/akka-actor/src/main/scala/akka/actor/FSM.scala @@ -58,14 +58,10 @@ object FSM { */ final case class UnsubscribeTransitionCallBack(actorRef: ActorRef) - /** - * Reason why this [[akka.actor.FSM]] is shutting down. - */ + /** Reason why this [[akka.actor.FSM]] is shutting down. */ sealed trait Reason - /** - * Default reason if calling `stop()`. - */ + /** Default reason if calling `stop()`. */ case object Normal extends Reason /** @@ -81,14 +77,10 @@ object FSM { */ final case class Failure(cause: Any) extends Reason - /** - * This case object is received in case of a state timeout. - */ + /** This case object is received in case of a state timeout. 
*/ case object StateTimeout - /** - * INTERNAL API - */ + /** INTERNAL API */ private final case class TimeoutMarker(generation: Long) /** INTERNAL API */ @@ -115,9 +107,7 @@ object FSM { override def repeat: Boolean = false } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] final case class Timer(name: String, msg: Any, mode: TimerMode, generation: Int, owner: AnyRef)( context: ActorContext) @@ -153,9 +143,7 @@ object FSM { } val `→` = `->` - /** - * Log Entry of the [[akka.actor.LoggingFSM]], can be obtained by calling `getLog`. - */ + /** Log Entry of the [[akka.actor.LoggingFSM]], can be obtained by calling `getLog`. */ final case class LogEntry[S, D](stateName: S, stateData: D, event: Any) /** Used by `forMax` to signal "cancel stateTimeout" */ @@ -173,9 +161,7 @@ object FSM { replies: List[Any]) extends State[S, D](stateName, stateData, timeout, stopReason, replies) { - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] override def notifies: Boolean = false override def copy( @@ -247,9 +233,7 @@ object FSM { case _ => throw new IndexOutOfBoundsException(n.toString) } - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] def notifies: Boolean = true // defined here to be able to override it in SilentState @@ -271,8 +255,8 @@ object FSM { */ def forMax(timeout: Duration): State[S, D] = timeout match { case f: FiniteDuration => copy(timeout = Some(f)) - case Duration.Inf => copy(timeout = SomeMaxFiniteDuration) // we map the Infinite duration to a special marker, - case _ => copy(timeout = None) // that means "cancel stateTimeout". This marker is needed + case Duration.Inf => copy(timeout = SomeMaxFiniteDuration) // we map the Infinite duration to a special marker, + case _ => copy(timeout = None) // that means "cancel stateTimeout". This marker is needed } // so we do not have to break source/binary compat. 
// TODO: Can be removed once we can break State#timeout signature to `Option[Duration]` @@ -306,16 +290,12 @@ object FSM { copy(stateData = nextStateData) } - /** - * INTERNAL API. - */ + /** INTERNAL API. */ private[akka] def withStopReason(reason: Reason): State[S, D] = { copy(stopReason = Some(reason)) } - /** - * INTERNAL API. - */ + /** INTERNAL API. */ private[akka] def withNotification(notifies: Boolean): State[S, D] = { if (notifies) State(stateName, stateData, timeout, stopReason, replies) @@ -442,9 +422,7 @@ trait FSM[S, D] extends Actor with Listeners with ActorLogging { */ val `->` = FSM.`->` - /** - * This case object is received in case of a state timeout. - */ + /** This case object is received in case of a state timeout. */ val StateTimeout = FSM.StateTimeout /** @@ -502,19 +480,13 @@ trait FSM[S, D] extends Actor with Listeners with ActorLogging { goto(currentState.stateName) .withNotification(false) // cannot directly use currentState because of the timeout field - /** - * Produce change descriptor to stop this FSM actor with reason "Normal". - */ + /** Produce change descriptor to stop this FSM actor with reason "Normal". */ final def stop(): State = stop(Normal) - /** - * Produce change descriptor to stop this FSM actor including specified reason. - */ + /** Produce change descriptor to stop this FSM actor including specified reason. */ final def stop(reason: Reason): State = stop(reason, currentState.stateData) - /** - * Produce change descriptor to stop this FSM actor including specified reason. - */ + /** Produce change descriptor to stop this FSM actor including specified reason. 
*/ final def stop(reason: Reason, stateData: D): State = stay().using(stateData).withStopReason(reason) final class TransformHelper(func: StateFunction) { @@ -639,9 +611,7 @@ trait FSM[S, D] extends Actor with Listeners with ActorLogging { */ final def setStateTimeout(state: S, timeout: Timeout): Unit = stateTimeouts(state) = timeout - /** - * INTERNAL API, used for testing. - */ + /** INTERNAL API, used for testing. */ private[akka] final def isStateTimerActive = timeoutFuture.isDefined /** @@ -710,24 +680,18 @@ trait FSM[S, D] extends Actor with Listeners with ActorLogging { if (currentState != null) makeTransition(currentState) else throw new IllegalStateException("You must call `startWith` before calling `initialize`") - /** - * Return current state name (i.e. object of type S) - */ + /** Return current state name (i.e. object of type S) */ final def stateName: S = { if (currentState != null) currentState.stateName else throw new IllegalStateException("You must call `startWith` before using `stateName`") } - /** - * Return current state data (i.e. object of type D) - */ + /** Return current state data (i.e. 
object of type D) */ final def stateData: D = if (currentState != null) currentState.stateData else throw new IllegalStateException("You must call `startWith` before using `stateData`") - /** - * Return next state data (available in onTransition handlers) - */ + /** Return next state data (available in onTransition handlers) */ final def nextStateData = nextState match { case null => throw new IllegalStateException("nextStateData is only available during onTransition") case x => x.stateData @@ -774,10 +738,9 @@ trait FSM[S, D] extends Actor with Listeners with ActorLogging { /* * unhandled event handler */ - private val handleEventDefault: StateFunction = { - case Event(value, _) => - log.warning("unhandled event " + value + " in state " + stateName) - stay() + private val handleEventDefault: StateFunction = { case Event(value, _) => + log.warning("unhandled event " + value + " in state " + stateName) + stay() } private var handleEvent: StateFunction = handleEventDefault diff --git a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala index 86dcb7ccc1b..a271e076969 100644 --- a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala +++ b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala @@ -21,14 +21,10 @@ import akka.japi.Util.immutableSeq import akka.util.JavaDurationConverters._ import akka.util.ccompat._ -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] sealed trait ChildStats -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] case object ChildNameReserved extends ChildStats /** @@ -44,7 +40,7 @@ final case class ChildRestartStats( def uid: Int = child.path.uid - //FIXME How about making ChildRestartStats immutable and then move these methods into the actual supervisor strategies? + // FIXME How about making ChildRestartStats immutable and then move these methods into the actual supervisor strategies? 
def requestRestartPermission(retriesWindow: (Option[Int], Option[Int])): Boolean = retriesWindow match { case (Some(retries), _) if retries < 1 => false @@ -97,9 +93,7 @@ final class StoppingSupervisorStrategy extends SupervisorStrategyConfigurator { trait SupervisorStrategyLowPriorityImplicits { this: SupervisorStrategy.type => - /** - * Implicit conversion from `Seq` of Cause-Directive pairs to a `Decider`. See makeDecider(causeDirective). - */ + /** Implicit conversion from `Seq` of Cause-Directive pairs to a `Decider`. See makeDecider(causeDirective). */ implicit def seqCauseDirective2Decider(trapExit: Iterable[CauseDirective]): Decider = makeDecider(trapExit) // the above would clash with seqThrowable2Decider for empty lists } @@ -112,14 +106,10 @@ object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits { private[akka] def logLevel: LogLevel } - /** - * Resumes message processing for the failed Actor - */ + /** Resumes message processing for the failed Actor */ case object Resume extends Resume(Logging.WarningLevel) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] sealed class Resume(private[akka] val logLevel: LogLevel) extends Directive @@ -129,15 +119,11 @@ object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits { */ case object Restart extends Restart(Logging.ErrorLevel) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] sealed class Restart(private[akka] val logLevel: LogLevel) extends Directive - /** - * Stops the Actor - */ + /** Stops the Actor */ case object Stop extends Stop(Logging.ErrorLevel) @InternalApi @@ -152,9 +138,7 @@ object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits { override private[akka] def logLevel = throw new IllegalStateException("Escalate is not logged") } - /** - * Java API: Returning this directive resumes message processing for the failed Actor - */ + /** Java API: Returning this directive resumes message processing for 
the failed Actor */ def resume = Resume // switch to return type `Directive` on next binary incompatible release /** @@ -178,9 +162,7 @@ object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits { */ def restart(logLevel: LogLevel): Directive = new Restart(logLevel) - /** - * Java API: Returning this directive stops the Actor - */ + /** Java API: Returning this directive stops the Actor */ def stop = Stop // switch to return type `Directive` on next binary incompatible release /** @@ -226,8 +208,8 @@ object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits { * terminated (one-for-one). */ final val stoppingStrategy: SupervisorStrategy = { - def stoppingDecider: Decider = { - case _: Exception => Stop + def stoppingDecider: Decider = { case _: Exception => + Stop } OneForOneStrategy()(stoppingDecider) } @@ -246,8 +228,8 @@ object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits { * Decider builder which just checks whether one of * the given Throwables matches the cause and restarts, otherwise escalates. 
*/ - def makeDecider(trapExit: immutable.Seq[Class[_ <: Throwable]]): Decider = { - case x => if (trapExit.exists(_.isInstance(x))) Restart else Escalate + def makeDecider(trapExit: immutable.Seq[Class[_ <: Throwable]]): Decider = { case x => + if (trapExit.exists(_.isInstance(x))) Restart else Escalate } /** @@ -268,9 +250,7 @@ object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits { { case x => directives.collectFirst { case (c, d) if c.isInstance(x) => d }.getOrElse(Escalate) } } - /** - * Converts a Java Decider into a Scala Decider - */ + /** Converts a Java Decider into a Scala Decider */ def makeDecider(func: JDecider): Decider = { case x => func(x) } /** @@ -281,13 +261,12 @@ object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits { */ private[akka] def sort(in: Iterable[CauseDirective]): immutable.Seq[CauseDirective] = in.foldLeft(new ArrayBuffer[CauseDirective](in.size)) { (buf, ca) => - buf.indexWhere(_._1.isAssignableFrom(ca._1)) match { - case -1 => buf.append(ca) - case x => buf.insert(x, ca) - } - buf + buf.indexWhere(_._1.isAssignableFrom(ca._1)) match { + case -1 => buf.append(ca) + case x => buf.insert(x, ca) } - .to(immutable.IndexedSeq) + buf + }.to(immutable.IndexedSeq) private[akka] def withinTimeRangeOption(withinTimeRange: Duration): Option[Duration] = if (withinTimeRange.isFinite && withinTimeRange >= Duration.Zero) Some(withinTimeRange) else None @@ -327,9 +306,7 @@ abstract class SupervisorStrategy { */ def handleChildTerminated(context: ActorContext, child: ActorRef, children: Iterable[ActorRef]): Unit - /** - * This method is called to act on the failure of a child: restart if the flag is true, stop otherwise. - */ + /** This method is called to act on the failure of a child: restart if the flag is true, stop otherwise. 
*/ def processFailure( context: ActorContext, restart: Boolean, @@ -378,9 +355,7 @@ abstract class SupervisorStrategy { } } - /** - * Logging of actor failures is done when this is `true`. - */ + /** Logging of actor failures is done when this is `true`. */ protected def loggingEnabled: Boolean = true /** @@ -462,9 +437,7 @@ case class AllForOneStrategy( import SupervisorStrategy._ - /** - * Java API - */ + /** Java API */ def this( maxNrOfRetries: Int, withinTimeRange: Duration, @@ -472,9 +445,7 @@ case class AllForOneStrategy( loggingEnabled: Boolean) = this(maxNrOfRetries, withinTimeRange, loggingEnabled)(SupervisorStrategy.makeDecider(decider)) - /** - * Java API - */ + /** Java API */ def this( maxNrOfRetries: Int, withinTimeRange: java.time.Duration, @@ -482,51 +453,35 @@ case class AllForOneStrategy( loggingEnabled: Boolean) = this(maxNrOfRetries, withinTimeRange.asScala, loggingEnabled)(SupervisorStrategy.makeDecider(decider)) - /** - * Java API - */ + /** Java API */ def this(maxNrOfRetries: Int, withinTimeRange: Duration, decider: SupervisorStrategy.JDecider) = this(maxNrOfRetries, withinTimeRange)(SupervisorStrategy.makeDecider(decider)) - /** - * Java API - */ + /** Java API */ def this(maxNrOfRetries: Int, withinTimeRange: java.time.Duration, decider: SupervisorStrategy.JDecider) = this(maxNrOfRetries, withinTimeRange.asScala)(SupervisorStrategy.makeDecider(decider)) - /** - * Java API - */ + /** Java API */ def this(maxNrOfRetries: Int, withinTimeRange: Duration, trapExit: JIterable[Class[_ <: Throwable]]) = this(maxNrOfRetries, withinTimeRange)(SupervisorStrategy.makeDecider(trapExit)) - /** - * Java API - */ + /** Java API */ def this(maxNrOfRetries: Int, withinTimeRange: java.time.Duration, trapExit: JIterable[Class[_ <: Throwable]]) = this(maxNrOfRetries, withinTimeRange.asScala)(SupervisorStrategy.makeDecider(trapExit)) - /** - * Java API: compatible with lambda expressions - */ + /** Java API: compatible with lambda expressions */ def 
this(maxNrOfRetries: Int, withinTimeRange: Duration, decider: SupervisorStrategy.Decider) = this(maxNrOfRetries = maxNrOfRetries, withinTimeRange = withinTimeRange)(decider) - /** - * Java API: compatible with lambda expressions - */ + /** Java API: compatible with lambda expressions */ def this(maxNrOfRetries: Int, withinTimeRange: java.time.Duration, decider: SupervisorStrategy.Decider) = this(maxNrOfRetries = maxNrOfRetries, withinTimeRange = withinTimeRange.asScala)(decider) - /** - * Java API: compatible with lambda expressions - */ + /** Java API: compatible with lambda expressions */ def this(loggingEnabled: Boolean, decider: SupervisorStrategy.Decider) = this(loggingEnabled = loggingEnabled)(decider) - /** - * Java API: compatible with lambda expressions - */ + /** Java API: compatible with lambda expressions */ def this(decider: SupervisorStrategy.Decider) = this()(decider) @@ -549,7 +504,7 @@ case class AllForOneStrategy( children: Iterable[ChildRestartStats]): Unit = { if (children.nonEmpty) { if (restart && children.forall(_.requestRestartPermission(retriesWindow))) - children.foreach(crs => restartChild(crs.child, cause, suspendFirst = (crs.child != child))) + children.foreach(crs => restartChild(crs.child, cause, suspendFirst = crs.child != child)) else for (c <- children) context.stop(c.child) } @@ -574,9 +529,7 @@ case class OneForOneStrategy( override val loggingEnabled: Boolean = true)(val decider: SupervisorStrategy.Decider) extends SupervisorStrategy { - /** - * Java API - */ + /** Java API */ def this( maxNrOfRetries: Int, withinTimeRange: Duration, @@ -584,9 +537,7 @@ case class OneForOneStrategy( loggingEnabled: Boolean) = this(maxNrOfRetries, withinTimeRange, loggingEnabled)(SupervisorStrategy.makeDecider(decider)) - /** - * Java API - */ + /** Java API */ def this( maxNrOfRetries: Int, withinTimeRange: java.time.Duration, @@ -594,48 +545,34 @@ case class OneForOneStrategy( loggingEnabled: Boolean) = this(maxNrOfRetries, 
withinTimeRange.asScala, loggingEnabled)(SupervisorStrategy.makeDecider(decider)) - /** - * Java API - */ + /** Java API */ def this(maxNrOfRetries: Int, withinTimeRange: Duration, decider: SupervisorStrategy.JDecider) = this(maxNrOfRetries, withinTimeRange)(SupervisorStrategy.makeDecider(decider)) - /** - * Java API - */ + /** Java API */ def this(maxNrOfRetries: Int, withinTimeRange: java.time.Duration, decider: SupervisorStrategy.JDecider) = this(maxNrOfRetries, withinTimeRange.asScala)(SupervisorStrategy.makeDecider(decider)) - /** - * Java API - */ + /** Java API */ def this(maxNrOfRetries: Int, withinTimeRange: Duration, trapExit: JIterable[Class[_ <: Throwable]]) = this(maxNrOfRetries, withinTimeRange)(SupervisorStrategy.makeDecider(trapExit)) - /** - * Java API - */ + /** Java API */ def this(maxNrOfRetries: Int, withinTimeRange: java.time.Duration, trapExit: JIterable[Class[_ <: Throwable]]) = this(maxNrOfRetries, withinTimeRange.asScala)(SupervisorStrategy.makeDecider(trapExit)) - /** - * Java API: compatible with lambda expressions - */ + /** Java API: compatible with lambda expressions */ def this(maxNrOfRetries: Int, withinTimeRange: Duration, decider: SupervisorStrategy.Decider) = this(maxNrOfRetries = maxNrOfRetries, withinTimeRange = withinTimeRange)(decider) - /** - * Java API: compatible with lambda expressions - */ + /** Java API: compatible with lambda expressions */ def this(maxNrOfRetries: Int, withinTimeRange: java.time.Duration, decider: SupervisorStrategy.Decider) = this(maxNrOfRetries = maxNrOfRetries, withinTimeRange = withinTimeRange.asScala)(decider) def this(loggingEnabled: Boolean, decider: SupervisorStrategy.Decider) = this(loggingEnabled = loggingEnabled)(decider) - /** - * Java API: Restart an infinite number of times. Compatible with lambda expressions. - */ + /** Java API: Restart an infinite number of times. Compatible with lambda expressions. 
*/ def this(decider: SupervisorStrategy.Decider) = this()(decider) @@ -662,6 +599,6 @@ case class OneForOneStrategy( if (restart && stats.requestRestartPermission(retriesWindow)) restartChild(child, cause, suspendFirst = false) else - context.stop(child) //TODO optimization to drop child here already? + context.stop(child) // TODO optimization to drop child here already? } } diff --git a/akka-actor/src/main/scala/akka/actor/IndirectActorProducer.scala b/akka-actor/src/main/scala/akka/actor/IndirectActorProducer.scala index a5364ff067c..c41457d7f8a 100644 --- a/akka-actor/src/main/scala/akka/actor/IndirectActorProducer.scala +++ b/akka-actor/src/main/scala/akka/actor/IndirectActorProducer.scala @@ -65,34 +65,26 @@ private[akka] object IndirectActorProducer { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] class CreatorFunctionConsumer(creator: () => Actor) extends IndirectActorProducer { override def actorClass = classOf[Actor] override def produce() = creator() } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] class CreatorConsumer(clazz: Class[_ <: Actor], creator: Creator[Actor]) extends IndirectActorProducer { override def actorClass = clazz override def produce() = creator.create() } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] class TypedCreatorFunctionConsumer(clz: Class[_ <: Actor], creator: () => Actor) extends IndirectActorProducer { override def actorClass = clz override def produce() = creator() } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] class ArgsReflectConstructor(clz: Class[_ <: Actor], args: immutable.Seq[Any]) extends IndirectActorProducer { private[this] val constructor = Reflect.findConstructor(clz, args) @@ -100,9 +92,7 @@ private[akka] class ArgsReflectConstructor(clz: Class[_ <: Actor], args: immutab override def produce() = Reflect.instantiate(constructor, args) } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] class NoArgsReflectConstructor(clz: Class[_ <: 
Actor]) extends IndirectActorProducer { Reflect.findConstructor(clz, List.empty) override def actorClass = clz diff --git a/akka-actor/src/main/scala/akka/actor/LightArrayRevolverScheduler.scala b/akka-actor/src/main/scala/akka/actor/LightArrayRevolverScheduler.scala index 41c74d6faa9..b6929a4bfc5 100644 --- a/akka-actor/src/main/scala/akka/actor/LightArrayRevolverScheduler.scala +++ b/akka-actor/src/main/scala/akka/actor/LightArrayRevolverScheduler.scala @@ -71,19 +71,13 @@ class LightArrayRevolverScheduler(config: Config, log: LoggingAdapter, threadFac */ protected def clock(): Long = System.nanoTime - /** - * Replaceable for testing. - */ + /** Replaceable for testing. */ protected def startTick: Int = 0 - /** - * Overridable for tests - */ + /** Overridable for tests */ protected def getShutdownTimeout: FiniteDuration = ShutdownTimeout - /** - * Overridable for tests - */ + /** Overridable for tests */ protected def waitNanos(nanos: Long): Unit = { // see https://www.javamex.com/tutorials/threads/sleep_issues.shtml val sleepMs = if (Helpers.isWindows) (nanos + 4999999) / 10000000 * 10 else (nanos + 999999) / 1000000 @@ -93,62 +87,64 @@ class LightArrayRevolverScheduler(config: Config, log: LoggingAdapter, threadFac } } - override def scheduleWithFixedDelay(initialDelay: FiniteDuration, delay: FiniteDuration)(runnable: Runnable)( - implicit executor: ExecutionContext): Cancellable = { + override def scheduleWithFixedDelay(initialDelay: FiniteDuration, delay: FiniteDuration)(runnable: Runnable)(implicit + executor: ExecutionContext): Cancellable = { checkMaxDelay(roundUp(delay).toNanos) super.scheduleWithFixedDelay(initialDelay, delay)(runnable) } - override def schedule(initialDelay: FiniteDuration, delay: FiniteDuration, runnable: Runnable)( - implicit executor: ExecutionContext): Cancellable = { + override def schedule(initialDelay: FiniteDuration, delay: FiniteDuration, runnable: Runnable)(implicit + executor: ExecutionContext): Cancellable = { 
checkMaxDelay(roundUp(delay).toNanos) - try new AtomicReference[Cancellable](InitialRepeatMarker) with Cancellable { self => - compareAndSet( - InitialRepeatMarker, - schedule( - executor, - new AtomicLong(clock() + initialDelay.toNanos) with Runnable { - override def run(): Unit = { - try { - runnable.run() - val driftNanos = clock() - getAndAdd(delay.toNanos) - if (self.get != null) - swap(schedule(executor, this, Duration.fromNanos(Math.max(delay.toNanos - driftNanos, 1)))) - } catch { - case _: SchedulerException => // ignore failure to enqueue or terminated target actor + try + new AtomicReference[Cancellable](InitialRepeatMarker) with Cancellable { self => + compareAndSet( + InitialRepeatMarker, + schedule( + executor, + new AtomicLong(clock() + initialDelay.toNanos) with Runnable { + override def run(): Unit = { + try { + runnable.run() + val driftNanos = clock() - getAndAdd(delay.toNanos) + if (self.get != null) + swap(schedule(executor, this, Duration.fromNanos(Math.max(delay.toNanos - driftNanos, 1)))) + } catch { + case _: SchedulerException => // ignore failure to enqueue or terminated target actor + } } - } - }, - roundUp(initialDelay))) + }, + roundUp(initialDelay))) - @tailrec private def swap(c: Cancellable): Unit = { - get match { - case null => if (c != null) c.cancel() - case old => if (!compareAndSet(old, c)) swap(c) + @tailrec private def swap(c: Cancellable): Unit = { + get match { + case null => if (c != null) c.cancel() + case old => if (!compareAndSet(old, c)) swap(c) + } } - } - final def cancel(): Boolean = { - @tailrec def tailrecCancel(): Boolean = { - get match { - case null => false - case c => - if (c.cancel()) compareAndSet(c, null) - else compareAndSet(c, null) || tailrecCancel() + final def cancel(): Boolean = { + @tailrec def tailrecCancel(): Boolean = { + get match { + case null => false + case c => + if (c.cancel()) compareAndSet(c, null) + else compareAndSet(c, null) || tailrecCancel() + } } + + tailrecCancel() } - 
tailrecCancel() + override def isCancelled: Boolean = get == null } - - override def isCancelled: Boolean = get == null - } catch { + catch { case cause @ SchedulerException(msg) => throw new IllegalStateException(msg, cause) } } - override def scheduleOnce(delay: FiniteDuration, runnable: Runnable)( - implicit executor: ExecutionContext): Cancellable = + override def scheduleOnce(delay: FiniteDuration, runnable: Runnable)(implicit + executor: ExecutionContext): Cancellable = try schedule(executor, runnable, roundUp(delay)) catch { case cause @ SchedulerException(msg) => throw new IllegalStateException(msg, cause) @@ -340,14 +336,10 @@ object LightArrayRevolverScheduler { private class TaskQueue extends AbstractNodeQueue[TaskHolder] - /** - * INTERNAL API - */ + /** INTERNAL API */ protected[actor] trait TimerTask extends Runnable with Cancellable - /** - * INTERNAL API - */ + /** INTERNAL API */ protected[actor] class TaskHolder(@volatile var task: Runnable, var ticks: Int, executionContext: ExecutionContext) extends TimerTask { @@ -355,7 +347,7 @@ object LightArrayRevolverScheduler { private final def extractTask(replaceWith: Runnable): Runnable = task match { case t @ (ExecutedTask | CancelledTask) => t - case x => if (unsafe.compareAndSwapObject(this, taskOffset, x, replaceWith)) x else extractTask(replaceWith) + case x => if (unsafe.compareAndSwapObject(this, taskOffset, x, replaceWith)) x else extractTask(replaceWith) } private[akka] final def executeTask(): Boolean = extractTask(ExecutedTask) match { diff --git a/akka-actor/src/main/scala/akka/actor/Props.scala b/akka-actor/src/main/scala/akka/actor/Props.scala index ebad66fa659..b2ea31b74d2 100644 --- a/akka-actor/src/main/scala/akka/actor/Props.scala +++ b/akka-actor/src/main/scala/akka/actor/Props.scala @@ -21,29 +21,19 @@ import akka.routing._ */ object Props extends AbstractProps { - /** - * The defaultCreator, simply throws an UnsupportedOperationException when applied, which is used when creating a 
Props - */ + /** The defaultCreator, simply throws an UnsupportedOperationException when applied, which is used when creating a Props */ final val defaultCreator: () => Actor = () => throw new UnsupportedOperationException("No actor creator specified!") - /** - * The defaultRoutedProps is NoRouter which is used when creating a Props - */ + /** The defaultRoutedProps is NoRouter which is used when creating a Props */ final val defaultRoutedProps: RouterConfig = NoRouter - /** - * The default Deploy instance which is used when creating a Props - */ + /** The default Deploy instance which is used when creating a Props */ final val defaultDeploy = Deploy() - /** - * A Props instance whose creator will create an actor that doesn't respond to any message - */ + /** A Props instance whose creator will create an actor that doesn't respond to any message */ final val empty = Props[EmptyActor]() - /** - * The default Props instance, uses the settings from the Props object starting with default*. - */ + /** The default Props instance, uses the settings from the Props object starting with default*. */ final val default = Props(defaultDeploy, classOf[CreatorFunctionConsumer], List(defaultCreator)) /** @@ -80,9 +70,7 @@ object Props extends AbstractProps { private def mkProps(classOfActor: Class[_], ctor: () => Actor): Props = Props(classOf[TypedCreatorFunctionConsumer], classOfActor, ctor) - /** - * Scala API: create a Props given a class and its constructor arguments. - */ + /** Scala API: create a Props given a class and its constructor arguments. 
*/ def apply(clazz: Class[_], args: Any*): Props = apply(defaultDeploy, clazz, args.toList) } @@ -123,9 +111,7 @@ final case class Props(deploy: Deploy, clazz: Class[_], args: immutable.Seq[Any] @transient private[this] var _cachedActorClass: Class[_ <: Actor] = _ - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] def producer: IndirectActorProducer = { if (_producer eq null) _producer = IndirectActorProducer(clazz, args) @@ -167,42 +153,30 @@ final case class Props(deploy: Deploy, clazz: Class[_], args: immutable.Seq[Any] */ def routerConfig: RouterConfig = deploy.routerConfig - /** - * Returns a new Props with the specified dispatcher set. - */ + /** Returns a new Props with the specified dispatcher set. */ def withDispatcher(d: String): Props = deploy.dispatcher match { case NoDispatcherGiven => copy(deploy = deploy.copy(dispatcher = d)) case x => if (x == d) this else copy(deploy = deploy.copy(dispatcher = d)) } - /** - * Returns a new Props with the specified mailbox set. - */ + /** Returns a new Props with the specified mailbox set. */ def withMailbox(m: String): Props = deploy.mailbox match { case NoMailboxGiven => copy(deploy = deploy.copy(mailbox = m)) case x => if (x == m) this else copy(deploy = deploy.copy(mailbox = m)) } - /** - * Returns a new Props with the specified router config set. - */ + /** Returns a new Props with the specified router config set. */ def withRouter(r: RouterConfig): Props = copy(deploy = deploy.copy(routerConfig = r)) - /** - * Returns a new Props with the specified deployment configuration. - */ + /** Returns a new Props with the specified deployment configuration. */ def withDeploy(d: Deploy): Props = copy(deploy = d.withFallback(deploy)) - /** - * Returns a new Props with the specified set of tags. - */ + /** Returns a new Props with the specified set of tags. */ @varargs def withActorTags(tags: String*): Props = withActorTags(tags.toSet) - /** - * Scala API: Returns a new Props with the specified set of tags. 
- */ + /** Scala API: Returns a new Props with the specified set of tags. */ def withActorTags(tags: Set[String]): Props = copy(deploy = deploy.withTags(tags)) diff --git a/akka-actor/src/main/scala/akka/actor/ReflectiveDynamicAccess.scala b/akka-actor/src/main/scala/akka/actor/ReflectiveDynamicAccess.scala index 357fa184ad0..276ea816b69 100644 --- a/akka-actor/src/main/scala/akka/actor/ReflectiveDynamicAccess.scala +++ b/akka-actor/src/main/scala/akka/actor/ReflectiveDynamicAccess.scala @@ -25,11 +25,11 @@ import akka.annotation.DoNotInherit class ReflectiveDynamicAccess(val classLoader: ClassLoader) extends DynamicAccess { override def getClassFor[T: ClassTag](fqcn: String): Try[Class[_ <: T]] = - Try[Class[_ <: T]]({ + Try[Class[_ <: T]] { val c = Class.forName(fqcn, false, classLoader).asInstanceOf[Class[_ <: T]] val t = implicitly[ClassTag[T]].runtimeClass if (t.isAssignableFrom(c)) c else throw new ClassCastException(t.toString + " is not assignable from " + c) - }) + } override def createInstanceFor[T: ClassTag](clazz: Class[_], args: immutable.Seq[(Class[_], AnyRef)]): Try[T] = Try { @@ -70,7 +70,9 @@ class ReflectiveDynamicAccess(val classLoader: ClassLoader) extends DynamicAcces case x if !t.isInstance(x) => throw new ClassCastException(fqcn + " is not a subtype of " + t) case x: T => x case unexpected => - throw new IllegalArgumentException(s"Unexpected module field: $unexpected") // will not happen, for exhaustiveness check + throw new IllegalArgumentException( + s"Unexpected module field: $unexpected" + ) // will not happen, for exhaustiveness check } }.recover { case i: InvocationTargetException if i.getTargetException ne null => throw i.getTargetException } } diff --git a/akka-actor/src/main/scala/akka/actor/Scheduler.scala b/akka-actor/src/main/scala/akka/actor/Scheduler.scala index 734d33071ce..5b3679e0e60 100644 --- a/akka-actor/src/main/scala/akka/actor/Scheduler.scala +++ b/akka-actor/src/main/scala/akka/actor/Scheduler.scala @@ -69,49 +69,51 @@ 
trait Scheduler { * * Note: For scheduling within actors `with Timers` should be preferred. */ - def scheduleWithFixedDelay(initialDelay: FiniteDuration, delay: FiniteDuration)(runnable: Runnable)( - implicit executor: ExecutionContext): Cancellable = { - try new AtomicReference[Cancellable](Cancellable.initialNotCancelled) with Cancellable { self => - compareAndSet( - Cancellable.initialNotCancelled, - scheduleOnce( - initialDelay, - new Runnable { - override def run(): Unit = { - try { - runnable.run() - if (self.get != null) - swap(scheduleOnce(delay, this)) - } catch { - // ignore failure to enqueue or terminated target actor - case _: SchedulerException => - case e: IllegalStateException if e.getCause != null && e.getCause.isInstanceOf[SchedulerException] => + def scheduleWithFixedDelay(initialDelay: FiniteDuration, delay: FiniteDuration)(runnable: Runnable)(implicit + executor: ExecutionContext): Cancellable = { + try + new AtomicReference[Cancellable](Cancellable.initialNotCancelled) with Cancellable { self => + compareAndSet( + Cancellable.initialNotCancelled, + scheduleOnce( + initialDelay, + new Runnable { + override def run(): Unit = { + try { + runnable.run() + if (self.get != null) + swap(scheduleOnce(delay, this)) + } catch { + // ignore failure to enqueue or terminated target actor + case _: SchedulerException => + case e: IllegalStateException if e.getCause != null && e.getCause.isInstanceOf[SchedulerException] => + } } - } - })) + })) - @tailrec private def swap(c: Cancellable): Unit = { - get match { - case null => if (c != null) c.cancel() - case old => if (!compareAndSet(old, c)) swap(c) + @tailrec private def swap(c: Cancellable): Unit = { + get match { + case null => if (c != null) c.cancel() + case old => if (!compareAndSet(old, c)) swap(c) + } } - } - final def cancel(): Boolean = { - @tailrec def tailrecCancel(): Boolean = { - get match { - case null => false - case c => - if (c.cancel()) compareAndSet(c, null) - else compareAndSet(c, null) 
|| tailrecCancel() + final def cancel(): Boolean = { + @tailrec def tailrecCancel(): Boolean = { + get match { + case null => false + case c => + if (c.cancel()) compareAndSet(c, null) + else compareAndSet(c, null) || tailrecCancel() + } } + + tailrecCancel() } - tailrecCancel() + override def isCancelled: Boolean = get == null } - - override def isCancelled: Boolean = get == null - } catch { + catch { case SchedulerException(msg) => throw new IllegalStateException(msg) } } @@ -166,10 +168,7 @@ trait Scheduler { initialDelay: FiniteDuration, delay: FiniteDuration, receiver: ActorRef, - message: Any)( - implicit - executor: ExecutionContext, - sender: ActorRef = Actor.noSender): Cancellable = { + message: Any)(implicit executor: ExecutionContext, sender: ActorRef = Actor.noSender): Cancellable = { scheduleWithFixedDelay(initialDelay, delay)(new Runnable { def run(): Unit = { receiver ! message @@ -237,8 +236,8 @@ trait Scheduler { * Note: For scheduling within actors `with Timers` should be preferred. 
*/ @nowarn("msg=deprecated") - final def scheduleAtFixedRate(initialDelay: FiniteDuration, interval: FiniteDuration)(runnable: Runnable)( - implicit executor: ExecutionContext): Cancellable = + final def scheduleAtFixedRate(initialDelay: FiniteDuration, interval: FiniteDuration)(runnable: Runnable)(implicit + executor: ExecutionContext): Cancellable = schedule(initialDelay, interval, runnable)(executor) /** @@ -310,10 +309,7 @@ trait Scheduler { initialDelay: FiniteDuration, interval: FiniteDuration, receiver: ActorRef, - message: Any)( - implicit - executor: ExecutionContext, - sender: ActorRef = Actor.noSender): Cancellable = + message: Any)(implicit executor: ExecutionContext, sender: ActorRef = Actor.noSender): Cancellable = schedule(initialDelay, interval, receiver, message) /** @@ -351,16 +347,13 @@ trait Scheduler { scheduleAtFixedRate(initialDelay.asScala, interval.asScala, receiver, message)(executor, sender) } - /** - * Deprecated API: See [[Scheduler#scheduleWithFixedDelay]] or [[Scheduler#scheduleAtFixedRate]]. - */ + /** Deprecated API: See [[Scheduler#scheduleWithFixedDelay]] or [[Scheduler#scheduleAtFixedRate]]. */ @deprecated( "Use scheduleWithFixedDelay or scheduleAtFixedRate instead. This has the same semantics as " + "scheduleAtFixedRate, but scheduleWithFixedDelay is often preferred.", since = "2.6.0") @nowarn("msg=deprecated") - final def schedule(initialDelay: FiniteDuration, interval: FiniteDuration, receiver: ActorRef, message: Any)( - implicit + final def schedule(initialDelay: FiniteDuration, interval: FiniteDuration, receiver: ActorRef, message: Any)(implicit executor: ExecutionContext, sender: ActorRef = Actor.noSender): Cancellable = schedule( @@ -374,9 +367,7 @@ trait Scheduler { } }) - /** - * Deprecated API: See [[Scheduler#scheduleWithFixedDelay]] or [[Scheduler#scheduleAtFixedRate]]. - */ + /** Deprecated API: See [[Scheduler#scheduleWithFixedDelay]] or [[Scheduler#scheduleAtFixedRate]]. 
*/ @deprecated( "Use scheduleWithFixedDelay or scheduleAtFixedRate instead. This has the same semantics as " + "scheduleAtFixedRate, but scheduleWithFixedDelay is often preferred.", @@ -392,37 +383,30 @@ trait Scheduler { schedule(initialDelay.asScala, interval.asScala, receiver, message)(executor, sender) } - /** - * Deprecated API: See [[Scheduler#scheduleWithFixedDelay]] or [[Scheduler#scheduleAtFixedRate]]. - */ + /** Deprecated API: See [[Scheduler#scheduleWithFixedDelay]] or [[Scheduler#scheduleAtFixedRate]]. */ @deprecated( "Use scheduleWithFixedDelay or scheduleAtFixedRate instead. This has the same semantics as " + "scheduleAtFixedRate, but scheduleWithFixedDelay is often preferred.", since = "2.6.0") - final def schedule(initialDelay: FiniteDuration, interval: FiniteDuration)(f: => Unit)( - implicit + final def schedule(initialDelay: FiniteDuration, interval: FiniteDuration)(f: => Unit)(implicit executor: ExecutionContext): Cancellable = schedule(initialDelay, interval, new Runnable { override def run(): Unit = f }) - /** - * Deprecated API: See [[Scheduler#scheduleWithFixedDelay]] or [[Scheduler#scheduleAtFixedRate]]. - */ + /** Deprecated API: See [[Scheduler#scheduleWithFixedDelay]] or [[Scheduler#scheduleAtFixedRate]]. */ @deprecated( "Use scheduleWithFixedDelay or scheduleAtFixedRate instead. This has the same semantics as " + "scheduleAtFixedRate, but scheduleWithFixedDelay is often preferred.", since = "2.6.0") - def schedule(initialDelay: FiniteDuration, interval: FiniteDuration, runnable: Runnable)( - implicit executor: ExecutionContext): Cancellable + def schedule(initialDelay: FiniteDuration, interval: FiniteDuration, runnable: Runnable)(implicit + executor: ExecutionContext): Cancellable - /** - * Deprecated API: See [[Scheduler#scheduleWithFixedDelay]] or [[Scheduler#scheduleAtFixedRate]]. - */ + /** Deprecated API: See [[Scheduler#scheduleWithFixedDelay]] or [[Scheduler#scheduleAtFixedRate]]. 
*/ @deprecated( "Use scheduleWithFixedDelay or scheduleAtFixedRate instead. This has the same semantics as " + "scheduleAtFixedRate, but scheduleWithFixedDelay is often preferred.", since = "2.6.0") - def schedule(initialDelay: java.time.Duration, interval: java.time.Duration, runnable: Runnable)( - implicit executor: ExecutionContext): Cancellable = { + def schedule(initialDelay: java.time.Duration, interval: java.time.Duration, runnable: Runnable)(implicit + executor: ExecutionContext): Cancellable = { import JavaDurationConverters._ schedule(initialDelay.asScala, interval.asScala, runnable) } @@ -436,13 +420,14 @@ trait Scheduler { * * Note: For scheduling within actors `with Timers` should be preferred. */ - final def scheduleOnce(delay: FiniteDuration, receiver: ActorRef, message: Any)( - implicit + final def scheduleOnce(delay: FiniteDuration, receiver: ActorRef, message: Any)(implicit executor: ExecutionContext, sender: ActorRef = Actor.noSender): Cancellable = - scheduleOnce(delay, new Runnable { - override def run(): Unit = receiver ! message - }) + scheduleOnce( + delay, + new Runnable { + override def run(): Unit = receiver ! message + }) /** * Java API: Schedules a message to be sent once with a delay, i.e. a time period that has @@ -472,9 +457,7 @@ trait Scheduler { * * Note: For scheduling within actors `with Timers` should be preferred. 
*/ - final def scheduleOnce(delay: FiniteDuration)(f: => Unit)( - implicit - executor: ExecutionContext): Cancellable = + final def scheduleOnce(delay: FiniteDuration)(f: => Unit)(implicit executor: ExecutionContext): Cancellable = scheduleOnce(delay, new Runnable { override def run(): Unit = f }) /** @@ -543,9 +526,7 @@ object Cancellable { def isCancelled: Boolean = true } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] val initialNotCancelled: Cancellable = new Cancellable { def cancel(): Boolean = false def isCancelled: Boolean = false diff --git a/akka-actor/src/main/scala/akka/actor/Stash.scala b/akka-actor/src/main/scala/akka/actor/Stash.scala index 6711b8723c7..d0e98b3f823 100644 --- a/akka-actor/src/main/scala/akka/actor/Stash.scala +++ b/akka-actor/src/main/scala/akka/actor/Stash.scala @@ -60,9 +60,7 @@ import akka.dispatch.{ */ trait Stash extends UnrestrictedStash with RequiresMessageQueue[DequeBasedMessageQueueSemantics] -/** - * The `UnboundedStash` trait is a version of [[akka.actor.Stash]] that enforces an unbounded stash for you actor. - */ +/** The `UnboundedStash` trait is a version of [[akka.actor.Stash]] that enforces an unbounded stash for you actor. 
*/ trait UnboundedStash extends UnrestrictedStash with RequiresMessageQueue[UnboundedDequeBasedMessageQueueSemantics] /** @@ -266,9 +264,7 @@ private[akka] trait StashSupport { } } -/** - * Is thrown when the size of the Stash exceeds the capacity of the Stash - */ +/** Is thrown when the size of the Stash exceeds the capacity of the Stash */ class StashOverflowException(message: String, cause: Throwable = null) extends AkkaException(message, cause) with NoStackTrace diff --git a/akka-actor/src/main/scala/akka/actor/Timers.scala b/akka-actor/src/main/scala/akka/actor/Timers.scala index 10113c9e624..7bdb0501cc4 100644 --- a/akka-actor/src/main/scala/akka/actor/Timers.scala +++ b/akka-actor/src/main/scala/akka/actor/Timers.scala @@ -23,9 +23,7 @@ trait Timers extends Actor { private def actorCell = context.asInstanceOf[ActorCell] private val _timers = new TimerSchedulerImpl(context) - /** - * Start and cancel timers via the enclosed `TimerScheduler`. - */ + /** Start and cancel timers via the enclosed `TimerScheduler`. */ final def timers: TimerScheduler = _timers override protected[akka] def aroundPreRestart(reason: Throwable, message: Option[Any]): Unit = { @@ -67,9 +65,7 @@ trait Timers extends Actor { */ abstract class AbstractActorWithTimers extends AbstractActor with Timers { - /** - * Start and cancel timers via the enclosed `TimerScheduler`. - */ + /** Start and cancel timers via the enclosed `TimerScheduler`. */ final def getTimers: TimerScheduler = timers } @@ -278,18 +274,14 @@ abstract class AbstractActorWithTimers extends AbstractActor with Timers { interval: java.time.Duration): Unit = startTimerAtFixedRate(key, msg, initialDelay.asScala, interval.asScala) - /** - * Deprecated API: See [[TimerScheduler#startTimerWithFixedDelay]] or [[TimerScheduler#startTimerAtFixedRate]]. - */ + /** Deprecated API: See [[TimerScheduler#startTimerWithFixedDelay]] or [[TimerScheduler#startTimerAtFixedRate]]. 
*/ @deprecated( "Use startTimerWithFixedDelay or startTimerAtFixedRate instead. This has the same semantics as " + "startTimerAtFixedRate, but startTimerWithFixedDelay is often preferred.", since = "2.6.0") def startPeriodicTimer(key: Any, msg: Any, interval: FiniteDuration): Unit - /** - * Deprecated API: See [[TimerScheduler#startTimerWithFixedDelay]] or [[TimerScheduler#startTimerAtFixedRate]]. - */ + /** Deprecated API: See [[TimerScheduler#startTimerWithFixedDelay]] or [[TimerScheduler#startTimerAtFixedRate]]. */ @deprecated( "Use startTimerWithFixedDelay or startTimerAtFixedRate instead. This has the same semantics as " + "startTimerAtFixedRate, but startTimerWithFixedDelay is often preferred.", @@ -320,9 +312,7 @@ abstract class AbstractActorWithTimers extends AbstractActor with Timers { final def startSingleTimer(key: Any, msg: Any, timeout: java.time.Duration): Unit = startSingleTimer(key, msg, timeout.asScala) - /** - * Check if a timer with a given `key` is active. - */ + /** Check if a timer with a given `key` is active. */ def isTimerActive(key: Any): Boolean /** @@ -336,9 +326,7 @@ abstract class AbstractActorWithTimers extends AbstractActor with Timers { */ def cancel(key: Any): Unit - /** - * Cancel all timers. - */ + /** Cancel all timers. 
*/ def cancelAll(): Unit } diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/Children.scala b/akka-actor/src/main/scala/akka/actor/dungeon/Children.scala index 88080cbed5f..01d9192b373 100644 --- a/akka-actor/src/main/scala/akka/actor/dungeon/Children.scala +++ b/akka-actor/src/main/scala/akka/actor/dungeon/Children.scala @@ -123,9 +123,9 @@ private[akka] trait Children { this: ActorCell => } if (actor match { - case r: RepointableRef => r.isStarted - case _ => true - }) shallDie(actor) + case r: RepointableRef => r.isStarted + case _ => true + }) shallDie(actor) } actor.asInstanceOf[InternalActorRef].stop() } @@ -273,17 +273,16 @@ private[akka] trait Children { this: ActorCell => if (oldInfo eq null) Serialization.currentTransportInformation.value = system.provider.serializationInformation - props.args.forall( - arg => - arg == null || - arg.isInstanceOf[NoSerializationVerificationNeeded] || - settings.NoSerializationVerificationNeededClassPrefix.exists(arg.getClass.getName.startsWith) || { - val o = arg.asInstanceOf[AnyRef] - val serializer = ser.findSerializerFor(o) - val bytes = serializer.toBinary(o) - val ms = Serializers.manifestFor(serializer, o) - ser.deserialize(bytes, serializer.identifier, ms).get != null - }) + props.args.forall(arg => + arg == null || + arg.isInstanceOf[NoSerializationVerificationNeeded] || + settings.NoSerializationVerificationNeededClassPrefix.exists(arg.getClass.getName.startsWith) || { + val o = arg.asInstanceOf[AnyRef] + val serializer = ser.findSerializerFor(o) + val bytes = serializer.toBinary(o) + val ms = Serializers.manifestFor(serializer, o) + ser.deserialize(bytes, serializer.identifier, ms).get != null + }) } catch { case NonFatal(e) => throw new IllegalArgumentException(s"pre-creation serialization check failed at [${cell.self.path}/$name]", e) diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/ChildrenContainer.scala b/akka-actor/src/main/scala/akka/actor/dungeon/ChildrenContainer.scala index 
0174f9920cb..30adc18e308 100644 --- a/akka-actor/src/main/scala/akka/actor/dungeon/ChildrenContainer.scala +++ b/akka-actor/src/main/scala/akka/actor/dungeon/ChildrenContainer.scala @@ -9,9 +9,7 @@ import scala.collection.immutable import akka.actor.{ ActorRef, ChildNameReserved, ChildRestartStats, ChildStats, InvalidActorNameException } import akka.util.Collections.{ EmptyImmutableSeq, PartialImmutableValuesIterable } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] trait ChildrenContainer { def add(name: String, stats: ChildRestartStats): ChildrenContainer @@ -80,9 +78,7 @@ private[akka] object ChildrenContainer { override def unreserve(name: String): ChildrenContainer = this } - /** - * This is the empty container, shared among all leaf actors. - */ + /** This is the empty container, shared among all leaf actors. */ object EmptyChildrenContainer extends EmptyChildrenContainer { override def toString = "no children" } @@ -173,7 +169,8 @@ private[akka] object ChildrenContainer { if (t.isEmpty) reason match { case Termination => TerminatedChildrenContainer case _ => NormalChildrenContainer(c - child.path.name) - } else copy(c - child.path.name, t) + } + else copy(c - child.path.name, t) } override def getByName(name: String): Option[ChildStats] = c.get(name) diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/DeathWatch.scala b/akka-actor/src/main/scala/akka/actor/dungeon/DeathWatch.scala index 195a2490095..b71e0959838 100644 --- a/akka-actor/src/main/scala/akka/actor/dungeon/DeathWatch.scala +++ b/akka-actor/src/main/scala/akka/actor/dungeon/DeathWatch.scala @@ -29,12 +29,15 @@ private[akka] trait DeathWatch { this: ActorCell => maintainAddressTerminatedSubscription(a) { a.sendSystemMessage(Watch(a, self)) // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ updateWatching(a, None) - } else + } + else checkWatchingSame(a, None) } a case unexpected => - throw new IllegalArgumentException(s"ActorRef is not internal: $unexpected") // 
will not happen, for exhaustiveness check + throw new IllegalArgumentException( + s"ActorRef is not internal: $unexpected" + ) // will not happen, for exhaustiveness check } override final def watchWith(subject: ActorRef, msg: Any): ActorRef = subject match { @@ -44,12 +47,15 @@ private[akka] trait DeathWatch { this: ActorCell => maintainAddressTerminatedSubscription(a) { a.sendSystemMessage(Watch(a, self)) // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ updateWatching(a, Some(msg)) - } else + } + else checkWatchingSame(a, Some(msg)) } a case unexpected => - throw new IllegalArgumentException(s"ActorRef is not internal: $unexpected") // will not happen, for exhaustiveness check + throw new IllegalArgumentException( + s"ActorRef is not internal: $unexpected" + ) // will not happen, for exhaustiveness check } override final def unwatch(subject: ActorRef): ActorRef = subject match { @@ -63,7 +69,9 @@ private[akka] trait DeathWatch { this: ActorCell => terminatedQueued -= a a case unexpected => - throw new IllegalArgumentException(s"ActorRef is not internal: $unexpected") // will not happen, for exhaustiveness check + throw new IllegalArgumentException( + s"ActorRef is not internal: $unexpected" + ) // will not happen, for exhaustiveness check } protected def receivedTerminated(t: Terminated): Unit = diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/Dispatch.scala b/akka-actor/src/main/scala/akka/actor/dungeon/Dispatch.scala index a84d4beeedb..4c5ab4f168b 100644 --- a/akka-actor/src/main/scala/akka/actor/dungeon/Dispatch.scala +++ b/akka-actor/src/main/scala/akka/actor/dungeon/Dispatch.scala @@ -29,14 +29,15 @@ final case class SerializationCheckFailedException private[dungeon] (msg: Object "To avoid this error, either disable 'akka.actor.serialize-messages', mark the message with 'akka.actor.NoSerializationVerificationNeeded', or configure serialization to support this message", cause) -/** - * INTERNAL API - */ +/** INTERNAL API */ 
@InternalApi private[akka] trait Dispatch { this: ActorCell => + // scalafmt breaks this by moving the _ to its own line which doesn't compile + // format: off @nowarn @volatile private var _mailboxDoNotCallMeDirectly : Mailbox = _ //This must be volatile since it isn't protected by the mailbox status + // format: on @nowarn private def _preventPrivateUnusedErasure = { _mailboxDoNotCallMeDirectly @@ -112,9 +113,7 @@ private[akka] trait Dispatch { this: ActorCell => this } - /** - * Start this cell, i.e. attach it to the dispatcher. - */ + /** Start this cell, i.e. attach it to the dispatcher. */ def start(): this.type = { // This call is expected to start off the actor by scheduling its mailbox. dispatcher.attach(this) @@ -176,11 +175,12 @@ private[akka] trait Dispatch { this: ActorCell => if (system.settings.NoSerializationVerificationNeededClassPrefix.exists(msg.getClass.getName.startsWith)) envelope else { - val deserializedMsg = try { - serializeAndDeserializePayload(msg) - } catch { - case NonFatal(e) => throw SerializationCheckFailedException(msg, e) - } + val deserializedMsg = + try { + serializeAndDeserializePayload(msg) + } catch { + case NonFatal(e) => throw SerializationCheckFailedException(msg, e) + } envelope.message match { case dl: DeadLetter => envelope.copy(message = dl.copy(message = deserializedMsg)) case _ => envelope.copy(message = deserializedMsg) diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/FaultHandling.scala b/akka-actor/src/main/scala/akka/actor/dungeon/FaultHandling.scala index e909669728b..f36e338f443 100644 --- a/akka-actor/src/main/scala/akka/actor/dungeon/FaultHandling.scala +++ b/akka-actor/src/main/scala/akka/actor/dungeon/FaultHandling.scala @@ -23,9 +23,7 @@ import akka.event.Logging import akka.event.Logging.Debug import akka.event.Logging.Error -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object FaultHandling { sealed trait FailedInfo private case object NoFailedInfo extends FailedInfo 
@@ -76,9 +74,7 @@ private[akka] trait FaultHandling { this: ActorCell => } protected def setFailedFatally(): Unit = _failed = FailedFatally - /** - * Do re-create the actor in response to a failure. - */ + /** Do re-create the actor in response to a failure. */ protected def faultRecreate(cause: Throwable): Unit = if (actor == null) { system.eventStream.publish( @@ -92,10 +88,12 @@ private[akka] trait FaultHandling { this: ActorCell => try { // if the actor fails in preRestart, we can do nothing but log it: it’s best-effort if (!isFailedFatally) failedActor.aroundPreRestart(cause, optionalMessage) - } catch handleNonFatalOrInterruptedException { e => - val ex = PreRestartException(self, e, cause, optionalMessage) - publish(Error(ex, self.path.toString, clazz(failedActor), e.getMessage)) - } finally { + } catch + handleNonFatalOrInterruptedException { e => + val ex = PreRestartException(self, e, cause, optionalMessage) + publish(Error(ex, self.path.toString, clazz(failedActor), e.getMessage)) + } + finally { clearActorFields(failedActor, recreate = true) } } @@ -141,9 +139,7 @@ private[akka] trait FaultHandling { this: ActorCell => } } - /** - * Do create the actor in response to a failure. - */ + /** Do create the actor in response to a failure. 
*/ protected def faultCreate(): Unit = { assert(mailbox.isSuspended, "mailbox must be suspended during failed creation, status=" + mailbox.currentStatus) assert(perpetrator == self) @@ -161,9 +157,10 @@ private[akka] trait FaultHandling { this: ActorCell => try resumeNonRecursive() finally clearFailed() try create(None) - catch handleNonFatalOrInterruptedException { e => - handleInvokeFailure(Nil, e) - } + catch + handleNonFatalOrInterruptedException { e => + handleInvokeFailure(Nil, e) + } } protected def terminate(): Unit = { @@ -203,33 +200,35 @@ private[akka] trait FaultHandling { this: ActorCell => @InternalStableApi final def handleInvokeFailure(childrenNotToSuspend: immutable.Iterable[ActorRef], t: Throwable): Unit = { // prevent any further messages to be processed until the actor has been restarted - if (!isFailed) try { - suspendNonRecursive() - // suspend children - val skip: Set[ActorRef] = currentMessage match { - case Envelope(Failed(_, _, _), child) => setFailed(child); Set(child) - case _ => setFailed(self); Set.empty - } - suspendChildren(exceptFor = skip ++ childrenNotToSuspend) - t match { - // tell supervisor - case _: InterruptedException => - // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - parent.sendSystemMessage(Failed(self, new ActorInterruptedException(t), uid)) - case _ => - // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - parent.sendSystemMessage(Failed(self, t, uid)) - } - } catch handleNonFatalOrInterruptedException { e => - publish( - Error( - e, - self.path.toString, - clazz(actor), - "emergency stop: exception in failure handling for " + t.getClass + Logging.stackTraceFor(t))) - try children.foreach(stop) - finally finishTerminate() - } + if (!isFailed) + try { + suspendNonRecursive() + // suspend children + val skip: Set[ActorRef] = currentMessage match { + case Envelope(Failed(_, _, _), child) => setFailed(child); Set(child) + case _ => setFailed(self); Set.empty + } + 
suspendChildren(exceptFor = skip ++ childrenNotToSuspend) + t match { + // tell supervisor + case _: InterruptedException => + // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ + parent.sendSystemMessage(Failed(self, new ActorInterruptedException(t), uid)) + case _ => + // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ + parent.sendSystemMessage(Failed(self, t, uid)) + } + } catch + handleNonFatalOrInterruptedException { e => + publish( + Error( + e, + self.path.toString, + clazz(actor), + "emergency stop: exception in failure handling for " + t.getClass + Logging.stackTraceFor(t))) + try children.foreach(stop) + finally finishTerminate() + } } private def finishTerminate(): Unit = { @@ -240,21 +239,28 @@ private[akka] trait FaultHandling { this: ActorCell => * specific order. */ try if (a ne null) a.aroundPostStop() - catch handleNonFatalOrInterruptedException { e => - publish(Error(e, self.path.toString, clazz(a), e.getMessage)) - } finally try stopFunctionRefs() - finally try dispatcher.detach(this) - finally try parent.sendSystemMessage( - DeathWatchNotification(self, existenceConfirmed = true, addressTerminated = false)) - finally try tellWatchersWeDied() - finally try unwatchWatchedActors(a) // stay here as we expect an emergency stop from handleInvokeFailure - finally { - if (system.settings.DebugLifecycle) - publish(Debug(self.path.toString, clazz(a), "stopped")) - - clearActorFields(a, recreate = false) - clearFieldsForTermination() - } + catch + handleNonFatalOrInterruptedException { e => + publish(Error(e, self.path.toString, clazz(a), e.getMessage)) + } + finally + try stopFunctionRefs() + finally + try dispatcher.detach(this) + finally + try + parent.sendSystemMessage(DeathWatchNotification(self, existenceConfirmed = true, addressTerminated = false)) + finally + try tellWatchersWeDied() + finally + try unwatchWatchedActors(a) // stay here as we expect an emergency stop from handleInvokeFailure + finally { + if 
(system.settings.DebugLifecycle) + publish(Debug(self.path.toString, clazz(a), "stopped")) + + clearActorFields(a, recreate = false) + clearFieldsForTermination() + } } private def finishRecreate(cause: Throwable): Unit = { @@ -268,21 +274,24 @@ private[akka] trait FaultHandling { this: ActorCell => val freshActor = newActor() freshActor.aroundPostRestart(cause) - checkReceiveTimeout(reschedule = true) // user may have set a receive timeout in preStart which is called from postRestart + checkReceiveTimeout(reschedule = + true + ) // user may have set a receive timeout in preStart which is called from postRestart if (system.settings.DebugLifecycle) publish(Debug(self.path.toString, clazz(freshActor), "restarted")) // only after parent is up and running again do restart the children which were not stopped - survivors.foreach( - child => - try child.asInstanceOf[InternalActorRef].restart(cause) - catch handleNonFatalOrInterruptedException { e => + survivors.foreach(child => + try child.asInstanceOf[InternalActorRef].restart(cause) + catch + handleNonFatalOrInterruptedException { e => publish(Error(e, self.path.toString, clazz(freshActor), "restarting " + child)) }) - } catch handleNonFatalOrInterruptedException { e => - setFailedFatally() - clearActorFields(actor, recreate = false) // in order to prevent preRestart() from happening again - handleInvokeFailure(survivors, PostRestartException(self, e, cause)) - } + } catch + handleNonFatalOrInterruptedException { e => + setFailedFatally() + clearActorFields(actor, recreate = false) // in order to prevent preRestart() from happening again + handleInvokeFailure(survivors, PostRestartException(self, e, cause)) + } } final protected def handleFailure(f: Failed): Unit = { @@ -316,10 +325,11 @@ private[akka] trait FaultHandling { this: ActorCell => */ if (actor != null) { try actor.supervisorStrategy.handleChildTerminated(this, child, children) - catch handleNonFatalOrInterruptedException { e => - publish(Error(e, 
self.path.toString, clazz(actor), "handleChildTerminated failed")) - handleInvokeFailure(Nil, e) - } + catch + handleNonFatalOrInterruptedException { e => + publish(Error(e, self.path.toString, clazz(actor), "handleChildTerminated failed")) + handleInvokeFailure(Nil, e) + } } /* * if the removal changed the state of the (terminating) children container, diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/ReceiveTimeout.scala b/akka-actor/src/main/scala/akka/actor/dungeon/ReceiveTimeout.scala index 561dc8096be..53dd0afee54 100644 --- a/akka-actor/src/main/scala/akka/actor/dungeon/ReceiveTimeout.scala +++ b/akka-actor/src/main/scala/akka/actor/dungeon/ReceiveTimeout.scala @@ -48,7 +48,7 @@ private[akka] trait ReceiveTimeout { this: ActorCell => } private def rescheduleReceiveTimeout(f: FiniteDuration): Unit = { - receiveTimeoutData._2.cancel() //Cancel any ongoing future + receiveTimeoutData._2.cancel() // Cancel any ongoing future val task = system.scheduler.scheduleOnce(f, self, akka.actor.ReceiveTimeout)(this.dispatcher) receiveTimeoutData = (f, task) } diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/TimerSchedulerImpl.scala b/akka-actor/src/main/scala/akka/actor/dungeon/TimerSchedulerImpl.scala index 4df2a04f50d..aa73438c205 100644 --- a/akka-actor/src/main/scala/akka/actor/dungeon/TimerSchedulerImpl.scala +++ b/akka-actor/src/main/scala/akka/actor/dungeon/TimerSchedulerImpl.scala @@ -10,9 +10,7 @@ import akka.annotation.InternalApi import akka.event.Logging import akka.util.OptionVal -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object TimerSchedulerImpl { sealed trait TimerMsg { def key: Any @@ -43,9 +41,7 @@ import akka.util.OptionVal } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class TimerSchedulerImpl(ctx: ActorContext) extends TimerScheduler { import TimerSchedulerImpl._ diff --git a/akka-actor/src/main/scala/akka/actor/setup/ActorSystemSetup.scala 
b/akka-actor/src/main/scala/akka/actor/setup/ActorSystemSetup.scala index ce7eda641bc..55c233ae0d3 100644 --- a/akka-actor/src/main/scala/akka/actor/setup/ActorSystemSetup.scala +++ b/akka-actor/src/main/scala/akka/actor/setup/ActorSystemSetup.scala @@ -32,15 +32,11 @@ object ActorSystemSetup { val empty = new ActorSystemSetup(Map.empty) - /** - * Scala API: Create an [[ActorSystemSetup]] containing all the provided settings - */ + /** Scala API: Create an [[ActorSystemSetup]] containing all the provided settings */ def apply(settings: Setup*): ActorSystemSetup = new ActorSystemSetup(settings.map(s => s.getClass -> s).toMap) - /** - * Java API: Create an [[ActorSystemSetup]] containing all the provided settings - */ + /** Java API: Create an [[ActorSystemSetup]] containing all the provided settings */ @varargs def create(settings: Setup*): ActorSystemSetup = apply(settings: _*) } @@ -53,16 +49,12 @@ object ActorSystemSetup { */ final class ActorSystemSetup private[akka] (@InternalApi private[akka] val setups: Map[Class[_], AnyRef]) { - /** - * Java API: Extract a concrete [[Setup]] of type `T` if it is defined in the settings. - */ + /** Java API: Extract a concrete [[Setup]] of type `T` if it is defined in the settings. */ def get[T <: Setup](clazz: Class[T]): Optional[T] = { setups.get(clazz).map(_.asInstanceOf[T]).asJava } - /** - * Scala API: Extract a concrete [[Setup]] of type `T` if it is defined in the settings. - */ + /** Scala API: Extract a concrete [[Setup]] of type `T` if it is defined in the settings. 
*/ def get[T <: Setup: ClassTag]: Option[T] = { val clazz = implicitly[ClassTag[T]].runtimeClass setups.get(clazz).map(_.asInstanceOf[T]) diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index 18f94364944..67c84266116 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -52,27 +52,26 @@ final case class TaskInvocation(eventStream: EventStream, runnable: Runnable, cl } finally cleanup() } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] trait LoadMetrics { self: Executor => def atFullThrottle(): Boolean } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] object MessageDispatcher { - val UNSCHEDULED = 0 //WARNING DO NOT CHANGE THE VALUE OF THIS: It relies on the faster init of 0 in AbstractMessageDispatcher + val UNSCHEDULED = + 0 // WARNING DO NOT CHANGE THE VALUE OF THIS: It relies on the faster init of 0 in AbstractMessageDispatcher val SCHEDULED = 1 val RESCHEDULED = 2 // dispatcher debugging helper using println (see below) // since this is a compile-time constant, scalac will elide code behind if (MessageDispatcher.debug) (RK checked with 2.9.1) final val debug = false // Deliberately without type ascription to make it a compile-time constant - lazy val actors = new Index[MessageDispatcher, ActorRef](16, new ju.Comparator[ActorRef] { - override def compare(a: ActorRef, b: ActorRef): Int = a.compareTo(b) - }) + lazy val actors = new Index[MessageDispatcher, ActorRef]( + 16, + new ju.Comparator[ActorRef] { + override def compare(a: ActorRef, b: ActorRef): Int = a.compareTo(b) + }) def printActors(): Unit = if (debug) { for { @@ -131,9 +130,7 @@ abstract class MessageDispatcher(val configurator: MessageDispatcherConfigurator private final def updateShutdownSchedule(expect: Int, update: Int): Boolean = Unsafe.instance.compareAndSwapInt(this, 
shutdownScheduleOffset, expect, update) - /** - * Creates and returns a mailbox for the given actor. - */ + /** Creates and returns a mailbox for the given actor. */ protected[akka] def createMailbox(actor: Cell, mailboxType: MailboxType): Mailbox /** @@ -152,9 +149,7 @@ abstract class MessageDispatcher(val configurator: MessageDispatcherConfigurator registerForExecution(actor.mailbox, false, true) } - /** - * Detaches the specified actor instance from this dispatcher - */ + /** Detaches the specified actor instance from this dispatcher */ final def detach(actor: ActorCell): Unit = try unregister(actor) finally ifSensibleToDoSoThenScheduleShutdown() @@ -187,16 +182,19 @@ abstract class MessageDispatcher(val configurator: MessageDispatcherConfigurator else ifSensibleToDoSoThenScheduleShutdown() case RESCHEDULED => case unexpected => - throw new IllegalArgumentException(s"Unexpected actor class marker: $unexpected") // will not happen, for exhaustiveness check + throw new IllegalArgumentException( + s"Unexpected actor class marker: $unexpected" + ) // will not happen, for exhaustiveness check } } private def scheduleShutdownAction(): Unit = { // IllegalStateException is thrown if scheduler has been shutdown - try prerequisites.scheduler.scheduleOnce(shutdownTimeout, shutdownAction)(new ExecutionContext { - override def execute(runnable: Runnable): Unit = runnable.run() - override def reportFailure(t: Throwable): Unit = MessageDispatcher.this.reportFailure(t) - }) + try + prerequisites.scheduler.scheduleOnce(shutdownTimeout, shutdownAction)(new ExecutionContext { + override def execute(runnable: Runnable): Unit = runnable.run() + override def reportFailure(t: Throwable): Unit = MessageDispatcher.this.reportFailure(t) + }) catch { case _: IllegalStateException => shutdown() @@ -239,7 +237,7 @@ abstract class MessageDispatcher(val configurator: MessageDispatcherConfigurator shutdownSchedule match { case SCHEDULED => try { - if (inhabitants == 0) shutdown() //Warning, 
racy + if (inhabitants == 0) shutdown() // Warning, racy } finally { while (!updateShutdownSchedule(shutdownSchedule, UNSCHEDULED)) {} } @@ -248,7 +246,9 @@ abstract class MessageDispatcher(val configurator: MessageDispatcherConfigurator else run() case UNSCHEDULED => case unexpected => - throw new IllegalArgumentException(s"Unexpected actor class marker: $unexpected") // will not happen, for exhaustiveness check + throw new IllegalArgumentException( + s"Unexpected actor class marker: $unexpected" + ) // will not happen, for exhaustiveness check } } } @@ -262,9 +262,7 @@ abstract class MessageDispatcher(val configurator: MessageDispatcherConfigurator */ protected[akka] def shutdownTimeout: FiniteDuration - /** - * After the call to this method, the dispatcher mustn't begin any new message processing for the specified reference - */ + /** After the call to this method, the dispatcher mustn't begin any new message processing for the specified reference */ protected[akka] def suspend(actor: ActorCell): Unit = { val mbox = actor.mailbox if ((mbox.actor eq actor) && (mbox.dispatcher eq this)) @@ -305,24 +303,16 @@ abstract class MessageDispatcher(val configurator: MessageDispatcherConfigurator hasSystemMessageHint: Boolean): Boolean // TODO check whether this should not actually be a property of the mailbox - /** - * INTERNAL API - */ + /** INTERNAL API */ protected[akka] def throughput: Int - /** - * INTERNAL API - */ + /** INTERNAL API */ protected[akka] def throughputDeadlineTime: Duration - /** - * INTERNAL API - */ + /** INTERNAL API */ @inline protected[akka] final val isThroughputDeadlineTimeDefined = throughputDeadlineTime.toMillis > 0 - /** - * INTERNAL API - */ + /** INTERNAL API */ protected[akka] def executeTask(invocation: TaskInvocation): Unit /** @@ -335,15 +325,11 @@ abstract class MessageDispatcher(val configurator: MessageDispatcherConfigurator protected[akka] def shutdown(): Unit } -/** - * An ExecutorServiceConfigurator is a class that given some 
prerequisites and a configuration can create instances of ExecutorService - */ +/** An ExecutorServiceConfigurator is a class that given some prerequisites and a configuration can create instances of ExecutorService */ abstract class ExecutorServiceConfigurator(@unused config: Config, @unused prerequisites: DispatcherPrerequisites) extends ExecutorServiceFactoryProvider -/** - * Base class to be used for hooking in new dispatchers into Dispatchers. - */ +/** Base class to be used for hooking in new dispatchers into Dispatchers. */ abstract class MessageDispatcherConfigurator(_config: Config, val prerequisites: DispatcherPrerequisites) { val config: Config = new CachingConfig(_config) @@ -368,13 +354,12 @@ abstract class MessageDispatcherConfigurator(_config: Config, val prerequisites: val args = List(classOf[Config] -> config, classOf[DispatcherPrerequisites] -> prerequisites) prerequisites.dynamicAccess .createInstanceFor[ExecutorServiceConfigurator](fqcn, args) - .recover { - case exception => - throw new IllegalArgumentException( - ("""Cannot instantiate ExecutorServiceConfigurator ("executor = [%s]"), defined in [%s], - make sure it has an accessible constructor with a [%s,%s] signature""") - .format(fqcn, config.getString("id"), classOf[Config], classOf[DispatcherPrerequisites]), - exception) + .recover { case exception => + throw new IllegalArgumentException( + """Cannot instantiate ExecutorServiceConfigurator ("executor = [%s]"), defined in [%s], + make sure it has an accessible constructor with a [%s,%s] signature""" + .format(fqcn, config.getString("id"), classOf[Config], classOf[DispatcherPrerequisites]), + exception) } .get } @@ -407,7 +392,7 @@ class ThreadPoolExecutorConfigurator(config: Config, prerequisites: DispatcherPr case size if size > 0 => Some(config.getString("task-queue-type")) .map { - case "array" => ThreadPoolConfig.arrayBlockingQueue(size, false) //TODO config fairness? 
+ case "array" => ThreadPoolConfig.arrayBlockingQueue(size, false) // TODO config fairness? case "" | "linked" => ThreadPoolConfig.linkedBlockingQueue(size) case x => throw new IllegalArgumentException("[%s] is not a valid task-queue-type [array|linked]!".format(x)) diff --git a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala index 343d6b12ec2..48a03a407a6 100644 --- a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala @@ -50,17 +50,13 @@ private[akka] class BalancingDispatcher( _executorServiceFactoryProvider, _shutdownTimeout) { - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] val team = new ConcurrentSkipListSet[ActorCell](Helpers.identityHashComparator(new Comparator[ActorCell] { def compare(l: ActorCell, r: ActorCell) = l.self.path.compareTo(r.self.path) })) - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] val messageQueue: MessageQueue = _mailboxType.create(None, None) private class SharingMailbox(val system: ActorSystemImpl, _messageQueue: MessageQueue) @@ -68,7 +64,7 @@ private[akka] class BalancingDispatcher( with DefaultSystemMessageQueue { override def cleanUp(): Unit = { val dlq = mailboxes.deadLetterMailbox - //Don't call the original implementation of this since it scraps all messages, and we don't want to do that + // Don't call the original implementation of this since it scraps all messages, and we don't want to do that var messages = systemDrain(new LatestFirstSystemMessageList(NoMessage)) while (messages.nonEmpty) { // message must be “virgin” before being able to systemEnqueue again @@ -103,12 +99,12 @@ private[akka] class BalancingDispatcher( if (attemptTeamWork) { @tailrec def scheduleOne(i: Iterator[ActorCell] = team.iterator): Unit = if (messageQueue.hasMessages - && i.hasNext - && (executorService.executor match { - case lm: LoadMetrics => 
!lm.atFullThrottle() - case _ => true - }) - && !registerForExecution(i.next.mailbox, false, false)) + && i.hasNext + && (executorService.executor match { + case lm: LoadMetrics => !lm.atFullThrottle() + case _ => true + }) + && !registerForExecution(i.next.mailbox, false, false)) scheduleOne(i) scheduleOne() diff --git a/akka-actor/src/main/scala/akka/dispatch/BatchingExecutor.scala b/akka-actor/src/main/scala/akka/dispatch/BatchingExecutor.scala index b0c65cd3216..728777545ff 100644 --- a/akka-actor/src/main/scala/akka/dispatch/BatchingExecutor.scala +++ b/akka-actor/src/main/scala/akka/dispatch/BatchingExecutor.scala @@ -69,7 +69,7 @@ private[akka] trait BatchingExecutor extends Executor { val current = _tasksLocal.get() _tasksLocal.remove() if ((current eq this) && !current.isEmpty) { // Resubmit ourselves if something bad happened and we still have work to do - unbatchedExecute(current) //TODO what if this submission fails? + unbatchedExecute(current) // TODO what if this submission fails? 
true } else false } diff --git a/akka-actor/src/main/scala/akka/dispatch/CachingConfig.scala b/akka-actor/src/main/scala/akka/dispatch/CachingConfig.scala index 52542f10403..133a177d7fa 100644 --- a/akka-actor/src/main/scala/akka/dispatch/CachingConfig.scala +++ b/akka-actor/src/main/scala/akka/dispatch/CachingConfig.scala @@ -12,9 +12,7 @@ import scala.util.{ Failure, Success, Try } import com.typesafe.config._ -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] object CachingConfig { val emptyConfig = ConfigFactory.empty() diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala index 4b1e0bd742b..6eb41068a43 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala @@ -54,27 +54,21 @@ class Dispatcher( protected final def executorService: ExecutorServiceDelegate = executorServiceDelegate - /** - * INTERNAL API - */ + /** INTERNAL API */ protected[akka] def dispatch(receiver: ActorCell, invocation: Envelope): Unit = { val mbox = receiver.mailbox mbox.enqueue(receiver.self, invocation) registerForExecution(mbox, true, false) } - /** - * INTERNAL API - */ + /** INTERNAL API */ protected[akka] def systemDispatch(receiver: ActorCell, invocation: SystemMessage): Unit = { val mbox = receiver.mailbox mbox.systemEnqueue(receiver.self, invocation) registerForExecution(mbox, false, true) } - /** - * INTERNAL API - */ + /** INTERNAL API */ protected[akka] def executeTask(invocation: TaskInvocation): Unit = { try { executorService.execute(invocation) @@ -90,9 +84,7 @@ class Dispatcher( } } - /** - * INTERNAL API - */ + /** INTERNAL API */ protected[akka] def createMailbox(actor: akka.actor.Cell, mailboxType: MailboxType): Mailbox = { new Mailbox(mailboxType.create(Some(actor.self), Some(actor.system))) with DefaultSystemMessageQueue } @@ -102,9 +94,7 @@ class Dispatcher( classOf[LazyExecutorServiceDelegate], 
"executorServiceDelegate") - /** - * INTERNAL API - */ + /** INTERNAL API */ protected[akka] def shutdown(): Unit = { val newDelegate = executorServiceDelegate.copy() // Doesn't matter which one we copy val es = esUpdater.getAndSet(this, newDelegate) @@ -120,7 +110,7 @@ class Dispatcher( mbox: Mailbox, hasMessageHint: Boolean, hasSystemMessageHint: Boolean): Boolean = { - if (mbox.canBeScheduledForExecution(hasMessageHint, hasSystemMessageHint)) { //This needs to be here to ensure thread safety and no races + if (mbox.canBeScheduledForExecution(hasMessageHint, hasSystemMessageHint)) { // This needs to be here to ensure thread safety and no races if (mbox.setAsScheduled()) { try { executorService.execute(mbox) @@ -130,7 +120,7 @@ class Dispatcher( try { executorService.execute(mbox) true - } catch { //Retry once + } catch { // Retry once case e: RejectedExecutionException => mbox.setAsIdle() eventStream.publish(Error(e, getClass.getName, getClass, "registerForExecution was rejected twice!")) @@ -146,9 +136,7 @@ class Dispatcher( object PriorityGenerator { - /** - * Creates a PriorityGenerator that uses the supplied function as priority generator - */ + /** Creates a PriorityGenerator that uses the supplied function as priority generator */ def apply(priorityFunction: Any => Int): PriorityGenerator = new PriorityGenerator { def gen(message: Any): Int = priorityFunction(message) } diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala index d46d9c76540..1023a130c62 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala @@ -18,9 +18,7 @@ import akka.event.{ EventStream, LoggingAdapter } import akka.event.Logging.Warning import akka.util.Helpers.ConfigOps -/** - * DispatcherPrerequisites represents useful contextual pieces when constructing a MessageDispatcher - */ +/** DispatcherPrerequisites represents useful 
contextual pieces when constructing a MessageDispatcher */ trait DispatcherPrerequisites { def threadFactory: ThreadFactory def eventStream: EventStream @@ -31,9 +29,7 @@ trait DispatcherPrerequisites { def defaultExecutionContext: Option[ExecutionContext] } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class DefaultDispatcherPrerequisites( threadFactory: ThreadFactory, @@ -60,9 +56,7 @@ object Dispatchers { */ final val DefaultBlockingDispatcherId: String = "akka.actor.default-blocking-io-dispatcher" - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] final val InternalDispatcherId = "akka.actor.internal-dispatcher" @@ -80,7 +74,7 @@ object Dispatchers { config.getValue(id).valueType match { case ConfigValueType.STRING => getConfig(config, config.getString(id), depth + 1) case ConfigValueType.OBJECT => config.getConfig(id) - case unexpected => ConfigFactory.empty(s"Expected either config or alias at [$id] but found [$unexpected]") + case unexpected => ConfigFactory.empty(s"Expected either config or alias at [$id] but found [$unexpected]") } } else ConfigFactory.empty(s"Dispatcher [$id] not configured") } @@ -112,16 +106,12 @@ class Dispatchers @InternalApi private[akka] ( val defaultDispatcherConfig: Config = idConfig(DefaultDispatcherId).withFallback(settings.config.getConfig(DefaultDispatcherId)) - /** - * The one and only default dispatcher. - */ + /** The one and only default dispatcher. 
*/ def defaultGlobalDispatcher: MessageDispatcher = lookup(DefaultDispatcherId) private val dispatcherConfigurators = new ConcurrentHashMap[String, MessageDispatcherConfigurator] - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] val internalDispatcher = lookup(Dispatchers.InternalDispatcherId) /** @@ -201,16 +191,12 @@ class Dispatchers @InternalApi private[akka] ( def registerConfigurator(id: String, configurator: MessageDispatcherConfigurator): Boolean = dispatcherConfigurators.putIfAbsent(id, configurator) == null - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] def config(id: String): Config = { config(id, settings.config.getConfig(id)) } - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] def config(id: String, appConfig: Config): Config = { import akka.util.ccompat.JavaConverters._ def simpleName = id.substring(id.lastIndexOf('.') + 1) @@ -266,13 +252,12 @@ class Dispatchers @InternalApi private[akka] ( val args = List(classOf[Config] -> cfg, classOf[DispatcherPrerequisites] -> prerequisites) prerequisites.dynamicAccess .createInstanceFor[MessageDispatcherConfigurator](fqn, args) - .recover { - case exception => - throw new ConfigurationException( - ("Cannot instantiate MessageDispatcherConfigurator type [%s], defined in [%s], " + - "make sure it has constructor with [com.typesafe.config.Config] and " + - "[akka.dispatch.DispatcherPrerequisites] parameters").format(fqn, cfg.getString("id")), - exception) + .recover { case exception => + throw new ConfigurationException( + ("Cannot instantiate MessageDispatcherConfigurator type [%s], defined in [%s], " + + "make sure it has constructor with [com.typesafe.config.Config] and " + + "[akka.dispatch.DispatcherPrerequisites] parameters").format(fqn, cfg.getString("id")), + exception) } .get } @@ -295,15 +280,11 @@ class DispatcherConfigurator(config: Config, prerequisites: DispatcherPrerequisi configureExecutor(), config.getMillisDuration("shutdown-timeout")) - /** - * 
Returns the same dispatcher instance for each invocation - */ + /** Returns the same dispatcher instance for each invocation */ override def dispatcher(): MessageDispatcher = instance } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] object BalancingDispatcherConfigurator { private val defaultRequirement = ConfigFactory.parseString("mailbox-requirement = akka.dispatch.MultipleConsumerSemantics") @@ -357,9 +338,7 @@ class BalancingDispatcherConfigurator(_config: Config, _prerequisites: Dispatche config.getMillisDuration("shutdown-timeout"), config.getBoolean("attempt-teamwork")) - /** - * Returns the same dispatcher instance for each invocation - */ + /** Returns the same dispatcher instance for each invocation */ override def dispatcher(): MessageDispatcher = instance } @@ -383,9 +362,7 @@ class PinnedDispatcherConfigurator(config: Config, prerequisites: DispatcherPrer ThreadPoolConfig() } - /** - * Creates new dispatcher for each invocation. - */ + /** Creates new dispatcher for each invocation. 
*/ override def dispatcher(): MessageDispatcher = new PinnedDispatcher( this, diff --git a/akka-actor/src/main/scala/akka/dispatch/ForkJoinExecutorConfigurator.scala b/akka-actor/src/main/scala/akka/dispatch/ForkJoinExecutorConfigurator.scala index 61036db2d22..b7279451c54 100644 --- a/akka-actor/src/main/scala/akka/dispatch/ForkJoinExecutorConfigurator.scala +++ b/akka-actor/src/main/scala/akka/dispatch/ForkJoinExecutorConfigurator.scala @@ -10,9 +10,7 @@ import com.typesafe.config.Config object ForkJoinExecutorConfigurator { - /** - * INTERNAL AKKA USAGE ONLY - */ + /** INTERNAL AKKA USAGE ONLY */ final class AkkaForkJoinPool( parallelism: Int, threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory, @@ -36,9 +34,7 @@ object ForkJoinExecutorConfigurator { def atFullThrottle(): Boolean = this.getActiveThreadCount() >= this.getParallelism() } - /** - * INTERNAL AKKA USAGE ONLY - */ + /** INTERNAL AKKA USAGE ONLY */ @SerialVersionUID(1L) final class AkkaForkJoinTask(runnable: Runnable) extends ForkJoinTask[Unit] { override def getRawResult(): Unit = () diff --git a/akka-actor/src/main/scala/akka/dispatch/Future.scala b/akka-actor/src/main/scala/akka/dispatch/Future.scala index 4a14e400577..a23a1ea1ae0 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Future.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Future.scala @@ -19,12 +19,10 @@ import akka.annotation.InternalApi import akka.annotation.InternalStableApi import akka.compat import akka.dispatch.internal.SameThreadExecutionContext -import akka.japi.{ Procedure, Function => JFunc, Option => JOption } +import akka.japi.{ Function => JFunc, Option => JOption, Procedure } import akka.util.unused -/** - * ExecutionContexts is the Java API for ExecutionContexts - */ +/** ExecutionContexts is the Java API for ExecutionContexts */ object ExecutionContexts { /** @@ -71,9 +69,7 @@ object ExecutionContexts { errorReporter: Procedure[Throwable]): ExecutionContextExecutorService = 
ExecutionContext.fromExecutorService(executorService, errorReporter.apply) - /** - * @return a reference to the global ExecutionContext - */ + /** @return a reference to the global ExecutionContext */ def global(): ExecutionContextExecutor = ExecutionContext.global /** @@ -90,9 +86,7 @@ object ExecutionContexts { @InternalStableApi private[akka] val parasitic: ExecutionContext = SameThreadExecutionContext() - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi @deprecated("Use ExecutionContexts.parasitic instead", "2.6.4") private[akka] object sameThreadExecutionContext extends ExecutionContext with BatchingExecutor { @@ -104,9 +98,7 @@ object ExecutionContexts { } -/** - * Futures is the Java API for Futures and Promises - */ +/** Futures is the Java API for Futures and Promises */ object Futures { import akka.util.ccompat.JavaConverters._ @@ -128,28 +120,20 @@ object Futures { */ def promise[T](): Promise[T] = Promise[T]() - /** - * creates an already completed Promise with the specified exception - */ + /** creates an already completed Promise with the specified exception */ def failed[T](exception: Throwable): Future[T] = Future.failed(exception) - /** - * Creates an already completed Promise with the specified result - */ + /** Creates an already completed Promise with the specified result */ def successful[T](result: T): Future[T] = Future.successful(result) - /** - * Creates an already completed CompletionStage with the specified exception - */ + /** Creates an already completed CompletionStage with the specified exception */ def failedCompletionStage[T](ex: Throwable): CompletionStage[T] = { val f = CompletableFuture.completedFuture[T](null.asInstanceOf[T]) f.obtrudeException(ex) f } - /** - * Returns a Future that will hold the optional result of the first Future with a result that matches the predicate - */ + /** Returns a Future that will hold the optional result of the first Future with a result that matches the predicate */ def find[T <: 
AnyRef]( futures: JIterable[Future[T]], predicate: JFunc[T, java.lang.Boolean], @@ -158,9 +142,7 @@ object Futures { compat.Future.find[T](futures.asScala)(predicate.apply(_))(executor).map(JOption.fromScalaOption) } - /** - * Returns a Future to the result of the first future in the list that is completed - */ + /** Returns a Future to the result of the first future in the list that is completed */ def firstCompletedOf[T <: AnyRef](futures: JIterable[Future[T]], executor: ExecutionContext): Future[T] = Future.firstCompletedOf(futures.asScala)(executor) @@ -177,9 +159,7 @@ object Futures { executor: ExecutionContext): Future[R] = compat.Future.fold(futures.asScala)(zero)(fun.apply)(executor) - /** - * Reduces the results of the supplied futures and binary function. - */ + /** Reduces the results of the supplied futures and binary function. */ def reduce[T <: AnyRef, R >: T]( futures: JIterable[Future[T]], fun: akka.japi.Function2[R, T, R], diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala index be4643584fd..d5720226efa 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala @@ -22,9 +22,7 @@ import akka.event.Logging.Error import akka.util.{ BoundedBlockingQueue, StablePriorityBlockingQueue, StablePriorityQueue, Unsafe } import akka.util.Helpers.ConfigOps -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] object Mailbox { type Status = Int @@ -34,7 +32,8 @@ private[akka] object Mailbox { */ // Primary status - final val Open = 0 // _status is not initialized in AbstractMailbox, so default must be zero! Deliberately without type ascription to make it a compile-time constant + final val Open = + 0 // _status is not initialized in AbstractMailbox, so default must be zero! 
Deliberately without type ascription to make it a compile-time constant final val Closed = 1 // Deliberately without type ascription to make it a compile-time constant // Secondary status: Scheduled bit may be added to Open/Suspended final val Scheduled = 2 // Deliberately without type ascription to make it a compile-time constant @@ -83,19 +82,13 @@ private[akka] abstract class Mailbox(val messageQueue: MessageQueue) def dispatcher: MessageDispatcher = actor.dispatcher - /** - * Try to enqueue the message to this queue, or throw an exception. - */ + /** Try to enqueue the message to this queue, or throw an exception. */ def enqueue(receiver: ActorRef, msg: Envelope): Unit = messageQueue.enqueue(receiver, msg) - /** - * Try to dequeue the next message from this queue, return null failing that. - */ + /** Try to dequeue the next message from this queue, return null failing that. */ def dequeue(): Envelope = messageQueue.dequeue() - /** - * Indicates whether this queue is non-empty. - */ + /** Indicates whether this queue is non-empty. */ def hasMessages: Boolean = messageQueue.hasMessages /** @@ -106,10 +99,10 @@ private[akka] abstract class Mailbox(val messageQueue: MessageQueue) def numberOfMessages: Int = messageQueue.numberOfMessages @volatile - protected var _statusDoNotCallMeDirectly: Status = _ //0 by default + protected var _statusDoNotCallMeDirectly: Status = _ // 0 by default @volatile - protected var _systemQueueDoNotCallMeDirectly: SystemMessage = _ //null by default + protected var _systemQueueDoNotCallMeDirectly: SystemMessage = _ // null by default @inline final def currentStatus: Mailbox.Status = Unsafe.instance.getIntVolatile(this, AbstractMailbox.mailboxStatusOffset) @@ -179,9 +172,7 @@ private[akka] abstract class Mailbox(val messageQueue: MessageQueue) case s => updateStatus(s, Closed) || becomeClosed() } - /** - * Set Scheduled status, keeping primary status as is. - */ + /** Set Scheduled status, keeping primary status as is. 
*/ @tailrec final def setAsScheduled(): Boolean = { val s = currentStatus @@ -193,9 +184,7 @@ private[akka] abstract class Mailbox(val messageQueue: MessageQueue) else updateStatus(s, s | Scheduled) || setAsScheduled() } - /** - * Reset Scheduled status, keeping primary status as is. - */ + /** Reset Scheduled status, keeping primary status as is. */ @tailrec final def setAsIdle(): Boolean = { val s = currentStatus @@ -226,12 +215,12 @@ private[akka] abstract class Mailbox(val messageQueue: MessageQueue) override final def run(): Unit = { try { - if (!isClosed) { //Volatile read, needed here - processAllSystemMessages() //First, deal with any system messages - processMailbox() //Then deal with messages + if (!isClosed) { // Volatile read, needed here + processAllSystemMessages() // First, deal with any system messages + processMailbox() // Then deal with messages } } finally { - setAsIdle() //Volatile write, needed here + setAsIdle() // Volatile write, needed here dispatcher.registerForExecution(this, false, false) } } @@ -254,9 +243,7 @@ private[akka] abstract class Mailbox(val messageQueue: MessageQueue) throw anything } - /** - * Process the messages in the mailbox - */ + /** Process the messages in the mailbox */ @tailrec private final def processMailbox( left: Int = java.lang.Math.max(dispatcher.throughput, 1), deadlineNs: Long = @@ -358,14 +345,12 @@ private[akka] abstract class Mailbox(val messageQueue: MessageQueue) */ trait MessageQueue { - /** - * Try to enqueue the message to this queue, or throw an exception. - */ - def enqueue(receiver: ActorRef, handle: Envelope): Unit // NOTE: receiver is used only in two places, but cannot be removed + /** Try to enqueue the message to this queue, or throw an exception. */ + def enqueue( + receiver: ActorRef, + handle: Envelope): Unit // NOTE: receiver is used only in two places, but cannot be removed - /** - * Try to dequeue the next message from this queue, return null failing that. 
- */ + /** Try to dequeue the next message from this queue, return null failing that. */ def dequeue(): Envelope /** @@ -375,9 +360,7 @@ trait MessageQueue { */ def numberOfMessages: Int - /** - * Indicates whether this queue is non-empty. - */ + /** Indicates whether this queue is non-empty. */ def hasMessages: Boolean /** @@ -445,28 +428,20 @@ class BoundedNodeMessageQueue(capacity: Int) } } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] trait SystemMessageQueue { - /** - * Enqueue a new system message, e.g. by prepending atomically as new head of a single-linked list. - */ + /** Enqueue a new system message, e.g. by prepending atomically as new head of a single-linked list. */ @InternalStableApi def systemEnqueue(receiver: ActorRef, message: SystemMessage): Unit - /** - * Dequeue all messages from system queue and return them as single-linked list. - */ + /** Dequeue all messages from system queue and return them as single-linked list. */ def systemDrain(newContents: LatestFirstSystemMessageList): EarliestFirstSystemMessageList def hasSystemMessages: Boolean } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] trait DefaultSystemMessageQueue { self: Mailbox => @tailrec @@ -505,9 +480,7 @@ private[akka] trait DefaultSystemMessageQueue { self: Mailbox => */ trait MultipleConsumerSemantics -/** - * A QueueBasedMessageQueue is a MessageQueue backed by a java.util.Queue. - */ +/** A QueueBasedMessageQueue is a MessageQueue backed by a java.util.Queue. */ trait QueueBasedMessageQueue extends MessageQueue with MultipleConsumerSemantics { def queue: Queue[Envelope] def numberOfMessages = queue.size @@ -567,9 +540,7 @@ trait BoundedQueueBasedMessageQueue extends QueueBasedMessageQueue with BoundedM def dequeue(): Envelope = queue.poll() } -/** - * DequeBasedMessageQueue refines QueueBasedMessageQueue to be backed by a java.util.Deque. - */ +/** DequeBasedMessageQueue refines QueueBasedMessageQueue to be backed by a java.util.Deque. 
*/ trait DequeBasedMessageQueueSemantics { def enqueueFirst(receiver: ActorRef, handle: Envelope): Unit } @@ -644,9 +615,7 @@ trait MailboxType { trait ProducesMessageQueue[T <: MessageQueue] -/** - * UnboundedMailbox is the default unbounded MailboxType used by Akka Actors. - */ +/** UnboundedMailbox is the default unbounded MailboxType used by Akka Actors. */ final case class UnboundedMailbox() extends MailboxType with ProducesMessageQueue[UnboundedMailbox.MessageQueue] { def this(settings: ActorSystem.Settings, config: Config) = this() @@ -696,9 +665,7 @@ case class NonBlockingBoundedMailbox(capacity: Int) new BoundedNodeMessageQueue(capacity) } -/** - * BoundedMailbox is the default bounded MailboxType used by Akka Actors. - */ +/** BoundedMailbox is the default bounded MailboxType used by Akka Actors. */ final case class BoundedMailbox(capacity: Int, override val pushTimeOut: FiniteDuration) extends MailboxType with ProducesMessageQueue[BoundedMailbox.MessageQueue] @@ -818,9 +785,7 @@ object BoundedStablePriorityMailbox { } } -/** - * UnboundedDequeBasedMailbox is an unbounded MailboxType, backed by a Deque. - */ +/** UnboundedDequeBasedMailbox is an unbounded MailboxType, backed by a Deque. */ final case class UnboundedDequeBasedMailbox() extends MailboxType with ProducesMessageQueue[UnboundedDequeBasedMailbox.MessageQueue] { @@ -837,9 +802,7 @@ object UnboundedDequeBasedMailbox { } } -/** - * BoundedDequeBasedMailbox is an bounded MailboxType, backed by a Deque. - */ +/** BoundedDequeBasedMailbox is an bounded MailboxType, backed by a Deque. */ case class BoundedDequeBasedMailbox(final val capacity: Int, override final val pushTimeOut: FiniteDuration) extends MailboxType with ProducesMessageQueue[BoundedDequeBasedMailbox.MessageQueue] @@ -864,9 +827,7 @@ object BoundedDequeBasedMailbox { } } -/** - * ControlAwareMessageQueue handles messages that extend [[akka.dispatch.ControlMessage]] with priority. 
- */ +/** ControlAwareMessageQueue handles messages that extend [[akka.dispatch.ControlMessage]] with priority. */ trait ControlAwareMessageQueueSemantics extends QueueBasedMessageQueue { def controlQueue: Queue[Envelope] def queue: Queue[Envelope] @@ -895,9 +856,7 @@ trait BoundedControlAwareMessageQueueSemantics extends BoundedMessageQueueSemantics with ControlAwareMessageQueueSemantics -/** - * Messages that extend this trait will be handled with priority by control aware mailboxes. - */ +/** Messages that extend this trait will be handled with priority by control aware mailboxes. */ trait ControlMessage /** @@ -998,26 +957,27 @@ object BoundedControlAwareMailbox { var remaining = pushTimeOut.toNanos putLock.lockInterruptibly() - val inserted = try { - var stop = false - while (size.get() == capacity && !stop) { - remaining = notFull.awaitNanos(remaining) - stop = remaining <= 0 - } + val inserted = + try { + var stop = false + while (size.get() == capacity && !stop) { + remaining = notFull.awaitNanos(remaining) + stop = remaining <= 0 + } - if (stop) { - false - } else { - q.add(envelope) - val c = size.incrementAndGet() + if (stop) { + false + } else { + q.add(envelope) + val c = size.incrementAndGet() - if (c < capacity) notFull.signal() + if (c < capacity) notFull.signal() - true + true + } + } finally { + putLock.unlock() } - } finally { - putLock.unlock() - } if (!inserted) { receiver diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailboxes.scala b/akka-actor/src/main/scala/akka/dispatch/Mailboxes.scala index ec621a390c2..e086391b04f 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailboxes.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailboxes.scala @@ -71,39 +71,31 @@ private[akka] class Mailboxes( .unwrapped .asScala .toMap - .foldLeft(Map.empty[Class[_ <: Any], String]) { - case (m, (k, v)) => - dynamicAccess - .getClassFor[Any](k) - .map { x => - m.updated(x, v.toString) - } - .recover { - case e => - throw new 
ConfigurationException( - s"Type [$k] specified as akka.actor.mailbox.requirement " + - s"[$v] in config can't be loaded due to [${e.getMessage}]", - e) - } - .get + .foldLeft(Map.empty[Class[_ <: Any], String]) { case (m, (k, v)) => + dynamicAccess + .getClassFor[Any](k) + .map { x => + m.updated(x, v.toString) + } + .recover { case e => + throw new ConfigurationException( + s"Type [$k] specified as akka.actor.mailbox.requirement " + + s"[$v] in config can't be loaded due to [${e.getMessage}]", + e) + } + .get } } - /** - * Returns a mailbox type as specified in configuration, based on the id, or if not defined None. - */ + /** Returns a mailbox type as specified in configuration, based on the id, or if not defined None. */ def lookup(id: String): MailboxType = lookupConfigurator(id) - /** - * Returns a mailbox type as specified in configuration, based on the type, or if not defined None. - */ + /** Returns a mailbox type as specified in configuration, based on the type, or if not defined None. */ def lookupByQueueType(queueType: Class[_ <: Any]): MailboxType = lookup(lookupId(queueType)) private final val rmqClass = classOf[RequiresMessageQueue[_]] - /** - * Return the required message queue type for this class if any. - */ + /** Return the required message queue type for this class if any. 
*/ def getRequiredType(actorClass: Class[_ <: Actor]): Class[_] = Reflect.findMarker(actorClass, rmqClass) match { case t: ParameterizedType => @@ -113,7 +105,9 @@ private[akka] class Mailboxes( throw new IllegalArgumentException(s"no wildcard type allowed in RequireMessageQueue argument (was [$x])") } case unexpected => - throw new IllegalArgumentException(s"Unexpected actor class marker: $unexpected") // will not happen, for exhaustiveness check + throw new IllegalArgumentException( + s"Unexpected actor class marker: $unexpected" + ) // will not happen, for exhaustiveness check } // don’t care if this happens twice @@ -138,13 +132,13 @@ private[akka] class Mailboxes( s"no wildcard type allowed in ProducesMessageQueue argument (was [$x])") } case unexpected => - throw new IllegalArgumentException(s"Unexpected message queue type marker: $unexpected") // will not happen, for exhaustiveness check + throw new IllegalArgumentException( + s"Unexpected message queue type marker: $unexpected" + ) // will not happen, for exhaustiveness check } } - /** - * Finds out the mailbox type for an actor based on configuration, props and requirements. - */ + /** Finds out the mailbox type for an actor based on configuration, props and requirements. */ protected[akka] def getMailboxType(props: Props, dispatcherConfig: Config): MailboxType = { val id = dispatcherConfig.getString("id") val deploy = props.deploy @@ -198,9 +192,7 @@ private[akka] class Mailboxes( } } - /** - * Check if this class can have a required message queue type. - */ + /** Check if this class can have a required message queue type. 
*/ def hasRequiredType(actorClass: Class[_ <: Actor]): Boolean = rmqClass.isAssignableFrom(actorClass) private def lookupId(queueType: Class[_]): String = @@ -233,12 +225,11 @@ private[akka] class Mailboxes( val args = List(classOf[ActorSystem.Settings] -> settings, classOf[Config] -> conf) dynamicAccess .createInstanceFor[MailboxType](fqcn, args) - .recover { - case exception => - throw new IllegalArgumentException( - s"Cannot instantiate MailboxType [$fqcn], defined in [$id], make sure it has a public" + - " constructor with [akka.actor.ActorSystem.Settings, com.typesafe.config.Config] parameters", - exception) + .recover { case exception => + throw new IllegalArgumentException( + s"Cannot instantiate MailboxType [$fqcn], defined in [$id], make sure it has a public" + + " constructor with [akka.actor.ActorSystem.Settings, com.typesafe.config.Config] parameters", + exception) } .get } @@ -272,7 +263,7 @@ private[akka] class Mailboxes( private final def warn(msg: String): Unit = eventStream.publish(Warning("mailboxes", getClass, msg)) - //INTERNAL API + // INTERNAL API private def config(id: String): Config = { import akka.util.ccompat.JavaConverters._ ConfigFactory @@ -285,9 +276,7 @@ private[akka] class Mailboxes( private val defaultStashCapacity: Int = stashCapacityFromConfig(Dispatchers.DefaultDispatcherId, Mailboxes.DefaultMailboxId) - /** - * INTERNAL API: The capacity of the stash. Configured in the actor's mailbox or dispatcher config. - */ + /** INTERNAL API: The capacity of the stash. Configured in the actor's mailbox or dispatcher config. 
*/ private[akka] final def stashCapacity(dispatcher: String, mailbox: String): Int = { @tailrec def updateCache(cache: Map[String, Int], key: String, value: Int): Boolean = { diff --git a/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala index be02efa95ca..ae27915d9dd 100644 --- a/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala @@ -32,7 +32,7 @@ class PinnedDispatcher( @volatile private var owner: ActorCell = _actor - //Relies on an external lock provided by MessageDispatcher.attach + // Relies on an external lock provided by MessageDispatcher.attach protected[akka] override def register(actorCell: ActorCell) = { val actor = owner if ((actor ne null) && actorCell != actor) @@ -40,7 +40,7 @@ class PinnedDispatcher( owner = actorCell super.register(actorCell) } - //Relies on an external lock provided by MessageDispatcher.detach + // Relies on an external lock provided by MessageDispatcher.detach protected[akka] override def unregister(actor: ActorCell) = { super.unregister(actor) owner = null diff --git a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala index c54e80ab823..da18e467b56 100644 --- a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala +++ b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala @@ -51,23 +51,17 @@ object ThreadPoolConfig { def reusableQueue(queueFactory: QueueFactory): QueueFactory = reusableQueue(queueFactory()) } -/** - * Function0 without the fun stuff (mostly for the sake of the Java API side of things) - */ +/** Function0 without the fun stuff (mostly for the sake of the Java API side of things) */ trait ExecutorServiceFactory { def createExecutorService: ExecutorService } -/** - * Generic way to specify an ExecutorService to a Dispatcher, create it with the given name if 
desired - */ +/** Generic way to specify an ExecutorService to a Dispatcher, create it with the given name if desired */ trait ExecutorServiceFactoryProvider { def createExecutorServiceFactory(id: String, threadFactory: ThreadFactory): ExecutorServiceFactory } -/** - * A small configuration DSL to create ThreadPoolExecutors that can be provided as an ExecutorServiceFactoryProvider to Dispatcher - */ +/** A small configuration DSL to create ThreadPoolExecutors that can be provided as an ExecutorServiceFactoryProvider to Dispatcher */ final case class ThreadPoolConfig( allowCorePoolTimeout: Boolean = ThreadPoolConfig.defaultAllowCoreThreadTimeout, corePoolSize: Int = ThreadPoolConfig.defaultCorePoolSize, @@ -103,9 +97,7 @@ final case class ThreadPoolConfig( } } -/** - * A DSL to configure and create a MessageDispatcher with a ThreadPoolExecutor - */ +/** A DSL to configure and create a MessageDispatcher with a ThreadPoolExecutor */ final case class ThreadPoolConfigBuilder(config: ThreadPoolConfig) { import ThreadPoolConfig._ @@ -209,9 +201,7 @@ final case class MonitorableThreadFactory( } } -/** - * As the name says - */ +/** As the name says */ trait ExecutorServiceDelegate extends ExecutorService { def executor: ExecutorService diff --git a/akka-actor/src/main/scala/akka/dispatch/affinity/AffinityPool.scala b/akka-actor/src/main/scala/akka/dispatch/affinity/AffinityPool.scala index 24f88f0a120..ffae42d3c14 100644 --- a/akka-actor/src/main/scala/akka/dispatch/affinity/AffinityPool.scala +++ b/akka-actor/src/main/scala/akka/dispatch/affinity/AffinityPool.scala @@ -170,7 +170,6 @@ private[akka] class AffinityPool( * due to an exception being thrown in user code, the worker is * responsible for adding one more worker to compensate for its * own termination - * */ private def onWorkerExit(w: AffinityPoolWorker, abruptTermination: Boolean): Unit = bookKeepingLock.withGuard { @@ -292,9 +291,7 @@ private[akka] class AffinityPool( } } -/** - * INTERNAL API - */ +/** 
INTERNAL API */ @InternalApi @ApiMayChange private[akka] final class AffinityPoolConfigurator(config: Config, prerequisites: DispatcherPrerequisites) @@ -314,21 +311,19 @@ private[akka] final class AffinityPoolConfigurator(config: Config, prerequisites private val queueSelectorFactory: QueueSelectorFactory = prerequisites.dynamicAccess .createInstanceFor[QueueSelectorFactory](queueSelectorFactoryFQCN, immutable.Seq(classOf[Config] -> config)) - .recover { - case _ => - throw new IllegalArgumentException( - s"Cannot instantiate QueueSelectorFactory(queueSelector = $queueSelectorFactoryFQCN), make sure it has an accessible constructor which accepts a Config parameter") + .recover { case _ => + throw new IllegalArgumentException( + s"Cannot instantiate QueueSelectorFactory(queueSelector = $queueSelectorFactoryFQCN), make sure it has an accessible constructor which accepts a Config parameter") } .get private val rejectionHandlerFactoryFCQN = config.getString("rejection-handler") private val rejectionHandlerFactory = prerequisites.dynamicAccess .createInstanceFor[RejectionHandlerFactory](rejectionHandlerFactoryFCQN, Nil) - .recover { - case exception => - throw new IllegalArgumentException( - s"Cannot instantiate RejectionHandlerFactory(rejection-handler = $rejectionHandlerFactoryFCQN), make sure it has an accessible empty constructor", - exception) + .recover { case exception => + throw new IllegalArgumentException( + s"Cannot instantiate RejectionHandlerFactory(rejection-handler = $rejectionHandlerFactoryFCQN), make sure it has an accessible empty constructor", + exception) } .get @@ -380,9 +375,7 @@ trait QueueSelector { def getQueue(command: Runnable, queues: Int): Int } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @ApiMayChange private[akka] final class ThrowOnOverflowRejectionHandler extends RejectionHandlerFactory with RejectionHandler { @@ -391,9 +384,7 @@ private[akka] final class ThrowOnOverflowRejectionHandler extends RejectionHandl override 
def create(): RejectionHandler = this } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @ApiMayChange private[akka] final class FairDistributionHashCache(val config: Config) extends QueueSelectorFactory { @@ -410,7 +401,7 @@ private[akka] final class FairDistributionHashCache(val config: Config) extends override def toString: String = s"FairDistributionHashCache(fairDistributionThreshold = $fairDistributionThreshold)" private[this] final def improve(h: Int): Int = - 0x7FFFFFFF & (reverseBytes(h * 0x9e3775cd) * 0x9e3775cd) // `sbhash`: In memory of Phil Bagwell. + 0x7fffffff & (reverseBytes(h * 0x9e3775cd) * 0x9e3775cd) // `sbhash`: In memory of Phil Bagwell. override final def getQueue(command: Runnable, queues: Int): Int = { val runnableHash = command.hashCode() if (fairDistributionThreshold == 0) diff --git a/akka-actor/src/main/scala/akka/dispatch/sysmsg/SystemMessage.scala b/akka-actor/src/main/scala/akka/dispatch/sysmsg/SystemMessage.scala index 806237801a8..a3fd0ce046a 100644 --- a/akka-actor/src/main/scala/akka/dispatch/sysmsg/SystemMessage.scala +++ b/akka-actor/src/main/scala/akka/dispatch/sysmsg/SystemMessage.scala @@ -36,7 +36,6 @@ private[akka] object SystemMessageList { } /** - * * INTERNAL API * * Value class supporting list operations on system messages. The `next` field of [[SystemMessage]] @@ -48,24 +47,17 @@ private[akka] object SystemMessageList { * * The type of the list also encodes that the messages contained are in reverse order, i.e. the head of the list is the * latest appended element. - * */ private[akka] class LatestFirstSystemMessageList(val head: SystemMessage) extends AnyVal { import SystemMessageList._ - /** - * Indicates if the list is empty or not. This operation has constant cost. - */ + /** Indicates if the list is empty or not. This operation has constant cost. */ final def isEmpty: Boolean = head eq null - /** - * Indicates if the list has at least one element or not. This operation has constant cost. 
- */ + /** Indicates if the list has at least one element or not. This operation has constant cost. */ final def nonEmpty: Boolean = head ne null - /** - * Indicates if the list is empty or not. This operation has constant cost. - */ + /** Indicates if the list is empty or not. This operation has constant cost. */ final def size: Int = sizeInner(head, 0) /** @@ -85,9 +77,7 @@ private[akka] class LatestFirstSystemMessageList(val head: SystemMessage) extend */ final def reverse: EarliestFirstSystemMessageList = new EarliestFirstSystemMessageList(reverseInner(head, null)) - /** - * Attaches a message to the current head of the list. This operation has constant cost. - */ + /** Attaches a message to the current head of the list. This operation has constant cost. */ final def ::(msg: SystemMessage): LatestFirstSystemMessageList = { assert(msg ne null) msg.next = head @@ -97,7 +87,6 @@ private[akka] class LatestFirstSystemMessageList(val head: SystemMessage) extend } /** - * * INTERNAL API * * Value class supporting list operations on system messages. The `next` field of [[SystemMessage]] @@ -109,24 +98,17 @@ private[akka] class LatestFirstSystemMessageList(val head: SystemMessage) extend * * This list type also encodes that the messages contained are in reverse order, i.e. the head of the list is the * latest appended element. - * */ private[akka] class EarliestFirstSystemMessageList(val head: SystemMessage) extends AnyVal { import SystemMessageList._ - /** - * Indicates if the list is empty or not. This operation has constant cost. - */ + /** Indicates if the list is empty or not. This operation has constant cost. */ final def isEmpty: Boolean = head eq null - /** - * Indicates if the list has at least one element or not. This operation has constant cost. - */ + /** Indicates if the list has at least one element or not. This operation has constant cost. */ final def nonEmpty: Boolean = head ne null - /** - * Indicates if the list is empty or not. 
This operation has constant cost. - */ + /** Indicates if the list is empty or not. This operation has constant cost. */ final def size: Int = sizeInner(head, 0) /** @@ -146,9 +128,7 @@ private[akka] class EarliestFirstSystemMessageList(val head: SystemMessage) exte */ final def reverse: LatestFirstSystemMessageList = new LatestFirstSystemMessageList(reverseInner(head, null)) - /** - * Attaches a message to the current head of the list. This operation has constant cost. - */ + /** Attaches a message to the current head of the list. This operation has constant cost. */ final def ::(msg: SystemMessage): EarliestFirstSystemMessageList = { assert(msg ne null) msg.next = head @@ -203,66 +183,56 @@ private[akka] sealed trait SystemMessage extends PossiblyHarmful with Serializab def unlinked: Boolean = next eq null } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] trait StashWhenWaitingForChildren -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] trait StashWhenFailed -/** - * INTERNAL API - */ +/** INTERNAL API */ @SerialVersionUID(1L) -private[akka] final case class Create(failure: Option[ActorInitializationException]) extends SystemMessage // sent to self from Dispatcher.register -/** - * INTERNAL API - */ +private[akka] final case class Create(failure: Option[ActorInitializationException]) + extends SystemMessage // sent to self from Dispatcher.register +/** INTERNAL API */ @SerialVersionUID(1L) -private[akka] final case class Recreate(cause: Throwable) extends SystemMessage with StashWhenWaitingForChildren // sent to self from ActorCell.restart -/** - * INTERNAL API - */ +private[akka] final case class Recreate(cause: Throwable) + extends SystemMessage + with StashWhenWaitingForChildren // sent to self from ActorCell.restart +/** INTERNAL API */ @SerialVersionUID(1L) -private[akka] final case class Suspend() extends SystemMessage with StashWhenWaitingForChildren // sent to self from ActorCell.suspend -/** - * INTERNAL API - */ +private[akka] 
final case class Suspend() + extends SystemMessage + with StashWhenWaitingForChildren // sent to self from ActorCell.suspend +/** INTERNAL API */ @SerialVersionUID(1L) -private[akka] final case class Resume(causedByFailure: Throwable) extends SystemMessage with StashWhenWaitingForChildren // sent to self from ActorCell.resume -/** - * INTERNAL API - */ +private[akka] final case class Resume(causedByFailure: Throwable) + extends SystemMessage + with StashWhenWaitingForChildren // sent to self from ActorCell.resume +/** INTERNAL API */ @SerialVersionUID(1L) -private[akka] final case class Terminate() extends SystemMessage with DeadLetterSuppression // sent to self from ActorCell.stop +private[akka] final case class Terminate() + extends SystemMessage + with DeadLetterSuppression // sent to self from ActorCell.stop -/** - * INTERNAL API - */ +/** INTERNAL API */ @SerialVersionUID(1L) -private[akka] final case class Supervise(child: ActorRef, async: Boolean) extends SystemMessage // sent to supervisor ActorRef from ActorCell.start -/** - * INTERNAL API - */ +private[akka] final case class Supervise(child: ActorRef, async: Boolean) + extends SystemMessage // sent to supervisor ActorRef from ActorCell.start +/** INTERNAL API */ @SerialVersionUID(1L) -private[akka] final case class Watch(watchee: InternalActorRef, watcher: InternalActorRef) extends SystemMessage // sent to establish a DeathWatch -/** - * INTERNAL API - */ -@SerialVersionUID(1L) // Watch and Unwatch have different signatures, but this can't be changed without breaking serialization compatibility -private[akka] final case class Unwatch(watchee: ActorRef, watcher: ActorRef) extends SystemMessage // sent to tear down a DeathWatch -/** - * INTERNAL API - */ +private[akka] final case class Watch(watchee: InternalActorRef, watcher: InternalActorRef) + extends SystemMessage // sent to establish a DeathWatch +/** INTERNAL API */ +@SerialVersionUID( + 1L +) // Watch and Unwatch have different signatures, but this 
can't be changed without breaking serialization compatibility +private[akka] final case class Unwatch(watchee: ActorRef, watcher: ActorRef) + extends SystemMessage // sent to tear down a DeathWatch +/** INTERNAL API */ @SerialVersionUID(1L) private[akka] case object NoMessage extends SystemMessage // switched into the mailbox to signal termination -/** - * INTERNAL API - */ +/** INTERNAL API */ @SerialVersionUID(1L) private[akka] final case class Failed(child: ActorRef, cause: Throwable, uid: Int) extends SystemMessage diff --git a/akka-actor/src/main/scala/akka/event/AddressTerminatedTopic.scala b/akka-actor/src/main/scala/akka/event/AddressTerminatedTopic.scala index 63601456942..3ca37c232d9 100644 --- a/akka-actor/src/main/scala/akka/event/AddressTerminatedTopic.scala +++ b/akka-actor/src/main/scala/akka/event/AddressTerminatedTopic.scala @@ -35,9 +35,7 @@ private[akka] object AddressTerminatedTopic extends ExtensionId[AddressTerminate new AddressTerminatedTopic } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] final class AddressTerminatedTopic extends Extension { private val subscribers = new AtomicReference[Set[ActorRef]](Set.empty[ActorRef]) diff --git a/akka-actor/src/main/scala/akka/event/DeadLetterListener.scala b/akka-actor/src/main/scala/akka/event/DeadLetterListener.scala index 87f3dd18da4..e34546022ec 100644 --- a/akka-actor/src/main/scala/akka/event/DeadLetterListener.scala +++ b/akka-actor/src/main/scala/akka/event/DeadLetterListener.scala @@ -60,38 +60,35 @@ class DeadLetterListener extends Actor { case _ => receiveWithMaxCountLogging } - private def receiveWithAlwaysLogging: Receive = { - case d: AllDeadLetters => - if (!isWrappedSuppressed(d)) { - incrementCount() - logDeadLetter(d, doneMsg = "") - } + private def receiveWithAlwaysLogging: Receive = { case d: AllDeadLetters => + if (!isWrappedSuppressed(d)) { + incrementCount() + logDeadLetter(d, doneMsg = "") + } } - private def receiveWithMaxCountLogging: Receive = { - case d: 
AllDeadLetters => - if (!isWrappedSuppressed(d)) { - incrementCount() - if (count == maxCount) { - logDeadLetter(d, ", no more dead letters will be logged") - context.stop(self) - } else { - logDeadLetter(d, "") - } + private def receiveWithMaxCountLogging: Receive = { case d: AllDeadLetters => + if (!isWrappedSuppressed(d)) { + incrementCount() + if (count == maxCount) { + logDeadLetter(d, ", no more dead letters will be logged") + context.stop(self) + } else { + logDeadLetter(d, "") } + } } - private def receiveWithSuspendLogging(suspendDuration: FiniteDuration): Receive = { - case d: AllDeadLetters => - if (!isWrappedSuppressed(d)) { - incrementCount() - if (count == maxCount) { - val doneMsg = s", no more dead letters will be logged in next [${suspendDuration.pretty}]" - logDeadLetter(d, doneMsg) - context.become(receiveWhenSuspended(suspendDuration, Deadline.now + suspendDuration)) - } else - logDeadLetter(d, "") - } + private def receiveWithSuspendLogging(suspendDuration: FiniteDuration): Receive = { case d: AllDeadLetters => + if (!isWrappedSuppressed(d)) { + incrementCount() + if (count == maxCount) { + val doneMsg = s", no more dead letters will be logged in next [${suspendDuration.pretty}]" + logDeadLetter(d, doneMsg) + context.become(receiveWhenSuspended(suspendDuration, Deadline.now + suspendDuration)) + } else + logDeadLetter(d, "") + } } private def receiveWhenSuspended(suspendDuration: FiniteDuration, suspendDeadline: Deadline): Receive = { diff --git a/akka-actor/src/main/scala/akka/event/EventBus.scala b/akka-actor/src/main/scala/akka/event/EventBus.scala index 036fe66479e..07488ca9cac 100644 --- a/akka-actor/src/main/scala/akka/event/EventBus.scala +++ b/akka-actor/src/main/scala/akka/event/EventBus.scala @@ -25,7 +25,7 @@ trait EventBus { type Classifier type Subscriber - //#event-bus-api + // #event-bus-api /** * Attempts to register the subscriber to the specified Classifier * @return true if successful and false if not (because it was already 
@@ -40,36 +40,26 @@ trait EventBus { */ def unsubscribe(subscriber: Subscriber, from: Classifier): Boolean - /** - * Attempts to deregister the subscriber from all Classifiers it may be subscribed to - */ + /** Attempts to deregister the subscriber from all Classifiers it may be subscribed to */ def unsubscribe(subscriber: Subscriber): Unit - /** - * Publishes the specified Event to this bus - */ + /** Publishes the specified Event to this bus */ def publish(event: Event): Unit - //#event-bus-api + // #event-bus-api } -/** - * Represents an EventBus where the Subscriber type is ActorRef - */ +/** Represents an EventBus where the Subscriber type is ActorRef */ trait ActorEventBus extends EventBus { type Subscriber = ActorRef protected def compareSubscribers(a: ActorRef, b: ActorRef) = a.compareTo(b) } -/** - * Can be mixed into an EventBus to specify that the Classifier type is ActorRef - */ +/** Can be mixed into an EventBus to specify that the Classifier type is ActorRef */ trait ActorClassifier { this: EventBus => type Classifier = ActorRef } -/** - * Can be mixed into an EventBus to specify that the Classifier type is a Function from Event to Boolean (predicate) - */ +/** Can be mixed into an EventBus to specify that the Classifier type is a Function from Event to Boolean (predicate) */ trait PredicateClassifier { this: EventBus => type Classifier = Event => Boolean } @@ -82,28 +72,22 @@ trait PredicateClassifier { this: EventBus => */ trait LookupClassification { this: EventBus => - protected final val subscribers = new Index[Classifier, Subscriber](mapSize(), new Comparator[Subscriber] { - def compare(a: Subscriber, b: Subscriber): Int = compareSubscribers(a, b) - }) + protected final val subscribers = new Index[Classifier, Subscriber]( + mapSize(), + new Comparator[Subscriber] { + def compare(a: Subscriber, b: Subscriber): Int = compareSubscribers(a, b) + }) - /** - * This is a size hint for the number of Classifiers you expect to have (use powers of 2) - */ 
+ /** This is a size hint for the number of Classifiers you expect to have (use powers of 2) */ protected def mapSize(): Int - /** - * Provides a total ordering of Subscribers (think java.util.Comparator.compare) - */ + /** Provides a total ordering of Subscribers (think java.util.Comparator.compare) */ protected def compareSubscribers(a: Subscriber, b: Subscriber): Int - /** - * Returns the Classifier associated with the given Event - */ + /** Returns the Classifier associated with the given Event */ protected def classify(event: Event): Classifier - /** - * Publishes the given Event to the given Subscriber - */ + /** Publishes the given Event to the given Subscriber */ protected def publish(event: Event, subscriber: Subscriber): Unit def subscribe(subscriber: Subscriber, to: Classifier): Boolean = subscribers.put(to, subscriber) @@ -124,9 +108,7 @@ trait LookupClassification { this: EventBus => */ trait SubchannelClassification { this: EventBus => - /** - * The logic to form sub-class hierarchy - */ + /** The logic to form sub-class hierarchy */ protected implicit def subclassification: Subclassification[Classifier] // must be lazy to avoid initialization order problem with subclassification @@ -135,14 +117,10 @@ trait SubchannelClassification { this: EventBus => @volatile private var cache = Map.empty[Classifier, Set[Subscriber]] - /** - * Returns the Classifier associated with the given Event - */ + /** Returns the Classifier associated with the given Event */ protected def classify(event: Event): Classifier - /** - * Publishes the given Event to the given Subscriber - */ + /** Publishes the given Event to the given Subscriber */ protected def publish(event: Event, subscriber: Subscriber): Unit def subscribe(subscriber: Subscriber, to: Classifier): Boolean = subscriptions.synchronized { @@ -188,13 +166,13 @@ trait SubchannelClassification { this: EventBus => cache.values.exists { _ contains subscriber } private def removeFromCache(changes: 
immutable.Seq[(Classifier, Set[Subscriber])]): Unit = - cache = changes.foldLeft(cache) { - case (m, (c, cs)) => m.updated(c, m.getOrElse(c, Set.empty[Subscriber]).diff(cs)) + cache = changes.foldLeft(cache) { case (m, (c, cs)) => + m.updated(c, m.getOrElse(c, Set.empty[Subscriber]).diff(cs)) } private def addToCache(changes: immutable.Seq[(Classifier, Set[Subscriber])]): Unit = - cache = changes.foldLeft(cache) { - case (m, (c, cs)) => m.updated(c, m.getOrElse(c, Set.empty[Subscriber]).union(cs)) + cache = changes.foldLeft(cache) { case (m, (c, cs)) => + m.updated(c, m.getOrElse(c, Set.empty[Subscriber]).union(cs)) } } @@ -215,24 +193,16 @@ trait ScanningClassification { self: EventBus => } }) - /** - * Provides a total ordering of Classifiers (think java.util.Comparator.compare) - */ + /** Provides a total ordering of Classifiers (think java.util.Comparator.compare) */ protected def compareClassifiers(a: Classifier, b: Classifier): Int - /** - * Provides a total ordering of Subscribers (think java.util.Comparator.compare) - */ + /** Provides a total ordering of Subscribers (think java.util.Comparator.compare) */ protected def compareSubscribers(a: Subscriber, b: Subscriber): Int - /** - * Returns whether the specified Classifier matches the specified Event - */ + /** Returns whether the specified Classifier matches the specified Event */ protected def matches(classifier: Classifier, event: Event): Boolean - /** - * Publishes the specified Event to the specified Subscriber - */ + /** Publishes the specified Event to the specified Subscriber */ protected def publish(event: Event, subscriber: Subscriber): Unit def subscribe(subscriber: Subscriber, to: Classifier): Boolean = subscribers.add((to, subscriber)) @@ -297,7 +267,7 @@ trait ManagedActorClassification { this: ActorEventBus with ActorClassifier => /** The unsubscriber takes care of unsubscribing actors, which have terminated. 
*/ protected lazy val unsubscriber = - ActorClassificationUnsubscriber.start(system, this.toString(), (this.unsubscribe: ActorRef => Unit)) + ActorClassificationUnsubscriber.start(system, this.toString(), this.unsubscribe: ActorRef => Unit) @tailrec protected final def associate(monitored: ActorRef, monitor: ActorRef): Boolean = { @@ -376,14 +346,10 @@ trait ManagedActorClassification { this: ActorEventBus with ActorClassifier => } } - /** - * Returns the Classifier associated with the specified Event - */ + /** Returns the Classifier associated with the specified Event */ protected def classify(event: Event): Classifier - /** - * This is a size hint for the number of Classifiers you expect to have (use powers of 2) - */ + /** This is a size hint for the number of Classifiers you expect to have (use powers of 2) */ protected def mapSize: Int def publish(event: Event): Unit = { @@ -407,17 +373,13 @@ trait ManagedActorClassification { this: ActorEventBus with ActorClassifier => if (subscriber eq null) throw new IllegalArgumentException("Subscriber is null") else dissociate(subscriber) - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] def registerWithUnsubscriber(subscriber: ActorRef, seqNr: Int): Boolean = { unsubscriber ! ActorClassificationUnsubscriber.Register(subscriber, seqNr) true } - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] def unregisterFromUnsubscriber(subscriber: ActorRef, seqNr: Int): Boolean = { unsubscriber ! 
ActorClassificationUnsubscriber.Unregister(subscriber, seqNr) true diff --git a/akka-actor/src/main/scala/akka/event/EventStream.scala b/akka-actor/src/main/scala/akka/event/EventStream.scala index 0909845b699..fde9b2b01fe 100644 --- a/akka-actor/src/main/scala/akka/event/EventStream.scala +++ b/akka-actor/src/main/scala/akka/event/EventStream.scala @@ -55,9 +55,7 @@ class EventStream(sys: ActorSystem, private val debug: Boolean) extends LoggingB super.subscribe(subscriber, channel) } - /** - * Unsubscribe specific types subscriptions created by this actor from the event stream. - */ + /** Unsubscribe specific types subscriptions created by this actor from the event stream. */ override def unsubscribe(subscriber: ActorRef, channel: Class[_]): Boolean = { if (subscriber eq null) throw new IllegalArgumentException("subscriber is null") val ret = super.unsubscribe(subscriber, channel) @@ -68,9 +66,7 @@ class EventStream(sys: ActorSystem, private val debug: Boolean) extends LoggingB ret } - /** - * Unsubscribe all subscriptions created by this actor from the event stream. - */ + /** Unsubscribe all subscriptions created by this actor from the event stream. 
*/ override def unsubscribe(subscriber: ActorRef): Unit = { if (subscriber eq null) throw new IllegalArgumentException("subscriber is null") super.unsubscribe(subscriber) @@ -87,9 +83,7 @@ class EventStream(sys: ActorSystem, private val debug: Boolean) extends LoggingB // sys may be null for backwards compatibility reasons if (sys ne null) EventStreamUnsubscriber.start(sys, this) - /** - * INTERNAL API - */ + /** INTERNAL API */ @tailrec final private[akka] def initUnsubscriber(unsubscriber: ActorRef): Boolean = { // sys may be null for backwards compatibility reasons @@ -123,9 +117,7 @@ class EventStream(sys: ActorSystem, private val debug: Boolean) extends LoggingB } } - /** - * INTERNAL API - */ + /** INTERNAL API */ @tailrec private def registerWithUnsubscriber(subscriber: ActorRef): Unit = { // sys may be null for backwards compatibility reasons diff --git a/akka-actor/src/main/scala/akka/event/LoggerMailbox.scala b/akka-actor/src/main/scala/akka/event/LoggerMailbox.scala index b2583cc1e8e..993d089374b 100644 --- a/akka-actor/src/main/scala/akka/event/LoggerMailbox.scala +++ b/akka-actor/src/main/scala/akka/event/LoggerMailbox.scala @@ -17,9 +17,7 @@ import akka.util.unused trait LoggerMessageQueueSemantics -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] class LoggerMailboxType(@unused settings: ActorSystem.Settings, @unused config: Config) extends MailboxType with ProducesMessageQueue[LoggerMailbox] { @@ -30,9 +28,7 @@ private[akka] class LoggerMailboxType(@unused settings: ActorSystem.Settings, @u } } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] class LoggerMailbox(@unused owner: ActorRef, system: ActorSystem) extends UnboundedMailbox.MessageQueue with LoggerMessageQueueSemantics { diff --git a/akka-actor/src/main/scala/akka/event/Logging.scala b/akka-actor/src/main/scala/akka/event/Logging.scala index f5c243b2d78..99c4f8f8c9e 100644 --- a/akka-actor/src/main/scala/akka/event/Logging.scala +++ 
b/akka-actor/src/main/scala/akka/event/Logging.scala @@ -43,9 +43,7 @@ trait LoggingBus extends ActorEventBus { private var loggers = Seq.empty[ActorRef] @volatile private var _logLevel: LogLevel = _ - /** - * Query currently set log level. See object Logging for more information. - */ + /** Query currently set log level. See object Logging for more information. */ def logLevel = _logLevel /** @@ -93,17 +91,13 @@ trait LoggingBus extends ActorEventBus { } } - /** - * Internal Akka use only - */ + /** Internal Akka use only */ private[akka] def startStdoutLogger(config: Settings): Unit = { setUpStdoutLogger(config) publish(Debug(simpleName(this), this.getClass, "StandardOutLogger started")) } - /** - * Internal Akka use only - */ + /** Internal Akka use only */ private[akka] def startDefaultLoggers(system: ActorSystemImpl): Unit = { val logName = simpleName(this) + "(" + system + ")" val level = levelFor(system.settings.LogLevel).getOrElse { @@ -127,12 +121,11 @@ trait LoggingBus extends ActorEventBus { .map { actorClass => addLogger(system, actorClass, level, logName) } - .recover { - case e => - throw new ConfigurationException( - "Logger specified in config can't be loaded [" + loggerName + - "] due to [" + e.toString + "]", - e) + .recover { case e => + throw new ConfigurationException( + "Logger specified in config can't be loaded [" + loggerName + + "] due to [" + e.toString + "]", + e) } .get } @@ -143,12 +136,13 @@ trait LoggingBus extends ActorEventBus { try { if (system.settings.DebugUnhandledMessage) subscribe( - system.systemActorOf(Props(new Actor { - def receive = { - case UnhandledMessage(msg, sender, rcp) => + system.systemActorOf( + Props(new Actor { + def receive = { case UnhandledMessage(msg, sender, rcp) => publish(Debug(rcp.path.toString, rcp.getClass, "unhandled message from " + sender + ": " + msg)) - } - }), "UnhandledMessageForwarder"), + } + }), + "UnhandledMessageForwarder"), classOf[UnhandledMessage]) } catch { case _: 
InvalidActorNameException => // ignore if it is already running @@ -165,9 +159,7 @@ trait LoggingBus extends ActorEventBus { } } - /** - * Internal Akka use only - */ + /** Internal Akka use only */ private[akka] def stopDefaultLoggers(system: ActorSystem): Unit = { @nowarn("msg=never used") val level = _logLevel // volatile access before reading loggers @@ -189,9 +181,7 @@ trait LoggingBus extends ActorEventBus { publish(Debug(simpleName(this), this.getClass, "all default loggers stopped")) } - /** - * INTERNAL API - */ + /** INTERNAL API */ private def addLogger( system: ActorSystemImpl, clazz: Class[_ <: Actor], @@ -201,16 +191,17 @@ trait LoggingBus extends ActorEventBus { val actor = system.systemActorOf(Props(clazz).withDispatcher(system.settings.LoggersDispatcher), name) implicit def timeout: Timeout = system.settings.LoggerStartTimeout import akka.pattern.ask - val response = try Await.result(actor ? InitializeLogger(this), timeout.duration) - catch { - case _: TimeoutException => - publish( - Warning( - logName, - this.getClass, - "Logger " + name + " did not respond within " + timeout + " to InitializeLogger(bus)")) - "[TIMEOUT]" - } + val response = + try Await.result(actor ? 
InitializeLogger(this), timeout.duration) + catch { + case _: TimeoutException => + publish( + Warning( + logName, + this.getClass, + "Logger " + name + " did not respond within " + timeout + " to InitializeLogger(bus)")) + "[TIMEOUT]" + } if (response != LoggerInitialized) throw new LoggerInitializationException( "Logger " + name + " did not respond with LoggerInitialized, sent instead " + response) @@ -271,9 +262,7 @@ trait LoggingBus extends ActorEventBus { def getClazz(t: T): Class[_] = t.getClass } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class ActorWithLogClass(actor: Actor, logClass: Class[_]) /** @@ -325,9 +314,7 @@ object LogSource { } } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] implicit val fromActorWithLoggerClass: LogSource[ActorWithLogClass] = new LogSource[ActorWithLogClass] { def genString(a: ActorWithLogClass) = fromActor.genString(a.actor) @@ -452,25 +439,19 @@ object Logging { case m => m.getClass.getName } - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] object LogExt extends ExtensionId[LogExt] { override def createExtension(system: ExtendedActorSystem): LogExt = new LogExt(system) } - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] class LogExt(@unused system: ExtendedActorSystem) extends Extension { private val loggerId = new AtomicInteger def id() = loggerId.incrementAndGet() } - /** - * Marker trait for annotating LogLevel, which must be Int after erasure. - */ + /** Marker trait for annotating LogLevel, which must be Int after erasure. 
*/ final case class LogLevel(asInt: Int) extends AnyVal { @inline final def >=(other: LogLevel): Boolean = asInt >= other.asInt @inline final def <=(other: LogLevel): Boolean = asInt <= other.asInt @@ -524,9 +505,7 @@ object Logging { else DebugLevel } - /** - * Returns the event class associated with the given LogLevel - */ + /** Returns the event class associated with the given LogLevel */ def classFor(level: LogLevel): Class[_ <: LogEvent] = level match { case ErrorLevel => classOf[Error] case WarningLevel => classOf[Warning] @@ -698,58 +677,38 @@ object Logging { */ class LoggerException extends AkkaException("") - /** - * Exception that wraps a LogEvent. - */ + /** Exception that wraps a LogEvent. */ class LogEventException(val event: LogEvent, cause: Throwable) extends NoStackTrace { override def getMessage: String = event.toString override def getCause: Throwable = cause } - /** - * Base type of LogEvents - */ + /** Base type of LogEvents */ sealed trait LogEvent extends NoSerializationVerificationNeeded { - /** - * The thread that created this log event - */ + /** The thread that created this log event */ @transient val thread: Thread = Thread.currentThread() - /** - * When this LogEvent was created according to System.currentTimeMillis - */ + /** When this LogEvent was created according to System.currentTimeMillis */ val timestamp: Long = System.currentTimeMillis - /** - * The LogLevel of this LogEvent - */ + /** The LogLevel of this LogEvent */ def level: LogLevel - /** - * The source of this event - */ + /** The source of this event */ def logSource: String - /** - * The class of the source of this event - */ + /** The class of the source of this event */ def logClass: Class[_] - /** - * The message, may be any object or null. - */ + /** The message, may be any object or null. 
*/ def message: Any - /** - * Extra values for adding to MDC - */ + /** Extra values for adding to MDC */ def mdc: MDC = emptyMDC - /** - * Java API: Retrieve the contents of the MDC. - */ + /** Java API: Retrieve the contents of the MDC. */ def getMDC: java.util.Map[String, Any] = { import akka.util.ccompat.JavaConverters._ mdc.asJava @@ -793,9 +752,7 @@ object Logging { def cause: Throwable } - /** - * For ERROR Logging - */ + /** For ERROR Logging */ case class Error(override val cause: Throwable, logSource: String, logClass: Class[_], message: Any = "") extends LogEvent with LogEventWithCause { @@ -846,9 +803,7 @@ object Logging { } def noCause = Error.NoCause - /** - * For WARNING Logging - */ + /** For WARNING Logging */ case class Warning(logSource: String, logClass: Class[_], message: Any = "") extends LogEvent { override def level = WarningLevel } @@ -884,9 +839,7 @@ object Logging { new Warning4(logSource, logClass, message, mdc, marker, cause) } - /** - * For INFO Logging - */ + /** For INFO Logging */ case class Info(logSource: String, logClass: Class[_], message: Any = "") extends LogEvent { override def level = InfoLevel } @@ -907,9 +860,7 @@ object Logging { new Info3(logSource, logClass, message, mdc, marker) } - /** - * For DEBUG Logging - */ + /** For DEBUG Logging */ case class Debug(logSource: String, logClass: Class[_], message: Any = "") extends LogEvent { override def level = DebugLevel } @@ -961,21 +912,15 @@ object Logging { abstract class LoggerInitialized case object LoggerInitialized extends LoggerInitialized { - /** - * Java API: get the singleton instance - */ + /** Java API: get the singleton instance */ def getInstance = this } - /** - * Java API to create a LoggerInitialized message. - */ + /** Java API to create a LoggerInitialized message. 
*/ // weird return type due to binary compatibility def loggerInitialized(): LoggerInitialized.type = LoggerInitialized - /** - * LoggerInitializationException is thrown to indicate that there was a problem initializing a logger - */ + /** LoggerInitializationException is thrown to indicate that there was a problem initializing a logger */ class LoggerInitializationException(msg: String) extends AkkaException(msg) trait StdOutLogger { @@ -1113,9 +1058,7 @@ object Logging { private val serializedStandardOutLogger = new SerializedStandardOutLogger - /** - * INTERNAL API - */ + /** INTERNAL API */ @SerialVersionUID(1L) private[akka] class SerializedStandardOutLogger extends Serializable { @throws(classOf[java.io.ObjectStreamException]) private def readResolve(): AnyRef = Logging.StandardOutLogger @@ -1135,9 +1078,7 @@ object Logging { } } - /** - * Returns the StackTrace for the given Throwable as a String - */ + /** Returns the StackTrace for the given Throwable as a String */ def stackTraceFor(e: Throwable): String = e match { case null | Error.NoCause => "" case _: NoStackTrace => s" (${e.getClass.getName}: ${e.getMessage})" @@ -1446,9 +1387,7 @@ trait LoggingAdapter { if (isDebugEnabled) notifyDebug(format(template, arg1, arg2, arg3, arg4)) } - /** - * Log message at the specified log level. - */ + /** Log message at the specified log level. */ def log(level: Logging.LogLevel, message: String): Unit = { if (isEnabled(level)) notifyLog(level, message) } /** @@ -1461,30 +1400,22 @@ trait LoggingAdapter { if (isEnabled(level)) notifyLog(level, format1(template, arg1)) } - /** - * Message template with 2 replacement arguments. - */ + /** Message template with 2 replacement arguments. */ def log(level: Logging.LogLevel, template: String, arg1: Any, arg2: Any): Unit = { if (isEnabled(level)) notifyLog(level, format(template, arg1, arg2)) } - /** - * Message template with 3 replacement arguments. - */ + /** Message template with 3 replacement arguments. 
*/ def log(level: Logging.LogLevel, template: String, arg1: Any, arg2: Any, arg3: Any): Unit = { if (isEnabled(level)) notifyLog(level, format(template, arg1, arg2, arg3)) } - /** - * Message template with 4 replacement arguments. - */ + /** Message template with 4 replacement arguments. */ def log(level: Logging.LogLevel, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { if (isEnabled(level)) notifyLog(level, format(template, arg1, arg2, arg3, arg4)) } - /** - * @return true if the specified log level is enabled - */ + /** @return true if the specified log level is enabled */ final def isEnabled(level: Logging.LogLevel): Boolean = level match { case Logging.ErrorLevel => isErrorEnabled case Logging.WarningLevel => isWarningEnabled @@ -1664,13 +1595,11 @@ trait DiagnosticLoggingAdapter extends LoggingAdapter { */ def setMDC(jMdc: java.util.Map[String, Any]): Unit = mdc(if (jMdc != null) jMdc.asScala.toMap else emptyMDC) - /** - * Clear all entries in the MDC - */ + /** Clear all entries in the MDC */ def clearMDC(): Unit = mdc(emptyMDC) } -/** DO NOT INHERIT: Class is open only for use by akka-slf4j*/ +/** DO NOT INHERIT: Class is open only for use by akka-slf4j */ @DoNotInherit class LogMarker(val name: String, val properties: Map[String, Any]) { @@ -1706,9 +1635,7 @@ object LogMarker { private[akka] final val Security = apply("SECURITY") - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] object Properties { val MessageClass = "akkaMessageClass" val RemoteAddress = "akkaRemoteAddress" @@ -1717,9 +1644,7 @@ object LogMarker { } -/** - * [[LoggingAdapter]] extension which adds Marker support. - */ +/** [[LoggingAdapter]] extension which adds Marker support. 
*/ class MarkerLoggingAdapter( override val bus: LoggingBus, override val logSource: String, @@ -1963,9 +1888,7 @@ class MarkerLoggingAdapter( if (isDebugEnabled(marker)) bus.publish(Debug(logSource, logClass, format(template, arg1, arg2, arg3, arg4), mdc, marker)) - /** - * Log message at the specified log level. - */ + /** Log message at the specified log level. */ def log(marker: LogMarker, level: Logging.LogLevel, message: String): Unit = { level match { case Logging.DebugLevel => debug(marker, message) @@ -1979,8 +1902,8 @@ class MarkerLoggingAdapter( // Copy of LoggingAdapter.format1 due to binary compatibility restrictions private def format1(t: String, arg: Any): String = arg match { case a: Array[_] if !a.getClass.getComponentType.isPrimitive => format(t, a.toIndexedSeq) - case a: Array[_] => format(t, a.map(_.asInstanceOf[AnyRef]).toIndexedSeq) - case x => format(t, x) + case a: Array[_] => format(t, a.map(_.asInstanceOf[AnyRef]).toIndexedSeq) + case x => format(t, x) } } @@ -1992,9 +1915,7 @@ final class DiagnosticMarkerBusLoggingAdapter( extends MarkerLoggingAdapter(bus, logSource, logClass, loggingFilter) with DiagnosticLoggingAdapter -/** - * [[akka.event.LoggingAdapter]] that publishes [[akka.event.Logging.LogEvent]] to event stream. - */ +/** [[akka.event.LoggingAdapter]] that publishes [[akka.event.Logging.LogEvent]] to event stream. */ class BusLogging(val bus: LoggingBus, val logSource: String, val logClass: Class[_], loggingFilter: LoggingFilter) extends LoggingAdapter { @@ -2024,9 +1945,7 @@ class BusLogging(val bus: LoggingBus, val logSource: String, val logClass: Class bus.publish(Debug(logSource, logClass, message, mdc)) } -/** - * NoLogging is a LoggingAdapter that does absolutely nothing – no logging at all. - */ +/** NoLogging is a LoggingAdapter that does absolutely nothing – no logging at all. 
*/ object NoLogging extends LoggingAdapter { /** @@ -2048,9 +1967,7 @@ object NoLogging extends LoggingAdapter { final protected override def notifyDebug(message: String): Unit = () } -/** - * NoLogging is a MarkerLoggingAdapter that does absolutely nothing – no logging at all. - */ +/** NoLogging is a MarkerLoggingAdapter that does absolutely nothing – no logging at all. */ object NoMarkerLogging extends MarkerLoggingAdapter(null, "source", classOf[String], null) { /** diff --git a/akka-actor/src/main/scala/akka/event/LoggingReceive.scala b/akka-actor/src/main/scala/akka/event/LoggingReceive.scala index 14dd66ae479..f7a4059f0fa 100644 --- a/akka-actor/src/main/scala/akka/event/LoggingReceive.scala +++ b/akka-actor/src/main/scala/akka/event/LoggingReceive.scala @@ -40,25 +40,19 @@ object LoggingReceive { */ def apply(logLevel: LogLevel)(r: Receive)(implicit context: ActorContext): Receive = withLabel(null, logLevel)(r) - /** - * Java API: compatible with lambda expressions - */ + /** Java API: compatible with lambda expressions */ def create(r: AbstractActor.Receive, context: AbstractActor.ActorContext): AbstractActor.Receive = new AbstractActor.Receive( apply(r.onMessage.asInstanceOf[PartialFunction[Any, Unit]])(context) .asInstanceOf[PartialFunction[Any, BoxedUnit]]) - /** - * Create a decorated logger which will append `" in state " + label` to each message it logs. - */ + /** Create a decorated logger which will append `" in state " + label` to each message it logs. */ def withLabel(label: String, logLevel: LogLevel)(r: Receive)(implicit context: ActorContext): Receive = r match { case _: LoggingReceive => r - case _ => if (context.system.settings.AddLoggingReceive) new LoggingReceive(None, r, Option(label), logLevel) else r + case _ => if (context.system.settings.AddLoggingReceive) new LoggingReceive(None, r, Option(label), logLevel) else r } - /** - * Create a decorated logger which will append `" in state " + label` to each message it logs. 
- */ + /** Create a decorated logger which will append `" in state " + label` to each message it logs. */ def withLabel(label: String)(r: Receive)(implicit context: ActorContext): Receive = withLabel(label, Logging.DebugLevel)(r) } @@ -67,8 +61,8 @@ object LoggingReceive { * This decorator adds invocation logging to a Receive function. * @param source the log source, if not defined the actor of the context will be used */ -class LoggingReceive(source: Option[AnyRef], r: Receive, label: Option[String], logLevel: LogLevel)( - implicit context: ActorContext) +class LoggingReceive(source: Option[AnyRef], r: Receive, label: Option[String], logLevel: LogLevel)(implicit + context: ActorContext) extends Receive { def this(source: Option[AnyRef], r: Receive, label: Option[String])(implicit context: ActorContext) = this(source, r, label, Logging.DebugLevel) @@ -79,8 +73,8 @@ class LoggingReceive(source: Option[AnyRef], r: Receive, label: Option[String], if (context.system.eventStream.logLevel >= logLevel) { val src = source.getOrElse(context.asInstanceOf[ActorCell].actor) val (str, clazz) = LogSource.fromAnyRef(src) - val message = "received " + (if (handled) "handled" else "unhandled") + " message " + o + " from " + context - .sender() + + val message = + "received " + (if (handled) "handled" else "unhandled") + " message " + o + " from " + context.sender() + (label match { case Some(l) => " in state " + l case _ => "" diff --git a/akka-actor/src/main/scala/akka/event/japi/EventBusJavaAPI.scala b/akka-actor/src/main/scala/akka/event/japi/EventBusJavaAPI.scala index 369af3b7609..3c53b34e294 100644 --- a/akka-actor/src/main/scala/akka/event/japi/EventBusJavaAPI.scala +++ b/akka-actor/src/main/scala/akka/event/japi/EventBusJavaAPI.scala @@ -27,14 +27,10 @@ trait EventBus[E, S, C] { */ def unsubscribe(subscriber: S, from: C): Boolean - /** - * Attempts to deregister the subscriber from all Classifiers it may be subscribed to - */ + /** Attempts to deregister the subscriber from 
all Classifiers it may be subscribed to */ def unsubscribe(subscriber: S): Unit - /** - * Publishes the specified Event to this bus - */ + /** Publishes the specified Event to this bus */ def publish(event: E): Unit } @@ -62,24 +58,16 @@ abstract class LookupEventBus[E, S, C] extends EventBus[E, S, C] { LookupEventBus.this.publish(event, subscriber) } - /** - * This is a size hint for the number of Classifiers you expect to have (use powers of 2) - */ + /** This is a size hint for the number of Classifiers you expect to have (use powers of 2) */ protected def mapSize(): Int - /** - * Provides a total ordering of Subscribers (think java.util.Comparator.compare) - */ + /** Provides a total ordering of Subscribers (think java.util.Comparator.compare) */ protected def compareSubscribers(a: S, b: S): Int - /** - * Returns the Classifier associated with the given Event - */ + /** Returns the Classifier associated with the given Event */ protected def classify(event: E): C - /** - * Publishes the given Event to the given Subscriber - */ + /** Publishes the given Event to the given Subscriber */ protected def publish(event: E, subscriber: S): Unit override def subscribe(subscriber: S, to: C): Boolean = bus.subscribe(subscriber, to) @@ -111,19 +99,13 @@ abstract class SubchannelEventBus[E, S, C] extends EventBus[E, S, C] { SubchannelEventBus.this.publish(event, subscriber) } - /** - * The logic to form sub-class hierarchy - */ + /** The logic to form sub-class hierarchy */ def subclassification: Subclassification[C] - /** - * Returns the Classifier associated with the given Event - */ + /** Returns the Classifier associated with the given Event */ protected def classify(event: E): C - /** - * Publishes the given Event to the given Subscriber - */ + /** Publishes the given Event to the given Subscriber */ protected def publish(event: E, subscriber: S): Unit override def subscribe(subscriber: S, to: C): Boolean = bus.subscribe(subscriber, to) @@ -158,24 +140,16 @@ abstract 
class ScanningEventBus[E, S, C] extends EventBus[E, S, C] { ScanningEventBus.this.publish(event, subscriber) } - /** - * Provides a total ordering of Classifiers (think java.util.Comparator.compare) - */ + /** Provides a total ordering of Classifiers (think java.util.Comparator.compare) */ protected def compareClassifiers(a: C, b: C): Int - /** - * Provides a total ordering of Subscribers (think java.util.Comparator.compare) - */ + /** Provides a total ordering of Subscribers (think java.util.Comparator.compare) */ protected def compareSubscribers(a: S, b: S): Int - /** - * Returns whether the specified Classifier matches the specified Event - */ + /** Returns whether the specified Classifier matches the specified Event */ protected def matches(classifier: C, event: E): Boolean - /** - * Publishes the specified Event to the specified Subscriber - */ + /** Publishes the specified Event to the specified Subscriber */ protected def publish(event: E, subscriber: S): Unit override def subscribe(subscriber: S, to: C): Boolean = bus.subscribe(subscriber, to) @@ -191,8 +165,9 @@ abstract class ScanningEventBus[E, S, C] extends EventBus[E, S, C] { * E is the Event type */ abstract class ManagedActorEventBus[E](system: ActorSystem) extends EventBus[E, ActorRef, ActorRef] { - private val bus = new akka.event.ActorEventBus with akka.event.ManagedActorClassification - with akka.event.ActorClassifier { + private val bus = new akka.event.ActorEventBus + with akka.event.ManagedActorClassification + with akka.event.ActorClassifier { type Event = E override val system = ManagedActorEventBus.this.system @@ -203,14 +178,10 @@ abstract class ManagedActorEventBus[E](system: ActorSystem) extends EventBus[E, ManagedActorEventBus.this.classify(event) } - /** - * This is a size hint for the number of Classifiers you expect to have (use powers of 2) - */ + /** This is a size hint for the number of Classifiers you expect to have (use powers of 2) */ protected def mapSize(): Int - /** - * 
Returns the Classifier associated with the given Event - */ + /** Returns the Classifier associated with the given Event */ protected def classify(event: E): ActorRef override def subscribe(subscriber: ActorRef, to: ActorRef): Boolean = bus.subscribe(subscriber, to) diff --git a/akka-actor/src/main/scala/akka/event/jul/JavaLogger.scala b/akka-actor/src/main/scala/akka/event/jul/JavaLogger.scala index cea5cf33ff3..53c7e6c97ec 100644 --- a/akka-actor/src/main/scala/akka/event/jul/JavaLogger.scala +++ b/akka-actor/src/main/scala/akka/event/jul/JavaLogger.scala @@ -16,9 +16,7 @@ import akka.event.Logging._ import akka.event.LoggingFilter import akka.util.unused -/** - * `java.util.logging` logger. - */ +/** `java.util.logging` logger. */ @deprecated("Use Slf4jLogger instead.", "2.6.0") class JavaLogger extends Actor with RequiresMessageQueue[LoggerMessageQueueSemantics] { import Logger.mapLevel @@ -46,18 +44,14 @@ class JavaLogger extends Actor with RequiresMessageQueue[LoggerMessageQueueSeman } } -/** - * Base trait for all classes that wants to be able use the JUL logging infrastructure. - */ +/** Base trait for all classes that wants to be able use the JUL logging infrastructure. 
*/ @deprecated("Use SLF4J or direct java.util.logging instead.", "2.6.0") trait JavaLogging { @transient lazy val log: logging.Logger = Logger(this.getClass.getName) } -/** - * Logger is a factory for obtaining JUL Loggers - */ +/** Logger is a factory for obtaining JUL Loggers */ @deprecated("Use SLF4J or direct java.util.logging instead.", "2.6.0") object Logger { @@ -77,9 +71,7 @@ object Logger { case _ => logging.Logger.getLogger(logClass.getName) } - /** - * Returns the JUL Root Logger - */ + /** Returns the JUL Root Logger */ def root: logging.Logger = logging.Logger.getGlobal() def mapLevel(level: LogLevel): logging.Level = level.asInt match { diff --git a/akka-actor/src/main/scala/akka/io/DirectByteBufferPool.scala b/akka-actor/src/main/scala/akka/io/DirectByteBufferPool.scala index b306ce8cb9a..25756e4a154 100644 --- a/akka-actor/src/main/scala/akka/io/DirectByteBufferPool.scala +++ b/akka-actor/src/main/scala/akka/io/DirectByteBufferPool.scala @@ -87,10 +87,12 @@ private[akka] object DirectByteBufferPool { cleanMethod.setAccessible(true) { (bb: ByteBuffer) => - try if (bb.isDirect) { - val cleaner = cleanerMethod.invoke(bb) - cleanMethod.invoke(cleaner) - } catch { case NonFatal(_) => /* ok, best effort attempt to cleanup failed */ } + try + if (bb.isDirect) { + val cleaner = cleanerMethod.invoke(bb) + cleanMethod.invoke(cleaner) + } + catch { case NonFatal(_) => /* ok, best effort attempt to cleanup failed */ } } } catch { case NonFatal(_) => _ => () /* reflection failed, use no-op fallback */ } diff --git a/akka-actor/src/main/scala/akka/io/Dns.scala b/akka-actor/src/main/scala/akka/io/Dns.scala index 837a51b53a0..de8b30d22d9 100644 --- a/akka-actor/src/main/scala/akka/io/Dns.scala +++ b/akka-actor/src/main/scala/akka/io/Dns.scala @@ -96,10 +96,12 @@ object Dns extends ExtensionId[DnsExt] with ExtensionIdProvider { @deprecated("Use cached(DnsProtocol.Resolve)", "2.6.0") def apply(newProtocol: DnsProtocol.Resolved): Resolved = { - 
Resolved(newProtocol.name, newProtocol.records.collect { - case r: ARecord => r.ip - case r: AAAARecord => r.ip - }) + Resolved( + newProtocol.name, + newProtocol.records.collect { + case r: ARecord => r.ip + case r: AAAARecord => r.ip + }) } } @@ -142,9 +144,7 @@ object Dns extends ExtensionId[DnsExt] with ExtensionIdProvider { override def createExtension(system: ExtendedActorSystem): DnsExt = new DnsExt(system) - /** - * Java API: retrieve the Udp extension for the given system. - */ + /** Java API: retrieve the Udp extension for the given system. */ override def get(system: ActorSystem): DnsExt = super.get(system) override def get(system: ClassicActorSystemProvider): DnsExt = super.get(system) } @@ -204,9 +204,7 @@ class DnsExt private[akka] (val system: ExtendedActorSystem, resolverName: Strin class Settings private[DnsExt] (config: Config, resolverName: String) { - /** - * Load the default resolver - */ + /** Load the default resolver */ def this(config: Config) = this(config, config.getString("resolver")) val Dispatcher: String = config.getString("dispatcher") @@ -240,15 +238,11 @@ class DnsExt private[akka] (val system: ExtendedActorSystem, resolverName: Strin } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi object IpVersionSelector { - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi def getInetAddress(ipv4: Option[Inet4Address], ipv6: Option[Inet6Address]): Option[InetAddress] = System.getProperty("java.net.preferIPv6Addresses") match { diff --git a/akka-actor/src/main/scala/akka/io/DnsProvider.scala b/akka-actor/src/main/scala/akka/io/DnsProvider.scala index 10554c62088..1b2758be3a8 100644 --- a/akka-actor/src/main/scala/akka/io/DnsProvider.scala +++ b/akka-actor/src/main/scala/akka/io/DnsProvider.scala @@ -12,7 +12,6 @@ import akka.actor.Actor * It is expected that this will be deprecated/removed in future Akka versions * * TODO make private and remove deprecated in 2.7.0 - * */ @deprecated("Overriding the DNS implementation 
will be removed in future versions of Akka", "2.6.0") trait DnsProvider { diff --git a/akka-actor/src/main/scala/akka/io/Inet.scala b/akka-actor/src/main/scala/akka/io/Inet.scala index f70a2331a79..b2c2a1543d2 100644 --- a/akka-actor/src/main/scala/akka/io/Inet.scala +++ b/akka-actor/src/main/scala/akka/io/Inet.scala @@ -19,24 +19,16 @@ object Inet { */ trait SocketOption { - /** - * Action to be taken for this option before bind() is called - */ + /** Action to be taken for this option before bind() is called */ def beforeDatagramBind(@unused ds: DatagramSocket): Unit = () - /** - * Action to be taken for this option before bind() is called - */ + /** Action to be taken for this option before bind() is called */ def beforeServerSocketBind(@unused ss: ServerSocket): Unit = () - /** - * Action to be taken for this option before calling connect() - */ + /** Action to be taken for this option before calling connect() */ def beforeConnect(@unused s: Socket): Unit = () - /** - * Action to be taken for this option after connect returned. - */ + /** Action to be taken for this option after connect returned. */ def afterConnect(@unused s: Socket): Unit = () } @@ -48,31 +40,21 @@ object Inet { trait SocketOptionV2 extends SocketOption { - /** - * Action to be taken for this option after connect returned. - */ + /** Action to be taken for this option after connect returned. */ def afterBind(@unused s: DatagramSocket): Unit = () - /** - * Action to be taken for this option after connect returned. - */ + /** Action to be taken for this option after connect returned. */ def afterBind(@unused s: ServerSocket): Unit = () - /** - * Action to be taken for this option after connect returned. - */ + /** Action to be taken for this option after connect returned. */ def afterConnect(@unused s: DatagramSocket): Unit = () } - /** - * Java API - */ + /** Java API */ abstract class AbstractSocketOptionV2 extends SocketOptionV2 - /** - * DatagramChannel creation behavior. 
- */ + /** DatagramChannel creation behavior. */ class DatagramChannelCreator extends SocketOption { /** diff --git a/akka-actor/src/main/scala/akka/io/InetAddressDnsProvider.scala b/akka-actor/src/main/scala/akka/io/InetAddressDnsProvider.scala index 48a3bf51450..fdebf87fffa 100644 --- a/akka-actor/src/main/scala/akka/io/InetAddressDnsProvider.scala +++ b/akka-actor/src/main/scala/akka/io/InetAddressDnsProvider.scala @@ -8,9 +8,7 @@ import scala.annotation.nowarn import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @nowarn("msg=deprecated") @InternalApi class InetAddressDnsProvider extends DnsProvider { diff --git a/akka-actor/src/main/scala/akka/io/SelectionHandler.scala b/akka-actor/src/main/scala/akka/io/SelectionHandler.scala index b36a8d87ded..a61d2353045 100644 --- a/akka-actor/src/main/scala/akka/io/SelectionHandler.scala +++ b/akka-actor/src/main/scala/akka/io/SelectionHandler.scala @@ -43,9 +43,7 @@ abstract class SelectionHandlerSettings(config: Config) { def MaxChannelsPerSelector: Int } -/** - * Interface behind which we hide our selector management logic from the connection actors - */ +/** Interface behind which we hide our selector management logic from the connection actors */ private[io] trait ChannelRegistry { /** @@ -131,9 +129,11 @@ private[io] object SelectionHandler { cause: Throwable, decision: SupervisorStrategy.Directive): Unit = if (cause.isInstanceOf[DeathPactException]) { - try context.system.eventStream.publish { - Logging.Debug(child.path.toString, getClass, "Closed after handler termination") - } catch { case NonFatal(_) => } + try + context.system.eventStream.publish { + Logging.Debug(child.path.toString, getClass, "Closed after handler termination") + } + catch { case NonFatal(_) => } } else super.logFailure(context, child, cause, decision) } @@ -328,8 +328,8 @@ private[io] class SelectionHandler(settings: SelectionHandlerSettings) // we can never recover from failures of a connection or listener child 
// and log the failure at debug level override def supervisorStrategy = { - def stoppingDecider: SupervisorStrategy.Decider = { - case _: Exception => SupervisorStrategy.Stop + def stoppingDecider: SupervisorStrategy.Decider = { case _: Exception => + SupervisorStrategy.Stop } new OneForOneStrategy()(stoppingDecider) { override def logFailure( diff --git a/akka-actor/src/main/scala/akka/io/SimpleDnsCache.scala b/akka-actor/src/main/scala/akka/io/SimpleDnsCache.scala index d3cdc0de414..eca3163b22f 100644 --- a/akka-actor/src/main/scala/akka/io/SimpleDnsCache.scala +++ b/akka-actor/src/main/scala/akka/io/SimpleDnsCache.scala @@ -65,9 +65,7 @@ class SimpleDnsCache extends Dns with PeriodicCacheCleanup with NoSerializationV else (now - nanoBase) / 1000000 } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] final def get(key: (String, RequestType)): Option[Resolved] = { cacheRef.get().get(key) @@ -90,9 +88,7 @@ class SimpleDnsCache extends Dns with PeriodicCacheCleanup with NoSerializationV } object SimpleDnsCache { - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[io] class Cache[K, V]( queue: immutable.SortedSet[ExpiryEntry[K]], @@ -134,18 +130,14 @@ object SimpleDnsCache { def isValid(clock: Long): Boolean = clock < until } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[io] class ExpiryEntry[K](val name: K, val until: Long) extends Ordered[ExpiryEntry[K]] { def isValid(clock: Long): Boolean = clock < until override def compare(that: ExpiryEntry[K]): Int = -until.compareTo(that.until) } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[io] def expiryEntryOrdering[K]() = new Ordering[ExpiryEntry[K]] { override def compare(x: ExpiryEntry[K], y: ExpiryEntry[K]): Int = { diff --git a/akka-actor/src/main/scala/akka/io/SimpleDnsManager.scala b/akka-actor/src/main/scala/akka/io/SimpleDnsManager.scala index 6c126eea967..302144fb397 100644 --- 
a/akka-actor/src/main/scala/akka/io/SimpleDnsManager.scala +++ b/akka-actor/src/main/scala/akka/io/SimpleDnsManager.scala @@ -40,10 +40,9 @@ final class SimpleDnsManager(val ext: DnsExt) } @nowarn("cat=deprecation") - val oldApis: Receive = { - case r @ Dns.Resolve(name) => - log.debug("(deprecated) Resolution request for {} from {}", name, sender()) - resolver.forward(r) + val oldApis: Receive = { case r @ Dns.Resolve(name) => + log.debug("(deprecated) Resolution request for {} from {}", name, sender()) + resolver.forward(r) } // the inet resolver supports the old and new DNS APIs diff --git a/akka-actor/src/main/scala/akka/io/Tcp.scala b/akka-actor/src/main/scala/akka/io/Tcp.scala index 35e3aff9e2c..a0bfdfeca8b 100644 --- a/akka-actor/src/main/scala/akka/io/Tcp.scala +++ b/akka-actor/src/main/scala/akka/io/Tcp.scala @@ -43,9 +43,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { override def createExtension(system: ExtendedActorSystem): TcpExt = new TcpExt(system) - /** - * Java API: retrieve the Tcp extension for the given system. - */ + /** Java API: retrieve the Tcp extension for the given system. */ override def get(system: ActorSystem): TcpExt = super.get(system) override def get(system: ClassicActorSystemProvider): TcpExt = super.get(system) @@ -95,16 +93,12 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { } - /** - * The common interface for [[Command]] and [[Event]]. - */ + /** The common interface for [[Command]] and [[Event]]. */ sealed trait Message extends NoSerializationVerificationNeeded /// COMMANDS - /** - * This is the common trait for all commands understood by TCP actors. - */ + /** This is the common trait for all commands understood by TCP actors. 
*/ trait Command extends Message with SelectionHandler.HasFailureMessage { def failureMessage = CommandFailed(this) } @@ -184,9 +178,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { */ case object Unbind extends Command - /** - * Common interface for all commands which aim to close down an open connection. - */ + /** Common interface for all commands which aim to close down an open connection. */ sealed trait CloseCommand extends Command with DeadLetterSuppression { /** @@ -256,9 +248,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { */ object NoAck extends NoAck(null) - /** - * Common interface for all write commands. - */ + /** Common interface for all write commands. */ sealed abstract class WriteCommand extends Command { /** @@ -294,26 +284,18 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { object WriteCommand { - /** - * Combines the given number of write commands into one atomic `WriteCommand`. - */ + /** Combines the given number of write commands into one atomic `WriteCommand`. */ def apply(writes: Iterable[WriteCommand]): WriteCommand = writes ++: Write.empty - /** - * Java API: combines the given number of write commands into one atomic `WriteCommand`. - */ + /** Java API: combines the given number of write commands into one atomic `WriteCommand`. */ def create(writes: JIterable[WriteCommand]): WriteCommand = apply(writes.asScala) } - /** - * Common supertype of [[Write]] and [[WriteFile]]. - */ + /** Common supertype of [[Write]] and [[WriteFile]]. */ sealed abstract class SimpleWriteCommand extends WriteCommand { require(ack != null, "ack must be non-null. Use NoAck if you don't want acks.") - /** - * The acknowledgment token associated with this write command. - */ + /** The acknowledgment token associated with this write command. 
*/ def ack: Event /** @@ -322,9 +304,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { */ def wantsAck: Boolean = !ack.isInstanceOf[NoAck] - /** - * Java API: appends this command with another `WriteCommand` to form a `CompoundWrite`. - */ + /** Java API: appends this command with another `WriteCommand` to form a `CompoundWrite`. */ def append(that: WriteCommand): CompoundWrite = this +: that } @@ -349,16 +329,12 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { */ val empty: Write = Write(ByteString.empty, NoAck) - /** - * Create a new unacknowledged Write command with the given data. - */ + /** Create a new unacknowledged Write command with the given data. */ def apply(data: ByteString): Write = if (data.isEmpty) empty else Write(data, NoAck) } - /** - * @see [[WritePath]] - */ + /** @see [[WritePath]] */ @deprecated("Use WritePath instead", "2.5.10") @nowarn("msg=deprecated") final case class WriteFile(filePath: String, position: Long, count: Long, ack: Event) extends SimpleWriteCommand { @@ -440,9 +416,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { final case class ResumeAccepting(batchSize: Int) extends Command /// EVENTS - /** - * Common interface for all events generated by the TCP layer actors. - */ + /** Common interface for all events generated by the TCP layer actors. */ trait Event extends Message /** @@ -528,9 +502,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { */ sealed trait ConnectionClosed extends Event with DeadLetterSuppression { - /** - * `true` iff the connection has been closed in response to an `Abort` command. - */ + /** `true` iff the connection has been closed in response to an `Abort` command. */ def isAborted: Boolean = false /** @@ -546,9 +518,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { */ def isPeerClosed: Boolean = false - /** - * `true` iff the connection has been closed due to an IO error. 
- */ + /** `true` iff the connection has been closed due to an IO error. */ def isErrorClosed: Boolean = false /** @@ -558,14 +528,10 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { def getErrorCause: String = null } - /** - * The connection has been closed normally in response to a `Close` command. - */ + /** The connection has been closed normally in response to a `Close` command. */ case object Closed extends ConnectionClosed - /** - * The connection has been aborted in response to an `Abort` command. - */ + /** The connection has been aborted in response to an `Abort` command. */ case object Aborted extends ConnectionClosed { override def isAborted = true } @@ -578,16 +544,12 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { override def isConfirmed = true } - /** - * The peer has closed its writing half of the connection. - */ + /** The peer has closed its writing half of the connection. */ case object PeerClosed extends ConnectionClosed { override def isPeerClosed = true } - /** - * The connection has been closed due to an IO error. - */ + /** The connection has been closed due to an IO error. 
*/ final case class ErrorClosed(cause: String) extends ConnectionClosed { override def isErrorClosed = true override def getErrorCause = cause @@ -626,8 +588,8 @@ class TcpExt(system: ExtendedActorSystem) extends IO.Extension { val FinishConnectRetries: Int = getInt("finish-connect-retries").requiring(_ > 0, "finish-connect-retries must be > 0") - val WindowsConnectionAbortWorkaroundEnabled - : Boolean = getString("windows-connection-abort-workaround-enabled") match { + val WindowsConnectionAbortWorkaroundEnabled: Boolean = getString( + "windows-connection-abort-workaround-enabled") match { case "auto" => Helpers.isWindows case _ => getBoolean("windows-connection-abort-workaround-enabled") } @@ -641,7 +603,6 @@ class TcpExt(system: ExtendedActorSystem) extends IO.Extension { } /** - * */ val manager: ActorRef = { system.systemActorOf( @@ -649,18 +610,14 @@ class TcpExt(system: ExtendedActorSystem) extends IO.Extension { name = "IO-TCP") } - /** - * Java API: retrieve a reference to the manager actor. - */ + /** Java API: retrieve a reference to the manager actor. */ def getManager: ActorRef = manager val bufferPool: BufferPool = new DirectByteBufferPool(Settings.DirectBufferSize, Settings.MaxDirectBufferPoolSize) val fileIoDispatcher = system.dispatchers.lookup(Settings.FileIODispatcher) } -/** - * Java API for accessing socket options. - */ +/** Java API for accessing socket options. */ object TcpSO extends SoJavaFactories { import Tcp.SO._ @@ -769,9 +726,7 @@ object TcpMessage { options: JIterable[SocketOption], pullMode: Boolean): Command = Bind(handler, endpoint, backlog, options, pullMode) - /** - * Open a listening socket without specifying options. - */ + /** Open a listening socket without specifying options. 
*/ def bind(handler: ActorRef, endpoint: InetSocketAddress, backlog: Int): Command = Bind(handler, endpoint, backlog, Nil) @@ -796,9 +751,7 @@ object TcpMessage { def register(handler: ActorRef, keepOpenOnPeerClosed: Boolean, useResumeWriting: Boolean): Command = Register(handler, keepOpenOnPeerClosed, useResumeWriting) - /** - * The same as `register(handler, false, false)`. - */ + /** The same as `register(handler, false, false)`. */ def register(handler: ActorRef): Command = Register(handler) /** @@ -859,9 +812,7 @@ object TcpMessage { */ def write(data: ByteString, ack: Event): Command = Write(data, ack) - /** - * The same as `write(data, noAck())`. - */ + /** The same as `write(data, noAck())`. */ def write(data: ByteString): Command = Write(data) /** diff --git a/akka-actor/src/main/scala/akka/io/TcpConnection.scala b/akka-actor/src/main/scala/akka/io/TcpConnection.scala index 5143feef72b..2cc6741396a 100644 --- a/akka-actor/src/main/scala/akka/io/TcpConnection.scala +++ b/akka-actor/src/main/scala/akka/io/TcpConnection.scala @@ -78,7 +78,7 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha // if we are in push mode or already have resumed reading in pullMode while waiting for Register // then register OP_READ interest - if (!pullMode || (/*pullMode && */ !readingSuspended)) resumeReading(info, None) + if (!pullMode || ( /*pullMode && */ !readingSuspended)) resumeReading(info, None) case ResumeReading => readingSuspended = false @@ -284,17 +284,19 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha } else MoreDataWaiting val buffer = bufferPool.acquire() - try innerRead(buffer, ReceivedMessageSizeLimit) match { - case AllRead => // nothing to do - case MoreDataWaiting => - if (!pullMode) self ! 
ChannelReadable - case EndOfStream if channel.socket.isOutputShutdown => - if (TraceLogging) log.debug("Read returned end-of-stream, our side already closed") - doCloseConnection(info.handler, closeCommander, ConfirmedClosed) - case EndOfStream => - if (TraceLogging) log.debug("Read returned end-of-stream, our side not yet closed") - handleClose(info, closeCommander, PeerClosed) - } catch { + try + innerRead(buffer, ReceivedMessageSizeLimit) match { + case AllRead => // nothing to do + case MoreDataWaiting => + if (!pullMode) self ! ChannelReadable + case EndOfStream if channel.socket.isOutputShutdown => + if (TraceLogging) log.debug("Read returned end-of-stream, our side already closed") + doCloseConnection(info.handler, closeCommander, ConfirmedClosed) + case EndOfStream => + if (TraceLogging) log.debug("Read returned end-of-stream, our side not yet closed") + handleClose(info, closeCommander, PeerClosed) + } + catch { case e: IOException => handleError(info.handler, e) } finally bufferPool.release(buffer) } @@ -426,7 +428,7 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha def PendingWrite(commander: ActorRef, write: WriteCommand): PendingWrite = { @tailrec def create(head: WriteCommand, tail: WriteCommand): PendingWrite = head match { - case Write.empty => if (tail eq Write.empty) EmptyPendingWrite else create(tail, Write.empty) + case Write.empty => if (tail eq Write.empty) EmptyPendingWrite else create(tail, Write.empty) case Write(data, ack) if data.nonEmpty => PendingBufferWrite(commander, data, ack, tail) case WriteFile(path, offset, count, ack) => PendingWriteFile(commander, Paths.get(path), offset, count, ack, tail) @@ -528,7 +530,7 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha self ! UpdatePendingWriteAndThen(updated, TcpConnection.doNothing) } else { release() - val andThen = if (!ack.isInstanceOf[NoAck])() => commander ! 
ack else doNothing + val andThen = if (!ack.isInstanceOf[NoAck]) () => commander ! ack else doNothing self ! UpdatePendingWriteAndThen(PendingWrite(commander, tail), andThen) } } catch { @@ -537,9 +539,7 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha } } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[io] object TcpConnection { sealed trait ReadResult object EndOfStream extends ReadResult @@ -552,9 +552,7 @@ private[io] object TcpConnection { */ final case class CloseInformation(notificationsTo: Set[ActorRef], closedEvent: Event) - /** - * Groups required connection-related data that are only available once the connection has been fully established. - */ + /** Groups required connection-related data that are only available once the connection has been fully established. */ final case class ConnectionInfo( registration: ChannelRegistration, handler: ActorRef, diff --git a/akka-actor/src/main/scala/akka/io/TcpIncomingConnection.scala b/akka-actor/src/main/scala/akka/io/TcpIncomingConnection.scala index b9c9878326e..48c2c0450dc 100644 --- a/akka-actor/src/main/scala/akka/io/TcpIncomingConnection.scala +++ b/akka-actor/src/main/scala/akka/io/TcpIncomingConnection.scala @@ -32,7 +32,7 @@ private[io] class TcpIncomingConnection( registry.register(channel, initialOps = 0) - def receive = { - case registration: ChannelRegistration => completeConnect(registration, bindHandler, options) + def receive = { case registration: ChannelRegistration => + completeConnect(registration, bindHandler, options) } } diff --git a/akka-actor/src/main/scala/akka/io/TcpListener.scala b/akka-actor/src/main/scala/akka/io/TcpListener.scala index 07c4025a955..79d19c850d9 100644 --- a/akka-actor/src/main/scala/akka/io/TcpListener.scala +++ b/akka-actor/src/main/scala/akka/io/TcpListener.scala @@ -15,9 +15,7 @@ import akka.dispatch.{ RequiresMessageQueue, UnboundedMessageQueueSemantics } import akka.io.SelectionHandler._ import akka.io.Tcp._ -/** - * 
INTERNAL API - */ +/** INTERNAL API */ private[io] object TcpListener { final case class RegisterIncoming(channel: SocketChannel) @@ -30,9 +28,7 @@ private[io] object TcpListener { } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[io] class TcpListener( selectorRouter: ActorRef, tcp: TcpExt, @@ -85,10 +81,9 @@ private[io] class TcpListener( override def supervisorStrategy = SelectionHandler.connectionSupervisorStrategy - def receive: Receive = { - case registration: ChannelRegistration => - bindCommander ! Bound(channel.socket.getLocalSocketAddress.asInstanceOf[InetSocketAddress]) - context.become(bound(registration)) + def receive: Receive = { case registration: ChannelRegistration => + bindCommander ! Bound(channel.socket.getLocalSocketAddress.asInstanceOf[InetSocketAddress]) + context.become(bound(registration)) } def bound(registration: ChannelRegistration): Receive = { @@ -115,11 +110,10 @@ private[io] class TcpListener( context.become(unregistering(sender())) } - def unregistering(requester: ActorRef): Receive = { - case Unbound => - requester ! Unbound - log.debug("Unbound endpoint {}, stopping listener", localAddress) - context.stop(self) + def unregistering(requester: ActorRef): Receive = { case Unbound => + requester ! 
Unbound + log.debug("Unbound endpoint {}, stopping listener", localAddress) + context.stop(self) } @tailrec final def acceptAllPending(registration: ChannelRegistration, limit: Int): Int = { diff --git a/akka-actor/src/main/scala/akka/io/TcpManager.scala b/akka-actor/src/main/scala/akka/io/TcpManager.scala index 0fb2c10158e..494995d10f6 100644 --- a/akka-actor/src/main/scala/akka/io/TcpManager.scala +++ b/akka-actor/src/main/scala/akka/io/TcpManager.scala @@ -43,7 +43,6 @@ import akka.actor.{ ActorLogging, Props } * If the connect request is rejected because the Tcp system is not able to register more channels (see the nr-of-selectors * and max-channels configuration options in the akka.io.tcp section of the configuration) the sender will be notified * with a [[akka.io.Tcp.CommandFailed]] message. This message contains the original command for reference. - * */ private[io] class TcpManager(tcp: TcpExt) extends SelectionHandler.SelectorBasedManager(tcp.Settings, tcp.Settings.NrOfSelectors) @@ -52,11 +51,11 @@ private[io] class TcpManager(tcp: TcpExt) def receive = workerForCommandHandler { case c: Connect => val commander = sender() // cache because we create a function that will run asynchly - (registry => Props(classOf[TcpOutgoingConnection], tcp, registry, commander, c)) + registry => Props(classOf[TcpOutgoingConnection], tcp, registry, commander, c) case b: Bind => val commander = sender() // cache because we create a function that will run asynchly - (registry => Props(classOf[TcpListener], selectorPool, tcp, registry, commander, b)) + registry => Props(classOf[TcpListener], selectorPool, tcp, registry, commander, b) } } diff --git a/akka-actor/src/main/scala/akka/io/TcpOutgoingConnection.scala b/akka-actor/src/main/scala/akka/io/TcpOutgoingConnection.scala index 499b7553b2c..9d4e116fdbc 100644 --- a/akka-actor/src/main/scala/akka/io/TcpOutgoingConnection.scala +++ b/akka-actor/src/main/scala/akka/io/TcpOutgoingConnection.scala @@ -43,7 +43,7 @@ private[io] 
class TcpOutgoingConnection( options.foreach(_.beforeConnect(channel.socket)) localAddress.foreach(channel.socket.bind) channelRegistry.register(channel, 0) - timeout.foreach(context.setReceiveTimeout) //Initiate connection timeout if supplied + timeout.foreach(context.setReceiveTimeout) // Initiate connection timeout if supplied private def stop(cause: Throwable): Unit = stopWith(CloseInformation(Set(commander), CommandFailed(connect).withCause(cause)), shouldAbort = true) diff --git a/akka-actor/src/main/scala/akka/io/Udp.scala b/akka-actor/src/main/scala/akka/io/Udp.scala index 2260108cc0a..02c8a1703dc 100644 --- a/akka-actor/src/main/scala/akka/io/Udp.scala +++ b/akka-actor/src/main/scala/akka/io/Udp.scala @@ -37,20 +37,14 @@ object Udp extends ExtensionId[UdpExt] with ExtensionIdProvider { override def createExtension(system: ExtendedActorSystem): UdpExt = new UdpExt(system) - /** - * Java API: retrieve the Udp extension for the given system. - */ + /** Java API: retrieve the Udp extension for the given system. */ override def get(system: ActorSystem): UdpExt = super.get(system) override def get(system: ClassicActorSystemProvider): UdpExt = super.get(system) - /** - * The common interface for [[Command]] and [[Event]]. - */ + /** The common interface for [[Command]] and [[Event]]. */ sealed trait Message - /** - * The common type of all commands supported by the UDP implementation. - */ + /** The common type of all commands supported by the UDP implementation. */ trait Command extends SelectionHandler.HasFailureMessage with Message { def failureMessage = CommandFailed(this) } @@ -142,9 +136,7 @@ object Udp extends ExtensionId[UdpExt] with ExtensionIdProvider { */ case object ResumeReading extends Command - /** - * The common type of all events emitted by the UDP implementation. - */ + /** The common type of all events emitted by the UDP implementation. 
*/ trait Event extends Message /** @@ -166,9 +158,7 @@ object Udp extends ExtensionId[UdpExt] with ExtensionIdProvider { */ final case class Bound(localAddress: InetSocketAddress) extends Event - /** - * The “simple sender” sends this message type in response to a [[SimpleSender]] query. - */ + /** The “simple sender” sends this message type in response to a [[SimpleSender]] query. */ sealed trait SimpleSenderReady extends Event case object SimpleSenderReady extends SimpleSenderReady @@ -229,21 +219,15 @@ class UdpExt(system: ExtendedActorSystem) extends IO.Extension { name = "IO-UDP-FF") } - /** - * Java API: retrieve the UDP manager actor’s reference. - */ + /** Java API: retrieve the UDP manager actor’s reference. */ def getManager: ActorRef = manager - /** - * INTERNAL API - */ + /** INTERNAL API */ private[io] val bufferPool: BufferPool = new DirectByteBufferPool(settings.DirectBufferSize, settings.MaxDirectBufferPoolSize) } -/** - * Java API: factory methods for the message types used when communicating with the Udp service. - */ +/** Java API: factory methods for the message types used when communicating with the Udp service. */ object UdpMessage { import java.lang.{ Iterable => JIterable } @@ -283,9 +267,7 @@ object UdpMessage { */ def send(payload: ByteString, target: InetSocketAddress, ack: Event): Command = Send(payload, target, ack) - /** - * The same as `send(payload, target, noAck())`. - */ + /** The same as `send(payload, target, noAck())`. */ def send(payload: ByteString, target: InetSocketAddress): Command = Send(payload, target) /** @@ -297,9 +279,7 @@ object UdpMessage { def bind(handler: ActorRef, endpoint: InetSocketAddress, options: JIterable[SocketOption]): Command = Bind(handler, endpoint, options.asScala.to(immutable.IndexedSeq)) - /** - * Bind without specifying options. - */ + /** Bind without specifying options. 
*/ def bind(handler: ActorRef, endpoint: InetSocketAddress): Command = Bind(handler, endpoint, Nil) /** @@ -321,9 +301,7 @@ object UdpMessage { */ def simpleSender(options: JIterable[SocketOption]): Command = SimpleSender(options.asScala.to(immutable.IndexedSeq)) - /** - * Retrieve a simple sender without specifying options. - */ + /** Retrieve a simple sender without specifying options. */ def simpleSender: Command = SimpleSender /** diff --git a/akka-actor/src/main/scala/akka/io/UdpConnected.scala b/akka-actor/src/main/scala/akka/io/UdpConnected.scala index adb2252c5fc..f7369e15395 100644 --- a/akka-actor/src/main/scala/akka/io/UdpConnected.scala +++ b/akka-actor/src/main/scala/akka/io/UdpConnected.scala @@ -35,20 +35,14 @@ object UdpConnected extends ExtensionId[UdpConnectedExt] with ExtensionIdProvide override def createExtension(system: ExtendedActorSystem): UdpConnectedExt = new UdpConnectedExt(system) - /** - * Java API: retrieve the UdpConnected extension for the given system. - */ + /** Java API: retrieve the UdpConnected extension for the given system. */ override def get(system: ActorSystem): UdpConnectedExt = super.get(system) override def get(system: ClassicActorSystemProvider): UdpConnectedExt = super.get(system) - /** - * The common interface for [[Command]] and [[Event]]. - */ + /** The common interface for [[Command]] and [[Event]]. */ sealed trait Message - /** - * The common type of all commands supported by the UDP implementation. - */ + /** The common type of all commands supported by the UDP implementation. */ trait Command extends SelectionHandler.HasFailureMessage with Message { def failureMessage = CommandFailed(this) } @@ -122,9 +116,7 @@ object UdpConnected extends ExtensionId[UdpConnectedExt] with ExtensionIdProvide */ case object ResumeReading extends Command - /** - * The common type of all events emitted by the UDP implementation. - */ + /** The common type of all events emitted by the UDP implementation. 
*/ trait Event extends Message /** @@ -168,18 +160,14 @@ class UdpConnectedExt(system: ExtendedActorSystem) extends IO.Extension { name = "IO-UDP-CONN") } - /** - * Java API: retrieve the UDP manager actor’s reference. - */ + /** Java API: retrieve the UDP manager actor’s reference. */ def getManager: ActorRef = manager val bufferPool: BufferPool = new DirectByteBufferPool(settings.DirectBufferSize, settings.MaxDirectBufferPoolSize) } -/** - * Java API: factory methods for the message types used when communicating with the UdpConnected service. - */ +/** Java API: factory methods for the message types used when communicating with the UdpConnected service. */ object UdpConnectedMessage { import UdpConnected._ import language.implicitConversions @@ -196,15 +184,11 @@ object UdpConnectedMessage { localAddress: InetSocketAddress, options: JIterable[SocketOption]): Command = Connect(handler, remoteAddress, Some(localAddress), options) - /** - * Connect without specifying the `localAddress`. - */ + /** Connect without specifying the `localAddress`. */ def connect(handler: ActorRef, remoteAddress: InetSocketAddress, options: JIterable[SocketOption]): Command = Connect(handler, remoteAddress, None, options) - /** - * Connect without specifying the `localAddress` or `options`. - */ + /** Connect without specifying the `localAddress` or `options`. */ def connect(handler: ActorRef, remoteAddress: InetSocketAddress): Command = Connect(handler, remoteAddress, None, Nil) /** @@ -217,9 +201,7 @@ object UdpConnectedMessage { */ def send(data: ByteString, ack: AnyRef): Command = Send(data, ack) - /** - * Send without requesting acknowledgment. - */ + /** Send without requesting acknowledgment. 
*/ def send(data: ByteString): Command = Send(data) /** diff --git a/akka-actor/src/main/scala/akka/io/UdpConnectedManager.scala b/akka-actor/src/main/scala/akka/io/UdpConnectedManager.scala index b358851a783..de582f33001 100644 --- a/akka-actor/src/main/scala/akka/io/UdpConnectedManager.scala +++ b/akka-actor/src/main/scala/akka/io/UdpConnectedManager.scala @@ -7,16 +7,13 @@ package akka.io import akka.actor.Props import akka.io.UdpConnected.Connect -/** - * INTERNAL API - */ +/** INTERNAL API */ private[io] class UdpConnectedManager(udpConn: UdpConnectedExt) extends SelectionHandler.SelectorBasedManager(udpConn.settings, udpConn.settings.NrOfSelectors) { - def receive = workerForCommandHandler { - case c: Connect => - val commander = sender() // cache because we create a function that will run asynchly - registry => Props(classOf[UdpConnection], udpConn, registry, commander, c) + def receive = workerForCommandHandler { case c: Connect => + val commander = sender() // cache because we create a function that will run asynchly + registry => Props(classOf[UdpConnection], udpConn, registry, commander, c) } } diff --git a/akka-actor/src/main/scala/akka/io/UdpConnection.scala b/akka-actor/src/main/scala/akka/io/UdpConnection.scala index 038ad638abe..48c4d624a60 100644 --- a/akka-actor/src/main/scala/akka/io/UdpConnection.scala +++ b/akka-actor/src/main/scala/akka/io/UdpConnection.scala @@ -20,9 +20,7 @@ import akka.io.UdpConnected._ import akka.io.dns.DnsProtocol import akka.util.{ unused, ByteString } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[io] class UdpConnection( udpConn: UdpConnectedExt, channelRegistry: ChannelRegistry, @@ -81,14 +79,13 @@ private[io] class UdpConnection( log.debug("Successfully connected to [{}]", remoteAddress) } - def receive = { - case registration: ChannelRegistration => - options.foreach { - case v2: Inet.SocketOptionV2 => v2.afterConnect(channel.socket) - case _ => - } - commander ! 
Connected - context.become(connected(registration), discardOld = true) + def receive = { case registration: ChannelRegistration => + options.foreach { + case v2: Inet.SocketOptionV2 => v2.afterConnect(channel.socket) + case _ => + } + commander ! Connected + context.become(connected(registration), discardOld = true) } def connected(registration: ChannelRegistration): Receive = { diff --git a/akka-actor/src/main/scala/akka/io/UdpListener.scala b/akka-actor/src/main/scala/akka/io/UdpListener.scala index f8607e1abad..a43a9242df7 100644 --- a/akka-actor/src/main/scala/akka/io/UdpListener.scala +++ b/akka-actor/src/main/scala/akka/io/UdpListener.scala @@ -18,9 +18,7 @@ import akka.io.SelectionHandler._ import akka.io.Udp._ import akka.util.ByteString -/** - * INTERNAL API - */ +/** INTERNAL API */ private[io] class UdpListener(val udp: UdpExt, channelRegistry: ChannelRegistry, bindCommander: ActorRef, bind: Bind) extends Actor with ActorLogging @@ -35,8 +33,8 @@ private[io] class UdpListener(val udp: UdpExt, channelRegistry: ChannelRegistry, context.watch(bind.handler) // sign death pact val channel = bind.options - .collectFirst { - case creator: DatagramChannelCreator => creator + .collectFirst { case creator: DatagramChannelCreator => + creator } .getOrElse(DatagramChannelCreator()) .create() @@ -65,10 +63,9 @@ private[io] class UdpListener(val udp: UdpExt, channelRegistry: ChannelRegistry, context.stop(self) } - def receive: Receive = { - case registration: ChannelRegistration => - bindCommander ! Bound(channel.socket.getLocalSocketAddress.asInstanceOf[InetSocketAddress]) - context.become(readHandlers(registration).orElse(sendHandlers(registration)), discardOld = true) + def receive: Receive = { case registration: ChannelRegistration => + bindCommander ! 
Bound(channel.socket.getLocalSocketAddress.asInstanceOf[InetSocketAddress]) + context.become(readHandlers(registration).orElse(sendHandlers(registration)), discardOld = true) } def readHandlers(registration: ChannelRegistration): Receive = { @@ -82,11 +79,10 @@ private[io] class UdpListener(val udp: UdpExt, channelRegistry: ChannelRegistry, context.become(unregistering(sender())) } - def unregistering(requester: ActorRef): Receive = { - case Unbound => - log.debug("Unbound endpoint [{}], stopping listener", bind.localAddress) - requester ! Unbound - context.stop(self) + def unregistering(requester: ActorRef): Receive = { case Unbound => + log.debug("Unbound endpoint [{}], stopping listener", bind.localAddress) + requester ! Unbound + context.stop(self) } def doReceive(registration: ChannelRegistration, handler: ActorRef): Unit = { @@ -101,7 +97,9 @@ private[io] class UdpListener(val udp: UdpExt, channelRegistry: ChannelRegistry, if (readsLeft > 0) innerReceive(readsLeft - 1, buffer) case null => // null means no data was available case unexpected => - throw new RuntimeException(s"Unexpected address in buffer: $unexpected") // will not happen, for exhaustiveness check + throw new RuntimeException( + s"Unexpected address in buffer: $unexpected" + ) // will not happen, for exhaustiveness check } } diff --git a/akka-actor/src/main/scala/akka/io/UdpManager.scala b/akka-actor/src/main/scala/akka/io/UdpManager.scala index 9e227243581..310814d6be4 100644 --- a/akka-actor/src/main/scala/akka/io/UdpManager.scala +++ b/akka-actor/src/main/scala/akka/io/UdpManager.scala @@ -42,7 +42,6 @@ import akka.io.Udp._ * message that the service is available. UDP datagrams can be sent by sending [[akka.io.Udp.Send]] messages to the * sender of SimpleSenderReady. All the datagrams will contain an ephemeral local port as sender and answers will be * discarded. 
- * */ private[io] class UdpManager(udp: UdpExt) extends SelectionHandler.SelectorBasedManager(udp.settings, udp.settings.NrOfSelectors) { @@ -50,11 +49,11 @@ private[io] class UdpManager(udp: UdpExt) def receive = workerForCommandHandler { case b: Bind => val commander = sender() // cache because we create a function that will run asynchly - (registry => Props(classOf[UdpListener], udp, registry, commander, b)) + registry => Props(classOf[UdpListener], udp, registry, commander, b) case s: SimpleSender => val commander = sender() // cache because we create a function that will run asynchly - (registry => Props(classOf[UdpSender], udp, registry, commander, s.options)) + registry => Props(classOf[UdpSender], udp, registry, commander, s.options) } } diff --git a/akka-actor/src/main/scala/akka/io/UdpSender.scala b/akka-actor/src/main/scala/akka/io/UdpSender.scala index a0614cb4fcc..faccee86575 100644 --- a/akka-actor/src/main/scala/akka/io/UdpSender.scala +++ b/akka-actor/src/main/scala/akka/io/UdpSender.scala @@ -13,9 +13,7 @@ import akka.dispatch.{ RequiresMessageQueue, UnboundedMessageQueueSemantics } import akka.io.Inet.{ DatagramChannelCreator, SocketOption } import akka.io.Udp._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @nowarn("msg=deprecated") private[io] class UdpSender( val udp: UdpExt, @@ -29,8 +27,8 @@ private[io] class UdpSender( val channel = { val datagramChannel = options - .collectFirst { - case creator: DatagramChannelCreator => creator + .collectFirst { case creator: DatagramChannelCreator => + creator } .getOrElse(DatagramChannelCreator()) .create() @@ -42,14 +40,13 @@ private[io] class UdpSender( } channelRegistry.register(channel, initialOps = 0) - def receive: Receive = { - case registration: ChannelRegistration => - options.foreach { - case v2: Inet.SocketOptionV2 => v2.afterConnect(channel.socket) - case _ => - } - commander ! 
SimpleSenderReady - context.become(sendHandlers(registration)) + def receive: Receive = { case registration: ChannelRegistration => + options.foreach { + case v2: Inet.SocketOptionV2 => v2.afterConnect(channel.socket) + case _ => + } + commander ! SimpleSenderReady + context.become(sendHandlers(registration)) } override def postStop(): Unit = if (channel.isOpen) { diff --git a/akka-actor/src/main/scala/akka/io/WithUdpSend.scala b/akka-actor/src/main/scala/akka/io/WithUdpSend.scala index e5418f34356..d87aeb4b809 100644 --- a/akka-actor/src/main/scala/akka/io/WithUdpSend.scala +++ b/akka-actor/src/main/scala/akka/io/WithUdpSend.scala @@ -14,9 +14,7 @@ import akka.io.SelectionHandler._ import akka.io.Udp.{ CommandFailed, Send } import akka.io.dns.DnsProtocol -/** - * INTERNAL API - */ +/** INTERNAL API */ private[io] trait WithUdpSend { me: Actor with ActorLogging => diff --git a/akka-actor/src/main/scala/akka/io/dns/CachePolicy.scala b/akka-actor/src/main/scala/akka/io/dns/CachePolicy.scala index e5fc435aa40..84f1fdc66d7 100644 --- a/akka-actor/src/main/scala/akka/io/dns/CachePolicy.scala +++ b/akka-actor/src/main/scala/akka/io/dns/CachePolicy.scala @@ -41,9 +41,7 @@ object CachePolicy { } def fromPositive(value: java.time.Duration): Ttl = fromPositive(value.asScala) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def toTll(policy: CachePolicy): Ttl = policy match { case Never => Ttl.never diff --git a/akka-actor/src/main/scala/akka/io/dns/DnsProtocol.scala b/akka-actor/src/main/scala/akka/io/dns/DnsProtocol.scala index d1660e3c94a..7500dc8d31a 100644 --- a/akka-actor/src/main/scala/akka/io/dns/DnsProtocol.scala +++ b/akka-actor/src/main/scala/akka/io/dns/DnsProtocol.scala @@ -24,7 +24,6 @@ import akka.util.ccompat.JavaConverters._ * * Allows for more detailed lookups, by specifying which records should be checked, * and responses can more information than plain IP addresses (e.g. ports for SRV records). 
- * */ object DnsProtocol { @@ -32,19 +31,13 @@ object DnsProtocol { final case class Ip(ipv4: Boolean = true, ipv6: Boolean = true) extends RequestType case object Srv extends RequestType - /** - * Java API - */ + /** Java API */ def ipRequestType(ipv4: Boolean, ipv6: Boolean): RequestType = Ip(ipv4, ipv6) - /** - * Java API - */ + /** Java API */ def ipRequestType(): RequestType = Ip(ipv4 = true, ipv6 = true) - /** - * Java API - */ + /** Java API */ def srvRequestType(): RequestType = Srv /** @@ -59,14 +52,10 @@ object DnsProtocol { def apply(name: String): Resolve = Resolve(name, Ip()) } - /** - * Java API - */ + /** Java API */ def resolve(name: String): Resolve = Resolve(name, Ip()) - /** - * Java API - */ + /** Java API */ def resolve(name: String, requestType: RequestType): Resolve = Resolve(name, requestType) /** @@ -88,13 +77,12 @@ object DnsProtocol { * Java API * * Records that relate to the query but are not strickly answers e.g. A records for the records returned for an SRV query. 
- * */ def getAdditionalRecords(): util.List[ResourceRecord] = additionalRecords.asJava private val _address: Option[InetAddress] = { val ipv4: Option[Inet4Address] = records.collectFirst { case ARecord(_, _, ip: Inet4Address) => ip } - val ipv6: Option[Inet6Address] = records.collectFirst { case AAAARecord(_, _, ip) => ip } + val ipv6: Option[Inet6Address] = records.collectFirst { case AAAARecord(_, _, ip) => ip } IpVersionSelector.getInetAddress(ipv4, ipv6) } diff --git a/akka-actor/src/main/scala/akka/io/dns/DnsResourceRecords.scala b/akka-actor/src/main/scala/akka/io/dns/DnsResourceRecords.scala index c884a43826b..95d6bbef817 100644 --- a/akka-actor/src/main/scala/akka/io/dns/DnsResourceRecords.scala +++ b/akka-actor/src/main/scala/akka/io/dns/DnsResourceRecords.scala @@ -17,9 +17,7 @@ import akka.annotation.InternalApi import akka.io.dns.internal.{ DomainName, _ } import akka.util.{ unused, ByteIterator, ByteString } -/** - * Not for user extension - */ +/** Not for user extension */ @DoNotInherit sealed abstract class ResourceRecord( val name: String, @@ -31,9 +29,7 @@ sealed abstract class ResourceRecord( final case class ARecord(override val name: String, override val ttl: CachePolicy.Ttl, ip: InetAddress) extends ResourceRecord(name, ttl, RecordType.A.code, RecordClass.IN.code) {} -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[io] object ARecord { def parseBody(name: String, ttl: Ttl, @unused length: Short, it: ByteIterator): ARecord = { @@ -46,15 +42,11 @@ private[io] object ARecord { final case class AAAARecord(override val name: String, override val ttl: CachePolicy.Ttl, ip: Inet6Address) extends ResourceRecord(name, ttl, RecordType.AAAA.code, RecordClass.IN.code) {} -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[io] object AAAARecord { - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi def parseBody(name: String, ttl: Ttl, @unused length: Short, it: ByteIterator): AAAARecord = { val address = 
Array.ofDim[Byte](16) @@ -69,9 +61,7 @@ final case class CNameRecord(override val name: String, override val ttl: Ttl, c @InternalApi private[dns] object CNameRecord { - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi def parseBody(name: String, ttl: Ttl, @unused length: Short, it: ByteIterator, msg: ByteString): CNameRecord = { CNameRecord(name, ttl, DomainName.parse(it, msg)) @@ -87,20 +77,16 @@ final case class SRVRecord( target: String) extends ResourceRecord(name, ttl, RecordType.SRV.code, RecordClass.IN.code) {} -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[dns] object SRVRecord { - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi def parseBody(name: String, ttl: Ttl, @unused length: Short, it: ByteIterator, msg: ByteString): SRVRecord = { - val priority = it.getShort.toInt & 0xFFFF - val weight = it.getShort.toInt & 0xFFFF - val port = it.getShort.toInt & 0xFFFF + val priority = it.getShort.toInt & 0xffff + val weight = it.getShort.toInt & 0xffff + val port = it.getShort.toInt & 0xffff SRVRecord(name, ttl, priority, weight, port, DomainName.parse(it, msg)) } } @@ -113,15 +99,11 @@ final case class UnknownRecord( data: ByteString) extends ResourceRecord(name, ttl, recType, recClass) {} -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[dns] object UnknownRecord { - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi def parseBody( name: String, @@ -133,15 +115,11 @@ private[dns] object UnknownRecord { UnknownRecord(name, ttl, recType, recClass, it.toByteString) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[dns] object ResourceRecord { - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi def parse(it: ByteIterator, msg: ByteString): ResourceRecord = { val name = DomainName.parse(it, msg) @@ -150,7 +128,7 @@ private[dns] object ResourceRecord { // If the number of cases increase remember to add a `@switch` annotation e.g.: // val ttl = (it.getInt: 
@switch) match { // According to https://www.ietf.org/rfc/rfc1035.txt: "TTL: positive values of a signed 32 bit number." - val ttl = (it.getInt) match { + val ttl = it.getInt match { case 0 => Ttl.never case nonZero => Ttl.fromPositive(nonZero.seconds) } diff --git a/akka-actor/src/main/scala/akka/io/dns/DnsSettings.scala b/akka-actor/src/main/scala/akka/io/dns/DnsSettings.scala index 9f49a32b2e0..5da99fa6189 100644 --- a/akka-actor/src/main/scala/akka/io/dns/DnsSettings.scala +++ b/akka-actor/src/main/scala/akka/io/dns/DnsSettings.scala @@ -121,9 +121,7 @@ private[dns] final class DnsSettings(system: ExtendedActorSystem, c: Config) { val RandomStrategyName: String = c.getString("id-strategy") - /** - * A thunk to generate the next request ID. Not thread-safe, requires some other coordination. - */ + /** A thunk to generate the next request ID. Not thread-safe, requires some other coordination. */ def idGenerator: Function0[Short] = if (RandomStrategyName == "NOT-IN-ANY-WAY-RANDOM-test-sequential") { new Function0[Short] { @@ -173,15 +171,15 @@ object DnsSettings { private final val DnsFallbackPort = 53 private val inetSocketAddress = """(.*?)(?::(\d+))?""".r - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def parseNameserverAddress(str: String): InetSocketAddress = str match { case inetSocketAddress(host, port) => new InetSocketAddress(host, Option(port).fold(DnsFallbackPort)(_.toInt)) case unexpected => - throw new IllegalArgumentException(s"Unparseable address string: $unexpected") // will not happen, for exhaustiveness check + throw new IllegalArgumentException( + s"Unparseable address string: $unexpected" + ) // will not happen, for exhaustiveness check } /** diff --git a/akka-actor/src/main/scala/akka/io/dns/RecordType.scala b/akka-actor/src/main/scala/akka/io/dns/RecordType.scala index eb1c7356e69..4561ec4df75 100644 --- a/akka-actor/src/main/scala/akka/io/dns/RecordType.scala +++ 
b/akka-actor/src/main/scala/akka/io/dns/RecordType.scala @@ -6,9 +6,7 @@ package akka.io.dns import akka.util.OptionVal -/** - * DNS Record Type - */ +/** DNS Record Type */ final case class RecordType(code: Short, name: String) object RecordType { diff --git a/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsManager.scala b/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsManager.scala index e4da33da48e..97739a988ce 100644 --- a/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsManager.scala +++ b/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsManager.scala @@ -23,9 +23,7 @@ import akka.io.dns.internal.AsyncDnsManager.CacheCleanup import akka.routing.FromConfig import akka.util.Timeout -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object AsyncDnsManager { private case object CacheCleanup @@ -33,9 +31,7 @@ private[akka] object AsyncDnsManager { case object GetCache } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @nowarn("msg=deprecated") private[io] final class AsyncDnsManager( @@ -52,9 +48,7 @@ private[io] final class AsyncDnsManager( import akka.pattern.ask import akka.pattern.pipe - /** - * Ctr expected by the DnsExt for all DnsMangers - */ + /** Ctr expected by the DnsExt for all DnsMangers */ def this(ext: DnsExt) = this( ext.Settings.Resolver, @@ -71,9 +65,13 @@ private[io] final class AsyncDnsManager( private val resolver = { val props: Props = FromConfig.props( - Props(provider.actorClass, settings, cache, (factory: ActorRefFactory, dns: List[InetSocketAddress]) => { - dns.map(ns => factory.actorOf(Props(new DnsClient(ns)))) - }).withDeploy(Deploy.local).withDispatcher(dispatcher)) + Props( + provider.actorClass, + settings, + cache, + (factory: ActorRefFactory, dns: List[InetSocketAddress]) => { + dns.map(ns => factory.actorOf(Props(new DnsClient(ns)))) + }).withDeploy(Deploy.local).withDispatcher(dispatcher)) context.actorOf(props, name) } diff --git 
a/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsProvider.scala b/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsProvider.scala index 9b9bb80625b..5ea0452b87d 100644 --- a/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsProvider.scala +++ b/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsProvider.scala @@ -9,9 +9,7 @@ import scala.annotation.nowarn import akka.annotation.InternalApi import akka.io._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @nowarn("msg=deprecated") private[akka] class AsyncDnsProvider extends DnsProvider { diff --git a/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsResolver.scala b/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsResolver.scala index 67079f0f94d..5e47446a3da 100644 --- a/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsResolver.scala +++ b/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsResolver.scala @@ -25,9 +25,7 @@ import akka.pattern.AskTimeoutException import akka.util.{ Helpers, Timeout } import akka.util.PrettyDuration._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[io] final class AsyncDnsResolver( settings: DnsSettings, @@ -97,11 +95,13 @@ private[io] final class AsyncDnsResolver( case ipv6: Inet6Address => AAAARecord(name, Ttl.effectivelyForever, ipv6) case unexpected => throw new IllegalArgumentException(s"Unexpected address: $unexpected") } - }.fold(ex => { sender() ! Status.Failure(ex) }, record => { - val resolved = DnsProtocol.Resolved(name, record :: Nil) - cache.put(name -> mode, resolved, record.ttl) - sender() ! resolved - }) + }.fold( + ex => { sender() ! Status.Failure(ex) }, + record => { + val resolved = DnsProtocol.Resolved(name, record :: Nil) + cache.put(name -> mode, resolved, record.ttl) + sender() ! resolved + }) } else if (resolvers.isEmpty) { sender() ! 
Status.Failure(failToResolve(name, nameServers)) } else { @@ -138,9 +138,7 @@ private[io] final class AsyncDnsResolver( } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object AsyncDnsResolver { @@ -225,8 +223,8 @@ private[akka] object AsyncDnsResolver { (resolver ? DropRequest(question)) .mapTo[Dropped] - .recover { - case _ => DidntDrop(question.id) + .recover { case _ => + DidntDrop(question.id) } .pipeTo(self) }(ExecutionContexts.parasitic) diff --git a/akka-actor/src/main/scala/akka/io/dns/internal/DnsClient.scala b/akka-actor/src/main/scala/akka/io/dns/internal/DnsClient.scala index 2383a3684da..68e94d7b014 100644 --- a/akka-actor/src/main/scala/akka/io/dns/internal/DnsClient.scala +++ b/akka-actor/src/main/scala/akka/io/dns/internal/DnsClient.scala @@ -20,9 +20,7 @@ import akka.io.{ IO, Tcp, Udp } import akka.io.dns.{ RecordClass, RecordType, ResourceRecord } import akka.pattern.{ BackoffOpts, BackoffSupervisor } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object DnsClient { sealed trait DnsQuestion { def id: Short @@ -49,9 +47,7 @@ import akka.pattern.{ BackoffOpts, BackoffSupervisor } } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class DnsClient(ns: InetSocketAddress) extends Actor with Stash { import DnsClient._ @@ -87,9 +83,7 @@ import akka.pattern.{ BackoffOpts, BackoffSupervisor } Message(id, MessageFlags(), im.Seq(Question(name, recordType, RecordClass.IN))) } - /** - * Silent to allow map update syntax - */ + /** Silent to allow map update syntax */ @nowarn() def ready(socket: ActorRef): Receive = { case DropRequest(question) => @@ -168,10 +162,9 @@ import akka.pattern.{ BackoffOpts, BackoffSupervisor } // best effort, don't throw Try { val msg = Message.parse(send.payload) - inflightRequests.get(msg.id).foreach { - case (s, _) => - s ! 
Failure(new RuntimeException("Send failed to nameserver")) - inflightRequests -= msg.id + inflightRequests.get(msg.id).foreach { case (s, _) => + s ! Failure(new RuntimeException("Send failed to nameserver")) + inflightRequests -= msg.id } } case _ => diff --git a/akka-actor/src/main/scala/akka/io/dns/internal/DnsMessage.scala b/akka-actor/src/main/scala/akka/io/dns/internal/DnsMessage.scala index 8d612778f9b..913f47315d6 100644 --- a/akka-actor/src/main/scala/akka/io/dns/internal/DnsMessage.scala +++ b/akka-actor/src/main/scala/akka/io/dns/internal/DnsMessage.scala @@ -13,9 +13,7 @@ import akka.annotation.InternalApi import akka.io.dns.ResourceRecord import akka.util.{ ByteString, ByteStringBuilder } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[internal] object OpCode extends Enumeration { val QUERY = Value(0) @@ -23,9 +21,7 @@ private[internal] object OpCode extends Enumeration { val STATUS = Value(2) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[internal] object ResponseCode extends Enumeration { val SUCCESS = Value(0) @@ -36,9 +32,7 @@ private[internal] object ResponseCode extends Enumeration { val REFUSED = Value(5) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[internal] case class MessageFlags(flags: Short) extends AnyVal { def isQuery: Boolean = (flags & 0x8000) == 0 @@ -72,9 +66,7 @@ private[internal] case class MessageFlags(flags: Short) extends AnyVal { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[internal] object MessageFlags { def apply( @@ -96,9 +88,7 @@ private[internal] object MessageFlags { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[internal] case class Message( id: Short, @@ -127,9 +117,7 @@ private[internal] case class Message( } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[internal] object Message { def parse(msg: ByteString): Message = { diff --git 
a/akka-actor/src/main/scala/akka/io/dns/internal/DomainName.scala b/akka-actor/src/main/scala/akka/io/dns/internal/DomainName.scala index 98dc55fb5a3..d9cb16dc514 100644 --- a/akka-actor/src/main/scala/akka/io/dns/internal/DomainName.scala +++ b/akka-actor/src/main/scala/akka/io/dns/internal/DomainName.scala @@ -7,9 +7,7 @@ package akka.io.dns.internal import akka.annotation.InternalApi import akka.util.{ ByteIterator, ByteString, ByteStringBuilder } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object DomainName { def length(name: String): Short = { diff --git a/akka-actor/src/main/scala/akka/io/dns/internal/Question.scala b/akka-actor/src/main/scala/akka/io/dns/internal/Question.scala index a5fabea24b2..255caf158b4 100644 --- a/akka-actor/src/main/scala/akka/io/dns/internal/Question.scala +++ b/akka-actor/src/main/scala/akka/io/dns/internal/Question.scala @@ -8,9 +8,7 @@ import akka.annotation.InternalApi import akka.io.dns.{ RecordClass, RecordType } import akka.util.{ ByteIterator, ByteString, ByteStringBuilder } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class Question(name: String, qType: RecordType, qClass: RecordClass) { def write(out: ByteStringBuilder): Unit = { @@ -20,9 +18,7 @@ private[akka] final case class Question(name: String, qType: RecordType, qClass: } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object Question { def parse(it: ByteIterator, msg: ByteString): Question = { diff --git a/akka-actor/src/main/scala/akka/io/dns/internal/RecordClassSerializer.scala b/akka-actor/src/main/scala/akka/io/dns/internal/RecordClassSerializer.scala index d2b180517fe..f978a65c734 100644 --- a/akka-actor/src/main/scala/akka/io/dns/internal/RecordClassSerializer.scala +++ b/akka-actor/src/main/scala/akka/io/dns/internal/RecordClassSerializer.scala @@ -8,9 +8,7 @@ import akka.annotation.InternalApi import akka.io.dns.RecordClass import akka.util.{ ByteIterator, 
ByteStringBuilder } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object RecordClassSerializer { diff --git a/akka-actor/src/main/scala/akka/io/dns/internal/RecordTypeSerializer.scala b/akka-actor/src/main/scala/akka/io/dns/internal/RecordTypeSerializer.scala index 0247abacc5e..bac7afbe881 100644 --- a/akka-actor/src/main/scala/akka/io/dns/internal/RecordTypeSerializer.scala +++ b/akka-actor/src/main/scala/akka/io/dns/internal/RecordTypeSerializer.scala @@ -7,9 +7,7 @@ package akka.io.dns.internal import akka.io.dns.RecordType import akka.util.{ ByteIterator, ByteStringBuilder, OptionVal } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] object RecordTypeSerializer { // TODO other type than ByteStringBuilder? (was used in akka-dns) diff --git a/akka-actor/src/main/scala/akka/io/dns/internal/ResolvConfParser.scala b/akka-actor/src/main/scala/akka/io/dns/internal/ResolvConfParser.scala index 9341af7e397..dc682f6bb92 100644 --- a/akka-actor/src/main/scala/akka/io/dns/internal/ResolvConfParser.scala +++ b/akka-actor/src/main/scala/akka/io/dns/internal/ResolvConfParser.scala @@ -20,9 +20,7 @@ private[dns] object ResolvConfParser { private val OptionsLabel = "options" private val NdotsOption = "ndots:" - /** - * Does a partial parse according to https://linux.die.net/man/5/resolver. - */ + /** Does a partial parse according to https://linux.die.net/man/5/resolver. 
*/ def parseFile(file: File): Try[ResolvConf] = { Try { parseLines(Files.lines(file.toPath).iterator().asScala) diff --git a/akka-actor/src/main/scala/akka/io/dns/internal/TcpDnsClient.scala b/akka-actor/src/main/scala/akka/io/dns/internal/TcpDnsClient.scala index 4bb32aaab71..4aec56f10cd 100644 --- a/akka-actor/src/main/scala/akka/io/dns/internal/TcpDnsClient.scala +++ b/akka-actor/src/main/scala/akka/io/dns/internal/TcpDnsClient.scala @@ -13,9 +13,7 @@ import akka.io.Tcp import akka.io.dns.internal.DnsClient.Answer import akka.util.ByteString -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class TcpDnsClient(tcp: ActorRef, ns: InetSocketAddress, answerRecipient: ActorRef) extends Actor with ActorLogging @@ -24,12 +22,11 @@ import akka.util.ByteString override def receive: Receive = idle - val idle: Receive = { - case _: Message => - stash() - log.debug("Connecting to [{}]", ns) - tcp ! Tcp.Connect(ns) - context.become(connecting) + val idle: Receive = { case _: Message => + stash() + log.debug("Connecting to [{}]", ns) + tcp ! 
Tcp.Connect(ns) + context.become(connecting) } val connecting: Receive = { diff --git a/akka-actor/src/main/scala/akka/io/dns/internal/package.scala b/akka-actor/src/main/scala/akka/io/dns/internal/package.scala index 066fd299d3d..3959826e86d 100644 --- a/akka-actor/src/main/scala/akka/io/dns/internal/package.scala +++ b/akka-actor/src/main/scala/akka/io/dns/internal/package.scala @@ -9,9 +9,7 @@ import java.nio.ByteOrder import akka.annotation.InternalApi import akka.io.dns.CachePolicy.{ CachePolicy, Forever, Never, Ttl } -/** - * INTERNAL API - */ +/** INTERNAL API */ package object internal { /** diff --git a/akka-actor/src/main/scala/akka/japi/JavaAPI.scala b/akka-actor/src/main/scala/akka/japi/JavaAPI.scala index 32ab6a73d1a..776d5352c6e 100644 --- a/akka-actor/src/main/scala/akka/japi/JavaAPI.scala +++ b/akka-actor/src/main/scala/akka/japi/JavaAPI.scala @@ -93,9 +93,7 @@ object Pair { @FunctionalInterface trait Creator[T] extends Serializable { - /** - * This method must return a different instance upon every call. - */ + /** This method must return a different instance upon every call. 
*/ @throws(classOf[Exception]) def create(): T } @@ -181,14 +179,10 @@ sealed abstract class Option[A] extends java.lang.Iterable[A] { object Option { - /** - * Option factory that creates Some - */ + /** Option factory that creates Some */ def some[A](v: A): Option[A] = Some(v) - /** - * Option factory that creates None - */ + /** Option factory that creates None */ def none[A] = None.asInstanceOf[Option[A]] /** @@ -197,9 +191,7 @@ object Option { */ def option[A](v: A): Option[A] = if (v == null) none else some(v) - /** - * Converts a Scala Option to a Java Option - */ + /** Converts a Scala Option to a Java Option */ def fromScalaOption[T](scalaOption: scala.Option[T]): Option[T] = scalaOption match { case scala.Some(r) => some(r) case scala.None => none @@ -216,9 +208,7 @@ object Option { def asScala: scala.Some[A] = scala.Some(v) } - /** - * This case object represents non-existent values. - */ + /** This case object represents non-existent values. */ private case object None extends Option[Nothing] { def get: Nothing = throw new NoSuchElementException("None.get") def getOrElse[B](defaultValue: B): B = defaultValue @@ -230,14 +220,10 @@ object Option { implicit def scala2JavaOption[A](o: scala.Option[A]): Option[A] = if (o.isDefined) some(o.get) else none } -/** - * This class hold common utilities for Java - */ +/** This class hold common utilities for Java */ object Util { - /** - * Returns a ClassTag describing the provided Class. - */ + /** Returns a ClassTag describing the provided Class. */ def classTag[T](clazz: Class[T]): ClassTag[T] = ClassTag(clazz) /** @@ -246,15 +232,11 @@ object Util { */ def immutableSeq(arr: Array[Class[_]]): immutable.Seq[Class[_]] = immutableSeq[Class[_]](arr) - /** - * Turns an array into an immutable Scala sequence (by copying it). - */ + /** Turns an array into an immutable Scala sequence (by copying it). 
*/ def immutableSeq[T](arr: Array[T]): immutable.Seq[T] = if ((arr ne null) && arr.length > 0) arr.toIndexedSeq else Nil - /** - * Turns an [[java.lang.Iterable]] into an immutable Scala sequence (by copying it). - */ + /** Turns an [[java.lang.Iterable]] into an immutable Scala sequence (by copying it). */ def immutableSeq[T](iterable: java.lang.Iterable[T]): immutable.Seq[T] = iterable match { case imm: immutable.Seq[_] => imm.asInstanceOf[immutable.Seq[T]] @@ -278,9 +260,7 @@ object Util { l } - /** - * Turns an [[java.lang.Iterable]] into an immutable Scala IndexedSeq (by copying it). - */ + /** Turns an [[java.lang.Iterable]] into an immutable Scala IndexedSeq (by copying it). */ def immutableIndexedSeq[T](iterable: java.lang.Iterable[T]): immutable.IndexedSeq[T] = immutableSeq(iterable).toVector diff --git a/akka-actor/src/main/scala/akka/japi/function/Function.scala b/akka-actor/src/main/scala/akka/japi/function/Function.scala index f84114f116b..cecddef9939 100644 --- a/akka-actor/src/main/scala/akka/japi/function/Function.scala +++ b/akka-actor/src/main/scala/akka/japi/function/Function.scala @@ -80,9 +80,7 @@ trait Predicate[-T] extends java.io.Serializable { @FunctionalInterface trait Creator[+T] extends Serializable { - /** - * This method must return a different instance upon every call. - */ + /** This method must return a different instance upon every call. */ @throws(classOf[Exception]) def create(): T } diff --git a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala index 115b26fa3a4..18244fa20b0 100644 --- a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala @@ -32,9 +32,7 @@ class AskTimeoutException(message: String, cause: Throwable) extends TimeoutExce override def getCause(): Throwable = cause } -/** - * This object contains implementation details of the “ask” pattern. 
- */ +/** This object contains implementation details of the “ask” pattern. */ trait AskSupport { /** @@ -81,7 +79,6 @@ trait AskSupport { * EnrichedMessage(response) * } pipeTo nextActor * }}} - * */ def ask(actorRef: ActorRef, message: Any)(implicit timeout: Timeout): Future[Any] = actorRef.internalAsk(message, timeout, ActorRef.noSender) @@ -148,7 +145,6 @@ trait AskSupport { * EnrichedMessage(response) * } pipeTo nextActor * }}} - * */ def ask(actorSelection: ActorSelection, message: Any)(implicit timeout: Timeout): Future[Any] = actorSelection.internalAsk(message, timeout, ActorRef.noSender) @@ -215,8 +211,8 @@ trait ExplicitAskSupport { */ def ask(actorRef: ActorRef, messageFactory: ActorRef => Any)(implicit timeout: Timeout): Future[Any] = actorRef.internalAsk(messageFactory, timeout, ActorRef.noSender) - def ask(actorRef: ActorRef, messageFactory: ActorRef => Any, sender: ActorRef)( - implicit timeout: Timeout): Future[Any] = + def ask(actorRef: ActorRef, messageFactory: ActorRef => Any, sender: ActorRef)(implicit + timeout: Timeout): Future[Any] = actorRef.internalAsk(messageFactory, timeout, sender) /** @@ -270,12 +266,11 @@ trait ExplicitAskSupport { * EnrichedMessage(response) * } pipeTo nextActor * }}} - * */ def ask(actorSelection: ActorSelection, messageFactory: ActorRef => Any)(implicit timeout: Timeout): Future[Any] = actorSelection.internalAsk(messageFactory, timeout, ActorRef.noSender) - def ask(actorSelection: ActorSelection, messageFactory: ActorRef => Any, sender: ActorRef)( - implicit timeout: Timeout): Future[Any] = + def ask(actorSelection: ActorSelection, messageFactory: ActorRef => Any, sender: ActorRef)(implicit + timeout: Timeout): Future[Any] = actorSelection.internalAsk(messageFactory, timeout, sender) } @@ -287,9 +282,7 @@ object AskableActorRef { s"Message of type [${msg.getClass.getName}]$wasSentBy." 
} - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def negativeTimeoutException( recipient: Any, message: Any, @@ -299,9 +292,7 @@ object AskableActorRef { messagePartOfException(message, sender)) } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def recipientTerminatedException( recipient: Any, message: Any, @@ -311,9 +302,7 @@ object AskableActorRef { messagePartOfException(message, sender)) } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def unsupportedRecipientType( recipient: Any, message: Any, @@ -329,39 +318,31 @@ object AskableActorRef { */ final class AskableActorRef(val actorRef: ActorRef) extends AnyVal { - /** - * INTERNAL API: for binary compatibility - */ + /** INTERNAL API: for binary compatibility */ protected def ask(message: Any, timeout: Timeout): Future[Any] = internalAsk(message, timeout, ActorRef.noSender) - //todo add scaladoc + // todo add scaladoc def ask(message: Any)(implicit timeout: Timeout, sender: ActorRef = Actor.noSender): Future[Any] = internalAsk(message, timeout, sender) def askWithStatus(message: Any)(implicit timeout: Timeout, sender: ActorRef = Actor.noSender): Future[Any] = internalAskWithStatus(message) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[pattern] def internalAskWithStatus( message: Any)(implicit timeout: Timeout, sender: ActorRef = Actor.noSender): Future[Any] = StatusReply.flattenStatusFuture[Any](internalAsk(message, timeout, sender).mapTo[StatusReply[Any]]) - /** - * INTERNAL API: for binary compatibility - */ + /** INTERNAL API: for binary compatibility */ protected def ?(message: Any)(implicit timeout: Timeout): Future[Any] = internalAsk(message, timeout, ActorRef.noSender) def ?(message: Any)(implicit timeout: Timeout, sender: ActorRef = Actor.noSender): Future[Any] = internalAsk(message, timeout, sender) - /** - * INTERNAL API: for binary compatibility - */ + /** INTERNAL API: for binary 
compatibility */ private[pattern] def internalAsk(message: Any, timeout: Timeout, sender: ActorRef) = actorRef match { case ref: InternalActorRef if ref.isTerminated => actorRef ! message @@ -389,9 +370,7 @@ final class ExplicitlyAskableActorRef(val actorRef: ActorRef) extends AnyVal { def ?(message: ActorRef => Any)(implicit timeout: Timeout, sender: ActorRef = Actor.noSender): Future[Any] = internalAsk(message, timeout, sender) - /** - * INTERNAL API: for binary compatibility - */ + /** INTERNAL API: for binary compatibility */ private[pattern] def internalAsk(messageFactory: ActorRef => Any, timeout: Timeout, sender: ActorRef): Future[Any] = actorRef match { case ref: InternalActorRef if ref.isTerminated => @@ -425,27 +404,21 @@ final class ExplicitlyAskableActorRef(val actorRef: ActorRef) extends AnyVal { */ final class AskableActorSelection(val actorSel: ActorSelection) extends AnyVal { - /** - * INTERNAL API: for binary compatibility - */ + /** INTERNAL API: for binary compatibility */ protected def ask(message: Any, timeout: Timeout): Future[Any] = internalAsk(message, timeout, ActorRef.noSender) def ask(message: Any)(implicit timeout: Timeout, sender: ActorRef = Actor.noSender): Future[Any] = internalAsk(message, timeout, sender) - /** - * INTERNAL API: for binary compatibility - */ + /** INTERNAL API: for binary compatibility */ protected def ?(message: Any)(implicit timeout: Timeout): Future[Any] = internalAsk(message, timeout, ActorRef.noSender) def ?(message: Any)(implicit timeout: Timeout, sender: ActorRef = Actor.noSender): Future[Any] = internalAsk(message, timeout, sender) - /** - * INTERNAL API: for binary compatibility - */ + /** INTERNAL API: for binary compatibility */ private[pattern] def internalAsk(message: Any, timeout: Timeout, sender: ActorRef): Future[Any] = actorSel.anchor match { case ref: InternalActorRef => @@ -471,9 +444,7 @@ final class ExplicitlyAskableActorSelection(val actorSel: ActorSelection) extend def ?(message: ActorRef => 
Any)(implicit timeout: Timeout, sender: ActorRef = Actor.noSender): Future[Any] = internalAsk(message, timeout, sender) - /** - * INTERNAL API: for binary compatibility - */ + /** INTERNAL API: for binary compatibility */ private[pattern] def internalAsk(messageFactory: ActorRef => Any, timeout: Timeout, sender: ActorRef): Future[Any] = actorSel.anchor match { case ref: InternalActorRef => @@ -693,9 +664,7 @@ private[akka] final class PromiseActorRef( private[akka] def onTimeout(@unused timeout: Timeout): Unit = {} } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object PromiseActorRef { private case object Registering diff --git a/akka-actor/src/main/scala/akka/pattern/BackoffOptions.scala b/akka-actor/src/main/scala/akka/pattern/BackoffOptions.scala index c78e62f1e3b..8db66e276d6 100644 --- a/akka-actor/src/main/scala/akka/pattern/BackoffOptions.scala +++ b/akka-actor/src/main/scala/akka/pattern/BackoffOptions.scala @@ -11,9 +11,7 @@ import akka.annotation.{ DoNotInherit, InternalApi } import akka.pattern.internal.{ BackoffOnRestartSupervisor, BackoffOnStopSupervisor } import akka.util.JavaDurationConverters._ -/** - * Backoff options allow to specify a number of properties for backoff supervisors. - */ +/** Backoff options allow to specify a number of properties for backoff supervisors. */ object BackoffOpts { /** @@ -247,9 +245,7 @@ object BackoffOpts { onStop(childProps, childName, minBackoff.asScala, maxBackoff.asScala, randomFactor) } -/** - * Not for user extension - */ +/** Not for user extension */ @DoNotInherit private[akka] sealed trait ExtendedBackoffOptions[T <: ExtendedBackoffOptions[T]] { @@ -308,9 +304,7 @@ private[akka] sealed trait ExtendedBackoffOptions[T <: ExtendedBackoffOptions[T] */ def withHandlerWhileStopped(handler: ActorRef): T - /** - * Returns the props to create the back-off supervisor. - */ + /** Returns the props to create the back-off supervisor. 
*/ private[akka] def props: Props } @@ -361,9 +355,8 @@ private final case class BackoffOnStopOptionsImpl[T]( // additional def withDefaultStoppingStrategy = - copy( - supervisorStrategy = - OneForOneStrategy(supervisorStrategy.maxNrOfRetries)(SupervisorStrategy.stoppingStrategy.decider)) + copy(supervisorStrategy = + OneForOneStrategy(supervisorStrategy.maxNrOfRetries)(SupervisorStrategy.stoppingStrategy.decider)) def withFinalStopMessage(action: Any => Boolean) = copy(finalStopMessage = Some(action)) def props: Props = { diff --git a/akka-actor/src/main/scala/akka/pattern/BackoffSupervisor.scala b/akka-actor/src/main/scala/akka/pattern/BackoffSupervisor.scala index 42458a6b15f..240ac04c94f 100644 --- a/akka-actor/src/main/scala/akka/pattern/BackoffSupervisor.scala +++ b/akka-actor/src/main/scala/akka/pattern/BackoffSupervisor.scala @@ -47,9 +47,7 @@ object BackoffSupervisor { */ final case class CurrentChild(ref: Option[ActorRef]) { - /** - * Java API: The `ActorRef` of the current child, if any - */ + /** Java API: The `ActorRef` of the current child, if any */ def getRef: Optional[ActorRef] = Optional.ofNullable(ref.orNull) } @@ -79,15 +77,11 @@ object BackoffSupervisor { final case class RestartCount(count: Int) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] case object StartChild extends DeadLetterSuppression - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] case class ResetRestartCount(current: Int) extends DeadLetterSuppression diff --git a/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala b/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala index ad31efe4f48..75f25a46e4f 100644 --- a/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala +++ b/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala @@ -26,9 +26,7 @@ import akka.pattern.internal.{ CircuitBreakerNoopTelemetry, CircuitBreakerTeleme import akka.util.JavaDurationConverters._ import akka.util.Unsafe -/** - * 
Companion object providing factory methods for Circuit Breaker which runs callbacks in caller's thread - */ +/** Companion object providing factory methods for Circuit Breaker which runs callbacks in caller's thread */ object CircuitBreaker { /** @@ -172,8 +170,7 @@ class CircuitBreaker( } // add the old constructor to make it binary compatible - def this(scheduler: Scheduler, maxFailures: Int, callTimeout: FiniteDuration, resetTimeout: FiniteDuration)( - implicit + def this(scheduler: Scheduler, maxFailures: Int, callTimeout: FiniteDuration, resetTimeout: FiniteDuration)(implicit executor: ExecutionContext) = { this( scheduler, @@ -192,9 +189,7 @@ class CircuitBreaker( callTimeout: FiniteDuration, resetTimeout: FiniteDuration, maxResetTimeout: FiniteDuration, - exponentialBackoffFactor: Double)( - implicit - executor: ExecutionContext) = { + exponentialBackoffFactor: Double)(implicit executor: ExecutionContext) = { this(scheduler, maxFailures, callTimeout, resetTimeout, maxResetTimeout, exponentialBackoffFactor, 0.0)(executor) } @@ -246,16 +241,12 @@ class CircuitBreaker( telemetry)(executor) } - /** - * Holds reference to current state of CircuitBreaker - *access only via helper methods* - */ + /** Holds reference to current state of CircuitBreaker - *access only via helper methods* */ @volatile @nowarn("msg=never updated") private[this] var _currentStateDoNotCallMeDirectly: State = Closed - /** - * Holds reference to current resetTimeout of CircuitBreaker - *access only via helper methods* - */ + /** Holds reference to current resetTimeout of CircuitBreaker - *access only via helper methods* */ @volatile @nowarn("msg=never updated") private[this] var _currentResetTimeoutDoNotCallMeDirectly: FiniteDuration = resetTimeout @@ -285,9 +276,7 @@ class CircuitBreaker( private[this] def currentState: State = Unsafe.instance.getObjectVolatile(this, AbstractCircuitBreaker.stateOffset).asInstanceOf[State] - /** - * Helper method for updating the underlying resetTimeout via 
Unsafe - */ + /** Helper method for updating the underlying resetTimeout via Unsafe */ @inline private[this] def swapResetTimeout(oldResetTimeout: FiniteDuration, newResetTimeout: FiniteDuration): Boolean = Unsafe.instance.compareAndSwapObject( @@ -296,9 +285,7 @@ class CircuitBreaker( oldResetTimeout, newResetTimeout) - /** - * Helper method for accessing to the underlying resetTimeout via Unsafe - */ + /** Helper method for accessing to the underlying resetTimeout via Unsafe */ @inline private[this] def currentResetTimeout: FiniteDuration = Unsafe.instance.getObjectVolatile(this, AbstractCircuitBreaker.resetTimeoutOffset).asInstanceOf[FiniteDuration] @@ -320,7 +307,6 @@ class CircuitBreaker( * @param body Call needing protected * @return [[scala.concurrent.Future]] containing the call result or a * `scala.concurrent.TimeoutException` if the call timed out - * */ def withCircuitBreaker[T](body: => Future[T]): Future[T] = currentState.invoke(body, failureFn) @@ -374,9 +360,12 @@ class CircuitBreaker( def callWithCircuitBreakerCS[T]( body: Callable[CompletionStage[T]], defineFailureFn: BiFunction[Optional[T], Optional[Throwable], java.lang.Boolean]): CompletionStage[T] = - FutureConverters.toJava[T](callWithCircuitBreaker(new Callable[Future[T]] { - override def call(): Future[T] = FutureConverters.toScala(body.call()) - }, defineFailureFn)) + FutureConverters.toJava[T]( + callWithCircuitBreaker( + new Callable[Future[T]] { + override def call(): Future[T] = FutureConverters.toScala(body.call()) + }, + defineFailureFn)) /** * Wraps invocations of synchronous calls that need to be protected. @@ -667,9 +656,7 @@ class CircuitBreaker( */ private def tripBreaker(fromState: State): Unit = transition(fromState, Open) - /** - * Resets breaker to a closed state. This is valid from an Half-Open state only. - */ + /** Resets breaker to a closed state. This is valid from an Half-Open state only. 
*/ private def resetBreaker(): Unit = transition(HalfOpen, Closed) /** @@ -720,9 +707,7 @@ class CircuitBreaker( } } - /** - * Invokes all onCallBreakerOpen callback handlers. - */ + /** Invokes all onCallBreakerOpen callback handlers. */ private def notifyCallBreakerOpenListeners(): Unit = if (!callBreakerOpenListeners.isEmpty) { val iterator = callBreakerOpenListeners.iterator() while (iterator.hasNext) { @@ -731,9 +716,7 @@ class CircuitBreaker( } } - /** - * Attempts to reset breaker by transitioning to a half-open state. This is valid from an Open state only. - */ + /** Attempts to reset breaker by transitioning to a half-open state. This is valid from an Open state only. */ private def attemptReset(): Unit = transition(Open, HalfOpen) private val timeoutEx = new TimeoutException("Circuit Breaker Timed out.") with NoStackTrace @@ -768,9 +751,7 @@ class CircuitBreaker( case _ => true } - /** - * Internal state abstraction - */ + /** Internal state abstraction */ private sealed trait State { private val listeners = new CopyOnWriteArrayList[Runnable] @@ -890,38 +871,26 @@ class CircuitBreaker( */ def invoke[T](body: => Future[T]): Future[T] = invoke(body, failureFn) - /** - * Invoked when call succeeds - * - */ + /** Invoked when call succeeds */ def callSucceeds(): Unit - /** - * Invoked when call fails - * - */ + /** Invoked when call fails */ def callFails(): Unit /** * Invoked on the transitioned-to state during transition. 
Notifies listeners after invoking subclass template * method _enter - * */ final def enter(): Unit = { _enter() notifyTransitionListeners() } - /** - * Template method for concrete traits - * - */ + /** Template method for concrete traits */ def _enter(): Unit } - /** - * Concrete implementation of Closed state - */ + /** Concrete implementation of Closed state */ private object Closed extends AtomicInteger with State { /** @@ -966,9 +935,7 @@ class CircuitBreaker( override def toString: String = "Closed with failure count = " + get() } - /** - * Concrete implementation of half-open state - */ + /** Concrete implementation of half-open state */ private object HalfOpen extends AtomicBoolean(true) with State { /** @@ -1015,9 +982,7 @@ class CircuitBreaker( override def toString: String = "Half-Open currently testing call for success = " + get() } - /** - * Concrete implementation of Open state - */ + /** Concrete implementation of Open state */ private object Open extends AtomicLong with State { /** diff --git a/akka-actor/src/main/scala/akka/pattern/CircuitBreakersRegistry.scala b/akka-actor/src/main/scala/akka/pattern/CircuitBreakersRegistry.scala index 5f93ce59177..4e3ffc69fa2 100644 --- a/akka-actor/src/main/scala/akka/pattern/CircuitBreakersRegistry.scala +++ b/akka-actor/src/main/scala/akka/pattern/CircuitBreakersRegistry.scala @@ -19,9 +19,7 @@ import akka.actor.{ import akka.pattern.internal.CircuitBreakerTelemetryProvider import akka.util.ccompat.JavaConverters._ -/** - * Companion object providing factory methods for Circuit Breaker which runs callbacks in caller's thread - */ +/** Companion object providing factory methods for Circuit Breaker which runs callbacks in caller's thread */ object CircuitBreakersRegistry extends ExtensionId[CircuitBreakersRegistry] with ExtensionIdProvider { /** @@ -31,9 +29,7 @@ object CircuitBreakersRegistry extends ExtensionId[CircuitBreakersRegistry] with override def createExtension(system: ExtendedActorSystem): 
CircuitBreakersRegistry = new CircuitBreakersRegistry(system) - /** - * Returns the canonical ExtensionId for this Extension - */ + /** Returns the canonical ExtensionId for this Extension */ override def lookup: ExtensionId[_ <: Extension] = CircuitBreakersRegistry /** @@ -49,9 +45,7 @@ object CircuitBreakersRegistry extends ExtensionId[CircuitBreakersRegistry] with override def get(system: ClassicActorSystemProvider): CircuitBreakersRegistry = super.get(system) } -/** - * A CircuitBreakersPanel is a central point collecting all circuit breakers in Akka. - */ +/** A CircuitBreakersPanel is a central point collecting all circuit breakers in Akka. */ final class CircuitBreakersRegistry(system: ExtendedActorSystem) extends Extension { private val breakers = new ConcurrentHashMap[String, CircuitBreaker] diff --git a/akka-actor/src/main/scala/akka/pattern/FutureTimeoutSupport.scala b/akka-actor/src/main/scala/akka/pattern/FutureTimeoutSupport.scala index c2532247a2b..971cf99a479 100644 --- a/akka-actor/src/main/scala/akka/pattern/FutureTimeoutSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/FutureTimeoutSupport.scala @@ -21,8 +21,8 @@ trait FutureTimeoutSupport { * Returns a [[scala.concurrent.Future]] that will be completed with the success or failure of the provided value * after the specified duration. */ - def after[T](duration: FiniteDuration)(value: => Future[T])( - implicit system: ClassicActorSystemProvider): Future[T] = { + def after[T](duration: FiniteDuration)(value: => Future[T])(implicit + system: ClassicActorSystemProvider): Future[T] = { after(duration, using = system.classicSystem.scheduler)(value)(system.classicSystem.dispatcher) } @@ -30,16 +30,16 @@ trait FutureTimeoutSupport { * Returns a [[java.util.concurrent.CompletionStage]] that will be completed with the success or failure of the provided value * after the specified duration. 
*/ - def afterCompletionStage[T](duration: FiniteDuration)(value: => CompletionStage[T])( - implicit system: ClassicActorSystemProvider): CompletionStage[T] = + def afterCompletionStage[T](duration: FiniteDuration)(value: => CompletionStage[T])(implicit + system: ClassicActorSystemProvider): CompletionStage[T] = afterCompletionStage(duration, system.classicSystem.scheduler)(value)(system.classicSystem.dispatcher) /** * Returns a [[scala.concurrent.Future]] that will be completed with the success or failure of the provided value * after the specified duration. */ - def after[T](duration: FiniteDuration, using: Scheduler)(value: => Future[T])( - implicit ec: ExecutionContext): Future[T] = + def after[T](duration: FiniteDuration, using: Scheduler)(value: => Future[T])(implicit + ec: ExecutionContext): Future[T] = if (duration.isFinite && duration.length < 1) { try value catch { case NonFatal(t) => Future.failed(t) } @@ -58,8 +58,8 @@ trait FutureTimeoutSupport { * Returns a [[java.util.concurrent.CompletionStage]] that will be completed with the success or failure of the provided value * after the specified duration. 
*/ - def afterCompletionStage[T](duration: FiniteDuration, using: Scheduler)(value: => CompletionStage[T])( - implicit ec: ExecutionContext): CompletionStage[T] = + def afterCompletionStage[T](duration: FiniteDuration, using: Scheduler)(value: => CompletionStage[T])(implicit + ec: ExecutionContext): CompletionStage[T] = if (duration.isFinite && duration.length < 1) { try value catch { case NonFatal(t) => Futures.failedCompletionStage(t) } diff --git a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala index 38f8f3e0f24..ede22da58bf 100644 --- a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala @@ -52,9 +52,11 @@ trait GracefulStopSupport { PromiseActorRef(internalTarget.provider, Timeout(timeout), target, stopMessage.getClass.getName, target.path.name) internalTarget.sendSystemMessage(Watch(internalTarget, ref)) target.tell(stopMessage, Actor.noSender) - ref.result.future.transform({ - case Terminated(t) if t.path == target.path => true - case _ => { internalTarget.sendSystemMessage(Unwatch(target, ref)); false } - }, t => { internalTarget.sendSystemMessage(Unwatch(target, ref)); t })(ExecutionContexts.parasitic) + ref.result.future.transform( + { + case Terminated(t) if t.path == target.path => true + case _ => { internalTarget.sendSystemMessage(Unwatch(target, ref)); false } + }, + t => { internalTarget.sendSystemMessage(Unwatch(target, ref)); t })(ExecutionContexts.parasitic) } } diff --git a/akka-actor/src/main/scala/akka/pattern/Patterns.scala b/akka-actor/src/main/scala/akka/pattern/Patterns.scala index 8e54ae82bd8..faa8ad3b886 100644 --- a/akka-actor/src/main/scala/akka/pattern/Patterns.scala +++ b/akka-actor/src/main/scala/akka/pattern/Patterns.scala @@ -14,9 +14,7 @@ import scala.concurrent.ExecutionContext import akka.actor.{ ActorSelection, ClassicActorSystemProvider, Scheduler } import 
akka.util.JavaDurationConverters._ -/** - * Java API: for Akka patterns such as `ask`, `pipe` and others which work with [[java.util.concurrent.CompletionStage]]. - */ +/** Java API: for Akka patterns such as `ask`, `pipe` and others which work with [[java.util.concurrent.CompletionStage]]. */ object Patterns { import scala.concurrent.Future import scala.concurrent.duration._ @@ -486,7 +484,7 @@ object Patterns { shouldRetry: Predicate[Throwable], attempts: Int, ec: ExecutionContext): CompletionStage[T] = - scalaRetry(() => attempt.call().toScala, (ex) => shouldRetry.test(ex), attempts)(ec).toJava + scalaRetry(() => attempt.call().toScala, ex => shouldRetry.test(ex), attempts)(ec).toJava /** * Returns an internally retrying [[java.util.concurrent.CompletionStage]] @@ -604,7 +602,7 @@ object Patterns { ec: ExecutionContext): CompletionStage[T] = scalaRetry( () => attempt.call().toScala, - (ex) => shouldRetry.test(ex), + ex => shouldRetry.test(ex), attempts, minBackoff.asScala, maxBackoff.asScala, @@ -718,8 +716,8 @@ object Patterns { scalaRetry( () => attempt.call().toScala, - (ex) => shouldRetry.test(ex), + ex => shouldRetry.test(ex), attempts, - (attempted) => delayFunction.apply(attempted).asScala.map(_.asScala))(context, scheduler).toJava + attempted => delayFunction.apply(attempted).asScala.map(_.asScala))(context, scheduler).toJava } } diff --git a/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala b/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala index e192fda77bb..90681babc05 100644 --- a/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala @@ -43,8 +43,8 @@ trait PipeToSupport { } } - final class PipeableCompletionStage[T](val future: CompletionStage[T])( - implicit @unused executionContext: ExecutionContext) { + final class PipeableCompletionStage[T](val future: CompletionStage[T])(implicit + @unused executionContext: ExecutionContext) { def pipeTo(recipient: 
ActorRef)(implicit sender: ActorRef = Actor.noSender): CompletionStage[T] = { future.whenComplete(new BiConsumer[T, Throwable] { override def accept(t: T, ex: Throwable): Unit = { @@ -112,6 +112,6 @@ trait PipeToSupport { * The successful result of the future is sent as a message to the recipient, or * the failure is sent in a [[akka.actor.Status.Failure]] to the recipient. */ - implicit def pipeCompletionStage[T](future: CompletionStage[T])( - implicit executionContext: ExecutionContext): PipeableCompletionStage[T] = new PipeableCompletionStage(future) + implicit def pipeCompletionStage[T](future: CompletionStage[T])(implicit + executionContext: ExecutionContext): PipeableCompletionStage[T] = new PipeableCompletionStage(future) } diff --git a/akka-actor/src/main/scala/akka/pattern/PromiseRef.scala b/akka-actor/src/main/scala/akka/pattern/PromiseRef.scala index c59b5faff8b..ceef3a33b77 100644 --- a/akka-actor/src/main/scala/akka/pattern/PromiseRef.scala +++ b/akka-actor/src/main/scala/akka/pattern/PromiseRef.scala @@ -15,14 +15,10 @@ import akka.util.Timeout */ trait FutureRef[T] { - /** - * ActorRef associated with this FutureRef. - */ + /** ActorRef associated with this FutureRef. */ def ref: ActorRef - /** - * Future associated with this FutureRef. - */ + /** Future associated with this FutureRef. */ def future: Future[T] } @@ -32,32 +28,22 @@ trait FutureRef[T] { */ trait PromiseRef[T] { this: FutureRef[T] => - /** - * ActorRef associated with this PromiseRef. - */ + /** ActorRef associated with this PromiseRef. */ def ref: ActorRef - /** - * Promise associated with this PromiseRef. - */ + /** Promise associated with this PromiseRef. */ def promise: Promise[T] - /** - * Future containing the value of the Promise associated with this PromiseRef. - */ + /** Future containing the value of the Promise associated with this PromiseRef. */ final def future = promise.future - /** - * Converts this PromiseRef to FutureRef, effectively narrowing it's API. 
- */ + /** Converts this PromiseRef to FutureRef, effectively narrowing it's API. */ def toFutureRef: FutureRef[T] } object PromiseRef { - /** - * Wraps an ActorRef and a Promise into a PromiseRef. - */ + /** Wraps an ActorRef and a Promise into a PromiseRef. */ private[akka] def wrap[T](actorRef: ActorRef, promise: Promise[T]): PromiseRef[T] = { new PromiseRefImpl(actorRef, promise) } @@ -99,9 +85,7 @@ object PromiseRef { object FutureRef { - /** - * Wraps an ActorRef and a Future into a FutureRef. - */ + /** Wraps an ActorRef and a Future into a FutureRef. */ private[akka] def wrap[T](actorRef: ActorRef, future: Future[T]): FutureRef[T] = { new FutureRefImpl(actorRef, future) } diff --git a/akka-actor/src/main/scala/akka/pattern/RetrySupport.scala b/akka-actor/src/main/scala/akka/pattern/RetrySupport.scala index 21be432b426..63ed0c6b67f 100644 --- a/akka-actor/src/main/scala/akka/pattern/RetrySupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/RetrySupport.scala @@ -11,9 +11,7 @@ import scala.util.control.NonFatal import akka.actor.Scheduler import akka.util.ConstantFun -/** - * This trait provides the retry utility function - */ +/** This trait provides the retry utility function */ trait RetrySupport { /** @@ -58,8 +56,8 @@ trait RetrySupport { * shouldRetry = { (ex) => ex.isInstanceOf[IllegalArgumentException] }) * }}} */ - def retry[T](attempt: () => Future[T], shouldRetry: Throwable => Boolean, attempts: Int)( - implicit ec: ExecutionContext): Future[T] = + def retry[T](attempt: () => Future[T], shouldRetry: Throwable => Boolean, attempts: Int)(implicit + ec: ExecutionContext): Future[T] = RetrySupport.retry(attempt, attempts, ConstantFun.scalaAnyToNone, attempted = 0, shouldRetry)(ec, null) /** @@ -171,8 +169,8 @@ trait RetrySupport { * ) * }}} */ - def retry[T](attempt: () => Future[T], attempts: Int, delay: FiniteDuration)( - implicit ec: ExecutionContext, + def retry[T](attempt: () => Future[T], attempts: Int, delay: FiniteDuration)(implicit + ec: 
ExecutionContext, scheduler: Scheduler): Future[T] = { retry(attempt, attempts, _ => Some(delay)) } @@ -202,8 +200,7 @@ trait RetrySupport { * ) * }}} */ - def retry[T](attempt: () => Future[T], attempts: Int, delayFunction: Int => Option[FiniteDuration])( - implicit + def retry[T](attempt: () => Future[T], attempts: Int, delayFunction: Int => Option[FiniteDuration])(implicit ec: ExecutionContext, scheduler: Scheduler): Future[T] = { RetrySupport.retry(attempt, attempts, delayFunction, attempted = 0, ConstantFun.anyToTrue) @@ -251,8 +248,8 @@ trait RetrySupport { object RetrySupport extends RetrySupport { - private def retry[T](attempt: () => Future[T], maxAttempts: Int, attempted: Int)( - implicit ec: ExecutionContext): Future[T] = + private def retry[T](attempt: () => Future[T], maxAttempts: Int, attempted: Int)(implicit + ec: ExecutionContext): Future[T] = retry(attempt, maxAttempts, ConstantFun.scalaAnyToNone, attempted, ConstantFun.anyToTrue)(ec, null) private def retry[T]( diff --git a/akka-actor/src/main/scala/akka/pattern/StatusReply.scala b/akka-actor/src/main/scala/akka/pattern/StatusReply.scala index d0907a6c064..d6c572b1f16 100644 --- a/akka-actor/src/main/scala/akka/pattern/StatusReply.scala +++ b/akka-actor/src/main/scala/akka/pattern/StatusReply.scala @@ -36,9 +36,7 @@ final class StatusReply[+T] private (private val status: Try[T]) { */ def getValue: T = status.get - /** - * Java API: returns the exception if the reply is a failure, or throws an exception if not. - */ + /** Java API: returns the exception if the reply is a failure, or throws an exception if not. 
*/ def getError: Throwable = status match { case ScalaFailure(ex) => ex case _ => throw new IllegalArgumentException("Expected reply to be a failure, but was a success") @@ -64,24 +62,16 @@ final class StatusReply[+T] private (private val status: Try[T]) { object StatusReply { - /** - * Scala API: A general purpose message for using as an Ack - */ + /** Scala API: A general purpose message for using as an Ack */ val Ack: StatusReply[Done] = success(Done) - /** - * Java API: A general purpose message for using as an Ack - */ + /** Java API: A general purpose message for using as an Ack */ def ack(): StatusReply[Done] = Ack - /** - * Java API: Create a successful reply containing `value` - */ + /** Java API: Create a successful reply containing `value` */ def success[T](value: T): StatusReply[T] = new StatusReply(ScalaSuccess(value)) - /** - * Java API: Create an status response with a error message describing why the request was failed or denied. - */ + /** Java API: Create an status response with a error message describing why the request was failed or denied. */ def error[T](errorMessage: String): StatusReply[T] = Error(errorMessage) /** @@ -144,9 +134,7 @@ object StatusReply { */ object Success { - /** - * Scala API: Create a successful reply containing `value` - */ + /** Scala API: Create a successful reply containing `value` */ def apply[T](value: T): StatusReply[T] = new StatusReply(ScalaSuccess(value)) def unapply(status: StatusReply[Any]): Option[Any] = if (status != null && status.isSuccess) Some(status.getValue) @@ -163,9 +151,7 @@ object StatusReply { */ object Error { - /** - * Scala API: Create an status response with a error message describing why the request was failed or denied. - */ + /** Scala API: Create an status response with a error message describing why the request was failed or denied. 
*/ def apply[T](errorMessage: String): StatusReply[T] = error(new ErrorMessage(errorMessage)) /** @@ -185,9 +171,7 @@ object StatusReply { else None } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def flattenStatusFuture[T](f: Future[StatusReply[T]]): Future[T] = f.transform { diff --git a/akka-actor/src/main/scala/akka/pattern/internal/BackoffOnRestartSupervisor.scala b/akka-actor/src/main/scala/akka/pattern/internal/BackoffOnRestartSupervisor.scala index d5e19168083..fcb69f6bc5d 100644 --- a/akka-actor/src/main/scala/akka/pattern/internal/BackoffOnRestartSupervisor.scala +++ b/akka-actor/src/main/scala/akka/pattern/internal/BackoffOnRestartSupervisor.scala @@ -44,40 +44,39 @@ import akka.pattern.{ override val supervisorStrategy: OneForOneStrategy = { val decider = super.supervisorStrategy.decider - OneForOneStrategy(strategy.maxNrOfRetries, strategy.withinTimeRange, strategy.loggingEnabled) { - case ex => - val defaultDirective: Directive = - decider.applyOrElse(ex, (_: Any) => Escalate) + OneForOneStrategy(strategy.maxNrOfRetries, strategy.withinTimeRange, strategy.loggingEnabled) { case ex => + val defaultDirective: Directive = + decider.applyOrElse(ex, (_: Any) => Escalate) - strategy.decider.applyOrElse(ex, (_: Any) => defaultDirective) match { - // Whatever the final Directive is, we will translate all Restarts - // to our own Restarts, which involves stopping the child. - case Restart => - val nextRestartCount = restartCount + 1 + strategy.decider.applyOrElse(ex, (_: Any) => defaultDirective) match { + // Whatever the final Directive is, we will translate all Restarts + // to our own Restarts, which involves stopping the child. + case Restart => + val nextRestartCount = restartCount + 1 - if (strategy.withinTimeRange.isFinite && restartCount == 0) { - // If the user has defined a time range for the maxNrOfRetries, we'll schedule a message - // to ourselves every time that range elapses, to reset the restart counter. 
We hide it - // behind this conditional to avoid queuing the message unnecessarily - val finiteWithinTimeRange = strategy.withinTimeRange.asInstanceOf[FiniteDuration] - system.scheduler.scheduleOnce(finiteWithinTimeRange, self, ResetRestartCount(nextRestartCount)) - } - val childRef = sender() - if (strategy.maxNrOfRetries >= 0 && nextRestartCount > strategy.maxNrOfRetries) { - // If we've exceeded the maximum # of retries allowed by the Strategy, die. - log.debug( - s"Terminating on restart #{} which exceeds max allowed restarts ({})", - nextRestartCount, - strategy.maxNrOfRetries) - become(receive) - stop(self) - } else { - become(waitChildTerminatedBeforeBackoff(childRef).orElse(handleBackoff)) - } - Stop + if (strategy.withinTimeRange.isFinite && restartCount == 0) { + // If the user has defined a time range for the maxNrOfRetries, we'll schedule a message + // to ourselves every time that range elapses, to reset the restart counter. We hide it + // behind this conditional to avoid queuing the message unnecessarily + val finiteWithinTimeRange = strategy.withinTimeRange.asInstanceOf[FiniteDuration] + system.scheduler.scheduleOnce(finiteWithinTimeRange, self, ResetRestartCount(nextRestartCount)) + } + val childRef = sender() + if (strategy.maxNrOfRetries >= 0 && nextRestartCount > strategy.maxNrOfRetries) { + // If we've exceeded the maximum # of retries allowed by the Strategy, die. + log.debug( + s"Terminating on restart #{} which exceeds max allowed restarts ({})", + nextRestartCount, + strategy.maxNrOfRetries) + become(receive) + stop(self) + } else { + become(waitChildTerminatedBeforeBackoff(childRef).orElse(handleBackoff)) + } + Stop - case other => other - } + case other => other + } } } @@ -92,10 +91,9 @@ import akka.pattern.{ case StartChild => // Ignore it, we will schedule a new one once current child terminated. 
} - def onTerminated: Receive = { - case Terminated(c) => - log.debug(s"Terminating, because child [$c] terminated itself") - stop(self) + def onTerminated: Receive = { case Terminated(c) => + log.debug(s"Terminating, because child [$c] terminated itself") + stop(self) } def receive: Receive = onTerminated.orElse(handleBackoff) diff --git a/akka-actor/src/main/scala/akka/pattern/internal/BackoffOnStopSupervisor.scala b/akka-actor/src/main/scala/akka/pattern/internal/BackoffOnStopSupervisor.scala index 5ffb4218270..b252cf490bd 100644 --- a/akka-actor/src/main/scala/akka/pattern/internal/BackoffOnStopSupervisor.scala +++ b/akka-actor/src/main/scala/akka/pattern/internal/BackoffOnStopSupervisor.scala @@ -47,12 +47,11 @@ import akka.pattern.{ val decider = super.supervisorStrategy.decider strategy match { case oneForOne: OneForOneStrategy => - OneForOneStrategy(oneForOne.maxNrOfRetries, oneForOne.withinTimeRange, oneForOne.loggingEnabled) { - case ex => - val defaultDirective: Directive = - decider.applyOrElse(ex, (_: Any) => Escalate) + OneForOneStrategy(oneForOne.maxNrOfRetries, oneForOne.withinTimeRange, oneForOne.loggingEnabled) { case ex => + val defaultDirective: Directive = + decider.applyOrElse(ex, (_: Any) => Escalate) - strategy.decider.applyOrElse(ex, (_: Any) => defaultDirective) + strategy.decider.applyOrElse(ex, (_: Any) => defaultDirective) } case s => s } diff --git a/akka-actor/src/main/scala/akka/pattern/internal/CircuitBreakerTelemetry.scala b/akka-actor/src/main/scala/akka/pattern/internal/CircuitBreakerTelemetry.scala index 31baf23fad3..afe7837933e 100644 --- a/akka-actor/src/main/scala/akka/pattern/internal/CircuitBreakerTelemetry.scala +++ b/akka-actor/src/main/scala/akka/pattern/internal/CircuitBreakerTelemetry.scala @@ -23,19 +23,13 @@ import akka.util.ccompat.JavaConverters._ @InternalStableApi trait CircuitBreakerTelemetry { - /** - * Invoked when the circuit breaker transitions to the open state. 
- */ + /** Invoked when the circuit breaker transitions to the open state. */ def onOpen(): Unit - /** - * Invoked when the circuit breaker transitions to the close state. - */ + /** Invoked when the circuit breaker transitions to the close state. */ def onClose(): Unit - /** - * Invoked when the circuit breaker transitions to the half-open state after reset timeout. - */ + /** Invoked when the circuit breaker transitions to the half-open state after reset timeout. */ def onHalfOpen(): Unit /** @@ -74,9 +68,7 @@ trait CircuitBreakerTelemetry { def stopped(): Unit } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object CircuitBreakerTelemetryProvider { def start(breakerId: String, system: ExtendedActorSystem): CircuitBreakerTelemetry = { val configPath = "akka.circuit-breaker.telemetry.implementations" @@ -106,9 +98,7 @@ trait CircuitBreakerTelemetry { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object CircuitBreakerNoopTelemetry extends CircuitBreakerTelemetry { override def onOpen(): Unit = () @@ -127,9 +117,7 @@ trait CircuitBreakerTelemetry { override def stopped(): Unit = () } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class CircuitBreakerEnsembleTelemetry( telemetryFqcns: Seq[String], breakerId: String, diff --git a/akka-actor/src/main/scala/akka/routing/Balancing.scala b/akka-actor/src/main/scala/akka/routing/Balancing.scala index 158775fdbaa..b15aecb79d0 100644 --- a/akka-actor/src/main/scala/akka/routing/Balancing.scala +++ b/akka-actor/src/main/scala/akka/routing/Balancing.scala @@ -17,9 +17,7 @@ import akka.actor.SupervisorStrategy import akka.dispatch.BalancingDispatcherConfigurator import akka.dispatch.Dispatchers -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] object BalancingRoutingLogic { def apply(): BalancingRoutingLogic = new BalancingRoutingLogic } @@ -87,9 +85,7 @@ final case class BalancingPool( override def createRouter(system: 
ActorSystem): Router = new Router(BalancingRoutingLogic()) - /** - * Setting the supervisor strategy to be used for the “head” Router actor. - */ + /** Setting the supervisor strategy to be used for the “head” Router actor. */ def withSupervisorStrategy(strategy: SupervisorStrategy): BalancingPool = copy(supervisorStrategy = strategy) /** @@ -100,9 +96,7 @@ final case class BalancingPool( def nrOfInstances(sys: ActorSystem) = this.nrOfInstances - /** - * INTERNAL API - */ + /** INTERNAL API */ override private[akka] def newRoutee(routeeProps: Props, context: ActorContext): Routee = { val rawDeployPath = context.self.path.elements.drop(1).mkString("/", "/", "") @@ -144,7 +138,7 @@ final case class BalancingPool( other match { case p: Pool => if ((this.supervisorStrategy eq Pool.defaultSupervisorStrategy) - && (p.supervisorStrategy ne Pool.defaultSupervisorStrategy)) + && (p.supervisorStrategy ne Pool.defaultSupervisorStrategy)) this.withSupervisorStrategy(p.supervisorStrategy) else this @@ -152,9 +146,7 @@ final case class BalancingPool( } } - /** - * Resizer cannot be used together with BalancingPool - */ + /** Resizer cannot be used together with BalancingPool */ override val resizer: Option[Resizer] = None } diff --git a/akka-actor/src/main/scala/akka/routing/Broadcast.scala b/akka-actor/src/main/scala/akka/routing/Broadcast.scala index 35513071528..b080ee00c15 100644 --- a/akka-actor/src/main/scala/akka/routing/Broadcast.scala +++ b/akka-actor/src/main/scala/akka/routing/Broadcast.scala @@ -18,9 +18,7 @@ object BroadcastRoutingLogic { def apply(): BroadcastRoutingLogic = new BroadcastRoutingLogic } -/** - * Broadcasts a message to all its routees. - */ +/** Broadcasts a message to all its routees. 
*/ @nowarn("msg=@SerialVersionUID has no effect") @SerialVersionUID(1L) final class BroadcastRoutingLogic extends RoutingLogic { @@ -85,14 +83,10 @@ final case class BroadcastPool( override def nrOfInstances(sys: ActorSystem) = this.nrOfInstances - /** - * Setting the supervisor strategy to be used for the “head” Router actor. - */ + /** Setting the supervisor strategy to be used for the “head” Router actor. */ def withSupervisorStrategy(strategy: SupervisorStrategy): BroadcastPool = copy(supervisorStrategy = strategy) - /** - * Setting the resizer to be used. - */ + /** Setting the resizer to be used. */ def withResizer(resizer: Resizer): BroadcastPool = copy(resizer = Some(resizer)) /** diff --git a/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala b/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala index a59aaa315d0..0fd6261c15b 100644 --- a/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala +++ b/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala @@ -17,7 +17,6 @@ import scala.reflect.ClassTag * * Note that toString of the ring nodes are used for the node * hash, i.e. make sure it is different for different nodes. 
- * */ class ConsistentHash[T: ClassTag] private (nodes: immutable.SortedMap[Int, T], val virtualNodesFactor: Int) { @@ -40,9 +39,11 @@ class ConsistentHash[T: ClassTag] private (nodes: immutable.SortedMap[Int, T], v */ def :+(node: T): ConsistentHash[T] = { val nodeHash = hashFor(node.toString) - new ConsistentHash(nodes ++ ((1 to virtualNodesFactor).map { r => - (concatenateNodeHash(nodeHash, r) -> node) - }), virtualNodesFactor) + new ConsistentHash( + nodes ++ ((1 to virtualNodesFactor).map { r => + concatenateNodeHash(nodeHash, r) -> node + }), + virtualNodesFactor) } /** @@ -59,9 +60,11 @@ class ConsistentHash[T: ClassTag] private (nodes: immutable.SortedMap[Int, T], v */ def :-(node: T): ConsistentHash[T] = { val nodeHash = hashFor(node.toString) - new ConsistentHash(nodes -- ((1 to virtualNodesFactor).map { r => - concatenateNodeHash(nodeHash, r) - }), virtualNodesFactor) + new ConsistentHash( + nodes -- ((1 to virtualNodesFactor).map { r => + concatenateNodeHash(nodeHash, r) + }), + virtualNodesFactor) } /** @@ -104,9 +107,7 @@ class ConsistentHash[T: ClassTag] private (nodes: immutable.SortedMap[Int, T], v nodeRing(idx(Arrays.binarySearch(nodeHashRing, hashFor(key)))) } - /** - * Is the node ring empty, i.e. no nodes added or all removed. - */ + /** Is the node ring empty, i.e. no nodes added or all removed. 
*/ def isEmpty: Boolean = nodes.isEmpty } @@ -119,13 +120,11 @@ object ConsistentHash { node <- nodes nodeHash = hashFor(node.toString) vnode <- 1 to virtualNodesFactor - } yield (concatenateNodeHash(nodeHash, vnode) -> node)), + } yield concatenateNodeHash(nodeHash, vnode) -> node), virtualNodesFactor) } - /** - * Java API: Factory method to create a ConsistentHash - */ + /** Java API: Factory method to create a ConsistentHash */ def create[T](nodes: java.lang.Iterable[T], virtualNodesFactor: Int): ConsistentHash[T] = { import akka.util.ccompat.JavaConverters._ apply(nodes.asScala, virtualNodesFactor)(ClassTag(classOf[Any].asInstanceOf[Class[T]])) diff --git a/akka-actor/src/main/scala/akka/routing/ConsistentHashing.scala b/akka-actor/src/main/scala/akka/routing/ConsistentHashing.scala index bd8df278368..bb3584e5525 100644 --- a/akka-actor/src/main/scala/akka/routing/ConsistentHashing.scala +++ b/akka-actor/src/main/scala/akka/routing/ConsistentHashing.scala @@ -98,9 +98,7 @@ object ConsistentHashingRouter { def hashKey(message: Any): Any } - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] def hashMappingAdapter(mapper: ConsistentHashMapper): ConsistentHashMapping = { case message if mapper.hashKey(message).asInstanceOf[AnyRef] ne null => mapper.hashKey(message) @@ -110,9 +108,7 @@ object ConsistentHashingRouter { object ConsistentHashingRoutingLogic { - /** - * Address to use for the selfAddress parameter - */ + /** Address to use for the selfAddress parameter */ def defaultAddress(system: ActorSystem): Address = system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress } @@ -143,7 +139,6 @@ object ConsistentHashingRoutingLogic { * use for the consistent hash key * * @param system the actor system hosting this router - * */ @SerialVersionUID(1L) final case class ConsistentHashingRoutingLogic( @@ -178,14 +173,10 @@ final case class ConsistentHashingRoutingLogic( private lazy val log = Logging(system, classOf[ConsistentHashingRoutingLogic]) 
- /** - * Setting the number of virtual nodes per node, used in [[akka.routing.ConsistentHash]] - */ + /** Setting the number of virtual nodes per node, used in [[akka.routing.ConsistentHash]] */ def withVirtualNodesFactor(vnodes: Int): ConsistentHashingRoutingLogic = copy(virtualNodesFactor = vnodes) - /** - * Java API: Setting the mapping from message to the data to use for the consistent hash key. - */ + /** Java API: Setting the mapping from message to the data to use for the consistent hash key. */ def withHashMapper(mapper: ConsistentHashingRouter.ConsistentHashMapper): ConsistentHashingRoutingLogic = copy(hashMapping = ConsistentHashingRouter.hashMappingAdapter(mapper)) @@ -222,9 +213,11 @@ final case class ConsistentHashingRoutingLogic( hashData match { case bytes: Array[Byte] => currentConsistenHash.nodeFor(bytes).routee case str: String => currentConsistenHash.nodeFor(str).routee - case x: AnyRef => currentConsistenHash.nodeFor(SerializationExtension(system).serialize(x).get).routee + case x: AnyRef => currentConsistenHash.nodeFor(SerializationExtension(system).serialize(x).get).routee case unexpected => - throw new IllegalArgumentException(s"Unexpected hashdata: $unexpected") // will not happen, for exhaustiveness check + throw new IllegalArgumentException( + s"Unexpected hashdata: $unexpected" + ) // will not happen, for exhaustiveness check } } catch { case NonFatal(e) => @@ -312,14 +305,10 @@ final case class ConsistentHashingPool( override def nrOfInstances(sys: ActorSystem) = this.nrOfInstances - /** - * Setting the supervisor strategy to be used for the “head” Router actor. - */ + /** Setting the supervisor strategy to be used for the “head” Router actor. */ def withSupervisorStrategy(strategy: SupervisorStrategy): ConsistentHashingPool = copy(supervisorStrategy = strategy) - /** - * Setting the resizer to be used. - */ + /** Setting the resizer to be used. 
*/ def withResizer(resizer: Resizer): ConsistentHashingPool = copy(resizer = Some(resizer)) /** @@ -328,14 +317,10 @@ final case class ConsistentHashingPool( */ def withDispatcher(dispatcherId: String): ConsistentHashingPool = copy(routerDispatcher = dispatcherId) - /** - * Setting the number of virtual nodes per node, used in [[akka.routing.ConsistentHash]] - */ + /** Setting the number of virtual nodes per node, used in [[akka.routing.ConsistentHash]] */ def withVirtualNodesFactor(vnodes: Int): ConsistentHashingPool = copy(virtualNodesFactor = vnodes) - /** - * Java API: Setting the mapping from message to the data to use for the consistent hash key. - */ + /** Java API: Setting the mapping from message to the data to use for the consistent hash key. */ def withHashMapper(mapper: ConsistentHashingRouter.ConsistentHashMapper): ConsistentHashingPool = copy(hashMapping = ConsistentHashingRouter.hashMappingAdapter(mapper)) @@ -347,8 +332,8 @@ final case class ConsistentHashingPool( */ override def withFallback(other: RouterConfig): RouterConfig = other match { case _: FromConfig | _: NoRouter => this.overrideUnsetConfig(other) - case otherRouter: ConsistentHashingPool => (copy(hashMapping = otherRouter.hashMapping)).overrideUnsetConfig(other) - case _ => throw new IllegalArgumentException("Expected ConsistentHashingPool, got [%s]".format(other)) + case otherRouter: ConsistentHashingPool => copy(hashMapping = otherRouter.hashMapping).overrideUnsetConfig(other) + case _ => throw new IllegalArgumentException("Expected ConsistentHashingPool, got [%s]".format(other)) } } @@ -401,24 +386,18 @@ final case class ConsistentHashingGroup( */ def withDispatcher(dispatcherId: String): ConsistentHashingGroup = copy(routerDispatcher = dispatcherId) - /** - * Setting the number of virtual nodes per node, used in [[akka.routing.ConsistentHash]] - */ + /** Setting the number of virtual nodes per node, used in [[akka.routing.ConsistentHash]] */ def withVirtualNodesFactor(vnodes: Int): 
ConsistentHashingGroup = copy(virtualNodesFactor = vnodes) - /** - * Java API: Setting the mapping from message to the data to use for the consistent hash key. - */ + /** Java API: Setting the mapping from message to the data to use for the consistent hash key. */ def withHashMapper(mapper: ConsistentHashingRouter.ConsistentHashMapper): ConsistentHashingGroup = copy(hashMapping = ConsistentHashingRouter.hashMappingAdapter(mapper)) - /** - * Uses the `hashMapping` defined in code, since that can't be defined in configuration. - */ + /** Uses the `hashMapping` defined in code, since that can't be defined in configuration. */ override def withFallback(other: RouterConfig): RouterConfig = other match { case _: FromConfig | _: NoRouter => super.withFallback(other) case otherRouter: ConsistentHashingGroup => copy(hashMapping = otherRouter.hashMapping) - case _ => throw new IllegalArgumentException("Expected ConsistentHashingGroup, got [%s]".format(other)) + case _ => throw new IllegalArgumentException("Expected ConsistentHashingGroup, got [%s]".format(other)) } } diff --git a/akka-actor/src/main/scala/akka/routing/Listeners.scala b/akka-actor/src/main/scala/akka/routing/Listeners.scala index 700c7e6a269..2705575fae1 100644 --- a/akka-actor/src/main/scala/akka/routing/Listeners.scala +++ b/akka-actor/src/main/scala/akka/routing/Listeners.scala @@ -40,9 +40,7 @@ trait Listeners { self: Actor => while (i.hasNext) f(i.next) } - /** - * Sends the supplied message to all current listeners using the provided sender() as sender. - */ + /** Sends the supplied message to all current listeners using the provided sender() as sender. */ protected def gossip(msg: Any)(implicit sender: ActorRef = Actor.noSender): Unit = { val i = listeners.iterator while (i.hasNext) i.next ! 
msg diff --git a/akka-actor/src/main/scala/akka/routing/MurmurHash.scala b/akka-actor/src/main/scala/akka/routing/MurmurHash.scala index 919f9c1e55a..2eb646b1a7e 100644 --- a/akka-actor/src/main/scala/akka/routing/MurmurHash.scala +++ b/akka-actor/src/main/scala/akka/routing/MurmurHash.scala @@ -88,7 +88,7 @@ object MurmurHash { /** Once all hashes have been incorporated, this performs a final mixing */ def finalizeHash(hash: Int): Int = { - var i = (hash ^ (hash >>> 16)) + var i = hash ^ (hash >>> 16) i *= finalMixer1 i ^= (i >>> 13) i *= finalMixer2 diff --git a/akka-actor/src/main/scala/akka/routing/OptimalSizeExploringResizer.scala b/akka-actor/src/main/scala/akka/routing/OptimalSizeExploringResizer.scala index 0d4fd00b1d6..ad8d410bf54 100644 --- a/akka-actor/src/main/scala/akka/routing/OptimalSizeExploringResizer.scala +++ b/akka-actor/src/main/scala/akka/routing/OptimalSizeExploringResizer.scala @@ -32,28 +32,20 @@ trait OptimalSizeExploringResizer extends Resizer { case object OptimalSizeExploringResizer { - /** - * INTERNAL API - */ + /** INTERNAL API */ private[routing] type PoolSize = Int - /** - * INTERNAL API - */ + /** INTERNAL API */ private[routing] case class UnderUtilizationStreak(start: LocalDateTime, highestUtilization: Int) - /** - * INTERNAL API - */ + /** INTERNAL API */ private[routing] case class ResizeRecord( underutilizationStreak: Option[UnderUtilizationStreak] = None, messageCount: Long = 0, totalQueueLength: Int = 0, checkTime: Long = 0) - /** - * INTERNAL API - */ + /** INTERNAL API */ private[routing] type PerformanceLog = Map[PoolSize, Duration] def apply(resizerCfg: Config): OptimalSizeExploringResizer = @@ -114,7 +106,6 @@ case object OptimalSizeExploringResizer { * * For documentation about the parameters, see the reference.conf - * akka.actor.deployment.default.optimal-size-exploring-resizer - * */ @SerialVersionUID(1L) case class DefaultOptimalSizeExploringResizer( @@ -237,7 +228,7 @@ case class 
DefaultOptimalSizeExploringResizer( if (totalProcessed > 0) { val duration = Duration.fromNanos(System.nanoTime() - record.checkTime) val last: Duration = duration / totalProcessed - //exponentially decrease the weight of old last metrics data + // exponentially decrease the weight of old last metrics data val toUpdate = performanceLog.get(currentSize).fold(last) { oldSpeed => (oldSpeed * (1.0 - weightOfLatestMetric)) + (last * weightOfLatestMetric) } @@ -259,7 +250,8 @@ case class DefaultOptimalSizeExploringResizer( val currentSize = currentRoutees.length val now = LocalDateTime.now val proposedChange = - if (record.underutilizationStreak.fold(false)(_.start.isBefore(now.minus(downsizeAfterUnderutilizedFor.asJava)))) { + if (record.underutilizationStreak.fold(false)( + _.start.isBefore(now.minus(downsizeAfterUnderutilizedFor.asJava)))) { val downsizeTo = (record.underutilizationStreak.get.highestUtilization * downsizeRatio).toInt Math.min(downsizeTo - currentSize, 0) } else if (performanceLog.isEmpty || record.underutilizationStreak.isDefined) { diff --git a/akka-actor/src/main/scala/akka/routing/Random.scala b/akka-actor/src/main/scala/akka/routing/Random.scala index 68377beaf9b..92da625f100 100644 --- a/akka-actor/src/main/scala/akka/routing/Random.scala +++ b/akka-actor/src/main/scala/akka/routing/Random.scala @@ -20,9 +20,7 @@ object RandomRoutingLogic { def apply(): RandomRoutingLogic = new RandomRoutingLogic } -/** - * Randomly selects one of the target routees to send a message to - */ +/** Randomly selects one of the target routees to send a message to */ @nowarn("msg=@SerialVersionUID has no effect") @SerialVersionUID(1L) final class RandomRoutingLogic extends RoutingLogic { @@ -87,14 +85,10 @@ final case class RandomPool( override def nrOfInstances(sys: ActorSystem) = this.nrOfInstances - /** - * Setting the supervisor strategy to be used for the “head” Router actor. - */ + /** Setting the supervisor strategy to be used for the “head” Router actor. 
*/ def withSupervisorStrategy(strategy: SupervisorStrategy): RandomPool = copy(supervisorStrategy = strategy) - /** - * Setting the resizer to be used. - */ + /** Setting the resizer to be used. */ def withResizer(resizer: Resizer): RandomPool = copy(resizer = Some(resizer)) /** diff --git a/akka-actor/src/main/scala/akka/routing/Resizer.scala b/akka-actor/src/main/scala/akka/routing/Resizer.scala index 6339203794e..ae8e7c683ce 100644 --- a/akka-actor/src/main/scala/akka/routing/Resizer.scala +++ b/akka-actor/src/main/scala/akka/routing/Resizer.scala @@ -74,9 +74,7 @@ class ResizerInitializationException(message: String, cause: Throwable) extends case object DefaultResizer { - /** - * Creates a new DefaultResizer from the given configuration - */ + /** Creates a new DefaultResizer from the given configuration */ def apply(resizerConfig: Config): DefaultResizer = DefaultResizer( lowerBound = resizerConfig.getInt("lower-bound"), @@ -136,9 +134,7 @@ case class DefaultResizer( messagesPerResize: Int = 10) extends Resizer { - /** - * Java API constructor for default values except bounds. - */ + /** Java API constructor for default values except bounds. 
*/ def this(lower: Int, upper: Int) = this(lowerBound = lower, upperBound = upper) if (lowerBound < 0) throw new IllegalArgumentException("lowerBound must be >= 0, was: [%s]".format(lowerBound)) @@ -153,7 +149,7 @@ case class DefaultResizer( if (messagesPerResize <= 0) throw new IllegalArgumentException("messagesPerResize must be > 0, was [%s]".format(messagesPerResize)) - def isTimeForResize(messageCounter: Long): Boolean = (messageCounter % messagesPerResize == 0) + def isTimeForResize(messageCounter: Long): Boolean = messageCounter % messagesPerResize == 0 override def resize(currentRoutees: immutable.IndexedSeq[Routee]): Int = capacity(currentRoutees) @@ -248,9 +244,7 @@ case class DefaultResizer( else 0 } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] final class ResizablePoolCell( _system: ActorSystemImpl, _ref: InternalActorRef, @@ -275,7 +269,7 @@ private[akka] final class ResizablePoolCell( override def sendMessage(envelope: Envelope): Unit = { if (!routerConfig.isManagementMessage(envelope.message) && - resizer.isTimeForResize(resizeCounter.getAndIncrement()) && resizeInProgress.compareAndSet(false, true)) { + resizer.isTimeForResize(resizeCounter.getAndIncrement()) && resizeInProgress.compareAndSet(false, true)) { super.sendMessage(Envelope(ResizablePoolActor.Resize, self, system)) } @@ -297,28 +291,22 @@ private[akka] final class ResizablePoolCell( } finally resizeInProgress.set(false) } - /** - * This approach is chosen for binary compatibility - */ + /** This approach is chosen for binary compatibility */ private def tryReportMessageCount(): Unit = { resizer match { case r: OptimalSizeExploringResizer => r.reportMessageCount(router.routees, resizeCounter.get()) - case _ => //ignore + case _ => // ignore } } } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] object ResizablePoolActor { case object Resize extends RouterManagementMesssage } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] class 
ResizablePoolActor(supervisorStrategy: SupervisorStrategy) extends RouterPoolActor(supervisorStrategy) { import ResizablePoolActor._ @@ -331,9 +319,8 @@ private[akka] class ResizablePoolActor(supervisorStrategy: SupervisorStrategy) } override def receive = - ({ - case Resize => - resizerCell.resize(initial = false) + ({ case Resize => + resizerCell.resize(initial = false) }: Actor.Receive).orElse(super.receive) } diff --git a/akka-actor/src/main/scala/akka/routing/RoundRobin.scala b/akka-actor/src/main/scala/akka/routing/RoundRobin.scala index 6a5cc7fb25d..4b7dc68f06e 100644 --- a/akka-actor/src/main/scala/akka/routing/RoundRobin.scala +++ b/akka-actor/src/main/scala/akka/routing/RoundRobin.scala @@ -95,14 +95,10 @@ final case class RoundRobinPool( override def nrOfInstances(sys: ActorSystem) = this.nrOfInstances - /** - * Setting the supervisor strategy to be used for the “head” Router actor. - */ + /** Setting the supervisor strategy to be used for the “head” Router actor. */ def withSupervisorStrategy(strategy: SupervisorStrategy): RoundRobinPool = copy(supervisorStrategy = strategy) - /** - * Setting the resizer to be used. - */ + /** Setting the resizer to be used. 
*/ def withResizer(resizer: Resizer): RoundRobinPool = copy(resizer = Some(resizer)) /** diff --git a/akka-actor/src/main/scala/akka/routing/RoutedActorCell.scala b/akka-actor/src/main/scala/akka/routing/RoutedActorCell.scala index 8b708a188ce..e1022d5d167 100644 --- a/akka-actor/src/main/scala/akka/routing/RoutedActorCell.scala +++ b/akka-actor/src/main/scala/akka/routing/RoutedActorCell.scala @@ -22,9 +22,7 @@ import akka.dispatch.Envelope import akka.dispatch.MessageDispatcher import akka.util.ccompat._ -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] object RoutedActorCell { class RouterActorCreator(routerConfig: RouterConfig) extends IndirectActorProducer { override def actorClass = classOf[RouterActor] @@ -33,9 +31,7 @@ private[akka] object RoutedActorCell { } -/** - * INTERNAL API - */ +/** INTERNAL API */ @ccompatUsedUntil213 private[akka] class RoutedActorCell( _system: ActorSystemImpl, @@ -131,9 +127,7 @@ private[akka] class RoutedActorCell( * end of construction */ - /** - * Route the message via the router to the selected destination. - */ + /** Route the message via the router to the selected destination. 
*/ override def sendMessage(envelope: Envelope): Unit = { if (routerConfig.isManagementMessage(envelope.message)) super.sendMessage(envelope) @@ -143,9 +137,7 @@ private[akka] class RoutedActorCell( } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] class RouterActor extends Actor { val cell = context match { case x: RoutedActorCell => x @@ -181,9 +173,7 @@ private[akka] class RouterActor extends Actor { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] class RouterPoolActor(override val supervisorStrategy: SupervisorStrategy) extends RouterActor { val pool = cell.routerConfig match { @@ -193,16 +183,15 @@ private[akka] class RouterPoolActor(override val supervisorStrategy: SupervisorS } override def receive = - ({ - case AdjustPoolSize(change: Int) => - if (change > 0) { - val newRoutees = Vector.fill(change)(pool.newRoutee(cell.routeeProps, context)) - cell.addRoutees(newRoutees) - } else if (change < 0) { - val currentRoutees = cell.router.routees - val abandon = currentRoutees.drop(currentRoutees.length + change) - cell.removeRoutees(abandon, stopChild = true) - } + ({ case AdjustPoolSize(change: Int) => + if (change > 0) { + val newRoutees = Vector.fill(change)(pool.newRoutee(cell.routeeProps, context)) + cell.addRoutees(newRoutees) + } else if (change < 0) { + val currentRoutees = cell.router.routees + val abandon = currentRoutees.drop(currentRoutees.length + change) + cell.removeRoutees(abandon, stopChild = true) + } }: Actor.Receive).orElse(super.receive) } diff --git a/akka-actor/src/main/scala/akka/routing/Router.scala b/akka-actor/src/main/scala/akka/routing/Router.scala index 7c52b982f14..bfa8a9da8c8 100644 --- a/akka-actor/src/main/scala/akka/routing/Router.scala +++ b/akka-actor/src/main/scala/akka/routing/Router.scala @@ -35,24 +35,18 @@ trait RoutingLogic extends NoSerializationVerificationNeeded { } -/** - * Abstraction of a destination for messages routed via a [[Router]]. 
- */ +/** Abstraction of a destination for messages routed via a [[Router]]. */ trait Routee { def send(message: Any, sender: ActorRef): Unit } -/** - * [[Routee]] that sends the messages to an [[akka.actor.ActorRef]]. - */ +/** [[Routee]] that sends the messages to an [[akka.actor.ActorRef]]. */ final case class ActorRefRoutee(ref: ActorRef) extends Routee { override def send(message: Any, sender: ActorRef): Unit = ref.tell(message, sender) } -/** - * [[Routee]] that sends the messages to an [[akka.actor.ActorSelection]]. - */ +/** [[Routee]] that sends the messages to an [[akka.actor.ActorSelection]]. */ final case class ActorSelectionRoutee(selection: ActorSelection) extends Routee { override def send(message: Any, sender: ActorRef): Unit = selection.tell(message, sender) @@ -67,19 +61,13 @@ object NoRoutee extends Routee { override def send(message: Any, sender: ActorRef): Unit = () } -/** - * [[Routee]] that sends each message to all `routees`. - */ +/** [[Routee]] that sends each message to all `routees`. 
*/ final case class SeveralRoutees(routees: immutable.IndexedSeq[Routee]) extends Routee { - /** - * Java API - */ + /** Java API */ def this(rs: java.lang.Iterable[Routee]) = this(routees = immutableSeq(rs).toVector) - /** - * Java API - */ + /** Java API */ def getRoutees(): java.util.List[Routee] = { import akka.util.ccompat.JavaConverters._ routees.asJava @@ -99,14 +87,10 @@ final case class SeveralRoutees(routees: immutable.IndexedSeq[Routee]) extends R */ final case class Router(logic: RoutingLogic, routees: immutable.IndexedSeq[Routee] = Vector.empty) { - /** - * Java API - */ + /** Java API */ def this(logic: RoutingLogic) = this(logic, Vector.empty) - /** - * Java API - */ + /** Java API */ def this(logic: RoutingLogic, routees: java.lang.Iterable[Routee]) = this(logic, immutableSeq(routees).toVector) /** @@ -133,14 +117,10 @@ final case class Router(logic: RoutingLogic, routees: immutable.IndexedSeq[Route case _ => msg } - /** - * Create a new instance with the specified routees and the same [[RoutingLogic]]. - */ + /** Create a new instance with the specified routees and the same [[RoutingLogic]]. */ def withRoutees(rs: immutable.IndexedSeq[Routee]): Router = copy(routees = rs) - /** - * Create a new instance with one more routee and the same [[RoutingLogic]]. - */ + /** Create a new instance with one more routee and the same [[RoutingLogic]]. */ def addRoutee(routee: Routee): Router = copy(routees = routees :+ routee) /** @@ -155,9 +135,7 @@ final case class Router(logic: RoutingLogic, routees: immutable.IndexedSeq[Route */ def addRoutee(sel: ActorSelection): Router = addRoutee(ActorSelectionRoutee(sel)) - /** - * Create a new instance without the specified routee. - */ + /** Create a new instance without the specified routee. 
*/ def removeRoutee(routee: Routee): Router = copy(routees = routees.filterNot(_ == routee)) /** diff --git a/akka-actor/src/main/scala/akka/routing/RouterConfig.scala b/akka-actor/src/main/scala/akka/routing/RouterConfig.scala index 7a6ba9745b0..d6f801c4053 100644 --- a/akka-actor/src/main/scala/akka/routing/RouterConfig.scala +++ b/akka-actor/src/main/scala/akka/routing/RouterConfig.scala @@ -79,14 +79,10 @@ trait RouterConfig extends Serializable { */ def stopRouterWhenAllRouteesRemoved: Boolean = true - /** - * Overridable merge strategy, by default completely prefers `this` (i.e. no merge). - */ + /** Overridable merge strategy, by default completely prefers `this` (i.e. no merge). */ def withFallback(@unused other: RouterConfig): RouterConfig = this - /** - * Check that everything is there which is needed. Called in constructor of RoutedActorRef to fail early. - */ + /** Check that everything is there which is needed. Called in constructor of RoutedActorRef to fail early. */ def verifyConfig(@unused path: ActorPath): Unit = () /** @@ -112,7 +108,7 @@ private[akka] trait PoolOverrideUnsetConfig[T <: Pool] extends Pool { case p: Pool => val wssConf: PoolOverrideUnsetConfig[T] = if ((this.supervisorStrategy eq Pool.defaultSupervisorStrategy) - && (p.supervisorStrategy ne Pool.defaultSupervisorStrategy)) + && (p.supervisorStrategy ne Pool.defaultSupervisorStrategy)) this.withSupervisorStrategy(p.supervisorStrategy).asInstanceOf[PoolOverrideUnsetConfig[T]] else this @@ -129,9 +125,7 @@ private[akka] trait PoolOverrideUnsetConfig[T <: Pool] extends Pool { def withResizer(resizer: Resizer): T } -/** - * Java API: Base class for custom router [[Group]] - */ +/** Java API: Base class for custom router [[Group]] */ abstract class GroupBase extends Group { def getPaths(system: ActorSystem): java.lang.Iterable[String] @@ -155,27 +149,21 @@ trait Group extends RouterConfig { */ def props(): Props = Props.empty.withRouter(this) - /** - * INTERNAL API - */ + /** INTERNAL 
API */ private[akka] def routeeFor(path: String, context: ActorContext): Routee = ActorSelectionRoutee(context.actorSelection(path)) - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] override def createRouterActor(): RouterActor = new RouterActor } object Pool { - val defaultSupervisorStrategy: SupervisorStrategy = OneForOneStrategy() { - case _ => SupervisorStrategy.Escalate + val defaultSupervisorStrategy: SupervisorStrategy = OneForOneStrategy() { case _ => + SupervisorStrategy.Escalate } } -/** - * Java API: Base class for custom router [[Pool]] - */ +/** Java API: Base class for custom router [[Pool]] */ abstract class PoolBase extends Pool /** @@ -184,9 +172,7 @@ abstract class PoolBase extends Pool */ trait Pool extends RouterConfig { - /** - * Initial number of routee instances - */ + /** Initial number of routee instances */ def nrOfInstances(sys: ActorSystem): Int /** @@ -196,15 +182,11 @@ trait Pool extends RouterConfig { */ def usePoolDispatcher: Boolean = false - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] def newRoutee(routeeProps: Props, context: ActorContext): Routee = ActorRefRoutee(context.actorOf(enrichWithPoolDispatcher(routeeProps, context))) - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] def enrichWithPoolDispatcher(routeeProps: Props, context: ActorContext): Props = if (usePoolDispatcher && routeeProps.dispatcher == Dispatchers.DefaultDispatcherId) routeeProps.withDispatcher( @@ -221,9 +203,7 @@ trait Pool extends RouterConfig { */ def resizer: Option[Resizer] - /** - * SupervisorStrategy for the head actor, i.e. for supervising the routees of the pool. - */ + /** SupervisorStrategy for the head actor, i.e. for supervising the routees of the pool. 
*/ def supervisorStrategy: SupervisorStrategy /** @@ -239,9 +219,7 @@ trait Pool extends RouterConfig { */ override def stopRouterWhenAllRouteesRemoved: Boolean = resizer.isEmpty - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] override def createRouterActor(): RouterActor = resizer match { case None => new RouterPoolActor(supervisorStrategy) @@ -256,9 +234,7 @@ trait Pool extends RouterConfig { */ abstract class CustomRouterConfig extends RouterConfig { - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] override def createRouterActor(): RouterActor = new RouterActor override def routerDispatcher: String = Dispatchers.DefaultDispatcherId @@ -271,9 +247,7 @@ abstract class CustomRouterConfig extends RouterConfig { */ case object FromConfig extends FromConfig { - /** - * Java API: get the singleton instance - */ + /** Java API: get the singleton instance */ def getInstance = this @inline final def apply( resizer: Option[Resizer] = None, @@ -304,24 +278,18 @@ class FromConfig( override def createRouter(system: ActorSystem): Router = throw new UnsupportedOperationException("FromConfig must not create Router") - /** - * INTERNAL API - */ + /** INTERNAL API */ override private[akka] def createRouterActor(): RouterActor = throw new UnsupportedOperationException("FromConfig must not create RouterActor") override def verifyConfig(path: ActorPath): Unit = throw new ConfigurationException(s"Configuration missing for router [$path] in 'akka.actor.deployment' section.") - /** - * Setting the supervisor strategy to be used for the “head” Router actor. - */ + /** Setting the supervisor strategy to be used for the “head” Router actor. */ def withSupervisorStrategy(strategy: SupervisorStrategy): FromConfig = new FromConfig(resizer, strategy, routerDispatcher) - /** - * Setting the resizer to be used. - */ + /** Setting the resizer to be used. 
*/ def withResizer(resizer: Resizer): FromConfig = new FromConfig(Some(resizer), supervisorStrategy, routerDispatcher) @@ -355,26 +323,20 @@ case object NoRouter extends NoRouter { override def createRouter(system: ActorSystem): Router = throw new UnsupportedOperationException("NoRouter has no Router") - /** - * INTERNAL API - */ + /** INTERNAL API */ override private[akka] def createRouterActor(): RouterActor = throw new UnsupportedOperationException("NoRouter must not create RouterActor") override def routerDispatcher: String = throw new UnsupportedOperationException("NoRouter has no dispatcher") override def withFallback(other: akka.routing.RouterConfig): akka.routing.RouterConfig = other - /** - * Java API: get the singleton instance - */ + /** Java API: get the singleton instance */ def getInstance = this def props(routeeProps: Props): Props = routeeProps.withRouter(this) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @nowarn("msg=@SerialVersionUID has no effect") @SerialVersionUID(1L) private[akka] trait RouterManagementMesssage @@ -390,21 +352,15 @@ abstract class GetRoutees extends RouterManagementMesssage @SerialVersionUID(1L) case object GetRoutees extends GetRoutees { - /** - * Java API: get the singleton instance - */ + /** Java API: get the singleton instance */ def getInstance = this } -/** - * Message used to carry information about what routees the router is currently using. - */ +/** Message used to carry information about what routees the router is currently using. */ @SerialVersionUID(1L) final case class Routees(routees: immutable.IndexedSeq[Routee]) { - /** - * Java API - */ + /** Java API */ def getRoutees: java.util.List[Routee] = { import akka.util.ccompat.JavaConverters._ routees.asJava @@ -425,7 +381,6 @@ final case class AddRoutee(routee: Routee) extends RouterManagementMesssage * For a pool, with child routees, the routee is stopped by sending a [[akka.actor.PoisonPill]] * to the routee. 
Precautions are taken reduce the risk of dropping messages that are concurrently * being routed to the removed routee, but there are no guarantees. - * */ @SerialVersionUID(1L) final case class RemoveRoutee(routee: Routee) extends RouterManagementMesssage diff --git a/akka-actor/src/main/scala/akka/routing/ScatterGatherFirstCompleted.scala b/akka-actor/src/main/scala/akka/routing/ScatterGatherFirstCompleted.scala index 39b5d16b2b4..b0ea0f3758d 100644 --- a/akka-actor/src/main/scala/akka/routing/ScatterGatherFirstCompleted.scala +++ b/akka-actor/src/main/scala/akka/routing/ScatterGatherFirstCompleted.scala @@ -37,9 +37,7 @@ final case class ScatterGatherFirstCompletedRoutingLogic(within: FiniteDuration) ScatterGatherFirstCompletedRoutees(routees, within) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @SerialVersionUID(1L) private[akka] final case class ScatterGatherFirstCompletedRoutees( routees: immutable.IndexedSeq[Routee], @@ -138,15 +136,11 @@ final case class ScatterGatherFirstCompletedPool( override def nrOfInstances(sys: ActorSystem) = this.nrOfInstances - /** - * Setting the supervisor strategy to be used for the “head” Router actor. - */ + /** Setting the supervisor strategy to be used for the “head” Router actor. */ def withSupervisorStrategy(strategy: SupervisorStrategy): ScatterGatherFirstCompletedPool = copy(supervisorStrategy = strategy) - /** - * Setting the resizer to be used. - */ + /** Setting the resizer to be used. 
*/ def withResizer(resizer: Resizer): ScatterGatherFirstCompletedPool = copy(resizer = Some(resizer)) /** diff --git a/akka-actor/src/main/scala/akka/routing/SmallestMailbox.scala b/akka-actor/src/main/scala/akka/routing/SmallestMailbox.scala index e8d7f2dbf2f..135bb1e79f1 100644 --- a/akka-actor/src/main/scala/akka/routing/SmallestMailbox.scala +++ b/akka-actor/src/main/scala/akka/routing/SmallestMailbox.scala @@ -67,12 +67,12 @@ class SmallestMailboxRoutingLogic extends RoutingLogic { val target = targets(at) val newScore: Long = if (isSuspended(target)) Long.MaxValue - 1 - else { //Just about better than the DeadLetters + else { // Just about better than the DeadLetters (if (isProcessingMessage(target)) 1L else 0L) + (if (!hasMessages(target)) 0L - else { //Race between hasMessages and numberOfMessages here, unfortunate the numberOfMessages returns 0 if unknown + else { // Race between hasMessages and numberOfMessages here, unfortunate the numberOfMessages returns 0 if unknown val noOfMsgs: Long = if (deep) numberOfMessages(target) else 0 - if (noOfMsgs > 0) noOfMsgs else Long.MaxValue - 3 //Just better than a suspended actorref + if (noOfMsgs > 0) noOfMsgs else Long.MaxValue - 3 // Just better than a suspended actorref }) } @@ -208,14 +208,10 @@ final case class SmallestMailboxPool( override def nrOfInstances(sys: ActorSystem) = this.nrOfInstances - /** - * Setting the supervisor strategy to be used for the “head” Router actor. - */ + /** Setting the supervisor strategy to be used for the “head” Router actor. */ def withSupervisorStrategy(strategy: SupervisorStrategy): SmallestMailboxPool = copy(supervisorStrategy = strategy) - /** - * Setting the resizer to be used. - */ + /** Setting the resizer to be used. 
*/ def withResizer(resizer: Resizer): SmallestMailboxPool = copy(resizer = Some(resizer)) /** diff --git a/akka-actor/src/main/scala/akka/routing/TailChopping.scala b/akka-actor/src/main/scala/akka/routing/TailChopping.scala index ed1c65fe716..e918823dac2 100644 --- a/akka-actor/src/main/scala/akka/routing/TailChopping.scala +++ b/akka-actor/src/main/scala/akka/routing/TailChopping.scala @@ -59,9 +59,7 @@ final case class TailChoppingRoutingLogic( } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @SerialVersionUID(1L) private[akka] final case class TailChoppingRoutees( scheduler: Scheduler, @@ -95,10 +93,9 @@ private[akka] final case class TailChoppingRoutees( promise.tryFailure(new AskTimeoutException(s"Ask timed out on [$sender] after [$within.toMillis} ms]"))) val f = promise.future - f.onComplete { - case _ => - tryWithNext.cancel() - sendTimeout.cancel() + f.onComplete { case _ => + tryWithNext.cancel() + sendTimeout.cancel() } f.pipeTo(sender) } @@ -195,14 +192,10 @@ final case class TailChoppingPool( override def nrOfInstances(sys: ActorSystem) = this.nrOfInstances - /** - * Setting the supervisor strategy to be used for the “head” Router actor. - */ + /** Setting the supervisor strategy to be used for the “head” Router actor. */ def withSupervisorStrategy(strategy: SupervisorStrategy): TailChoppingPool = copy(supervisorStrategy = strategy) - /** - * Setting the resizer to be used. - */ + /** Setting the resizer to be used. 
*/ def withResizer(resizer: Resizer): TailChoppingPool = copy(resizer = Some(resizer)) /** diff --git a/akka-actor/src/main/scala/akka/serialization/AsyncSerializer.scala b/akka-actor/src/main/scala/akka/serialization/AsyncSerializer.scala index 64a17cc2917..17b4227b100 100644 --- a/akka-actor/src/main/scala/akka/serialization/AsyncSerializer.scala +++ b/akka-actor/src/main/scala/akka/serialization/AsyncSerializer.scala @@ -29,9 +29,7 @@ trait AsyncSerializer { */ def toBinaryAsync(o: AnyRef): Future[Array[Byte]] - /** - * Produces an object from an array of bytes, with an optional type-hint. - */ + /** Produces an object from an array of bytes, with an optional type-hint. */ def fromBinaryAsync(bytes: Array[Byte], manifest: String): Future[AnyRef] } @@ -72,15 +70,11 @@ abstract class AsyncSerializerWithStringManifestCS(system: ExtendedActorSystem) def fromBinaryAsyncCS(bytes: Array[Byte], manifest: String): CompletionStage[AnyRef] - /** - * Delegates to [[AsyncSerializerWithStringManifestCS#toBinaryAsyncCS]] - */ + /** Delegates to [[AsyncSerializerWithStringManifestCS#toBinaryAsyncCS]] */ final def toBinaryAsync(o: AnyRef): Future[Array[Byte]] = toBinaryAsyncCS(o).toScala - /** - * Delegates to [[AsyncSerializerWithStringManifestCS#fromBinaryAsyncCS]] - */ + /** Delegates to [[AsyncSerializerWithStringManifestCS#fromBinaryAsyncCS]] */ def fromBinaryAsync(bytes: Array[Byte], manifest: String): Future[AnyRef] = fromBinaryAsyncCS(bytes, manifest).toScala } diff --git a/akka-actor/src/main/scala/akka/serialization/PrimitiveSerializers.scala b/akka-actor/src/main/scala/akka/serialization/PrimitiveSerializers.scala index 49a70600c25..6e946155ade 100644 --- a/akka-actor/src/main/scala/akka/serialization/PrimitiveSerializers.scala +++ b/akka-actor/src/main/scala/akka/serialization/PrimitiveSerializers.scala @@ -10,9 +10,7 @@ import akka.actor.ExtendedActorSystem import akka.annotation.InternalApi import akka.util.ByteString -/** - * INTERNAL API: only public by 
configuration - */ +/** INTERNAL API: only public by configuration */ @InternalApi private[akka] final class LongSerializer(val system: ExtendedActorSystem) extends Serializer with ByteBufferSerializer { @@ -33,7 +31,7 @@ import akka.util.ByteString var long = Long.unbox(o) var i = 0 while (long != 0) { - result(i) = (long & 0xFF).toByte + result(i) = (long & 0xff).toByte i += 1 long >>>= 8 } @@ -45,16 +43,14 @@ import akka.util.ByteString var i = 7 while (i >= 0) { result <<= 8 - result |= (bytes(i).toLong & 0xFF) + result |= (bytes(i).toLong & 0xff) i -= 1 } Long.box(result) } } -/** - * INTERNAL API: only public by configuration - */ +/** INTERNAL API: only public by configuration */ @InternalApi private[akka] final class IntSerializer(val system: ExtendedActorSystem) extends Serializer with ByteBufferSerializer { @@ -71,7 +67,7 @@ import akka.util.ByteString var int = Int.unbox(o) var i = 0 while (int != 0) { - result(i) = (int & 0xFF).toByte + result(i) = (int & 0xff).toByte i += 1 int >>>= 8 } @@ -83,16 +79,14 @@ import akka.util.ByteString var i = 3 while (i >= 0) { result <<= 8 - result |= (bytes(i).toInt & 0xFF) + result |= (bytes(i).toInt & 0xff) i -= 1 } Int.box(result) } } -/** - * INTERNAL API: only public by configuration - */ +/** INTERNAL API: only public by configuration */ @InternalApi private[akka] final class StringSerializer(val system: ExtendedActorSystem) extends Serializer with ByteBufferSerializer { @@ -114,9 +108,7 @@ import akka.util.ByteString } -/** - * INTERNAL API: only public by configuration - */ +/** INTERNAL API: only public by configuration */ @InternalApi private[akka] final class ByteStringSerializer(val system: ExtendedActorSystem) extends Serializer with ByteBufferSerializer { @@ -148,9 +140,7 @@ import akka.util.ByteString } -/** - * INTERNAL API: only public by configuration - */ +/** INTERNAL API: only public by configuration */ @InternalApi private[akka] final class BooleanSerializer(val system: ExtendedActorSystem) 
extends Serializer with ByteBufferSerializer { diff --git a/akka-actor/src/main/scala/akka/serialization/Serialization.scala b/akka-actor/src/main/scala/akka/serialization/Serialization.scala index 2153a12ee89..9684c53c6af 100644 --- a/akka-actor/src/main/scala/akka/serialization/Serialization.scala +++ b/akka-actor/src/main/scala/akka/serialization/Serialization.scala @@ -27,9 +27,7 @@ import akka.util.ccompat._ @ccompatUsedUntil213 object Serialization { - /** - * Tuple that represents mapping from Class to Serializer - */ + /** Tuple that represents mapping from Class to Serializer */ type ClassSerializer = (Class[_], Serializer) /** @@ -48,7 +46,7 @@ object Serialization { private final def configToMap(cfg: Config): Map[String, String] = { import akka.util.ccompat.JavaConverters._ - cfg.root.unwrapped.asScala.toMap.map { case (k, v) => (k -> v.toString) } + cfg.root.unwrapped.asScala.toMap.map { case (k, v) => k -> v.toString } } } @@ -176,14 +174,15 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { @deprecated("Use deserialize that accepts the `manifest` as a class name.", since = "2.6.0") def deserialize[T](bytes: Array[Byte], serializerId: Int, clazz: Option[Class[_ <: T]]): Try[T] = Try { - val serializer = try getSerializerById(serializerId) - catch { - case _: NoSuchElementException => - throw new NotSerializableException( - s"Cannot find serializer with id [$serializerId]${clazz.map(c => " (class [" + c.getName + "])").getOrElse("")}. " + - "The most probable reason is that the configuration entry " + - "akka.actor.serializers is not in sync between the two systems.") - } + val serializer = + try getSerializerById(serializerId) + catch { + case _: NoSuchElementException => + throw new NotSerializableException( + s"Cannot find serializer with id [$serializerId]${clazz.map(c => " (class [" + c.getName + "])").getOrElse("")}. 
" + + "The most probable reason is that the configuration entry " + + "akka.actor.serializers is not in sync between the two systems.") + } withTransportInformation { () => serializer.fromBinary(bytes, clazz).asInstanceOf[T] } @@ -196,13 +195,14 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { */ def deserialize(bytes: Array[Byte], serializerId: Int, manifest: String): Try[AnyRef] = Try { - val serializer = try getSerializerById(serializerId) - catch { - case _: NoSuchElementException => - throw new NotSerializableException( - s"Cannot find serializer with id [$serializerId] (manifest [$manifest]). The most probable reason is that the configuration entry " + - "akka.actor.serializers is not in sync between the two systems.") - } + val serializer = + try getSerializerById(serializerId) + catch { + case _: NoSuchElementException => + throw new NotSerializableException( + s"Cannot find serializer with id [$serializerId] (manifest [$manifest]). The most probable reason is that the configuration entry " + + "akka.actor.serializers is not in sync between the two systems.") + } deserializeByteArray(bytes, serializer, manifest) } @@ -246,13 +246,14 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { */ @throws(classOf[NotSerializableException]) def deserializeByteBuffer(buf: ByteBuffer, serializerId: Int, manifest: String): AnyRef = { - val serializer = try getSerializerById(serializerId) - catch { - case _: NoSuchElementException => - throw new NotSerializableException( - s"Cannot find serializer with id [$serializerId] (manifest [$manifest]). The most probable reason is that the configuration entry " + - "akka.actor.serializers is not in synch between the two systems.") - } + val serializer = + try getSerializerById(serializerId) + catch { + case _: NoSuchElementException => + throw new NotSerializableException( + s"Cannot find serializer with id [$serializerId] (manifest [$manifest]). 
The most probable reason is that the configuration entry " + + "akka.actor.serializers is not in synch between the two systems.") + } // not using `withTransportInformation { () =>` because deserializeByteBuffer is supposed to be the // possibility for allocation free serialization @@ -310,8 +311,8 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { (possibilities.forall(_._2 == possibilities(0)._2)) val ser = { - bindings.filter { - case (c, _) => c.isAssignableFrom(clazz) + bindings.filter { case (c, _) => + c.isAssignableFrom(clazz) } match { case immutable.Seq() => throw new NotSerializableException(s"No configured serialization-bindings for class [${clazz.getName}]") @@ -393,50 +394,42 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { case _: NoSuchMethodException => system.dynamicAccess .createInstanceFor[Serializer](fqn, List(classOf[ClassicActorSystemProvider] -> system)) - .recoverWith { - case _: NoSuchMethodException => - system.dynamicAccess.createInstanceFor[Serializer](fqn, Nil).recoverWith { - case _: NoSuchMethodException => - if (bindingName == "") { - // compatibility with (public) serializerOf method without bindingName - throw new NoSuchMethodException(s"The serializer [$fqn] doesn't have a matching constructor, " + + .recoverWith { case _: NoSuchMethodException => + system.dynamicAccess.createInstanceFor[Serializer](fqn, Nil).recoverWith { + case _: NoSuchMethodException => + if (bindingName == "") { + // compatibility with (public) serializerOf method without bindingName + throw new NoSuchMethodException(s"The serializer [$fqn] doesn't have a matching constructor, " + s"see API documentation of ${classOf[Serializer].getName}") - } else - system.dynamicAccess - .createInstanceFor[Serializer]( - fqn, - List(classOf[ExtendedActorSystem] -> system, classOf[String] -> bindingName)) - .recoverWith { - case _: NoSuchMethodException => + } else + system.dynamicAccess + .createInstanceFor[Serializer]( + 
fqn, + List(classOf[ExtendedActorSystem] -> system, classOf[String] -> bindingName)) + .recoverWith { case _: NoSuchMethodException => + system.dynamicAccess + .createInstanceFor[Serializer]( + fqn, + List(classOf[ActorSystem] -> system, classOf[String] -> bindingName)) + .recoverWith { case _: NoSuchMethodException => system.dynamicAccess .createInstanceFor[Serializer]( fqn, - List(classOf[ActorSystem] -> system, classOf[String] -> bindingName)) - .recoverWith { - case _: NoSuchMethodException => - system.dynamicAccess - .createInstanceFor[Serializer]( - fqn, - List( - classOf[ClassicActorSystemProvider] -> system, - classOf[String] -> bindingName)) - .recoverWith { - case _: NoSuchMethodException => - Failure(new NoSuchMethodException( - s"The serializer [$fqn] for binding [$bindingName] doesn't have a matching " + - s"constructor, see API documentation of ${classOf[Serializer].getName}")) - } + List(classOf[ClassicActorSystemProvider] -> system, classOf[String] -> bindingName)) + .recoverWith { case _: NoSuchMethodException => + Failure(new NoSuchMethodException( + s"The serializer [$fqn] for binding [$bindingName] doesn't have a matching " + + s"constructor, see API documentation of ${classOf[Serializer].getName}")) } - } - } + } + } + } } } } } - /** - * Programmatically defined serializers - */ + /** Programmatically defined serializers */ private val serializerDetails: immutable.Seq[SerializerDetails] = (system.settings.setup.get[SerializationSetup] match { case None => Vector.empty @@ -490,8 +483,8 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { private def warnUnexpectedNonAkkaSerializer(clazz: Class[_], ser: Serializer): Boolean = { val className = clazz.getName if (className.startsWith("akka.") && !ser.getClass.getName.startsWith("akka.") && - // no serializers for these in Akka - !(className.startsWith("akka.grpc") || className.startsWith("akka.http"))) { + // no serializers for these in Akka + 
!(className.startsWith("akka.grpc") || className.startsWith("akka.http"))) { log.warning( "Using serializer [{}] for message [{}]. Note that this serializer " + "is not implemented by Akka. It's not recommended to replace serializers for messages " + @@ -523,15 +516,13 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { * obeying any order between unrelated subtypes (insert sort). */ private def sort(in: Iterable[ClassSerializer]): immutable.Seq[ClassSerializer] = - (in - .foldLeft(new ArrayBuffer[ClassSerializer](in.size)) { (buf, ca) => - buf.indexWhere(_._1.isAssignableFrom(ca._1)) match { - case -1 => buf.append(ca) - case x => buf.insert(x, ca) - } - buf - }) - .to(immutable.Seq) + in.foldLeft(new ArrayBuffer[ClassSerializer](in.size)) { (buf, ca) => + buf.indexWhere(_._1.isAssignableFrom(ca._1)) match { + case -1 => buf.append(ca) + case x => buf.insert(x, ca) + } + buf + }.to(immutable.Seq) /** * serializerMap is a Map whose keys is the class that is serializable and values is the serializer @@ -540,40 +531,33 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { private val serializerMap: ConcurrentHashMap[Class[_], Serializer] = bindings.foldLeft(new ConcurrentHashMap[Class[_], Serializer]) { case (map, (c, s)) => map.put(c, s); map } - /** - * Maps from a Serializer Identity (Int) to a Serializer instance (optimization) - */ + /** Maps from a Serializer Identity (Int) to a Serializer instance (optimization) */ val serializerByIdentity: Map[Int, Serializer] = { val zero: Map[Int, Serializer] = Map(NullSerializer.identifier -> NullSerializer) - serializers.foldLeft(zero) { - case (acc, (_, ser)) => - val id = ser.identifier - acc.get(id) match { - case Some(existing) if existing != ser => - throw new IllegalArgumentException( - s"Serializer identifier [$id] of [${ser.getClass.getName}] " + - s"is not unique. 
It is also used by [${acc(id).getClass.getName}].") - case _ => - acc.updated(id, ser) - } + serializers.foldLeft(zero) { case (acc, (_, ser)) => + val id = ser.identifier + acc.get(id) match { + case Some(existing) if existing != ser => + throw new IllegalArgumentException( + s"Serializer identifier [$id] of [${ser.getClass.getName}] " + + s"is not unique. It is also used by [${acc(id).getClass.getName}].") + case _ => + acc.updated(id, ser) + } } } - /** - * Serializers with id 0 - 1023 are stored in an array for quick allocation free access - */ + /** Serializers with id 0 - 1023 are stored in an array for quick allocation free access */ private val quickSerializerByIdentity: Array[Serializer] = { val size = 1024 val table = new Array[Serializer](size) - serializerByIdentity.foreach { - case (id, ser) => if (0 <= id && id < size) table(id) = ser + serializerByIdentity.foreach { case (id, ser) => + if (0 <= id && id < size) table(id) = ser } table } - /** - * @throws `NoSuchElementException` if no serializer with given `id` - */ + /** @throws `NoSuchElementException` if no serializer with given `id` */ private def getSerializerById(id: Int): Serializer = { if (0 <= id && id < quickSerializerByIdentity.length) { quickSerializerByIdentity(id) match { @@ -593,13 +577,11 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { serializer.isInstanceOf[JavaSerializer] && !system.settings.AllowJavaSerialization } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def shouldWarnAboutJavaSerializer(serializedClass: Class[_], serializer: Serializer) = { def suppressWarningOnNonSerializationVerification(serializedClass: Class[_]) = { - //suppressed, only when warn-on-no-serialization-verification = off, and extending NoSerializationVerificationNeeded + // suppressed, only when warn-on-no-serialization-verification = off, and extending NoSerializationVerificationNeeded !isWarningOnNoVerificationEnabled && 
classOf[NoSerializationVerificationNeeded].isAssignableFrom(serializedClass) } diff --git a/akka-actor/src/main/scala/akka/serialization/SerializationSetup.scala b/akka-actor/src/main/scala/akka/serialization/SerializationSetup.scala index 2b2502c72b1..018d21bd7d9 100644 --- a/akka-actor/src/main/scala/akka/serialization/SerializationSetup.scala +++ b/akka-actor/src/main/scala/akka/serialization/SerializationSetup.scala @@ -30,9 +30,7 @@ object SerializationSetup { } -/** - * Setup for the serialization subsystem, constructor is *Internal API*, use factories in [[SerializationSetup]] - */ +/** Setup for the serialization subsystem, constructor is *Internal API*, use factories in [[SerializationSetup]] */ final class SerializationSetup private (val createSerializers: ExtendedActorSystem => immutable.Seq[SerializerDetails]) extends Setup diff --git a/akka-actor/src/main/scala/akka/serialization/Serializer.scala b/akka-actor/src/main/scala/akka/serialization/Serializer.scala index 58256b0919e..872b748ccc8 100644 --- a/akka-actor/src/main/scala/akka/serialization/Serializer.scala +++ b/akka-actor/src/main/scala/akka/serialization/Serializer.scala @@ -58,9 +58,7 @@ trait Serializer { */ def toBinary(o: AnyRef): Array[Byte] - /** - * Returns whether this serializer needs a manifest in the fromBinary method - */ + /** Returns whether this serializer needs a manifest in the fromBinary method */ def includeManifest: Boolean /** @@ -70,14 +68,10 @@ trait Serializer { @throws(classOf[NotSerializableException]) def fromBinary(bytes: Array[Byte], manifest: Option[Class[_]]): AnyRef - /** - * Java API: deserialize without type hint - */ + /** Java API: deserialize without type hint */ final def fromBinary(bytes: Array[Byte]): AnyRef = fromBinary(bytes, None) - /** - * Java API: deserialize with type hint - */ + /** Java API: deserialize with type hint */ @throws(classOf[NotSerializableException]) final def fromBinary(bytes: Array[Byte], clazz: Class[_]): AnyRef = 
fromBinary(bytes, Option(clazz)) } @@ -189,7 +183,6 @@ abstract class SerializerWithStringManifest extends Serializer { * // you need to know the maximum size in bytes of the serialized messages * val pool = new akka.io.DirectByteBufferPool(defaultBufferSize = 1024 * 1024, maxPoolEntries = 10) * - * * // Implement this method for compatibility with `SerializerWithStringManifest`. * override def toBinary(o: AnyRef): Array[Byte] = { * val buf = pool.acquire() @@ -213,9 +206,7 @@ abstract class SerializerWithStringManifest extends Serializer { //#ByteBufferSerializer trait ByteBufferSerializer { - /** - * Serializes the given object into the `ByteBuffer`. - */ + /** Serializes the given object into the `ByteBuffer`. */ def toBinary(o: AnyRef, buf: ByteBuffer): Unit /** @@ -234,9 +225,7 @@ trait ByteBufferSerializer { */ trait BaseSerializer extends Serializer { - /** - * Actor system which is required by most serializer implementations. - */ + /** Actor system which is required by most serializer implementations. */ def system: ExtendedActorSystem /** @@ -256,9 +245,7 @@ trait BaseSerializer extends Serializer { */ override val identifier: Int = identifierFromConfig - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def identifierFromConfig: Int = BaseSerializer.identifierFromConfig(getClass, system) @@ -298,9 +285,7 @@ abstract class JSerializer extends Serializer { final def fromBinary(bytes: Array[Byte], manifest: Option[Class[_]]): AnyRef = fromBinaryJava(bytes, manifest.orNull) - /** - * This method must be implemented, manifest may be null. - */ + /** This method must be implemented, manifest may be null. 
*/ protected def fromBinaryJava(bytes: Array[Byte], manifest: Class[_]): AnyRef } @@ -337,9 +322,7 @@ object JavaSerializer { } } -/** - * This Serializer uses standard Java Serialization - */ +/** This Serializer uses standard Java Serialization */ class JavaSerializer(val system: ExtendedActorSystem) extends BaseSerializer { if (!system.settings.AllowJavaSerialization) throw new DisabledJavaSerializer.JavaSerializationException( @@ -364,9 +347,7 @@ class JavaSerializer(val system: ExtendedActorSystem) extends BaseSerializer { } } -/** - * This Serializer is used when `akka.actor.java-serialization = off` - */ +/** This Serializer is used when `akka.actor.java-serialization = off` */ final case class DisabledJavaSerializer(system: ExtendedActorSystem) extends Serializer with ByteBufferSerializer { import DisabledJavaSerializer._ @@ -420,9 +401,7 @@ object DisabledJavaSerializer { "Attempted to deserialize message using Java serialization while `akka.actor.allow-java-serialization` was disabled. 
Check WARNING logs for more details.") } -/** - * This is a special Serializer that Serializes and deserializes nulls only - */ +/** This is a special Serializer that Serializes and deserializes nulls only */ class NullSerializer extends Serializer { val nullAsBytes = Array[Byte]() def includeManifest: Boolean = false diff --git a/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala b/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala index 03259bf6007..3ecc1fb6fc0 100644 --- a/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala +++ b/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala @@ -38,7 +38,7 @@ class BoundedBlockingQueue[E <: AnyRef](val maxCapacity: Int, private val backin protected def createNotEmptyCondition(): Condition = lock.newCondition() protected def createNotFullCondition(): Condition = lock.newCondition() - def put(e: E): Unit = { //Blocks until not full + def put(e: E): Unit = { // Blocks until not full if (e eq null) throw new NullPointerException lock.lockInterruptibly() @@ -56,7 +56,7 @@ class BoundedBlockingQueue[E <: AnyRef](val maxCapacity: Int, private val backin } finally lock.unlock() } - def take(): E = { //Blocks until not empty + def take(): E = { // Blocks until not empty lock.lockInterruptibly() try { @tailrec def takeElement(): E = { @@ -74,26 +74,26 @@ class BoundedBlockingQueue[E <: AnyRef](val maxCapacity: Int, private val backin } finally lock.unlock() } - def offer(e: E): Boolean = { //Tries to do it immediately, if fail return false + def offer(e: E): Boolean = { // Tries to do it immediately, if fail return false if (e eq null) throw new NullPointerException lock.lock() try { if (backing.size() == maxCapacity) false else { - require(backing.offer(e)) //Should never fail + require(backing.offer(e)) // Should never fail notEmpty.signal() true } } finally lock.unlock() } - def offer(e: E, timeout: Long, unit: TimeUnit): Boolean = { //Tries to do it within the timeout, return false 
if fail + def offer(e: E, timeout: Long, unit: TimeUnit): Boolean = { // Tries to do it within the timeout, return false if fail if (e eq null) throw new NullPointerException lock.lockInterruptibly() try { @tailrec def offerElement(remainingNanos: Long): Boolean = { if (backing.size() < maxCapacity) { - require(backing.offer(e)) //Should never fail + require(backing.offer(e)) // Should never fail notEmpty.signal() true } else if (remainingNanos <= 0) false @@ -103,7 +103,7 @@ class BoundedBlockingQueue[E <: AnyRef](val maxCapacity: Int, private val backin } finally lock.unlock() } - def poll(timeout: Long, unit: TimeUnit): E = { //Tries to do it within the timeout, returns null if fail + def poll(timeout: Long, unit: TimeUnit): E = { // Tries to do it within the timeout, returns null if fail lock.lockInterruptibly() try { @tailrec def pollElement(remainingNanos: Long): E = { @@ -120,7 +120,7 @@ class BoundedBlockingQueue[E <: AnyRef](val maxCapacity: Int, private val backin } finally lock.unlock() } - def poll(): E = { //Tries to remove the head of the queue immediately, if fail, return null + def poll(): E = { // Tries to remove the head of the queue immediately, if fail, return null lock.lock() try { backing.poll() match { @@ -132,7 +132,7 @@ class BoundedBlockingQueue[E <: AnyRef](val maxCapacity: Int, private val backin } finally lock.unlock() } - override def remove(e: AnyRef): Boolean = { //Tries to do it immediately, if fail, return false + override def remove(e: AnyRef): Boolean = { // Tries to do it immediately, if fail, return false if (e eq null) throw new NullPointerException lock.lock() try { @@ -252,7 +252,7 @@ class BoundedBlockingQueue[E <: AnyRef](val maxCapacity: Int, private val backin override def remove(): Unit = { if (last < 0) throw new IllegalStateException val target = elements(last) - last = -1 //To avoid 2 subsequent removes without a next in between + last = -1 // To avoid 2 subsequent removes without a next in between lock.lock() try { 
@tailrec def removeTarget(i: Iterator[E] = backing.iterator()): Unit = diff --git a/akka-actor/src/main/scala/akka/util/Clock.scala b/akka-actor/src/main/scala/akka/util/Clock.scala index fd4864f32de..0f7a7cd829b 100644 --- a/akka-actor/src/main/scala/akka/util/Clock.scala +++ b/akka-actor/src/main/scala/akka/util/Clock.scala @@ -20,9 +20,7 @@ import akka.actor.ExtensionIdProvider import akka.actor.Scheduler import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object Clock extends ExtensionId[Clock] with ExtensionIdProvider { override def get(system: ActorSystem): Clock = super.get(system) @@ -39,18 +37,14 @@ import akka.annotation.InternalApi } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] trait Clock extends Extension { def currentTime(): Long def earlierTime(duration: FiniteDuration): Long } -/** - * INTERNAL API: Clock backed by `System.nanoTime`. - */ +/** INTERNAL API: Clock backed by `System.nanoTime`. 
*/ @InternalApi private[akka] final class NanoClock extends Clock { override def currentTime(): Long = System.nanoTime() diff --git a/akka-actor/src/main/scala/akka/util/Collections.scala b/akka-actor/src/main/scala/akka/util/Collections.scala index ed47255eca7..99dbecf59db 100644 --- a/akka-actor/src/main/scala/akka/util/Collections.scala +++ b/akka-actor/src/main/scala/akka/util/Collections.scala @@ -7,9 +7,7 @@ package akka.util import scala.annotation.tailrec import scala.collection.immutable -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] object Collections { case object EmptyImmutableSeq extends immutable.Seq[Nothing] { @@ -36,7 +34,7 @@ private[akka] object Collections { _next = apply(potentiallyNext) _hasNext = true true - } else tailrecHasNext() //Attempt to find the next + } else tailrecHasNext() // Attempt to find the next } else _hasNext // Return if we found one } diff --git a/akka-actor/src/main/scala/akka/util/ConstantFun.scala b/akka-actor/src/main/scala/akka/util/ConstantFun.scala index 18c1e81ade6..2f78a020226 100644 --- a/akka-actor/src/main/scala/akka/util/ConstantFun.scala +++ b/akka-actor/src/main/scala/akka/util/ConstantFun.scala @@ -8,9 +8,7 @@ import akka.annotation.InternalApi import akka.japi.{ Pair => JPair } import akka.japi.function.{ Function => JFun, Function2 => JFun2 } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ConstantFun { private[this] val JavaIdentityFunction = new JFun[Any, Any] { diff --git a/akka-actor/src/main/scala/akka/util/ErrorMessages.scala b/akka-actor/src/main/scala/akka/util/ErrorMessages.scala index a41a1279fb8..96c7f395cb6 100644 --- a/akka-actor/src/main/scala/akka/util/ErrorMessages.scala +++ b/akka-actor/src/main/scala/akka/util/ErrorMessages.scala @@ -4,9 +4,7 @@ package akka.util -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] object ErrorMessages { val RemoteDeploymentConfigErrorPrefix = "configuration requested remote deployment" } diff 
--git a/akka-actor/src/main/scala/akka/util/FlightRecorderLoader.scala b/akka-actor/src/main/scala/akka/util/FlightRecorderLoader.scala index f9359c4fd62..64664682a7d 100644 --- a/akka-actor/src/main/scala/akka/util/FlightRecorderLoader.scala +++ b/akka-actor/src/main/scala/akka/util/FlightRecorderLoader.scala @@ -9,9 +9,7 @@ import scala.util.{ Failure, Success } import akka.actor.{ ClassicActorSystemProvider, ExtendedActorSystem } import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object FlightRecorderLoader { def load[T: ClassTag](casp: ClassicActorSystemProvider, fqcn: String, fallback: T): T = { diff --git a/akka-actor/src/main/scala/akka/util/FrequencyList.scala b/akka-actor/src/main/scala/akka/util/FrequencyList.scala index 0deb0fa5327..06e73c0ee6d 100644 --- a/akka-actor/src/main/scala/akka/util/FrequencyList.scala +++ b/akka-actor/src/main/scala/akka/util/FrequencyList.scala @@ -9,9 +9,7 @@ import scala.concurrent.duration.FiniteDuration import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object FrequencyList { def empty[A](dynamicAging: Boolean = false): FrequencyList[A] = diff --git a/akka-actor/src/main/scala/akka/util/FrequencySketch.scala b/akka-actor/src/main/scala/akka/util/FrequencySketch.scala index 8a670b9cdf1..78d1c3172bb 100644 --- a/akka-actor/src/main/scala/akka/util/FrequencySketch.scala +++ b/akka-actor/src/main/scala/akka/util/FrequencySketch.scala @@ -66,7 +66,7 @@ private[akka] object FrequencySketch { * INTERNAL API * * A frequency sketch for estimating the popularity of items. For implementing the TinyLFU cache admission policy. - + * * This is a generalised frequency sketch with configurable depth (number of hash functions) and counter size. 
* * The matrix of counters is a two-dimensional array of longs, which each hold multiple counters depending on the @@ -128,9 +128,7 @@ private[akka] final class FrequencySketch[A]( private[this] val rowSizes = Array.ofDim[Int](depth) private[this] var updatedSize = 0 - /** - * Get the current size of the sketch (the number of incremented counters). - */ + /** Get the current size of the sketch (the number of incremented counters). */ def size: Int = updatedSize /** @@ -214,9 +212,7 @@ private[akka] final class FrequencySketch[A]( def toDebugString: String = FrequencySketchUtil.debugString(matrix, rowWidth, slots, counterWidth, counterMask) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object FastFrequencySketch { @@ -239,7 +235,7 @@ private[akka] object FastFrequencySketch { * INTERNAL API * * A faster implementation of the frequency sketch (around twice as fast). - + * * This frequency sketch uses a fixed depth (number of hash functions) of 4 and a counter size of 4 bits (0-15), * so that constants can be used for improved efficiency. It also uses its own rehashing of item hash codes. 
* @@ -255,17 +251,17 @@ private[akka] final class FastFrequencySketch[A](width: Int, resetSize: Int) { private final val Depth = 4 private final val SlotShift = 4 - private final val SlotMask = 0xF + private final val SlotMask = 0xf private final val CounterShift = 2 - private final val CounterMask = 0xFL + private final val CounterMask = 0xfL private final val OddMask = 0x1111111111111111L private final val ResetMask = 0x7777777777777777L // seeds are large primes between 2^63 and 2^64 - private final val Seed0 = 0xC3A5C85C97CB3127L - private final val Seed1 = 0xB492B66FBE98F273L - private final val Seed2 = 0x9AE16A3B2F90404FL - private final val Seed3 = 0xCBF29CE484222325L + private final val Seed0 = 0xc3a5c85c97cb3127L + private final val Seed1 = 0xb492b66fbe98f273L + private final val Seed2 = 0x9ae16a3b2f90404fL + private final val Seed3 = 0xcbf29ce484222325L private[this] val rowWidth = math.max(1, width >>> SlotShift) private[this] val indexMask = width - 1 @@ -348,15 +344,11 @@ private[akka] final class FastFrequencySketch[A](width: Int, resetSize: Int) { FrequencySketchUtil.debugString(matrix, rowWidth, slots = 16, counterWidth = 4, CounterMask) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object FrequencySketchUtil { - /** - * Create a pretty table with all the frequency sketch counters for debugging (smaller) sketches. - */ + /** Create a pretty table with all the frequency sketch counters for debugging (smaller) sketches. 
*/ def debugString( matrix: Array[Array[Long]], rowWidth: Int, diff --git a/akka-actor/src/main/scala/akka/util/HashCode.scala b/akka-actor/src/main/scala/akka/util/HashCode.scala index c76a8a754f0..0d91b914bdd 100644 --- a/akka-actor/src/main/scala/akka/util/HashCode.scala +++ b/akka-actor/src/main/scala/akka/util/HashCode.scala @@ -4,7 +4,7 @@ package akka.util -import java.lang.{ Float => JFloat, Double => JDouble } +import java.lang.{ Double => JDouble, Float => JFloat } import java.lang.reflect.{ Array => JArray } /** @@ -41,7 +41,9 @@ object HashCode { else for (id <- 0 until JArray.getLength(value)) result = hash(result, JArray.get(value, id)) // is an array result case unexpected => - throw new IllegalArgumentException(s"Unexpected hash parameter: $unexpected") // will not happen, for exhaustiveness check + throw new IllegalArgumentException( + s"Unexpected hash parameter: $unexpected" + ) // will not happen, for exhaustiveness check } def hash(seed: Int, value: Boolean): Int = firstTerm(seed) + (if (value) 1 else 0) def hash(seed: Int, value: Char): Int = firstTerm(seed) + value.asInstanceOf[Int] diff --git a/akka-actor/src/main/scala/akka/util/Helpers.scala b/akka-actor/src/main/scala/akka/util/Helpers.scala index 585e5517281..917bb86685c 100644 --- a/akka-actor/src/main/scala/akka/util/Helpers.scala +++ b/akka-actor/src/main/scala/akka/util/Helpers.scala @@ -32,7 +32,7 @@ object Helpers { * that the ordering is actually consistent and you cannot have a * sequence which cyclically is monotone without end. 
*/ - val diff = ((System.identityHashCode(a) & 0XFFFFFFFFL) - (System.identityHashCode(b) & 0XFFFFFFFFL)) + val diff = (System.identityHashCode(a) & 0xffffffffL) - (System.identityHashCode(b) & 0xffffffffL) if (diff > 0) 1 else if (diff < 0) -1 else 0 } @@ -149,9 +149,7 @@ object Helpers { } } - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] final implicit class ConfigOps(val config: Config) extends AnyVal { def getMillisDuration(path: String): FiniteDuration = getDuration(path, TimeUnit.MILLISECONDS) diff --git a/akka-actor/src/main/scala/akka/util/ImmutableIntMap.scala b/akka-actor/src/main/scala/akka/util/ImmutableIntMap.scala index e4945e6f214..a196cda52f1 100644 --- a/akka-actor/src/main/scala/akka/util/ImmutableIntMap.scala +++ b/akka-actor/src/main/scala/akka/util/ImmutableIntMap.scala @@ -9,9 +9,7 @@ import scala.annotation.tailrec import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ImmutableIntMap { final val empty: ImmutableIntMap = new ImmutableIntMap(Array.emptyIntArray, 0) } @@ -39,7 +37,8 @@ import akka.annotation.InternalApi @tailrec def find(lo: Int, hi: Int): Int = if (lo <= hi) { val lohi = lo + hi // Since we search in half the array we don't need to div by 2 to find the real index of key - val idx = lohi & ~1 // Since keys are in even slots, we get the key idx from lo+hi by removing the lowest bit if set (odd) + val idx = + lohi & ~1 // Since keys are in even slots, we get the key idx from lo+hi by removing the lowest bit if set (odd) val k = kvs(idx) if (k == key) idx else if (k < key) find((lohi >>> 1) + 1, hi) @@ -58,7 +57,9 @@ import akka.annotation.InternalApi @tailrec def find(lo: Int, hi: Int): Int = if (lo <= hi) { val lohi = lo + hi // Since we search in half the array we don't need to div by 2 to find the real index of key - val k = kvs(lohi & ~1) // Since keys are in even slots, we get the key idx from lo+hi by removing the lowest bit if set (odd) + val k 
= kvs( + lohi & ~1 + ) // Since keys are in even slots, we get the key idx from lo+hi by removing the lowest bit if set (odd) if (k == key) kvs(lohi | 1) // lohi, if odd, already points to the value-index, if even, we set the lowest bit to add 1 else if (k < key) find((lohi >>> 1) + 1, hi) @@ -68,9 +69,7 @@ import akka.annotation.InternalApi find(0, size - 1) } - /** - * Worst case `O(log n)`, allocation free. - */ + /** Worst case `O(log n)`, allocation free. */ final def contains(key: Int): Boolean = indexForKey(key) >= 0 /** @@ -131,9 +130,7 @@ import akka.annotation.InternalApi } else this } - /** - * All keys - */ + /** All keys */ final def keysIterator: Iterator[Int] = if (size < 1) Iterator.empty else Iterator.range(0, kvs.length - 1, 2).map(kvs.apply) diff --git a/akka-actor/src/main/scala/akka/util/Index.scala b/akka-actor/src/main/scala/akka/util/Index.scala index 47a6ddbdd46..2b6ca1d366a 100644 --- a/akka-actor/src/main/scala/akka/util/Index.scala +++ b/akka-actor/src/main/scala/akka/util/Index.scala @@ -32,7 +32,7 @@ class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) { * @return true if the value didn't exist for the key previously, and false otherwise */ def put(key: K, value: V): Boolean = { - //Tailrecursive spin-locking put + // Tailrecursive spin-locking put @tailrec def spinPut(k: K, v: V): Boolean = { var retry = false @@ -41,8 +41,8 @@ class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) { if (set ne null) { set.synchronized { - if (set.isEmpty) retry = true //IF the set is empty then it has been removed, so signal retry - else { //Else add the value to the set and signal that retry is not needed + if (set.isEmpty) retry = true // IF the set is empty then it has been removed, so signal retry + else { // Else add the value to the set and signal that retry is not needed added = set.add(v) retry = false } @@ -55,8 +55,8 @@ class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) { val oldSet = 
container.putIfAbsent(k, newSet) if (oldSet ne null) { oldSet.synchronized { - if (oldSet.isEmpty) retry = true //IF the set is empty then it has been removed, so signal retry - else { //Else try to add the value to the set and signal that retry is not needed + if (oldSet.isEmpty) retry = true // IF the set is empty then it has been removed, so signal retry + else { // Else try to add the value to the set and signal that retry is not needed added = oldSet.add(v) retry = false } @@ -81,9 +81,7 @@ class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) { case set => set.iterator.asScala.find(f) } - /** - * Returns an Iterator of V containing the values for the supplied key, or an empty iterator if the key doesn't exist - */ + /** Returns an Iterator of V containing the values for the supplied key, or an empty iterator if the key doesn't exist */ def valueIterator(key: K): scala.Iterator[V] = { container.get(key) match { case null => Iterator.empty @@ -91,17 +89,13 @@ class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) { } } - /** - * Applies the supplied function to all keys and their values - */ + /** Applies the supplied function to all keys and their values */ def foreach(fun: (K, V) => Unit): Unit = container.entrySet.iterator.asScala.foreach { e => e.getValue.iterator.asScala.foreach(fun(e.getKey, _)) } - /** - * Returns the union of all value sets. - */ + /** Returns the union of all value sets. */ def values: Set[V] = { val builder = Set.newBuilder[V] for { @@ -111,9 +105,7 @@ class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) { builder.result() } - /** - * Returns the key set. - */ + /** Returns the key set. 
*/ def keys: Iterable[K] = container.keySet.asScala /** @@ -125,14 +117,14 @@ class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) { if (set ne null) { set.synchronized { - if (set.remove(value)) { //If we can remove the value - if (set.isEmpty) //and the set becomes empty - container.remove(key, emptySet) //We try to remove the key if it's mapped to an empty set + if (set.remove(value)) { // If we can remove the value + if (set.isEmpty) // and the set becomes empty + container.remove(key, emptySet) // We try to remove the key if it's mapped to an empty set - true //Remove succeeded - } else false //Remove failed + true // Remove succeeded + } else false // Remove failed } - } else false //Remove failed + } else false // Remove failed } /** @@ -146,16 +138,15 @@ class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) { set.synchronized { container.remove(key, set) @nowarn("msg=deprecated") - val ret = collectionAsScalaIterableConverter(set.clone()).asScala // Make copy since we need to clear the original + val ret = + collectionAsScalaIterableConverter(set.clone()).asScala // Make copy since we need to clear the original set.clear() // Clear the original set to signal to any pending writers that there was a conflict Some(ret) } - } else None //Remove failed + } else None // Remove failed } - /** - * Removes the specified value from all keys - */ + /** Removes the specified value from all keys */ def removeValue(value: V): Unit = { val i = container.entrySet().iterator() while (i.hasNext) { @@ -164,23 +155,19 @@ class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) { if (set ne null) { set.synchronized { - if (set.remove(value)) { //If we can remove the value - if (set.isEmpty) //and the set becomes empty - container.remove(e.getKey, emptySet) //We try to remove the key if it's mapped to an empty set + if (set.remove(value)) { // If we can remove the value + if (set.isEmpty) // and the set becomes empty + 
container.remove(e.getKey, emptySet) // We try to remove the key if it's mapped to an empty set } } } } } - /** - * @return true if the underlying containers is empty, may report false negatives when the last remove is underway - */ + /** @return true if the underlying containers is empty, may report false negatives when the last remove is underway */ def isEmpty: Boolean = container.isEmpty - /** - * Removes all keys and all values - */ + /** Removes all keys and all values */ def clear(): Unit = { val i = container.entrySet().iterator() while (i.hasNext) { diff --git a/akka-actor/src/main/scala/akka/util/JavaDurationConverters.scala b/akka-actor/src/main/scala/akka/util/JavaDurationConverters.scala index 6b87ce7712f..14249c8154c 100644 --- a/akka-actor/src/main/scala/akka/util/JavaDurationConverters.scala +++ b/akka-actor/src/main/scala/akka/util/JavaDurationConverters.scala @@ -9,9 +9,7 @@ import scala.concurrent.duration.{ Duration, FiniteDuration } import akka.annotation.InternalStableApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalStableApi private[akka] object JavaDurationConverters { def asFiniteDuration(duration: JDuration): FiniteDuration = duration.asScala diff --git a/akka-actor/src/main/scala/akka/util/JavaVersion.scala b/akka-actor/src/main/scala/akka/util/JavaVersion.scala index 4943677ec39..9465701b29d 100644 --- a/akka-actor/src/main/scala/akka/util/JavaVersion.scala +++ b/akka-actor/src/main/scala/akka/util/JavaVersion.scala @@ -6,9 +6,7 @@ package akka.util import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object JavaVersion { val majorVersion: Int = { diff --git a/akka-actor/src/main/scala/akka/util/LineNumbers.scala b/akka-actor/src/main/scala/akka/util/LineNumbers.scala index 79b4b2c7ee8..b07481b9ad2 100644 --- a/akka-actor/src/main/scala/akka/util/LineNumbers.scala +++ b/akka-actor/src/main/scala/akka/util/LineNumbers.scala @@ -278,8 +278,8 @@ object LineNumbers { (1 to 
count) .map(_ => readMethod(d, c("Code"), c("LineNumberTable"), filter)) .flatten - .foldLeft(Int.MaxValue -> 0) { - case ((low, high), (start, end)) => (Math.min(low, start), Math.max(high, end)) + .foldLeft(Int.MaxValue -> 0) { case ((low, high), (start, end)) => + (Math.min(low, start), Math.max(high, end)) } match { case (Int.MaxValue, 0) => None case other => Some(other) @@ -291,8 +291,8 @@ object LineNumbers { } } - private def readMethod(d: DataInputStream, codeTag: Int, lineNumberTableTag: Int, filter: Option[String])( - implicit c: Constants): Option[(Int, Int)] = { + private def readMethod(d: DataInputStream, codeTag: Int, lineNumberTableTag: Int, filter: Option[String])(implicit + c: Constants): Option[(Int, Int)] = { skip(d, 2) // access flags val name = d.readUnsignedShort() // name skip(d, 2) // signature diff --git a/akka-actor/src/main/scala/akka/util/LockUtil.scala b/akka-actor/src/main/scala/akka/util/LockUtil.scala index 4251bcfbb54..c0226631318 100644 --- a/akka-actor/src/main/scala/akka/util/LockUtil.scala +++ b/akka-actor/src/main/scala/akka/util/LockUtil.scala @@ -17,9 +17,7 @@ final class ReentrantGuard extends ReentrantLock { } } -/** - * An atomic switch that can be either on or off - */ +/** An atomic switch that can be either on or off */ class Switch(startAsOn: Boolean = false) { private val switch = new AtomicBoolean(startAsOn) // FIXME switch to AQS @@ -49,29 +47,19 @@ class Switch(startAsOn: Boolean = false) { */ def switchOn(action: => Unit): Boolean = transcend(from = false, action) - /** - * Switches the switch off (if on), uses locking - */ + /** Switches the switch off (if on), uses locking */ def switchOff: Boolean = synchronized { switch.compareAndSet(true, false) } - /** - * Switches the switch on (if off), uses locking - */ + /** Switches the switch on (if off), uses locking */ def switchOn: Boolean = synchronized { switch.compareAndSet(false, true) } - /** - * Executes the provided action and returns its value if the switch 
is IMMEDIATELY on (i.e. no lock involved) - */ + /** Executes the provided action and returns its value if the switch is IMMEDIATELY on (i.e. no lock involved) */ def ifOnYield[T](action: => T): Option[T] = if (switch.get) Some(action) else None - /** - * Executes the provided action and returns its value if the switch is IMMEDIATELY off (i.e. no lock involved) - */ + /** Executes the provided action and returns its value if the switch is IMMEDIATELY off (i.e. no lock involved) */ def ifOffYield[T](action: => T): Option[T] = if (!switch.get) Some(action) else None - /** - * Executes the provided action and returns if the action was executed or not, if the switch is IMMEDIATELY on (i.e. no lock involved) - */ + /** Executes the provided action and returns if the action was executed or not, if the switch is IMMEDIATELY on (i.e. no lock involved) */ def ifOn(action: => Unit): Boolean = { if (switch.get) { action @@ -79,9 +67,7 @@ class Switch(startAsOn: Boolean = false) { } else false } - /** - * Executes the provided action and returns if the action was executed or not, if the switch is IMMEDIATELY off (i.e. no lock involved) - */ + /** Executes the provided action and returns if the action was executed or not, if the switch is IMMEDIATELY off (i.e. no lock involved) */ def ifOff(action: => Unit): Boolean = { if (!switch.get) { action @@ -129,18 +115,12 @@ class Switch(startAsOn: Boolean = false) { */ def fold[T](on: => T)(off: => T): T = synchronized { if (switch.get) on else off } - /** - * Executes the given code while holding this switch’s lock, i.e. protected from concurrent modification of the switch status. - */ + /** Executes the given code while holding this switch’s lock, i.e. protected from concurrent modification of the switch status. 
*/ def locked[T](code: => T): T = synchronized { code } - /** - * Returns whether the switch is IMMEDIATELY on (no locking) - */ + /** Returns whether the switch is IMMEDIATELY on (no locking) */ def isOn: Boolean = switch.get - /** - * Returns whether the switch is IMMEDIATELY off (no locking) - */ + /** Returns whether the switch is IMMEDIATELY off (no locking) */ def isOff: Boolean = !isOn } diff --git a/akka-actor/src/main/scala/akka/util/ManifestInfo.scala b/akka-actor/src/main/scala/akka/util/ManifestInfo.scala index f68b8fca5da..ab06a707411 100644 --- a/akka-actor/src/main/scala/akka/util/ManifestInfo.scala +++ b/akka-actor/src/main/scala/akka/util/ManifestInfo.scala @@ -47,9 +47,7 @@ object ManifestInfo extends ExtensionId[ManifestInfo] with ExtensionIdProvider { override def createExtension(system: ExtendedActorSystem): ManifestInfo = new ManifestInfo(system) - /** - * Comparable version information - */ + /** Comparable version information */ final class Version(val version: String) extends Comparable[Version] { private val impl = new akka.util.Version(version) @@ -105,9 +103,7 @@ object ManifestInfo extends ExtensionId[ManifestInfo] with ExtensionIdProvider { final class ManifestInfo(val system: ExtendedActorSystem) extends Extension { import ManifestInfo._ - /** - * Versions of artifacts from known vendors. - */ + /** Versions of artifacts from known vendors. 
*/ val versions: Map[String, Version] = { var manifests = Map.empty[String, Version] @@ -133,9 +129,9 @@ final class ManifestInfo(val system: ExtendedActorSystem) extends Extension { } if (title != null - && version != null - && vendor != null - && knownVendors(vendor)) { + && version != null + && vendor != null + && knownVendors(vendor)) { manifests = manifests.updated(title, new Version(version)) } } finally { diff --git a/akka-actor/src/main/scala/akka/util/MessageBuffer.scala b/akka-actor/src/main/scala/akka/util/MessageBuffer.scala index 1fd87e76537..869d2c09443 100644 --- a/akka-actor/src/main/scala/akka/util/MessageBuffer.scala +++ b/akka-actor/src/main/scala/akka/util/MessageBuffer.scala @@ -8,9 +8,7 @@ import akka.actor.{ ActorRef, Dropped } import akka.annotation.InternalApi import akka.japi.function.Procedure2 -/** - * A non thread safe mutable message buffer that can be used to buffer messages inside actors. - */ +/** A non thread safe mutable message buffer that can be used to buffer messages inside actors. */ final class MessageBuffer private (private var _head: MessageBuffer.Node, private var _tail: MessageBuffer.Node) { import MessageBuffer._ @@ -57,9 +55,7 @@ final class MessageBuffer private (private var _head: MessageBuffer.Node, privat this } - /** - * Remove the first element of the message buffer. - */ + /** Remove the first element of the message buffer. 
*/ def dropHead(): Unit = if (nonEmpty) { _head = _head.next _size -= 1 @@ -111,9 +107,7 @@ final class MessageBuffer private (private var _head: MessageBuffer.Node, privat */ def forEach(f: Procedure2[Any, ActorRef]): Unit = foreach { case (message, ref) => f(message, ref) } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def filterNot(p: (Any, ActorRef) => Boolean): Unit = { // easiest to collect a new list, and then re-link the nodes var result = Vector.empty[Node] @@ -219,9 +213,7 @@ final class MessageBufferMap[I] { } else buffer } - /** - * Add an id to the buffer map - */ + /** Add an id to the buffer map */ def add(id: I): Unit = { getOrAddBuffer(id) } diff --git a/akka-actor/src/main/scala/akka/util/OptionVal.scala b/akka-actor/src/main/scala/akka/util/OptionVal.scala index 1703468c400..4190fb6c240 100644 --- a/akka-actor/src/main/scala/akka/util/OptionVal.scala +++ b/akka-actor/src/main/scala/akka/util/OptionVal.scala @@ -6,9 +6,7 @@ package akka.util import akka.annotation.InternalStableApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalStableApi private[akka] object OptionVal { @@ -21,9 +19,7 @@ private[akka] object OptionVal { def none[A]: OptionVal[A] = None.asInstanceOf[OptionVal[A]] - /** - * Represents non-existent values, `null` values. - */ + /** Represents non-existent values, `null` values. */ val None = new OptionVal[Null](null) } @@ -39,15 +35,11 @@ private[akka] object OptionVal { @InternalStableApi private[akka] final class OptionVal[+A](val x: A) extends AnyVal { - /** - * Returns true if the option is `OptionVal.None`, false otherwise. - */ + /** Returns true if the option is `OptionVal.None`, false otherwise. */ def isEmpty: Boolean = x == null - /** - * Returns false if the option is `OptionVal.None`, true otherwise. - */ + /** Returns false if the option is `OptionVal.None`, true otherwise. 
*/ def isDefined: Boolean = !isEmpty /** @@ -57,18 +49,14 @@ private[akka] final class OptionVal[+A](val x: A) extends AnyVal { def getOrElse[B >: A](default: B): B = if (x == null) default else x - /** - * Convert to `scala.Option` - */ + /** Convert to `scala.Option` */ def toOption: Option[A] = Option(x) def contains[B >: A](it: B): Boolean = x != null && x == it - /** - * Returns the option's value if it is nonempty, or `null` if it is empty. - */ + /** Returns the option's value if it is nonempty, or `null` if it is empty. */ def orNull[A1 >: A](implicit ev: Null <:< A1): A1 = this.getOrElse(ev(null)) /** diff --git a/akka-actor/src/main/scala/akka/util/PrettyDuration.scala b/akka-actor/src/main/scala/akka/util/PrettyDuration.scala index 431afbe3f50..02b62d941a7 100644 --- a/akka-actor/src/main/scala/akka/util/PrettyDuration.scala +++ b/akka-actor/src/main/scala/akka/util/PrettyDuration.scala @@ -26,7 +26,7 @@ private[akka] object PrettyDuration { implicit class PrettyPrintableDuration(val duration: Duration) extends AnyVal { - /** Selects most appropriate TimeUnit for given duration and formats it accordingly, with 4 digits precision **/ + /** Selects most appropriate TimeUnit for given duration and formats it accordingly, with 4 digits precision * */ def pretty: String = pretty(includeNanos = false) /** Selects most appropriate TimeUnit for given duration and formats it accordingly */ diff --git a/akka-actor/src/main/scala/akka/util/RecencyList.scala b/akka-actor/src/main/scala/akka/util/RecencyList.scala index 63ddd598909..7a9547d0b92 100644 --- a/akka-actor/src/main/scala/akka/util/RecencyList.scala +++ b/akka-actor/src/main/scala/akka/util/RecencyList.scala @@ -10,9 +10,7 @@ import scala.concurrent.duration.FiniteDuration import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object RecencyList { def emptyWithNanoClock[A]: RecencyList[A] = diff --git a/akka-actor/src/main/scala/akka/util/Reflect.scala 
b/akka-actor/src/main/scala/akka/util/Reflect.scala index 318576a575d..6324f161bc7 100644 --- a/akka-actor/src/main/scala/akka/util/Reflect.scala +++ b/akka-actor/src/main/scala/akka/util/Reflect.scala @@ -97,10 +97,9 @@ private[akka] object Reflect { clazz.getDeclaredConstructors.asInstanceOf[Array[Constructor[T]]].iterator.filter { c => val parameterTypes = c.getParameterTypes parameterTypes.length == length && - (parameterTypes.iterator.zip(args.iterator).forall { - case (found, required) => - found.isInstance(required) || BoxedType(found).isInstance(required) || - (required == null && !found.isPrimitive) + (parameterTypes.iterator.zip(args.iterator).forall { case (found, required) => + found.isInstance(required) || BoxedType(found).isInstance(required) || + (required == null && !found.isPrimitive) }) } if (candidates.hasNext) { @@ -132,8 +131,8 @@ private[akka] object Reflect { case c: Class[_] if marker.isAssignableFrom(c) => c case t: ParameterizedType if marker.isAssignableFrom(t.getRawType.asInstanceOf[Class[_]]) => t } match { - case None => throw new IllegalArgumentException(s"cannot find [$marker] in ancestors of [$root]") - case Some(c: Class[_]) => if (c == marker) c else rec(c) + case None => throw new IllegalArgumentException(s"cannot find [$marker] in ancestors of [$root]") + case Some(c: Class[_]) => if (c == marker) c else rec(c) case Some(t: ParameterizedType) => if (t.getRawType == marker) t else rec(t.getRawType.asInstanceOf[Class[_]]) case _ => ??? 
// cannot happen due to collectFirst } @@ -141,9 +140,7 @@ private[akka] object Reflect { rec(root) } - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] def findClassLoader(): ClassLoader = { def findCaller(get: Int => Class[_]): ClassLoader = Iterator diff --git a/akka-actor/src/main/scala/akka/util/SegmentedRecencyList.scala b/akka-actor/src/main/scala/akka/util/SegmentedRecencyList.scala index 8e0e1926579..d4ed738ab46 100644 --- a/akka-actor/src/main/scala/akka/util/SegmentedRecencyList.scala +++ b/akka-actor/src/main/scala/akka/util/SegmentedRecencyList.scala @@ -9,9 +9,7 @@ import scala.concurrent.duration.FiniteDuration import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object SegmentedRecencyList { def empty[A](limits: immutable.Seq[Int]): SegmentedRecencyList[A] = diff --git a/akka-actor/src/main/scala/akka/util/StablePriorityQueue.scala b/akka-actor/src/main/scala/akka/util/StablePriorityQueue.scala index bcf4fd406d1..dc8f917c3a7 100644 --- a/akka-actor/src/main/scala/akka/util/StablePriorityQueue.scala +++ b/akka-actor/src/main/scala/akka/util/StablePriorityQueue.scala @@ -8,9 +8,7 @@ import java.util.{ AbstractQueue, Comparator, Iterator, PriorityQueue } import java.util.concurrent.PriorityBlockingQueue import java.util.concurrent.atomic.AtomicLong -/** - * PriorityQueueStabilizer wraps a priority queue so that it respects FIFO for elements of equal priority. - */ +/** PriorityQueueStabilizer wraps a priority queue so that it respects FIFO for elements of equal priority. 
*/ trait PriorityQueueStabilizer[E <: AnyRef] extends AbstractQueue[E] { val backingQueue: AbstractQueue[PriorityQueueStabilizer.WrappedElement[E]] val seqNum = new AtomicLong(0) diff --git a/akka-actor/src/main/scala/akka/util/SubclassifiedIndex.scala b/akka-actor/src/main/scala/akka/util/SubclassifiedIndex.scala index 9727c84bd2e..df3e835b042 100644 --- a/akka-actor/src/main/scala/akka/util/SubclassifiedIndex.scala +++ b/akka-actor/src/main/scala/akka/util/SubclassifiedIndex.scala @@ -8,26 +8,20 @@ import scala.collection.immutable import akka.util.ccompat._ -/** - * Typeclass which describes a classification hierarchy. Observe the contract between `isEqual` and `isSubclass`! - */ +/** Typeclass which describes a classification hierarchy. Observe the contract between `isEqual` and `isSubclass`! */ trait Subclassification[K] { - /** - * True if and only if x and y are of the same class. - */ + /** True if and only if x and y are of the same class. */ def isEqual(x: K, y: K): Boolean - /** - * True if and only if x is a subclass of y; equal classes must be considered sub-classes! - */ + /** True if and only if x is a subclass of y; equal classes must be considered sub-classes! 
*/ def isSubclass(x: K, y: K): Boolean } private[akka] object SubclassifiedIndex { - class Nonroot[K, V](override val root: SubclassifiedIndex[K, V], val key: K, _values: Set[V])( - implicit sc: Subclassification[K]) + class Nonroot[K, V](override val root: SubclassifiedIndex[K, V], val key: K, _values: Set[V])(implicit + sc: Subclassification[K]) extends SubclassifiedIndex[K, V](_values) { override def innerAddValue(key: K, value: V): Changes = { @@ -148,8 +142,8 @@ private[akka] class SubclassifiedIndex[K, V] private (protected var values: Set[ // the reason for not using the values in the returned diff is that we need to // go through the whole tree to find all values for the "changed" keys in other // parts of the tree as well, since new nodes might have been created - mergeChangesByKey(innerRemoveValue(key, value)).map { - case (k, _) => (k, findValues(k)) + mergeChangesByKey(innerRemoveValue(key, value)).map { case (k, _) => + (k, findValues(k)) } // this will return the keys and values to be removed from the cache @@ -174,9 +168,7 @@ private[akka] class SubclassifiedIndex[K, V] private (protected var values: Set[ */ def removeValue(value: V): Changes = mergeChangesByKey(subkeys.flatMap(_.removeValue(value))) - /** - * Find all values for a given key in the index. - */ + /** Find all values for a given key in the index. */ protected final def findValues(key: K): Set[V] = root.innerFindValues(key) protected def innerFindValues(key: K): Set[V] = subkeys.foldLeft(Set.empty[V]) { (s, n) => @@ -186,9 +178,7 @@ private[akka] class SubclassifiedIndex[K, V] private (protected var values: Set[ s } - /** - * Find all subkeys of a given key in the index excluding some subkeys. - */ + /** Find all subkeys of a given key in the index excluding some subkeys. 
*/ protected final def findSubKeysExcept(key: K, except: Vector[Nonroot[K, V]]): Set[K] = root.innerFindSubKeys(key, except) protected def innerFindSubKeys(key: K, except: Vector[Nonroot[K, V]]): Set[K] = @@ -219,8 +209,8 @@ private[akka] class SubclassifiedIndex[K, V] private (protected var values: Set[ private def mergeChangesByKey(changes: Changes): Changes = changes - .foldLeft(emptyMergeMap[K, V]) { - case (m, (k, s)) => m.updated(k, m(k) ++ s) + .foldLeft(emptyMergeMap[K, V]) { case (m, (k, s)) => + m.updated(k, m(k) ++ s) } .to(immutable.Seq) } diff --git a/akka-actor/src/main/scala/akka/util/Timeout.scala b/akka-actor/src/main/scala/akka/util/Timeout.scala index fedbddffbee..e05afa2ba16 100644 --- a/akka-actor/src/main/scala/akka/util/Timeout.scala +++ b/akka-actor/src/main/scala/akka/util/Timeout.scala @@ -13,30 +13,20 @@ import language.implicitConversions @SerialVersionUID(1L) case class Timeout(duration: FiniteDuration) { - /** - * Construct a Timeout from the given time unit and factor. - */ + /** Construct a Timeout from the given time unit and factor. */ def this(length: Long, unit: TimeUnit) = this(Duration(length, unit)) } -/** - * A Timeout is a wrapper on top of Duration to be more specific about what the duration means. - */ +/** A Timeout is a wrapper on top of Duration to be more specific about what the duration means. */ object Timeout { - /** - * A timeout with zero duration, will cause most requests to always timeout. - */ + /** A timeout with zero duration, will cause most requests to always timeout. */ val zero: Timeout = new Timeout(Duration.Zero) - /** - * Construct a Timeout from the given time unit and factor. - */ + /** Construct a Timeout from the given time unit and factor. */ def apply(length: Long, unit: TimeUnit): Timeout = new Timeout(length, unit) - /** - * Create a Timeout from java.time.Duration. - */ + /** Create a Timeout from java.time.Duration. 
*/ def create(duration: java.time.Duration): Timeout = { import JavaDurationConverters._ new Timeout(duration.asScala) diff --git a/akka-actor/src/main/scala/akka/util/TokenBucket.scala b/akka-actor/src/main/scala/akka/util/TokenBucket.scala index 43f30bf260e..02408511720 100644 --- a/akka-actor/src/main/scala/akka/util/TokenBucket.scala +++ b/akka-actor/src/main/scala/akka/util/TokenBucket.scala @@ -4,9 +4,7 @@ package akka.util -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] abstract class TokenBucket(capacity: Long, nanosBetweenTokens: Long) { require(capacity >= 0, "Capacity must be non-negative.") require(nanosBetweenTokens > 0, "Time between tokens must be larger than zero nanoseconds.") @@ -14,9 +12,7 @@ private[akka] abstract class TokenBucket(capacity: Long, nanosBetweenTokens: Lon private[this] var availableTokens: Long = _ private[this] var lastUpdate: Long = _ - /** - * This method must be called before the token bucket can be used. - */ + /** This method must be called before the token bucket can be used. */ def init(): Unit = { availableTokens = capacity lastUpdate = currentTime @@ -81,9 +77,7 @@ private[akka] abstract class TokenBucket(capacity: Long, nanosBetweenTokens: Lon } -/** - * Default implementation of [[TokenBucket]] that uses `System.nanoTime` as the time source. - */ +/** Default implementation of [[TokenBucket]] that uses `System.nanoTime` as the time source. 
*/ final class NanoTimeTokenBucket(_cap: Long, _period: Long) extends TokenBucket(_cap, _period) { override def currentTime: Long = System.nanoTime() } diff --git a/akka-actor/src/main/scala/akka/util/TypedMultiMap.scala b/akka-actor/src/main/scala/akka/util/TypedMultiMap.scala index 63ff2da68e3..6bae4e4c60b 100644 --- a/akka-actor/src/main/scala/akka/util/TypedMultiMap.scala +++ b/akka-actor/src/main/scala/akka/util/TypedMultiMap.scala @@ -31,14 +31,10 @@ package akka.util */ class TypedMultiMap[T <: AnyRef, K[_ <: T]] private (private val map: Map[T, Set[Any]]) { - /** - * Return the set of keys which are mapped to non-empty value sets. - */ + /** Return the set of keys which are mapped to non-empty value sets. */ def keySet: Set[T] = map.keySet - /** - * Return a map that has the given value added to the mappings for the given key. - */ + /** Return a map that has the given value added to the mappings for the given key. */ def inserted(key: T)(value: K[key.type]): TypedMultiMap[T, K] = { val set = map.get(key) match { case Some(s) => s @@ -47,18 +43,14 @@ class TypedMultiMap[T <: AnyRef, K[_ <: T]] private (private val map: Map[T, Set new TypedMultiMap[T, K](map.updated(key, set + value)) } - /** - * Obtain all mappings for the given key. - */ + /** Obtain all mappings for the given key. */ def get(key: T): Set[K[key.type]] = map.get(key) match { case Some(s) => s.asInstanceOf[Set[K[key.type]]] case None => Set.empty } - /** - * Return a map that has the given value removed from all keys. - */ + /** Return a map that has the given value removed from all keys. */ def valueRemoved(value: Any): TypedMultiMap[T, K] = { val s = Set(value) val m = map.collect { @@ -67,14 +59,10 @@ class TypedMultiMap[T <: AnyRef, K[_ <: T]] private (private val map: Map[T, Set new TypedMultiMap[T, K](m) } - /** - * Return a map that has all mappings for the given key removed. - */ + /** Return a map that has all mappings for the given key removed. 
*/ def keyRemoved(key: T): TypedMultiMap[T, K] = new TypedMultiMap[T, K](map - key) - /** - * Return a map that has the given mapping from the given key removed. - */ + /** Return a map that has the given mapping from the given key removed. */ def removed(key: T)(value: K[key.type]): TypedMultiMap[T, K] = { map.get(key) match { case None => this @@ -109,8 +97,6 @@ class TypedMultiMap[T <: AnyRef, K[_ <: T]] private (private val map: Map[T, Set object TypedMultiMap { private val _empty = new TypedMultiMap[Nothing, Nothing](Map.empty) - /** - * Obtain the empty map for the given key type and key–value type function. - */ + /** Obtain the empty map for the given key type and key–value type function. */ def empty[T <: AnyRef, K[_ <: T]]: TypedMultiMap[T, K] = _empty.asInstanceOf[TypedMultiMap[T, K]] } diff --git a/akka-actor/src/main/scala/akka/util/UUIDComparator.scala b/akka-actor/src/main/scala/akka/util/UUIDComparator.scala index 02921ed7a0d..2486bf3e3b3 100644 --- a/akka-actor/src/main/scala/akka/util/UUIDComparator.scala +++ b/akka-actor/src/main/scala/akka/util/UUIDComparator.scala @@ -54,9 +54,9 @@ class UUIDComparator extends Comparator[UUID] { * but if signs don't agree need to resolve differently */ if (i1 < 0) { - if (i2 < 0) (i1 - i2) else 1 + if (i2 < 0) i1 - i2 else 1 } else { - if (i2 < 0) -1 else (i1 - i2) + if (i2 < 0) -1 else i1 - i2 } } diff --git a/akka-actor/src/main/scala/akka/util/Version.scala b/akka-actor/src/main/scala/akka/util/Version.scala index 20ff749f1c8..9cbcfa53ffe 100644 --- a/akka-actor/src/main/scala/akka/util/Version.scala +++ b/akka-actor/src/main/scala/akka/util/Version.scala @@ -40,9 +40,7 @@ final class Version(val version: String) extends Comparable[Version] { @volatile private var numbers: Array[Int] = Array.emptyIntArray private var rest: String = "" - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def parse(): Version = { def parseLastPart(s: String): (Int, String) = { // for example 2, 
2-SNAPSHOT or dynver 2+10-1234abcd diff --git a/akka-actor/src/main/scala/akka/util/WallClock.scala b/akka-actor/src/main/scala/akka/util/WallClock.scala index 83b4d241dd6..88ca6f01097 100644 --- a/akka-actor/src/main/scala/akka/util/WallClock.scala +++ b/akka-actor/src/main/scala/akka/util/WallClock.scala @@ -10,9 +10,7 @@ import java.util.function.LongUnaryOperator import akka.annotation.ApiMayChange import akka.annotation.InternalApi -/** - * A time source. - */ +/** A time source. */ @ApiMayChange trait WallClock { def currentTimeMillis(): Long @@ -27,9 +25,7 @@ object WallClock { val AlwaysIncreasingClock: WallClock = new AlwaysIncreasingClock() } -/** - * INTERNAL API: Always increasing wall clock time. - */ +/** INTERNAL API: Always increasing wall clock time. */ @InternalApi private[akka] final class AlwaysIncreasingClock() extends AtomicLong with WallClock { diff --git a/akka-actor/src/main/scala/akka/util/WildcardIndex.scala b/akka-actor/src/main/scala/akka/util/WildcardIndex.scala index 8739160cb73..dcb15f46c84 100644 --- a/akka-actor/src/main/scala/akka/util/WildcardIndex.scala +++ b/akka-actor/src/main/scala/akka/util/WildcardIndex.scala @@ -58,9 +58,8 @@ private[akka] final case class WildcardTree[T]( throw new IllegalArgumentException( "double wildcard can't be used as a suffix (e.g. /user/actor**), only as a full subPath element (e.g. 
/user/actor/**)") else if (e != "*" && e != "**" && e.endsWith("*")) - copy( - wildcardSuffixChildren = wildcardSuffixChildren - .updated(e.stripSuffix("*"), wildcardSuffixChildren.getOrElse(e, WildcardTree[T]()).insert(elems, d))) + copy(wildcardSuffixChildren = wildcardSuffixChildren + .updated(e.stripSuffix("*"), wildcardSuffixChildren.getOrElse(e, WildcardTree[T]()).insert(elems, d))) else copy(children = children.updated(e, children.getOrElse(e, WildcardTree[T]()).insert(elems, d))) } diff --git a/akka-bench-jmh/src/main/scala/akka/actor/ActorBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/ActorBenchmark.scala index a96b8205ed2..05de803e6d6 100644 --- a/akka-bench-jmh/src/main/scala/akka/actor/ActorBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/actor/ActorBenchmark.scala @@ -37,7 +37,7 @@ class ActorBenchmark { @Param(Array("50")) var batchSize = 0 - //@Param(Array("akka.actor.ManyToOneArrayMailbox")) + // @Param(Array("akka.actor.ManyToOneArrayMailbox")) @Param( Array( "akka.dispatch.SingleConsumerOnlyUnboundedMailbox", diff --git a/akka-bench-jmh/src/main/scala/akka/actor/ActorCreationBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/ActorCreationBenchmark.scala index 93c1b3f5565..36b1c291903 100644 --- a/akka-bench-jmh/src/main/scala/akka/actor/ActorCreationBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/actor/ActorCreationBenchmark.scala @@ -49,7 +49,6 @@ class ActorCreationBenchmark { } class MyActor extends Actor { - override def receive: Receive = { - case _ => + override def receive: Receive = { case _ => } } diff --git a/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolComparativeBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolComparativeBenchmark.scala index 94c9ef8f85e..0e32bc72e0a 100644 --- a/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolComparativeBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolComparativeBenchmark.scala @@ -26,7 +26,7 @@ class 
AffinityPoolComparativeBenchmark { @Param(Array("affinity-dispatcher", "default-fj-dispatcher", "fixed-size-dispatcher")) var dispatcher = "" - @Param(Array("SingleConsumerOnlyUnboundedMailbox")) //"default" + @Param(Array("SingleConsumerOnlyUnboundedMailbox")) // "default" var mailbox = "" final val numThreads, numActors = 8 diff --git a/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolRequestResponseBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolRequestResponseBenchmark.scala index 5f4e38e1651..b5a66c72166 100644 --- a/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolRequestResponseBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolRequestResponseBenchmark.scala @@ -26,7 +26,7 @@ class AffinityPoolRequestResponseBenchmark { @Param(Array("affinity-dispatcher", "default-fj-dispatcher", "fixed-size-dispatcher")) var dispatcher = "" - @Param(Array("SingleConsumerOnlyUnboundedMailbox")) //"default" + @Param(Array("SingleConsumerOnlyUnboundedMailbox")) // "default" var mailbox = "" final val numThreads, numActors = 8 diff --git a/akka-bench-jmh/src/main/scala/akka/actor/BenchmarkActors.scala b/akka-bench-jmh/src/main/scala/akka/actor/BenchmarkActors.scala index 83273645b20..b104e2bdcde 100644 --- a/akka-bench-jmh/src/main/scala/akka/actor/BenchmarkActors.scala +++ b/akka-bench-jmh/src/main/scala/akka/actor/BenchmarkActors.scala @@ -19,15 +19,14 @@ object BenchmarkActors { class PingPong(val messagesPerPair: Int, latch: CountDownLatch) extends Actor { var left = messagesPerPair / 2 - def receive = { - case Message => - if (left == 0) { - latch.countDown() - context.stop(self) - } + def receive = { case Message => + if (left == 0) { + latch.countDown() + context.stop(self) + } - sender() ! Message - left -= 1 + sender() ! Message + left -= 1 } } @@ -36,9 +35,8 @@ object BenchmarkActors { } class Echo extends Actor { - def receive = { - case Message => - sender() ! 
Message + def receive = { case Message => + sender() ! Message } } @@ -53,15 +51,14 @@ object BenchmarkActors { private var left = messagesPerPair / 2 private var batch = 0 - def receive = { - case Message => - batch -= 1 - if (batch <= 0) { - if (!sendBatch()) { - latch.countDown() - context.stop(self) - } + def receive = { case Message => + batch -= 1 + if (batch <= 0) { + if (!sendBatch()) { + latch.countDown() + context.stop(self) } + } } private def sendBatch(): Boolean = { @@ -93,8 +90,8 @@ object BenchmarkActors { def props(next: Option[ActorRef]) = Props(new Pipe(next)) } - private def startPingPongActorPairs(messagesPerPair: Int, numPairs: Int, dispatcher: String)( - implicit system: ActorSystem): (Vector[(ActorRef, ActorRef)], CountDownLatch) = { + private def startPingPongActorPairs(messagesPerPair: Int, numPairs: Int, dispatcher: String)(implicit + system: ActorSystem): (Vector[(ActorRef, ActorRef)], CountDownLatch) = { val fullPathToDispatcher = "akka.actor." + dispatcher val latch = new CountDownLatch(numPairs * 2) val actors = List @@ -116,8 +113,8 @@ object BenchmarkActors { } } - private def startEchoActorPairs(messagesPerPair: Int, numPairs: Int, dispatcher: String, batchSize: Int)( - implicit system: ActorSystem): (Vector[ActorRef], CountDownLatch) = { + private def startEchoActorPairs(messagesPerPair: Int, numPairs: Int, dispatcher: String, batchSize: Int)(implicit + system: ActorSystem): (Vector[ActorRef], CountDownLatch) = { val fullPathToDispatcher = "akka.actor." 
+ dispatcher val latch = new CountDownLatch(numPairs) diff --git a/akka-bench-jmh/src/main/scala/akka/actor/RequestResponseActors.scala b/akka-bench-jmh/src/main/scala/akka/actor/RequestResponseActors.scala index 0d78a782208..164445f5fa3 100644 --- a/akka-bench-jmh/src/main/scala/akka/actor/RequestResponseActors.scala +++ b/akka-bench-jmh/src/main/scala/akka/actor/RequestResponseActors.scala @@ -42,17 +42,16 @@ object RequestResponseActors { class UserServiceActor(userDb: Map[Int, User], latch: CountDownLatch, numQueries: Int) extends Actor { private var left = numQueries - def receive = { - case Request(id) => - userDb.get(id) match { - case Some(u) => sender() ! u - case None => - } - if (left == 0) { - latch.countDown() - context.stop(self) - } - left -= 1 + def receive = { case Request(id) => + userDb.get(id) match { + case Some(u) => sender() ! u + case None => + } + if (left == 0) { + latch.countDown() + context.stop(self) + } + left -= 1 } } diff --git a/akka-bench-jmh/src/main/scala/akka/actor/ScheduleBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/ScheduleBenchmark.scala index 87cf4347b8d..1e673661ddf 100644 --- a/akka-bench-jmh/src/main/scala/akka/actor/ScheduleBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/actor/ScheduleBenchmark.scala @@ -57,9 +57,8 @@ class ScheduleBenchmark { val idx = aIdx.getAndIncrement if (idx <= to) op(idx) } - promise.future.onComplete { - case _ => - tryWithNext.cancel() + promise.future.onComplete { case _ => + tryWithNext.cancel() } Await.result(promise.future, within) } @@ -71,9 +70,8 @@ class ScheduleBenchmark { val idx = aIdx.getAndIncrement if (idx <= to) op(idx) } - promise.future.onComplete { - case _ => - tryWithNext.cancel() + promise.future.onComplete { case _ => + tryWithNext.cancel() } Await.result(promise.future, within) } @@ -81,16 +79,16 @@ class ScheduleBenchmark { @Benchmark def multipleScheduleOnce(): Unit = { val tryWithNext = (1 to to) - .foldLeft(0.millis -> List[Cancellable]()) { - 
case ((interv, c), idx) => - (interv + interval, scheduler.scheduleOnce(interv) { + .foldLeft(0.millis -> List[Cancellable]()) { case ((interv, c), idx) => + ( + interv + interval, + scheduler.scheduleOnce(interv) { op(idx) } :: c) } ._2 - promise.future.onComplete { - case _ => - tryWithNext.foreach(_.cancel()) + promise.future.onComplete { case _ => + tryWithNext.foreach(_.cancel()) } Await.result(promise.future, within) } diff --git a/akka-bench-jmh/src/main/scala/akka/actor/StashCreationBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/StashCreationBenchmark.scala index f0ba398940e..0aeb2131523 100644 --- a/akka-bench-jmh/src/main/scala/akka/actor/StashCreationBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/actor/StashCreationBenchmark.scala @@ -16,8 +16,8 @@ import akka.testkit.TestProbe object StashCreationBenchmark { class StashingActor extends Actor with Stash { - def receive = { - case msg => sender() ! msg + def receive = { case msg => + sender() ! msg } } diff --git a/akka-bench-jmh/src/main/scala/akka/actor/typed/TypedBenchmarkActors.scala b/akka-bench-jmh/src/main/scala/akka/actor/typed/TypedBenchmarkActors.scala index f232c574568..7bd4437865e 100644 --- a/akka-bench-jmh/src/main/scala/akka/actor/typed/TypedBenchmarkActors.scala +++ b/akka-bench-jmh/src/main/scala/akka/actor/typed/TypedBenchmarkActors.scala @@ -95,17 +95,16 @@ object TypedBenchmarkActors { val startNanoTime = System.nanoTime() pairs.foreach(_ ! Message) var interactionsLeft = numPairs - Behaviors.receiveMessagePartial { - case Done => - interactionsLeft -= 1 - if (interactionsLeft == 0) { - val totalNumMessages = numPairs * messagesPerPair - printProgress(totalNumMessages, numActors, startNanoTime) - respondTo ! 
Completed(startNanoTime) - Behaviors.stopped - } else { - Behaviors.same - } + Behaviors.receiveMessagePartial { case Done => + interactionsLeft -= 1 + if (interactionsLeft == 0) { + val totalNumMessages = numPairs * messagesPerPair + printProgress(totalNumMessages, numActors, startNanoTime) + respondTo ! Completed(startNanoTime) + Behaviors.stopped + } else { + Behaviors.same + } } } @@ -174,16 +173,15 @@ object TypedBenchmarkActors { Behaviors.setup { ctx => var left = messagesPerPair / 2 val pong = Message(ctx.self) // we re-use a single pong to avoid alloc on each msg - Behaviors.receiveMessage[Message] { - case Message(replyTo) => - replyTo ! pong - if (left == 0) { - latch.countDown() - Behaviors.stopped // note that this will likely lead to dead letters - } else { - left -= 1 - Behaviors.same - } + Behaviors.receiveMessage[Message] { case Message(replyTo) => + replyTo ! pong + if (left == 0) { + latch.countDown() + Behaviors.stopped // note that this will likely lead to dead letters + } else { + left -= 1 + Behaviors.same + } } } diff --git a/akka-bench-jmh/src/main/scala/akka/actor/typed/delivery/ReliableDeliveryBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/typed/delivery/ReliableDeliveryBenchmark.scala index d25cc0ff7e7..5468a4707c1 100644 --- a/akka-bench-jmh/src/main/scala/akka/actor/typed/delivery/ReliableDeliveryBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/actor/typed/delivery/ReliableDeliveryBenchmark.scala @@ -88,12 +88,11 @@ object Consumer { context.messageAdapter[ConsumerController.Delivery[Command]](WrappedDelivery(_)) consumerController ! ConsumerController.Start(deliveryAdapter) - Behaviors.receiveMessagePartial { - case WrappedDelivery(d @ ConsumerController.Delivery(_, confirmTo)) => - if (traceEnabled) - context.log.trace("Processed {}", d.seqNr) - confirmTo ! 
ConsumerController.Confirmed - Behaviors.same + Behaviors.receiveMessagePartial { case WrappedDelivery(d @ ConsumerController.Delivery(_, confirmTo)) => + if (traceEnabled) + context.log.trace("Processed {}", d.seqNr) + confirmTo ! ConsumerController.Confirmed + Behaviors.same } } } @@ -112,7 +111,7 @@ object WorkPullingProducer { val requestNextAdapter = context.messageAdapter[WorkPullingProducerController.RequestNext[Consumer.Command]](WrappedRequestNext(_)) var remaining = numberOfMessages + context.system.settings.config - .getInt("akka.reliable-delivery.consumer-controller.flow-control-window") + .getInt("akka.reliable-delivery.consumer-controller.flow-control-window") Behaviors.receiveMessagePartial { case WrappedRequestNext(next) => diff --git a/akka-bench-jmh/src/main/scala/akka/cluster/ddata/VersionVectorBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/cluster/ddata/VersionVectorBenchmark.scala index 154a62b0bf4..1c4e32bfbea 100644 --- a/akka-bench-jmh/src/main/scala/akka/cluster/ddata/VersionVectorBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/cluster/ddata/VersionVectorBenchmark.scala @@ -56,19 +56,19 @@ class VersionVectorBenchmark { } @Benchmark - def increment: VersionVector = (vv1 + nodeA) + def increment: VersionVector = vv1 + nodeA @Benchmark - def compareSame1: Boolean = (vv1 == dot1) + def compareSame1: Boolean = vv1 == dot1 @Benchmark - def compareSame2: Boolean = (vv2 == dot1) + def compareSame2: Boolean = vv2 == dot1 @Benchmark - def compareGreaterThan1: Boolean = (vv1 > dot1) + def compareGreaterThan1: Boolean = vv1 > dot1 @Benchmark - def compareGreaterThan2: Boolean = (vv2 > dot1) + def compareGreaterThan2: Boolean = vv2 > dot1 @Benchmark def merge: VersionVector = vv1.merge(vv2) diff --git a/akka-bench-jmh/src/main/scala/akka/dispatch/NodeQueueBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/dispatch/NodeQueueBenchmark.scala index 1aeccf2fb32..09372274ef3 100644 --- 
a/akka-bench-jmh/src/main/scala/akka/dispatch/NodeQueueBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/dispatch/NodeQueueBenchmark.scala @@ -29,7 +29,8 @@ object NodeQueueBenchmark { class NodeQueueBenchmark { import NodeQueueBenchmark._ - val config = ConfigFactory.parseString(""" + val config = ConfigFactory + .parseString(""" dispatcher { executor = "thread-pool-executor" throughput = 1000 @@ -43,12 +44,14 @@ mailbox { } """).withFallback(ConfigFactory.load()) implicit val sys: ActorSystem = ActorSystem("ANQ", config) - val ref = sys.actorOf(Props(new Actor { - def receive = { - case Stop => sender() ! Stop - case _ => - } - }).withDispatcher("dispatcher").withMailbox("mailbox"), "receiver") + val ref = sys.actorOf( + Props(new Actor { + def receive = { + case Stop => sender() ! Stop + case _ => + } + }).withDispatcher("dispatcher").withMailbox("mailbox"), + "receiver") @TearDown def teardown(): Unit = Await.result(sys.terminate(), 5.seconds) diff --git a/akka-bench-jmh/src/main/scala/akka/persistence/Common.scala b/akka-bench-jmh/src/main/scala/akka/persistence/Common.scala index 96292c16f66..b405e91cdb0 100644 --- a/akka-bench-jmh/src/main/scala/akka/persistence/Common.scala +++ b/akka-bench-jmh/src/main/scala/akka/persistence/Common.scala @@ -8,8 +8,8 @@ import akka.actor.Actor /** only as a "the best we could possibly get" baseline, does not persist anything */ class BaselineActor(respondAfter: Int) extends Actor { - override def receive = { - case n: Int => if (n == respondAfter) sender() ! n + override def receive = { case n: Int => + if (n == respondAfter) sender() ! 
n } } diff --git a/akka-bench-jmh/src/main/scala/akka/persistence/PersistenceActorDeferBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/persistence/PersistenceActorDeferBenchmark.scala index c566f18e678..f8ee15d124b 100644 --- a/akka-bench-jmh/src/main/scala/akka/persistence/PersistenceActorDeferBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/persistence/PersistenceActorDeferBenchmark.scala @@ -89,31 +89,24 @@ class `persistAsync, defer`(respondAfter: Int) extends PersistentActor { override def persistenceId: String = self.path.name - override def receiveCommand = { - case n: Int => - persistAsync(Evt(n)) { _ => - } - deferAsync(Evt(n)) { e => - if (e.i == respondAfter) sender() ! e.i - } + override def receiveCommand = { case n: Int => + persistAsync(Evt(n)) { _ => } + deferAsync(Evt(n)) { e => + if (e.i == respondAfter) sender() ! e.i + } } - override def receiveRecover = { - case _ => // do nothing + override def receiveRecover = { case _ => // do nothing } } class `persistAsync, defer, respond ASAP`(respondAfter: Int) extends PersistentActor { override def persistenceId: String = self.path.name - override def receiveCommand = { - case n: Int => - persistAsync(Evt(n)) { _ => - } - deferAsync(Evt(n)) { _ => - } - if (n == respondAfter) sender() ! n + override def receiveCommand = { case n: Int => + persistAsync(Evt(n)) { _ => } + deferAsync(Evt(n)) { _ => } + if (n == respondAfter) sender() ! 
n } - override def receiveRecover = { - case _ => // do nothing + override def receiveRecover = { case _ => // do nothing } } diff --git a/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorBenchmark.scala index 964447be8bc..3cdecf1c273 100644 --- a/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorBenchmark.scala @@ -111,11 +111,10 @@ class NoPersistPersistentActor(respondAfter: Int) extends PersistentActor { override def persistenceId: String = self.path.name - override def receiveCommand = { - case n: Int => if (n == respondAfter) sender() ! Evt(n) + override def receiveCommand = { case n: Int => + if (n == respondAfter) sender() ! Evt(n) } - override def receiveRecover = { - case _ => // do nothing + override def receiveRecover = { case _ => // do nothing } } @@ -123,14 +122,12 @@ class PersistPersistentActor(respondAfter: Int) extends PersistentActor { override def persistenceId: String = self.path.name - override def receiveCommand = { - case n: Int => - persist(Evt(n)) { e => - if (e.i == respondAfter) sender() ! e - } + override def receiveCommand = { case n: Int => + persist(Evt(n)) { e => + if (e.i == respondAfter) sender() ! e + } } - override def receiveRecover = { - case _ => // do nothing + override def receiveRecover = { case _ => // do nothing } } @@ -138,14 +135,12 @@ class PersistPersistentActor(respondAfter: Int) extends PersistentActor { class PersistAsyncPersistentActor(respondAfter: Int) extends PersistentActor { override def persistenceId: String = self.path.name - override def receiveCommand = { - case n: Int => - persistAsync(Evt(n)) { e => - if (e.i == respondAfter) sender() ! e - } + override def receiveCommand = { case n: Int => + persistAsync(Evt(n)) { e => + if (e.i == respondAfter) sender() ! 
e + } } - override def receiveRecover = { - case _ => // do nothing + override def receiveRecover = { case _ => // do nothing } } @@ -153,13 +148,11 @@ class PersistAsyncQuickReplyPersistentActor(respondAfter: Int) extends Persisten override def persistenceId: String = self.path.name - override def receiveCommand = { - case n: Int => - val e = Evt(n) - if (n == respondAfter) sender() ! e - persistAsync(e)(identity) + override def receiveCommand = { case n: Int => + val e = Evt(n) + if (n == respondAfter) sender() ! e + persistAsync(e)(identity) } - override def receiveRecover = { - case _ => // do nothing + override def receiveRecover = { case _ => // do nothing } } diff --git a/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorWithAtLeastOnceDeliveryBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorWithAtLeastOnceDeliveryBenchmark.scala index 1d789890c51..b3072cbfb99 100644 --- a/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorWithAtLeastOnceDeliveryBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorWithAtLeastOnceDeliveryBenchmark.scala @@ -108,15 +108,14 @@ class NoPersistPersistentActorWithAtLeastOnceDelivery( case n: Int => deliver(downStream)(deliveryId => Msg(deliveryId, n)) if (n == respondAfter) - //switch to wait all message confirmed + // switch to wait all message confirmed context.become(waitConfirm) case Confirm(deliveryId) => confirmDelivery(deliveryId) case _ => // do nothing } - override def receiveRecover = { - case _ => // do nothing + override def receiveRecover = { case _ => // do nothing } val waitConfirm: Actor.Receive = { @@ -146,7 +145,7 @@ class PersistPersistentActorWithAtLeastOnceDelivery( persist(MsgSent(n)) { _ => deliver(downStream)(deliveryId => Msg(deliveryId, n)) if (n == respondAfter) - //switch to wait all message confirmed + // switch to wait all message confirmed context.become(waitConfirm) } case Confirm(deliveryId) => @@ -154,8 +153,7 @@ 
class PersistPersistentActorWithAtLeastOnceDelivery( case _ => // do nothing } - override def receiveRecover = { - case _ => // do nothing + override def receiveRecover = { case _ => // do nothing } val waitConfirm: Actor.Receive = { @@ -185,7 +183,7 @@ class PersistAsyncPersistentActorWithAtLeastOnceDelivery( persistAsync(MsgSent(n)) { _ => deliver(downStream)(deliveryId => Msg(deliveryId, n)) if (n == respondAfter) - //switch to wait all message confirmed + // switch to wait all message confirmed context.become(waitConfirm) } case Confirm(deliveryId) => @@ -193,8 +191,7 @@ class PersistAsyncPersistentActorWithAtLeastOnceDelivery( case _ => // do nothing } - override def receiveRecover = { - case _ => // do nothing + override def receiveRecover = { case _ => // do nothing } val waitConfirm: Actor.Receive = { @@ -227,7 +224,7 @@ class DestinationActor extends Actor { case Msg(deliveryId, _) => seqNr += 1 if (seqNr % 11 == 0) { - //drop it + // drop it } else { sender() ! Confirm(deliveryId) } diff --git a/akka-bench-jmh/src/main/scala/akka/remote/artery/compress/SerializationFormatCacheBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/remote/artery/compress/SerializationFormatCacheBenchmark.scala index 82f4d4dcbd7..7023eddcbe6 100644 --- a/akka-bench-jmh/src/main/scala/akka/remote/artery/compress/SerializationFormatCacheBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/remote/artery/compress/SerializationFormatCacheBenchmark.scala @@ -27,9 +27,7 @@ import akka.pattern.PromiseActorRef import akka.remote.artery.SerializationFormatCache import akka.serialization.Serialization -/** - * Actually more like specific benchmarks for the few concrete LRU cache usages - */ +/** Actually more like specific benchmarks for the few concrete LRU cache usages */ @Fork(1) @State(Scope.Benchmark) @nowarn @@ -64,14 +62,13 @@ class SerializationFormatCacheBenchmark { @Setup def init(): Unit = { system = ActorSystem("SerializationFormatCacheBenchmark") - temporaryActorRefs = 
Array.tabulate(uniqueTemporaryRefs)( - n => - new PromiseActorRef( - system.asInstanceOf[ExtendedActorSystem].provider, - Promise(), - "Any", - // request path is encoded in this string - s"_user_region_shard${n % 100}_entitypretendid${n}")) + temporaryActorRefs = Array.tabulate(uniqueTemporaryRefs)(n => + new PromiseActorRef( + system.asInstanceOf[ExtendedActorSystem].provider, + Promise(), + "Any", + // request path is encoded in this string + s"_user_region_shard${n % 100}_entitypretendid${n}")) topLevelActorRefs = Array.tabulate(uniqueTopLevelRefs)(n => system.actorOf(Props.empty, s"actor_$n")) } diff --git a/akka-bench-jmh/src/main/scala/akka/stream/AskBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/AskBenchmark.scala index a3a9fd9415a..3e36b77b5b8 100644 --- a/akka-bench-jmh/src/main/scala/akka/stream/AskBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/stream/AskBenchmark.scala @@ -62,8 +62,8 @@ class AskBenchmark { def setup(): Unit = { testSource = Source.fromGraph(new BenchTestSource(OperationsPerInvocation)) actor = system.actorOf(Props(new Actor { - override def receive = { - case element => sender() ! element + override def receive = { case element => + sender() ! 
element } })) // eager init of materializer diff --git a/akka-bench-jmh/src/main/scala/akka/stream/EmptySourceBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/EmptySourceBenchmark.scala index 71b87b1cb32..6488d1df065 100644 --- a/akka-bench-jmh/src/main/scala/akka/stream/EmptySourceBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/stream/EmptySourceBenchmark.scala @@ -39,5 +39,5 @@ class EmptySourceBenchmark { Rewrite to GraphStage: [info] EmptySourceBenchmark.empty thrpt 10 17.556 ± 2.865 ops/ms - */ + */ } diff --git a/akka-bench-jmh/src/main/scala/akka/stream/FlowMapBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/FlowMapBenchmark.scala index c754ac00218..3adbaf20843 100644 --- a/akka-bench-jmh/src/main/scala/akka/stream/FlowMapBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/stream/FlowMapBenchmark.scala @@ -25,7 +25,8 @@ import akka.stream.scaladsl._ @BenchmarkMode(Array(Mode.Throughput)) class FlowMapBenchmark { - val config = ConfigFactory.parseString(""" + val config = ConfigFactory + .parseString(""" akka { log-config-on-start = off log-dead-letters-during-shutdown = off @@ -49,7 +50,8 @@ class FlowMapBenchmark { type = akka.testkit.CallingThreadDispatcherConfigurator } } - }""".stripMargin).withFallback(ConfigFactory.load()) + }""".stripMargin) + .withFallback(ConfigFactory.load()) implicit val system: ActorSystem = ActorSystem("test", config) diff --git a/akka-bench-jmh/src/main/scala/akka/stream/FramingBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/FramingBenchmark.scala index bd99f735de4..f826df32111 100644 --- a/akka-bench-jmh/src/main/scala/akka/stream/FramingBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/stream/FramingBenchmark.scala @@ -28,7 +28,8 @@ import akka.util.ByteString @BenchmarkMode(Array(Mode.Throughput)) class FramingBenchmark { - val config: Config = ConfigFactory.parseString(""" + val config: Config = ConfigFactory + .parseString(""" akka { log-config-on-start = off 
log-dead-letters-during-shutdown = off @@ -50,7 +51,8 @@ class FramingBenchmark { type = akka.testkit.CallingThreadDispatcherConfigurator } } - }""".stripMargin).withFallback(ConfigFactory.load()) + }""".stripMargin) + .withFallback(ConfigFactory.load()) implicit val system: ActorSystem = ActorSystem("test", config) diff --git a/akka-bench-jmh/src/main/scala/akka/stream/FusedGraphsBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/FusedGraphsBenchmark.scala index 7afe65634c2..81d6cf22cf1 100644 --- a/akka-bench-jmh/src/main/scala/akka/stream/FusedGraphsBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/stream/FusedGraphsBenchmark.scala @@ -188,8 +188,8 @@ class FusedGraphsBenchmark { .buffer(10, OverflowStrategy.backpressure) .toMat(testSink)(Keep.right)) - val broadcastZipFlow: Flow[MutableElement, MutableElement, NotUsed] = Flow.fromGraph(GraphDSL.create() { - implicit b => + val broadcastZipFlow: Flow[MutableElement, MutableElement, NotUsed] = + Flow.fromGraph(GraphDSL.create() { implicit b => import GraphDSL.Implicits._ val bcast = b.add(Broadcast[MutableElement](2)) @@ -199,10 +199,10 @@ class FusedGraphsBenchmark { bcast ~> zip.in1 FlowShape(bcast.in, zip.out.map(_._1).outlet) - }) + }) - val balanceMergeFlow: Flow[MutableElement, MutableElement, NotUsed] = Flow.fromGraph(GraphDSL.create() { - implicit b => + val balanceMergeFlow: Flow[MutableElement, MutableElement, NotUsed] = + Flow.fromGraph(GraphDSL.create() { implicit b => import GraphDSL.Implicits._ val balance = b.add(Balance[MutableElement](2)) @@ -212,7 +212,7 @@ class FusedGraphsBenchmark { balance ~> merge FlowShape(balance.in, merge.out) - }) + }) broadcastZip = fuse(testSource.via(broadcastZipFlow).toMat(testSink)(Keep.right)) diff --git a/akka-bench-jmh/src/main/scala/akka/stream/InterpreterBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/InterpreterBenchmark.scala index 75348e4bbcc..210901b5fea 100644 --- 
a/akka-bench-jmh/src/main/scala/akka/stream/InterpreterBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/stream/InterpreterBenchmark.scala @@ -38,7 +38,7 @@ class InterpreterBenchmark { val b = builder(identities: _*).connect(source, identities.head.in).connect(identities.last.out, sink) // FIXME: This should not be here, this is pure setup overhead - for (i <- (0 until identities.size - 1)) { + for (i <- 0 until identities.size - 1) { b.connect(identities(i).out, identities(i + 1).in) } @@ -77,13 +77,15 @@ object InterpreterBenchmark { override val in: akka.stream.Inlet[T] = Inlet[T]("in") in.id = 0 - setHandler(in, new InHandler { - override def onPush(): Unit = { - expected -= 1 - if (expected > 0) pull(in) - // Otherwise do nothing, it will exit the interpreter - } - }) + setHandler( + in, + new InHandler { + override def onPush(): Unit = { + expected -= 1 + if (expected > 0) pull(in) + // Otherwise do nothing, it will exit the interpreter + } + }) def requestOne(): Unit = pull(in) } diff --git a/akka-bench-jmh/src/main/scala/akka/stream/SourceRefBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/SourceRefBenchmark.scala index e930f32efc8..31d81b37acb 100644 --- a/akka-bench-jmh/src/main/scala/akka/stream/SourceRefBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/stream/SourceRefBenchmark.scala @@ -27,12 +27,14 @@ import akka.stream.scaladsl._ @BenchmarkMode(Array(Mode.Throughput)) class SourceRefBenchmark { - val config = ConfigFactory.parseString(""" + val config = ConfigFactory + .parseString(""" akka { log-config-on-start = off log-dead-letters-during-shutdown = off loglevel = "WARNING" - }""".stripMargin).withFallback(ConfigFactory.load()) + }""".stripMargin) + .withFallback(ConfigFactory.load()) implicit val system: ActorSystem = ActorSystem("test", config) diff --git a/akka-bench-jmh/src/main/scala/akka/util/FastFrequencySketchBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/util/FastFrequencySketchBenchmark.scala index 
91596c4b196..68f8374dec9 100644 --- a/akka-bench-jmh/src/main/scala/akka/util/FastFrequencySketchBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/util/FastFrequencySketchBenchmark.scala @@ -21,7 +21,7 @@ import org.openjdk.jmh.annotations.Warmup class FastFrequencySketchBenchmark { private[this] val Capacity = 10000 private[this] val GeneratedSize = 1 << 16 - private final val IndexMask = 0xFFFF + private final val IndexMask = 0xffff private[this] var sketch: FastFrequencySketch[String] = _ private[this] var generated: Array[String] = _ diff --git a/akka-bench-jmh/src/main/scala/akka/util/FrequencySketchBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/util/FrequencySketchBenchmark.scala index 456aac4a646..7e665d79f18 100644 --- a/akka-bench-jmh/src/main/scala/akka/util/FrequencySketchBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/util/FrequencySketchBenchmark.scala @@ -21,7 +21,7 @@ import org.openjdk.jmh.annotations.Warmup class FrequencySketchBenchmark { private[this] val Capacity = 10000 private[this] val GeneratedSize = 1 << 16 - private final val IndexMask = 0xFFFF + private final val IndexMask = 0xffff private[this] var sketch: FrequencySketch[String] = _ private[this] var generated: Array[String] = _ diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsCollector.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsCollector.scala index 93b77013d3c..8fd1f8f9e4e 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsCollector.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsCollector.scala @@ -19,14 +19,10 @@ import akka.cluster.ClusterEvent import akka.cluster.Member import akka.cluster.MemberStatus -/** - * Runtime collection management commands. - */ +/** Runtime collection management commands. 
*/ sealed abstract class CollectionControlMessage extends Serializable -/** - * Command for [[ClusterMetricsSupervisor]] to start metrics collection. - */ +/** Command for [[ClusterMetricsSupervisor]] to start metrics collection. */ @SerialVersionUID(1L) case object CollectionStartMessage extends CollectionControlMessage { @@ -34,9 +30,7 @@ case object CollectionStartMessage extends CollectionControlMessage { def getInstance = CollectionStartMessage } -/** - * Command for [[ClusterMetricsSupervisor]] to stop metrics collection. - */ +/** Command for [[ClusterMetricsSupervisor]] to stop metrics collection. */ @SerialVersionUID(1L) case object CollectionStopMessage extends CollectionControlMessage { @@ -89,9 +83,7 @@ private[metrics] class ClusterMetricsSupervisor extends Actor with ActorLogging */ trait ClusterMetricsEvent -/** - * Current snapshot of cluster node metrics. - */ +/** Current snapshot of cluster node metrics. */ final case class ClusterMetricsChanged(nodeMetrics: Set[NodeMetrics]) extends ClusterMetricsEvent { /** Java API */ @@ -119,9 +111,7 @@ private[metrics] final case class MetricsGossipEnvelope(from: Address, gossip: M extends ClusterMetricsMessage with DeadLetterSuppression -/** - * INTERNAL API. - */ +/** INTERNAL API. */ private[metrics] object ClusterMetricsCollector { case object MetricsTick case object GossipTick @@ -143,24 +133,16 @@ private[metrics] class ClusterMetricsCollector extends Actor with ActorLogging { val metrics = ClusterMetricsExtension(context.system) import metrics.settings._ - /** - * The node ring gossipped that contains only members that are Up. - */ + /** The node ring gossipped that contains only members that are Up. */ var nodes: immutable.SortedSet[Address] = immutable.SortedSet.empty - /** - * The latest metric values with their statistical data. - */ + /** The latest metric values with their statistical data. 
*/ var latestGossip: MetricsGossip = MetricsGossip.empty - /** - * The metrics collector that samples data on the node. - */ + /** The metrics collector that samples data on the node. */ val collector: MetricsCollector = MetricsCollector(context.system) - /** - * Start periodic gossip to random nodes in cluster - */ + /** Start periodic gossip to random nodes in cluster */ val gossipTask = scheduler.scheduleWithFixedDelay( PeriodicTasksInitialDelay max CollectorGossipInterval, @@ -168,9 +150,7 @@ private[metrics] class ClusterMetricsCollector extends Actor with ActorLogging { self, GossipTick) - /** - * Start periodic metrics collection - */ + /** Start periodic metrics collection */ val sampleTask = scheduler.scheduleWithFixedDelay( PeriodicTasksInitialDelay max CollectorSampleInterval, CollectorSampleInterval, @@ -196,7 +176,6 @@ private[metrics] class ClusterMetricsCollector extends Actor with ActorLogging { if (m.status == MemberStatus.Up || m.status == MemberStatus.WeaklyUp) addMember(m) case _: MemberEvent => // not interested in other types of MemberEvent - } override def postStop(): Unit = { @@ -206,25 +185,19 @@ private[metrics] class ClusterMetricsCollector extends Actor with ActorLogging { collector.close() } - /** - * Adds a member to the node ring. - */ + /** Adds a member to the node ring. */ def addMember(member: Member): Unit = nodes += member.address - /** - * Removes a member from the member node ring. - */ + /** Removes a member from the member node ring. */ def removeMember(member: Member): Unit = { nodes -= member.address latestGossip = latestGossip.remove(member.address) publish() } - /** - * Updates the initial node ring for those nodes that are [[akka.cluster.MemberStatus]] `Up`. - */ + /** Updates the initial node ring for those nodes that are [[akka.cluster.MemberStatus]] `Up`. 
*/ def receiveState(state: CurrentClusterState): Unit = - nodes = (state.members.diff(state.unreachable)).collect { + nodes = state.members.diff(state.unreachable).collect { case m if m.status == MemberStatus.Up || m.status == MemberStatus.WeaklyUp => m.address } @@ -253,9 +226,7 @@ private[metrics] class ClusterMetricsCollector extends Actor with ActorLogging { replyGossipTo(envelope.from) } - /** - * Gossip to peer nodes. - */ + /** Gossip to peer nodes. */ def gossip(): Unit = selectRandomNode((nodes - selfAddress).toVector).foreach(gossipTo) def gossipTo(address: Address): Unit = @@ -270,9 +241,7 @@ private[metrics] class ClusterMetricsCollector extends Actor with ActorLogging { def selectRandomNode(addresses: immutable.IndexedSeq[Address]): Option[Address] = if (addresses.isEmpty) None else Some(addresses(ThreadLocalRandom.current.nextInt(addresses.size))) - /** - * Publishes to the event stream. - */ + /** Publishes to the event stream. */ def publish(): Unit = context.system.eventStream.publish(ClusterMetricsChanged(latestGossip.nodes)) } diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsExtension.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsExtension.scala index 2d75476e911..37932fa5a65 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsExtension.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsExtension.scala @@ -36,9 +36,7 @@ import akka.event.LoggingAdapter */ class ClusterMetricsExtension(system: ExtendedActorSystem) extends Extension { - /** - * Metrics extension configuration. - */ + /** Metrics extension configuration. 
*/ val settings = ClusterMetricsSettings(system.settings.config) import settings._ @@ -54,7 +52,7 @@ class ClusterMetricsExtension(system: ExtendedActorSystem) extends Extension { .getOrElse { val log: LoggingAdapter = Logging(system, classOf[ClusterMetricsExtension]) log.error(s"Configured strategy provider ${SupervisorStrategyProvider} failed to load, using default ${classOf[ - ClusterMetricsStrategy].getName}.") + ClusterMetricsStrategy].getName}.") new ClusterMetricsStrategy(SupervisorStrategyConfiguration) } @@ -84,9 +82,7 @@ class ClusterMetricsExtension(system: ExtendedActorSystem) extends Extension { } -/** - * Cluster metrics extension provider. - */ +/** Cluster metrics extension provider. */ object ClusterMetricsExtension extends ExtensionId[ClusterMetricsExtension] with ExtensionIdProvider { override def lookup = ClusterMetricsExtension override def get(system: ActorSystem): ClusterMetricsExtension = super.get(system) diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsRouting.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsRouting.scala index 856222401d5..3d91685620c 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsRouting.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsRouting.scala @@ -162,9 +162,7 @@ final case class AdaptiveLoadBalancingPool( classOf[AdaptiveLoadBalancingMetricsListener], routingLogic.asInstanceOf[AdaptiveLoadBalancingRoutingLogic])) - /** - * Setting the supervisor strategy to be used for the “head” Router actor. - */ + /** Setting the supervisor strategy to be used for the “head” Router actor. 
*/ def withSupervisorStrategy(strategy: SupervisorStrategy): AdaptiveLoadBalancingPool = copy(supervisorStrategy = strategy) @@ -260,19 +258,16 @@ final case class AdaptiveLoadBalancingGroup( case object HeapMetricsSelector extends CapacityMetricsSelector { import akka.cluster.metrics.StandardMetrics.HeapMemory - /** - * Java API: get the singleton instance - */ + /** Java API: get the singleton instance */ def getInstance = this override def capacity(nodeMetrics: Set[NodeMetrics]): Map[Address, Double] = { - nodeMetrics.collect { - case HeapMemory(address, _, used, committed, max) => - val capacity = max match { - case None => (committed - used).toDouble / committed - case Some(m) => (m - used).toDouble / m - } - (address, capacity) + nodeMetrics.collect { case HeapMemory(address, _, used, committed, max) => + val capacity = max match { + case None => (committed - used).toDouble / committed + case Some(m) => (m - used).toDouble / m + } + (address, capacity) }.toMap } } @@ -289,9 +284,7 @@ case object HeapMetricsSelector extends CapacityMetricsSelector { case object CpuMetricsSelector extends CapacityMetricsSelector { import akka.cluster.metrics.StandardMetrics.Cpu - /** - * Java API: get the singleton instance - */ + /** Java API: get the singleton instance */ def getInstance = this // Notes from reading around: @@ -307,12 +300,11 @@ case object CpuMetricsSelector extends CapacityMetricsSelector { require(0.0 <= factor, s"factor must be non negative: ${factor}") override def capacity(nodeMetrics: Set[NodeMetrics]): Map[Address, Double] = { - nodeMetrics.collect { - case Cpu(address, _, _, Some(cpuCombined), Some(cpuStolen), _) => - // Arbitrary load rating function which skews in favor of stolen time. 
- val load = cpuCombined + cpuStolen * (1.0 + factor) - val capacity = if (load >= 1.0) 0.0 else 1.0 - load - (address, capacity) + nodeMetrics.collect { case Cpu(address, _, _, Some(cpuCombined), Some(cpuStolen), _) => + // Arbitrary load rating function which skews in favor of stolen time. + val load = cpuCombined + cpuStolen * (1.0 + factor) + val capacity = if (load >= 1.0) 0.0 else 1.0 - load + (address, capacity) }.toMap } } @@ -328,16 +320,13 @@ case object CpuMetricsSelector extends CapacityMetricsSelector { case object SystemLoadAverageMetricsSelector extends CapacityMetricsSelector { import akka.cluster.metrics.StandardMetrics.Cpu - /** - * Java API: get the singleton instance - */ + /** Java API: get the singleton instance */ def getInstance = this override def capacity(nodeMetrics: Set[NodeMetrics]): Map[Address, Double] = { - nodeMetrics.collect { - case Cpu(address, _, Some(systemLoadAverage), _, _, processors) => - val capacity = 1.0 - math.min(1.0, systemLoadAverage / processors) - (address, capacity) + nodeMetrics.collect { case Cpu(address, _, Some(systemLoadAverage), _, _, processors) => + val capacity = 1.0 - math.min(1.0, systemLoadAverage / processors) + (address, capacity) }.toMap } } @@ -350,9 +339,7 @@ case object SystemLoadAverageMetricsSelector extends CapacityMetricsSelector { object MixMetricsSelector extends MixMetricsSelectorBase(Vector(HeapMetricsSelector, CpuMetricsSelector, SystemLoadAverageMetricsSelector)) { - /** - * Java API: get the default singleton instance - */ + /** Java API: get the default singleton instance */ def getInstance = this } @@ -365,16 +352,12 @@ object MixMetricsSelector final case class MixMetricsSelector(selectors: immutable.IndexedSeq[CapacityMetricsSelector]) extends MixMetricsSelectorBase(selectors) -/** - * Base class for MetricsSelector that combines other selectors and aggregates their capacity. - */ +/** Base class for MetricsSelector that combines other selectors and aggregates their capacity. 
*/ @SerialVersionUID(1L) abstract class MixMetricsSelectorBase(selectors: immutable.IndexedSeq[CapacityMetricsSelector]) extends CapacityMetricsSelector { - /** - * Java API: construct a mix-selector from a sequence of selectors - */ + /** Java API: construct a mix-selector from a sequence of selectors */ def this(selectors: java.lang.Iterable[CapacityMetricsSelector]) = this(immutableSeq(selectors).toVector) override def capacity(nodeMetrics: Set[NodeMetrics]): Map[Address, Double] = { @@ -382,13 +365,12 @@ abstract class MixMetricsSelectorBase(selectors: immutable.IndexedSeq[CapacityMe // aggregated average of the capacities by address val init: Map[Address, (Double, Int)] = Map.empty.withDefaultValue((0.0, 0)) combined - .foldLeft(init) { - case (acc, (address, capacity)) => - val (sum, count) = acc(address) - acc + (address -> ((sum + capacity, count + 1))) + .foldLeft(init) { case (acc, (address, capacity)) => + val (sum, count) = acc(address) + acc + (address -> ((sum + capacity, count + 1))) } - .map { - case (address, (sum, count)) => address -> (sum / count) + .map { case (address, (sum, count)) => + address -> (sum / count) } } @@ -405,28 +387,23 @@ object MetricsSelector { val args = List(classOf[Config] -> config) dynamicAccess .createInstanceFor[MetricsSelector](fqn, args) - .recover({ - case exception => - throw new IllegalArgumentException( - (s"Cannot instantiate metrics-selector [$fqn], " + - "make sure it extends [akka.cluster.routing.MetricsSelector] and " + - "has constructor with [com.typesafe.config.Config] parameter"), - exception) - }) + .recover { case exception => + throw new IllegalArgumentException( + s"Cannot instantiate metrics-selector [$fqn], " + + "make sure it extends [akka.cluster.routing.MetricsSelector] and " + + "has constructor with [com.typesafe.config.Config] parameter", + exception) + } .get } } -/** - * A MetricsSelector is responsible for producing weights from the node metrics. 
- */ +/** A MetricsSelector is responsible for producing weights from the node metrics. */ @nowarn("msg=@SerialVersionUID") @SerialVersionUID(1L) trait MetricsSelector extends Serializable { - /** - * The weights per address, based on the nodeMetrics. - */ + /** The weights per address, based on the nodeMetrics. */ def weights(nodeMetrics: Set[NodeMetrics]): Map[Address, Int] } @@ -458,7 +435,7 @@ abstract class CapacityMetricsSelector extends MetricsSelector { val (_, min) = capacity.minBy { case (_, c) => c } // lowest usable capacity is 1% (>= 0.5% will be rounded to weight 1), also avoids div by zero val divisor = math.max(0.01, min) - capacity.map { case (address, c) => (address -> math.round((c) / divisor).toInt) } + capacity.map { case (address, c) => address -> math.round(c / divisor).toInt } } } @@ -516,9 +493,7 @@ private[metrics] class WeightedRoutees( buckets(buckets.length - 1) } - /** - * Pick the routee matching a value, from 1 to total. - */ + /** Pick the routee matching a value, from 1 to total. */ def apply(value: Int): Routee = { require(1 <= value && value <= total, "value must be between [1 - %s]".format(total)) routees(idx(Arrays.binarySearch(buckets, value))) diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsSettings.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsSettings.scala index a58dbe543ba..07e949eaa0a 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsSettings.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsSettings.scala @@ -12,9 +12,7 @@ import com.typesafe.config.Config import akka.util.Helpers.ConfigOps import akka.util.Helpers.Requiring -/** - * Metrics extension settings. Documented in: `src/main/resources/reference.conf`. - */ +/** Metrics extension settings. Documented in: `src/main/resources/reference.conf`. 
*/ case class ClusterMetricsSettings(config: Config) { private val cc = config.getConfig("akka.cluster.metrics") diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsStrategy.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsStrategy.scala index 5823ed00483..a5e9e014acd 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsStrategy.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsStrategy.scala @@ -19,16 +19,12 @@ class ClusterMetricsStrategy(config: Config) withinTimeRange = config.getMillisDuration("withinTimeRange"), loggingEnabled = config.getBoolean("loggingEnabled"))(ClusterMetricsStrategy.metricsDecider) -/** - * Provide custom metrics strategy resources. - */ +/** Provide custom metrics strategy resources. */ object ClusterMetricsStrategy { import akka.actor._ import akka.actor.SupervisorStrategy._ - /** - * [[akka.actor.SupervisorStrategy]] `Decider` which allows to survive intermittent Sigar native method calls failures. - */ + /** [[akka.actor.SupervisorStrategy]] `Decider` which allows to survive intermittent Sigar native method calls failures. */ val metricsDecider: SupervisorStrategy.Decider = { case _: ActorInitializationException => Stop case _: ActorKilledException => Stop diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/EWMA.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/EWMA.scala index 1f39304d06a..75d1b0c7620 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/EWMA.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/EWMA.scala @@ -23,7 +23,6 @@ import scala.concurrent.duration.FiniteDuration * @param value the current exponentially weighted moving average, e.g. Y(n - 1), or, * the sampled value resulting from the previous smoothing iteration. * This value is always used as the previous EWMA to calculate the new EWMA. 
- * */ @SerialVersionUID(1L) final case class EWMA(value: Double, alpha: Double) { @@ -46,9 +45,7 @@ final case class EWMA(value: Double, alpha: Double) { object EWMA { - /** - * math.log(2) - */ + /** math.log(2) */ private val LogOf2 = 0.69315 /** diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Metric.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Metric.scala index bfa83bba515..d725eaccac3 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Metric.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Metric.scala @@ -34,27 +34,22 @@ final case class Metric private[metrics] (name: String, value: Number, average: */ def :+(latest: Metric): Metric = if (this.sameAs(latest)) average match { - case Some(avg) => copy(value = latest.value, average = Some(avg :+ latest.value.doubleValue)) + case Some(avg) => copy(value = latest.value, average = Some(avg :+ latest.value.doubleValue)) case None if latest.average.isDefined => copy(value = latest.value, average = latest.average) case _ => copy(value = latest.value) - } else this + } + else this - /** - * The numerical value of the average, if defined, otherwise the latest value - */ + /** The numerical value of the average, if defined, otherwise the latest value */ def smoothValue: Double = average match { case Some(avg) => avg.value case None => value.doubleValue } - /** - * @return true if this value is smoothed - */ + /** @return true if this value is smoothed */ def isSmooth: Boolean = average.isDefined - /** - * Returns true if that is tracking the same metric as this. - */ + /** Returns true if that is tracking the same metric as this. */ def sameAs(that: Metric): Boolean = name == that.name override def hashCode = name.## @@ -65,9 +60,7 @@ final case class Metric private[metrics] (name: String, value: Number, average: } -/** - * Factory for creating valid Metric instances. - */ +/** Factory for creating valid Metric instances. 
*/ object Metric extends MetricNumericConverter { /** @@ -256,9 +249,7 @@ private[metrics] trait MetricNumericConverter { case Right(b) => !(b < 0.0 || b.isNaN || b.isInfinite) } - /** - * May involve rounding or truncation. - */ + /** May involve rounding or truncation. */ def convertNumber(from: Any): Either[Long, Double] = from match { case n: Int => Left(n) case n: Long => Left(n) @@ -284,9 +275,7 @@ private[metrics] trait MetricNumericConverter { @SerialVersionUID(1L) final case class NodeMetrics(address: Address, timestamp: Long, metrics: Set[Metric] = Set.empty[Metric]) { - /** - * Returns the most recent data. - */ + /** Returns the most recent data. */ def merge(that: NodeMetrics): NodeMetrics = { require(address == that.address, s"merge only allowed for same address, [$address] != [$that.address]") if (timestamp >= that.timestamp) this // that is older @@ -296,9 +285,7 @@ final case class NodeMetrics(address: Address, timestamp: Long, metrics: Set[Met } } - /** - * Returns the most recent data with [[EWMA]] averaging. - */ + /** Returns the most recent data with [[EWMA]] averaging. */ def update(that: NodeMetrics): NodeMetrics = { require(address == that.address, s"update only allowed for same address, [$address] != [$that.address]") // Apply sample ordering. 
@@ -320,16 +307,12 @@ final case class NodeMetrics(address: Address, timestamp: Long, metrics: Set[Met def metric(key: String): Option[Metric] = metrics.collectFirst { case m if m.name == key => m } - /** - * Java API - */ + /** Java API */ @nowarn("msg=deprecated") def getMetrics: java.lang.Iterable[Metric] = scala.collection.JavaConverters.asJavaIterableConverter(metrics).asJava - /** - * Returns true if that address is the same as this - */ + /** Returns true if that address is the same as this */ def sameAs(that: NodeMetrics): Boolean = address == that.address override def hashCode = address.## @@ -340,9 +323,7 @@ final case class NodeMetrics(address: Address, timestamp: Long, metrics: Set[Met } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[metrics] object MetricsGossip { val empty = MetricsGossip(Set.empty[NodeMetrics]) } @@ -355,37 +336,27 @@ private[metrics] object MetricsGossip { @SerialVersionUID(1L) private[metrics] final case class MetricsGossip(nodes: Set[NodeMetrics]) { - /** - * Removes nodes if their correlating node ring members are not [[akka.cluster.MemberStatus]] `Up`. - */ + /** Removes nodes if their correlating node ring members are not [[akka.cluster.MemberStatus]] `Up`. */ def remove(node: Address): MetricsGossip = copy(nodes = nodes.filterNot(_.address == node)) - /** - * Only the nodes that are in the `includeNodes` Set. - */ + /** Only the nodes that are in the `includeNodes` Set. */ def filter(includeNodes: Set[Address]): MetricsGossip = copy(nodes = nodes.filter { includeNodes contains _.address }) - /** - * Adds new remote [[NodeMetrics]] and merges existing from a remote gossip. - */ + /** Adds new remote [[NodeMetrics]] and merges existing from a remote gossip. */ def merge(otherGossip: MetricsGossip): MetricsGossip = otherGossip.nodes.foldLeft(this) { (gossip, nodeMetrics) => gossip :+ nodeMetrics } - /** - * Adds new local [[NodeMetrics]], or merges an existing. 
- */ + /** Adds new local [[NodeMetrics]], or merges an existing. */ def :+(newNodeMetrics: NodeMetrics): MetricsGossip = nodeMetricsFor(newNodeMetrics.address) match { case Some(existingNodeMetrics) => copy(nodes = nodes - existingNodeMetrics + (existingNodeMetrics.update(newNodeMetrics))) case None => copy(nodes = nodes + newNodeMetrics) } - /** - * Returns [[NodeMetrics]] for a node if exists. - */ + /** Returns [[NodeMetrics]] for a node if exists. */ def nodeMetricsFor(address: Address): Option[NodeMetrics] = nodes.find { n => n.address == address } diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/MetricsCollector.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/MetricsCollector.scala index acaf44c943f..ea4ba35bd1f 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/MetricsCollector.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/MetricsCollector.scala @@ -77,8 +77,8 @@ private[metrics] object MetricsCollector { else // Use complete fall back chain. 
create(collectorCustom).orElse(create(collectorSigar)).orElse(create(collectorJMX)) - collector.recover { - case e => throw new ConfigurationException(s"Could not create metrics collector: ${e.getMessage}", e) + collector.recover { case e => + throw new ConfigurationException(s"Could not create metrics collector: ${e.getMessage}", e) }.get } } @@ -95,9 +95,7 @@ class JmxMetricsCollector(address: Address, decayFactor: Double) extends Metrics private def this(address: Address, settings: ClusterMetricsSettings) = this(address, EWMA.alpha(settings.CollectorMovingAverageHalfLife, settings.CollectorSampleInterval)) - /** - * This constructor is used when creating an instance from configured FQCN - */ + /** This constructor is used when creating an instance from configured FQCN */ def this(system: ActorSystem) = this(Cluster(system).selfAddress, ClusterMetricsExtension(system).settings) private val decayFactorOption = Some(decayFactor) @@ -137,9 +135,7 @@ class JmxMetricsCollector(address: Address, decayFactor: Double) extends Metrics def processors: Option[Metric] = Metric.create(name = Processors, value = osMBean.getAvailableProcessors, decayFactor = None) - /** - * Current heap to be passed in to heapUsed, heapCommitted and heapMax - */ + /** Current heap to be passed in to heapUsed, heapCommitted and heapMax */ def heapMemoryUsage: MemoryUsage = memoryMBean.getHeapMemoryUsage /** @@ -194,16 +190,12 @@ class SigarMetricsCollector(address: Address, decayFactor: Double, sigar: SigarP def this(address: Address, settings: ClusterMetricsSettings) = this(address, settings, DefaultSigarProvider(settings).createSigarInstance) - /** - * This constructor is used when creating an instance from configured FQCN - */ + /** This constructor is used when creating an instance from configured FQCN */ def this(system: ActorSystem) = this(Cluster(system).selfAddress, ClusterMetricsExtension(system).settings) private val decayFactorOption = Some(decayFactor) - /** - * Verify at the end of 
construction that Sigar is operational. - */ + /** Verify at the end of construction that Sigar is operational. */ metrics() // Construction complete. @@ -254,9 +246,7 @@ class SigarMetricsCollector(address: Address, decayFactor: Double, sigar: SigarP def cpuIdle(cpuPerc: CpuPerc): Option[Metric] = Metric.create(name = CpuIdle, value = cpuPerc.getIdle.asInstanceOf[Number], decayFactor = decayFactorOption) - /** - * Releases any native resources associated with this instance. - */ + /** Releases any native resources associated with this instance. */ override def close(): Unit = SigarProvider.close(sigar) } diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Provision.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Provision.scala index ed69e7635ba..fb6f689f6d1 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Provision.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Provision.scala @@ -67,12 +67,11 @@ trait SigarProvider { TryNative { verifiedSigarInstance }.orElse(TryNative { - provisionSigarLibrary() - verifiedSigarInstance - }) - .recover { - case e: Throwable => throw new RuntimeException("Failed to load sigar:", e) - } get + provisionSigarLibrary() + verifiedSigarInstance + }).recover { case e: Throwable => + throw new RuntimeException("Failed to load sigar:", e) + } get } } @@ -89,16 +88,12 @@ object SigarProvider { } } -/** - * Provide sigar instance as `SigarProxy` with configured location via [[ClusterMetricsSettings]]. - */ +/** Provide sigar instance as `SigarProxy` with configured location via [[ClusterMetricsSettings]]. 
*/ case class DefaultSigarProvider(settings: ClusterMetricsSettings) extends SigarProvider { def extractFolder = settings.NativeLibraryExtractFolder } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[metrics] object TryNative { def apply[T](r: => T): Try[T] = try Success(r) diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/protobuf/MessageSerializer.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/protobuf/MessageSerializer.scala index d4576fe7115..e43b3d25356 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/protobuf/MessageSerializer.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/protobuf/MessageSerializer.scala @@ -23,9 +23,7 @@ import akka.util.ClassLoaderObjectInputStream import akka.util.ccompat._ import akka.util.ccompat.JavaConverters._ -/** - * Protobuf serializer for [[akka.cluster.metrics.ClusterMetricsMessage]] types. - */ +/** Protobuf serializer for [[akka.cluster.metrics.ClusterMetricsMessage]] types. 
*/ @ccompatUsedUntil213 class MessageSerializer(val system: ExtendedActorSystem) extends SerializerWithStringManifest with BaseSerializer { @@ -307,7 +305,7 @@ class MessageSerializer(val system: ExtendedActorSystem) extends SerializerWithS val mm = cm.MixMetricsSelector.parseFrom(bytes) MixMetricsSelector( mm.getSelectorsList.asScala - // should be safe because we serialized only the right subtypes of MetricsSelector + // should be safe because we serialized only the right subtypes of MetricsSelector .map(s => metricSelectorFromProto(s).asInstanceOf[CapacityMetricsSelector]) .toIndexedSeq) } diff --git a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala index ed98a36901b..c7cc97cd021 100644 --- a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala +++ b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala @@ -108,10 +108,10 @@ abstract class ClusterMetricsEnabledSpec enterBarrier("cluster-started") awaitAssert(clusterView.members.count(_.status == MemberStatus.Up) should ===(roles.size)) // TODO ensure same contract - //awaitAssert(clusterView.clusterMetrics.size should ===(roles.size)) + // awaitAssert(clusterView.clusterMetrics.size should ===(roles.size)) awaitAssert(metricsView.clusterMetrics.size should ===(roles.size)) val collector = MetricsCollector(cluster.system) - collector.sample().metrics.size should be > (3) + collector.sample().metrics.size should be > 3 enterBarrier("after") } "reflect the correct number of node metrics in cluster view" in within(30 seconds) { @@ -122,7 +122,7 @@ abstract class ClusterMetricsEnabledSpec runOn(node2, node3, node4, node5) { markNodeAsUnavailable(node1) // TODO ensure same contract - //awaitAssert(clusterView.clusterMetrics.size should ===(roles.size - 1)) + // 
awaitAssert(clusterView.clusterMetrics.size should ===(roles.size - 1)) awaitAssert(metricsView.clusterMetrics.size should ===(roles.size - 1)) } enterBarrier("finished") @@ -146,12 +146,12 @@ abstract class ClusterMetricsDisabledSpec "not collect metrics, not publish metrics events, and not gossip metrics" in { awaitClusterUp(roles: _*) // TODO ensure same contract - //clusterView.clusterMetrics.size should ===(0) + // clusterView.clusterMetrics.size should ===(0) metricsView.clusterMetrics.size should ===(0) ClusterMetricsExtension(system).subscribe(testActor) expectNoMessage() // TODO ensure same contract - //clusterView.clusterMetrics.size should ===(0) + // clusterView.clusterMetrics.size should ===(0) metricsView.clusterMetrics.size should ===(0) enterBarrier("after") } diff --git a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala index b50e130de1e..2f282116657 100644 --- a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala +++ b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala @@ -33,26 +33,25 @@ import akka.util.unused object AdaptiveLoadBalancingRouterConfig extends MultiNodeConfig { class Echo extends Actor { - def receive = { - case _ => sender() ! Reply(Cluster(context.system).selfAddress) + def receive = { case _ => + sender() ! 
Reply(Cluster(context.system).selfAddress) } } class Memory extends Actor with ActorLogging { var usedMemory: Array[Array[Int]] = _ - def receive = { - case AllocateMemory => - val heap = ManagementFactory.getMemoryMXBean.getHeapMemoryUsage - // getMax can be undefined (-1) - val max = math.max(heap.getMax, heap.getCommitted) - val used = heap.getUsed - log.info("used heap before: [{}] bytes, of max [{}]", used, heap.getMax) - // allocate 70% of free space - val allocateBytes = (0.7 * (max - used)).toInt - val numberOfArrays = allocateBytes / 1024 - usedMemory = Array.ofDim(numberOfArrays, 248) // each 248 element Int array will use ~ 1 kB - log.info("used heap after: [{}] bytes", ManagementFactory.getMemoryMXBean.getHeapMemoryUsage.getUsed) - sender() ! "done" + def receive = { case AllocateMemory => + val heap = ManagementFactory.getMemoryMXBean.getHeapMemoryUsage + // getMax can be undefined (-1) + val max = math.max(heap.getMax, heap.getCommitted) + val used = heap.getUsed + log.info("used heap before: [{}] bytes, of max [{}]", used, heap.getMax) + // allocate 70% of free space + val allocateBytes = (0.7 * (max - used)).toInt + val numberOfArrays = allocateBytes / 1024 + usedMemory = Array.ofDim(numberOfArrays, 248) // each 248 element Int array will use ~ 1 kB + log.info("used heap after: [{}] bytes", ManagementFactory.getMemoryMXBean.getHeapMemoryUsage.getUsed) + sender() ! 
"done" } } @@ -133,16 +132,14 @@ abstract class AdaptiveLoadBalancingRouterSpec def receiveReplies(expectedReplies: Int): Map[Address, Int] = { val zero = Map.empty[Address, Int] ++ roles.map(address(_) -> 0) - (receiveWhile(5 seconds, messages = expectedReplies) { - case Reply(address) => address - }).foldLeft(zero) { - case (replyMap, address) => replyMap + (address -> (replyMap(address) + 1)) + receiveWhile(5 seconds, messages = expectedReplies) { case Reply(address) => + address + }.foldLeft(zero) { case (replyMap, address) => + replyMap + (address -> (replyMap(address) + 1)) } } - /** - * Fills in self address for local ActorRef - */ + /** Fills in self address for local ActorRef */ def fullAddress(actorRef: ActorRef): Address = actorRef.path.address match { case Address(_, _, None, None) => cluster.selfAddress case a => a @@ -188,9 +185,9 @@ abstract class AdaptiveLoadBalancingRouterSpec val replies = receiveReplies(iterationCount) - replies(node1) should be > (0) - replies(node2) should be > (0) - replies(node3) should be > (0) + replies(node1) should be > 0 + replies(node2) should be > 0 + replies(node3) should be > 0 replies.values.sum should ===(iterationCount) } diff --git a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala index 827d9b2bc87..1ea4cc7a481 100644 --- a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala +++ b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala @@ -87,24 +87,24 @@ abstract class StatsSampleSpec override def afterAll() = multiNodeSpecAfterAll() - //#abstract-test + // #abstract-test "The stats sample" must { - //#startup-cluster + // #startup-cluster "illustrate how to startup cluster" in within(15 seconds) { Cluster(system).subscribe(testActor, classOf[MemberUp]) expectMsgClass(classOf[CurrentClusterState]) - 
//#addresses + // #addresses val firstAddress = node(first).address val secondAddress = node(second).address val thirdAddress = node(third).address - //#addresses + // #addresses - //#join + // #join Cluster(system).join(firstAddress) - //#join + // #join system.actorOf(Props[StatsWorker](), "statsWorker") system.actorOf(Props[StatsService](), "statsService") @@ -116,9 +116,9 @@ abstract class StatsSampleSpec testConductor.enter("all-up") } - //#startup-cluster + // #startup-cluster - //#test-statsService + // #test-statsService "show usage of the statsService from one node" in within(15 seconds) { runOn(second) { assertServiceOk() @@ -137,7 +137,7 @@ abstract class StatsSampleSpec } } - //#test-statsService + // #test-statsService "show usage of the statsService from all nodes" in within(15 seconds) { assertServiceOk() diff --git a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala index a1053e4b0c4..c9ae5a8e6cf 100644 --- a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala +++ b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala @@ -50,7 +50,7 @@ class StatsAggregator(expectedResults: Int, replyTo: ActorRef) extends Actor { // not used, only for documentation abstract class StatsService2 extends Actor { - //#router-lookup-in-code + // #router-lookup-in-code import akka.cluster.routing.{ ClusterRouterGroup, ClusterRouterGroupSettings } import akka.routing.ConsistentHashingGroup @@ -63,12 +63,12 @@ abstract class StatsService2 extends Actor { allowLocalRoutees = true, useRoles = Set("compute"))).props(), name = "workerRouter2") - //#router-lookup-in-code + // #router-lookup-in-code } // not used, only for documentation abstract class StatsService3 extends Actor { - //#router-deploy-in-code + // #router-deploy-in-code import akka.cluster.routing.{ ClusterRouterPool, 
ClusterRouterPoolSettings } import akka.routing.ConsistentHashingPool @@ -78,5 +78,5 @@ abstract class StatsService3 extends Actor { ClusterRouterPoolSettings(totalInstances = 100, maxInstancesPerNode = 3, allowLocalRoutees = false)) .props(Props[StatsWorker]()), name = "workerRouter3") - //#router-deploy-in-code + // #router-deploy-in-code } diff --git a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsWorker.scala b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsWorker.scala index d82c3ef574a..115eabc2cbe 100644 --- a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsWorker.scala +++ b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsWorker.scala @@ -9,17 +9,16 @@ import akka.actor.Actor //#worker class StatsWorker extends Actor { var cache = Map.empty[String, Int] - def receive = { - case word: String => - val length = cache.get(word) match { - case Some(x) => x - case None => - val x = word.length - cache += (word -> x) - x - } + def receive = { case word: String => + val length = cache.get(word) match { + case Some(x) => x + case None => + val x = word.length + cache += (word -> x) + x + } - sender() ! length + sender() ! 
length } } //#worker diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala index b3b819f6cb7..a6dd846d7b5 100644 --- a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala +++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala @@ -73,17 +73,16 @@ class ClusterMetricsExtensionSpec expected.size should ===(sampleCount) - history.zip(expected).foreach { - case (mockMetrics, expectedData) => - (mockMetrics, expectedData) match { - case ( + history.zip(expected).foreach { case (mockMetrics, expectedData) => + (mockMetrics, expectedData) match { + case ( Cpu(_, _, loadAverageMock, cpuCombinedMock, cpuStolenMock, _), (loadAverageEwma, cpuCombinedEwma, cpuStolenEwma)) => - loadAverageMock.get should ===(loadAverageEwma +- epsilon) - cpuCombinedMock.get should ===(cpuCombinedEwma +- epsilon) - cpuStolenMock.get should ===(cpuStolenEwma +- epsilon) - case _ => fail() - } + loadAverageMock.get should ===(loadAverageEwma +- epsilon) + cpuCombinedMock.get should ===(cpuCombinedEwma +- epsilon) + cpuStolenMock.get should ===(cpuStolenEwma +- epsilon) + case _ => fail() + } } } diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala index cfeb9ecdb25..36ff68b1bc4 100644 --- a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala +++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala @@ -124,7 +124,7 @@ class MetricsSelectorSpec extends AnyWordSpec with Matchers { capacity(a1) should ===((0.75 + 0.67 + 0.9375) / 3 +- 0.0001) capacity(b1) should ===((0.75 + 0.34 + 0.9375) / 3 +- 0.0001) capacity(c1) should ===((0.0 + 0.01 + 0.0) / 3 +- 0.0001) - 
capacity(d1) should ===((0.001953125) / 1 +- 0.0001) + capacity(d1) should ===(0.001953125 / 1 +- 0.0001) } } diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/EWMASpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/EWMASpec.scala index 14842615dde..ea6fd95691b 100644 --- a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/EWMASpec.scala +++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/EWMASpec.scala @@ -64,15 +64,15 @@ class EWMASpec extends AkkaSpec(MetricsConfig.defaultEnabled) with MetricsCollec "calculate sane alpha from short half-life" in { val alpha = EWMA.alpha(1.millis, 3.seconds) - alpha should be <= (1.0) - alpha should be >= (0.0) + alpha should be <= 1.0 + alpha should be >= 0.0 alpha should ===(1.0 +- 0.001) } "calculate sane alpha from long half-life" in { val alpha = EWMA.alpha(1.day, 3.seconds) - alpha should be <= (1.0) - alpha should be >= (0.0) + alpha should be <= 1.0 + alpha should be >= 0.0 alpha should ===(0.0 +- 0.001) } diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/MetricSpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/MetricSpec.scala index bc7acedbd5b..3fb3042c750 100644 --- a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/MetricSpec.scala +++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/MetricSpec.scala @@ -250,14 +250,13 @@ class MetricValuesSpec extends AkkaSpec(MetricsConfig.defaultEnabled) with Metri val nodes: Seq[NodeMetrics] = { (1 to 100).foldLeft(List(node1, node2)) { (nodes, _) => nodes.map { n => - n.copy( - metrics = collector - .sample() - .metrics - .flatMap(latest => - n.metrics.collect { - case streaming if latest.sameAs(streaming) => streaming :+ latest - })) + n.copy(metrics = collector + .sample() + .metrics + .flatMap(latest => + n.metrics.collect { + case streaming if latest.sameAs(streaming) => streaming :+ latest + })) } } } diff --git 
a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/MetricsCollectorSpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/MetricsCollectorSpec.scala index df95cd8803f..9df52af5252 100644 --- a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/MetricsCollectorSpec.scala +++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/MetricsCollectorSpec.scala @@ -57,8 +57,8 @@ class MetricsCollectorSpec "collect accurate metrics for a node" in { val sample = collector.sample() - val metrics = sample.metrics.collect { case m => (m.name, m.value) } - val used = metrics.collectFirst { case (HeapMemoryUsed, b) => b } + val metrics = sample.metrics.collect { case m => (m.name, m.value) } + val used = metrics.collectFirst { case (HeapMemoryUsed, b) => b } val committed = metrics.collectFirst { case (HeapMemoryCommitted, b) => b } metrics.foreach { case (SystemLoadAverage, b) => b.doubleValue should be >= 0.0 diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/TestUtil.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/TestUtil.scala index c71047a1ded..ce77f98c26a 100644 --- a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/TestUtil.scala +++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/TestUtil.scala @@ -26,9 +26,7 @@ import akka.dispatch.RequiresMessageQueue import akka.dispatch.UnboundedMessageQueueSemantics import akka.testkit.AkkaSpec -/** - * Redirect different logging sources to SLF4J. - */ +/** Redirect different logging sources to SLF4J. */ trait RedirectLogging { def redirectLogging(): Unit = { @@ -41,16 +39,12 @@ trait RedirectLogging { } -/** - * Provide sigar library from `project/target` location. - */ +/** Provide sigar library from `project/target` location. */ case class SimpleSigarProvider(location: String = "native") extends SigarProvider { def extractFolder = s"${System.getProperty("user.dir")}/target/${location}" } -/** - * Provide sigar library as static mock. 
- */ +/** Provide sigar library as static mock. */ case class MockitoSigarProvider( pid: Long = 123, loadAverage: Array[Double] = Array(0.7, 0.3, 0.1), @@ -106,7 +100,7 @@ trait MetricsCollectorFactory { this: AkkaSpec => def createMetricsCollector: MetricsCollector = try { new SigarMetricsCollector(selfAddress, defaultDecayFactor, new Sigar()) - //new SigarMetricsCollector(selfAddress, defaultDecayFactor, SimpleSigarProvider().createSigarInstance) + // new SigarMetricsCollector(selfAddress, defaultDecayFactor, SimpleSigarProvider().createSigarInstance) } catch { case e: Throwable => log.warning("Sigar failed to load. Using JMX. Reason: " + e.toString) @@ -133,7 +127,6 @@ trait MetricsCollectorFactory { this: AkkaSpec => } /** - * */ class MockitoSigarMetricsCollector(system: ActorSystem) extends SigarMetricsCollector( @@ -141,9 +134,7 @@ class MockitoSigarMetricsCollector(system: ActorSystem) MetricsConfig.defaultDecayFactor, MockitoSigarProvider().createSigarInstance) {} -/** - * Metrics test configurations. - */ +/** Metrics test configurations. */ object MetricsConfig { val defaultDecayFactor = 2.0 / (1 + 10) @@ -189,9 +180,7 @@ object MetricsConfig { """ } -/** - * Current cluster metrics, updated periodically via event bus. - */ +/** Current cluster metrics, updated periodically via event bus. 
*/ class ClusterMetricsView(system: ExtendedActorSystem) extends Closeable { val extension = ClusterMetricsExtension(system) diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/WeightedRouteesSpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/WeightedRouteesSpec.scala index cf9022030d9..ee061641b1e 100644 --- a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/WeightedRouteesSpec.scala +++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/WeightedRouteesSpec.scala @@ -85,14 +85,14 @@ class WeightedRouteesSpec extends AkkaSpec(ConfigFactory.parseString(""" val weighted = new WeightedRoutees(routees2, a1, weights) (1 to 2).foreach { weighted(_) should ===(testActorRoutee) } - (3 to weighted.total).foreach { weighted(_) should not be (testActorRoutee) } + (3 to weighted.total).foreach { weighted(_) should not be testActorRoutee } } "not allocate ref with weight zero" in { val weights = Map(a1 -> 0, b1 -> 2, c1 -> 10) val weighted = new WeightedRoutees(routees, a1, weights) - (1 to weighted.total).foreach { weighted(_) should not be (routeeA) } + (1 to weighted.total).foreach { weighted(_) should not be routeeA } } } diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ClusterShardingQuery.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ClusterShardingQuery.scala index 2d41b9030fc..efdd65a0237 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ClusterShardingQuery.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ClusterShardingQuery.scala @@ -12,9 +12,7 @@ import akka.cluster.sharding.ShardRegion.CurrentShardRegionState import akka.cluster.sharding.typed.scaladsl.EntityTypeKey import akka.util.JavaDurationConverters -/** - * Protocol for querying sharding state e.g. A ShardRegion's state - */ +/** Protocol for querying sharding state e.g. 
A ShardRegion's state */ sealed trait ClusterShardingQuery /** diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ClusterShardingSettings.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ClusterShardingSettings.scala index 5d5ec5382ef..7bb0e795c6c 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ClusterShardingSettings.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ClusterShardingSettings.scala @@ -85,7 +85,8 @@ object ClusterShardingSettings { rebalanceInterval = settings.tuningParameters.rebalanceInterval, snapshotAfter = settings.tuningParameters.snapshotAfter, keepNrOfBatches = settings.tuningParameters.keepNrOfBatches, - leastShardAllocationRebalanceThreshold = settings.tuningParameters.leastShardAllocationRebalanceThreshold, // TODO extract it a bit + leastShardAllocationRebalanceThreshold = + settings.tuningParameters.leastShardAllocationRebalanceThreshold, // TODO extract it a bit leastShardAllocationMaxSimultaneousRebalance = settings.tuningParameters.leastShardAllocationMaxSimultaneousRebalance, waitingForStateTimeout = settings.tuningParameters.waitingForStateTimeout, @@ -115,14 +116,10 @@ object ClusterShardingSettings { sealed trait StateStoreMode { def name: String } - /** - * Java API - */ + /** Java API */ def stateStoreModePersistence(): StateStoreMode = StateStoreModePersistence - /** - * Java API - */ + /** Java API */ def stateStoreModeDdata(): StateStoreMode = StateStoreModePersistence object StateStoreMode { @@ -139,14 +136,10 @@ object ClusterShardingSettings { case object StateStoreModeDData extends StateStoreMode { override def name = "ddata" } - /** - * Java API - */ + /** Java API */ def rememberEntitiesStoreModeEventSourced(): RememberEntitiesStoreMode = RememberEntitiesStoreModeEventSourced - /** - * Java API - */ + /** Java API */ def rememberEntitiesStoreModeDdata(): RememberEntitiesStoreMode = 
RememberEntitiesStoreModeDData sealed trait RememberEntitiesStoreMode { def name: String } @@ -165,9 +158,7 @@ object ClusterShardingSettings { } case object RememberEntitiesStoreModeDData extends RememberEntitiesStoreMode { override def name = "ddata" } - /** - * API MAY CHANGE: Settings for passivation strategies may change after additional testing and feedback. - */ + /** API MAY CHANGE: Settings for passivation strategies may change after additional testing and feedback. */ @ApiMayChange final class PassivationStrategySettings private ( val idleEntitySettings: Option[PassivationStrategySettings.IdleSettings], @@ -260,9 +251,7 @@ object ClusterShardingSettings { oldSettingUsed) } - /** - * API MAY CHANGE: Settings for passivation strategies may change after additional testing and feedback. - */ + /** API MAY CHANGE: Settings for passivation strategies may change after additional testing and feedback. */ @ApiMayChange object PassivationStrategySettings { import ClassicShardingSettings.{ PassivationStrategySettings => ClassicPassivationStrategySettings } @@ -680,7 +669,8 @@ object ClusterShardingSettings { rebalanceInterval = classic.rebalanceInterval, snapshotAfter = classic.snapshotAfter, keepNrOfBatches = classic.keepNrOfBatches, - leastShardAllocationRebalanceThreshold = classic.leastShardAllocationRebalanceThreshold, // TODO extract it a bit + leastShardAllocationRebalanceThreshold = + classic.leastShardAllocationRebalanceThreshold, // TODO extract it a bit leastShardAllocationMaxSimultaneousRebalance = classic.leastShardAllocationMaxSimultaneousRebalance, waitingForStateTimeout = classic.waitingForStateTimeout, updatingStateTimeout = classic.updatingStateTimeout, @@ -999,9 +989,7 @@ final class ClusterShardingSettings( def withPassivateIdleEntityAfter(duration: java.time.Duration): ClusterShardingSettings = copy(passivationStrategySettings = passivationStrategySettings.withOldIdleStrategy(duration.asScala)) - /** - * API MAY CHANGE: Settings for passivation 
strategies may change after additional testing and feedback. - */ + /** API MAY CHANGE: Settings for passivation strategies may change after additional testing and feedback. */ @ApiMayChange def withPassivationStrategy(settings: ClusterShardingSettings.PassivationStrategySettings): ClusterShardingSettings = copy(passivationStrategySettings = settings) diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ReplicatedEntityProvider.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ReplicatedEntityProvider.scala index 26d72fc7e5f..755bbaf3c65 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ReplicatedEntityProvider.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ReplicatedEntityProvider.scala @@ -51,13 +51,16 @@ object ReplicatedEntityProvider { */ def apply[M: ClassTag](typeName: String, allReplicaIds: Set[ReplicaId])( settingsPerReplicaFactory: (EntityTypeKey[M], ReplicaId) => ReplicatedEntity[M]): ReplicatedEntityProvider[M] = { - new ReplicatedEntityProvider(allReplicaIds.map { replicaId => - if (typeName.contains(Separator)) - throw new IllegalArgumentException(s"typeName [$typeName] contains [$Separator] which is a reserved character") - - val typeKey = EntityTypeKey[M](s"$typeName${Separator}${replicaId.id}") - (settingsPerReplicaFactory(typeKey, replicaId), typeName) - }.toVector, directReplication = true) + new ReplicatedEntityProvider( + allReplicaIds.map { replicaId => + if (typeName.contains(Separator)) + throw new IllegalArgumentException( + s"typeName [$typeName] contains [$Separator] which is a reserved character") + + val typeKey = EntityTypeKey[M](s"$typeName${Separator}${replicaId.id}") + (settingsPerReplicaFactory(typeKey, replicaId), typeName) + }.toVector, + directReplication = true) } /** @@ -69,9 +72,11 @@ object ReplicatedEntityProvider { def perDataCenter[M: ClassTag, E](typeName: String, allReplicaIds: 
Set[ReplicaId])( create: ReplicationId => Behavior[M]): ReplicatedEntityProvider[M] = { apply(typeName, allReplicaIds) { (typeKey, replicaId) => - ReplicatedEntity(replicaId, Entity(typeKey) { entityContext => - create(ReplicationId.fromString(entityContext.entityId)) - }.withDataCenter(replicaId.id)) + ReplicatedEntity( + replicaId, + Entity(typeKey) { entityContext => + create(ReplicationId.fromString(entityContext.entityId)) + }.withDataCenter(replicaId.id)) } } @@ -85,9 +90,11 @@ object ReplicatedEntityProvider { def perRole[M: ClassTag, E](typeName: String, allReplicaIds: Set[ReplicaId])( create: ReplicationId => Behavior[M]): ReplicatedEntityProvider[M] = { apply(typeName, allReplicaIds) { (typeKey, replicaId) => - ReplicatedEntity(replicaId, Entity(typeKey) { entityContext => - create(ReplicationId.fromString(entityContext.entityId)) - }.withRole(replicaId.id)) + ReplicatedEntity( + replicaId, + Entity(typeKey) { entityContext => + create(ReplicationId.fromString(entityContext.entityId)) + }.withRole(replicaId.id)) } } @@ -104,9 +111,11 @@ object ReplicatedEntityProvider { createBehavior: java.util.function.Function[ReplicationId, Behavior[M]]): ReplicatedEntityProvider[M] = { implicit val classTag: ClassTag[M] = ClassTag(messageClass) apply(typeName, allReplicaIds.asScala.toSet) { (typeKey, replicaId) => - ReplicatedEntity(replicaId, Entity(typeKey) { entityContext => - createBehavior(ReplicationId.fromString(entityContext.entityId)) - }.withDataCenter(replicaId.id)) + ReplicatedEntity( + replicaId, + Entity(typeKey) { entityContext => + createBehavior(ReplicationId.fromString(entityContext.entityId)) + }.withDataCenter(replicaId.id)) } } @@ -125,17 +134,16 @@ object ReplicatedEntityProvider { createBehavior: akka.japi.function.Function[ReplicationId, Behavior[M]]): ReplicatedEntityProvider[M] = { implicit val classTag: ClassTag[M] = ClassTag(messageClass) apply(typeName, allReplicaIds.asScala.toSet) { (typeKey, replicaId) => - ReplicatedEntity(replicaId, 
Entity(typeKey) { entityContext => - createBehavior(ReplicationId.fromString(entityContext.entityId)) - }.withRole(replicaId.id)) + ReplicatedEntity( + replicaId, + Entity(typeKey) { entityContext => + createBehavior(ReplicationId.fromString(entityContext.entityId)) + }.withRole(replicaId.id)) } } } -/** - * - * @tparam M The type of messages the replicated entity accepts - */ +/** @tparam M The type of messages the replicated entity accepts */ final class ReplicatedEntityProvider[M] private ( val replicas: immutable.Seq[(ReplicatedEntity[M], String)], val directReplication: Boolean) { @@ -145,7 +153,6 @@ final class ReplicatedEntityProvider[M] private ( * to also have it enabled through [[akka.persistence.typed.scaladsl.EventSourcedBehavior.withEventPublishing]] * or [[akka.persistence.typed.javadsl.ReplicatedEventSourcedBehavior.withEventPublishing]] * to work. - * */ def withDirectReplication(enabled: Boolean): ReplicatedEntityProvider[M] = new ReplicatedEntityProvider(replicas, directReplication = enabled) diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ReplicatedShardingExtension.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ReplicatedShardingExtension.scala index 278aa658691..42a1072bfbb 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ReplicatedShardingExtension.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ReplicatedShardingExtension.scala @@ -27,9 +27,7 @@ object ReplicatedShardingExtension extends ExtensionId[ReplicatedShardingExtensi } -/** - * Not for user extension. - */ +/** Not for user extension. 
*/ @DoNotInherit trait ReplicatedShardingExtension extends Extension { @@ -61,13 +59,9 @@ trait ReplicatedShardingExtension extends Extension { @DoNotInherit trait ReplicatedSharding[M] { - /** - * Scala API: Returns the entity ref for each replica for user defined routing/replica selection - */ + /** Scala API: Returns the entity ref for each replica for user defined routing/replica selection */ def entityRefsFor(entityId: String): Map[ReplicaId, EntityRef[M]] - /** - * Java API: Returns the entity ref for each replica for user defined routing/replica selection - */ + /** Java API: Returns the entity ref for each replica for user defined routing/replica selection */ def getEntityRefsFor(entityId: String): JMap[ReplicaId, javadsl.EntityRef[M]] } diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ShardedDaemonProcessCommand.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ShardedDaemonProcessCommand.scala index 2adade0489f..518a61bbc6e 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ShardedDaemonProcessCommand.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ShardedDaemonProcessCommand.scala @@ -60,9 +60,7 @@ object ChangeNumberOfProcesses { } -/** - * Query the sharded daemon process for the current scale - */ +/** Query the sharded daemon process for the current scale */ final class GetNumberOfProcesses(val replyTo: ActorRef[NumberOfProcesses]) extends ShardedDaemonProcessCommand with ClusterShardingTypedSerializable { @@ -102,8 +100,6 @@ trait NumberOfProcesses { def rescaleInProgress: Boolean - /** - * Revision number increased for every re-scale that has been triggered with [[ChangeNumberOfProcesses]] - */ + /** Revision number increased for every re-scale that has been triggered with [[ChangeNumberOfProcesses]] */ def revision: Long } diff --git 
a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ShardedDaemonProcessSettings.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ShardedDaemonProcessSettings.scala index 508933a0e80..673eed1f635 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ShardedDaemonProcessSettings.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ShardedDaemonProcessSettings.scala @@ -27,9 +27,7 @@ object ShardedDaemonProcessSettings { def create(system: ActorSystem[_]): ShardedDaemonProcessSettings = apply(system) - /** - * Load settings from a specific config location. - */ + /** Load settings from a specific config location. */ def fromConfig(config: Config): ShardedDaemonProcessSettings = { val keepAliveInterval = config.getDuration("keep-alive-interval").asScala val keepAliveFromNumberOfNodes = config.getInt("keep-alive-from-number-of-nodes") @@ -44,9 +42,7 @@ object ShardedDaemonProcessSettings { } -/** - * Not for user constructions, use factory methods to instantiate. - */ +/** Not for user constructions, use factory methods to instantiate. */ final class ShardedDaemonProcessSettings @InternalApi private[akka] ( val keepAliveInterval: FiniteDuration, val shardingSettings: Option[ClusterShardingSettings], @@ -86,21 +82,15 @@ final class ShardedDaemonProcessSettings @InternalApi private[akka] ( def withRole(role: String): ShardedDaemonProcessSettings = copy(role = Option(role)) - /** - * Keep alive messages from this number of nodes. - */ + /** Keep alive messages from this number of nodes. */ def withKeepAliveFromNumberOfNodes(keepAliveFromNumberOfNodes: Int): ShardedDaemonProcessSettings = copy(keepAliveFromNumberOfNodes = keepAliveFromNumberOfNodes) - /** - * Scala API: Keep alive messages are sent with this delay between each message. - */ + /** Scala API: Keep alive messages are sent with this delay between each message. 
*/ def withKeepAliveThrottleInterval(keepAliveThrottleInterval: FiniteDuration): ShardedDaemonProcessSettings = copy(keepAliveThrottleInterval = keepAliveThrottleInterval) - /** - * Java API: Keep alive messages are sent with this delay between each message. - */ + /** Java API: Keep alive messages are sent with this delay between each message. */ def withKeepAliveThrottleInterval(keepAliveThrottleInterval: JDuration): ShardedDaemonProcessSettings = copy(keepAliveThrottleInterval = keepAliveThrottleInterval.asScala) @@ -130,9 +120,7 @@ trait ShardedDaemonProcessContext { def processNumber: Int def totalProcesses: Int - /** - * The revision starts at 0 and each time the number of processes is changed, the revision increases with 1 - */ + /** The revision starts at 0 and each time the number of processes is changed, the revision increases with 1 */ def revision: Long def name: String diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ShardingDirectReplication.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ShardingDirectReplication.scala index dde1b573a83..13f8dcf7824 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ShardingDirectReplication.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ShardingDirectReplication.scala @@ -39,15 +39,11 @@ import akka.persistence.typed.ReplicationId @InternalApi private[akka] object ShardingDirectReplication { - /** - * Not for user extension - */ + /** Not for user extension */ @DoNotInherit sealed trait Command - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] case class VerifyStarted(replyTo: ActorRef[Done]) extends Command @@ -73,15 +69,14 @@ private[akka] object ShardingDirectReplication { "Forwarding event for persistence id [{}] sequence nr [{}] to replicas.", event.persistenceId, event.sequenceNumber) - replicaShardingProxies.foreach { - case (replica, proxy) => - val 
newId = replicationId.withReplica(replica) - // receiving side is responsible for any tagging, so drop/unwrap any tags added by the local tagger - val withoutTags = event.withoutTags - val envelopedEvent = ShardingEnvelope(newId.persistenceId.id, withoutTags) - if (!selfReplica.contains(replica)) { - proxy.asInstanceOf[ActorRef[ShardingEnvelope[PublishedEvent]]] ! envelopedEvent - } + replicaShardingProxies.foreach { case (replica, proxy) => + val newId = replicationId.withReplica(replica) + // receiving side is responsible for any tagging, so drop/unwrap any tags added by the local tagger + val withoutTags = event.withoutTags + val envelopedEvent = ShardingEnvelope(newId.persistenceId.id, withoutTags) + if (!selfReplica.contains(replica)) { + proxy.asInstanceOf[ActorRef[ShardingEnvelope[PublishedEvent]]] ! envelopedEvent + } } } else { context.log.traceN( diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ShardingMessageExtractor.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ShardingMessageExtractor.scala index 77585c2827a..7c0a8551063 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ShardingMessageExtractor.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ShardingMessageExtractor.scala @@ -21,9 +21,7 @@ object ShardingMessageExtractor { def apply[M](numberOfShards: Int): ShardingMessageExtractor[ShardingEnvelope[M], M] = new HashCodeMessageExtractor[M](numberOfShards) - /** - * Scala API: Create a message extractor for a protocol where the entity id is available in each message. - */ + /** Scala API: Create a message extractor for a protocol where the entity id is available in each message. 
*/ def noEnvelope[M](numberOfShards: Int, @unused stopMessage: M)( extractEntityId: M => String): ShardingMessageExtractor[M, M] = new HashCodeNoEnvelopeMessageExtractor[M](numberOfShards) { diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/delivery/ShardingConsumerController.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/delivery/ShardingConsumerController.scala index abb8e794d6a..e2fabc6e405 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/delivery/ShardingConsumerController.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/delivery/ShardingConsumerController.scala @@ -79,9 +79,7 @@ object ShardingConsumerController { def withConsumerControllerSettings(newConsumerControllerSettings: ConsumerController.Settings): Settings = copy(consumerControllerSettings = newConsumerControllerSettings) - /** - * Private copy method for internal use only. - */ + /** Private copy method for internal use only. */ private def copy( bufferSize: Int = bufferSize, consumerControllerSettings: ConsumerController.Settings = consumerControllerSettings) = diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/delivery/ShardingProducerController.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/delivery/ShardingProducerController.scala index 6b75fcd201f..d1d5eadbcb7 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/delivery/ShardingProducerController.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/delivery/ShardingProducerController.scala @@ -242,9 +242,7 @@ object ShardingProducerController { def withProducerControllerSettings(newProducerControllerSettings: ProducerController.Settings): Settings = copy(producerControllerSettings = newProducerControllerSettings) - /** - * Private copy method for internal use only. 
- */ + /** Private copy method for internal use only. */ private def copy( bufferSize: Int = bufferSize, internalAskTimeout: FiniteDuration = internalAskTimeout, @@ -279,9 +277,7 @@ object ShardingProducerController { ShardingProducerControllerImpl(producerId, region, durableQueueBehavior, settings) } - /** - * Java API - */ + /** Java API */ def create[A]( messageClass: Class[A], producerId: String, @@ -290,9 +286,7 @@ object ShardingProducerController { apply(producerId, region, durableQueueBehavior.asScala)(ClassTag(messageClass)) } - /** - * Java API - */ + /** Java API */ def create[A]( messageClass: Class[A], producerId: String, diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/delivery/internal/ShardingConsumerControllerImpl.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/delivery/internal/ShardingConsumerControllerImpl.scala index 5d50530da55..24291270204 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/delivery/internal/ShardingConsumerControllerImpl.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/delivery/internal/ShardingConsumerControllerImpl.scala @@ -16,9 +16,7 @@ import akka.actor.typed.scaladsl.Behaviors import akka.annotation.InternalApi import akka.cluster.sharding.typed.delivery.ShardingConsumerController -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ShardingConsumerControllerImpl { def apply[A, B]( consumerBehavior: ActorRef[ConsumerController.Start[A]] => Behavior[B], @@ -50,10 +48,9 @@ import akka.cluster.sharding.typed.delivery.ShardingConsumerController stashBuffer.stash(other) Behaviors.same } - .receiveSignal { - case (_, Terminated(`consumer`)) => - context.log.debug("Consumer terminated before initialized.") - Behaviors.stopped + .receiveSignal { case (_, Terminated(`consumer`)) => + context.log.debug("Consumer terminated before initialized.") + Behaviors.stopped } } } 
diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/delivery/internal/ShardingProducerControllerImpl.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/delivery/internal/ShardingProducerControllerImpl.scala index f2a8f90c338..902026e8cd1 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/delivery/internal/ShardingProducerControllerImpl.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/delivery/internal/ShardingProducerControllerImpl.scala @@ -29,9 +29,7 @@ import akka.cluster.sharding.typed.ShardingEnvelope import akka.cluster.sharding.typed.delivery.ShardingProducerController import akka.util.Timeout -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ShardingProducerControllerImpl { import ShardingProducerController.Command @@ -356,8 +354,8 @@ private class ShardingProducerControllerImpl[A: ClassTag]( } def onAck(outState: OutState[A], confirmedSeqNr: OutSeqNr): Vector[Unconfirmed[A]] = { - val (confirmed, newUnconfirmed) = outState.unconfirmed.partition { - case Unconfirmed(_, seqNr, _) => seqNr <= confirmedSeqNr + val (confirmed, newUnconfirmed) = outState.unconfirmed.partition { case Unconfirmed(_, seqNr, _) => + seqNr <= confirmedSeqNr } if (confirmed.nonEmpty) { @@ -473,17 +471,16 @@ private class ShardingProducerControllerImpl[A: ClassTag]( def receiveResendFirstUnconfirmed(): Behavior[InternalCommand] = { val now = System.nanoTime() - s.out.foreach { - case (outKey: OutKey, outState) => - val idleDurationMillis = (now - outState.usedNanoTime) / 1000 / 1000 - if (outState.unconfirmed.nonEmpty && idleDurationMillis >= settings.resendFirstUnconfirmedIdleTimeout.toMillis) { - context.log.debug( - "Resend first unconfirmed for [{}], because it was idle for [{} ms]", - outKey, - idleDurationMillis) - outState.producerController - .unsafeUpcast[ProducerControllerImpl.InternalCommand] ! 
ProducerControllerImpl.ResendFirstUnconfirmed - } + s.out.foreach { case (outKey: OutKey, outState) => + val idleDurationMillis = (now - outState.usedNanoTime) / 1000 / 1000 + if (outState.unconfirmed.nonEmpty && idleDurationMillis >= settings.resendFirstUnconfirmedIdleTimeout.toMillis) { + context.log.debug( + "Resend first unconfirmed for [{}], because it was idle for [{} ms]", + outKey, + idleDurationMillis) + outState.producerController + .unsafeUpcast[ProducerControllerImpl.InternalCommand] ! ProducerControllerImpl.ResendFirstUnconfirmed + } } Behaviors.same } @@ -491,15 +488,14 @@ private class ShardingProducerControllerImpl[A: ClassTag]( def receiveCleanupUnused(): Behavior[InternalCommand] = { val now = System.nanoTime() val removeOutKeys = - s.out.flatMap { - case (outKey: OutKey, outState) => - val idleDurationMillis = (now - outState.usedNanoTime) / 1000 / 1000 - if (outState.unconfirmed.isEmpty && outState.buffered.isEmpty && idleDurationMillis >= settings.cleanupUnusedAfter.toMillis) { - context.log.debug("Cleanup unused [{}], because it was idle for [{} ms]", outKey, idleDurationMillis) - context.stop(outState.producerController) - Some(outKey) - } else - None + s.out.flatMap { case (outKey: OutKey, outState) => + val idleDurationMillis = (now - outState.usedNanoTime) / 1000 / 1000 + if (outState.unconfirmed.isEmpty && outState.buffered.isEmpty && idleDurationMillis >= settings.cleanupUnusedAfter.toMillis) { + context.log.debug("Cleanup unused [{}], because it was idle for [{} ms]", outKey, idleDurationMillis) + context.stop(outState.producerController) + Some(outKey) + } else + None } if (removeOutKeys.isEmpty) Behaviors.same diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ClusterShardingImpl.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ClusterShardingImpl.scala index c47edabcb0a..14db24796cf 100644 --- 
a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ClusterShardingImpl.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ClusterShardingImpl.scala @@ -53,7 +53,7 @@ import akka.util.JavaDurationConverters._ extends ShardingMessageExtractor[Any, M] { override def entityId(message: Any): String = { message match { - case ShardingEnvelope(entityId, _) => entityId //also covers ClassicStartEntity in ShardingEnvelope + case ShardingEnvelope(entityId, _) => entityId // also covers ClassicStartEntity in ShardingEnvelope case ClassicStartEntity(entityId) => entityId case msg => delegate.entityId(msg.asInstanceOf[E]) } @@ -64,7 +64,7 @@ import akka.util.JavaDurationConverters._ override def unwrapMessage(message: Any): M = { message match { case ShardingEnvelope(_, msg: M @unchecked) => - //also covers ClassicStartEntity in ShardingEnvelope + // also covers ClassicStartEntity in ShardingEnvelope msg case msg: ClassicStartEntity => // not really of type M, but erased and StartEntity is only handled internally, not delivered to the entity @@ -77,9 +77,7 @@ import akka.util.JavaDurationConverters._ override def toString: String = delegate.toString } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class EntityTypeKeyImpl[T](name: String, messageClassName: String) extends javadsl.EntityTypeKey[T] with scaladsl.EntityTypeKey[T] { @@ -305,9 +303,7 @@ import akka.util.JavaDurationConverters._ } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class EntityRefImpl[M]( shardRegion: akka.actor.ActorRef, override val entityId: String, @@ -413,18 +409,14 @@ import akka.util.JavaDurationConverters._ override def toString: String = s"EntityRef($typeKey, $entityId)" - /** - * INTERNAL API - */ + /** INTERNAL API */ override private[akka] def asJava: javadsl.EntityRef[M] = this private[internal] def withDataCenter(dataCenter: Option[String]): 
EntityRefImpl[M] = new EntityRefImpl[M](shardRegion, entityId, typeKey, dataCenter) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ShardCommandActor { import akka.actor.typed.scaladsl.adapter._ import akka.cluster.sharding.ShardRegion.{ Passivate => ClassicPassivate } diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ClusterShardingTypedSerializable.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ClusterShardingTypedSerializable.scala index a799766a40b..8112b699c5e 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ClusterShardingTypedSerializable.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ClusterShardingTypedSerializable.scala @@ -6,8 +6,6 @@ package akka.cluster.sharding.typed.internal import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] trait ClusterShardingTypedSerializable diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/JoinConfigCompatCheckerClusterSharding.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/JoinConfigCompatCheckerClusterSharding.scala index 654f0b03f9f..c527c2651ac 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/JoinConfigCompatCheckerClusterSharding.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/JoinConfigCompatCheckerClusterSharding.scala @@ -11,9 +11,7 @@ import com.typesafe.config.Config import akka.annotation.InternalApi import akka.cluster.{ ConfigValidation, JoinConfigCompatChecker, Valid } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class JoinConfigCompatCheckerClusterSharding extends JoinConfigCompatChecker { diff --git 
a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/Murmur2.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/Murmur2.scala index 4541f7fe667..d725c834213 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/Murmur2.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/Murmur2.scala @@ -24,9 +24,7 @@ import java.nio.charset.StandardCharsets import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[sharding] object Murmur2 { def toPositive(number: Int): Int = number & 0x7fffffff @@ -42,7 +40,8 @@ private[sharding] object Murmur2 { val length4 = length / 4 for (i <- 0 until length4) { val i4 = i * 4 - var k = (data(i4 + 0) & 0xff) + ((data(i4 + 1) & 0xff) << 8) + ((data(i4 + 2) & 0xff) << 16) + ((data(i4 + 3) & 0xff) << 24) + var k = (data(i4 + 0) & 0xff) + ((data(i4 + 1) & 0xff) << 8) + ((data(i4 + 2) & 0xff) << 16) + ((data( + i4 + 3) & 0xff) << 24) k *= m k ^= k >>> r k *= m diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ReplicatedShardingExtensionImpl.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ReplicatedShardingExtensionImpl.scala index 40b79728b61..c3b2bd47bd2 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ReplicatedShardingExtensionImpl.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ReplicatedShardingExtensionImpl.scala @@ -24,9 +24,7 @@ import akka.persistence.typed.ReplicaId import akka.persistence.typed.ReplicationId import akka.util.ccompat.JavaConverters._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class ReplicatedShardingExtensionImpl(system: ActorSystem[_]) extends ReplicatedShardingExtension { @@ -45,24 +43,23 @@ private[akka] final class 
ReplicatedShardingExtensionImpl(system: ActorSystem[_] settings: ReplicatedEntityProvider[M]): ReplicatedSharding[M] = { require(settings.replicas.nonEmpty, "Replicas must not be empty") val sharding = ClusterSharding(system) - val initializedReplicas = settings.replicas.map { - case (replicaSettings, typeName) => - // start up a sharding instance per replica id - logger.infoN( - "Starting Replicated Event Sourcing sharding for replica [{}] (ShardType: [{}], typeName [{}])", - replicaSettings.replicaId.id, - replicaSettings.entity.typeKey.name) - val regionOrProxy = sharding.init(replicaSettings.entity) - ( - typeName, - replicaSettings.replicaId, - replicaSettings.entity.typeKey, - regionOrProxy, - replicaSettings.entity.dataCenter) + val initializedReplicas = settings.replicas.map { case (replicaSettings, typeName) => + // start up a sharding instance per replica id + logger.infoN( + "Starting Replicated Event Sourcing sharding for replica [{}] (ShardType: [{}], typeName [{}])", + replicaSettings.replicaId.id, + replicaSettings.entity.typeKey.name) + val regionOrProxy = sharding.init(replicaSettings.entity) + ( + typeName, + replicaSettings.replicaId, + replicaSettings.entity.typeKey, + regionOrProxy, + replicaSettings.entity.dataCenter) } if (settings.directReplication) { - val replicaToRegionOrProxy = initializedReplicas.map { - case (_, replicaId, _, regionOrProxy, _) => replicaId -> regionOrProxy + val replicaToRegionOrProxy = initializedReplicas.map { case (_, replicaId, _, regionOrProxy, _) => + replicaId -> regionOrProxy }.toMap val typeNameWithoutReplicaId = settings.replicas.head._2 logger.infoN("Starting Replicated Event Sourcing Direct Replication") @@ -71,16 +68,14 @@ private[akka] final class ReplicatedShardingExtensionImpl(system: ActorSystem[_] s"directReplication-${counter.incrementAndGet()}") } - val replicaToTypeKey = initializedReplicas.map { - case (typeName, id, typeKey, _, dc) => id -> ((typeKey, dc, typeName)) + val replicaToTypeKey = 
initializedReplicas.map { case (typeName, id, typeKey, _, dc) => + id -> ((typeKey, dc, typeName)) }.toMap new ReplicatedShardingImpl(sharding, replicaToTypeKey) } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class ReplicatedShardingImpl[M]( sharding: ClusterSharding, @@ -88,13 +83,12 @@ private[akka] final class ReplicatedShardingImpl[M]( extends ReplicatedSharding[M] { override def entityRefsFor(entityId: String): Map[ReplicaId, EntityRef[M]] = - replicaTypeKeys.map { - case (replicaId, (typeKey, dc, typeName)) => - replicaId -> (dc match { - case None => sharding.entityRefFor(typeKey, ReplicationId(typeName, entityId, replicaId).persistenceId.id) - case Some(dc) => - sharding.entityRefFor(typeKey, ReplicationId(typeName, entityId, replicaId).persistenceId.id, dc) - }) + replicaTypeKeys.map { case (replicaId, (typeKey, dc, typeName)) => + replicaId -> (dc match { + case None => sharding.entityRefFor(typeKey, ReplicationId(typeName, entityId, replicaId).persistenceId.id) + case Some(dc) => + sharding.entityRefFor(typeKey, ReplicationId(typeName, entityId, replicaId).persistenceId.id, dc) + }) } override def getEntityRefsFor(entityId: String): JMap[ReplicaId, akka.cluster.sharding.typed.javadsl.EntityRef[M]] = diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ShardedDaemonProcessCoordinator.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ShardedDaemonProcessCoordinator.scala index a144a941f1b..076d496ff04 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ShardedDaemonProcessCoordinator.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ShardedDaemonProcessCoordinator.scala @@ -81,24 +81,23 @@ private[akka] object ShardedDaemonProcessCoordinator { shardingRef: ActorRef[ShardingEnvelope[T]]): Behavior[ShardedDaemonProcessCommand] = { Behaviors 
.supervise[ShardedDaemonProcessCommand](Behaviors.setup { context => - Behaviors.withTimers { - timers => - context.log.debug("ShardedDaemonProcessCoordinator for [{}] starting", daemonProcessName) - val key = ShardedDaemonProcessStateKey(daemonProcessName) - DistributedData.withReplicatorMessageAdapter[ShardedDaemonProcessCommand, ShardedDaemonProcessState] { - replicatorAdapter => - new ShardedDaemonProcessCoordinator( - settings, - shardingSettings, - context, - timers, - daemonProcessName, - shardingRef.toClassic, - initialNumberOfProcesses, - key, - replicatorAdapter).start() - - } + Behaviors.withTimers { timers => + context.log.debug("ShardedDaemonProcessCoordinator for [{}] starting", daemonProcessName) + val key = ShardedDaemonProcessStateKey(daemonProcessName) + DistributedData.withReplicatorMessageAdapter[ShardedDaemonProcessCommand, ShardedDaemonProcessState] { + replicatorAdapter => + new ShardedDaemonProcessCoordinator( + settings, + shardingSettings, + context, + timers, + daemonProcessName, + shardingRef.toClassic, + initialNumberOfProcesses, + key, + replicatorAdapter).start() + + } } }) .onFailure(SupervisorStrategy.restart) @@ -147,8 +146,8 @@ private final class ShardedDaemonProcessCoordinator private ( } private val shardStoppedAdapter = context - .messageAdapter[ShardCoordinator.Internal.ShardStopped] { - case ShardCoordinator.Internal.ShardStopped(shard) => ShardStopped(shard) + .messageAdapter[ShardCoordinator.Internal.ShardStopped] { case ShardCoordinator.Internal.ShardStopped(shard) => + ShardStopped(shard) } .toClassic @@ -323,7 +322,7 @@ private final class ShardedDaemonProcessCoordinator private ( request.foreach(req => req.replyTo ! 
StatusReply.Ack) val newState = state.completeScaling() replicatorAdapter.askUpdate( - replyTo => Replicator.Update(key, initialState, stateWriteConsistency, replyTo)((_) => newState), + replyTo => Replicator.Update(key, initialState, stateWriteConsistency, replyTo)(_ => newState), response => InternalUpdateResponse(response)) receiveWhileRescaling("rescalingComplete", state) { diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ShardedDaemonProcessId.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ShardedDaemonProcessId.scala index 1f6f13657cf..3f4022edd13 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ShardedDaemonProcessId.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ShardedDaemonProcessId.scala @@ -8,9 +8,7 @@ import akka.annotation.InternalApi import akka.cluster.sharding.typed.ShardingEnvelope import akka.cluster.sharding.typed.ShardingMessageExtractor -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ShardedDaemonProcessId { @@ -27,7 +25,11 @@ private[akka] object ShardedDaemonProcessId { id.split(Separator) match { case Array(rev, count, n) => DecodedId(rev.toLong, count.toInt, n.toInt) case Array(n) => - DecodedId(0L, initialNumberOfProcesses, n.toInt) // ping from old/supportsRescale=false node during rolling upgrade + DecodedId( + 0L, + initialNumberOfProcesses, + n.toInt + ) // ping from old/supportsRescale=false node during rolling upgrade case _ => throw new IllegalArgumentException(s"Unexpected id for sharded daemon process: '$id'") } diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ShardedDaemonProcessImpl.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ShardedDaemonProcessImpl.scala index 5920fc242f2..b1618423920 100644 --- 
a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ShardedDaemonProcessImpl.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ShardedDaemonProcessImpl.scala @@ -32,9 +32,7 @@ import akka.cluster.typed.ClusterSingleton import akka.cluster.typed.ClusterSingletonSettings import akka.cluster.typed.SingletonActor -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class ShardedDaemonProcessImpl(system: ActorSystem[_]) extends javadsl.ShardedDaemonProcess @@ -50,8 +48,8 @@ private[akka] final class ShardedDaemonProcessImpl(system: ActorSystem[_]) revision: Long) extends ShardedDaemonProcessContext - def init[T](name: String, numberOfInstances: Int, behaviorFactory: Int => Behavior[T])( - implicit classTag: ClassTag[T]): Unit = + def init[T](name: String, numberOfInstances: Int, behaviorFactory: Int => Behavior[T])(implicit + classTag: ClassTag[T]): Unit = init(name, numberOfInstances, behaviorFactory, ShardedDaemonProcessSettings(system), None, None)(classTag) override def init[T](name: String, numberOfInstances: Int, behaviorFactory: Int => Behavior[T], stopMessage: T)( @@ -86,8 +84,8 @@ private[akka] final class ShardedDaemonProcessImpl(system: ActorSystem[_]) override def initWithContext[T]( name: EntityId, initialNumberOfInstances: Int, - behaviorFactory: ShardedDaemonProcessContext => Behavior[T])( - implicit classTag: ClassTag[T]): ActorRef[ShardedDaemonProcessCommand] = + behaviorFactory: ShardedDaemonProcessContext => Behavior[T])(implicit + classTag: ClassTag[T]): ActorRef[ShardedDaemonProcessCommand] = internalInitWithContext(name, initialNumberOfInstances, behaviorFactory, None, None, None, true) override def initWithContext[T]( @@ -111,8 +109,8 @@ private[akka] final class ShardedDaemonProcessImpl(system: ActorSystem[_]) behaviorFactory: ShardedDaemonProcessContext => Behavior[T], settings: ShardedDaemonProcessSettings, stopMessage: Option[T], - 
shardAllocationStrategy: Option[ShardAllocationStrategy])( - implicit classTag: ClassTag[T]): ActorRef[ShardedDaemonProcessCommand] = + shardAllocationStrategy: Option[ShardAllocationStrategy])(implicit + classTag: ClassTag[T]): ActorRef[ShardedDaemonProcessCommand] = internalInitWithContext( name, numberOfInstances, diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ShardedDaemonProcessState.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ShardedDaemonProcessState.scala index e4932c5f1c3..6644a87926b 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ShardedDaemonProcessState.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ShardedDaemonProcessState.scala @@ -16,9 +16,7 @@ import akka.cluster.ddata.typed.scaladsl.DistributedData import akka.cluster.ddata.typed.scaladsl.Replicator import akka.cluster.sharding.typed.ShardedDaemonProcessContext -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] final case class ShardedDaemonProcessState( revision: Long, numberOfProcesses: Int, @@ -42,9 +40,7 @@ private[akka] final case class ShardedDaemonProcessState( } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class ShardedDaemonProcessStateKey(_id: String) extends Key[ShardedDaemonProcessState](_id) @@ -53,9 +49,7 @@ private[akka] final case class ShardedDaemonProcessStateKey(_id: String) ShardedDaemonProcessStateKey(newId) } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] object ShardedDaemonProcessState { val startRevision = 0L diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ShardingSerializer.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ShardingSerializer.scala index 93b15e544f8..a9e34975835 100644 --- 
a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ShardingSerializer.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ShardingSerializer.scala @@ -22,9 +22,7 @@ import akka.serialization.BaseSerializer import akka.serialization.ByteBufferSerializer import akka.serialization.SerializerWithStringManifest -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class ShardingSerializer(val system: akka.actor.ExtendedActorSystem) extends SerializerWithStringManifest with ByteBufferSerializer diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ShardingState.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ShardingState.scala index 46860be65ef..31c5fa25fc1 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ShardingState.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ShardingState.scala @@ -16,9 +16,7 @@ import akka.cluster.sharding.typed.ClusterShardingQuery import akka.cluster.sharding.typed.GetClusterShardingStats import akka.cluster.sharding.typed.GetShardRegionState -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ShardingState { def behavior(classicSharding: ClusterSharding): Behavior[ClusterShardingQuery] = { diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/testkit/TestEntityRefImpl.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/testkit/TestEntityRefImpl.scala index 66f8dcd6589..f4360abf40c 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/testkit/TestEntityRefImpl.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/testkit/TestEntityRefImpl.scala @@ -23,9 +23,7 @@ import akka.pattern.StatusReply import 
akka.util.JavaDurationConverters._ import akka.util.Timeout -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class TestEntityRefImpl[M]( override val entityId: String, probe: ActorRef[M], diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/javadsl/ClusterSharding.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/javadsl/ClusterSharding.scala index dc1aa410899..1fb917d4bb4 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/javadsl/ClusterSharding.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/javadsl/ClusterSharding.scala @@ -207,14 +207,10 @@ abstract class ClusterSharding { */ def entityRefFor[M](typeKey: EntityTypeKey[M], entityId: String, dataCenter: String): EntityRef[M] - /** - * Actor for querying Cluster Sharding state - */ + /** Actor for querying Cluster Sharding state */ def shardState: ActorRef[ClusterShardingQuery] - /** - * The default `ShardAllocationStrategy` is configured by `least-shard-allocation-strategy` properties. - */ + /** The default `ShardAllocationStrategy` is configured by `least-shard-allocation-strategy` properties. */ def defaultShardAllocationStrategy(settings: ClusterShardingSettings): ShardAllocationStrategy } @@ -245,9 +241,7 @@ object Entity { } -/** - * Defines how the entity should be created. Used in [[ClusterSharding#init]]. - */ +/** Defines how the entity should be created. Used in [[ClusterSharding#init]]. */ final class Entity[M, E] private ( val createBehavior: JFunction[EntityContext[M], Behavior[M]], val typeKey: EntityTypeKey[M], @@ -259,15 +253,11 @@ final class Entity[M, E] private ( val role: Optional[String], val dataCenter: Optional[String]) { - /** - * [[akka.actor.typed.Props]] of the entity actors, such as dispatcher settings. - */ + /** [[akka.actor.typed.Props]] of the entity actors, such as dispatcher settings. 
*/ def withEntityProps(newEntityProps: Props): Entity[M, E] = copy(entityProps = newEntityProps) - /** - * Additional settings, typically loaded from configuration. - */ + /** Additional settings, typically loaded from configuration. */ def withSettings(newSettings: ClusterShardingSettings): Entity[M, E] = copy(settings = Optional.ofNullable(newSettings)) @@ -281,7 +271,6 @@ final class Entity[M, E] private ( copy(stopMessage = Optional.ofNullable(newStopMessage)) /** - * * If a `messageExtractor` is not specified the messages are sent to the entities by wrapping * them in [[ShardingEnvelope]] with the entityId of the recipient actor. That envelope * is used by the [[HashCodeMessageExtractor]] for extracting entityId and shardId. The number of @@ -300,9 +289,7 @@ final class Entity[M, E] private ( role, dataCenter) - /** - * Run the Entity actors on nodes with the given role. - */ + /** Run the Entity actors on nodes with the given role. */ def withRole(role: String): Entity[M, E] = copy(role = Optional.ofNullable(role)) @@ -342,9 +329,7 @@ final class Entity[M, E] private ( dataCenter) } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def toScala: akka.cluster.sharding.typed.scaladsl.Entity[M, E] = new akka.cluster.sharding.typed.scaladsl.Entity( @@ -405,23 +390,17 @@ object StartEntity { */ @DoNotInherit abstract class EntityTypeKey[-T] { scaladslSelf: scaladsl.EntityTypeKey[T] => - /** - * Name of the entity type. - */ + /** Name of the entity type. */ def name: String - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def asScala: scaladsl.EntityTypeKey[T] = scaladslSelf } object EntityTypeKey { - /** - * Creates an `EntityTypeKey`. The `name` must be unique. - */ + /** Creates an `EntityTypeKey`. The `name` must be unique. 
*/ def create[T](messageClass: Class[T], name: String): EntityTypeKey[T] = EntityTypeKeyImpl(name, messageClass.getName) @@ -443,14 +422,10 @@ object EntityTypeKey { @DoNotInherit abstract class EntityRef[-M] extends RecipientRef[M] { scaladslSelf: scaladsl.EntityRef[M] with InternalRecipientRef[M] => - /** - * The identifier for the particular entity referenced by this EntityRef. - */ + /** The identifier for the particular entity referenced by this EntityRef. */ def getEntityId: String = entityId - /** - * The name of the EntityTypeKey associated with this EntityRef - */ + /** The name of the EntityTypeKey associated with this EntityRef */ def getTypeKey: javadsl.EntityTypeKey[M] = typeKey.asJava /** @@ -485,9 +460,7 @@ object EntityTypeKey { */ def askWithStatus[Res](f: ActorRef[StatusReply[Res]] => M, timeout: Duration): CompletionStage[Res] - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def asScala: scaladsl.EntityRef[M] = scaladslSelf } diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/javadsl/ShardedDaemonProcess.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/javadsl/ShardedDaemonProcess.scala index f25cf079844..fb0ded5892d 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/javadsl/ShardedDaemonProcess.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/javadsl/ShardedDaemonProcess.scala @@ -132,7 +132,6 @@ abstract class ShardedDaemonProcess { * @param stopMessage If defined: sent to the actors when they need to stop because of a rebalance across the nodes of the cluster, * rescale or cluster shutdown. 
* @param shardAllocationStrategy If defined: used by entities to control the shard allocation - * */ @ApiMayChange def initWithContext[T]( diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/scaladsl/ClusterSharding.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/scaladsl/ClusterSharding.scala index 69124a4a97f..eb6fc73a3c8 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/scaladsl/ClusterSharding.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/scaladsl/ClusterSharding.scala @@ -208,19 +208,13 @@ trait ClusterSharding extends Extension { javadslSelf: javadsl.ClusterSharding = */ def entityRefFor[M](typeKey: EntityTypeKey[M], entityId: String, dataCenter: DataCenter): EntityRef[M] - /** - * Actor for querying Cluster Sharding state - */ + /** Actor for querying Cluster Sharding state */ def shardState: ActorRef[ClusterShardingQuery] - /** - * The default `ShardAllocationStrategy` is configured by `least-shard-allocation-strategy` properties. - */ + /** The default `ShardAllocationStrategy` is configured by `least-shard-allocation-strategy` properties. */ def defaultShardAllocationStrategy(settings: ClusterShardingSettings): ShardAllocationStrategy - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def asJava: javadsl.ClusterSharding = javadslSelf } @@ -240,9 +234,7 @@ object Entity { new Entity(createBehavior, typeKey, None, Props.empty, None, None, None, None, None) } -/** - * Defines how the entity should be created. Used in [[ClusterSharding#init]]. - */ +/** Defines how the entity should be created. Used in [[ClusterSharding#init]]. 
*/ final class Entity[M, E] private[akka] ( val createBehavior: EntityContext[M] => Behavior[M], val typeKey: EntityTypeKey[M], @@ -254,15 +246,11 @@ final class Entity[M, E] private[akka] ( val role: Option[String], val dataCenter: Option[DataCenter]) { - /** - * [[akka.actor.typed.Props]] of the entity actors, such as dispatcher settings. - */ + /** [[akka.actor.typed.Props]] of the entity actors, such as dispatcher settings. */ def withEntityProps(newEntityProps: Props): Entity[M, E] = copy(entityProps = newEntityProps) - /** - * Additional settings, typically loaded from configuration. - */ + /** Additional settings, typically loaded from configuration. */ def withSettings(newSettings: ClusterShardingSettings): Entity[M, E] = copy(settings = Option(newSettings)) @@ -276,7 +264,6 @@ final class Entity[M, E] private[akka] ( copy(stopMessage = Option(newStopMessage)) /** - * * If a `messageExtractor` is not specified the messages are sent to the entities by wrapping * them in [[ShardingEnvelope]] with the entityId of the recipient actor. That envelope * is used by the [[HashCodeMessageExtractor]] for extracting entityId and shardId. The number of @@ -302,9 +289,7 @@ final class Entity[M, E] private[akka] ( def withAllocationStrategy(newAllocationStrategy: ShardAllocationStrategy): Entity[M, E] = copy(allocationStrategy = Option(newAllocationStrategy)) - /** - * Run the Entity actors on nodes with the given role. - */ + /** Run the Entity actors on nodes with the given role. 
*/ def withRole(newRole: String): Entity[M, E] = copy(role = Some(newRole)) /** @@ -356,9 +341,7 @@ final class EntityContext[M]( val entityId: String, val shard: ActorRef[ClusterSharding.ShardCommand]) { - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def toJava: akka.cluster.sharding.typed.javadsl.EntityContext[M] = new akka.cluster.sharding.typed.javadsl.EntityContext[M]( @@ -387,9 +370,7 @@ object StartEntity { */ @DoNotInherit trait EntityTypeKey[-T] { - /** - * Name of the entity type. - */ + /** Name of the entity type. */ def name: String private[akka] def asJava: javadsl.EntityTypeKey[T] @@ -397,9 +378,7 @@ object StartEntity { object EntityTypeKey { - /** - * Creates an `EntityTypeKey`. The `name` must be unique. - */ + /** Creates an `EntityTypeKey`. The `name` must be unique. */ def apply[T](name: String)(implicit tTag: ClassTag[T]): EntityTypeKey[T] = EntityTypeKeyImpl(name, implicitly[ClassTag[T]].runtimeClass.getName) @@ -429,9 +408,7 @@ object EntityTypeKey { */ def entityId: String - /** - * The EntityTypeKey associated with this EntityRef. - */ + /** The EntityTypeKey associated with this EntityRef. 
*/ def typeKey: EntityTypeKey[M] /** @@ -522,9 +499,7 @@ object EntityTypeKey { def ?[Res](message: ActorRef[Res] => M)(implicit timeout: Timeout): Future[Res] = this.ask(message)(timeout) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def asJava: javadsl.EntityRef[M] } diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/scaladsl/ShardedDaemonProcess.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/scaladsl/ShardedDaemonProcess.scala index 3bc32ed96eb..7a7a78e0989 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/scaladsl/ShardedDaemonProcess.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/scaladsl/ShardedDaemonProcess.scala @@ -48,8 +48,8 @@ trait ShardedDaemonProcess extends Extension { javadslSelf: javadsl.ShardedDaemo * * @param behaviorFactory Given a unique id of `0` until `numberOfInstance` create the behavior for that actor. */ - def init[T](name: String, numberOfInstances: Int, behaviorFactory: Int => Behavior[T])( - implicit classTag: ClassTag[T]): Unit + def init[T](name: String, numberOfInstances: Int, behaviorFactory: Int => Behavior[T])(implicit + classTag: ClassTag[T]): Unit /** * Start a specific number of actors that is then kept alive in the cluster. @@ -59,8 +59,8 @@ trait ShardedDaemonProcess extends Extension { javadslSelf: javadsl.ShardedDaemo * @param stopMessage sent to the actors when they need to stop because of a rebalance across the nodes of the cluster * or cluster shutdown. 
*/ - def init[T](name: String, numberOfInstances: Int, behaviorFactory: Int => Behavior[T], stopMessage: T)( - implicit classTag: ClassTag[T]): Unit + def init[T](name: String, numberOfInstances: Int, behaviorFactory: Int => Behavior[T], stopMessage: T)(implicit + classTag: ClassTag[T]): Unit /** * Start a specific number of actors, each with a unique numeric id in the set, that is then kept alive in the cluster. @@ -105,8 +105,8 @@ trait ShardedDaemonProcess extends Extension { javadslSelf: javadsl.ShardedDaemo def initWithContext[T]( name: String, initialNumberOfInstances: Int, - behaviorFactory: ShardedDaemonProcessContext => Behavior[T])( - implicit classTag: ClassTag[T]): ActorRef[ShardedDaemonProcessCommand] + behaviorFactory: ShardedDaemonProcessContext => Behavior[T])(implicit + classTag: ClassTag[T]): ActorRef[ShardedDaemonProcessCommand] /** * Start a specific number of actors, each with a unique numeric id in the set, that is then kept alive in the cluster. @@ -142,12 +142,10 @@ trait ShardedDaemonProcess extends Extension { javadslSelf: javadsl.ShardedDaemo behaviorFactory: ShardedDaemonProcessContext => Behavior[T], settings: ShardedDaemonProcessSettings, stopMessage: Option[T], - shardAllocationStrategy: Option[ShardAllocationStrategy])( - implicit classTag: ClassTag[T]): ActorRef[ShardedDaemonProcessCommand] + shardAllocationStrategy: Option[ShardAllocationStrategy])(implicit + classTag: ClassTag[T]): ActorRef[ShardedDaemonProcessCommand] - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def asJava: javadsl.ShardedDaemonProcess = javadslSelf } diff --git a/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesPerfSpec.scala b/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesPerfSpec.scala index 9b82338293a..39e8a476095 100644 --- 
a/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesPerfSpec.scala +++ b/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesPerfSpec.scala @@ -171,8 +171,8 @@ abstract class ClusterShardingRememberEntitiesPerfSpec println(f"Average throughput: ${throughputs.sum / NrIterations}%,.0f msg/s") println("Combined latency figures:") println(s"total ${fullHistogram.getTotalCount} max ${fullHistogram.getMaxValue} ${percentiles - .map(p => s"$p% ${fullHistogram.getValueAtPercentile(p)}ms") - .mkString(" ")}") + .map(p => s"$p% ${fullHistogram.getValueAtPercentile(p)}ms") + .mkString(" ")}") recording.endAndDump(Paths.get("target", s"${name.replace(" ", "-")}.jfr")) } enterBarrier(s"after-start-stop-${testRun}") @@ -226,13 +226,15 @@ abstract class ClusterShardingRememberEntitiesPerfSpec } } - awaitAssert({ - val probe = TestProbe() - region.tell(GetShardRegionState, probe.ref) - val stats = probe.expectMsgType[CurrentShardRegionState] - stats.shards.head.shardId shouldEqual "0" - stats.shards.head.entityIds.toList.sorted shouldEqual List("0") // the init entity - }, 2.seconds) + awaitAssert( + { + val probe = TestProbe() + region.tell(GetShardRegionState, probe.ref) + val stats = probe.expectMsgType[CurrentShardRegionState] + stats.shards.head.shardId shouldEqual "0" + stats.shards.head.entityIds.toList.sorted shouldEqual List("0") // the init entity + }, + 2.seconds) numberOfMessages } diff --git a/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ClusterShardingPreparingForShutdownSpec.scala b/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ClusterShardingPreparingForShutdownSpec.scala index 0770571f712..f5e5819613c 100644 --- a/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ClusterShardingPreparingForShutdownSpec.scala +++ 
b/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ClusterShardingPreparingForShutdownSpec.scala @@ -45,10 +45,9 @@ object ClusterShardingPreparingForShutdownSpec extends MultiNodeConfig { case class Pong(id: Int) extends CborSerializable def apply(): Behavior[Command] = Behaviors.setup { _ => - Behaviors.receiveMessage[Command] { - case Ping(id: Int, ref) => - ref ! Pong(id) - Behaviors.same + Behaviors.receiveMessage[Command] { case Ping(id: Int, ref) => + ref ! Pong(id) + Behaviors.same } } @@ -90,12 +89,14 @@ class ClusterShardingPreparingForShutdownSpec runOn(second) { cluster.manager ! PrepareForFullClusterShutdown } - awaitAssert({ - withClue("members: " + cluster.state.members) { - cluster.selfMember.status shouldEqual MemberStatus.ReadyForShutdown - cluster.state.members.unsorted.map(_.status) shouldEqual Set(MemberStatus.ReadyForShutdown) - } - }, 10.seconds) + awaitAssert( + { + withClue("members: " + cluster.state.members) { + cluster.selfMember.status shouldEqual MemberStatus.ReadyForShutdown + cluster.state.members.unsorted.map(_.status) shouldEqual Set(MemberStatus.ReadyForShutdown) + } + }, + 10.seconds) enterBarrier("preparation-complete") shardRegion ! ShardingEnvelope("id2", Pinger.Ping(2, probe.ref)) @@ -104,26 +105,31 @@ class ClusterShardingPreparingForShutdownSpec runOn(second) { cluster.manager ! 
Leave(address(second)) } - awaitAssert({ - runOn(first, third) { - withClue("members: " + cluster.state.members) { - cluster.state.members.size shouldEqual 2 + awaitAssert( + { + runOn(first, third) { + withClue("members: " + cluster.state.members) { + cluster.state.members.size shouldEqual 2 + } } - } - runOn(second) { - withClue("self member: " + cluster.selfMember) { - cluster.selfMember.status shouldEqual MemberStatus.Removed + runOn(second) { + withClue("self member: " + cluster.selfMember) { + cluster.selfMember.status shouldEqual MemberStatus.Removed + } } - } - }, 5.seconds) // keep this lower than coordinated shutdown timeout + }, + 5.seconds + ) // keep this lower than coordinated shutdown timeout // trigger creation of a new shard should be fine even though one node left runOn(first, third) { - awaitAssert({ - shardRegion ! ShardingEnvelope("id3", Pinger.Ping(3, probe.ref)) - probe.expectMessage(Pong(3)) - }, 10.seconds) + awaitAssert( + { + shardRegion ! ShardingEnvelope("id3", Pinger.Ping(3, probe.ref)) + probe.expectMessage(Pong(3)) + }, + 10.seconds) } enterBarrier("new-shards-verified") @@ -131,11 +137,13 @@ class ClusterShardingPreparingForShutdownSpec cluster.manager ! Leave(address(first)) cluster.manager ! 
Leave(address(third)) } - awaitAssert({ - withClue("self member: " + cluster.selfMember) { - cluster.selfMember.status shouldEqual Removed - } - }, 15.seconds) + awaitAssert( + { + withClue("self member: " + cluster.selfMember) { + cluster.selfMember.status shouldEqual Removed + } + }, + 15.seconds) enterBarrier("done") } } diff --git a/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ClusterShardingStatsSpec.scala b/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ClusterShardingStatsSpec.scala index 38a15fb74fb..42f4a4d4750 100644 --- a/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ClusterShardingStatsSpec.scala +++ b/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ClusterShardingStatsSpec.scala @@ -27,11 +27,14 @@ object ClusterShardingStatsSpecConfig extends MultiNodeConfig { val second = role("second") val third = role("third") - commonConfig(ConfigFactory.parseString(""" + commonConfig( + ConfigFactory + .parseString(""" akka.log-dead-letters-during-shutdown = off akka.cluster.sharding.updating-state-timeout = 2s akka.cluster.sharding.waiting-for-state-timeout = 2s - """).withFallback(MultiNodeClusterSpec.clusterConfig)) + """) + .withFallback(MultiNodeClusterSpec.clusterConfig)) } @@ -45,10 +48,9 @@ object Pinger { case class Pong(id: Int) extends CborSerializable def apply(): Behavior[Command] = { - Behaviors.receiveMessage[Command] { - case Ping(id: Int, ref) => - ref ! Pong(id) - Behaviors.same + Behaviors.receiveMessage[Command] { case Ping(id: Int, ref) => + ref ! 
Pong(id) + Behaviors.same } } @@ -65,7 +67,7 @@ abstract class ClusterShardingStatsSpec private val typeKey = EntityTypeKey[Command]("ping") private val sharding = ClusterSharding(typedSystem) private val settings = ClusterShardingSettings(typedSystem) - private val queryTimeout = settings.shardRegionQueryTimeout * roles.size.toLong //numeric widening y'all + private val queryTimeout = settings.shardRegionQueryTimeout * roles.size.toLong // numeric widening y'all "Cluster sharding stats" must { "form cluster" in { diff --git a/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/MultiDcClusterShardingSpec.scala b/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/MultiDcClusterShardingSpec.scala index 37f6063f1d3..58c73670566 100644 --- a/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/MultiDcClusterShardingSpec.scala +++ b/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/MultiDcClusterShardingSpec.scala @@ -25,14 +25,17 @@ object MultiDcClusterShardingSpecConfig extends MultiNodeConfig { val third = role("third") val fourth = role("fourth") - commonConfig(ConfigFactory.parseString(""" + commonConfig( + ConfigFactory + .parseString(""" akka.loglevel = DEBUG akka.cluster.sharding { number-of-shards = 10 # First is likely to be ignored as shard coordinator not ready retry-interval = 0.2s } - """).withFallback(MultiNodeClusterSpec.clusterConfig)) + """) + .withFallback(MultiNodeClusterSpec.clusterConfig)) nodeConfig(first, second)(ConfigFactory.parseString(""" akka.cluster.multi-data-center.self-data-center = "dc1" @@ -111,10 +114,10 @@ abstract class MultiDcClusterShardingSpec "be able to message cross dc via proxy, defined with Entity" in { runOn(first, second) { val system = typedSystem - //#proxy-dc + // #proxy-dc val proxy: ActorRef[ShardingEnvelope[Command]] = ClusterSharding(system).init(Entity(typeKey)(_ => MultiDcPinger()).withDataCenter("dc2")) - 
//#proxy-dc + // #proxy-dc val probe = TestProbe[Pong]() proxy ! ShardingEnvelope(entityId, Ping(probe.ref)) probe.expectMessage(remainingOrDefault, Pong("dc2")) @@ -125,12 +128,12 @@ abstract class MultiDcClusterShardingSpec "be able to message cross dc via proxy, defined with EntityRef" in { runOn(first, second) { val system = typedSystem - //#proxy-dc-entityref + // #proxy-dc-entityref // it must still be started before usage ClusterSharding(system).init(Entity(typeKey)(_ => MultiDcPinger()).withDataCenter("dc2")) val entityRef = ClusterSharding(system).entityRefFor(typeKey, entityId, "dc2") - //#proxy-dc-entityref + // #proxy-dc-entityref val probe = TestProbe[Pong]() entityRef ! Ping(probe.ref) diff --git a/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ReplicatedShardingSpec.scala b/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ReplicatedShardingSpec.scala index 3742dfdf168..d95dd9b441b 100644 --- a/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ReplicatedShardingSpec.scala +++ b/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ReplicatedShardingSpec.scala @@ -36,13 +36,16 @@ object ReplicatedShardingSpec extends MultiNodeConfig { val first = role("first") val second = role("second") - commonConfig(ConfigFactory.parseString(""" + commonConfig( + ConfigFactory + .parseString(""" akka.loglevel = DEBUG akka.persistence.journal.plugin = "akka.persistence.journal.inmem" // for the proxy plugin akka.actor.allow-java-serialization = on akka.actor.warn-about-java-serializer-usage = off - """).withFallback(MultiNodeClusterSpec.clusterConfig)) + """) + .withFallback(MultiNodeClusterSpec.clusterConfig)) nodeConfig(first)(ConfigFactory.parseString(""" akka.persistence.journal.plugin = "akka.persistence.journal.proxy" @@ -99,11 +102,13 @@ object ReplicatedShardingSpec extends MultiNodeConfig { def provider(): ReplicatedEntityProvider[Command] = { 
ReplicatedEntityProvider[Command]("TestRES", AllReplicas) { (entityTypeKey, replicaId) => - ReplicatedEntity(replicaId, Entity(entityTypeKey) { entityContext => - Behaviors.setup { ctx => - TestRES(ReplicationId.fromString(entityContext.entityId), ctx) - } - }) + ReplicatedEntity( + replicaId, + Entity(entityTypeKey) { entityContext => + Behaviors.setup { ctx => + TestRES(ReplicationId.fromString(entityContext.entityId), ctx) + } + }) }.withDirectReplication(true) // this is required as we don't have a shared read journal } } @@ -143,18 +148,17 @@ abstract class ReplicatedShardingSpec val entityRefs = replicatedSharding.entityRefsFor("id1") val probe = TestProbe[Done]() entityRefs.size shouldEqual 2 - entityRefs.foreach { - case (replica, ref) => ref ! StoreMe(s"from first to ${replica.id}", probe.ref) + entityRefs.foreach { case (replica, ref) => + ref ! StoreMe(s"from first to ${replica.id}", probe.ref) } probe.expectMessage(Done) probe.expectMessage(Done) eventually { - entityRefs.foreach { - case (_, ref) => - val probe = TestProbe[State]() - ref ! GetState(probe.ref) - probe.expectMessageType[State].all.toSet shouldEqual Set(s"from first to R1", s"from first to R2") + entityRefs.foreach { case (_, ref) => + val probe = TestProbe[State]() + ref ! GetState(probe.ref) + probe.expectMessageType[State].all.toSet shouldEqual Set(s"from first to R1", s"from first to R2") } } } @@ -171,8 +175,8 @@ abstract class ReplicatedShardingSpec runOn(second) { val entityRefs = replicatedSharding.entityRefsFor("id2") val probe = TestProbe[Done]() - entityRefs.foreach { - case (replica, ref) => ref ! StoreMe(s"from first to ${replica.id}", probe.ref) + entityRefs.foreach { case (replica, ref) => + ref ! 
StoreMe(s"from first to ${replica.id}", probe.ref) } probe.expectMessage(Done) probe.expectMessage(Done) diff --git a/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ShardedDaemonProcessRescaleSpec.scala b/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ShardedDaemonProcessRescaleSpec.scala index 3d3f61468b0..436b597924e 100644 --- a/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ShardedDaemonProcessRescaleSpec.scala +++ b/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ShardedDaemonProcessRescaleSpec.scala @@ -42,15 +42,16 @@ object ShardedDaemonProcessRescaleSpec extends MultiNodeConfig { val snitchRouter = ctx.spawn(Routers.group(SnitchServiceKey), "router") snitchRouter ! ProcessActorEvent(id, "Started") - Behaviors.receiveMessagePartial { - case Stop => - snitchRouter ! ProcessActorEvent(id, "Stopped") - Behaviors.stopped + Behaviors.receiveMessagePartial { case Stop => + snitchRouter ! ProcessActorEvent(id, "Stopped") + Behaviors.stopped } } } - commonConfig(ConfigFactory.parseString(""" + commonConfig( + ConfigFactory + .parseString(""" akka.loglevel = DEBUG akka.cluster.sharded-daemon-process { sharding { @@ -60,7 +61,8 @@ object ShardedDaemonProcessRescaleSpec extends MultiNodeConfig { # quick ping to make test swift keep-alive-interval = 1s } - """).withFallback(MultiNodeClusterSpec.clusterConfig)) + """) + .withFallback(MultiNodeClusterSpec.clusterConfig)) } @@ -87,10 +89,12 @@ abstract class ShardedDaemonProcessRescaleSpec } enterBarrier("snitch-registered") - topicProbe.awaitAssert({ - typedSystem.receptionist ! Receptionist.Find(SnitchServiceKey, topicProbe.ref) - topicProbe.expectMessageType[Receptionist.Listing].serviceInstances(SnitchServiceKey).size should ===(1) - }, 5.seconds) + topicProbe.awaitAssert( + { + typedSystem.receptionist ! 
Receptionist.Find(SnitchServiceKey, topicProbe.ref) + topicProbe.expectMessageType[Receptionist.Listing].serviceInstances(SnitchServiceKey).size should ===(1) + }, + 5.seconds) enterBarrier("snitch-seen") } @@ -138,7 +142,7 @@ abstract class ShardedDaemonProcessRescaleSpec val reply = probe.receiveMessage() reply.numberOfProcesses should ===(2) reply.revision should ===(2) - reply.rescaleInProgress === (false) + reply.rescaleInProgress === false } enterBarrier("sharded-daemon-process-query") } diff --git a/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ShardedDaemonProcessSpec.scala b/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ShardedDaemonProcessSpec.scala index d3f6986410f..bbe128ac0b2 100644 --- a/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ShardedDaemonProcessSpec.scala +++ b/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/ShardedDaemonProcessSpec.scala @@ -39,15 +39,16 @@ object ShardedDaemonProcessSpec extends MultiNodeConfig { val snitchRouter = ctx.spawn(Routers.group(SnitchServiceKey), "router") snitchRouter ! ProcessActorEvent(id, "Started") - Behaviors.receiveMessagePartial { - case Stop => - snitchRouter ! ProcessActorEvent(id, "Stopped") - Behaviors.stopped + Behaviors.receiveMessagePartial { case Stop => + snitchRouter ! 
ProcessActorEvent(id, "Stopped") + Behaviors.stopped } } } - commonConfig(ConfigFactory.parseString(""" + commonConfig( + ConfigFactory + .parseString(""" akka.loglevel = DEBUG akka.cluster.sharded-daemon-process { sharding { @@ -57,7 +58,8 @@ object ShardedDaemonProcessSpec extends MultiNodeConfig { # quick ping to make test swift keep-alive-interval = 1s } - """).withFallback(MultiNodeClusterSpec.clusterConfig)) + """) + .withFallback(MultiNodeClusterSpec.clusterConfig)) } @@ -83,10 +85,12 @@ abstract class ShardedDaemonProcessSpec } enterBarrier("snitch-registered") - probe.awaitAssert({ - typedSystem.receptionist ! Receptionist.Find(SnitchServiceKey, probe.ref) - probe.expectMessageType[Receptionist.Listing].serviceInstances(SnitchServiceKey).size should ===(1) - }, 5.seconds) + probe.awaitAssert( + { + typedSystem.receptionist ! Receptionist.Find(SnitchServiceKey, probe.ref) + probe.expectMessageType[Receptionist.Listing].serviceInstances(SnitchServiceKey).size should ===(1) + }, + 5.seconds) enterBarrier("snitch-seen") } diff --git a/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/delivery/DeliveryThroughputSpec.scala b/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/delivery/DeliveryThroughputSpec.scala index 8aa65e45cfb..255a749864c 100644 --- a/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/delivery/DeliveryThroughputSpec.scala +++ b/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/delivery/DeliveryThroughputSpec.scala @@ -110,10 +110,9 @@ object DeliveryThroughputSpec extends MultiNodeConfig { case Stop => Behaviors.stopped } - .receiveSignal { - case (_, PostStop) => - rateReporter.halt() - Behaviors.same + .receiveSignal { case (_, PostStop) => + rateReporter.halt() + Behaviors.same } } @@ -166,7 +165,7 @@ object DeliveryThroughputSpec extends MultiNodeConfig { resultReporter: BenchmarkFileReporter): Unit = { val numberOfMessages = 
testSettings.totalMessages val took = NANOSECONDS.toMillis(System.nanoTime - startTime) - val throughput = (numberOfMessages * 1000.0 / took) + val throughput = numberOfMessages * 1000.0 / took resultReporter.reportResults( s"=== ${resultReporter.testName} ${testSettings.testName}: " + @@ -198,7 +197,7 @@ object DeliveryThroughputSpec extends MultiNodeConfig { context.messageAdapter[WorkPullingProducerController.RequestNext[Consumer.Command]](WrappedRequestNext(_)) var startTime = System.nanoTime() var remaining = numberOfMessages + context.system.settings.config - .getInt("akka.reliable-delivery.consumer-controller.flow-control-window") + .getInt("akka.reliable-delivery.consumer-controller.flow-control-window") Behaviors.receiveMessage { case WrappedRequestNext(next) => @@ -245,7 +244,7 @@ object DeliveryThroughputSpec extends MultiNodeConfig { context.messageAdapter[ShardingProducerController.RequestNext[Consumer.Command]](WrappedRequestNext(_)) var startTime = System.nanoTime() var remaining = numberOfMessages + context.system.settings.config - .getInt("akka.reliable-delivery.sharding.consumer-controller.flow-control-window") + .getInt("akka.reliable-delivery.sharding.consumer-controller.flow-control-window") var latestDemand: ShardingProducerController.RequestNext[Consumer.Command] = null var messagesSentToEachEntity: Map[String, Long] = Map.empty[String, Long].withDefaultValue(0L) @@ -279,7 +278,7 @@ object DeliveryThroughputSpec extends MultiNodeConfig { remaining, latestDemand, messagesSentToEachEntity, - (remaining % testSettings.numberOfConsumers)) + remaining % testSettings.numberOfConsumers) Behaviors.same } } @@ -370,7 +369,7 @@ abstract class DeliveryThroughputSpec runPerfFlames(first, second, third)(delay = 5.seconds) runOn(second, third) { - val range = if (myself == second) (1 to numberOfConsumers by 2) else (2 to numberOfConsumers by 2) + val range = if (myself == second) 1 to numberOfConsumers by 2 else 2 to numberOfConsumers by 2 val consumers = 
range.map { n => val consumerController = spawn(ConsumerController[Consumer.Command](serviceKey(testName)), s"consumerController-$n-$testName") diff --git a/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/ReplicatedShardingSpec.scala b/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/ReplicatedShardingSpec.scala index 391a1e3b668..375a1beffaa 100644 --- a/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/ReplicatedShardingSpec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/ReplicatedShardingSpec.scala @@ -39,27 +39,37 @@ import akka.util.ccompat._ @ccompatUsedUntil213 object ReplicatedShardingSpec { - def commonConfig = ConfigFactory.parseString(""" + def commonConfig = ConfigFactory + .parseString(""" akka.loglevel = DEBUG akka.loggers = ["akka.testkit.SilenceAllTestEventListener"] akka.actor.provider = "cluster" - akka.remote.artery.canonical.port = 0""").withFallback(PersistenceTestKitPlugin.config) + akka.remote.artery.canonical.port = 0""") + .withFallback(PersistenceTestKitPlugin.config) - def roleAConfig = ConfigFactory.parseString(""" + def roleAConfig = ConfigFactory + .parseString(""" akka.cluster.roles = ["DC-A"] - """.stripMargin).withFallback(commonConfig) + """.stripMargin) + .withFallback(commonConfig) - def roleBConfig = ConfigFactory.parseString(""" + def roleBConfig = ConfigFactory + .parseString(""" akka.cluster.roles = ["DC-B"] - """.stripMargin).withFallback(commonConfig) + """.stripMargin) + .withFallback(commonConfig) - def dcAConfig = ConfigFactory.parseString(""" + def dcAConfig = ConfigFactory + .parseString(""" akka.cluster.multi-data-center.self-data-center = "DC-A" - """).withFallback(commonConfig) + """) + .withFallback(commonConfig) - def dcBConfig = ConfigFactory.parseString(""" + def dcBConfig = ConfigFactory + .parseString(""" akka.cluster.multi-data-center.self-data-center = "DC-B" - """).withFallback(commonConfig) + """) + 
.withFallback(commonConfig) sealed trait ReplicationType case object Role extends ReplicationType @@ -186,10 +196,9 @@ object ProxyActor { case ForwardToAllString(entityId, cmd) => val entityRefs = replicatedShardingStringSet.entityRefsFor(entityId) ctx.log.infoN("Entity refs {}", entityRefs) - entityRefs.foreach { - case (replica, ref) => - ctx.log.infoN("Forwarding to replica {} ref {}", replica, ref) - ref ! cmd + entityRefs.foreach { case (replica, ref) => + ctx.log.infoN("Forwarding to replica {} ref {}", replica, ref) + ref ! cmd } Behaviors.same case ForwardToRandomString(entityId, cmd) => @@ -200,8 +209,8 @@ object ProxyActor { chosen ! cmd Behaviors.same case ForwardToAllInt(entityId, cmd) => - replicatedShardingIntSet.entityRefsFor(entityId).foreach { - case (_, ref) => ref ! cmd + replicatedShardingIntSet.entityRefsFor(entityId).foreach { case (_, ref) => + ref ! cmd } Behaviors.same case ForwardToRandomInt(entityId, cmd) => diff --git a/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/delivery/DurableShardingSpec.scala b/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/delivery/DurableShardingSpec.scala index 2de717473f1..739ca66a706 100644 --- a/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/delivery/DurableShardingSpec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/delivery/DurableShardingSpec.scala @@ -69,10 +69,9 @@ class DurableShardingSpec TestConsumer.JobDelivery(d.message, d.confirmTo, d.producerId, d.seqNr) } c ! ConsumerController.Start(deliveryAdapter) - Behaviors.receiveMessagePartial { - case jobDelivery: TestConsumer.JobDelivery => - consumerProbe.ref ! jobDelivery - Behaviors.same + Behaviors.receiveMessagePartial { case jobDelivery: TestConsumer.JobDelivery => + consumerProbe.ref ! 
jobDelivery + Behaviors.same } } diff --git a/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/internal/Murmur2Spec.scala b/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/internal/Murmur2Spec.scala index 2d9da94923f..53f1985799c 100644 --- a/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/internal/Murmur2Spec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/internal/Murmur2Spec.scala @@ -14,11 +14,10 @@ class Murmur2Spec extends AnyWordSpecLike with Matchers { // expected correct hash values from the kafka murmur2 impl // https://github.com/apache/kafka/blob/db42afd6e24ef4291390b4d1c1f10758beedefed/clients/src/main/java/org/apache/kafka/common/utils/Utils.java#L500 Seq("1" -> -1993445489, "12" -> 126087238, "123" -> -267702483, "1234" -> -1614185708, "12345" -> -1188365604) - .foreach { - case (string, expectedHash) => - s"calculate the correct checksum for '$string'" in { - Murmur2.murmur2(string.getBytes(UTF_8)) should ===(expectedHash) - } + .foreach { case (string, expectedHash) => + s"calculate the correct checksum for '$string'" in { + Murmur2.murmur2(string.getBytes(UTF_8)) should ===(expectedHash) + } } } } diff --git a/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ClusterShardingSpec.scala b/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ClusterShardingSpec.scala index 3e8d70faf16..69e2b3cb5d5 100644 --- a/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ClusterShardingSpec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ClusterShardingSpec.scala @@ -84,10 +84,9 @@ object ClusterShardingSpec { toMe ! "Hello!" Behaviors.same } - .receiveSignal { - case (_, PostStop) => - stopProbe.foreach(_ ! "PostStop") - Behaviors.same + .receiveSignal { case (_, PostStop) => + stopProbe.foreach(_ ! 
"PostStop") + Behaviors.same } def behaviorWithId() = Behaviors.receive[IdTestProtocol] { diff --git a/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ClusterShardingStateSpec.scala b/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ClusterShardingStateSpec.scala index 19a5db3c312..1ec9c423e3a 100644 --- a/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ClusterShardingStateSpec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ClusterShardingStateSpec.scala @@ -53,10 +53,10 @@ class ClusterShardingStateSpec shardingRef ! IdReplyPlz("id1", replyProbe.ref) replyProbe.expectMessage("Hello!") - //#get-region-state + // #get-region-state ClusterSharding(system).shardState ! GetShardRegionState(typeKey, probe.ref) val state = probe.receiveMessage() - //#get-region-state + // #get-region-state state.shards should be(Set(ShardState(shardExtractor.shardId("id1"), Set("id1")))) } } diff --git a/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ShardedDaemonProcessRescaleSpec.scala b/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ShardedDaemonProcessRescaleSpec.scala index 13b59319c75..b3455bebcd4 100644 --- a/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ShardedDaemonProcessRescaleSpec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ShardedDaemonProcessRescaleSpec.scala @@ -55,10 +55,9 @@ object ShardedDaemonProcessRescaleSpec { def apply(id: Int, totalCount: Int, probe: ActorRef[Any]): Behavior[Command] = Behaviors.setup { ctx => probe ! Started(id, totalCount, ctx.self) - Behaviors.receiveMessage { - case Stop => - probe ! Stopping(ctx.self) - Behaviors.stopped + Behaviors.receiveMessage { case Stop => + probe ! 
Stopping(ctx.self) + Behaviors.stopped } } @@ -81,9 +80,11 @@ class ShardedDaemonProcessRescaleSpec "have a single node cluster running first" in { val probe = createTestProbe() Cluster(system).manager ! Join(Cluster(system).selfMember.address) - probe.awaitAssert({ - Cluster(system).selfMember.status == MemberStatus.Up - }, 3.seconds) + probe.awaitAssert( + { + Cluster(system).selfMember.status == MemberStatus.Up + }, + 3.seconds) } "start 4 workers" in { diff --git a/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ShardedDaemonProcessSpec.scala b/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ShardedDaemonProcessSpec.scala index 2a47b370f70..3b3ef729519 100644 --- a/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ShardedDaemonProcessSpec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ShardedDaemonProcessSpec.scala @@ -52,9 +52,8 @@ object ShardedDaemonProcessSpec { def apply(id: Int, probe: ActorRef[Any]): Behavior[Command] = Behaviors.setup { ctx => probe ! Started(id, ctx.self) - Behaviors.receiveMessage { - case Stop => - Behaviors.stopped + Behaviors.receiveMessage { case Stop => + Behaviors.stopped } } @@ -74,9 +73,11 @@ class ShardedDaemonProcessSpec "have a single node cluster running first" in { val probe = createTestProbe() Cluster(system).manager ! Join(Cluster(system).selfMember.address) - probe.awaitAssert({ - Cluster(system).selfMember.status == MemberStatus.Up - }, 3.seconds) + probe.awaitAssert( + { + Cluster(system).selfMember.status == MemberStatus.Up + }, + 3.seconds) } "start N actors with unique ids" in { @@ -113,9 +114,11 @@ class ShardedDaemonProcessSpec "have a single node cluster running first" in { val probe = createTestProbe() Cluster(system).manager ! 
Join(Cluster(system).selfMember.address) - probe.awaitAssert({ - Cluster(system).selfMember.status == MemberStatus.Up - }, 3.seconds) + probe.awaitAssert( + { + Cluster(system).selfMember.status == MemberStatus.Up + }, + 3.seconds) } "send keep alive messages to original id scheme when revision is 0" in { diff --git a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleDocSpec.scala b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleDocSpec.scala index 01b4ede8916..23ff0afc8e9 100644 --- a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleDocSpec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleDocSpec.scala @@ -22,7 +22,7 @@ import docs.akka.cluster.sharding.typed.AccountExampleWithEventHandlersInState.A //#testkit class AccountExampleDocSpec extends ScalaTestWithActorTestKit(EventSourcedBehaviorTestKit.config) - //#testkit + // #testkit with AnyWordSpecLike with BeforeAndAfterEach with LogCapturing { diff --git a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithCommandHandlersInDurableState.scala b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithCommandHandlersInDurableState.scala index a4c30f00da6..bfe7d399788 100644 --- a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithCommandHandlersInDurableState.scala +++ b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithCommandHandlersInDurableState.scala @@ -22,17 +22,17 @@ import akka.serialization.jackson.CborSerializable */ object AccountExampleWithCommandHandlersInDurableState { - //#account-entity + // #account-entity object AccountEntity { // Command - //#reply-command + // #reply-command sealed trait Command extends CborSerializable - //#reply-command + // 
#reply-command final case class CreateAccount(replyTo: ActorRef[StatusReply[Done]]) extends Command final case class Deposit(amount: BigDecimal, replyTo: ActorRef[StatusReply[Done]]) extends Command - //#reply-command + // #reply-command final case class Withdraw(amount: BigDecimal, replyTo: ActorRef[StatusReply[Done]]) extends Command - //#reply-command + // #reply-command final case class GetBalance(replyTo: ActorRef[CurrentBalance]) extends Command final case class CloseAccount(replyTo: ActorRef[StatusReply[Done]]) extends Command @@ -85,7 +85,7 @@ object AccountExampleWithCommandHandlersInDurableState { balance - amount >= Zero } - //#reply + // #reply private def deposit(cmd: Deposit) = { Effect.persist(copy(balance = balance + cmd.amount)).thenReply(cmd.replyTo)(_ => StatusReply.Ack) } @@ -97,7 +97,7 @@ object AccountExampleWithCommandHandlersInDurableState { Effect.reply(cmd.replyTo)( StatusReply.Error(s"Insufficient balance ${balance} to be able to withdraw ${cmd.amount}")) } - //#reply + // #reply } case object ClosedAccount extends Account { @@ -123,13 +123,13 @@ object AccountExampleWithCommandHandlersInDurableState { val TypeKey: EntityTypeKey[Command] = EntityTypeKey[Command]("Account") - //#withEnforcedReplies + // #withEnforcedReplies def apply(persistenceId: PersistenceId): Behavior[Command] = { DurableStateBehavior .withEnforcedReplies[Command, Account](persistenceId, EmptyAccount, (state, cmd) => state.applyCommand(cmd)) } - //#withEnforcedReplies + // #withEnforcedReplies } - //#account-entity + // #account-entity } diff --git a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithCommandHandlersInState.scala b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithCommandHandlersInState.scala index 9095e00f081..e064e7c46fa 100644 --- a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithCommandHandlersInState.scala +++ 
b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithCommandHandlersInState.scala @@ -23,7 +23,7 @@ import akka.serialization.jackson.CborSerializable */ object AccountExampleWithCommandHandlersInState { - //#account-entity + // #account-entity object AccountEntity { // Command sealed trait Command extends CborSerializable @@ -102,7 +102,7 @@ object AccountExampleWithCommandHandlersInState { case Deposited(amount) => copy(balance = balance + amount) case Withdrawn(amount) => copy(balance = balance - amount) case AccountClosed => ClosedAccount - case AccountCreated => throw new IllegalStateException(s"unexpected event [$event] in state [OpenedAccount]") + case AccountCreated => throw new IllegalStateException(s"unexpected event [$event] in state [OpenedAccount]") } def canWithdraw(amount: BigDecimal): Boolean = { @@ -145,6 +145,6 @@ object AccountExampleWithCommandHandlersInState { } } - //#account-entity + // #account-entity } diff --git a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithEventHandlersInState.scala b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithEventHandlersInState.scala index 3253bd0884b..4bc3c1666ea 100644 --- a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithEventHandlersInState.scala +++ b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithEventHandlersInState.scala @@ -25,17 +25,17 @@ import akka.serialization.jackson.CborSerializable */ object AccountExampleWithEventHandlersInState { - //#account-entity + // #account-entity object AccountEntity { // Command - //#reply-command + // #reply-command sealed trait Command extends CborSerializable - //#reply-command + // #reply-command final case class CreateAccount(replyTo: ActorRef[StatusReply[Done]]) extends Command final case class Deposit(amount: BigDecimal, replyTo: 
ActorRef[StatusReply[Done]]) extends Command - //#reply-command + // #reply-command final case class Withdraw(amount: BigDecimal, replyTo: ActorRef[StatusReply[Done]]) extends Command - //#reply-command + // #reply-command final case class GetBalance(replyTo: ActorRef[CurrentBalance]) extends Command final case class CloseAccount(replyTo: ActorRef[StatusReply[Done]]) extends Command @@ -69,7 +69,7 @@ object AccountExampleWithEventHandlersInState { case Deposited(amount) => copy(balance = balance + amount) case Withdrawn(amount) => copy(balance = balance - amount) case AccountClosed => ClosedAccount - case AccountCreated => throw new IllegalStateException(s"unexpected event [$event] in state [OpenedAccount]") + case AccountCreated => throw new IllegalStateException(s"unexpected event [$event] in state [OpenedAccount]") } def canWithdraw(amount: BigDecimal): Boolean = { @@ -90,11 +90,11 @@ object AccountExampleWithEventHandlersInState { // When filling in the parameters of EventSourcedBehavior.apply you can use IntelliJ alt+Enter > createValue // to generate the stub with types for the command and event handlers. 
- //#withEnforcedReplies + // #withEnforcedReplies def apply(accountNumber: String, persistenceId: PersistenceId): Behavior[Command] = { EventSourcedBehavior.withEnforcedReplies(persistenceId, EmptyAccount, commandHandler(accountNumber), eventHandler) } - //#withEnforcedReplies + // #withEnforcedReplies private def commandHandler(accountNumber: String): (Account, Command) => ReplyEffect[Event, Account] = { (state, cmd) => @@ -102,7 +102,7 @@ object AccountExampleWithEventHandlersInState { case EmptyAccount => cmd match { case c: CreateAccount => createAccount(c) - case _ => Effect.unhandled.thenNoReply() // CreateAccount before handling any other commands + case _ => Effect.unhandled.thenNoReply() // CreateAccount before handling any other commands } case acc @ OpenedAccount(_) => @@ -149,7 +149,7 @@ object AccountExampleWithEventHandlersInState { Effect.persist(Deposited(cmd.amount)).thenReply(cmd.replyTo)(_ => StatusReply.Ack) } - //#reply + // #reply private def withdraw(acc: OpenedAccount, cmd: Withdraw): ReplyEffect[Event, Account] = { if (acc.canWithdraw(cmd.amount)) Effect.persist(Withdrawn(cmd.amount)).thenReply(cmd.replyTo)(_ => StatusReply.Ack) @@ -157,7 +157,7 @@ object AccountExampleWithEventHandlersInState { Effect.reply(cmd.replyTo)( StatusReply.Error(s"Insufficient balance ${acc.balance} to be able to withdraw ${cmd.amount}")) } - //#reply + // #reply private def getBalance(acc: OpenedAccount, cmd: GetBalance): ReplyEffect[Event, Account] = { Effect.reply(cmd.replyTo)(CurrentBalance(acc.balance)) @@ -171,6 +171,6 @@ object AccountExampleWithEventHandlersInState { } } - //#account-entity + // #account-entity } diff --git a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithOptionDurableState.scala b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithOptionDurableState.scala index cd65deed500..ad55fab1a5e 100644 --- 
a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithOptionDurableState.scala +++ b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithOptionDurableState.scala @@ -22,7 +22,7 @@ import akka.serialization.jackson.CborSerializable */ object AccountExampleWithOptionDurableState { - //#account-entity + // #account-entity object AccountEntity { // Command sealed trait Command extends CborSerializable @@ -121,6 +121,6 @@ object AccountExampleWithOptionDurableState { } } } - //#account-entity + // #account-entity } diff --git a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithOptionState.scala b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithOptionState.scala index e7f69dc4316..c9f2224810a 100644 --- a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithOptionState.scala +++ b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/AccountExampleWithOptionState.scala @@ -23,7 +23,7 @@ import akka.serialization.jackson.CborSerializable */ object AccountExampleWithOptionState { - //#account-entity + // #account-entity object AccountEntity { // Command sealed trait Command extends CborSerializable @@ -86,7 +86,7 @@ object AccountExampleWithOptionState { case Deposited(amount) => copy(balance = balance + amount) case Withdrawn(amount) => copy(balance = balance - amount) case AccountClosed => ClosedAccount - case AccountCreated => throw new IllegalStateException(s"unexpected event [$event] in state [OpenedAccount]") + case AccountCreated => throw new IllegalStateException(s"unexpected event [$event] in state [OpenedAccount]") } def canWithdraw(amount: BigDecimal): Boolean = { @@ -154,6 +154,6 @@ object AccountExampleWithOptionState { } } - //#account-entity + // #account-entity } diff --git 
a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/DurableStateStoreQueryUsageCompileOnlySpec.scala b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/DurableStateStoreQueryUsageCompileOnlySpec.scala index 928921e9403..455055d57f2 100644 --- a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/DurableStateStoreQueryUsageCompileOnlySpec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/DurableStateStoreQueryUsageCompileOnlySpec.scala @@ -13,7 +13,7 @@ import akka.stream.scaladsl.Source @nowarn object DurableStateStoreQueryUsageCompileOnlySpec { def getQuery[Record](system: ActorSystem, pluginId: String, offset: Offset) = { - //#get-durable-state-store-query-example + // #get-durable-state-store-query-example import akka.persistence.state.DurableStateStoreRegistry import akka.persistence.query.scaladsl.DurableStateStoreQuery import akka.persistence.query.DurableStateChange @@ -26,6 +26,6 @@ object DurableStateStoreQueryUsageCompileOnlySpec { case UpdatedDurableState(persistenceId, revision, value, offset, timestamp) => Some(value) case _: DeletedDurableState[_] => None } - //#get-durable-state-store-query-example + // #get-durable-state-store-query-example } } diff --git a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/HelloWorldPersistentEntityExample.scala b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/HelloWorldPersistentEntityExample.scala index 44007fa62af..5e47527ef79 100644 --- a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/HelloWorldPersistentEntityExample.scala +++ b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/HelloWorldPersistentEntityExample.scala @@ -16,7 +16,7 @@ import akka.serialization.jackson.CborSerializable object HelloWorldPersistentEntityExample { - //#persistent-entity-usage + // #persistent-entity-usage 
import akka.cluster.sharding.typed.scaladsl.ClusterSharding import akka.cluster.sharding.typed.scaladsl.Entity import akka.util.Timeout @@ -40,9 +40,9 @@ object HelloWorldPersistentEntityExample { } } - //#persistent-entity-usage + // #persistent-entity-usage - //#persistent-entity + // #persistent-entity import akka.actor.typed.Behavior import akka.cluster.sharding.typed.scaladsl.EntityTypeKey import akka.persistence.typed.scaladsl.Effect @@ -89,6 +89,6 @@ object HelloWorldPersistentEntityExample { } } - //#persistent-entity + // #persistent-entity } diff --git a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/ReplicatedShardingCompileOnlySpec.scala b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/ReplicatedShardingCompileOnlySpec.scala index 144696b393b..a37e228d6b0 100644 --- a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/ReplicatedShardingCompileOnlySpec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/ReplicatedShardingCompileOnlySpec.scala @@ -27,35 +27,37 @@ object ReplicatedShardingCompileOnlySpec { def apply(replicationId: ReplicationId): Behavior[Command] = ??? 
} - //#bootstrap + // #bootstrap ReplicatedEntityProvider[Command]("MyEntityType", Set(ReplicaId("DC-A"), ReplicaId("DC-B"))) { (entityTypeKey, replicaId) => - ReplicatedEntity(replicaId, Entity(entityTypeKey) { entityContext => - // the sharding entity id contains the business entityId, entityType, and replica id - // which you'll need to create a ReplicatedEventSourcedBehavior - val replicationId = ReplicationId.fromString(entityContext.entityId) - MyEventSourcedBehavior(replicationId) - }) + ReplicatedEntity( + replicaId, + Entity(entityTypeKey) { entityContext => + // the sharding entity id contains the business entityId, entityType, and replica id + // which you'll need to create a ReplicatedEventSourcedBehavior + val replicationId = ReplicationId.fromString(entityContext.entityId) + MyEventSourcedBehavior(replicationId) + }) } - //#bootstrap + // #bootstrap - //#bootstrap-dc + // #bootstrap-dc ReplicatedEntityProvider.perDataCenter("MyEntityType", Set(ReplicaId("DC-A"), ReplicaId("DC-B"))) { replicationId => MyEventSourcedBehavior(replicationId) } - //#bootstrap-dc + // #bootstrap-dc - //#bootstrap-role + // #bootstrap-role val provider = ReplicatedEntityProvider.perRole("MyEntityType", Set(ReplicaId("DC-A"), ReplicaId("DC-B"))) { replicationId => MyEventSourcedBehavior(replicationId) } - //#bootstrap-role + // #bootstrap-role - //#sending-messages + // #sending-messages val myReplicatedSharding: ReplicatedSharding[Command] = ReplicatedShardingExtension(system).init(provider) val entityRefs: Map[ReplicaId, EntityRef[Command]] = myReplicatedSharding.entityRefsFor("myEntityId") - //#sending-messages + // #sending-messages } diff --git a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/ShardingCompileOnlySpec.scala b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/ShardingCompileOnlySpec.scala index 48b9d08b18c..b875d27d1c7 100644 --- 
a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/ShardingCompileOnlySpec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/ShardingCompileOnlySpec.scala @@ -24,16 +24,16 @@ object ShardingCompileOnlySpec { object Basics { - //#sharding-extension + // #sharding-extension import akka.cluster.sharding.typed.ShardingEnvelope import akka.cluster.sharding.typed.scaladsl.ClusterSharding import akka.cluster.sharding.typed.scaladsl.EntityTypeKey import akka.cluster.sharding.typed.scaladsl.EntityRef val sharding = ClusterSharding(system) - //#sharding-extension + // #sharding-extension - //#counter + // #counter object Counter { sealed trait Command case object Increment extends Command @@ -54,36 +54,36 @@ object ShardingCompileOnlySpec { } } - //#counter + // #counter - //#init + // #init val TypeKey = EntityTypeKey[Counter.Command]("Counter") val shardRegion: ActorRef[ShardingEnvelope[Counter.Command]] = sharding.init(Entity(TypeKey)(createBehavior = entityContext => Counter(entityContext.entityId))) - //#init + // #init - //#send + // #send // With an EntityRef val counterOne: EntityRef[Counter.Command] = sharding.entityRefFor(TypeKey, "counter-1") counterOne ! Counter.Increment // Entity id is specified via an `ShardingEnvelope` shardRegion ! 
ShardingEnvelope("counter-1", Counter.Increment) - //#send + // #send - //#persistence + // #persistence val BlogTypeKey = EntityTypeKey[Command]("BlogPost") ClusterSharding(system).init(Entity(BlogTypeKey) { entityContext => BlogPostEntity(entityContext.entityId, PersistenceId(entityContext.entityTypeKey.name, entityContext.entityId)) }) - //#persistence + // #persistence - //#roles + // #roles sharding.init( Entity(TypeKey)(createBehavior = entityContext => Counter(entityContext.entityId)).withRole("backend")) - //#roles + // #roles } @@ -91,7 +91,7 @@ object ShardingCompileOnlySpec { import akka.cluster.sharding.typed.scaladsl.ClusterSharding import akka.cluster.sharding.typed.scaladsl.EntityTypeKey - //#counter-passivate + // #counter-passivate object Counter { sealed trait Command case object Increment extends Command @@ -122,14 +122,14 @@ object ShardingCompileOnlySpec { } } } - //#counter-passivate + // #counter-passivate - //#counter-passivate-init + // #counter-passivate-init val TypeKey = EntityTypeKey[Counter.Command]("Counter") ClusterSharding(system).init(Entity(TypeKey)(createBehavior = entityContext => Counter(entityContext.shard, entityContext.entityId)).withStopMessage(Counter.GoodByeCounter)) - //#counter-passivate-init + // #counter-passivate-init } @@ -138,7 +138,7 @@ object ShardingCompileOnlySpec { import akka.cluster.sharding.typed.scaladsl.ClusterSharding import akka.cluster.sharding.typed.scaladsl.EntityTypeKey - //#sharded-response + // #sharded-response // a sharded actor that needs counter updates object CounterConsumer { sealed trait Command @@ -169,7 +169,7 @@ object ShardingCompileOnlySpec { } } - //#sharded-response + // #sharded-response } object ShardRegionStateQuery { @@ -180,14 +180,14 @@ object ShardingCompileOnlySpec { val replyMessageAdapter: ActorRef[akka.cluster.sharding.ShardRegion.CurrentShardRegionState] = ??? 
- //#get-shard-region-state + // #get-shard-region-state import akka.cluster.sharding.typed.GetShardRegionState import akka.cluster.sharding.ShardRegion.CurrentShardRegionState val replyTo: ActorRef[CurrentShardRegionState] = replyMessageAdapter ClusterSharding(system).shardState ! GetShardRegionState(Counter.TypeKey, replyTo) - //#get-shard-region-state + // #get-shard-region-state } object ClusterShardingStatsQuery { @@ -198,7 +198,7 @@ object ShardingCompileOnlySpec { val replyMessageAdapter: ActorRef[akka.cluster.sharding.ShardRegion.ClusterShardingStats] = ??? - //#get-cluster-sharding-stats + // #get-cluster-sharding-stats import akka.cluster.sharding.typed.GetClusterShardingStats import akka.cluster.sharding.ShardRegion.ClusterShardingStats import scala.concurrent.duration._ @@ -207,7 +207,7 @@ object ShardingCompileOnlySpec { val timeout: FiniteDuration = 5.seconds ClusterSharding(system).shardState ! GetClusterShardingStats(Counter.TypeKey, timeout, replyTo) - //#get-cluster-sharding-stats + // #get-cluster-sharding-stats } } diff --git a/akka-cluster-sharding-typed/src/test/scala/docs/delivery/PointToPointDocExample.scala b/akka-cluster-sharding-typed/src/test/scala/docs/delivery/PointToPointDocExample.scala index 2373c72b10c..1b164e4c85e 100644 --- a/akka-cluster-sharding-typed/src/test/scala/docs/delivery/PointToPointDocExample.scala +++ b/akka-cluster-sharding-typed/src/test/scala/docs/delivery/PointToPointDocExample.scala @@ -21,7 +21,7 @@ import akka.actor.typed.scaladsl.Behaviors @nowarn("msg=never used") object PointToPointDocExample { - //#producer + // #producer object FibonacciProducer { sealed trait Command @@ -39,21 +39,20 @@ object PointToPointDocExample { } private def fibonacci(n: Long, b: BigInt, a: BigInt): Behavior[Command] = { - Behaviors.receive { - case (context, WrappedRequestNext(next)) => - context.log.info("Generated fibonacci {}: {}", n, a) - next.sendNextTo ! 
FibonacciConsumer.FibonacciNumber(n, a) - - if (n == 1000) - Behaviors.stopped - else - fibonacci(n + 1, a + b, b) + Behaviors.receive { case (context, WrappedRequestNext(next)) => + context.log.info("Generated fibonacci {}: {}", n, a) + next.sendNextTo ! FibonacciConsumer.FibonacciNumber(n, a) + + if (n == 1000) + Behaviors.stopped + else + fibonacci(n + 1, a + b, b) } } } - //#producer + // #producer - //#consumer + // #consumer import akka.actor.typed.delivery.ConsumerController object FibonacciConsumer { @@ -79,12 +78,12 @@ object PointToPointDocExample { } } } - //#consumer + // #consumer object Guardian { def apply(): Behavior[Nothing] = { Behaviors.setup[Nothing] { context => - //#connect + // #connect val consumerController = context.spawn(ConsumerController[FibonacciConsumer.Command](), "consumerController") context.spawn(FibonacciConsumer(consumerController), "consumer") @@ -95,7 +94,7 @@ object PointToPointDocExample { context.spawn(FibonacciProducer(producerController), "producer") consumerController ! 
ConsumerController.RegisterToProducerController(producerController) - //#connect + // #connect Behaviors.empty } diff --git a/akka-cluster-sharding-typed/src/test/scala/docs/delivery/ShardingDocExample.scala b/akka-cluster-sharding-typed/src/test/scala/docs/delivery/ShardingDocExample.scala index 2e1cb7077a2..f0d72aace6f 100644 --- a/akka-cluster-sharding-typed/src/test/scala/docs/delivery/ShardingDocExample.scala +++ b/akka-cluster-sharding-typed/src/test/scala/docs/delivery/ShardingDocExample.scala @@ -23,7 +23,7 @@ import akka.util.Timeout object ShardingDocExample { - //#consumer + // #consumer trait DB { def save(id: String, value: TodoList.State): Future[Done] def load(id: String): Future[TodoList.State] @@ -102,9 +102,9 @@ object ShardingDocExample { } } } - //#consumer + // #consumer - //#producer + // #producer import akka.cluster.sharding.typed.delivery.ShardingProducerController object TodoService { @@ -182,11 +182,11 @@ object ShardingDocExample { } } - //#producer + // #producer def illustrateInit(): Unit = { Behaviors.setup[Nothing] { context => - //#init + // #init import akka.cluster.sharding.typed.scaladsl.ClusterSharding import akka.cluster.sharding.typed.scaladsl.Entity import akka.cluster.sharding.typed.scaladsl.EntityTypeKey @@ -208,7 +208,7 @@ object ShardingDocExample { context.spawn(ShardingProducerController(producerId, region, durableQueueBehavior = None), "producerController") context.spawn(TodoService(producerController), "producer") - //#init + // #init Behaviors.empty } diff --git a/akka-cluster-sharding-typed/src/test/scala/docs/delivery/WorkPullingDocExample.scala b/akka-cluster-sharding-typed/src/test/scala/docs/delivery/WorkPullingDocExample.scala index ea1fb8557b1..a2f7d446efc 100644 --- a/akka-cluster-sharding-typed/src/test/scala/docs/delivery/WorkPullingDocExample.scala +++ b/akka-cluster-sharding-typed/src/test/scala/docs/delivery/WorkPullingDocExample.scala @@ -17,12 +17,12 @@ import scala.annotation.nowarn @nowarn("msg=never 
used") object WorkPullingDocExample { - //#imports + // #imports import akka.actor.typed.scaladsl.Behaviors import akka.actor.typed.Behavior - //#imports + // #imports - //#consumer + // #consumer import akka.actor.typed.delivery.ConsumerController import akka.actor.typed.receptionist.ServiceKey @@ -41,27 +41,26 @@ object WorkPullingDocExample { context.spawn(ConsumerController(serviceKey), "consumerController") consumerController ! ConsumerController.Start(deliveryAdapter) - Behaviors.receiveMessage { - case WrappedDelivery(delivery) => - val image = delivery.message.image - val fromFormat = delivery.message.fromFormat - val toFormat = delivery.message.toFormat - // convert image... - // store result with resultId key for later retrieval + Behaviors.receiveMessage { case WrappedDelivery(delivery) => + val image = delivery.message.image + val fromFormat = delivery.message.fromFormat + val toFormat = delivery.message.toFormat + // convert image... + // store result with resultId key for later retrieval - // and when completed confirm - delivery.confirmTo ! ConsumerController.Confirmed + // and when completed confirm + delivery.confirmTo ! 
ConsumerController.Confirmed - Behaviors.same + Behaviors.same } } } } - //#consumer + // #consumer - //#producer + // #producer import akka.actor.typed.delivery.WorkPullingProducerController import akka.actor.typed.scaladsl.ActorContext import akka.actor.typed.scaladsl.StashBuffer @@ -74,9 +73,9 @@ object WorkPullingDocExample { final case class GetResult(resultId: UUID, replyTo: ActorRef[Option[Array[Byte]]]) extends Command - //#producer + // #producer - //#ask + // #ask final case class ConvertRequest( fromFormat: String, toFormat: String, @@ -91,9 +90,9 @@ object WorkPullingDocExample { private final case class AskReply(resultId: UUID, originalReplyTo: ActorRef[ConvertResponse], timeout: Boolean) extends Command - //#ask + // #ask - //#producer + // #producer def apply(): Behavior[Command] = { Behaviors.setup { context => val requestNextAdapter = @@ -105,8 +104,8 @@ object WorkPullingDocExample { workerServiceKey = ImageConverter.serviceKey, durableQueueBehavior = None), "producerController") - //#producer - //#durable-queue + // #producer + // #durable-queue import akka.persistence.typed.delivery.EventSourcedProducerQueue import akka.persistence.typed.PersistenceId @@ -118,8 +117,8 @@ object WorkPullingDocExample { workerServiceKey = ImageConverter.serviceKey, durableQueueBehavior = Some(durableQueue)), "producerController") - //#durable-queue - //#producer + // #durable-queue + // #producer producerController ! 
WorkPullingProducerController.Start(requestNextAdapter) Behaviors.withStash(1000) { stashBuffer => @@ -168,9 +167,9 @@ object WorkPullingDocExample { throw new IllegalStateException("Unexpected RequestNext") } } - //#producer + // #producer object askScope { - //#ask + // #ask import WorkPullingProducerController.MessageWithConfirmation import akka.util.Timeout @@ -224,10 +223,10 @@ object WorkPullingDocExample { } } - //#ask + // #ask } - //#producer + // #producer } - //#producer + // #producer } diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala index 1535c4a9914..dc63ff1e042 100755 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala @@ -153,7 +153,6 @@ import akka.util.ccompat.JavaConverters._ * then supposed to stop itself. Incoming messages will be buffered by the `ShardRegion` * between reception of `Passivate` and termination of the entity. Such buffered messages * are thereafter delivered to a new incarnation of the entity. 
- * */ object ClusterSharding extends ExtensionId[ClusterSharding] with ExtensionIdProvider { @@ -167,9 +166,7 @@ object ClusterSharding extends ExtensionId[ClusterSharding] with ExtensionIdProv } -/** - * @see [[ClusterSharding$ ClusterSharding companion object]] - */ +/** @see [[ClusterSharding$ ClusterSharding companion object]] */ class ClusterSharding(system: ExtendedActorSystem) extends Extension { import ClusterShardingGuardian._ import ShardCoordinator.ShardAllocationStrategy @@ -274,9 +271,7 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { handOffStopMessage) } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def internalStart( typeName: String, entityProps: String => Props, @@ -610,21 +605,22 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { dataCenter: Optional[String], messageExtractor: ShardRegion.MessageExtractor): ActorRef = { - startProxy(typeName, Option(role.orElse(null)), Option(dataCenter.orElse(null)), extractEntityId = { - case msg if messageExtractor.entityId(msg) ne null => - (messageExtractor.entityId(msg), messageExtractor.entityMessage(msg)) - }, extractShardId = msg => messageExtractor.shardId(msg)) + startProxy( + typeName, + Option(role.orElse(null)), + Option(dataCenter.orElse(null)), + extractEntityId = { + case msg if messageExtractor.entityId(msg) ne null => + (messageExtractor.entityId(msg), messageExtractor.entityMessage(msg)) + }, + extractShardId = msg => messageExtractor.shardId(msg)) } - /** - * Scala API: get all currently defined sharding type names. - */ + /** Scala API: get all currently defined sharding type names. */ def shardTypeNames: immutable.Set[String] = regions.keySet().asScala.toSet - /** - * Java API: get all currently defined sharding type names. - */ + /** Java API: get all currently defined sharding type names. 
*/ def getShardTypeNames: java.util.Set[String] = regions.keySet() /** @@ -660,9 +656,7 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { } } - /** - * The default `ShardAllocationStrategy` is configured by `least-shard-allocation-strategy` properties. - */ + /** The default `ShardAllocationStrategy` is configured by `least-shard-allocation-strategy` properties. */ def defaultShardAllocationStrategy(settings: ClusterShardingSettings): ShardAllocationStrategy = { if (settings.tuningParameters.leastShardAllocationAbsoluteLimit > 0) { // new algorithm @@ -678,9 +672,7 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { } } -/** - * INTERNAL API. - */ +/** INTERNAL API. */ private[akka] object ClusterShardingGuardian { import ShardCoordinator.ShardAllocationStrategy final case class Start( @@ -831,13 +823,13 @@ private[akka] class ClusterShardingGuardian extends Actor { def receive: Receive = { case Start( - typeName, - entityProps, - settings, - extractEntityId, - extractShardId, - allocationStrategy, - handOffStopMessage) => + typeName, + entityProps, + settings, + extractEntityId, + extractShardId, + allocationStrategy, + handOffStopMessage) => try { val encName = URLEncoder.encode(typeName, ByteString.UTF_8) val cPath = coordinatorPath(encName) diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterShardingHealthCheck.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterShardingHealthCheck.scala index 206d6f17dc8..d694c99ea6a 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterShardingHealthCheck.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterShardingHealthCheck.scala @@ -23,9 +23,7 @@ import akka.util.JavaDurationConverters._ import akka.util.Timeout import akka.util.ccompat.JavaConverters._ -/** - * Internal API - */ +/** Internal API */ @InternalApi private[akka] object ClusterShardingHealthCheckSettings { def 
apply(config: Config): ClusterShardingHealthCheckSettings = @@ -49,9 +47,7 @@ private object ClusterShardingHealthCheck { val Success = Future.successful(true) } -/** - * INTERNAL API (ctr) - */ +/** INTERNAL API (ctr) */ final class ClusterShardingHealthCheck private[akka] ( system: ActorSystem, settings: ClusterShardingHealthCheckSettings, @@ -82,7 +78,7 @@ final class ClusterShardingHealthCheck private[akka] ( if (settings.names.isEmpty || registered) { ClusterShardingHealthCheck.Success } else if (startedTimestamp != 0L && System - .currentTimeMillis() > startedTimestamp + settings.disableAfter.toMillis) { + .currentTimeMillis() > startedTimestamp + settings.disableAfter.toMillis) { ClusterShardingHealthCheck.Success } else { if (startedTimestamp == 0 && isMemberUp()) @@ -110,14 +106,11 @@ final class ClusterShardingHealthCheck private[akka] ( } allRegistered } - .recover { - case _: AskTimeoutException => - if (log.isDebugEnabled) { - log.debug( - "Shard regions [{}] did not respond in time. Failing health check.", - settings.names.mkString(",")) - } - false + .recover { case _: AskTimeoutException => + if (log.isDebugEnabled) { + log.debug("Shard regions [{}] did not respond in time. Failing health check.", settings.names.mkString(",")) + } + false } } } diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterShardingSerializable.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterShardingSerializable.scala index 4185ebe7443..8eba52c3d07 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterShardingSerializable.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterShardingSerializable.scala @@ -4,7 +4,5 @@ package akka.cluster.sharding -/** - * Marker trait for remote messages and persistent events/snapshots with special serializer. - */ +/** Marker trait for remote messages and persistent events/snapshots with special serializer. 
*/ trait ClusterShardingSerializable extends Serializable diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterShardingSettings.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterShardingSettings.scala index 510051bee9b..6099b8b2419 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterShardingSettings.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterShardingSettings.scala @@ -31,15 +31,11 @@ object ClusterShardingSettings { @InternalApi private[akka] val RememberEntitiesStoreCustom = "custom" - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] val RememberEntitiesStoreDData = "ddata" - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] val RememberEntitiesStoreEventsourced = "eventsourced" @@ -131,15 +127,11 @@ object ClusterShardingSettings { */ def create(config: Config): ClusterShardingSettings = apply(config) - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] def roleOption(role: String): Option[String] = if (role == "") None else Option(role) - /** - * API MAY CHANGE: Settings for passivation strategies may change after additional testing and feedback. - */ + /** API MAY CHANGE: Settings for passivation strategies may change after additional testing and feedback. */ @ApiMayChange final class PassivationStrategySettings private[akka] ( val idleEntitySettings: Option[PassivationStrategySettings.IdleSettings], @@ -232,9 +224,7 @@ object ClusterShardingSettings { oldSettingUsed) } - /** - * API MAY CHANGE: Settings for passivation strategies may change after additional testing and feedback. - */ + /** API MAY CHANGE: Settings for passivation strategies may change after additional testing and feedback. 
*/ @ApiMayChange object PassivationStrategySettings { val defaults = new PassivationStrategySettings( @@ -610,37 +600,27 @@ object ClusterShardingSettings { defaults.withOldIdleStrategy(idleTimeout) } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] sealed trait PassivationStrategy - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] case object NoPassivationStrategy extends PassivationStrategy - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] object IdlePassivationStrategy { def apply(settings: PassivationStrategySettings.IdleSettings): IdlePassivationStrategy = IdlePassivationStrategy(settings.timeout, settings.interval.getOrElse(settings.timeout / 2)) } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] case class IdlePassivationStrategy(timeout: FiniteDuration, interval: FiniteDuration) extends PassivationStrategy - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] object LeastRecentlyUsedPassivationStrategy { def apply( @@ -659,9 +639,7 @@ object ClusterShardingSettings { } } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] case class LeastRecentlyUsedPassivationStrategy( limit: Int, @@ -669,16 +647,12 @@ object ClusterShardingSettings { idle: Option[IdlePassivationStrategy]) extends PassivationStrategy - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] case class MostRecentlyUsedPassivationStrategy(limit: Int, idle: Option[IdlePassivationStrategy]) extends PassivationStrategy - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] object LeastFrequentlyUsedPassivationStrategy { def apply( @@ -688,9 +662,7 @@ object ClusterShardingSettings { LeastFrequentlyUsedPassivationStrategy(limit, settings.dynamicAging, idle) } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] case class LeastFrequentlyUsedPassivationStrategy( limit: Int, @@ 
-698,9 +670,7 @@ object ClusterShardingSettings { idle: Option[IdlePassivationStrategy]) extends PassivationStrategy - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] object CompositePassivationStrategy { object AdmissionFilter { @@ -777,9 +747,7 @@ object ClusterShardingSettings { } } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] case class CompositePassivationStrategy( limit: Int, @@ -859,15 +827,15 @@ object ClusterShardingSettings { (if (dynamicAging) " (with dynamic aging)" else "") + idle.fold("")(idle => " and " + describe(idle)) case CompositePassivationStrategy( - limit, - mainStrategy, - windowStrategy, - initialWindowProportion, - minimumWindowProportion, - maximumWindowProportion, - windowOptimizer, - admissionFilter, - idle) => + limit, + mainStrategy, + windowStrategy, + initialWindowProportion, + minimumWindowProportion, + maximumWindowProportion, + windowOptimizer, + admissionFilter, + idle) => val describeWindow = windowStrategy match { case NoPassivationStrategy => "no admission window" case _ => @@ -876,10 +844,10 @@ object ClusterShardingSettings { case CompositePassivationStrategy.NoAdmissionOptimizer => s" with proportion [$initialWindowProportion]" case CompositePassivationStrategy.HillClimbingAdmissionOptimizer( - adjustMultiplier, - initialStep, - restartThreshold, - stepDecay) => + adjustMultiplier, + initialStep, + restartThreshold, + stepDecay) => s" with proportions [initial = $initialWindowProportion, min = $minimumWindowProportion, max = $maximumWindowProportion]" + " adapting with hill-climbing optimizer [" + s"adjust multiplier = $adjustMultiplier, " + @@ -891,10 +859,10 @@ object ClusterShardingSettings { val describeFilter = admissionFilter match { case CompositePassivationStrategy.AlwaysAdmissionFilter => "always admit" case CompositePassivationStrategy.FrequencySketchAdmissionFilter( - widthMultiplier, - resetMultiplier, - depth, - counterBits) => + widthMultiplier, + 
resetMultiplier, + depth, + counterBits) => "admit using frequency sketch [" + s"width multiplier = $widthMultiplier, " + s"reset multiplier = $resetMultiplier, " + @@ -1264,9 +1232,7 @@ final class ClusterShardingSettings( private[akka] def shouldHostCoordinator(cluster: Cluster): Boolean = coordinatorSingletonRole.forall(cluster.selfMember.roles.contains) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def coordinatorSingletonRole: Option[String] = if (coordinatorSingletonOverrideRole) role else coordinatorSingletonSettings.role @@ -1305,9 +1271,7 @@ final class ClusterShardingSettings( def withPassivateIdleAfter(duration: java.time.Duration): ClusterShardingSettings = copy(passivationStrategySettings = passivationStrategySettings.withOldIdleStrategy(duration.asScala)) - /** - * API MAY CHANGE: Settings for passivation strategies may change after additional testing and feedback. - */ + /** API MAY CHANGE: Settings for passivation strategies may change after additional testing and feedback. 
*/ @ApiMayChange def withPassivationStrategy(settings: ClusterShardingSettings.PassivationStrategySettings): ClusterShardingSettings = copy(passivationStrategySettings = settings) diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ConsistentHashingShardAllocationStrategy.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ConsistentHashingShardAllocationStrategy.scala index a1f7327f412..3de55b8601c 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ConsistentHashingShardAllocationStrategy.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ConsistentHashingShardAllocationStrategy.scala @@ -104,28 +104,28 @@ class ConsistentHashingShardAllocationStrategy(rebalanceLimit: Int) rebalanceLimit <= 0 || result.size < rebalanceLimit currentShardAllocations - // deterministic order, at least easier to test - .toVector.sortBy { case (region, _) => nodeForRegion(region) }(Address.addressOrdering).foreach { - case (currentRegion, shardIds) => - shardIds.foreach { shardId => - if (lessThanLimit && !rebalanceInProgress.contains(shardId)) { - val node = consistentHashing.nodeFor(shardId) - regionByNode.get(node) match { - case Some(region) => - if (region != currentRegion) { - log.debug( - "Rebalance needed for shard [{}], from [{}] to [{}]", - shardId, - nodeForRegion(currentRegion), - node) - result += shardId - } - case None => - throw new IllegalStateException(s"currentShardAllocations should include region for node [$node]") + // deterministic order, at least easier to test + .toVector.sortBy { case (region, _) => nodeForRegion(region) }(Address.addressOrdering).foreach { + case (currentRegion, shardIds) => + shardIds.foreach { shardId => + if (lessThanLimit && !rebalanceInProgress.contains(shardId)) { + val node = consistentHashing.nodeFor(shardId) + regionByNode.get(node) match { + case Some(region) => + if (region != currentRegion) { + log.debug( + "Rebalance needed for shard [{}], from [{}] to 
[{}]", + shardId, + nodeForRegion(currentRegion), + node) + result += shardId + } + case None => + throw new IllegalStateException(s"currentShardAllocations should include region for node [$node]") + } } } - } - } + } Future.successful(result) } diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/JoinConfigCompatCheckSharding.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/JoinConfigCompatCheckSharding.scala index 4a3c4c27eed..0b22da9dec1 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/JoinConfigCompatCheckSharding.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/JoinConfigCompatCheckSharding.scala @@ -11,9 +11,7 @@ import com.typesafe.config.Config import akka.annotation.InternalApi import akka.cluster.{ ConfigValidation, JoinConfigCompatChecker } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi final class JoinConfigCompatCheckSharding extends JoinConfigCompatChecker { diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/RemoveInternalClusterShardingData.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/RemoveInternalClusterShardingData.scala index 457660d09eb..a1c1660bdb7 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/RemoveInternalClusterShardingData.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/RemoveInternalClusterShardingData.scala @@ -56,15 +56,13 @@ import akka.persistence.journal.leveldb.SharedLeveldbStore */ object RemoveInternalClusterShardingData { - /** - * @see [[RemoveInternalClusterShardingData$ RemoveInternalClusterShardingData companion object]] - */ + /** @see [[RemoveInternalClusterShardingData$ RemoveInternalClusterShardingData companion object]] */ def main(args: Array[String]): Unit = { if (args.isEmpty) println("Specify the Cluster Sharding type names to remove in program arguments") else { val system = ActorSystem("RemoveInternalClusterShardingData") - val 
remove2dot3Data = (args(0) == "-2.3") + val remove2dot3Data = args(0) == "-2.3" val typeNames = if (remove2dot3Data) args.tail.toSet else args.toSet if (typeNames.isEmpty) println("Specify the Cluster Sharding type names to remove in program arguments") @@ -104,9 +102,7 @@ object RemoveInternalClusterShardingData { completion.future } - /** - * INTERNAL API: `Props` for [[RemoveInternalClusterShardingData]] actor. - */ + /** INTERNAL API: `Props` for [[RemoveInternalClusterShardingData]] actor. */ private[akka] def props( journalPluginId: String, typeNames: Set[String], @@ -115,9 +111,7 @@ object RemoveInternalClusterShardingData { Props(new RemoveInternalClusterShardingData(journalPluginId, typeNames, completion, remove2dot3Data)) .withDeploy(Deploy.local) - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] object RemoveOnePersistenceId { def props(journalPluginId: String, persistenceId: String, replyTo: ActorRef): Props = Props(new RemoveOnePersistenceId(journalPluginId, persistenceId: String, replyTo)) @@ -163,13 +157,13 @@ object RemoveInternalClusterShardingData { }: Receive).orElse(handleFailure) def waitDeleteSnapshotsSuccess: Receive = - ({ - case DeleteSnapshotsSuccess(_) => done() + ({ case DeleteSnapshotsSuccess(_) => + done() }: Receive).orElse(handleFailure) def waitDeleteMessagesSuccess: Receive = - ({ - case DeleteMessagesSuccess(_) => done() + ({ case DeleteMessagesSuccess(_) => + done() }: Receive).orElse(handleFailure) def handleFailure: Receive = { @@ -191,9 +185,7 @@ object RemoveInternalClusterShardingData { } -/** - * @see [[RemoveInternalClusterShardingData$ RemoveInternalClusterShardingData companion object]] - */ +/** @see [[RemoveInternalClusterShardingData$ RemoveInternalClusterShardingData companion object]] */ class RemoveInternalClusterShardingData( journalPluginId: String, typeNames: Set[String], diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/Shard.scala 
b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/Shard.scala index afbbb668157..6483c37adfe 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/Shard.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/Shard.scala @@ -50,9 +50,7 @@ import akka.util.unused private[akka] object Shard { import ShardRegion.EntityId - /** - * A Shard command - */ + /** A Shard command */ sealed trait RememberEntityCommand /** @@ -67,9 +65,7 @@ private[akka] object Shard { */ final case class EntitiesMovedToOtherShard(ids: Set[ShardRegion.ShardId]) extends RememberEntityCommand - /** - * A query for information about the shard - */ + /** A query for information about the shard */ sealed trait ShardQuery @SerialVersionUID(1L) case object GetCurrentShardState extends ShardQuery @@ -154,7 +150,7 @@ private[akka] object Shard { * +------------------------------------------------------------------------------------------+------------------------------------------------+<-------------+ * stop stored/passivation complete * }}} - **/ + */ sealed trait EntityState { def transition(newState: EntityState, entities: Entities): EntityState final def invalidTransition(to: EntityState, entities: Entities): EntityState = { @@ -179,9 +175,9 @@ private[akka] object Shard { case object NoState extends EntityState { override def transition(newState: EntityState, entities: Entities): EntityState = newState match { case RememberedButNotCreated if entities.rememberingEntities => RememberedButNotCreated - case remembering: RememberingStart => remembering // we go via this state even if not really remembering - case active: Active if !entities.rememberingEntities => active - case _ => invalidTransition(newState, entities) + case remembering: RememberingStart => remembering // we go via this state even if not really remembering + case active: Active if !entities.rememberingEntities => active + case _ => invalidTransition(newState, entities) } } @@ -371,9 +367,7 
@@ private[akka] object Shard { // only called for getting shard stats def activeEntityIds(): Set[EntityId] = byRef.values.asScala.toSet - /** - * @return (remembering start, remembering stop) - */ + /** @return (remembering start, remembering stop) */ def pendingRememberEntities(): (Map[EntityId, RememberingStart], Set[EntityId]) = { if (remembering.isEmpty) { (Map.empty, Set.empty) @@ -384,7 +378,7 @@ private[akka] object Shard { entityState(entityId) match { case r: RememberingStart => starts += (entityId -> r) case RememberingStop => stops += entityId - case wat => throw new IllegalStateException(s"$entityId was in the remembering set but has state $wat") + case wat => throw new IllegalStateException(s"$entityId was in the remembering set but has state $wat") }) (starts.result(), stops.result()) } @@ -482,8 +476,8 @@ private[akka] class Shard( case None => 5.seconds // not used } - def receive: Receive = { - case _ => throw new IllegalStateException("Default receive never expected to actually be used") + def receive: Receive = { case _ => + throw new IllegalStateException("Default receive never expected to actually be used") } override def preStart(): Unit = { @@ -545,8 +539,8 @@ private[akka] class Shard( private def tryGetLease(l: Lease): Unit = { log.info("{}: Acquiring lease {}", typeName, l.settings) - pipe(l.acquire(reason => self ! LeaseLost(reason)).map(r => LeaseAcquireResult(r, None)).recover { - case t => LeaseAcquireResult(acquired = false, Some(t)) + pipe(l.acquire(reason => self ! 
LeaseLost(reason)).map(r => LeaseAcquireResult(r, None)).recover { case t => + LeaseAcquireResult(acquired = false, Some(t)) }).to(self) } @@ -885,9 +879,9 @@ private[akka] class Shard( HandOffStopper.props(typeName, shardId, replyTo, activeEntities, handOffStopMessage, entityHandOffTimeout), "HandOffStopper"))) - //During hand off we only care about watching for termination of the hand off stopper - context.become { - case Terminated(ref) => receiveTerminated(ref) + // During hand off we only care about watching for termination of the hand off stopper + context.become { case Terminated(ref) => + receiveTerminated(ref) } } else { replyTo ! ShardStopped(shardId) @@ -1155,7 +1149,7 @@ private[akka] class Shard( // After entity started def sendMsgBuffer(entityId: EntityId): Unit = { - //Get the buffered messages and remove the buffer + // Get the buffered messages and remove the buffer val messages = messageBuffers.getOrEmpty(entityId) messageBuffers.remove(entityId) diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardCoordinator.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardCoordinator.scala index eff4e144c47..f412a831adc 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardCoordinator.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardCoordinator.scala @@ -40,9 +40,7 @@ import akka.persistence._ import akka.util.PrettyDuration._ import akka.util.Timeout -/** - * @see [[ClusterSharding$ ClusterSharding extension]] - */ +/** @see [[ClusterSharding$ ClusterSharding extension]] */ object ShardCoordinator { import ShardRegion.ShardId @@ -298,8 +296,8 @@ object ShardCoordinator { val (_, leastShards) = mostSuitableRegion(sortedRegionEntries) // even if it is to another new node. 
val mostShards = sortedRegionEntries - .collect { - case RegionEntry(_, _, shardIds) => shardIds.filterNot(id => rebalanceInProgress(id)) + .collect { case RegionEntry(_, _, shardIds) => + shardIds.filterNot(id => rebalanceInProgress(id)) } .maxBy(_.size) val difference = mostShards.size - leastShards.size @@ -315,43 +313,29 @@ object ShardCoordinator { } } - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] object Internal { - /** - * Used as a special termination message from [[ClusterSharding]] - */ + /** Used as a special termination message from [[ClusterSharding]] */ @InternalApi private[cluster] case object Terminate extends DeadLetterSuppression - /** - * Messages sent to the coordinator - */ + /** Messages sent to the coordinator */ sealed trait CoordinatorCommand extends ClusterShardingSerializable - /** - * Messages sent from the coordinator - */ + /** Messages sent from the coordinator */ sealed trait CoordinatorMessage extends ClusterShardingSerializable - /** - * `ShardRegion` registers to `ShardCoordinator`, until it receives [[RegisterAck]]. - */ + /** `ShardRegion` registers to `ShardCoordinator`, until it receives [[RegisterAck]]. */ @SerialVersionUID(1L) final case class Register(shardRegion: ActorRef) extends CoordinatorCommand with DeadLetterSuppression - /** - * `ShardRegion` in proxy only mode registers to `ShardCoordinator`, until it receives [[RegisterAck]]. - */ + /** `ShardRegion` in proxy only mode registers to `ShardCoordinator`, until it receives [[RegisterAck]]. */ @SerialVersionUID(1L) final case class RegisterProxy(shardRegionProxy: ActorRef) extends CoordinatorCommand with DeadLetterSuppression - /** - * Acknowledgement from `ShardCoordinator` that [[Register]] or [[RegisterProxy]] was successful. - */ + /** Acknowledgement from `ShardCoordinator` that [[Register]] or [[RegisterProxy]] was successful. 
*/ @SerialVersionUID(1L) final case class RegisterAck(coordinator: ActorRef) extends CoordinatorMessage /** @@ -362,26 +346,18 @@ object ShardCoordinator { extends CoordinatorCommand with DeadLetterSuppression - /** - * `ShardCoordinator` replies with this message for [[GetShardHome]] requests. - */ + /** `ShardCoordinator` replies with this message for [[GetShardHome]] requests. */ @SerialVersionUID(1L) final case class ShardHome(shard: ShardId, ref: ActorRef) extends CoordinatorMessage - /** - * One or more sent to region directly after registration to speed up new shard startup. - */ + /** One or more sent to region directly after registration to speed up new shard startup. */ final case class ShardHomes(homes: Map[ActorRef, immutable.Seq[ShardId]]) extends CoordinatorMessage with DeadLetterSuppression - /** - * `ShardCoordinator` informs a `ShardRegion` that it is hosting this shard - */ + /** `ShardCoordinator` informs a `ShardRegion` that it is hosting this shard */ @SerialVersionUID(1L) final case class HostShard(shard: ShardId) extends CoordinatorMessage - /** - * `ShardRegion` replies with this message for [[HostShard]] requests which lead to it hosting the shard - */ + /** `ShardRegion` replies with this message for [[HostShard]] requests which lead to it hosting the shard */ @SerialVersionUID(1L) final case class ShardStarted(shard: ShardId) extends CoordinatorMessage /** @@ -394,9 +370,7 @@ object ShardCoordinator { */ @SerialVersionUID(1L) final case class BeginHandOff(shard: ShardId) extends CoordinatorMessage - /** - * Acknowledgement of [[BeginHandOff]] - */ + /** Acknowledgement of [[BeginHandOff]] */ @SerialVersionUID(1L) final case class BeginHandOffAck(shard: ShardId) extends CoordinatorCommand /** @@ -407,24 +381,16 @@ object ShardCoordinator { */ @SerialVersionUID(1L) final case class HandOff(shard: ShardId) extends CoordinatorMessage - /** - * Reply to `HandOff` when all entities in the shard have been terminated. 
- */ + /** Reply to `HandOff` when all entities in the shard have been terminated. */ @SerialVersionUID(1L) final case class ShardStopped(shard: ShardId) extends CoordinatorCommand - /** - * Notification when the entire shard region has stopped - */ + /** Notification when the entire shard region has stopped */ @SerialVersionUID(1L) final case class RegionStopped(shardRegion: ActorRef) extends CoordinatorCommand - /** - * Stop all the listed shards, sender will get a ShardStopped ack for each shard once stopped - */ + /** Stop all the listed shards, sender will get a ShardStopped ack for each shard once stopped */ final case class StopShards(shards: Set[ShardId]) extends CoordinatorCommand - /** - * `ShardRegion` requests full handoff to be able to shutdown gracefully. - */ + /** `ShardRegion` requests full handoff to be able to shutdown gracefully. */ @SerialVersionUID(1L) final case class GracefulShutdownReq(shardRegion: ActorRef) extends CoordinatorCommand with DeadLetterSuppression @@ -484,7 +450,7 @@ object ShardCoordinator { case ShardRegionTerminated(region) => require(regions.contains(region), s"Terminated region $region not registered: $this") val newUnallocatedShards = - if (rememberEntities) (unallocatedShards ++ regions(region)) else unallocatedShards + if (rememberEntities) unallocatedShards ++ regions(region) else unallocatedShards copy(regions = regions - region, shards = shards -- regions(region), unallocatedShards = newUnallocatedShards) case ShardRegionProxyTerminated(proxy) => require(regionProxies.contains(proxy), s"Terminated region proxy $proxy not registered: $this") @@ -493,7 +459,7 @@ object ShardCoordinator { require(regions.contains(region), s"Region $region not registered: $this") require(!shards.contains(shard), s"Shard [$shard] already allocated: $this") val newUnallocatedShards = - if (rememberEntities) (unallocatedShards - shard) else unallocatedShards + if (rememberEntities) unallocatedShards - shard else unallocatedShards copy( 
shards = shards.updated(shard, region), regions = regions.updated(region, regions(region) :+ shard), @@ -503,7 +469,7 @@ object ShardCoordinator { val region = shards(shard) require(regions.contains(region), s"Region $region for shard [$shard] not registered: $this") val newUnallocatedShards = - if (rememberEntities) (unallocatedShards + shard) else unallocatedShards + if (rememberEntities) unallocatedShards + shard else unallocatedShards copy( shards = shards - shard, regions = regions.updated(region, regions(region).filterNot(_ == shard)), @@ -515,36 +481,26 @@ object ShardCoordinator { } - /** - * Periodic message to trigger rebalance - */ + /** Periodic message to trigger rebalance */ private case object RebalanceTick - /** - * End of rebalance process performed by [[RebalanceWorker]] - */ + /** End of rebalance process performed by [[RebalanceWorker]] */ private final case class RebalanceDone(shard: ShardId, ok: Boolean) - /** - * Check if we've received a shard start request - */ + /** Check if we've received a shard start request */ private final case class ResendShardHost(shard: ShardId, region: ActorRef) private final case class DelayedShardRegionTerminated(region: ActorRef) private final case class StopShardTimeout(requestId: UUID) - /** - * Result of `allocateShard` is piped to self with this message. - */ + /** Result of `allocateShard` is piped to self with this message. */ private final case class AllocateShardResult( shard: ShardId, shardRegion: Option[ActorRef], getShardHomeSender: ActorRef) - /** - * Result of `rebalance` is piped to self with this message. - */ + /** Result of `rebalance` is piped to self with this message. 
*/ private final case class RebalanceResult(shards: Set[ShardId]) private[akka] object RebalanceWorker { @@ -786,10 +742,9 @@ abstract class ShardCoordinator( .map { region => AllocateShardResult(shard, Some(region), getShardHomeSender) } - .recover { - case t => - log.error(t, "{}: Shard [{}] allocation failed.", typeName, shard) - AllocateShardResult(shard, None, getShardHomeSender) + .recover { case t => + log.error(t, "{}: Shard [{}] allocation failed.", typeName, shard) + AllocateShardResult(shard, None, getShardHomeSender) } .pipeTo(self) } @@ -813,7 +768,7 @@ abstract class ShardCoordinator( case ResendShardHost(shard, region) => state.shards.get(shard) match { case Some(`region`) => sendHostShardMsg(shard, region) - case _ => //Reallocated to another region + case _ => // Reallocated to another region } case StopShards(shardIds) => @@ -827,13 +782,12 @@ abstract class ShardCoordinator( val (runningShards, alreadyStoppedShards) = shardIds.partition(state.shards.contains) alreadyStoppedShards.foreach(shardId => sender() ! 
ShardStopped(shardId)) if (runningShards.nonEmpty) { - waitingForShardsToStop = runningShards.foldLeft(waitingForShardsToStop) { - case (acc, shard) => - val newWaiting = acc.get(shard) match { - case Some(waiting) => waiting + ((sender(), requestId)) - case None => Set((sender(), requestId)) - } - acc.updated(shard, newWaiting) + waitingForShardsToStop = runningShards.foldLeft(waitingForShardsToStop) { case (acc, shard) => + val newWaiting = acc.get(shard) match { + case Some(waiting) => waiting + ((sender(), requestId)) + case None => Set((sender(), requestId)) + } + acc.updated(shard, newWaiting) } // no need to stop already rebalancing val shardsToStop = runningShards.filter(shard => !rebalanceInProgress.contains(shard)) @@ -844,8 +798,8 @@ abstract class ShardCoordinator( requestId) val shardsPerRegion = shardsToStop.flatMap(shardId => state.shards.get(shardId).map(region => region -> shardId)).groupBy(_._1) - shardsPerRegion.foreach { - case (region, shards) => shutdownShards(region, shards.map(_._2)) + shardsPerRegion.foreach { case (region, shards) => + shutdownShards(region, shards.map(_._2)) } val timeout = StopShardTimeout(requestId) timers.startSingleTimer(timeout, timeout, settings.tuningParameters.handOffTimeout) @@ -870,8 +824,8 @@ abstract class ShardCoordinator( .map { shards => RebalanceResult(shards) } - .recover { - case _ => RebalanceResult(Set.empty) + .recover { case _ => + RebalanceResult(Set.empty) } .pipeTo(self) } @@ -958,19 +912,18 @@ abstract class ShardCoordinator( .map(stats => regionActor -> stats) }) .map { allRegionStats => - ShardRegion.ClusterShardingStats(allRegionStats.map { - case (region, stats) => - val regionAddress = region.path.address - val address: Address = - if (regionAddress.hasLocalScope && regionAddress.system == cluster.selfAddress.system) - cluster.selfAddress - else regionAddress - - address -> stats + ShardRegion.ClusterShardingStats(allRegionStats.map { case (region, stats) => + val regionAddress = 
region.path.address + val address: Address = + if (regionAddress.hasLocalScope && regionAddress.system == cluster.selfAddress.system) + cluster.selfAddress + else regionAddress + + address -> stats }.toMap) } - .recover { - case _: AskTimeoutException => ShardRegion.ClusterShardingStats(Map.empty) + .recover { case _: AskTimeoutException => + ShardRegion.ClusterShardingStats(Map.empty) } .pipeTo(sender()) @@ -1006,11 +959,10 @@ abstract class ShardCoordinator( typeName, requestId, timedOutShards.mkString(", ")) - waitingForShardsToStop = timedOutShards.foldLeft(waitingForShardsToStop) { - case (acc, shard) => - val waiting = acc(shard) - if (waiting.size == 1) acc - shard - else acc.updated(shard, waiting.filterNot { case (_, id) => id == requestId }) + waitingForShardsToStop = timedOutShards.foldLeft(waitingForShardsToStop) { case (acc, shard) => + val waiting = acc(shard) + if (waiting.size == 1) acc - shard + else acc.updated(shard, waiting.filterNot { case (_, id) => id == requestId }) } } @@ -1020,7 +972,7 @@ abstract class ShardCoordinator( private def terminate(): Unit = { if (aliveRegions.exists(_.path.address.hasLocalScope) || gracefulShutdownInProgress.exists( - _.path.address.hasLocalScope)) { + _.path.address.hasLocalScope)) { aliveRegions .find(_.path.address.hasLocalScope) .foreach(region => @@ -1087,19 +1039,17 @@ abstract class ShardCoordinator( // if needed, to not get a single too large message, but group by region to send the // same region actor as few times as possible. 
state.regions.iterator - .flatMap { - case (regionRef, shards) => - shards.filterNot(rebalanceInProgress.contains).map(shard => regionRef -> shard) + .flatMap { case (regionRef, shards) => + shards.filterNot(rebalanceInProgress.contains).map(shard => regionRef -> shard) } .grouped(batchSize) // cap how much is sent in case of a large number of shards (> 5 000) // to not delay registration ack too much .take(10) .foreach { regions => - val shardsSubMap = regions.foldLeft(Map.empty[ActorRef, List[ShardId]]) { - case (map, (regionRef, shardId)) => - if (map.contains(regionRef)) map.updated(regionRef, shardId :: map(regionRef)) - else map.updated(regionRef, shardId :: Nil) + val shardsSubMap = regions.foldLeft(Map.empty[ActorRef, List[ShardId]]) { case (map, (regionRef, shardId)) => + if (map.contains(regionRef)) map.updated(regionRef, shardId :: map(regionRef)) + else map.updated(regionRef, shardId :: Nil) } ref ! ShardHomes(shardsSubMap) } @@ -1171,13 +1121,12 @@ abstract class ShardCoordinator( // This is an optimization that makes it operationally faster and reduces the // amount of lost messages during startup. val nodes = cluster.state.members.map(_.address) - state.regions.foreach { - case (ref, _) => - val a = ref.path.address - if (a.hasLocalScope || nodes(a)) - context.watch(ref) - else - regionTerminated(ref) // not part of cluster + state.regions.foreach { case (ref, _) => + val a = ref.path.address + if (a.hasLocalScope || nodes(a)) + context.watch(ref) + else + regionTerminated(ref) // not part of cluster } state.regionProxies.foreach { ref => val a = ref.path.address @@ -1249,8 +1198,7 @@ abstract class ShardCoordinator( } } - def shuttingDown: Receive = { - case _ => // ignore all + def shuttingDown: Receive = { case _ => // ignore all } def sendHostShardMsg(shard: ShardId, region: ActorRef): Unit = { @@ -1274,7 +1222,7 @@ abstract class ShardCoordinator( case Some(ref) => getShardHomeSender ! 
ShardHome(shard, ref) case None => if (state.regions.contains(region) && !gracefulShutdownInProgress(region) && !regionTerminationInProgress - .contains(region)) { + .contains(region)) { update(ShardHomeAllocated(shard, region)) { evt => state = state.updated(evt) log.debug( @@ -1434,7 +1382,7 @@ class PersistentShardCoordinator( if (verboseDebug) log.debug("{}: receiveRecover SnapshotOffer {}", typeName, st) state = st.withRememberEntities(settings.rememberEntities) - //Old versions of the state object may not have unallocatedShard set, + // Old versions of the state object may not have unallocatedShard set, // thus it will be null. if (state.unallocatedShards == null) state = state.copy(unallocatedShards = Set.empty) @@ -1500,9 +1448,7 @@ class PersistentShardCoordinator( override protected def unstashOneGetShardHomeRequest(): Unit = () } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object DDataShardCoordinator { @@ -1721,8 +1667,9 @@ private[akka] class DDataShardCoordinator( case UpdateTimeout(CoordinatorStateKey, Some(`evt`)) => updateStateRetries += 1 - val template = s"$typeName: The ShardCoordinator was unable to update a distributed state within 'updating-state-timeout': ${stateWriteConsistency.timeout.toMillis} millis (${if (terminating) "terminating" - else "retrying"}). Attempt $updateStateRetries. " + + val template = + s"$typeName: The ShardCoordinator was unable to update a distributed state within 'updating-state-timeout': ${stateWriteConsistency.timeout.toMillis} millis (${if (terminating) "terminating" + else "retrying"}). Attempt $updateStateRetries. " + s"Perhaps the ShardRegion has not started on all active nodes yet? 
event=$evt" if (updateStateRetries < 5) { @@ -1938,9 +1885,8 @@ private[akka] class DDataShardCoordinator( } override def receiveTerminated: Receive = - super.receiveTerminated.orElse { - case RememberEntitiesStoreStopped => - onRememberEntitiesStoreStopped() + super.receiveTerminated.orElse { case RememberEntitiesStoreStopped => + onRememberEntitiesStoreStopped() } def onRememberEntitiesUpdateFailed(shardId: ShardId): Unit = { diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala index 6b568242978..402bad9cec8 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala @@ -34,9 +34,7 @@ import akka.pattern.pipe import akka.util.MessageBufferMap import akka.util.Timeout -/** - * @see [[ClusterSharding$ ClusterSharding extension]] - */ +/** @see [[ClusterSharding$ ClusterSharding extension]] */ object ShardRegion { /** @@ -88,19 +86,13 @@ object ShardRegion { PoisonPill, None)).withDeploy(Deploy.local) - /** - * Marker type of entity identifier (`String`). - */ + /** Marker type of entity identifier (`String`). */ type EntityId = String - /** - * Marker type of shard identifier (`String`). - */ + /** Marker type of shard identifier (`String`). */ type ShardId = String - /** - * Marker type of application messages (`Any`). - */ + /** Marker type of application messages (`Any`). */ type Msg = Any /** @@ -170,9 +162,7 @@ object ShardRegion { */ abstract class HashCodeMessageExtractor(maxNumberOfShards: Int) extends MessageExtractor { - /** - * Default implementation pass on the message as is. - */ + /** Default implementation pass on the message as is. 
*/ override def entityMessage(message: Any): Any = message override def shardId(message: Any): String = { @@ -245,14 +235,10 @@ object ShardRegion { */ def getCurrentRegionsInstance: GetCurrentRegions.type = GetCurrentRegions - /** - * Reply to `GetCurrentRegions` - */ + /** Reply to `GetCurrentRegions` */ @SerialVersionUID(1L) final case class CurrentRegions(regions: Set[Address]) extends ClusterShardingSerializable { - /** - * Java API - */ + /** Java API */ def getRegions: java.util.Set[Address] = { import akka.util.ccompat.JavaConverters._ regions.asJava @@ -280,9 +266,7 @@ object ShardRegion { @SerialVersionUID(1L) final case class ClusterShardingStats(regions: Map[Address, ShardRegionStats]) extends ClusterShardingSerializable { - /** - * Java API - */ + /** Java API */ def getRegions(): java.util.Map[Address, ShardRegionStats] = { import akka.util.ccompat.JavaConverters._ regions.asJava @@ -301,13 +285,10 @@ object ShardRegion { */ @SerialVersionUID(1L) case object GetShardRegionStats extends ShardRegionQuery with ClusterShardingSerializable - /** - * Java API: - */ + /** Java API: */ def getRegionStatsInstance = GetShardRegionStats /** - * * @param stats the region stats mapping of `ShardId` to number of entities * @param failed set of shards if any failed to respond within the timeout */ @@ -315,9 +296,7 @@ object ShardRegion { extends ClusterShardingSerializable with Product { - /** - * Java API - */ + /** Java API */ def getStats(): java.util.Map[ShardId, Int] = { import akka.util.ccompat.JavaConverters._ stats.asJava @@ -365,9 +344,7 @@ object ShardRegion { */ @SerialVersionUID(1L) case object GetShardRegionState extends ShardRegionQuery with ClusterShardingSerializable - /** - * Java API: - */ + /** Java API: */ def getShardRegionStateInstance = GetShardRegionState /** @@ -424,9 +401,7 @@ object ShardRegion { @SerialVersionUID(1L) final case class ShardState(shardId: ShardId, entityIds: Set[EntityId]) { - /** - * Java API: - */ + /** Java API: */ def 
getEntityIds(): java.util.Set[EntityId] = { import akka.util.ccompat.JavaConverters._ entityIds.asJava @@ -572,9 +547,7 @@ object ShardRegion { } } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] object HandOffStopper { private case object StopTimeout private case object StopTimeoutWarning @@ -804,8 +777,7 @@ private[akka] class ShardRegion( preparingForShutdown = true case _: MemberEvent => // these are expected, no need to warn about them - - case _ => unhandled(evt) + case _ => unhandled(evt) } private def addMember(m: Member): Unit = { @@ -830,7 +802,7 @@ private[akka] class ShardRegion( regionByShard = regionByShard.updated(shard, self) regions = regions.updated(self, regions.getOrElse(self, Set.empty) + shard) - //Start the shard, if already started this does nothing + // Start the shard, if already started this does nothing getShard(shard) sender() ! ShardStarted(shard) } @@ -841,9 +813,8 @@ private[akka] class ShardRegion( case ShardHomes(homes) => if (log.isDebugEnabled) log.debug("Got shard homes for regions [{}]", homes.keySet.mkString(", ")) - homes.foreach { - case (shardRegionRef, shards) => - shards.foreach(shardId => receiveShardHome(shardId, shardRegionRef)) + homes.foreach { case (shardRegionRef, shards) => + shards.foreach(shardId => receiveShardHome(shardId, shardRegionRef)) } case RegisterAck(coord) => @@ -874,8 +845,10 @@ private[akka] class ShardRegion( // because they might be forwarded from other regions and there // is a risk or message re-ordering otherwise if (shardBuffers.contains(shard)) { - val dropped = shardBuffers - .drop(shard, "Avoiding reordering of buffered messages at shard handoff", context.system.deadLetters) + val dropped = shardBuffers.drop( + shard, + "Avoiding reordering of buffered messages at shard handoff", + context.system.deadLetters) if (dropped > 0) log.warning( "{}: Dropping [{}] buffered messages to shard [{}] during hand off to avoid re-ordering", @@ -1085,8 +1058,8 @@ private[akka] 
class ShardRegion( } } - private def askOne[T: ClassTag](shard: ActorRef, msg: Any, shardId: ShardId)( - implicit timeout: Timeout): Future[Either[ShardId, T]] = + private def askOne[T: ClassTag](shard: ActorRef, msg: Any, shardId: ShardId)(implicit + timeout: Timeout): Future[Either[ShardId, T]] = (shard ? msg).mapTo[T].transform { case Success(t) => Success(Right(t)) case Failure(_) => Success(Left(shardId)) @@ -1178,17 +1151,16 @@ private[akka] class ShardRegion( // Have to use vars because MessageBufferMap has no map, only foreach var totalBuffered = 0 var shards = List.empty[String] - shardBuffers.foreach { - case (shard, buf) => - totalBuffered += buf.size - shards ::= shard - log.debug( - "{}: Requesting shard home for [{}] from coordinator at [{}]. [{}] buffered messages.", - typeName, - shard, - coord, - buf.size) - coord ! GetShardHome(shard) + shardBuffers.foreach { case (shard, buf) => + totalBuffered += buf.size + shards ::= shard + log.debug( + "{}: Requesting shard home for [{}] from coordinator at [{}]. [{}] buffered messages.", + typeName, + shard, + coord, + buf.size) + coord ! 
GetShardHome(shard) } if (retryCount >= 5 && retryCount % 5 == 0 && log.isWarningEnabled) { @@ -1239,17 +1211,16 @@ private[akka] class ShardRegion( val buf = shardBuffers.getOrEmpty(shardId) log.debug("{}: Deliver [{}] buffered messages for shard [{}]", typeName, buf.size, shardId) - buf.foreach { - case (msg, snd) => - msg match { - case msg @ RestartShard(_) if receiver != self => - log.debug( - "{}: Dropping buffered message {}, these are only processed by a local ShardRegion.", - typeName, - msg) - case _ => - receiver.tell(msg, snd) - } + buf.foreach { case (msg, snd) => + msg match { + case msg @ RestartShard(_) if receiver != self => + log.debug( + "{}: Dropping buffered message {}, these are only processed by a local ShardRegion.", + typeName, + msg) + case _ => + receiver.tell(msg, snd) + } } shardBuffers.remove(shardId) diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardingFlightRecorder.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardingFlightRecorder.scala index a07c0ebb082..76ef1d9e69e 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardingFlightRecorder.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardingFlightRecorder.scala @@ -7,9 +7,7 @@ import akka.actor.{ ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProv import akka.annotation.InternalApi import akka.util.FlightRecorderLoader -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi object ShardingFlightRecorder extends ExtensionId[ShardingFlightRecorder] with ExtensionIdProvider { @@ -22,9 +20,7 @@ object ShardingFlightRecorder extends ExtensionId[ShardingFlightRecorder] with E NoOpShardingFlightRecorder) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] trait ShardingFlightRecorder extends Extension { def rememberEntityOperation(duration: Long): Unit def rememberEntityAdd(entityId: String): Unit @@ -33,9 +29,7 @@ object ShardingFlightRecorder extends 
ExtensionId[ShardingFlightRecorder] with E def entityPassivateRestart(entityId: String): Unit } -/** - * INTERNAL - */ +/** INTERNAL */ @InternalApi private[akka] case object NoOpShardingFlightRecorder extends ShardingFlightRecorder { override def rememberEntityOperation(duration: Long): Unit = () diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardingLogMarker.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardingLogMarker.scala index b72d2ce2e68..8c636a3140d 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardingLogMarker.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardingLogMarker.scala @@ -15,9 +15,7 @@ import akka.event.LogMarker */ object ShardingLogMarker { - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] object Properties { val ShardTypeName = "akkaShardTypeName" val ShardId = "akkaShardId" diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardingQueries.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardingQueries.scala index cfadba6af00..f80839f36dc 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardingQueries.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardingQueries.scala @@ -31,7 +31,8 @@ private[sharding] object ShardingQueries { total: Int, timeout: FiniteDuration) { - /** The number of shards queried, which could equal the `total` or, + /** + * The number of shards queried, which could equal the `total` or, * be a subset if this was a retry of those that failed. 
*/ val queried: Int = failed.size + responses.size diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/external/ExternalShardAllocation.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/external/ExternalShardAllocation.scala index bd25cb7c193..02b0def661f 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/external/ExternalShardAllocation.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/external/ExternalShardAllocation.scala @@ -23,14 +23,10 @@ final class ExternalShardAllocation(system: ExtendedActorSystem) extends Extensi new ExternalShardAllocationClientImpl(system, typeName) } - /** - * Scala API - */ + /** Scala API */ def clientFor(typeName: String): scaladsl.ExternalShardAllocationClient = client(typeName) - /** - * Java API - */ + /** Java API */ def getClient(typeName: String): javadsl.ExternalShardAllocationClient = client(typeName) private def client(typeName: String): ExternalShardAllocationClientImpl = { diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/external/ExternalShardAllocationStrategy.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/external/ExternalShardAllocationStrategy.scala index 27a1d9bbaf8..168e87a5a8c 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/external/ExternalShardAllocationStrategy.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/external/ExternalShardAllocationStrategy.scala @@ -34,15 +34,11 @@ object ExternalShardAllocationStrategy { type ShardRegion = ActorRef - /** - * Scala API - */ + /** Scala API */ def apply(systemProvider: ClassicActorSystemProvider, typeName: String): ExternalShardAllocationStrategy = new ExternalShardAllocationStrategy(systemProvider, typeName) - /** - * Java API - */ + /** Java API */ def create(systemProvider: ClassicActorSystemProvider, typeName: String): ExternalShardAllocationStrategy = apply(systemProvider, typeName) @@ -168,13 
+164,12 @@ class ExternalShardAllocationStrategy(systemProvider: ClassicActorSystemProvider } } } - .recover { - case _: AskTimeoutException => - log.warning( - "allocate timed out waiting for shard allocation state [{}]. Allocating to requester [{}]", - shardId, - requester) - requester + .recover { case _: AskTimeoutException => + log.warning( + "allocate timed out waiting for shard allocation state [{}]. Allocating to requester [{}]", + shardId, + requester) + requester } } @@ -227,10 +222,9 @@ class ExternalShardAllocationStrategy(systemProvider: ClassicActorSystemProvider } done } - .recover { - case _: AskTimeoutException => - log.warning("rebalance timed out waiting for shard allocation state. Keeping existing allocations") - Set.empty + .recover { case _: AskTimeoutException => + log.warning("rebalance timed out waiting for shard allocation state. Keeping existing allocations") + Set.empty } } diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/external/ShardLocations.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/external/ShardLocations.scala index 28c8ef91a6b..b865d7c4531 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/external/ShardLocations.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/external/ShardLocations.scala @@ -10,8 +10,6 @@ import akka.util.ccompat.JavaConverters._ final class ShardLocations(val locations: Map[ShardId, ShardLocation]) { - /** - * Java API - */ + /** Java API */ def getShardLocations(): java.util.Map[ShardId, ShardLocation] = locations.asJava } diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/external/internal/ExternalShardAllocationClientImpl.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/external/internal/ExternalShardAllocationClientImpl.scala index 20d2a446094..58940bf1fa7 100644 --- 
a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/external/internal/ExternalShardAllocationClientImpl.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/external/internal/ExternalShardAllocationClientImpl.scala @@ -40,9 +40,7 @@ import akka.util.PrettyDuration._ import akka.util.Timeout import akka.util.ccompat.JavaConverters._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi final private[external] class ExternalShardAllocationClientImpl(system: ActorSystem, typeName: String) extends akka.cluster.sharding.external.scaladsl.ExternalShardAllocationClient @@ -86,7 +84,7 @@ final private[external] class ExternalShardAllocationClientImpl(system: ActorSys case NotFound(_, _) => Future.successful(Map.empty[ShardId, ShardLocation]) case GetFailure(_, _) => - Future.failed((new ClientTimeoutException(s"Unable to get shard locations after ${timeout.duration.pretty}"))) + Future.failed(new ClientTimeoutException(s"Unable to get shard locations after ${timeout.duration.pretty}")) case _ => throw new IllegalArgumentException() // compiler exhaustiveness check pleaser } .map { locations => @@ -99,8 +97,8 @@ final private[external] class ExternalShardAllocationClientImpl(system: ActorSys override def updateShardLocations(locations: Map[ShardId, Address]): Future[Done] = { log.debug("updateShardLocations {} for {}", locations, Key) (replicator ? 
Update(Key, LWWMap.empty[ShardId, String], WriteLocal, None) { existing => - locations.foldLeft(existing) { - case (acc, (shardId, address)) => acc.put(self, shardId, address.toString) + locations.foldLeft(existing) { case (acc, (shardId, address)) => + acc.put(self, shardId, address.toString) } }).flatMap { case UpdateSuccess(_, _) => Future.successful(Done) diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/external/javadsl/ExternalShardAllocationClient.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/external/javadsl/ExternalShardAllocationClient.scala index dd018d70859..6ec36133a07 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/external/javadsl/ExternalShardAllocationClient.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/external/javadsl/ExternalShardAllocationClient.scala @@ -12,9 +12,7 @@ import akka.annotation.DoNotInherit import akka.cluster.sharding.ShardRegion.ShardId import akka.cluster.sharding.external.ShardLocations -/** - * Not for user extension - */ +/** Not for user extension */ @DoNotInherit trait ExternalShardAllocationClient { @@ -41,8 +39,6 @@ trait ExternalShardAllocationClient { */ def setShardLocations(locations: java.util.Map[ShardId, Address]): CompletionStage[Done] - /** - * Get all the current shard locations that have been set via setShardLocation - */ + /** Get all the current shard locations that have been set via setShardLocation */ def getShardLocations(): CompletionStage[ShardLocations] } diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/external/scaladsl/ExternalShardAllocationClient.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/external/scaladsl/ExternalShardAllocationClient.scala index 7a5a2154361..c8208d2d51b 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/external/scaladsl/ExternalShardAllocationClient.scala +++ 
b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/external/scaladsl/ExternalShardAllocationClient.scala @@ -12,9 +12,7 @@ import akka.annotation.DoNotInherit import akka.cluster.sharding.ShardRegion.ShardId import akka.cluster.sharding.external.ShardLocations -/** - * Not for user extension - */ +/** Not for user extension */ @DoNotInherit trait ExternalShardAllocationClient { @@ -41,8 +39,6 @@ trait ExternalShardAllocationClient { */ def updateShardLocations(locations: Map[ShardId, Address]): Future[Done] - /** - * Get all the current shard locations that have been set via updateShardLocation - */ + /** Get all the current shard locations that have been set via updateShardLocation */ def shardLocations(): Future[ShardLocations] } diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/AbstractLeastShardAllocationStrategy.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/AbstractLeastShardAllocationStrategy.scala index 732b737bde6..5940ea016ab 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/AbstractLeastShardAllocationStrategy.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/AbstractLeastShardAllocationStrategy.scala @@ -18,9 +18,7 @@ import akka.cluster.sharding.ShardCoordinator.ActorSystemDependentAllocationStra import akka.cluster.sharding.ShardRegion.ShardId import akka.pattern.after -/** - * INTERNAL API: Common logic for the least shard allocation strategy implementations - */ +/** INTERNAL API: Common logic for the least shard allocation strategy implementations */ @InternalApi private[akka] abstract class AbstractLeastShardAllocationStrategy extends ActorSystemDependentAllocationStrategy diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/ClusterShardAllocationMixin.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/ClusterShardAllocationMixin.scala index 756f720a8e2..ab4b2e80dc3 
100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/ClusterShardAllocationMixin.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/ClusterShardAllocationMixin.scala @@ -19,9 +19,7 @@ import akka.cluster.sharding.ShardRegion.ShardId import akka.cluster.sharding.internal.ClusterShardAllocationMixin.JoiningCluster import akka.cluster.sharding.internal.ClusterShardAllocationMixin.RegionEntry -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ClusterShardAllocationMixin { type AllocationMap = Map[ActorRef, immutable.IndexedSeq[ShardId]] @@ -54,9 +52,7 @@ import akka.cluster.sharding.internal.ClusterShardAllocationMixin.RegionEntry } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] trait ClusterShardAllocationMixin { import ClusterShardAllocationMixin.AllocationMap @@ -88,17 +84,16 @@ import akka.cluster.sharding.internal.ClusterShardAllocationMixin.RegionEntry final protected def regionEntriesFor(currentShardAllocations: AllocationMap): Iterable[RegionEntry] = { val addressToMember: Map[Address, Member] = clusterState.members.iterator.map(m => m.address -> m).toMap - currentShardAllocations.flatMap { - case (region, shardIds) => - val regionAddress = { - if (region.path.address.hasLocalScope) selfMember.address - else region.path.address - } - - val memberForRegion = addressToMember.get(regionAddress) - // if the member is unknown (very unlikely but not impossible) because of view not updated yet - // that node is ignored for this invocation - memberForRegion.map(member => RegionEntry(region, member, shardIds)) + currentShardAllocations.flatMap { case (region, shardIds) => + val regionAddress = { + if (region.path.address.hasLocalScope) selfMember.address + else region.path.address + } + + val memberForRegion = addressToMember.get(regionAddress) + // if the member is unknown (very unlikely but not impossible) because of view not updated yet + // that node 
is ignored for this invocation + memberForRegion.map(member => RegionEntry(region, member, shardIds)) } } } diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/DDataRememberEntitiesCoordinatorStore.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/DDataRememberEntitiesCoordinatorStore.scala index 855c1abec38..f0e0d5bf7d4 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/DDataRememberEntitiesCoordinatorStore.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/DDataRememberEntitiesCoordinatorStore.scala @@ -20,18 +20,14 @@ import akka.cluster.ddata.SelfUniqueAddress import akka.cluster.sharding.ClusterShardingSettings import akka.cluster.sharding.ShardRegion.ShardId -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object DDataRememberEntitiesCoordinatorStore { def props(typeName: String, settings: ClusterShardingSettings, replicator: ActorRef, majorityMinCap: Int): Props = Props(new DDataRememberEntitiesCoordinatorStore(typeName, settings, replicator, majorityMinCap)) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class DDataRememberEntitiesCoordinatorStore( typeName: String, diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/DDataRememberEntitiesProvider.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/DDataRememberEntitiesProvider.scala index e69298b7d8b..5986fee108c 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/DDataRememberEntitiesProvider.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/DDataRememberEntitiesProvider.scala @@ -10,9 +10,7 @@ import akka.annotation.InternalApi import akka.cluster.sharding.ClusterShardingSettings import akka.cluster.sharding.ShardRegion.ShardId -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class 
DDataRememberEntitiesProvider( typeName: String, diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/DDataRememberEntitiesShardStore.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/DDataRememberEntitiesShardStore.scala index 52c6e4cadf4..001a8e7316d 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/DDataRememberEntitiesShardStore.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/DDataRememberEntitiesShardStore.scala @@ -36,9 +36,7 @@ import akka.cluster.sharding.ShardRegion.EntityId import akka.cluster.sharding.ShardRegion.ShardId import akka.util.PrettyDuration._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object DDataRememberEntitiesShardStore { @@ -69,9 +67,7 @@ private[akka] object DDataRememberEntitiesShardStore { } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class DDataRememberEntitiesShardStore( shardId: ShardId, @@ -95,7 +91,7 @@ private[akka] final class DDataRememberEntitiesShardStore( // Note that the timeout is actually updatingStateTimeout / 4 so that we fit 3 retries and a response in the timeout before the shard sees it as a failure private val writeConsistency = settings.tuningParameters.coordinatorStateWriteMajorityPlus match { case Int.MaxValue => WriteAll(settings.tuningParameters.updatingStateTimeout / 4) - case additional => WriteMajorityPlus(settings.tuningParameters.updatingStateTimeout / 4, additional, majorityMinCap) + case additional => WriteMajorityPlus(settings.tuningParameters.updatingStateTimeout / 4, additional, majorityMinCap) } private val maxUpdateAttempts = 3 // Note: total for all 5 keys @@ -199,22 +195,24 @@ private[akka] final class DDataRememberEntitiesShardStore( } private def onUpdate(update: RememberEntitiesShardStore.Update): Unit = { - val allEvts: Set[Evt] = (update.started.map(Started(_): Evt).union(update.stopped.map(Stopped(_)))) + val 
allEvts: Set[Evt] = update.started.map(Started(_): Evt).union(update.stopped.map(Stopped(_))) // map from set of evts (for same ddata key) to one update that applies each of them val ddataUpdates: Map[Set[Evt], (Update[ORSet[EntityId]], Int)] = - allEvts.groupBy(evt => key(evt.id)).map { - case (key, evts) => - (evts, (Update(key, ORSet.empty[EntityId], writeConsistency, Some(evts)) { existing => - evts.foldLeft(existing) { - case (acc, Started(id)) => acc :+ id - case (acc, Stopped(id)) => acc.remove(id) - } - }, maxUpdateAttempts)) + allEvts.groupBy(evt => key(evt.id)).map { case (key, evts) => + ( + evts, + ( + Update(key, ORSet.empty[EntityId], writeConsistency, Some(evts)) { existing => + evts.foldLeft(existing) { + case (acc, Started(id)) => acc :+ id + case (acc, Stopped(id)) => acc.remove(id) + } + }, + maxUpdateAttempts)) } - ddataUpdates.foreach { - case (_, (update, _)) => - replicator ! update + ddataUpdates.foreach { case (_, (update, _)) => + replicator ! update } context.become(waitingForUpdates(sender(), update, ddataUpdates)) diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/EntityPassivationStrategy.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/EntityPassivationStrategy.scala index 8022071ab5d..1e8d9d1cee3 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/EntityPassivationStrategy.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/EntityPassivationStrategy.scala @@ -16,9 +16,7 @@ import akka.util.FastFrequencySketch import akka.util.FrequencySketch import akka.util.OptionVal -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object EntityPassivationStrategy { type PassivateEntities = immutable.Seq[EntityId] @@ -71,9 +69,7 @@ private[akka] object EntityPassivationStrategy { } } -/** - * INTERNAL API: An entity passivation strategy, which is instantiated per active shard. 
- */ +/** INTERNAL API: An entity passivation strategy, which is instantiated per active shard. */ @InternalApi private[akka] sealed abstract class EntityPassivationStrategy { import EntityPassivationStrategy.PassivateEntities @@ -118,9 +114,7 @@ private[akka] sealed abstract class EntityPassivationStrategy { def intervalPassed(): PassivateEntities } -/** - * INTERNAL API: No-op passivation strategy for when automatic passivation is disabled. - */ +/** INTERNAL API: No-op passivation strategy for when automatic passivation is disabled. */ @InternalApi private[akka] object DisabledEntityPassivationStrategy extends EntityPassivationStrategy { import EntityPassivationStrategy.PassivateEntities @@ -133,9 +127,7 @@ private[akka] object DisabledEntityPassivationStrategy extends EntityPassivation override def intervalPassed(): PassivateEntities = PassivateEntities.none } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class IdleCheck(val timeout: FiniteDuration, val interval: FiniteDuration) @@ -332,9 +324,7 @@ private[akka] final class LeastFrequentlyUsedEntityPassivationStrategy( } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ActiveEntities { def apply( @@ -749,9 +739,7 @@ private[akka] final class CompositeEntityPassivationStrategy( } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object AdmissionOptimizer { def apply( @@ -759,10 +747,10 @@ private[akka] object AdmissionOptimizer { optimizer: ClusterShardingSettings.CompositePassivationStrategy.AdmissionOptimizer): AdmissionOptimizer = optimizer match { case ClusterShardingSettings.CompositePassivationStrategy.HillClimbingAdmissionOptimizer( - adjustMultiplier, - initialStep, - restartThreshold, - stepDecay) => + adjustMultiplier, + initialStep, + restartThreshold, + stepDecay) => new HillClimbingAdmissionOptimizer(initialLimit, adjustMultiplier, initialStep, restartThreshold, stepDecay) case _ => NoAdmissionOptimizer 
} @@ -776,14 +764,10 @@ private[akka] object AdmissionOptimizer { @InternalApi private[akka] abstract class AdmissionOptimizer { - /** - * An entity was accessed that is already active. - */ + /** An entity was accessed that is already active. */ def recordActive(): Unit - /** - * An entity was accessed that was passive (needed to be activated). - */ + /** An entity was accessed that was passive (needed to be activated). */ def recordPassive(): Unit /** @@ -859,16 +843,17 @@ private[akka] final class HillClimbingAdmissionOptimizer( } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object AdmissionFilter { def apply( initialCapacity: Int, filter: ClusterShardingSettings.CompositePassivationStrategy.AdmissionFilter): AdmissionFilter = filter match { - case ClusterShardingSettings.CompositePassivationStrategy - .FrequencySketchAdmissionFilter(widthMultiplier, resetMultiplier, depth, counterBits) => + case ClusterShardingSettings.CompositePassivationStrategy.FrequencySketchAdmissionFilter( + widthMultiplier, + resetMultiplier, + depth, + counterBits) => FrequencySketchAdmissionFilter(initialCapacity, widthMultiplier, resetMultiplier, depth, counterBits) case _ => AlwaysAdmissionFilter } diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/EventSourcedRememberEntitiesCoordinatorStore.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/EventSourcedRememberEntitiesCoordinatorStore.scala index 3c653b4e731..ff2807394d0 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/EventSourcedRememberEntitiesCoordinatorStore.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/EventSourcedRememberEntitiesCoordinatorStore.scala @@ -15,9 +15,7 @@ import akka.cluster.sharding.ShardCoordinator.Internal import akka.cluster.sharding.ShardRegion.ShardId import akka.persistence._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] 
object EventSourcedRememberEntitiesCoordinatorStore { def props(typeName: String, settings: ClusterShardingSettings): Props = @@ -28,9 +26,7 @@ private[akka] object EventSourcedRememberEntitiesCoordinatorStore { case object MigrationMarker extends ClusterShardingSerializable } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class EventSourcedRememberEntitiesCoordinatorStore( typeName: String, diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/EventSourcedRememberEntitiesProvider.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/EventSourcedRememberEntitiesProvider.scala index d677279bbc6..217f7678b8e 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/EventSourcedRememberEntitiesProvider.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/EventSourcedRememberEntitiesProvider.scala @@ -9,9 +9,7 @@ import akka.annotation.InternalApi import akka.cluster.sharding.ClusterShardingSettings import akka.cluster.sharding.ShardRegion.ShardId -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class EventSourcedRememberEntitiesProvider(typeName: String, settings: ClusterShardingSettings) extends RememberEntitiesProvider { diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/EventSourcedRememberEntitiesShardStore.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/EventSourcedRememberEntitiesShardStore.scala index 89ab3938fa9..677fec4eec9 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/EventSourcedRememberEntitiesShardStore.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/EventSourcedRememberEntitiesShardStore.scala @@ -22,32 +22,22 @@ import akka.persistence.SaveSnapshotSuccess import akka.persistence.SnapshotOffer import akka.persistence.SnapshotSelectionCriteria -/** - * INTERNAL API - */ 
+/** INTERNAL API */ @InternalApi private[akka] object EventSourcedRememberEntitiesShardStore { - /** - * A case class which represents a state change for the Shard - */ + /** A case class which represents a state change for the Shard */ sealed trait StateChange extends ClusterShardingSerializable - /** - * Persistent state of the Shard. - */ + /** Persistent state of the Shard. */ final case class State private[akka] (entities: Set[EntityId] = Set.empty) extends ClusterShardingSerializable - /** - * `State` change for starting a set of entities in this `Shard` - */ + /** `State` change for starting a set of entities in this `Shard` */ final case class EntitiesStarted(entities: Set[EntityId]) extends StateChange case object StartedAck - /** - * `State` change for an entity which has terminated. - */ + /** `State` change for an entity which has terminated. */ final case class EntitiesStopped(entities: Set[EntityId]) extends StateChange def props(typeName: String, shardId: ShardRegion.ShardId, settings: ClusterShardingSettings): Props = diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/LeastShardAllocationStrategy.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/LeastShardAllocationStrategy.scala index 73272858fb5..c3e43bac1b6 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/LeastShardAllocationStrategy.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/LeastShardAllocationStrategy.scala @@ -13,9 +13,7 @@ import akka.cluster.sharding.ShardRegion.ShardId import akka.cluster.sharding.internal.ClusterShardAllocationMixin.RegionEntry import akka.cluster.sharding.internal.ClusterShardAllocationMixin.ShardSuitabilityOrdering -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object LeastShardAllocationStrategy { private val emptyRebalanceResult = Future.successful(Set.empty[ShardId]) } @@ -58,11 +56,10 @@ import 
akka.cluster.sharding.internal.ClusterShardAllocationMixin.ShardSuitabili optimalPerRegion: Int, sortedEntries: Iterable[RegionEntry]): Set[ShardId] = { val selected = Vector.newBuilder[ShardId] - sortedEntries.foreach { - case RegionEntry(_, _, shardIds) => - if (shardIds.size > optimalPerRegion) { - selected ++= shardIds.take(shardIds.size - optimalPerRegion) - } + sortedEntries.foreach { case RegionEntry(_, _, shardIds) => + if (shardIds.size > optimalPerRegion) { + selected ++= shardIds.take(shardIds.size - optimalPerRegion) + } } val result = selected.result() result.take(limit(numberOfShards)).toSet @@ -81,11 +78,10 @@ import akka.cluster.sharding.internal.ClusterShardAllocationMixin.ShardSuitabili emptyRebalanceResult } else { val selected = Vector.newBuilder[ShardId] - sortedEntries.foreach { - case RegionEntry(_, _, shardIds) => - if (shardIds.size >= optimalPerRegion) { - selected += shardIds.head - } + sortedEntries.foreach { case RegionEntry(_, _, shardIds) => + if (shardIds.size >= optimalPerRegion) { + selected += shardIds.head + } } val result = selected.result().take(min(countBelowOptimal, limit(numberOfShards))).toSet Future.successful(result) diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/RememberEntityStarter.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/RememberEntityStarter.scala index 23ff4e88a15..71a7cc9fa23 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/RememberEntityStarter.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/internal/RememberEntityStarter.scala @@ -20,9 +20,7 @@ import akka.cluster.sharding.ShardRegion import akka.cluster.sharding.ShardRegion.EntityId import akka.cluster.sharding.ShardRegion.ShardId -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object RememberEntityStarterManager { def props(region: ActorRef, settings: ClusterShardingSettings) = @@ -34,9 +32,7 @@ private[akka] 
object RememberEntityStarterManager { private case object ContinueAfterDelay extends NoSerializationVerificationNeeded } -/** - * INTERNAL API: Actor responsible for starting entities when rememberEntities is enabled - */ +/** INTERNAL API: Actor responsible for starting entities when rememberEntities is enabled */ @InternalApi private[akka] final class RememberEntityStarterManager(region: ActorRef, settings: ClusterShardingSettings) extends Actor @@ -59,10 +55,9 @@ private[akka] final class RememberEntityStarterManager(region: ActorRef, setting case _: Terminated => // RememberEntityStarter was done } - private val constantStrategyIdle: Receive = { - case s: StartEntities => - start(s, isConstantStrategy = true) - context.become(constantStrategyWaiting(Vector.empty)) + private val constantStrategyIdle: Receive = { case s: StartEntities => + start(s, isConstantStrategy = true) + context.become(constantStrategyWaiting(Vector.empty)) } private def constantStrategyWaiting(workQueue: Vector[StartEntities]): Receive = { @@ -89,9 +84,7 @@ private[akka] final class RememberEntityStarterManager(region: ActorRef, setting } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object RememberEntityStarter { def props( @@ -107,9 +100,7 @@ private[akka] object RememberEntityStarter { private case object ResendUnAcked extends NoSerializationVerificationNeeded } -/** - * INTERNAL API: Actor responsible for starting entities when rememberEntities is enabled - */ +/** INTERNAL API: Actor responsible for starting entities when rememberEntities is enabled */ @InternalApi private[akka] final class RememberEntityStarter( region: ActorRef, diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/protobuf/ClusterShardingMessageSerializer.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/protobuf/ClusterShardingMessageSerializer.scala index 897271cf96a..7bace19d544 100644 --- 
a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/protobuf/ClusterShardingMessageSerializer.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/protobuf/ClusterShardingMessageSerializer.scala @@ -34,9 +34,7 @@ import akka.serialization.SerializerWithStringManifest import akka.util.ccompat._ import akka.util.ccompat.JavaConverters._ -/** - * INTERNAL API: Protobuf serializer of ClusterSharding messages. - */ +/** INTERNAL API: Protobuf serializer of ClusterSharding messages. */ @ccompatUsedUntil213 private[akka] class ClusterShardingMessageSerializer(val system: ExtendedActorSystem) extends SerializerWithStringManifest @@ -354,16 +352,15 @@ private[akka] class ClusterShardingMessageSerializer(val system: ExtendedActorSy private def coordinatorStateToProto(state: State): sm.CoordinatorState = { val builder = sm.CoordinatorState.newBuilder() - state.shards.foreach { - case (shardId, regionRef) => - val b = sm.CoordinatorState.ShardEntry - .newBuilder() - .setShardId(shardId) - .setRegionRef(Serialization.serializedActorPath(regionRef)) - builder.addShards(b) + state.shards.foreach { case (shardId, regionRef) => + val b = sm.CoordinatorState.ShardEntry + .newBuilder() + .setShardId(shardId) + .setRegionRef(Serialization.serializedActorPath(regionRef)) + builder.addShards(b) } - state.regions.foreach { - case (regionRef, _) => builder.addRegions(Serialization.serializedActorPath(regionRef)) + state.regions.foreach { case (regionRef, _) => + builder.addRegions(Serialization.serializedActorPath(regionRef)) } state.regionProxies.foreach { ref => builder.addRegionProxies(Serialization.serializedActorPath(ref)) @@ -385,8 +382,8 @@ private[akka] class ClusterShardingMessageSerializer(val system: ExtendedActorSy val regionsZero: Map[ActorRef, Vector[String]] = state.getRegionsList.asScala.toVector.iterator.map(resolveActorRef(_) -> Vector.empty[String]).toMap val regions: Map[ActorRef, Vector[String]] = - shards.foldLeft(regionsZero) { - case 
(acc, (shardId, regionRef)) => acc.updated(regionRef, acc(regionRef) :+ shardId) + shards.foldLeft(regionsZero) { case (acc, (shardId, regionRef)) => + acc.updated(regionRef, acc(regionRef) :+ shardId) } val proxies: Set[ActorRef] = state.getRegionProxiesList.asScala.iterator.map { resolveActorRef }.to(immutable.Set) @@ -430,13 +427,12 @@ private[akka] class ClusterShardingMessageSerializer(val system: ExtendedActorSy private def shardHomesToProto(sh: ShardHomes): sm.ShardHomes = { sm.ShardHomes .newBuilder() - .addAllHomes(sh.homes.map { - case (regionRef, shards) => - sm.ShardHomesEntry - .newBuilder() - .setRegion(Serialization.serializedActorPath(regionRef)) - .addAllShard(shards.asJava) - .build() + .addAllHomes(sh.homes.map { case (regionRef, shards) => + sm.ShardHomesEntry + .newBuilder() + .setRegion(Serialization.serializedActorPath(regionRef)) + .addAllShard(shards.asJava) + .build() }.asJava) .build() } @@ -485,9 +481,8 @@ private[akka] class ClusterShardingMessageSerializer(val system: ExtendedActorSy private def shardRegionStatsToProto(evt: ShardRegionStats): sm.ShardRegionStats = { val b = sm.ShardRegionStats.newBuilder() - evt.stats.foreach { - case (sid, no) => - b.addStats(sm.MapFieldEntry.newBuilder().setKey(sid).setValue(no).build()) + evt.stats.foreach { case (sid, no) => + b.addStats(sm.MapFieldEntry.newBuilder().setKey(sid).setValue(no).build()) } evt.failed.foreach { sid => b.addFailed(sid).build() @@ -508,14 +503,13 @@ private[akka] class ClusterShardingMessageSerializer(val system: ExtendedActorSy private def clusterShardingStatsToProto(evt: ClusterShardingStats): sm.ClusterShardingStats = { val b = sm.ClusterShardingStats.newBuilder() - evt.regions.foreach { - case (address, shardRegionStats) => - b.addStats( - sm.ClusterShardingStatsEntry - .newBuilder() - .setAddress(serializeAddress(address)) - .setStats(shardRegionStatsToProto(shardRegionStats)) - .build()) + evt.regions.foreach { case (address, shardRegionStats) => + b.addStats( + 
sm.ClusterShardingStatsEntry + .newBuilder() + .setAddress(serializeAddress(address)) + .setStats(shardRegionStatsToProto(shardRegionStats)) + .build()) } b.build() } diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sbr/GlobalRegistry.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sbr/GlobalRegistry.scala index 708b7d01088..758b7446507 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sbr/GlobalRegistry.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sbr/GlobalRegistry.scala @@ -27,8 +27,8 @@ object GlobalRegistry { def props(registry: ActorRef): Props = Props(new SingletonActor(registry)) - val extractEntityId: ShardRegion.ExtractEntityId = { - case id: Int => (id.toString, id) + val extractEntityId: ShardRegion.ExtractEntityId = { case id: Int => + (id.toString, id) } val extractShardId: ShardRegion.ExtractShardId = { @@ -54,8 +54,8 @@ object GlobalRegistry { registry ! Unregister(key, Cluster(context.system).selfAddress) } - override def receive = { - case i: Int => sender() ! i + override def receive = { case i: Int => + sender() ! 
i } } } diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sbr/GremlinController.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sbr/GremlinController.scala index 2f86eb8fb40..95fded20322 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sbr/GremlinController.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sbr/GremlinController.scala @@ -53,7 +53,7 @@ object GremlinControllerProxy { } class GremlinControllerProxy(target: ActorRef) extends Actor { - override def receive = { - case msg => target.forward(msg) + override def receive = { case msg => + target.forward(msg) } } diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sbr/RandomizedBrainResolverIntegrationSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sbr/RandomizedBrainResolverIntegrationSpec.scala index f784910a62c..bb8b47f1b1e 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sbr/RandomizedBrainResolverIntegrationSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sbr/RandomizedBrainResolverIntegrationSpec.scala @@ -319,19 +319,18 @@ class RandomizedSplitBrainResolverIntegrationSpec nextDelay() } - flaky.foreach { - case (i, (from, to)) => - if (i != 0) { - // heal previous flakiness - val (prevFrom, prevTo) = flaky(i - 1) - for (n <- prevTo) - passThrough(prevFrom, n) - } - - for (n <- to) - blackhole(from, n) - - nextDelay() + flaky.foreach { case (i, (from, to)) => + if (i != 0) { + // heal previous flakiness + val (prevFrom, prevTo) = flaky(i - 1) + for (n <- prevTo) + passThrough(prevFrom, n) + } + + for (n <- to) + blackhole(from, n) + + nextDelay() } if (healLastFlaky) { @@ -404,11 +403,11 @@ class RandomizedSplitBrainResolverIntegrationSpec "SplitBrainResolver with lease" must { for (scenario <- scenarios) { - scenario.toString taggedAs (LongRunningTest) in { + scenario.toString taggedAs LongRunningTest in { // temporarily disabled for aeron-udp in multi-node: 
https://github.com/akka/akka/pull/30706/ val arteryConfig = system.settings.config.getConfig("akka.remote.artery") if (arteryConfig.getInt("canonical.port") == 6000 && - arteryConfig.getString("transport") == "aeron-udp") { + arteryConfig.getString("transport") == "aeron-udp") { pending } DisposableSys(scenario).verify() diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sbr/SplitBrainResolverIntegrationSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sbr/SplitBrainResolverIntegrationSpec.scala index c6900c1b399..a2d152a26ca 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sbr/SplitBrainResolverIntegrationSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sbr/SplitBrainResolverIntegrationSpec.scala @@ -446,10 +446,15 @@ class SplitBrainResolverIntegrationSpec Scenario(keepOldestConfig, 3, 3, KeepSide1), Scenario(keepOldestConfig, 1, 1, KeepSide1), Scenario(keepOldestConfig, 1, 2, KeepSide2), // because down-if-alone - Scenario(keepMajorityConfig, 3, 2, KeepAll, { - case `node1` | `node2` | `node3` => "dcA" - case _ => "dcB" - }), + Scenario( + keepMajorityConfig, + 3, + 2, + KeepAll, + { + case `node1` | `node2` | `node3` => "dcA" + case _ => "dcB" + }), Scenario(downAllConfig, 1, 2, ShutdownBoth), Scenario(leaseMajorityConfig, 4, 5, KeepSide2)) @@ -460,7 +465,7 @@ class SplitBrainResolverIntegrationSpec // temporarily disabled for aeron-udp in multi-node: https://github.com/akka/akka/pull/30706/ val arteryConfig = system.settings.config.getConfig("akka.remote.artery") if (arteryConfig.getInt("canonical.port") == 6000 && - arteryConfig.getString("transport") == "aeron-udp") { + arteryConfig.getString("transport") == "aeron-udp") { pending } DisposableSys(scenario).verify() diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardCoordinatorDowning2Spec.scala 
b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardCoordinatorDowning2Spec.scala index 0bcb1e94d4f..d455f4ba82f 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardCoordinatorDowning2Spec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardCoordinatorDowning2Spec.scala @@ -20,8 +20,8 @@ object ClusterShardCoordinatorDowning2Spec { case class Ping(id: String) extends CborSerializable class Entity extends Actor { - def receive = { - case Ping(_) => sender() ! self + def receive = { case Ping(_) => + sender() ! self } } @@ -36,8 +36,8 @@ object ClusterShardCoordinatorDowning2Spec { } } - val extractEntityId: ShardRegion.ExtractEntityId = { - case m @ Ping(id) => (id, m) + val extractEntityId: ShardRegion.ExtractEntityId = { case m @ Ping(id) => + (id, m) } val extractShardId: ShardRegion.ExtractShardId = { @@ -161,15 +161,14 @@ abstract class ClusterShardCoordinatorDowning2Spec(multiNodeConfig: ClusterShard awaitAssert { val probe = TestProbe() - (originalLocations ++ additionalLocations).foreach { - case (id, ref) => - region.tell(Ping(id), probe.ref) - if (ref.path.address == secondAddress) { - val newRef = probe.expectMsgType[ActorRef](1.second) - newRef should not be (ref) - system.log.debug("Moved [{}] from [{}] to [{}]", id, ref, newRef) - } else - probe.expectMsg(1.second, ref) // should not move + (originalLocations ++ additionalLocations).foreach { case (id, ref) => + region.tell(Ping(id), probe.ref) + if (ref.path.address == secondAddress) { + val newRef = probe.expectMsgType[ActorRef](1.second) + newRef should not be ref + system.log.debug("Moved [{}] from [{}] to [{}]", id, ref, newRef) + } else + probe.expectMsg(1.second, ref) // should not move } } } diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardCoordinatorDowningSpec.scala 
b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardCoordinatorDowningSpec.scala index f813b61cb34..ed6664ddd2d 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardCoordinatorDowningSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardCoordinatorDowningSpec.scala @@ -20,8 +20,8 @@ object ClusterShardCoordinatorDowningSpec { case class Ping(id: String) extends CborSerializable class Entity extends Actor { - def receive = { - case Ping(_) => sender() ! self + def receive = { case Ping(_) => + sender() ! self } } @@ -36,8 +36,8 @@ object ClusterShardCoordinatorDowningSpec { } } - val extractEntityId: ShardRegion.ExtractEntityId = { - case m @ Ping(id) => (id, m) + val extractEntityId: ShardRegion.ExtractEntityId = { case m @ Ping(id) => + (id, m) } val extractShardId: ShardRegion.ExtractShardId = { @@ -163,15 +163,14 @@ abstract class ClusterShardCoordinatorDowningSpec(multiNodeConfig: ClusterShardC awaitAssert { val probe = TestProbe() - (originalLocations ++ additionalLocations).foreach { - case (id, ref) => - region.tell(Ping(id), probe.ref) - if (ref.path.address == firstAddress) { - val newRef = probe.expectMsgType[ActorRef](1.second) - newRef should not be (ref) - system.log.debug("Moved [{}] from [{}] to [{}]", id, ref, newRef) - } else - probe.expectMsg(1.second, ref) // should not move + (originalLocations ++ additionalLocations).foreach { case (id, ref) => + region.tell(Ping(id), probe.ref) + if (ref.path.address == firstAddress) { + val newRef = probe.expectMsgType[ActorRef](1.second) + newRef should not be ref + system.log.debug("Moved [{}] from [{}] to [{}]", id, ref, newRef) + } else + probe.expectMsg(1.second, ref) // should not move } } } diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingFailureSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingFailureSpec.scala index 
57caf83bb15..cded4436613 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingFailureSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingFailureSpec.scala @@ -175,13 +175,13 @@ abstract class ClusterShardingFailureSpec(multiNodeConfig: ClusterShardingFailur val entity21 = lastSender val shard2 = system.actorSelection(entity21.path.parent) - //Test the ShardCoordinator allocating shards after a journal/network failure + // Test the ShardCoordinator allocating shards after a journal/network failure region ! Add("30", 3) - //Test the Shard starting entities and persisting after a journal/network failure + // Test the Shard starting entities and persisting after a journal/network failure region ! Add("11", 1) - //Test the Shard passivate works after a journal failure + // Test the Shard passivate works after a journal failure shard2.tell(Passivate(PoisonPill), entity21) awaitAssert { diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGetStateSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGetStateSpec.scala index afd903a8bb9..5d6944a1e92 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGetStateSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGetStateSpec.scala @@ -16,8 +16,8 @@ import akka.testkit.TestProbe object ClusterShardingGetStateSpec { import MultiNodeClusterShardingSpec.PingPongActor - val extractEntityId: ShardRegion.ExtractEntityId = { - case msg @ PingPongActor.Ping(id) => (id.toString, msg) + val extractEntityId: ShardRegion.ExtractEntityId = { case msg @ PingPongActor.Ping(id) => + (id.toString, msg) } val numberOfShards = 2 @@ -30,8 +30,7 @@ object ClusterShardingGetStateSpec { val shardTypeName = "Ping" } -object ClusterShardingGetStateSpecConfig - extends 
MultiNodeClusterShardingConfig(additionalConfig = """ +object ClusterShardingGetStateSpecConfig extends MultiNodeClusterShardingConfig(additionalConfig = """ akka.cluster.sharding { coordinator-failure-backoff = 3s shard-failure-backoff = 3s @@ -112,8 +111,8 @@ abstract class ClusterShardingGetStateSpec extends MultiNodeClusterShardingSpec( val pingProbe = TestProbe() // trigger starting of 4 entities (1 to 4).foreach(n => region.tell(PingPongActor.Ping(n), pingProbe.ref)) - pingProbe.receiveWhile(messages = 4) { - case PingPongActor.Pong => () + pingProbe.receiveWhile(messages = 4) { case PingPongActor.Pong => + () } } } @@ -136,8 +135,8 @@ abstract class ClusterShardingGetStateSpec extends MultiNodeClusterShardingSpec( system.actorSelection(path).tell(ShardRegion.GetShardRegionState, probe.ref) } - val states = probe.receiveWhile(messages = regions.size) { - case msg: ShardRegion.CurrentShardRegionState => msg + val states = probe.receiveWhile(messages = regions.size) { case msg: ShardRegion.CurrentShardRegionState => + msg } val allEntityIds = for { state <- states diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGetStatsSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGetStatsSpec.scala index 173f2c89f81..3bab59631b1 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGetStatsSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGetStatsSpec.scala @@ -21,8 +21,8 @@ object ClusterShardingGetStatsSpec { val numberOfShards = 3 - val extractEntityId: ShardRegion.ExtractEntityId = { - case msg @ PingPongActor.Ping(id) => (id.toString, msg) + val extractEntityId: ShardRegion.ExtractEntityId = { case msg @ PingPongActor.Ping(id) => + (id.toString, msg) } val extractShardId: ShardRegion.ExtractShardId = { case PingPongActor.Ping(id) => (id % numberOfShards).toString @@ -30,8 +30,7 @@ object 
ClusterShardingGetStatsSpec { } } -object ClusterShardingGetStatsSpecConfig - extends MultiNodeClusterShardingConfig(additionalConfig = """ +object ClusterShardingGetStatsSpecConfig extends MultiNodeClusterShardingConfig(additionalConfig = """ akka.log-dead-letters-during-shutdown = off akka.cluster.sharding.updating-state-timeout = 2s akka.cluster.sharding.waiting-for-state-timeout = 2s @@ -123,8 +122,8 @@ abstract class ClusterShardingGetStatsSpec extends MultiNodeClusterShardingSpec( // trigger starting of 2 entities on first and second node // but leave third node without entities List(1, 2, 4, 6).foreach(n => region.tell(PingPongActor.Ping(n), pingProbe.ref)) - pingProbe.receiveWhile(messages = 4) { - case PingPongActor.Pong => () + pingProbe.receiveWhile(messages = 4) { case PingPongActor.Pong => + () } } } @@ -170,8 +169,8 @@ abstract class ClusterShardingGetStatsSpec extends MultiNodeClusterShardingSpec( val pingProbe = TestProbe() // make sure we have the 4 entities still alive across the fewer nodes List(1, 2, 4, 6).foreach(n => region.tell(PingPongActor.Ping(n), pingProbe.ref)) - pingProbe.receiveWhile(messages = 4) { - case PingPongActor.Pong => () + pingProbe.receiveWhile(messages = 4) { case PingPongActor.Pong => + () } } } diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGracefulShutdownSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGracefulShutdownSpec.scala index fde4add48cf..b3e7e18c2a0 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGracefulShutdownSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGracefulShutdownSpec.scala @@ -17,8 +17,7 @@ import akka.testkit._ abstract class ClusterShardingGracefulShutdownSpecConfig(mode: String) extends MultiNodeClusterShardingConfig( mode, - additionalConfig = - """ + additionalConfig = """ akka.loglevel = info 
akka.persistence.journal.leveldb-shared.store.native = off # We set this high to allow pausing coordinated shutdown make sure the handoff completes 'immediately' and not diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingIncorrectSetupSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingIncorrectSetupSpec.scala index 925577eb703..08fa683e659 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingIncorrectSetupSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingIncorrectSetupSpec.scala @@ -7,7 +7,8 @@ package akka.cluster.sharding import akka.testkit._ object ClusterShardingIncorrectSetupSpecConfig - extends MultiNodeClusterShardingConfig(additionalConfig = "akka.cluster.sharding.waiting-for-state-timeout = 100ms") { + extends MultiNodeClusterShardingConfig(additionalConfig = + "akka.cluster.sharding.waiting-for-state-timeout = 100ms") { val first = role("first") val second = role("second") diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingLeavingSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingLeavingSpec.scala index 990e9c3efac..9e8cc1e0f6a 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingLeavingSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingLeavingSpec.scala @@ -17,8 +17,8 @@ object ClusterShardingLeavingSpec { case class Ping(id: String) extends CborSerializable class Entity extends Actor { - def receive = { - case Ping(_) => sender() ! self + def receive = { case Ping(_) => + sender() ! 
self } } @@ -33,8 +33,8 @@ object ClusterShardingLeavingSpec { } } - val extractEntityId: ShardRegion.ExtractEntityId = { - case m @ Ping(id) => (id, m) + val extractEntityId: ShardRegion.ExtractEntityId = { case m @ Ping(id) => + (id, m) } val extractShardId: ShardRegion.ExtractShardId = { @@ -47,8 +47,7 @@ abstract class ClusterShardingLeavingSpecConfig(mode: String) extends MultiNodeClusterShardingConfig( mode, loglevel = "DEBUG", - additionalConfig = - """ + additionalConfig = """ akka.cluster.sharding.verbose-debug-logging = on akka.cluster.sharding.rebalance-interval = 1s # make rebalancing more likely to happen to test for https://github.com/akka/akka/issues/29093 akka.cluster.sharding.distributed-data.majority-min-cap = 1 @@ -160,15 +159,14 @@ abstract class ClusterShardingLeavingSpec(multiNodeConfig: ClusterShardingLeavin within(15.seconds) { awaitAssert { val probe = TestProbe() - originalLocations.foreach { - case (id, ref) => - region.tell(Ping(id), probe.ref) - if (leavingNodes.contains(ref.path.address)) { - val newRef = probe.expectMsgType[ActorRef](1.second) - newRef should not be (ref) - system.log.debug("Moved [{}] from [{}] to [{}]", id, ref, newRef) - } else - probe.expectMsg(1.second, ref) // should not move + originalLocations.foreach { case (id, ref) => + region.tell(Ping(id), probe.ref) + if (leavingNodes.contains(ref.path.address)) { + val newRef = probe.expectMsgType[ActorRef](1.second) + newRef should not be ref + system.log.debug("Moved [{}] from [{}] to [{}]", id, ref, newRef) + } else + probe.expectMsg(1.second, ref) // should not move } } } diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingQueriesSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingQueriesSpec.scala index dca25563f12..8eda78b715f 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingQueriesSpec.scala +++ 
b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingQueriesSpec.scala @@ -15,8 +15,8 @@ import akka.testkit.TestProbe object ClusterShardingQueriesSpec { import MultiNodeClusterShardingSpec.PingPongActor - val extractEntityId: ShardRegion.ExtractEntityId = { - case msg @ PingPongActor.Ping(id) => (id.toString, msg) + val extractEntityId: ShardRegion.ExtractEntityId = { case msg @ PingPongActor.Ping(id) => + (id.toString, msg) } val numberOfShards = 6 @@ -29,8 +29,7 @@ object ClusterShardingQueriesSpec { val shardTypeName = "DatatypeA" } -object ClusterShardingQueriesSpecConfig - extends MultiNodeClusterShardingConfig(additionalConfig = """ +object ClusterShardingQueriesSpecConfig extends MultiNodeClusterShardingConfig(additionalConfig = """ akka.log-dead-letters-during-shutdown = off akka.cluster.sharding { shard-region-query-timeout = 2ms @@ -100,8 +99,8 @@ abstract class ClusterShardingQueriesSpec awaitAssert { val pingProbe = TestProbe() (0 to 20).foreach(n => region.tell(PingPongActor.Ping(n), pingProbe.ref)) - pingProbe.receiveWhile(messages = 20) { - case PingPongActor.Pong => () + pingProbe.receiveWhile(messages = 20) { case PingPongActor.Pong => + () } } } @@ -113,17 +112,19 @@ abstract class ClusterShardingQueriesSpec runOn(busy, second, third) { val probe = TestProbe() val region = ClusterSharding(system).shardRegion(shardTypeName) - awaitAssert({ - region.tell(ShardRegion.GetClusterShardingStats(10.seconds), probe.ref) - val regions = probe.expectMsgType[ShardRegion.ClusterShardingStats].regions - regions.size shouldEqual 3 - val timeouts = numberOfShards / regions.size - - // 3 regions, 2 shards per region, all 2 shards/region were unresponsive - // within shard-region-query-timeout, which only on first is 0ms - regions.values.map(_.stats.size).sum shouldEqual 4 - regions.values.map(_.failed.size).sum shouldEqual timeouts - }, max = 10.seconds) + awaitAssert( + { + region.tell(ShardRegion.GetClusterShardingStats(10.seconds), 
probe.ref) + val regions = probe.expectMsgType[ShardRegion.ClusterShardingStats].regions + regions.size shouldEqual 3 + val timeouts = numberOfShards / regions.size + + // 3 regions, 2 shards per region, all 2 shards/region were unresponsive + // within shard-region-query-timeout, which only on first is 0ms + regions.values.map(_.stats.size).sum shouldEqual 4 + regions.values.map(_.failed.size).sum shouldEqual timeouts + }, + max = 10.seconds) } enterBarrier("received failed stats from timed out shards vs empty") } @@ -132,24 +133,28 @@ abstract class ClusterShardingQueriesSpec runOn(busy) { val probe = TestProbe() val region = ClusterSharding(system).shardRegion(shardTypeName) - awaitAssert({ - region.tell(ShardRegion.GetShardRegionState, probe.ref) - val state = probe.expectMsgType[ShardRegion.CurrentShardRegionState] - state.shards.isEmpty shouldEqual true - state.failed.size shouldEqual 2 - }, max = 10.seconds) + awaitAssert( + { + region.tell(ShardRegion.GetShardRegionState, probe.ref) + val state = probe.expectMsgType[ShardRegion.CurrentShardRegionState] + state.shards.isEmpty shouldEqual true + state.failed.size shouldEqual 2 + }, + max = 10.seconds) } enterBarrier("query-timeout-on-busy-node") runOn(second, third) { val probe = TestProbe() val region = ClusterSharding(system).shardRegion(shardTypeName) - awaitAssert({ - region.tell(ShardRegion.GetShardRegionState, probe.ref) - val state = probe.expectMsgType[ShardRegion.CurrentShardRegionState] - state.shards.size shouldEqual 2 - state.failed.isEmpty shouldEqual true - }, max = 10.seconds) + awaitAssert( + { + region.tell(ShardRegion.GetShardRegionState, probe.ref) + val state = probe.expectMsgType[ShardRegion.CurrentShardRegionState] + state.shards.size shouldEqual 2 + state.failed.isEmpty shouldEqual true + }, + max = 10.seconds) } enterBarrier("done") } diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRegistrationCoordinatedShutdownSpec.scala 
b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRegistrationCoordinatedShutdownSpec.scala index f552e25734d..63516dc4c34 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRegistrationCoordinatedShutdownSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRegistrationCoordinatedShutdownSpec.scala @@ -12,9 +12,7 @@ import akka.actor._ import akka.cluster.MemberStatus import akka.testkit.{ ImplicitSender, TestProbe } -/** - * Test for issue #28416 - */ +/** Test for issue #28416 */ object ClusterShardingRegistrationCoordinatedShutdownSpec extends MultiNodeClusterShardingConfig { val first = role("first") diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesNewExtractorSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesNewExtractorSpec.scala index 135250de46b..cfb95c5fdb6 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesNewExtractorSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesNewExtractorSpec.scala @@ -23,15 +23,15 @@ object ClusterShardingRememberEntitiesNewExtractorSpec { log.info("Entity started: " + self.path) probe.foreach(_ ! Started(self)) - def receive = { - case m => sender() ! m + def receive = { case m => + sender() ! 
m } } val shardCount = 5 - val extractEntityId: ShardRegion.ExtractEntityId = { - case id: Int => (id.toString, id) + val extractEntityId: ShardRegion.ExtractEntityId = { case id: Int => + (id.toString, id) } val extractShardId1: ShardRegion.ExtractShardId = { @@ -182,7 +182,8 @@ abstract class ClusterShardingRememberEntitiesNewExtractorSpec( enterBarrier("first-sharding-cluster-stopped") } - "start new nodes with different extractor, and have the entities running on the right shards" in within(30.seconds) { + "start new nodes with different extractor, and have the entities running on the right shards" in within( + 30.seconds) { // start it with a new shard id extractor, which will put the entities // on different shards diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesSpec.scala index 765a22b1125..c94ef63b37a 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesSpec.scala @@ -17,8 +17,8 @@ import akka.util.ccompat._ @ccompatUsedUntil213 object ClusterShardingRememberEntitiesSpec { - val extractEntityId: ShardRegion.ExtractEntityId = { - case id: Int => (id.toString, id) + val extractEntityId: ShardRegion.ExtractEntityId = { case id: Int => + (id.toString, id) } val extractShardId: ShardRegion.ExtractShardId = { diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRolePartitioningSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRolePartitioningSpec.scala index 545c8055c5c..8ec82ab239a 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRolePartitioningSpec.scala +++ 
b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRolePartitioningSpec.scala @@ -16,8 +16,8 @@ import akka.testkit._ // See https://github.com/akka/akka/issues/28177#issuecomment-555013145 object E1 { val TypeKey = "Datatype1" - val extractEntityId: ShardRegion.ExtractEntityId = { - case id: String => (id, id) + val extractEntityId: ShardRegion.ExtractEntityId = { case id: String => + (id, id) } val extractShardId: ShardRegion.ExtractShardId = { @@ -28,8 +28,8 @@ object E1 { object E2 { val TypeKey = "Datatype2" - val extractEntityId: ShardRegion.ExtractEntityId = { - case id: Int => (id.toString, id) + val extractEntityId: ShardRegion.ExtractEntityId = { case id: Int => + (id.toString, id) } val extractShardId: ShardRegion.ExtractShardId = { diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSingleShardPerEntitySpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSingleShardPerEntitySpec.scala index 852a3527021..628d49f6a03 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSingleShardPerEntitySpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSingleShardPerEntitySpec.scala @@ -11,9 +11,7 @@ import akka.remote.testconductor.RoleName import akka.remote.testkit.Direction import akka.testkit._ -/** - * one-to-one mapping between shards and entities is not efficient but some use that anyway - */ +/** one-to-one mapping between shards and entities is not efficient but some use that anyway */ object ClusterShardingSingleShardPerEntitySpecConfig extends MultiNodeClusterShardingConfig(additionalConfig = "akka.cluster.sharding.updating-state-timeout = 1s") { diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala index a22672427bd..c4bbfe605c2 
100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala @@ -26,7 +26,7 @@ import akka.testkit._ import akka.testkit.TestEvent.Mute object ClusterShardingSpec { - //#counter-actor + // #counter-actor case object Increment case object Decrement final case class Get(counterId: Long) @@ -44,20 +44,20 @@ object ClusterShardingSpec { override def persistenceId: String = "Counter-" + self.path.name var count = 0 - //#counter-actor + // #counter-actor override def postStop(): Unit = { super.postStop() // Simulate that the passivation takes some time, to verify passivation buffering Thread.sleep(500) } - //#counter-actor + // #counter-actor def updateState(event: CounterChanged): Unit = count += event.delta - override def receiveRecover: Receive = { - case evt: CounterChanged => updateState(evt) + override def receiveRecover: Receive = { case evt: CounterChanged => + updateState(evt) } override def receiveCommand: Receive = { @@ -68,7 +68,7 @@ object ClusterShardingSpec { case Stop => context.stop(self) } } - //#counter-actor + // #counter-actor val extractEntityId: ShardRegion.ExtractEntityId = { case EntityEnvelope(id, payload) => (id.toString, payload) @@ -93,7 +93,7 @@ object ClusterShardingSpec { class AnotherCounter extends QualifiedCounter("AnotherCounter") - //#supervisor + // #supervisor class CounterSupervisor extends Actor { val counter = context.actorOf(Props[Counter](), "theCounter") @@ -104,11 +104,11 @@ object ClusterShardingSpec { case _: Exception => SupervisorStrategy.Restart } - def receive = { - case msg => counter.forward(msg) + def receive = { case msg => + counter.forward(msg) } } - //#supervisor + // #supervisor } @@ -126,13 +126,16 @@ abstract class ClusterShardingSpecConfig( val fifth = role("fifth") val sixth = role("sixth") - /** This is the only test that creates the shared store regardless of mode, + /** 
+ * This is the only test that creates the shared store regardless of mode, * because it uses a PersistentActor. So unlike all other uses of * `MultiNodeClusterShardingConfig`, we use `MultiNodeConfig.commonConfig` here, * and call `MultiNodeClusterShardingConfig.persistenceConfig` which does not check * mode, then leverage the common config and fallbacks after these specific test configs: */ - commonConfig(ConfigFactory.parseString(s""" + commonConfig( + ConfigFactory + .parseString(s""" akka.loglevel = "DEBUG" akka.cluster.sharding.verbose-debug-logging = on @@ -172,7 +175,9 @@ abstract class ClusterShardingSpecConfig( } - """).withFallback(MultiNodeClusterShardingConfig.persistenceConfig(targetDir)).withFallback(common)) + """) + .withFallback(MultiNodeClusterShardingConfig.persistenceConfig(targetDir)) + .withFallback(common)) nodeConfig(sixth) { ConfigFactory.parseString("""akka.cluster.roles = ["frontend"]""") @@ -183,7 +188,7 @@ abstract class ClusterShardingSpecConfig( object ClusterShardingDocCode { import ClusterShardingSpec._ - //#counter-extractor + // #counter-extractor val extractEntityId: ShardRegion.ExtractEntityId = { case EntityEnvelope(id, payload) => (id.toString, payload) case msg @ Get(id) => (id.toString, msg) @@ -199,10 +204,10 @@ object ClusterShardingDocCode { (id.toLong % numberOfShards).toString case _ => throw new IllegalArgumentException() } - //#counter-extractor + // #counter-extractor { - //#extractShardId-StartEntity + // #extractShardId-StartEntity val extractShardId: ShardRegion.ExtractShardId = { case EntityEnvelope(id, _) => (id % numberOfShards).toString case Get(id) => (id % numberOfShards).toString @@ -211,7 +216,7 @@ object ClusterShardingDocCode { (id.toLong % numberOfShards).toString case _ => throw new IllegalArgumentException() } - //#extractShardId-StartEntity + // #extractShardId-StartEntity extractShardId.toString() // keep the compiler happy } @@ -306,11 +311,13 @@ abstract class ClusterShardingSpec(multiNodeConfig: 
ClusterShardingSpecConfig) def coordinatorProps(typeName: String, rebalanceEnabled: Boolean, rememberEntities: Boolean): Props = { val allocationStrategy = ShardAllocationStrategy.leastShardAllocationStrategy(absoluteLimit = 2, relativeLimit = 1.0) - val cfg = ConfigFactory.parseString(s""" + val cfg = ConfigFactory + .parseString(s""" handoff-timeout = 10s shard-start-timeout = 10s rebalance-interval = ${if (rebalanceEnabled) "2s" else "3600s"} - """).withFallback(system.settings.config.getConfig("akka.cluster.sharding")) + """) + .withFallback(system.settings.config.getConfig("akka.cluster.sharding")) val settings = ClusterShardingSettings(cfg).withRememberEntities(rememberEntities) if (settings.stateStoreMode == "persistence") @@ -361,12 +368,14 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig) } def createRegion(typeName: String, rememberEntities: Boolean): ActorRef = { - val cfg = ConfigFactory.parseString(""" + val cfg = ConfigFactory + .parseString(""" retry-interval = 1s shard-failure-backoff = 1s entity-restart-backoff = 1s buffer-size = 1000 - """).withFallback(system.settings.config.getConfig("akka.cluster.sharding")) + """) + .withFallback(system.settings.config.getConfig("akka.cluster.sharding")) val settings = ClusterShardingSettings(cfg).withRememberEntities(rememberEntities) val rememberEntitiesProvider = if (!rememberEntities) None @@ -501,10 +510,12 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig) "support proxy only mode" in within(10.seconds) { runOn(second) { - val cfg = ConfigFactory.parseString(""" + val cfg = ConfigFactory + .parseString(""" retry-interval = 1s buffer-size = 1000 - """).withFallback(system.settings.config.getConfig("akka.cluster.sharding")) + """) + .withFallback(system.settings.config.getConfig("akka.cluster.sharding")) val settings = ClusterShardingSettings(cfg) val proxy = system.actorOf( ShardRegion.proxyProps( @@ -662,7 +673,7 @@ abstract class 
ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig) if (probe.lastSender.path == rebalancingRegion.path / (n % 12).toString / n.toString) count += 1 } - count should be >= (2) + count should be >= 2 } } } @@ -673,14 +684,14 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig) "easy to use with extensions" in within(50.seconds) { runOn(third, fourth, fifth, sixth) { - //#counter-start + // #counter-start val counterRegion: ActorRef = ClusterSharding(system).start( typeName = "Counter", entityProps = Props[Counter](), settings = ClusterShardingSettings(system), extractEntityId = extractEntityId, extractShardId = extractShardId) - //#counter-start + // #counter-start counterRegion.toString // keep the compiler happy ClusterSharding(system).start( @@ -690,18 +701,18 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig) extractEntityId = extractEntityId, extractShardId = extractShardId) - //#counter-supervisor-start + // #counter-supervisor-start ClusterSharding(system).start( typeName = "SupervisedCounter", entityProps = Props[CounterSupervisor](), settings = ClusterShardingSettings(system), extractEntityId = extractEntityId, extractShardId = extractShardId) - //#counter-supervisor-start + // #counter-supervisor-start } enterBarrier("extension-started") runOn(fifth) { - //#counter-usage + // #counter-usage val counterRegion: ActorRef = ClusterSharding(system).shardRegion("Counter") counterRegion ! Get(123) expectMsg(0) @@ -709,7 +720,7 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig) counterRegion ! EntityEnvelope(123, Increment) counterRegion ! Get(123) expectMsg(1) - //#counter-usage + // #counter-usage ClusterSharding(system).shardRegion("AnotherCounter") ! EntityEnvelope(123, Decrement) ClusterSharding(system).shardRegion("AnotherCounter") ! 
Get(123) @@ -776,7 +787,7 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig) var shard: ActorSelection = null var region: ActorSelection = null runOn(third) { - //Create an increment counter 1 + // Create an increment counter 1 persistentEntitiesRegion ! EntityEnvelope(1, Increment) persistentEntitiesRegion ! Get(1) expectMsg(1) @@ -794,30 +805,36 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig) enterBarrier("everybody-hand-off-ack") runOn(third) { - //Stop the shard cleanly + // Stop the shard cleanly region ! HandOff("1") expectMsg(10 seconds, "ShardStopped not received", ShardStopped("1")) val probe = TestProbe() - awaitAssert({ - shard.tell(Identify(1), probe.ref) - probe.expectMsg(1 second, "Shard was still around", ActorIdentity(1, None)) - }, 5 seconds, 500 millis) - - //Get the path to where the shard now resides - awaitAssert({ - persistentEntitiesRegion ! Get(13) - expectMsg(0) - }, 5 seconds, 500 millis) - - //Check that counter 1 is now alive again, even though we have + awaitAssert( + { + shard.tell(Identify(1), probe.ref) + probe.expectMsg(1 second, "Shard was still around", ActorIdentity(1, None)) + }, + 5 seconds, + 500 millis) + + // Get the path to where the shard now resides + awaitAssert( + { + persistentEntitiesRegion ! 
Get(13) + expectMsg(0) + }, + 5 seconds, + 500 millis) + + // Check that counter 1 is now alive again, even though we have // not sent a message to it via the ShardRegion val counter1 = system.actorSelection(lastSender.path.parent / "1") within(5.seconds) { awaitAssert { val p = TestProbe() counter1.tell(Identify(2), p.ref) - p.expectMsgType[ActorIdentity](2.seconds).ref should not be (None) + p.expectMsgType[ActorIdentity](2.seconds).ref should not be None } } @@ -827,14 +844,14 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig) enterBarrier("after-shard-restart") runOn(fourth) { - //Check a second region does not share the same persistent shards + // Check a second region does not share the same persistent shards - //Create a separate 13 counter + // Create a separate 13 counter anotherPersistentRegion ! EntityEnvelope(13, Increment) anotherPersistentRegion ! Get(13) expectMsg(1) - //Check that no counter "1" exists in this shard + // Check that no counter "1" exists in this shard val secondCounter1 = system.actorSelection(lastSender.path.parent / "1") secondCounter1 ! Identify(3) expectMsg(3 seconds, ActorIdentity(3, None)) @@ -850,7 +867,7 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig) enterBarrier("cluster-started-12") runOn(third) { - //Create and increment counter 1 + // Create and increment counter 1 persistentRegion ! EntityEnvelope(1, Increment) persistentRegion ! Get(1) expectMsg(1) @@ -859,7 +876,7 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig) val shard = system.actorSelection(counter1.path.parent) val region = system.actorSelection(counter1.path.parent.parent) - //Create and increment counter 13 + // Create and increment counter 13 persistentRegion ! EntityEnvelope(13, Increment) persistentRegion ! 
Get(13) expectMsg(1) @@ -868,50 +885,59 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig) counter1.path.parent should ===(counter13.path.parent) - //Send the shard the passivate message from the counter + // Send the shard the passivate message from the counter watch(counter1) shard.tell(Passivate(Stop), counter1) - //Watch for the terminated message + // Watch for the terminated message expectTerminated(counter1, 5 seconds) val probe1 = TestProbe() - awaitAssert({ - //Check counter 1 is dead - counter1.tell(Identify(1), probe1.ref) - probe1.expectMsg(1 second, "Entity 1 was still around", ActorIdentity(1, None)) - }, 5 second, 500 millis) - - //Stop the shard cleanly + awaitAssert( + { + // Check counter 1 is dead + counter1.tell(Identify(1), probe1.ref) + probe1.expectMsg(1 second, "Entity 1 was still around", ActorIdentity(1, None)) + }, + 5 second, + 500 millis) + + // Stop the shard cleanly region ! HandOff("1") expectMsg(10 seconds, "ShardStopped not received", ShardStopped("1")) val probe2 = TestProbe() - awaitAssert({ - shard.tell(Identify(2), probe2.ref) - probe2.expectMsg(1 second, "Shard was still around", ActorIdentity(2, None)) - }, 5 seconds, 500 millis) + awaitAssert( + { + shard.tell(Identify(2), probe2.ref) + probe2.expectMsg(1 second, "Shard was still around", ActorIdentity(2, None)) + }, + 5 seconds, + 500 millis) } enterBarrier("shard-shutdown-12") runOn(fourth) { - //Force the shard back up + // Force the shard back up persistentRegion ! Get(25) expectMsg(0) val shard = lastSender.path.parent - //Check counter 1 is still dead + // Check counter 1 is still dead system.actorSelection(shard / "1") ! 
Identify(3) expectMsg(ActorIdentity(3, None)) - //Check counter 13 is alive again + // Check counter 13 is alive again val probe3 = TestProbe() - awaitAssert({ - system.actorSelection(shard / "13").tell(Identify(4), probe3.ref) - probe3.expectMsgType[ActorIdentity](1 second).ref should not be (None) - }, 5 seconds, 500 millis) + awaitAssert( + { + system.actorSelection(shard / "13").tell(Identify(4), probe3.ref) + probe3.expectMsgType[ActorIdentity](1 second).ref should not be None + }, + 5 seconds, + 500 millis) } enterBarrier("after-13") @@ -924,7 +950,7 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig) enterBarrier("cluster-started-12") runOn(third) { - //Create and increment counter 1 + // Create and increment counter 1 persistentRegion ! EntityEnvelope(1, Increment) persistentRegion ! Get(1) expectMsg(2) @@ -934,10 +960,13 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig) counter1 ! Stop val probe = TestProbe() - awaitAssert({ - counter1.tell(Identify(1), probe.ref) - probe.expectMsgType[ActorIdentity](1 second).ref should not be (None) - }, 5.seconds, 500.millis) + awaitAssert( + { + counter1.tell(Identify(1), probe.ref) + probe.expectMsgType[ActorIdentity](1 second).ref should not be None + }, + 5.seconds, + 500.millis) } enterBarrier("after-14") @@ -945,7 +974,7 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig) "be migrated to new regions upon region failure" in within(15.seconds) { - //Start only one region, and force an entity onto that region + // Start only one region, and force an entity onto that region runOn(third) { autoMigrateRegion ! EntityEnvelope(1, Increment) autoMigrateRegion ! 
Get(1) @@ -953,7 +982,7 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig) } enterBarrier("shard1-region3") - //Start another region and test it talks to node 3 + // Start another region and test it talks to node 3 runOn(fourth) { autoMigrateRegion ! EntityEnvelope(1, Increment) @@ -961,20 +990,23 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig) expectMsg(2) lastSender.path should ===(node(third) / "user" / "AutoMigrateRememberRegionTestRegion" / "1" / "1") - //Kill region 3 + // Kill region 3 system.actorSelection(lastSender.path.parent.parent) ! PoisonPill } enterBarrier("region4-up") // Wait for migration to happen - //Test the shard, thus counter was moved onto node 4 and started. + // Test the shard, thus counter was moved onto node 4 and started. runOn(fourth) { val counter1 = system.actorSelection(system / "AutoMigrateRememberRegionTestRegion" / "1" / "1") val probe = TestProbe() - awaitAssert({ - counter1.tell(Identify(1), probe.ref) - probe.expectMsgType[ActorIdentity](1 second).ref should not be (None) - }, 5.seconds, 500 millis) + awaitAssert( + { + counter1.tell(Identify(1), probe.ref) + probe.expectMsgType[ActorIdentity](1 second).ref should not be None + }, + 5.seconds, + 500 millis) counter1 ! Get(1) expectMsg(2) @@ -1009,11 +1041,11 @@ abstract class ClusterShardingSpec(multiNodeConfig: ClusterShardingSpecConfig) entity ! 
Identify(n) receiveOne(3 seconds) match { case ActorIdentity(id, Some(_)) if id == n => count = count + 1 - case ActorIdentity(_, None) => //Not on the fifth shard + case ActorIdentity(_, None) => // Not on the fifth shard case _ => fail() } } - count should be >= (2) + count should be >= 2 } } diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ExternalShardAllocationSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ExternalShardAllocationSpec.scala index ebdce3fd299..6e10bb31746 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ExternalShardAllocationSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ExternalShardAllocationSpec.scala @@ -16,8 +16,7 @@ import akka.cluster.sharding.external.{ ExternalShardAllocation, ExternalShardAl import akka.serialization.jackson.CborSerializable import akka.testkit.{ ImplicitSender, TestProbe } -object ExternalShardAllocationSpecConfig - extends MultiNodeClusterShardingConfig(additionalConfig = """ +object ExternalShardAllocationSpecConfig extends MultiNodeClusterShardingConfig(additionalConfig = """ akka.cluster.sharding { retry-interval = 2000ms waiting-for-state-timeout = 2000ms @@ -42,8 +41,8 @@ object ExternalShardAllocationSpec { case class Get(id: String) extends CborSerializable case class Home(address: Address) extends CborSerializable - val extractEntityId: ShardRegion.ExtractEntityId = { - case g @ Get(id) => (id, g) + val extractEntityId: ShardRegion.ExtractEntityId = { case g @ Get(id) => + (id, g) } // shard == id to make testing easier @@ -59,9 +58,8 @@ object ExternalShardAllocationSpec { log.info("Started on {}", selfAddress) - override def receive: Receive = { - case Get(_) => - sender() ! Home(selfAddress) + override def receive: Receive = { case Get(_) => + sender() ! 
Home(selfAddress) } } } @@ -123,10 +121,12 @@ abstract class ExternalShardAllocationSpec runOn(second, third) { val probe = TestProbe() - awaitAssert({ - shardRegion.tell(Get(shardToSpecifyLocation), probe.ref) - probe.expectMsg(Home(address(first))) - }, 10.seconds) + awaitAssert( + { + shardRegion.tell(Get(shardToSpecifyLocation), probe.ref) + probe.expectMsg(Home(address(first))) + }, + 10.seconds) } enterBarrier("shard-allocated-to-specific-node") } @@ -144,10 +144,12 @@ abstract class ExternalShardAllocationSpec } enterBarrier("forth-node-joined") runOn(first, second, third) { - awaitAssert({ - shardRegion ! Get(initiallyOnForth) - expectMsg(Home(address(forth))) - }, 10.seconds) + awaitAssert( + { + shardRegion ! Get(initiallyOnForth) + expectMsg(Home(address(forth))) + }, + 10.seconds) } enterBarrier("shard-allocated-to-forth") } @@ -159,10 +161,12 @@ abstract class ExternalShardAllocationSpec } enterBarrier("shard-moved-from-forth-to-first") runOn(first, second, third, forth) { - awaitAssert({ - shardRegion ! Get(initiallyOnForth) - expectMsg(Home(address(first))) - }, 10.seconds) + awaitAssert( + { + shardRegion ! 
Get(initiallyOnForth) + expectMsg(Home(address(first))) + }, + 10.seconds) } enterBarrier("finished") } diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiDcClusterShardingSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiDcClusterShardingSpec.scala index b88fdecfb25..5c37289276d 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiDcClusterShardingSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiDcClusterShardingSpec.scala @@ -35,8 +35,8 @@ object MultiDcClusterShardingSpec { } } - val extractEntityId: ShardRegion.ExtractEntityId = { - case m: EntityMsg => (m.id, m) + val extractEntityId: ShardRegion.ExtractEntityId = { case m: EntityMsg => + (m.id, m) } val extractShardId: ShardRegion.ExtractShardId = { @@ -47,7 +47,7 @@ object MultiDcClusterShardingSpec { object MultiDcClusterShardingSpecConfig extends MultiNodeClusterShardingConfig( - loglevel = "DEBUG", //issue #23741 + loglevel = "DEBUG", // issue #23741 additionalConfig = """ akka.cluster { debug.verbose-heartbeat-logging = on @@ -112,11 +112,13 @@ abstract class MultiDcClusterShardingSpec if (a.hasLocalScope) Cluster(system).selfAddress else a private def assertCurrentRegions(expected: Set[Address]): Unit = { - awaitAssert({ - val p = TestProbe() - region.tell(GetCurrentRegions, p.ref) - p.expectMsg(CurrentRegions(expected)) - }, 10.seconds) + awaitAssert( + { + val p = TestProbe() + region.tell(GetCurrentRegions, p.ref) + p.expectMsg(CurrentRegions(expected)) + }, + 10.seconds) } "Cluster sharding in multi data center cluster" must { @@ -126,12 +128,14 @@ abstract class MultiDcClusterShardingSpec join(third, first) join(fourth, first) - awaitAssert({ - withClue(s"Members: ${Cluster(system).state}") { - Cluster(system).state.members.size should ===(4) - Cluster(system).state.members.unsorted.map(_.status) should ===(Set(MemberStatus.Up)) - } - }, 10.seconds) + awaitAssert( + { + 
withClue(s"Members: ${Cluster(system).state}") { + Cluster(system).state.members.size should ===(4) + Cluster(system).state.members.unsorted.map(_.status) should ===(Set(MemberStatus.Up)) + } + }, + 10.seconds) runOn(first, second) { assertCurrentRegions(Set(first, second).map(r => node(r).address)) diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiNodeClusterShardingSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiNodeClusterShardingSpec.scala index 1d0e0ed9880..9b833043750 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiNodeClusterShardingSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiNodeClusterShardingSpec.scala @@ -31,8 +31,8 @@ object MultiNodeClusterShardingSpec { class EntityActor(probe: ActorRef) extends Actor { probe ! EntityActor.Started(self) - def receive: Receive = { - case m => sender() ! m + def receive: Receive = { case m => + sender() ! 
m } } @@ -63,8 +63,8 @@ object MultiNodeClusterShardingSpec { } } - val intExtractEntityId: ShardRegion.ExtractEntityId = { - case id: Int => (id.toString, id) + val intExtractEntityId: ShardRegion.ExtractEntityId = { case id: Int => + (id.toString, id) } val intExtractShardId: ShardRegion.ExtractShardId = msg => msg match { @@ -174,8 +174,8 @@ abstract class MultiNodeClusterShardingSpec(val config: MultiNodeClusterSharding protected def isDdataMode = mode == ClusterShardingSettings.StateStoreModeDData protected def persistenceIsNeeded: Boolean = mode == ClusterShardingSettings.StateStoreModePersistence || - system.settings.config - .getString("akka.cluster.sharding.remember-entities-store") == ClusterShardingSettings.RememberEntitiesStoreEventsourced + system.settings.config.getString( + "akka.cluster.sharding.remember-entities-store") == ClusterShardingSettings.RememberEntitiesStoreEventsourced protected def setStoreIfNeeded(sys: ActorSystem, storeOn: RoleName): Unit = if (persistenceIsNeeded) setStore(sys, storeOn) diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/RollingUpdateShardAllocationSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/RollingUpdateShardAllocationSpec.scala index 6955a90e1ef..546a8e22bb9 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/RollingUpdateShardAllocationSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/RollingUpdateShardAllocationSpec.scala @@ -17,9 +17,7 @@ import akka.cluster.MemberStatus.Up import akka.serialization.jackson.CborSerializable import akka.testkit.ImplicitSender -object RollingUpdateShardAllocationSpecConfig - extends MultiNodeClusterShardingConfig( - additionalConfig = """ +object RollingUpdateShardAllocationSpecConfig extends MultiNodeClusterShardingConfig(additionalConfig = """ akka.cluster.sharding { # speed up forming and handovers a bit retry-interval = 500ms @@ -52,8 +50,8 @@ object 
RollingUpdateShardAllocationSpec { case class Get(id: String) extends CborSerializable case class Home(address: Address) extends CborSerializable - val extractEntityId: ShardRegion.ExtractEntityId = { - case g @ Get(id) => (id, g) + val extractEntityId: ShardRegion.ExtractEntityId = { case g @ Get(id) => + (id, g) } // shard == id to make testing easier @@ -70,9 +68,8 @@ object RollingUpdateShardAllocationSpec { log.info("Started on {}", selfAddress) - override def receive: Receive = { - case Get(_) => - sender() ! Home(selfAddress) + override def receive: Receive = { case Get(_) => + sender() ! Home(selfAddress) } } } @@ -113,7 +110,7 @@ abstract class RollingUpdateShardAllocationSpec // so the folloing allocations end up as one on each node awaitAssert { shardRegion ! ShardRegion.GetCurrentRegions - expectMsgType[ShardRegion.CurrentRegions].regions should have size (2) + expectMsgType[ShardRegion.CurrentRegions].regions should have size 2 } shardRegion ! GiveMeYourHome.Get("id1") @@ -125,7 +122,7 @@ abstract class RollingUpdateShardAllocationSpec val address2 = expectMsgType[GiveMeYourHome.Home].address // one on each node - Set(address1, address2) should have size (2) + Set(address1, address2) should have size 2 } enterBarrier("first-version-started") } @@ -142,7 +139,7 @@ abstract class RollingUpdateShardAllocationSpec // if we didn't the strategy will default it back to the old nodes awaitAssert { shardRegion ! ShardRegion.GetCurrentRegions - expectMsgType[ShardRegion.CurrentRegions].regions should have size (3) + expectMsgType[ShardRegion.CurrentRegions].regions should have size 3 } } enterBarrier("third-region-registered") @@ -174,10 +171,12 @@ abstract class RollingUpdateShardAllocationSpec enterBarrier("first-left") runOn(second, third, fourth) { - awaitAssert({ - shardRegion ! ShardRegion.GetCurrentRegions - expectMsgType[ShardRegion.CurrentRegions].regions should have size (3) - }, 30.seconds) + awaitAssert( + { + shardRegion ! 
ShardRegion.GetCurrentRegions + expectMsgType[ShardRegion.CurrentRegions].regions should have size 3 + }, + 30.seconds) } enterBarrier("sharding-handed-off") @@ -199,10 +198,12 @@ abstract class RollingUpdateShardAllocationSpec } runOn(third, fourth) { // make sure coordinator has noticed there are only two regions - awaitAssert({ - shardRegion ! ShardRegion.GetCurrentRegions - expectMsgType[ShardRegion.CurrentRegions].regions should have size (2) - }, 30.seconds) + awaitAssert( + { + shardRegion ! ShardRegion.GetCurrentRegions + expectMsgType[ShardRegion.CurrentRegions].regions should have size 2 + }, + 30.seconds) } enterBarrier("second-left") diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ClusterShardingInternalsSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ClusterShardingInternalsSpec.scala index 81459831a1f..56afa33ee9f 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ClusterShardingInternalsSpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ClusterShardingInternalsSpec.scala @@ -17,8 +17,7 @@ import akka.testkit.WithLogCapturing object ClusterShardingInternalsSpec { case class HandOffStopMessage() extends NoSerializationVerificationNeeded class EmptyHandlerActor extends Actor { - override def receive: Receive = { - case _ => + override def receive: Receive = { case _ => } override def postStop(): Unit = { @@ -27,14 +26,16 @@ object ClusterShardingInternalsSpec { } } -class ClusterShardingInternalsSpec extends AkkaSpec(""" +class ClusterShardingInternalsSpec + extends AkkaSpec(""" |akka.actor.provider = cluster |akka.remote.artery.canonical.port = 0 |akka.loglevel = DEBUG |akka.cluster.sharding.verbose-debug-logging = on |akka.cluster.sharding.fail-on-invalid-entity-state-transition = on |akka.loggers = ["akka.testkit.SilenceAllTestEventListener"] - |""".stripMargin) with WithLogCapturing { + |""".stripMargin) + with WithLogCapturing { import 
ClusterShardingInternalsSpec._ case class StartingProxy( diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ClusterShardingLeaseSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ClusterShardingLeaseSpec.scala index 59b233eb0a7..08386a9b708 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ClusterShardingLeaseSpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ClusterShardingLeaseSpec.scala @@ -19,7 +19,8 @@ import akka.testkit.{ AkkaSpec, ImplicitSender, WithLogCapturing } import akka.testkit.TestActors.EchoActor object ClusterShardingLeaseSpec { - val config = ConfigFactory.parseString(""" + val config = ConfigFactory + .parseString(""" akka.loglevel = DEBUG akka.loggers = ["akka.testkit.SilenceAllTestEventListener"] akka.actor.provider = "cluster" @@ -33,7 +34,8 @@ object ClusterShardingLeaseSpec { verbose-debug-logging = on fail-on-invalid-entity-state-transition = on } - """).withFallback(TestLease.config) + """) + .withFallback(TestLease.config) val persistenceConfig = ConfigFactory.parseString(""" akka.cluster.sharding { @@ -48,8 +50,8 @@ object ClusterShardingLeaseSpec { } """) - val extractEntityId: ShardRegion.ExtractEntityId = { - case msg: Int => (msg.toString, msg) + val extractEntityId: ShardRegion.ExtractEntityId = { case msg: Int => + (msg.toString, msg) } val numOfShards = 10 @@ -136,10 +138,12 @@ class ClusterShardingLeaseSpec(config: Config, rememberEntities: Boolean) testLease.initialPromise.complete(Success(true)) expectMsg(4) testLease.getCurrentCallback()(Option(LeaseFailed("oh dear"))) - awaitAssert({ - region ! 4 - expectMsg(4) - }, max = 10.seconds) + awaitAssert( + { + region ! 
4 + expectMsg(4) + }, + max = 10.seconds) } } } diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ClusterShardingSettingsSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ClusterShardingSettingsSpec.scala index aef4676809a..41f145e8b61 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ClusterShardingSettingsSpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ClusterShardingSettingsSpec.scala @@ -757,8 +757,8 @@ class ClusterShardingSettingsSpec extends AnyWordSpec with Matchers { .withFilter( ClusterShardingSettings.PassivationStrategySettings.AdmissionSettings.FrequencySketchSettings.defaults)) .withReplacementPolicy( - ClusterShardingSettings.PassivationStrategySettings.LeastRecentlyUsedSettings.defaults.withSegmented( - proportions = List(0.2, 0.8))) + ClusterShardingSettings.PassivationStrategySettings.LeastRecentlyUsedSettings.defaults + .withSegmented(proportions = List(0.2, 0.8))) .withIdleEntityPassivation(timeout = 42.minutes)) .passivationStrategy shouldBe ClusterShardingSettings.CompositePassivationStrategy( limit = 42000, @@ -799,8 +799,9 @@ class ClusterShardingSettingsSpec extends AnyWordSpec with Matchers { "disable automatic passivation if idle timeout is set to zero (via factory method)" in { defaultSettings - .withPassivationStrategy(ClusterShardingSettings.PassivationStrategySettings.defaults.withIdleEntityPassivation( - timeout = Duration.Zero)) + .withPassivationStrategy( + ClusterShardingSettings.PassivationStrategySettings.defaults.withIdleEntityPassivation(timeout = + Duration.Zero)) .passivationStrategy shouldBe ClusterShardingSettings.NoPassivationStrategy } diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ConcurrentStartupShardingSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ConcurrentStartupShardingSpec.scala index aee932d7678..b5a219a08a8 100644 --- 
a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ConcurrentStartupShardingSpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ConcurrentStartupShardingSpec.scala @@ -47,14 +47,18 @@ object ConcurrentStartupShardingSpec { override def preStart(): Unit = { val region = - ClusterSharding(context.system).start(s"type-$n", Props.empty, ClusterShardingSettings(context.system), { - case msg => (msg.toString, msg) - }, _ => "1") + ClusterSharding(context.system).start( + s"type-$n", + Props.empty, + ClusterShardingSettings(context.system), + { case msg => + (msg.toString, msg) + }, + _ => "1") probe ! region } - def receive = { - case _ => + def receive = { case _ => } } } diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/CoordinatedShutdownShardingSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/CoordinatedShutdownShardingSpec.scala index b61f831bd5c..04c81c16d28 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/CoordinatedShutdownShardingSpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/CoordinatedShutdownShardingSpec.scala @@ -29,8 +29,8 @@ object CoordinatedShutdownShardingSpec { akka.cluster.sharding.verbose-debug-logging = on """ - val extractEntityId: ShardRegion.ExtractEntityId = { - case msg: Int => (msg.toString, msg) + val extractEntityId: ShardRegion.ExtractEntityId = { case msg: Int => + (msg.toString, msg) } val extractShardId: ShardRegion.ExtractShardId = { @@ -90,17 +90,19 @@ class CoordinatedShutdownShardingSpec extends AkkaSpec(CoordinatedShutdownShardi // Using region 2 as it is not shutdown in either test def pingEntities(): Unit = { - awaitAssert({ - val p1 = TestProbe()(sys2) - region2.tell(1, p1.ref) - p1.expectMsg(1.seconds, 1) - val p2 = TestProbe()(sys2) - region2.tell(2, p2.ref) - p2.expectMsg(1.seconds, 2) - val p3 = TestProbe()(sys2) - region2.tell(3, p3.ref) - p3.expectMsg(1.seconds, 3) - }, 10.seconds) + 
awaitAssert( + { + val p1 = TestProbe()(sys2) + region2.tell(1, p1.ref) + p1.expectMsg(1.seconds, 1) + val p2 = TestProbe()(sys2) + region2.tell(2, p2.ref) + p2.expectMsg(1.seconds, 2) + val p3 = TestProbe()(sys2) + region2.tell(3, p3.ref) + p3.expectMsg(1.seconds, 3) + }, + 10.seconds) } "Sharding and CoordinatedShutdown" must { diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/DeprecatedLeastShardAllocationStrategySpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/DeprecatedLeastShardAllocationStrategySpec.scala index 96eb83c1b91..db33a0d084a 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/DeprecatedLeastShardAllocationStrategySpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/DeprecatedLeastShardAllocationStrategySpec.scala @@ -201,7 +201,9 @@ class DeprecatedLeastShardAllocationStrategySpec extends AkkaSpec { fakeRegionC, // newest version, up fakeRegionD, // most shards, up fakeLocalRegion, // old app version - fakeRegionA)) // leaving + fakeRegionA + ) + ) // leaving } "not rebalance when rolling update in progress" in { diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/EntityTerminationSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/EntityTerminationSpec.scala index 82d7592140a..ac1a6af1e41 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/EntityTerminationSpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/EntityTerminationSpec.scala @@ -56,8 +56,8 @@ class EntityTerminationSpec extends AkkaSpec(EntityTerminationSpec.config) with import EntityTerminationSpec._ - val extractEntityId: ShardRegion.ExtractEntityId = { - case EntityEnvelope(id, payload) => (id.toString, payload) + val extractEntityId: ShardRegion.ExtractEntityId = { case EntityEnvelope(id, payload) => + (id.toString, payload) } val extractShardId: ShardRegion.ExtractShardId = { @@ -97,7 +97,7 @@ class 
EntityTerminationSpec extends AkkaSpec(EntityTerminationSpec.config) with Thread.sleep(400) // restart backoff is 250 ms sharding ! ShardRegion.GetShardRegionState val regionState = expectMsgType[ShardRegion.CurrentShardRegionState] - regionState.shards should have size (1) + regionState.shards should have size 1 regionState.shards.head.entityIds should be(Set("2")) // make sure the shard didn't crash (coverage for regression bug #29383) @@ -122,12 +122,14 @@ class EntityTerminationSpec extends AkkaSpec(EntityTerminationSpec.config) with expectTerminated(entity) Thread.sleep(400) // restart backoff is 250 ms - awaitAssert({ - sharding ! ShardRegion.GetShardRegionState - val regionState = expectMsgType[ShardRegion.CurrentShardRegionState] - regionState.shards should have size (1) - regionState.shards.head.entityIds should have size (1) - }, 2.seconds) + awaitAssert( + { + sharding ! ShardRegion.GetShardRegionState + val regionState = expectMsgType[ShardRegion.CurrentShardRegionState] + regionState.shards should have size 1 + regionState.shards.head.entityIds should have size 1 + }, + 2.seconds) } "allow terminating entity to passivate if remembering entities" in { @@ -149,8 +151,8 @@ class EntityTerminationSpec extends AkkaSpec(EntityTerminationSpec.config) with sharding ! 
ShardRegion.GetShardRegionState val regionState = expectMsgType[ShardRegion.CurrentShardRegionState] - regionState.shards should have size (1) - regionState.shards.head.entityIds should have size (0) + regionState.shards should have size 1 + regionState.shards.head.entityIds should have size 0 } diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/LeastShardAllocationStrategyRandomizedSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/LeastShardAllocationStrategyRandomizedSpec.scala index 667168c4448..65bf8e8f837 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/LeastShardAllocationStrategyRandomizedSpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/LeastShardAllocationStrategyRandomizedSpec.scala @@ -30,9 +30,8 @@ class LeastShardAllocationStrategyRandomizedSpec extends AkkaSpec("akka.loglevel @volatile var clusterMembers: SortedSet[Member] = SortedSet.empty def createAllocations(countPerRegion: Map[ActorRef, Int]): Map[ActorRef, immutable.IndexedSeq[ShardId]] = { - countPerRegion.map { - case (region, count) => - region -> (1 to count).map(n => ("00" + n.toString).takeRight(3)).map(n => s"${region.path.name}-$n").toVector + countPerRegion.map { case (region, count) => + region -> (1 to count).map(n => ("00" + n.toString).takeRight(3)).map(n => s"${region.path.name}-$n").toVector } } diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/LeastShardAllocationStrategySpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/LeastShardAllocationStrategySpec.scala index d33549476ed..762dc253a8a 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/LeastShardAllocationStrategySpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/LeastShardAllocationStrategySpec.scala @@ -39,14 +39,13 @@ object LeastShardAllocationStrategySpec { allocationStrategy: ShardAllocationStrategy, allocations: Map[ActorRef, 
immutable.IndexedSeq[ShardId]], rebalance: Set[ShardId]): Map[ActorRef, immutable.IndexedSeq[ShardId]] = { - val allocationsAfterRemoval = allocations.map { - case (region, shards) => region -> shards.filterNot(rebalance) + val allocationsAfterRemoval = allocations.map { case (region, shards) => + region -> shards.filterNot(rebalance) } - rebalance.toList.sorted.foldLeft(allocationsAfterRemoval) { - case (acc, shard) => - val region = allocationStrategy.allocateShard(DummyActorRef, shard, acc).value.get.get - acc.updated(region, acc(region) :+ shard) + rebalance.toList.sorted.foldLeft(allocationsAfterRemoval) { case (acc, shard) => + val region = allocationStrategy.allocateShard(DummyActorRef, shard, acc).value.get.get + acc.updated(region, acc(region) :+ shard) } } @@ -256,7 +255,9 @@ class LeastShardAllocationStrategySpec extends AkkaSpec { fakeRegionC, // newest version, up fakeRegionD, // most shards, up fakeLocalRegion, // old app version - fakeRegionA)) // leaving + fakeRegionA + ) + ) // leaving } "not rebalance when rolling update in progress" in { diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/PersistentShardingMigrationSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/PersistentShardingMigrationSpec.scala index d508e43db9d..61f61d55dc2 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/PersistentShardingMigrationSpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/PersistentShardingMigrationSpec.scala @@ -47,8 +47,8 @@ object PersistentShardingMigrationSpec { akka.persistence.journal.plugin = "akka.persistence.journal.leveldb" akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" akka.persistence.snapshot-store.local.dir = "target/PersistentShardingMigrationSpec-${UUID - .randomUUID() - .toString}" + .randomUUID() + .toString}" akka.persistence.journal.leveldb { native = off dir = 
"target/journal-PersistentShardingMigrationSpec-${UUID.randomUUID()}" @@ -80,17 +80,15 @@ object PersistentShardingMigrationSpec { class PA extends PersistentActor { override def persistenceId: String = "pa-" + self.path.name - override def receiveRecover: Receive = { - case _ => + override def receiveRecover: Receive = { case _ => } - override def receiveCommand: Receive = { - case _ => - sender() ! "ack" + override def receiveCommand: Receive = { case _ => + sender() ! "ack" } } - val extractEntityId: ShardRegion.ExtractEntityId = { - case msg @ Message(id) => (id.toString, msg) + val extractEntityId: ShardRegion.ExtractEntityId = { case msg @ Message(id) => + (id.toString, msg) } def extractShardId(probe: ActorRef): ShardRegion.ExtractShardId = { @@ -180,7 +178,7 @@ class PersistentShardingMigrationSpec extends AkkaSpec(PersistentShardingMigrati def assertRegionRegistrationComplete(region: ActorRef): Unit = { awaitAssert { region ! ShardRegion.GetCurrentRegions - expectMsgType[CurrentRegions].regions should have size (1) + expectMsgType[CurrentRegions].regions should have size 1 } } } diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ProxyShardingSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ProxyShardingSpec.scala index 9d285779274..f8c431d5be5 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ProxyShardingSpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ProxyShardingSpec.scala @@ -34,8 +34,8 @@ class ProxyShardingSpec extends AkkaSpec(ProxyShardingSpec.config) with WithLogC override def entityId(message: Any) = "dummyId" } - val idExtractor: ShardRegion.ExtractEntityId = { - case msg => (msg.toString, msg) + val idExtractor: ShardRegion.ExtractEntityId = { case msg => + (msg.toString, msg) } val shardResolver: ShardRegion.ExtractShardId = { diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/RememberEntitiesFailureSpec.scala 
b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/RememberEntitiesFailureSpec.scala index 3717914f164..d51cda51dd7 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/RememberEntitiesFailureSpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/RememberEntitiesFailureSpec.scala @@ -271,10 +271,12 @@ class RememberEntitiesFailureSpec stopFailingProbe.expectMsg(Done) // it takes a while - timeout hits and then backoff - awaitAssert({ - sharding.tell(EntityEnvelope(11, "hello-11-2"), probe.ref) - probe.expectMsg("hello-11-2") - }, 10.seconds) + awaitAssert( + { + sharding.tell(EntityEnvelope(11, "hello-11-2"), probe.ref) + probe.expectMsg("hello-11-2") + }, + 10.seconds) system.stop(sharding) } @@ -307,10 +309,12 @@ class RememberEntitiesFailureSpec storeProbe.expectMsg(Done) // it takes a while - timeout hits and then backoff - awaitAssert({ - sharding.tell(EntityEnvelope(1, "hello-2"), probe.ref) - probe.expectMsg("hello-2") - }, 10.seconds) + awaitAssert( + { + sharding.tell(EntityEnvelope(1, "hello-2"), probe.ref) + probe.expectMsg("hello-2") + }, + 10.seconds) system.stop(sharding) } @@ -348,10 +352,12 @@ class RememberEntitiesFailureSpec } // it takes a while? 
- awaitAssert({ - sharding.tell(EntityEnvelope(1, "hello-2"), probe.ref) - probe.expectMsg("hello-2") - }, 5.seconds) + awaitAssert( + { + sharding.tell(EntityEnvelope(1, "hello-2"), probe.ref) + probe.expectMsg("hello-2") + }, + 5.seconds) system.stop(sharding) } @@ -387,10 +393,12 @@ class RememberEntitiesFailureSpec coordinatorStore.tell(FakeCoordinatorStoreActor.ClearFailShard("1"), storeProbe.ref) storeProbe.expectMsg(Done) - probe.awaitAssert({ - sharding.tell(EntityEnvelope(1, "hello-2"), probe.ref) - probe.expectMsg("hello-2") // should now work again - }, 5.seconds) + probe.awaitAssert( + { + sharding.tell(EntityEnvelope(1, "hello-2"), probe.ref) + probe.expectMsg("hello-2") // should now work again + }, + 5.seconds) system.stop(sharding) } diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/RememberEntitiesShardIdExtractorChangeSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/RememberEntitiesShardIdExtractorChangeSpec.scala index bcfb80abbc9..03b1db863a4 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/RememberEntitiesShardIdExtractorChangeSpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/RememberEntitiesShardIdExtractorChangeSpec.scala @@ -39,8 +39,8 @@ object RememberEntitiesShardIdExtractorChangeSpec { akka.persistence.journal.plugin = "akka.persistence.journal.leveldb" akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" akka.persistence.snapshot-store.local.dir = "target/RememberEntitiesShardIdExtractorChangeSpec-${UUID - .randomUUID() - .toString}" + .randomUUID() + .toString}" akka.persistence.journal.leveldb { native = off dir = "target/journal-PersistentShardingMigrationSpec-${UUID.randomUUID()}" @@ -51,12 +51,10 @@ object RememberEntitiesShardIdExtractorChangeSpec { class PA extends PersistentActor { override def persistenceId: String = "pa-" + self.path.name - override def receiveRecover: Receive = { - case _ => + override def 
receiveRecover: Receive = { case _ => } - override def receiveCommand: Receive = { - case _ => - sender() ! "ack" + override def receiveCommand: Receive = { case _ => + sender() ! "ack" } } @@ -141,7 +139,7 @@ class RememberEntitiesShardIdExtractorChangeSpec def assertRegionRegistrationComplete(region: ActorRef): Unit = { awaitAssert { region ! ShardRegion.GetCurrentRegions - expectMsgType[CurrentRegions].regions should have size (1) + expectMsgType[CurrentRegions].regions should have size 1 } } } diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/RemoveInternalClusterShardingDataSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/RemoveInternalClusterShardingDataSpec.scala index 063623cb1a5..1df3edb3e28 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/RemoveInternalClusterShardingDataSpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/RemoveInternalClusterShardingDataSpec.scala @@ -70,8 +70,7 @@ object RemoveInternalClusterShardingDataSpec { case _ => } - override def receiveCommand: Receive = { - case _ => + override def receiveCommand: Receive = { case _ => } } @@ -89,8 +88,7 @@ object RemoveInternalClusterShardingDataSpec { context.stop(self) } - override def receiveCommand: Receive = { - case _ => + override def receiveCommand: Receive = { case _ => } } diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ShardRegionSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ShardRegionSpec.scala index 9e529eca0c1..2fa5c18cba0 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ShardRegionSpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ShardRegionSpec.scala @@ -57,8 +57,8 @@ object ShardRegionSpec { } class EntityActor extends Actor with ActorLogging { - override def receive: Receive = { - case msg => sender() ! msg + override def receive: Receive = { case msg => + sender() ! 
msg } } } @@ -139,18 +139,17 @@ class ShardRegionSpec extends AkkaSpec(ShardRegionSpec.config) with WithLogCaptu def statesFor(region: ActorRef, probe: TestProbe, expect: Int) = { region.tell(ShardRegion.GetShardRegionState, probe.ref) probe - .receiveWhile(messages = expect) { - case e: ShardRegion.CurrentShardRegionState => - e.failed.isEmpty shouldEqual true - e.shards.map(_.shardId) + .receiveWhile(messages = expect) { case e: ShardRegion.CurrentShardRegionState => + e.failed.isEmpty shouldEqual true + e.shards.map(_.shardId) } .flatten } def awaitRebalance(region: ActorRef, msg: Int, probe: TestProbe): Boolean = { region.tell(msg, probe.ref) - probe.expectMsgPF(2.seconds) { - case id => if (id == msg) true else awaitRebalance(region, msg, probe) + probe.expectMsgPF(2.seconds) { case id => + if (id == msg) true else awaitRebalance(region, msg, probe) } } diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ShardWithLeaseSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ShardWithLeaseSpec.scala index 489a003da7f..83f2bca0e8b 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ShardWithLeaseSpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ShardWithLeaseSpec.scala @@ -40,10 +40,9 @@ object ShardWithLeaseSpec { """ class EntityActor extends Actor with ActorLogging { - override def receive: Receive = { - case msg => - log.info("Msg {}", msg) - sender() ! s"ack ${msg}" + override def receive: Receive = { case msg => + log.info("Msg {}", msg) + sender() ! s"ack ${msg}" } } @@ -127,7 +126,7 @@ class ShardWithLeaseSpec extends AkkaSpec(ShardWithLeaseSpec.config) with WithLo .error( start = s"$typeName: Shard id [1] lease lost, stopping shard and killing [1] entities. 
Reason for losing lease: ${classOf[ - BadLease].getName}: bye bye lease", + BadLease].getName}: bye bye lease", occurrences = 1) .intercept { lease.getCurrentCallback().apply(Some(BadLease("bye bye lease"))) diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/StartEntitySpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/StartEntitySpec.scala index 6d88c16b91a..b01ef51c7f4 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/StartEntitySpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/StartEntitySpec.scala @@ -17,9 +17,7 @@ import akka.testkit.AkkaSpec import akka.testkit.ImplicitSender import akka.testkit.WithLogCapturing -/** - * Covers some corner cases around sending triggering an entity with StartEntity - */ +/** Covers some corner cases around sending triggering an entity with StartEntity */ object StartEntitySpec { final case class EntityEnvelope(id: String, msg: Any) @@ -107,12 +105,12 @@ class StartEntitySpec extends AkkaSpec(StartEntitySpec.config) with ImplicitSend // should trigger start of entity again, and an ack expectMsg(ShardRegion.StartEntityAck("1", "1")) - awaitAssert({ + awaitAssert { sharding ! ShardRegion.GetShardRegionState val state = expectMsgType[ShardRegion.CurrentShardRegionState] - state.shards should have size (1) + state.shards should have size 1 state.shards.head.entityIds should ===(Set("1")) - }) + } } } @@ -133,23 +131,23 @@ class StartEntitySpec extends AkkaSpec(StartEntitySpec.config) with ImplicitSend entity ! "just-stop" // Make sure the shard has processed the termination - awaitAssert({ + awaitAssert { sharding ! 
ShardRegion.GetShardRegionState val state = expectMsgType[ShardRegion.CurrentShardRegionState] - state.shards should have size (1) + state.shards should have size 1 state.shards.head.entityIds should ===(Set.empty[String]) - }) + } // the backoff is 10s by default, so plenty time to // bypass region and send start entity directly to shard system.actorSelection(entity.path.parent) ! ShardRegion.StartEntity("1") expectMsg(ShardRegion.StartEntityAck("1", "1")) - awaitAssert({ + awaitAssert { sharding ! ShardRegion.GetShardRegionState val state = expectMsgType[ShardRegion.CurrentShardRegionState] - state.shards should have size (1) + state.shards should have size 1 state.shards.head.entityIds should ===(Set("1")) - }) + } } } @@ -178,12 +176,12 @@ class StartEntitySpec extends AkkaSpec(StartEntitySpec.config) with ImplicitSend // regardless we should get an ack and the entity should be alive expectMsg(ShardRegion.StartEntityAck("1", "1")) - awaitAssert({ + awaitAssert { sharding ! ShardRegion.GetShardRegionState val state = expectMsgType[ShardRegion.CurrentShardRegionState] - state.shards should have size (1) + state.shards should have size 1 state.shards.head.entityIds should ===(Set("1")) - }) + } } } diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/StopShardsSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/StopShardsSpec.scala index 1c9b03b43f2..e8e6feee359 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/StopShardsSpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/StopShardsSpec.scala @@ -60,10 +60,9 @@ object StopShardsSpec { case class Pong(actorRef: ActorRef) class EntityActor extends Actor with ActorLogging { - override def receive: Receive = { - case _ => - log.debug("ping") - sender() ! context.self + override def receive: Receive = { case _ => + log.debug("ping") + sender() ! 
context.self } } } @@ -93,10 +92,12 @@ class StopShardsSpec extends AkkaSpec(StopShardsSpec.config) with WithLogCapturi awaitAssert(Cluster(sysB).selfMember.status shouldEqual MemberStatus.Up, 3.seconds) // wait for all regions to be registered - pA.awaitAssert({ - regionA.tell(GetCurrentRegions, pA.ref) - pA.expectMsgType[CurrentRegions].regions should have size (2) - }, 10.seconds) + pA.awaitAssert( + { + regionA.tell(GetCurrentRegions, pA.ref) + pA.expectMsgType[CurrentRegions].regions should have size 2 + }, + 10.seconds) } "start entities in a few shards, then stop the shards" in { diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/internal/RememberEntitiesShardStoreSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/internal/RememberEntitiesShardStoreSpec.scala index 9358920b6ea..8cbd9d3972f 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/internal/RememberEntitiesShardStoreSpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/internal/RememberEntitiesShardStoreSpec.scala @@ -16,9 +16,7 @@ import akka.cluster.sharding.ClusterShardingSettings import akka.cluster.sharding.ShardRegion.ShardId import akka.testkit.{ AkkaSpec, ImplicitSender, WithLogCapturing } -/** - * Covers the interaction between the shard and the remember entities store - */ +/** Covers the interaction between the shard and the remember entities store */ object RememberEntitiesShardStoreSpec { def config = ConfigFactory.parseString(s""" @@ -34,8 +32,8 @@ object RememberEntitiesShardStoreSpec { akka.persistence.journal.plugin = "akka.persistence.journal.inmem" akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" akka.persistence.snapshot-store.local.dir = "target/${classOf[RememberEntitiesShardStoreSpec].getName}-${UUID - .randomUUID() - .toString}" + .randomUUID() + .toString}" """.stripMargin) } @@ -92,7 +90,9 @@ abstract class RememberEntitiesShardStoreSpec Thread.sleep(500) 
storeIncarnation3 ! RememberEntitiesShardStore.GetEntities - expectMsgType[RememberEntitiesShardStore.RememberedEntities].entities should ===(Set("1", "2", "4", "5")) // from previous test + expectMsgType[RememberEntitiesShardStore.RememberedEntities].entities should ===( + Set("1", "2", "4", "5") + ) // from previous test } "handle a large batch" in { @@ -102,8 +102,8 @@ abstract class RememberEntitiesShardStoreSpec store ! RememberEntitiesShardStore.Update((1 to 1000).map(_.toString).toSet, (1001 to 2000).map(_.toString).toSet) val response = expectMsgType[RememberEntitiesShardStore.UpdateDone] - response.started should have size (1000) - response.stopped should have size (1000) + response.started should have size 1000 + response.stopped should have size 1000 watch(store) system.stop(store) @@ -111,7 +111,7 @@ abstract class RememberEntitiesShardStoreSpec store = system.actorOf(storeProps("FakeShardIdLarge", "FakeTypeNameLarge", shardingSettings)) store ! RememberEntitiesShardStore.GetEntities - expectMsgType[RememberEntitiesShardStore.RememberedEntities].entities should have size (1000) + expectMsgType[RememberEntitiesShardStore.RememberedEntities].entities should have size 1000 } } diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/CompositeSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/CompositeSpec.scala index 05126bac315..a4159ab75c9 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/CompositeSpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/CompositeSpec.scala @@ -13,7 +13,8 @@ import akka.cluster.sharding.ShardRegion object CompositeSpec { - val admissionWindowAndFilterConfig: Config = ConfigFactory.parseString(""" + val admissionWindowAndFilterConfig: Config = ConfigFactory + .parseString(""" akka.cluster.sharding { passivation { strategy = lru-fs-slru @@ -39,9 +40,11 @@ object CompositeSpec { } } } - 
""").withFallback(EntityPassivationSpec.config) + """) + .withFallback(EntityPassivationSpec.config) - val admissionFilterNoWindowConfig: Config = ConfigFactory.parseString(""" + val admissionFilterNoWindowConfig: Config = ConfigFactory + .parseString(""" akka.cluster.sharding { passivation { strategy = fs-lru @@ -55,9 +58,11 @@ object CompositeSpec { } } } - """).withFallback(EntityPassivationSpec.config) + """) + .withFallback(EntityPassivationSpec.config) - val adaptiveWindowConfig: Config = ConfigFactory.parseString(""" + val adaptiveWindowConfig: Config = ConfigFactory + .parseString(""" akka.cluster.sharding { passivation { strategy = lru-fs-lru-hc @@ -86,9 +91,11 @@ object CompositeSpec { } } } - """).withFallback(EntityPassivationSpec.config) + """) + .withFallback(EntityPassivationSpec.config) - val idleConfig: Config = ConfigFactory.parseString(""" + val idleConfig: Config = ConfigFactory + .parseString(""" akka.cluster.sharding { passivation { strategy = default-strategy @@ -98,7 +105,8 @@ object CompositeSpec { } } } - """).withFallback(EntityPassivationSpec.config) + """) + .withFallback(EntityPassivationSpec.config) } class AdmissionWindowAndFilterSpec diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/EntityPassivationSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/EntityPassivationSpec.scala index 82c123d54ee..779eb5cd945 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/EntityPassivationSpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/EntityPassivationSpec.scala @@ -34,13 +34,15 @@ object EntityPassivationSpec { akka.scheduled-clock-interval = 100 ms """) - val disabledConfig: Config = ConfigFactory.parseString(""" + val disabledConfig: Config = ConfigFactory + .parseString(""" akka.cluster.sharding { passivation { strategy = none } } - """).withFallback(config) + """) + .withFallback(config) object Entity { 
case object Stop @@ -68,8 +70,8 @@ object EntityPassivationSpec { } } - val extractEntityId: ShardRegion.ExtractEntityId = { - case Entity.Envelope(_, id, message) => (id.toString, message) + val extractEntityId: ShardRegion.ExtractEntityId = { case Entity.Envelope(_, id, message) => + (id.toString, message) } val extractShardId: ShardRegion.ExtractShardId = { @@ -112,8 +114,8 @@ abstract class AbstractEntityPassivationSpec(config: Config, expectedEntities: I def expectState(region: ActorRef)(expectedShards: (Int, Iterable[Int])*): Unit = eventually { - getState(region).shards should contain theSameElementsAs expectedShards.map { - case (shardId, entityIds) => ShardRegion.ShardState(shardId.toString, entityIds.map(_.toString).toSet) + getState(region).shards should contain theSameElementsAs expectedShards.map { case (shardId, entityIds) => + ShardRegion.ShardState(shardId.toString, entityIds.map(_.toString).toSet) } } diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/IdleSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/IdleSpec.scala index 9fc6a392bc6..813f4c5f4a1 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/IdleSpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/IdleSpec.scala @@ -10,13 +10,15 @@ import com.typesafe.config.Config import com.typesafe.config.ConfigFactory object IdleSpec { - val config: Config = ConfigFactory.parseString(""" + val config: Config = ConfigFactory + .parseString(""" akka.cluster.sharding { passivation { default-idle-strategy.idle-entity.timeout = 1s } } - """).withFallback(EntityPassivationSpec.config) + """) + .withFallback(EntityPassivationSpec.config) } class IdleSpec extends AbstractEntityPassivationSpec(IdleSpec.config, expectedEntities = 2) { diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/LeastFrequentlyUsedSpec.scala 
b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/LeastFrequentlyUsedSpec.scala index 3635b77db44..8881cdc454f 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/LeastFrequentlyUsedSpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/LeastFrequentlyUsedSpec.scala @@ -13,7 +13,8 @@ import akka.cluster.sharding.ShardRegion object LeastFrequentlyUsedSpec { - val config: Config = ConfigFactory.parseString(""" + val config: Config = ConfigFactory + .parseString(""" akka.cluster.sharding { passivation { strategy = lfu @@ -23,9 +24,11 @@ object LeastFrequentlyUsedSpec { } } } - """).withFallback(EntityPassivationSpec.config) + """) + .withFallback(EntityPassivationSpec.config) - val dynamicAgingConfig: Config = ConfigFactory.parseString(""" + val dynamicAgingConfig: Config = ConfigFactory + .parseString(""" akka.cluster.sharding { passivation { strategy = lfuda @@ -40,9 +43,11 @@ object LeastFrequentlyUsedSpec { } } } - """).withFallback(EntityPassivationSpec.config) + """) + .withFallback(EntityPassivationSpec.config) - val idleConfig: Config = ConfigFactory.parseString(""" + val idleConfig: Config = ConfigFactory + .parseString(""" akka.cluster.sharding { passivation { strategy = lfu-idle @@ -53,7 +58,8 @@ object LeastFrequentlyUsedSpec { } } } - """).withFallback(EntityPassivationSpec.config) + """) + .withFallback(EntityPassivationSpec.config) } class LeastFrequentlyUsedSpec diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/LeastRecentlyUsedSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/LeastRecentlyUsedSpec.scala index fe519952184..c2d61a64f42 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/LeastRecentlyUsedSpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/LeastRecentlyUsedSpec.scala @@ -13,7 +13,8 @@ import 
akka.cluster.sharding.ShardRegion object LeastRecentlyUsedSpec { - val config: Config = ConfigFactory.parseString(""" + val config: Config = ConfigFactory + .parseString(""" akka.cluster.sharding { passivation { strategy = lru @@ -23,9 +24,11 @@ object LeastRecentlyUsedSpec { } } } - """).withFallback(EntityPassivationSpec.config) + """) + .withFallback(EntityPassivationSpec.config) - val segmentedConfig: Config = ConfigFactory.parseString(""" + val segmentedConfig: Config = ConfigFactory + .parseString(""" akka.cluster.sharding { passivation { strategy = slru @@ -43,14 +46,18 @@ object LeastRecentlyUsedSpec { } } } - """).withFallback(EntityPassivationSpec.config) + """) + .withFallback(EntityPassivationSpec.config) val segmentedInitialLimitConfig: Config = - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" akka.cluster.sharding.passivation.slru.active-entity-limit = 20 - """).withFallback(segmentedConfig) + """) + .withFallback(segmentedConfig) - val idleConfig: Config = ConfigFactory.parseString(""" + val idleConfig: Config = ConfigFactory + .parseString(""" akka.cluster.sharding { passivation { strategy = lru-idle @@ -61,7 +68,8 @@ object LeastRecentlyUsedSpec { } } } - """).withFallback(EntityPassivationSpec.config) + """) + .withFallback(EntityPassivationSpec.config) } class LeastRecentlyUsedSpec extends AbstractEntityPassivationSpec(LeastRecentlyUsedSpec.config, expectedEntities = 40) { diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/MostRecentlyUsedSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/MostRecentlyUsedSpec.scala index 01e35302db1..58a797b2da7 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/MostRecentlyUsedSpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/MostRecentlyUsedSpec.scala @@ -13,7 +13,8 @@ import akka.cluster.sharding.ShardRegion object MostRecentlyUsedSpec { - val config: Config 
= ConfigFactory.parseString(""" + val config: Config = ConfigFactory + .parseString(""" akka.cluster.sharding { passivation { strategy = mru @@ -23,9 +24,11 @@ object MostRecentlyUsedSpec { } } } - """).withFallback(EntityPassivationSpec.config) + """) + .withFallback(EntityPassivationSpec.config) - val idleConfig: Config = ConfigFactory.parseString(""" + val idleConfig: Config = ConfigFactory + .parseString(""" akka.cluster.sharding { passivation { strategy = mru-idle @@ -36,7 +39,8 @@ object MostRecentlyUsedSpec { } } } - """).withFallback(EntityPassivationSpec.config) + """) + .withFallback(EntityPassivationSpec.config) } class MostRecentlyUsedSpec extends AbstractEntityPassivationSpec(MostRecentlyUsedSpec.config, expectedEntities = 40) { diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/simulator/AccessPattern.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/simulator/AccessPattern.scala index 811305e8a09..6d26cdfa620 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/simulator/AccessPattern.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/simulator/AccessPattern.scala @@ -29,41 +29,31 @@ abstract class SyntheticGenerator(events: Int) extends AccessPattern { object SyntheticGenerator { import site.ycsb.generator._ - /** - * Generate a sequence of unique id events. - */ + /** Generate a sequence of unique id events. */ final class Sequence(start: Long, events: Int) extends SyntheticGenerator(events) { private val generator = new CounterGenerator(start) override protected def nextValue(event: Int): Long = generator.nextValue() } - /** - * Generate a looping sequence of id events. - */ + /** Generate a looping sequence of id events. 
*/ final class Loop(start: Long, end: Long, events: Int) extends SyntheticGenerator(events) { private val generator = new SequentialGenerator(start, end) override protected def nextValue(event: Int): Long = generator.nextValue().longValue } - /** - * Generate id events randomly using a uniform distribution, from the inclusive range min to max. - */ + /** Generate id events randomly using a uniform distribution, from the inclusive range min to max. */ final class Uniform(min: Long, max: Long, events: Int) extends SyntheticGenerator(events) { private val generator = new UniformLongGenerator(min, max) override protected def nextValue(event: Int): Long = generator.nextValue() } - /** - * Generate id events based on an exponential distribution given the mean (expected value) of the distribution. - */ + /** Generate id events based on an exponential distribution given the mean (expected value) of the distribution. */ final class Exponential(mean: Double, events: Int) extends SyntheticGenerator(events) { private val generator = new ExponentialGenerator(mean) override protected def nextValue(event: Int): Long = generator.nextValue().longValue } - /** - * Generate id events for a hotspot distribution, where x% ('rate') of operations access y% ('hot') of the id space. - */ + /** Generate id events for a hotspot distribution, where x% ('rate') of operations access y% ('hot') of the id space. */ final class Hotspot(min: Long, max: Long, hot: Double, rate: Double, events: Int) extends SyntheticGenerator(events) { private val generator = new HotspotIntegerGenerator(min, max, hot, rate) override protected def nextValue(event: Int): Long = generator.nextValue() @@ -115,16 +105,12 @@ abstract class TraceFileReader(path: String) extends AccessPattern { object TraceFileReader { - /** - * Simple trace file format: entity id per line. - */ + /** Simple trace file format: entity id per line. 
*/ final class Simple(path: String) extends TraceFileReader(path: String) { override def entityIds: Source[EntityId, NotUsed] = lines } - /** - * Text trace file format with a simple word tokenizer for ASCII text. - */ + /** Text trace file format with a simple word tokenizer for ASCII text. */ final class Text(path: String) extends TraceFileReader(path: String) { override def entityIds: Source[EntityId, NotUsed] = lines.mapConcat { line => line.split("[^\\w-]+").filter(_.nonEmpty).map(_.toLowerCase) @@ -144,9 +130,7 @@ object TraceFileReader { } } - /** - * Read binary traces from R3 Corda traces. - */ + /** Read binary traces from R3 Corda traces. */ final class Corda(path: String) extends AccessPattern { override val isSynthetic = false diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/simulator/Simulator.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/simulator/Simulator.scala index 30abc2aa26c..a5bd45ce209 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/simulator/Simulator.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/simulator/Simulator.scala @@ -183,13 +183,12 @@ object Simulator { .map(simulation.strategyCreator.preprocess) // note: mutable state in strategy creator .fold(immutable.Queue.empty[Access])((collected, access) => if (simulation.accessPattern.isSynthetic) collected.enqueue(access) else collected) - .flatMapConcat( - collectedAccesses => - if (simulation.accessPattern.isSynthetic) - Source(collectedAccesses) // use the exact same randomly generated accesses - else - simulation.accessPattern.entityIds.via( // re-read the access pattern - ShardAllocation(simulation.numberOfShards, simulation.numberOfRegions))) + .flatMapConcat(collectedAccesses => + if (simulation.accessPattern.isSynthetic) + Source(collectedAccesses) // use the exact same randomly generated accesses + else + 
simulation.accessPattern.entityIds.via( // re-read the access pattern + ShardAllocation(simulation.numberOfShards, simulation.numberOfRegions))) .via(ShardingState(simulation.strategyCreator)) .runWith(SimulatorStats()) diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/simulator/SimulatorStats.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/simulator/SimulatorStats.scala index 22800edb49d..03224ab6881 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/simulator/SimulatorStats.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/passivation/simulator/SimulatorStats.scala @@ -83,14 +83,18 @@ object DataTable { f"${stats.passivations}%,d") def apply(stats: RegionStats): DataTable = - DataTable(Headers.RegionStats, stats.shardStats.toSeq.sortBy(_._1).flatMap { - case (shardId, stats) => DataTable(stats).rows.map(shardId +: _) - }) + DataTable( + Headers.RegionStats, + stats.shardStats.toSeq.sortBy(_._1).flatMap { case (shardId, stats) => + DataTable(stats).rows.map(shardId +: _) + }) def apply(stats: ShardingStats): DataTable = - DataTable(Headers.ShardingStats, stats.regionStats.toSeq.sortBy(_._1).flatMap { - case (regionId, stats) => DataTable(stats).rows.map(regionId +: _) - }) + DataTable( + Headers.ShardingStats, + stats.regionStats.toSeq.sortBy(_._1).flatMap { case (regionId, stats) => + DataTable(stats).rows.map(regionId +: _) + }) } object PrintData { @@ -133,7 +137,7 @@ object PrintData { columnWidths.map(width => line * (width + 2)).mkString(start, separator, end) + "\n" private def line(row: DataTable.Row, columnWidths: Seq[Int]): String = - row.zip(columnWidths).map({ case (cell, width) => pad(cell, width) }).mkString("║ ", " │ ", " ║") + "\n" + row.zip(columnWidths).map { case (cell, width) => pad(cell, width) }.mkString("║ ", " │ ", " ║") + "\n" private def pad(string: String, width: Int): String = " " * (width - string.length) + string diff 
--git a/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/DistributedPubSubMediator.scala b/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/DistributedPubSubMediator.scala index 32833867ae0..1d5d0f9f957 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/DistributedPubSubMediator.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/DistributedPubSubMediator.scala @@ -73,9 +73,7 @@ object DistributedPubSubSettings { */ def create(config: Config): DistributedPubSubSettings = apply(config) - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] def roleOption(role: String): Option[String] = if (role == "") None else Option(role) } @@ -155,9 +153,7 @@ final class DistributedPubSubSettings( object DistributedPubSubMediator { - /** - * Scala API: Factory method for `DistributedPubSubMediator` [[akka.actor.Props]]. - */ + /** Scala API: Factory method for `DistributedPubSubMediator` [[akka.actor.Props]]. */ def props(settings: DistributedPubSubSettings): Props = Props(new DistributedPubSubMediator(settings)).withDeploy(Deploy.local) @@ -166,14 +162,10 @@ object DistributedPubSubMediator { @SerialVersionUID(1L) final case class Subscribe(topic: String, group: Option[String], ref: ActorRef) { require(topic != null && topic != "", "topic must be defined") - /** - * Convenience constructor with `group` None - */ + /** Convenience constructor with `group` None */ def this(topic: String, ref: ActorRef) = this(topic, None, ref) - /** - * Java API: constructor with group: String - */ + /** Java API: constructor with group: String */ def this(topic: String, group: String, ref: ActorRef) = this(topic, Some(group), ref) } object Subscribe { @@ -205,9 +197,7 @@ object DistributedPubSubMediator { if (msg == null) throw InvalidMessageException("[null] is not an allowed message") - /** - * Convenience constructor with `localAffinity` false - */ + /** Convenience constructor with `localAffinity` false */ def this(path: String, msg: Any) = 
this(path, msg, localAffinity = false) override def message: Any = msg @@ -240,15 +230,11 @@ object DistributedPubSubMediator { */ def getTopicsInstance: GetTopics = GetTopics - /** - * Reply to `GetTopics`. - */ + /** Reply to `GetTopics`. */ @SerialVersionUID(1L) final case class CurrentTopics(topics: Set[String]) { - /** - * Java API - */ + /** Java API */ def getTopics(): java.util.Set[String] = { import akka.util.ccompat.JavaConverters._ topics.asJava @@ -272,9 +258,7 @@ object DistributedPubSubMediator { final case class CountSubscribers(topic: String) - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] object Internal { case object Prune @@ -322,9 +306,7 @@ object DistributedPubSubMediator { */ trait ChildActorTerminationProtocol - /** - * Passivate-like message sent from child to parent, used to signal that sender has no subscribers and no child actors. - */ + /** Passivate-like message sent from child to parent, used to signal that sender has no subscribers and no child actors. */ case object NoMoreSubscribers extends ChildActorTerminationProtocol /** @@ -456,10 +438,9 @@ object DistributedPubSubMediator { } class Group(val emptyTimeToLive: FiniteDuration, routingLogic: RoutingLogic) extends TopicLike { - def business = { - case SendToOneSubscriber(msg) => - if (subscribers.nonEmpty) - Router(routingLogic, subscribers.map(ActorRefRoutee(_)).toVector).route(wrapIfNeeded(msg), sender()) + def business = { case SendToOneSubscriber(msg) => + if (subscribers.nonEmpty) + Router(routingLogic, subscribers.map(ActorRefRoutee(_)).toVector).route(wrapIfNeeded(msg), sender()) } } @@ -480,9 +461,7 @@ object DistributedPubSubMediator { } } -/** - * Marker trait for remote messages with special serializer. - */ +/** Marker trait for remote messages with special serializer. 
*/ trait DistributedPubSubMessage extends Serializable /** @@ -577,7 +556,7 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings) val removedTimeToLiveMillis = removedTimeToLive.toMillis - //Start periodic gossip to random nodes in cluster + // Start periodic gossip to random nodes in cluster import context.dispatcher val gossipTask = context.system.scheduler.scheduleWithFixedDelay(gossipInterval, gossipInterval, self, GossipTick) val pruneInterval: FiniteDuration = removedTimeToLive / 2 @@ -774,13 +753,11 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings) } case _: MemberEvent => // not of interest - case Count => - val count = registry.map { - case (_, bucket) => - bucket.content.count { - case (_, valueHolder) => valueHolder.ref.isDefined - } + val count = registry.map { case (_, bucket) => + bucket.content.count { case (_, valueHolder) => + valueHolder.ref.isDefined + } }.sum sender() ! count @@ -862,7 +839,7 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings) def mkKey(path: ActorPath): String = Internal.mkKey(path) - def myVersions: Map[Address, Long] = registry.map { case (owner, bucket) => (owner -> bucket.version) } + def myVersions: Map[Address, Long] = registry.map { case (owner, bucket) => owner -> bucket.version } def collectDelta(otherVersions: Map[Address, Long]): immutable.Iterable[Bucket] = { // missing entries are represented by version 0 @@ -871,8 +848,8 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings) filledOtherVersions.collect { case (owner, v) if registry(owner).version > v && count < maxDeltaElements => val bucket = registry(owner) - val deltaContent = bucket.content.filter { - case (_, value) => value.version > v + val deltaContent = bucket.content.filter { case (_, value) => + value.version > v } count += deltaContent.size if (count <= maxDeltaElements) @@ -887,13 +864,11 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings) } def 
otherHasNewerVersions(otherVersions: Map[Address, Long]): Boolean = - otherVersions.exists { - case (owner, v) => v > registry(owner).version + otherVersions.exists { case (owner, v) => + v > registry(owner).version } - /** - * Gossip to peer nodes. - */ + /** Gossip to peer nodes. */ def gossip(): Unit = selectRandomNode((nodes - selfAddress).toVector).foreach(gossipTo) def gossipTo(address: Address): Unit = { @@ -905,13 +880,12 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings) if (addresses.isEmpty) None else Some(addresses(ThreadLocalRandom.current.nextInt(addresses.size))) def prune(): Unit = { - registry.foreach { - case (owner, bucket) => - val oldRemoved = bucket.content.collect { - case (key, ValueHolder(version, None)) if (bucket.version - version > removedTimeToLiveMillis) => key - } - if (oldRemoved.nonEmpty) - registry += owner -> bucket.copy(content = bucket.content -- oldRemoved) + registry.foreach { case (owner, bucket) => + val oldRemoved = bucket.content.collect { + case (key, ValueHolder(version, None)) if bucket.version - version > removedTimeToLiveMillis => key + } + if (oldRemoved.nonEmpty) + registry += owner -> bucket.copy(content = bucket.content -- oldRemoved) } } @@ -948,9 +922,7 @@ class DistributedPubSub(system: ExtendedActorSystem) extends Extension { def isTerminated: Boolean = Cluster(system).isTerminated || !settings.role.forall(Cluster(system).selfRoles.contains) - /** - * The [[DistributedPubSubMediator]] - */ + /** The [[DistributedPubSubMediator]] */ val mediator: ActorRef = { if (isTerminated) system.deadLetters diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/PerGroupingBuffer.scala b/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/PerGroupingBuffer.scala index 1984c3c1645..e9159a5ea21 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/PerGroupingBuffer.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/PerGroupingBuffer.scala @@ -30,8 +30,8 @@ 
private[pubsub] trait PerGroupingBuffer { } private def forwardMessages(messages: MessageBuffer, recipient: ActorRef): Unit = { - messages.foreach { - case (message, originalSender) => recipient.tell(message, originalSender) + messages.foreach { case (message, originalSender) => + recipient.tell(message, originalSender) } } diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/protobuf/DistributedPubSubMessageSerializer.scala b/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/protobuf/DistributedPubSubMessageSerializer.scala index 4d1a2d77334..3431988cf0f 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/protobuf/DistributedPubSubMessageSerializer.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/protobuf/DistributedPubSubMessageSerializer.scala @@ -23,9 +23,7 @@ import akka.serialization._ import akka.util.ccompat._ import akka.util.ccompat.JavaConverters._ -/** - * INTERNAL API: Protobuf serializer of DistributedPubSubMediator messages. - */ +/** INTERNAL API: Protobuf serializer of DistributedPubSubMediator messages. 
*/ @ccompatUsedUntil213 private[akka] class DistributedPubSubMessageSerializer(val system: ExtendedActorSystem) extends SerializerWithStringManifest @@ -116,9 +114,8 @@ private[akka] class DistributedPubSubMessageSerializer(val system: ExtendedActor private def statusToProto(status: Status): dm.Status = { val versions = status.versions - .map { - case (a, v) => - dm.Status.Version.newBuilder().setAddress(addressToProto(a)).setTimestamp(v).build() + .map { case (a, v) => + dm.Status.Version.newBuilder().setAddress(addressToProto(a)).setTimestamp(v).build() } .toVector .asJava @@ -139,11 +136,10 @@ private[akka] class DistributedPubSubMessageSerializer(val system: ExtendedActor val buckets = delta.buckets .map { b => val entries = b.content - .map { - case (key, value) => - val b = dm.Delta.Entry.newBuilder().setKey(key).setVersion(value.version) - value.ref.foreach(r => b.setRef(Serialization.serializedActorPath(r))) - b.build() + .map { case (key, value) => + val b = dm.Delta.Entry.newBuilder().setKey(key).setVersion(value.version) + value.ref.foreach(r => b.setRef(Serialization.serializedActorPath(r))) + b.build() } .toVector .asJava diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala index b546c8db0cf..83bc39e27b5 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala @@ -50,8 +50,8 @@ object ClusterSingletonManagerSettings { */ def apply(system: ActorSystem): ClusterSingletonManagerSettings = apply(system.settings.config.getConfig("akka.cluster.singleton")) - // note that this setting has some additional logic inside the ClusterSingletonManager - // falling back to DowningProvider.downRemovalMargin if it is off/Zero + // note that this setting has some additional logic inside the 
ClusterSingletonManager + // falling back to DowningProvider.downRemovalMargin if it is off/Zero .withRemovalMargin(Cluster(system).settings.DownRemovalMargin) /** @@ -89,9 +89,7 @@ object ClusterSingletonManagerSettings { */ def create(config: Config): ClusterSingletonManagerSettings = apply(config) - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] def roleOption(role: String): Option[String] = if (role == "") None else Option(role) @@ -169,16 +167,12 @@ final class ClusterSingletonManagerSettings( new ClusterSingletonManagerSettings(singletonName, role, removalMargin, handOverRetryInterval, leaseSettings) } -/** - * Marker trait for remote messages with special serializer. - */ +/** Marker trait for remote messages with special serializer. */ sealed trait ClusterSingletonMessage extends Serializable object ClusterSingletonManager { - /** - * Scala API: Factory method for `ClusterSingletonManager` [[akka.actor.Props]]. - */ + /** Scala API: Factory method for `ClusterSingletonManager` [[akka.actor.Props]]. */ def props(singletonProps: Props, terminationMessage: Any, settings: ClusterSingletonManagerSettings): Props = Props(new ClusterSingletonManager(singletonProps, terminationMessage, settings)) .withDispatcher(Dispatchers.InternalDispatcherId) @@ -196,9 +190,7 @@ object ClusterSingletonManager { */ sealed trait Data - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] object Internal { /** @@ -267,14 +259,10 @@ object ClusterSingletonManager { object OldestChangedBuffer { - /** - * Request to deliver one more event. - */ + /** Request to deliver one more event. */ case object GetNext - /** - * The first event, corresponding to CurrentClusterState. - */ + /** The first event, corresponding to CurrentClusterState. 
*/ final case class InitialOldestState(oldest: List[UniqueAddress], safeToBeOldest: Boolean) final case class OldestChanged(oldest: Option[UniqueAddress]) @@ -480,7 +468,6 @@ class ClusterSingletonManagerIsStuck(message: String) extends AkkaException(mess * * Not intended for subclassing by user code. * - * * @param singletonProps [[akka.actor.Props]] of the singleton actor instance. * * @param terminationMessage When handing over to a new oldest node @@ -1198,12 +1185,12 @@ class ClusterSingletonManager(singletonProps: Props, terminationMessage: Any, se stay() // silence } - onTransition { - case from -> to => logInfo("ClusterSingletonManager state change [{} -> {}]", from, to) + onTransition { case from -> to => + logInfo("ClusterSingletonManager state change [{} -> {}]", from, to) } - onTransition { - case _ -> BecomingOldest => startSingleTimer(HandOverRetryTimer, HandOverRetry(1), handOverRetryInterval) + onTransition { case _ -> BecomingOldest => + startSingleTimer(HandOverRetryTimer, HandOverRetry(1), handOverRetryInterval) } onTransition { @@ -1218,24 +1205,23 @@ class ClusterSingletonManager(singletonProps: Props, terminationMessage: Any, se logInfo("Releasing lease as leaving AcquiringLease going to [{}]", to) import context.dispatcher lease.foreach(l => - pipe(l.release().map[Any](ReleaseLeaseResult(_)).recover { - case t => ReleaseLeaseFailure(t) + pipe(l.release().map[Any](ReleaseLeaseResult(_)).recover { case t => + ReleaseLeaseFailure(t) }).to(self)) case _ => } } - onTransition { - case Oldest -> _ => - lease.foreach { l => - logInfo("Releasing lease as leaving Oldest") - import context.dispatcher - pipe(l.release().map(ReleaseLeaseResult(_))).to(self) - } + onTransition { case Oldest -> _ => + lease.foreach { l => + logInfo("Releasing lease as leaving Oldest") + import context.dispatcher + pipe(l.release().map(ReleaseLeaseResult(_))).to(self) + } } - onTransition { - case _ -> (Younger | Oldest) => getNextOldestChanged() + onTransition { case _ -> 
(Younger | Oldest) => + getNextOldestChanged() } onTransition { diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala index 525f31525f7..cf7e35c9775 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala @@ -56,9 +56,7 @@ object ClusterSingletonProxySettings { */ def create(config: Config): ClusterSingletonProxySettings = apply(config) - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] def roleOption(role: String): Option[String] = if (role == "") None else Option(role) @@ -210,9 +208,7 @@ final class ClusterSingletonProxy(singletonManagerPath: String, settings: Cluste } } - /** - * Discard old singleton ActorRef and send a periodic message to self to identify the singleton. - */ + /** Discard old singleton ActorRef and send a periodic message to self to identify the singleton. 
*/ def identifySingleton(): Unit = { import context.dispatcher log.debug("Creating singleton identification timer...") @@ -272,7 +268,6 @@ final class ClusterSingletonProxy(singletonManagerPath: String, settings: Cluste else remove(m) case _: MemberEvent => // do nothing - // singleton identification logic case ActorIdentity(_, Some(s)) => // if the new singleton is defined, deliver all buffered messages diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/protobuf/ClusterSingletonMessageSerializer.scala b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/protobuf/ClusterSingletonMessageSerializer.scala index 7910ea19193..97bc6dddd8c 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/protobuf/ClusterSingletonMessageSerializer.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/protobuf/ClusterSingletonMessageSerializer.scala @@ -30,15 +30,19 @@ private[akka] class ClusterSingletonMessageSerializer(val system: ExtendedActorS private val emptyByteArray = Array.empty[Byte] - private val fromBinaryMap = collection.immutable.HashMap[String, Array[Byte] => AnyRef](HandOverToMeManifest -> { _ => - HandOverToMe - }, HandOverInProgressManifest -> { _ => - HandOverInProgress - }, HandOverDoneManifest -> { _ => - HandOverDone - }, TakeOverFromMeManifest -> { _ => - TakeOverFromMe - }) + private val fromBinaryMap = collection.immutable.HashMap[String, Array[Byte] => AnyRef]( + HandOverToMeManifest -> { _ => + HandOverToMe + }, + HandOverInProgressManifest -> { _ => + HandOverInProgress + }, + HandOverDoneManifest -> { _ => + HandOverDone + }, + TakeOverFromMeManifest -> { _ => + TakeOverFromMe + }) override def manifest(obj: AnyRef): String = obj match { case HandOverToMe => HandOverToMeManifest diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala index 
a6f445aa094..ef789ad9f5b 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala @@ -62,21 +62,20 @@ object DistributedPubSubMediatorSpec extends MultiNodeConfig { } } - //#publisher + // #publisher class Publisher extends Actor { import DistributedPubSubMediator.Publish // activate the extension val mediator = DistributedPubSub(context.system).mediator - def receive = { - case in: String => - val out = in.toUpperCase - mediator ! Publish("content", out) + def receive = { case in: String => + val out = in.toUpperCase + mediator ! Publish("content", out) } } - //#publisher + // #publisher - //#subscriber + // #subscriber class Subscriber extends Actor with ActorLogging { import DistributedPubSubMediator.{ Subscribe, SubscribeAck } val mediator = DistributedPubSub(context.system).mediator @@ -90,35 +89,33 @@ object DistributedPubSubMediatorSpec extends MultiNodeConfig { log.info("subscribing") } } - //#subscriber + // #subscriber - //#sender + // #sender class Sender extends Actor { import DistributedPubSubMediator.Send // activate the extension val mediator = DistributedPubSub(context.system).mediator - def receive = { - case in: String => - val out = in.toUpperCase - mediator ! Send(path = "/user/destination", msg = out, localAffinity = true) + def receive = { case in: String => + val out = in.toUpperCase + mediator ! Send(path = "/user/destination", msg = out, localAffinity = true) } } - //#sender + // #sender - //#send-destination + // #send-destination class Destination extends Actor with ActorLogging { import DistributedPubSubMediator.Put val mediator = DistributedPubSub(context.system).mediator // register to the path mediator ! 
Put(self) - def receive = { - case s: String => - log.info("Got {}", s) + def receive = { case s: String => + log.info("Got {}", s) } } - //#send-destination + // #send-destination } @@ -325,8 +322,8 @@ class DistributedPubSubMediatorSpec } runOn(first) { - val names = receiveWhile(messages = 2) { - case "hello all" => lastSender.path.name + val names = receiveWhile(messages = 2) { case "hello all" => + lastSender.path.name } names.toSet should ===(Set("u8", "u9")) } @@ -346,7 +343,7 @@ class DistributedPubSubMediatorSpec awaitCount(10) } - //#start-subscribers + // #start-subscribers runOn(first) { system.actorOf(Props[Subscriber](), "subscriber1") } @@ -354,16 +351,16 @@ class DistributedPubSubMediatorSpec system.actorOf(Props[Subscriber](), "subscriber2") system.actorOf(Props[Subscriber](), "subscriber3") } - //#start-subscribers + // #start-subscribers - //#publish-message + // #publish-message runOn(third) { val publisher = system.actorOf(Props[Publisher](), "publisher") later() // after a while the subscriptions are replicated publisher ! "hello" } - //#publish-message + // #publish-message enterBarrier("after-8") } @@ -373,23 +370,23 @@ class DistributedPubSubMediatorSpec awaitCount(12) } - //#start-send-destinations + // #start-send-destinations runOn(first) { system.actorOf(Props[Destination](), "destination") } runOn(second) { system.actorOf(Props[Destination](), "destination") } - //#start-send-destinations + // #start-send-destinations - //#send-message + // #send-message runOn(third) { val sender = system.actorOf(Props[Sender](), "sender") later() // after a while the destinations are replicated sender ! 
"hello" } - //#send-message + // #send-message enterBarrier("after-8") } diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubRestartSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubRestartSpec.scala index eb95dd856eb..6aeb1f0daba 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubRestartSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubRestartSpec.scala @@ -40,8 +40,8 @@ object DistributedPubSubRestartSpec extends MultiNodeConfig { testTransport(on = true) class Shutdown extends Actor { - def receive = { - case "shutdown" => context.system.terminate() + def receive = { case "shutdown" => + context.system.terminate() } } @@ -139,9 +139,11 @@ class DistributedPubSubRestartSpec Await.result(system.whenTerminated, 10.seconds) val newSystem = { val port = Cluster(system).selfAddress.port.get - val config = ConfigFactory.parseString(s""" + val config = ConfigFactory + .parseString(s""" akka.remote.artery.canonical.port=$port - """).withFallback(system.settings.config) + """) + .withFallback(system.settings.config) ActorSystem(system.name, config) } diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerChaosSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerChaosSpec.scala index b3837cd95f5..a4b186ed074 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerChaosSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerChaosSpec.scala @@ -42,14 +42,12 @@ object ClusterSingletonManagerChaosSpec extends MultiNodeConfig { case object EchoStarted - /** - * The singleton actor - */ + /** The singleton actor */ class Echo(testActor: ActorRef) extends Actor { testActor ! EchoStarted - def receive = { - case _ => sender() ! 
self + def receive = { case _ => + sender() ! self } } } diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerDownedSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerDownedSpec.scala index f2f56042244..55a6820b1cb 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerDownedSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerDownedSpec.scala @@ -38,9 +38,7 @@ object ClusterSingletonManagerDownedSpec extends MultiNodeConfig { case object EchoStarted case object EchoStopped - /** - * The singleton actor - */ + /** The singleton actor */ class Echo(testActor: ActorRef) extends Actor { testActor ! EchoStarted @@ -48,8 +46,8 @@ object ClusterSingletonManagerDownedSpec extends MultiNodeConfig { testActor ! EchoStopped } - def receive = { - case _ => sender() ! self + def receive = { case _ => + sender() ! self } } } diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeaseSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeaseSpec.scala index 8ea1c3d2206..04032a4c7d7 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeaseSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeaseSpec.scala @@ -59,9 +59,8 @@ object ClusterSingletonManagerLeaseSpec extends MultiNodeConfig { override def postStop(): Unit = { log.info("Singleton stopping") } - override def receive: Receive = { - case msg => - sender() ! Response(msg, selfAddress) + override def receive: Receive = { case msg => + sender() ! 
Response(msg, selfAddress) } } } @@ -175,25 +174,31 @@ class ClusterSingletonManagerLeaseSpec cluster.state.members.size shouldEqual 5 runOn(controller) { cluster.down(address(first)) - awaitAssert({ - cluster.state.members.toList.map(_.status) shouldEqual List(Up, Up, Up, Up) - }, 20.seconds) - val requests = awaitAssert({ - TestLeaseActorClientExt(system).getLeaseActor() ! GetRequests - val msg = expectMsgType[LeaseRequests] - withClue("Requests: " + msg) { - msg.requests.size shouldEqual 2 - } - msg - }, 10.seconds) + awaitAssert( + { + cluster.state.members.toList.map(_.status) shouldEqual List(Up, Up, Up, Up) + }, + 20.seconds) + val requests = awaitAssert( + { + TestLeaseActorClientExt(system).getLeaseActor() ! GetRequests + val msg = expectMsgType[LeaseRequests] + withClue("Requests: " + msg) { + msg.requests.size shouldEqual 2 + } + msg + }, + 10.seconds) requests.requests should contain(Release(address(first).hostPort)) requests.requests should contain(Acquire(address(second).hostPort)) } runOn(second, third, fourth) { - awaitAssert({ - cluster.state.members.toList.map(_.status) shouldEqual List(Up, Up, Up, Up) - }, 20.seconds) + awaitAssert( + { + cluster.state.members.toList.map(_.status) shouldEqual List(Up, Up, Up, Up) + }, + 20.seconds) } enterBarrier("first node downed") val proxy = system.actorOf( diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeave2Spec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeave2Spec.scala index d407ae06fb3..773ad30d7ab 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeave2Spec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeave2Spec.scala @@ -39,9 +39,7 @@ object ClusterSingletonManagerLeave2Spec extends MultiNodeConfig { case object EchoStarted - /** - * The singleton actor - */ + /** The singleton actor */ class 
Echo(testActor: ActorRef) extends Actor with ActorLogging { override def preStart(): Unit = { log.debug("Started singleton at [{}]", Cluster(context.system).selfAddress) @@ -189,7 +187,7 @@ class ClusterSingletonManagerLeave2Spec p.within(15.seconds) { p.awaitAssert { echoProxy.tell("hello2", p.ref) - p.expectMsgType[ActorRef](1.seconds).path.address should not be (firstAddress) + p.expectMsgType[ActorRef](1.seconds).path.address should not be firstAddress } } diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeaveSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeaveSpec.scala index 318c282da8f..51ddf5e19a2 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeaveSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeaveSpec.scala @@ -33,9 +33,7 @@ object ClusterSingletonManagerLeaveSpec extends MultiNodeConfig { case object EchoStarted - /** - * The singleton actor - */ + /** The singleton actor */ class Echo(testActor: ActorRef) extends Actor { override def preStart(): Unit = { testActor ! 
"preStart" @@ -148,7 +146,7 @@ class ClusterSingletonManagerLeaveSpec p.within(15.seconds) { p.awaitAssert { echoProxy.tell("hello2", p.ref) - p.expectMsgType[ActorRef](1.seconds).path.address should not be (firstAddress) + p.expectMsgType[ActorRef](1.seconds).path.address should not be firstAddress } } diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerPreparingForShutdownSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerPreparingForShutdownSpec.scala index a2bd0565e7d..448b7a6bca1 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerPreparingForShutdownSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerPreparingForShutdownSpec.scala @@ -35,9 +35,7 @@ object ClusterSingletonManagerPreparingForShutdownSpec extends MultiNodeConfig { case object EchoStarted - /** - * The singleton actor - */ + /** The singleton actor */ class Echo(testActor: ActorRef) extends Actor with ActorLogging { override def preStart(): Unit = { log.info("Singleton starting on {}", Cluster(context.system).selfUniqueAddress) @@ -110,11 +108,13 @@ class ClusterSingletonManagerPreparingForShutdownSpec runOn(first) { Cluster(system).prepareForFullClusterShutdown() } - awaitAssert({ - withClue("members: " + Cluster(system).readView.members) { - Cluster(system).selfMember.status shouldEqual MemberStatus.ReadyForShutdown - } - }, 10.seconds) + awaitAssert( + { + withClue("members: " + Cluster(system).readView.members) { + Cluster(system).selfMember.status shouldEqual MemberStatus.ReadyForShutdown + } + }, + 10.seconds) enterBarrier("preparation-complete") runOn(first) { @@ -133,7 +133,8 @@ class ClusterSingletonManagerPreparingForShutdownSpec } } }, - 8.seconds) // this timeout must be lower than coordinated shutdown timeout otherwise it could pass due to the timeout continuing with the cluster exit + 8.seconds + 
) // this timeout must be lower than coordinated shutdown timeout otherwise it could pass due to the timeout continuing with the cluster exit // where as this is testing that shutdown happens right away when a cluster is in preparing to shutdown mode enterBarrier("initial-singleton-removed") @@ -152,11 +153,13 @@ class ClusterSingletonManagerPreparingForShutdownSpec Cluster(system).leave(address(third)) Cluster(system).leave(address(second)) } - awaitAssert({ - withClue("self member: " + Cluster(system).selfMember) { - Cluster(system).selfMember.status shouldEqual Removed - } - }, 10.seconds) + awaitAssert( + { + withClue("self member: " + Cluster(system).selfMember) { + Cluster(system).selfMember.status shouldEqual Removed + } + }, + 10.seconds) enterBarrier("done") } diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala index 0c8386cdeb8..dee83c30bdd 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala @@ -47,10 +47,10 @@ object ClusterSingletonManagerSpec extends MultiNodeConfig { nodeConfig(first, second, third, fourth, fifth, sixth)(ConfigFactory.parseString("akka.cluster.roles =[worker]")) - //#singleton-message-classes + // #singleton-message-classes object PointToPointChannel { case object UnregistrationOk extends CborSerializable - //#singleton-message-classes + // #singleton-message-classes case object RegisterConsumer extends CborSerializable case object UnregisterConsumer extends CborSerializable case object RegistrationOk extends CborSerializable @@ -58,9 +58,9 @@ object ClusterSingletonManagerSpec extends MultiNodeConfig { case object UnexpectedUnregistration extends CborSerializable case object Reset extends CborSerializable case object ResetOk 
extends CborSerializable - //#singleton-message-classes + // #singleton-message-classes } - //#singleton-message-classes + // #singleton-message-classes /** * This channel is extremely strict with regards to @@ -107,18 +107,16 @@ object ClusterSingletonManagerSpec extends MultiNodeConfig { } } - //#singleton-message-classes + // #singleton-message-classes object Consumer { case object End extends CborSerializable case object GetCurrent extends CborSerializable case object Ping extends CborSerializable case object Pong extends CborSerializable } - //#singleton-message-classes + // #singleton-message-classes - /** - * The Singleton actor - */ + /** The Singleton actor */ class Consumer(queue: ActorRef, delegateTo: ActorRef) extends Actor with ActorLogging { import Consumer._ @@ -144,7 +142,7 @@ object ClusterSingletonManagerSpec extends MultiNodeConfig { delegateTo ! message case GetCurrent => sender() ! current - //#consumer-end + // #consumer-end case End => queue ! UnregisterConsumer case UnregistrationOk => @@ -152,7 +150,7 @@ object ClusterSingletonManagerSpec extends MultiNodeConfig { context.stop(self) case Ping => sender() ! 
Pong - //#consumer-end + // #consumer-end } } @@ -217,35 +215,35 @@ class ClusterSingletonManagerSpec } def createSingleton(): ActorRef = { - //#create-singleton-manager + // #create-singleton-manager system.actorOf( ClusterSingletonManager.props( singletonProps = Props(classOf[Consumer], queue, testActor), terminationMessage = End, settings = ClusterSingletonManagerSettings(system).withRole("worker")), name = "consumer") - //#create-singleton-manager + // #create-singleton-manager } def createSingletonProxy(): ActorRef = { - //#create-singleton-proxy + // #create-singleton-proxy val proxy = system.actorOf( ClusterSingletonProxy.props( singletonManagerPath = "/user/consumer", settings = ClusterSingletonProxySettings(system).withRole("worker")), name = "consumerProxy") - //#create-singleton-proxy + // #create-singleton-proxy proxy } def createSingletonProxyDc(): ActorRef = { - //#create-singleton-proxy-dc + // #create-singleton-proxy-dc val proxyDcB = system.actorOf( ClusterSingletonProxy.props( singletonManagerPath = "/user/consumer", settings = ClusterSingletonProxySettings(system).withDataCenter("B")), name = "consumerProxyDcB") - //#create-singleton-proxy-dc + // #create-singleton-proxy-dc proxyDcB } diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerStartupSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerStartupSpec.scala index 4d368159e56..38ff6fc6732 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerStartupSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerStartupSpec.scala @@ -34,13 +34,10 @@ object ClusterSingletonManagerStartupSpec extends MultiNodeConfig { case object EchoStarted - /** - * The singleton actor - */ + /** The singleton actor */ class Echo extends Actor { - def receive = { - case _ => - sender() ! self + def receive = { case _ => + sender() ! 
self } } } diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/MultiDcSingletonManagerSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/MultiDcSingletonManagerSpec.scala index ac9bdc8bf11..845c3b946b3 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/MultiDcSingletonManagerSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/MultiDcSingletonManagerSpec.scala @@ -53,9 +53,8 @@ class MultiDcSingleton extends Actor with ActorLogging { val cluster = Cluster(context.system) - override def receive: Receive = { - case Ping => - sender() ! Pong(cluster.settings.SelfDataCenter, cluster.selfAddress, cluster.selfRoles) + override def receive: Receive = { case Ping => + sender() ! Pong(cluster.settings.SelfDataCenter, cluster.selfAddress, cluster.selfRoles) } } object MultiDcSingleton { diff --git a/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonLeaseSpec.scala b/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonLeaseSpec.scala index 7c708468e22..e13a9ab39c8 100644 --- a/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonLeaseSpec.scala +++ b/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonLeaseSpec.scala @@ -38,13 +38,15 @@ class ImportantSingleton(lifeCycleProbe: ActorRef) extends Actor with ActorLoggi lifeCycleProbe ! "postStop" } - override def receive: Receive = { - case msg => - sender() ! msg + override def receive: Receive = { case msg => + sender() ! 
msg } } -class ClusterSingletonLeaseSpec extends AkkaSpec(ConfigFactory.parseString(""" +class ClusterSingletonLeaseSpec + extends AkkaSpec( + ConfigFactory + .parseString(""" akka.loglevel = INFO akka.actor.provider = cluster diff --git a/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonLeavingSpeedSpec.scala b/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonLeavingSpeedSpec.scala index 46f9745e7b6..d010178ac0a 100644 --- a/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonLeavingSpeedSpec.scala +++ b/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonLeavingSpeedSpec.scala @@ -34,15 +34,13 @@ object ClusterSingletonLeavingSpeedSpec { probe ! "stopped" } - override def receive: Receive = { - case msg => sender() ! msg + override def receive: Receive = { case msg => + sender() ! msg } } } -class ClusterSingletonLeavingSpeedSpec - extends AkkaSpec( - """ +class ClusterSingletonLeavingSpeedSpec extends AkkaSpec(""" akka.loglevel = DEBUG akka.actor.provider = akka.cluster.ClusterActorRefProvider akka.cluster.downing-provider-class = akka.cluster.testkit.AutoDowning @@ -131,11 +129,10 @@ class ClusterSingletonLeavingSpeedSpec (stoppedDuration, startedDuration) } - durations.zipWithIndex.foreach { - case ((stoppedDuration, startedDuration), i) => - println( - s"Singleton $i stopped in ${stoppedDuration.toMillis} ms, started in ${startedDuration.toMillis} ms, " + - s"diff ${(startedDuration - stoppedDuration).toMillis} ms") + durations.zipWithIndex.foreach { case ((stoppedDuration, startedDuration), i) => + println( + s"Singleton $i stopped in ${stoppedDuration.toMillis} ms, started in ${startedDuration.toMillis} ms, " + + s"diff ${(startedDuration - stoppedDuration).toMillis} ms") } } diff --git a/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonProxySpec.scala b/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonProxySpec.scala 
index b69ec7775ab..7fb9fb3a909 100644 --- a/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonProxySpec.scala +++ b/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonProxySpec.scala @@ -85,10 +85,9 @@ object ClusterSingletonProxySpec { log.info("Singleton created on {}", Cluster(context.system).selfAddress) - def receive: Actor.Receive = { - case msg => - log.info(s"Got $msg") - sender() ! "Got " + msg + def receive: Actor.Receive = { case msg => + log.info(s"Got $msg") + sender() ! "Got " + msg } } diff --git a/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonRestart2Spec.scala b/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonRestart2Spec.scala index 082db5c445e..288b9699dcf 100644 --- a/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonRestart2Spec.scala +++ b/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonRestart2Spec.scala @@ -22,14 +22,13 @@ object ClusterSingletonRestart2Spec { def singletonActorProps: Props = Props(new Singleton) class Singleton extends Actor { - def receive = { - case _ => sender() ! Cluster(context.system).selfUniqueAddress + def receive = { case _ => + sender() ! 
Cluster(context.system).selfUniqueAddress } } } -class ClusterSingletonRestart2Spec - extends AkkaSpec(""" +class ClusterSingletonRestart2Spec extends AkkaSpec(""" akka.loglevel = INFO akka.cluster.roles = [singleton] akka.actor.provider = akka.cluster.ClusterActorRefProvider @@ -104,9 +103,11 @@ class ClusterSingletonRestart2Spec val sys2port = Cluster(sys2).selfAddress.port.get val sys4Config = - ConfigFactory.parseString(s""" + ConfigFactory + .parseString(s""" akka.remote.artery.canonical.port=$sys2port - """).withFallback(system.settings.config) + """) + .withFallback(system.settings.config) ActorSystem(system.name, sys4Config) } diff --git a/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonRestartSpec.scala b/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonRestartSpec.scala index e977253b0d7..888fba9644a 100644 --- a/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonRestartSpec.scala +++ b/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonRestartSpec.scala @@ -16,8 +16,7 @@ import akka.testkit.AkkaSpec import akka.testkit.TestActors import akka.testkit.TestProbe -class ClusterSingletonRestartSpec - extends AkkaSpec(""" +class ClusterSingletonRestartSpec extends AkkaSpec(""" akka.loglevel = INFO akka.actor.provider = akka.cluster.ClusterActorRefProvider akka.cluster.downing-provider-class = akka.cluster.testkit.AutoDowning @@ -75,9 +74,11 @@ class ClusterSingletonRestartSpec val sys1port = Cluster(sys1).selfAddress.port.get val sys3Config = - ConfigFactory.parseString(s""" + ConfigFactory + .parseString(s""" akka.remote.artery.canonical.port=$sys1port - """).withFallback(system.settings.config) + """) + .withFallback(system.settings.config) ActorSystem(system.name, sys3Config) } diff --git a/akka-cluster-typed/src/main/scala-2.13/akka/cluster/typed/internal/receptionist/ClusterReceptionistProtocol.scala 
b/akka-cluster-typed/src/main/scala-2.13/akka/cluster/typed/internal/receptionist/ClusterReceptionistProtocol.scala index a8dd16ea9c2..c7bb1956f3e 100644 --- a/akka-cluster-typed/src/main/scala-2.13/akka/cluster/typed/internal/receptionist/ClusterReceptionistProtocol.scala +++ b/akka-cluster-typed/src/main/scala-2.13/akka/cluster/typed/internal/receptionist/ClusterReceptionistProtocol.scala @@ -8,9 +8,7 @@ import akka.actor.typed.ActorRef import akka.actor.typed.internal.receptionist.{ AbstractServiceKey, ReceptionistMessages } import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[receptionist] object ClusterReceptionistProtocol { type SubscriptionsKV[K <: AbstractServiceKey] = ActorRef[ReceptionistMessages.Listing[K#Protocol]] diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/internal/ReplicatorBehavior.scala b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/internal/ReplicatorBehavior.scala index 452492569ac..ab1d271a5be 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/internal/ReplicatorBehavior.scala +++ b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/internal/ReplicatorBehavior.scala @@ -18,9 +18,7 @@ import akka.pattern.ask import akka.util.JavaDurationConverters._ import akka.util.Timeout -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ReplicatorBehavior { import akka.cluster.ddata.typed.javadsl.{ Replicator => JReplicator } import akka.cluster.ddata.typed.scaladsl.{ Replicator => SReplicator } @@ -86,8 +84,8 @@ import akka.util.Timeout case rsp: dd.Replicator.GetFailure[d] => JReplicator.GetFailure(rsp.key) case rsp: dd.Replicator.GetDataDeleted[d] => JReplicator.GetDataDeleted(rsp.key) } - .recover { - case _ => JReplicator.GetFailure(cmd.key) + .recover { case _ => + JReplicator.GetFailure(cmd.key) } reply.foreach { cmd.replyTo ! 
_ } Behaviors.same @@ -115,8 +113,8 @@ import akka.util.Timeout case rsp: dd.Replicator.StoreFailure[d] => JReplicator.StoreFailure(rsp.key) case rsp: dd.Replicator.UpdateDataDeleted[d] => JReplicator.UpdateDataDeleted(rsp.key) } - .recover { - case _ => JReplicator.UpdateTimeout(cmd.key) + .recover { case _ => + JReplicator.UpdateTimeout(cmd.key) } reply.foreach { cmd.replyTo ! _ } Behaviors.same @@ -182,8 +180,8 @@ import akka.util.Timeout case rsp: dd.Replicator.DataDeleted[d] => JReplicator.DataDeleted(rsp.key) case rsp: dd.Replicator.StoreFailure[d] => JReplicator.StoreFailure(rsp.key) } - .recover { - case _ => JReplicator.DeleteFailure(cmd.key) + .recover { case _ => + JReplicator.DeleteFailure(cmd.key) } reply.foreach { cmd.replyTo ! _ } Behaviors.same @@ -207,7 +205,9 @@ import akka.util.Timeout Behaviors.same case unexpected => - throw new RuntimeException(s"Unexpected message: ${unexpected.getClass}") // compiler exhaustiveness check pleaser + throw new RuntimeException( + s"Unexpected message: ${unexpected.getClass}" + ) // compiler exhaustiveness check pleaser } } .receiveSignal { diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/javadsl/DistributedData.scala b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/javadsl/DistributedData.scala index c87d05115fd..acf16b528d9 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/javadsl/DistributedData.scala +++ b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/javadsl/DistributedData.scala @@ -84,9 +84,7 @@ abstract class DistributedData extends Extension { def selfUniqueAddress: SelfUniqueAddress } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class DistributedDataImpl(system: ActorSystem[_]) extends DistributedData { override val replicator: ActorRef[Replicator.Command] = diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/javadsl/Replicator.scala 
b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/javadsl/Replicator.scala index 239967ffebf..996e5de39e8 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/javadsl/Replicator.scala +++ b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/javadsl/Replicator.scala @@ -19,15 +19,11 @@ import akka.cluster.ddata.ReplicatedData import akka.cluster.ddata.typed.internal.ReplicatorBehavior import akka.util.JavaDurationConverters._ -/** - * @see [[akka.cluster.ddata.Replicator]]. - */ +/** @see [[akka.cluster.ddata.Replicator]]. */ object Replicator { import dd.Replicator.DefaultMajorityMinCap - /** - * The `Behavior` for the `Replicator` actor. - */ + /** The `Behavior` for the `Replicator` actor. */ def behavior(settings: dd.ReplicatorSettings): Behavior[Command] = ReplicatorBehavior(settings, underlyingReplicator = None).narrow[Command] @@ -100,14 +96,10 @@ object Replicator { @InternalApi private[akka] override def toClassic = dd.Replicator.WriteAll(timeout.asScala) } - /** - * The `ReadLocal` instance - */ + /** The `ReadLocal` instance */ def readLocal: ReadConsistency = ReadLocal - /** - * The `WriteLocal` instance - */ + /** The `WriteLocal` instance */ def writeLocal: WriteConsistency = WriteLocal /** @@ -124,22 +116,16 @@ object Replicator { def key: Key[A] } - /** - * Reply from `Get`. The data value is retrieved with [[#get]] using the typed key. - */ + /** Reply from `Get`. The data value is retrieved with [[#get]] using the typed key. */ final case class GetSuccess[A <: ReplicatedData](key: Key[A])(data: A) extends GetResponse[A] { - /** - * The data value, with correct type. - */ + /** The data value, with correct type. */ def get[T <: ReplicatedData](key: Key[T]): T = { require(key == this.key, "wrong key used, must use contained key") data.asInstanceOf[T] } - /** - * The data value. Use [[#get]] to get the fully typed value. - */ + /** The data value. Use [[#get]] to get the fully typed value. 
*/ def dataValue: A = data } final case class NotFound[A <: ReplicatedData](key: Key[A]) extends GetResponse[A] @@ -150,9 +136,7 @@ object Replicator { */ final case class GetFailure[A <: ReplicatedData](key: Key[A]) extends GetResponse[A] - /** - * The [[Get]] request couldn't be performed because the entry has been deleted. - */ + /** The [[Get]] request couldn't be performed because the entry has been deleted. */ final case class GetDataDeleted[A <: ReplicatedData](key: Key[A]) extends GetResponse[A] object Update { @@ -217,9 +201,7 @@ object Replicator { */ final case class UpdateTimeout[A <: ReplicatedData](key: Key[A]) extends UpdateFailure[A] - /** - * The [[Update]] couldn't be performed because the entry has been deleted. - */ + /** The [[Update]] couldn't be performed because the entry has been deleted. */ final case class UpdateDataDeleted[A <: ReplicatedData](key: Key[A]) extends UpdateResponse[A] /** @@ -271,9 +253,7 @@ object Replicator { final case class Unsubscribe[A <: ReplicatedData](key: Key[A], subscriber: ActorRef[SubscribeResponse[A]]) extends Command - /** - * @see [[Replicator.Subscribe]] - */ + /** @see [[Replicator.Subscribe]] */ sealed trait SubscribeResponse[A <: ReplicatedData] extends NoSerializationVerificationNeeded { def key: Key[A] } @@ -294,20 +274,14 @@ object Replicator { data.asInstanceOf[T] } - /** - * The data value. Use [[#get]] to get the fully typed value. - */ + /** The data value. Use [[#get]] to get the fully typed value. 
*/ def dataValue: A = data } - /** - * @see [[Replicator.Subscribe]] - */ + /** @see [[Replicator.Subscribe]] */ final case class Deleted[A <: ReplicatedData](key: Key[A]) extends SubscribeResponse[A] - /** - * @see [[Replicator.Subscribe]] - */ + /** @see [[Replicator.Subscribe]] */ final case class Expired[A <: ReplicatedData](key: Key[A]) extends SubscribeResponse[A] /** @@ -333,9 +307,7 @@ object Replicator { */ final case class GetReplicaCount(replyTo: ActorRef[ReplicaCount]) extends Command - /** - * Current number of replicas. Reply to `GetReplicaCount`. - */ + /** Current number of replicas. Reply to `GetReplicaCount`. */ final case class ReplicaCount(n: Int) /** diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/DistributedData.scala b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/DistributedData.scala index f7f1345ff74..2d1b2d14bed 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/DistributedData.scala +++ b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/DistributedData.scala @@ -108,9 +108,7 @@ class DistributedData(system: ActorSystem[_]) extends Extension { Props.empty.withDispatcherFromConfig(settings.dispatcher)) } - /** - * Returns true if this member is not tagged with the role configured for the replicas. - */ + /** Returns true if this member is not tagged with the role configured for the replicas. 
*/ private def isTerminated: Boolean = dd.DistributedData(system.toClassic).isTerminated } diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/Replicator.scala b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/Replicator.scala index ebe540f266a..5eafdebc9df 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/Replicator.scala +++ b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/Replicator.scala @@ -13,14 +13,10 @@ import akka.cluster.ddata.Key import akka.cluster.ddata.ReplicatedData import akka.cluster.ddata.typed.internal.ReplicatorBehavior -/** - * @see [[akka.cluster.ddata.Replicator]]. - */ +/** @see [[akka.cluster.ddata.Replicator]]. */ object Replicator { - /** - * The `Behavior` for the `Replicator` actor. - */ + /** The `Behavior` for the `Replicator` actor. */ def behavior(settings: ReplicatorSettings): Behavior[Command] = ReplicatorBehavior(settings, underlyingReplicator = None) @@ -75,9 +71,7 @@ object Replicator { object Get { - /** - * Convenience for `ask`. - */ + /** Convenience for `ask`. */ def apply[A <: ReplicatedData](key: Key[A], consistency: ReadConsistency): ActorRef[GetResponse[A]] => Get[A] = replyTo => Get(key, consistency, replyTo) } @@ -92,9 +86,7 @@ object Replicator { replyTo: ActorRef[GetResponse[A]]) extends Command - /** - * Reply from `Get`. The data value is retrieved with [[dd.Replicator.GetSuccess.get]] using the typed key. - */ + /** Reply from `Get`. The data value is retrieved with [[dd.Replicator.GetSuccess.get]] using the typed key. */ type GetResponse[A <: ReplicatedData] = dd.Replicator.GetResponse[A] object GetSuccess { def unapply[A <: ReplicatedData](rsp: GetSuccess[A]): Option[Key[A]] = Some(rsp.key) @@ -114,9 +106,7 @@ object Replicator { def unapply[A <: ReplicatedData](rsp: GetFailure[A]): Option[Key[A]] = Some(rsp.key) } - /** - * The [[Get]] request couldn't be performed because the entry has been deleted. 
- */ + /** The [[Get]] request couldn't be performed because the entry has been deleted. */ type GetDataDeleted[A <: ReplicatedData] = dd.Replicator.GetDataDeleted[A] object GetDataDeleted { def unapply[A <: ReplicatedData](rsp: GetDataDeleted[A]): Option[Key[A]] = @@ -139,9 +129,7 @@ object Replicator { replyTo: ActorRef[UpdateResponse[A]])(modify: A => A): Update[A] = Update(key, writeConsistency, replyTo)(modifyWithInitial(initial, modify)) - /** - * Convenience for `ask`. - */ + /** Convenience for `ask`. */ def apply[A <: ReplicatedData](key: Key[A], initial: A, writeConsistency: WriteConsistency)( modify: A => A): ActorRef[UpdateResponse[A]] => Update[A] = replyTo => Update(key, writeConsistency, replyTo)(modifyWithInitial(initial, modify)) @@ -201,9 +189,7 @@ object Replicator { Some(rsp.key) } - /** - * The [[Update]] couldn't be performed because the entry has been deleted. - */ + /** The [[Update]] couldn't be performed because the entry has been deleted. */ type UpdateDataDeleted[A <: ReplicatedData] = dd.Replicator.UpdateDataDeleted[A] object UpdateDataDeleted { def unapply[A <: ReplicatedData](rsp: UpdateDataDeleted[A]): Option[Key[A]] = @@ -261,9 +247,7 @@ object Replicator { final case class Unsubscribe[A <: ReplicatedData](key: Key[A], subscriber: ActorRef[SubscribeResponse[A]]) extends Command - /** - * @see [[Subscribe]] - */ + /** @see [[Subscribe]] */ type SubscribeResponse[A <: ReplicatedData] = dd.Replicator.SubscribeResponse[A] /** @@ -286,25 +270,19 @@ object Replicator { def unapply[A <: ReplicatedData](del: Deleted[A]): Option[Key[A]] = Some(del.key) } - /** - * @see [[Delete]] - */ + /** @see [[Delete]] */ type Deleted[A <: ReplicatedData] = dd.Replicator.Deleted[A] object Expired { def unapply[A <: ReplicatedData](exp: Expired[A]): Option[Key[A]] = Some(exp.key) } - /** - * @see [[Expired]] - */ + /** @see [[Expired]] */ type Expired[A <: ReplicatedData] = dd.Replicator.Expired[A] object Delete { - /** - * Convenience for `ask`. 
- */ + /** Convenience for `ask`. */ def apply[A <: ReplicatedData]( key: Key[A], consistency: WriteConsistency): ActorRef[DeleteResponse[A]] => Delete[A] = @@ -340,9 +318,7 @@ object Replicator { object GetReplicaCount { - /** - * Convenience for `ask`. - */ + /** Convenience for `ask`. */ def apply(): ActorRef[ReplicaCount] => GetReplicaCount = replyTo => GetReplicaCount(replyTo) } @@ -353,9 +329,7 @@ object Replicator { */ final case class GetReplicaCount(replyTo: ActorRef[ReplicaCount]) extends Command - /** - * Current number of replicas. Reply to `GetReplicaCount`. - */ + /** Current number of replicas. Reply to `GetReplicaCount`. */ type ReplicaCount = dd.Replicator.ReplicaCount object ReplicaCount { def unapply[A <: ReplicatedData](rsp: ReplicaCount): Option[Int] = diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/ReplicatorSettings.scala b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/ReplicatorSettings.scala index c424d541723..34e54ba0afb 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/ReplicatorSettings.scala +++ b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/ReplicatorSettings.scala @@ -11,9 +11,7 @@ import akka.actor.typed.scaladsl.adapter._ import akka.annotation.InternalApi import akka.cluster.{ ddata => dd } -/** - * @see [[akka.cluster.ddata.ReplicatorSettings]]. - */ +/** @see [[akka.cluster.ddata.ReplicatorSettings]]. 
*/ object ReplicatorSettings { /** diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/package.scala b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/package.scala index fa8aa6f0018..3981f6e2423 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/package.scala +++ b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/package.scala @@ -8,8 +8,6 @@ import akka.cluster.{ ddata => dd } package object scaladsl { - /** - * @see [[akka.cluster.ddata.ReplicatorSettings]]. - */ + /** @see [[akka.cluster.ddata.ReplicatorSettings]]. */ type ReplicatorSettings = dd.ReplicatorSettings } diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/typed/Cluster.scala b/akka-cluster-typed/src/main/scala/akka/cluster/typed/Cluster.scala index 56ef777ca95..d182a995ddd 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/typed/Cluster.scala +++ b/akka-cluster-typed/src/main/scala/akka/cluster/typed/Cluster.scala @@ -41,9 +41,7 @@ final case class Subscribe[A <: ClusterDomainEvent](subscriber: ActorRef[A], eve object Subscribe { - /** - * Java API - */ + /** Java API */ def create[A <: ClusterDomainEvent](subscriber: ActorRef[A], eventClass: Class[A]): Subscribe[A] = Subscribe(subscriber, eventClass) } @@ -69,9 +67,7 @@ final case class SelfRemoved(previousStatus: MemberStatus) extends ClusterDomain final case class Unsubscribe[T](subscriber: ActorRef[T]) extends ClusterStateSubscription final case class GetCurrentState(recipient: ActorRef[CurrentClusterState]) extends ClusterStateSubscription -/** - * Not intended for user extension. - */ +/** Not intended for user extension. 
*/ @DoNotInherit sealed trait ClusterCommand @@ -91,9 +87,7 @@ final case class Join(address: Address) extends ClusterCommand { object Join { - /** - * Java API - */ + /** Java API */ def create(address: Address): Join = Join(address) } @@ -162,9 +156,7 @@ final case class Leave(address: Address) extends ClusterCommand object Leave { - /** - * Java API - */ + /** Java API */ def create(address: Address): Leave = Leave(address) } @@ -192,22 +184,16 @@ final case class Down(address: Address) extends ClusterCommand case object PrepareForFullClusterShutdown extends PrepareForFullClusterShutdown { - /** - * Java API - */ + /** Java API */ def prepareForFullClusterShutdown(): PrepareForFullClusterShutdown = this } -/** - * Akka Typed Cluster API entry point - */ +/** Akka Typed Cluster API entry point */ object Cluster extends ExtensionId[Cluster] { def createExtension(system: ActorSystem[_]): Cluster = new AdapterClusterImpl(system) - /** - * Java API - */ + /** Java API */ def get(system: ActorSystem[_]): Cluster = apply(system) } @@ -228,14 +214,10 @@ abstract class Cluster extends Extension { /** Current snapshot state of the cluster. 
*/ def state: CurrentClusterState - /** - * @return an actor that allows for subscribing to messages when the cluster state changes - */ + /** @return an actor that allows for subscribing to messages when the cluster state changes */ def subscriptions: ActorRef[ClusterStateSubscription] - /** - * @return an actor that accepts commands to join, leave and down nodes in a cluster - */ + /** @return an actor that accepts commands to join, leave and down nodes in a cluster */ def manager: ActorRef[ClusterCommand] } diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/typed/ClusterSingleton.scala b/akka-cluster-typed/src/main/scala/akka/cluster/typed/ClusterSingleton.scala index 32576e707cb..c2c6099e9e7 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/typed/ClusterSingleton.scala +++ b/akka-cluster-typed/src/main/scala/akka/cluster/typed/ClusterSingleton.scala @@ -12,8 +12,8 @@ import akka.actor.typed._ import akka.annotation.{ DoNotInherit, InternalApi } import akka.cluster.ClusterSettings.DataCenter import akka.cluster.singleton.{ - ClusterSingletonProxySettings, - ClusterSingletonManagerSettings => ClassicClusterSingletonManagerSettings + ClusterSingletonManagerSettings => ClassicClusterSingletonManagerSettings, + ClusterSingletonProxySettings } import akka.cluster.typed.internal.AdaptedClusterSingletonImpl import akka.coordination.lease.LeaseUsageSettings @@ -23,9 +23,7 @@ object ClusterSingletonSettings { def apply(system: ActorSystem[_]): ClusterSingletonSettings = fromConfig(system.settings.config.getConfig("akka.cluster")) - /** - * Java API - */ + /** Java API */ def create(system: ActorSystem[_]): ClusterSingletonSettings = apply(system) def fromConfig(config: Config): ClusterSingletonSettings = { @@ -109,25 +107,19 @@ final class ClusterSingletonSettings( bufferSize, leaseSettings) - /** - * INTERNAL API: - */ + /** INTERNAL API: */ @InternalApi private[akka] def toManagerSettings(singletonName: String): ClassicClusterSingletonManagerSettings 
= new ClassicClusterSingletonManagerSettings(singletonName, role, removalMargin, handOverRetryInterval, leaseSettings) - /** - * INTERNAL API: - */ + /** INTERNAL API: */ @InternalApi private[akka] def toProxySettings(singletonName: String): ClusterSingletonProxySettings = { new ClusterSingletonProxySettings(singletonName, role, singletonIdentificationInterval, bufferSize) .withDataCenter(dataCenter) } - /** - * INTERNAL API: - */ + /** INTERNAL API: */ @InternalApi private[akka] def shouldRunManager(cluster: Cluster): Boolean = { (role.isEmpty || cluster.selfMember.roles(role.get)) && @@ -142,15 +134,11 @@ object ClusterSingleton extends ExtensionId[ClusterSingleton] { override def createExtension(system: ActorSystem[_]): ClusterSingleton = new AdaptedClusterSingletonImpl(system) - /** - * Java API: - */ + /** Java API: */ def get(system: ActorSystem[_]): ClusterSingleton = apply(system) } -/** - * INTERNAL API: - */ +/** INTERNAL API: */ @InternalApi private[akka] object ClusterSingletonImpl { def managerNameFor(singletonName: String) = s"singletonManager$singletonName" @@ -181,9 +169,7 @@ final class SingletonActor[M] private ( val stopMessage: Option[M], val settings: Option[ClusterSingletonSettings]) { - /** - * [[akka.actor.typed.Props]] of the singleton actor, such as dispatcher settings. - */ + /** [[akka.actor.typed.Props]] of the singleton actor, such as dispatcher settings. */ def withProps(props: Props): SingletonActor[M] = copy(props = props) /** @@ -194,9 +180,7 @@ final class SingletonActor[M] private ( */ def withStopMessage(msg: M): SingletonActor[M] = copy(stopMessage = Option(msg)) - /** - * Additional settings, typically loaded from configuration. - */ + /** Additional settings, typically loaded from configuration. 
*/ def withSettings(settings: ClusterSingletonSettings): SingletonActor[M] = copy(settings = Option(settings)) private def copy( @@ -271,9 +255,7 @@ object ClusterSingletonManagerSettings { */ def create(config: Config): ClusterSingletonManagerSettings = apply(config) - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] def roleOption(role: String): Option[String] = if (role == "") None else Option(role) diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AdaptedClusterImpl.scala b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AdaptedClusterImpl.scala index 2476f523ea5..81eb74ba899 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AdaptedClusterImpl.scala +++ b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AdaptedClusterImpl.scala @@ -17,9 +17,7 @@ import akka.cluster.ClusterEvent.MemberEvent import akka.cluster.typed._ import akka.cluster.typed.PrepareForFullClusterShutdown -/** - * INTERNAL API: - */ +/** INTERNAL API: */ @InternalApi private[akka] object AdapterClusterImpl { @@ -103,12 +101,10 @@ private[akka] object AdapterClusterImpl { case _ => throw new IllegalArgumentException() // compiler exhaustiveness check pleaser } - .receiveSignal { - - case (_, Terminated(ref)) => - upSubscribers = upSubscribers.filterNot(_ == ref) - removedSubscribers = removedSubscribers.filterNot(_ == ref) - Behaviors.same + .receiveSignal { case (_, Terminated(ref)) => + upSubscribers = upSubscribers.filterNot(_ == ref) + removedSubscribers = removedSubscribers.filterNot(_ == ref) + Behaviors.same } .narrow[ClusterStateSubscription] @@ -145,9 +141,7 @@ private[akka] object AdapterClusterImpl { } -/** - * INTERNAL API: - */ +/** INTERNAL API: */ @InternalApi private[akka] final class AdapterClusterImpl(system: ActorSystem[_]) extends Cluster { import AdapterClusterImpl._ diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AdaptedClusterSingletonImpl.scala 
b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AdaptedClusterSingletonImpl.scala index a0d34b9e22f..966ac6eb69c 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AdaptedClusterSingletonImpl.scala +++ b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AdaptedClusterSingletonImpl.scala @@ -14,13 +14,11 @@ import akka.actor.typed.internal.adapter.ActorSystemAdapter import akka.actor.typed.scaladsl.Behaviors import akka.annotation.InternalApi import akka.cluster.ClusterSettings.DataCenter -import akka.cluster.singleton.{ ClusterSingletonProxy, ClusterSingletonManager => OldSingletonManager } +import akka.cluster.singleton.{ ClusterSingletonManager => OldSingletonManager, ClusterSingletonProxy } import akka.cluster.typed import akka.cluster.typed.{ Cluster, ClusterSingleton, ClusterSingletonImpl, ClusterSingletonSettings } -/** - * INTERNAL API: - */ +/** INTERNAL API: */ @InternalApi private[akka] final class AdaptedClusterSingletonImpl(system: ActorSystem[_]) extends ClusterSingleton { require( diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AkkaClusterTypedSerializer.scala b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AkkaClusterTypedSerializer.scala index c3e031b5897..eb1bbc2a95a 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AkkaClusterTypedSerializer.scala +++ b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AkkaClusterTypedSerializer.scala @@ -16,9 +16,7 @@ import akka.cluster.typed.internal.receptionist.ClusterReceptionist.Entry import akka.remote.serialization.WrappedPayloadSupport import akka.serialization.{ BaseSerializer, SerializerWithStringManifest } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class AkkaClusterTypedSerializer(override val system: ExtendedActorSystem) extends SerializerWithStringManifest diff --git 
a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/delivery/ReliableDeliverySerializer.scala b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/delivery/ReliableDeliverySerializer.scala index adc61dd3e48..0f9325a01c5 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/delivery/ReliableDeliverySerializer.scala +++ b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/delivery/ReliableDeliverySerializer.scala @@ -25,9 +25,7 @@ import akka.serialization.BaseSerializer import akka.serialization.SerializerWithStringManifest import akka.util.ccompat.JavaConverters._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class ReliableDeliverySerializer(val system: akka.actor.ExtendedActorSystem) extends SerializerWithStringManifest with BaseSerializer { @@ -173,8 +171,8 @@ import akka.util.ccompat.JavaConverters._ val b = ReliableDelivery.State.newBuilder() b.setCurrentSeqNr(m.currentSeqNr) b.setHighestConfirmedSeqNr(m.highestConfirmedSeqNr) - b.addAllConfirmed(m.confirmedSeqNr.map { - case (qualifier, (seqNr, timestamp)) => durableQueueConfirmedToProto(qualifier, seqNr, timestamp) + b.addAllConfirmed(m.confirmedSeqNr.map { case (qualifier, (seqNr, timestamp)) => + durableQueueConfirmedToProto(qualifier, seqNr, timestamp) }.asJava) b.addAllUnconfirmed(m.unconfirmed.map(durableQueueMessageSentToProto).asJava) b.build().toByteArray() diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionist.scala b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionist.scala index 260565d57d2..e1459c7c6d2 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionist.scala +++ b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionist.scala @@ -103,23 +103,20 @@ private[typed] object ClusterReceptionist extends ReceptionistBehaviorProvider { 
if (tombstones.isEmpty) this else { val newTombstones: Map[ActorRef[_], Set[(AbstractServiceKey, Deadline)]] = - tombstones.foldLeft(tombstones) { - case (acc, (actorRef, entries)) => - val entriesToKeep = entries.filter { - case (_, deadline) => deadline.hasTimeLeft() - } - if (entriesToKeep.size == entries.size) acc - else if (entriesToKeep.isEmpty) acc - actorRef - else acc.updated(actorRef, entriesToKeep) + tombstones.foldLeft(tombstones) { case (acc, (actorRef, entries)) => + val entriesToKeep = entries.filter { case (_, deadline) => + deadline.hasTimeLeft() + } + if (entriesToKeep.size == entries.size) acc + else if (entriesToKeep.isEmpty) acc - actorRef + else acc.updated(actorRef, entriesToKeep) } if (newTombstones eq tombstones) this else copy(tombstones = newTombstones) } } - /** - * @return (reachable-nodes, all) - */ + /** @return (reachable-nodes, all) */ def activeActorRefsFor[T]( key: ServiceKey[T], selfUniqueAddress: UniqueAddress): (Set[ActorRef[T]], Set[ActorRef[T]]) = { @@ -273,13 +270,12 @@ private[typed] object ClusterReceptionist extends ReceptionistBehaviorProvider { def isOld(entry: Entry): Boolean = (now - entry.createdTimestamp) >= settings.pruneRemovedOlderThan.toMillis val removals = { - state.registry.allServices.foldLeft(Map.empty[AbstractServiceKey, Set[Entry]]) { - case (acc, (key, entries)) => - val removedEntries = - entries.filter(entry => isOnRemovedNode(entry) && (!onlyRemoveOldEntries || isOld(entry))) + state.registry.allServices.foldLeft(Map.empty[AbstractServiceKey, Set[Entry]]) { case (acc, (key, entries)) => + val removedEntries = + entries.filter(entry => isOnRemovedNode(entry) && (!onlyRemoveOldEntries || isOld(entry))) - if (removedEntries.isEmpty) acc // no change - else acc + (key -> removedEntries) + if (removedEntries.isEmpty) acc // no change + else acc + (key -> removedEntries) } } @@ -290,19 +286,18 @@ private[typed] object ClusterReceptionist extends ReceptionistBehaviorProvider { cluster.selfAddress, 
addresses.mkString(","), removals - .map { - case (key, entries) => key.asServiceKey.id -> entries.mkString("[", ", ", "]") + .map { case (key, entries) => + key.asServiceKey.id -> entries.mkString("[", ", ", "]") } .mkString(",")) // shard changes over the ddata keys they belong to val removalsPerDdataKey = state.registry.entriesPerDdataKey(removals) - removalsPerDdataKey.foreach { - case (ddataKey, removalForKey) => - replicator ! Replicator.Update(ddataKey, EmptyORMultiMap, settings.writeConsistency) { registry => - ServiceRegistry(registry).removeAll(removalForKey).toORMultiMap - } + removalsPerDdataKey.foreach { case (ddataKey, removalForKey) => + replicator ! Replicator.Update(ddataKey, EmptyORMultiMap, settings.writeConsistency) { registry => + ServiceRegistry(registry).removeAll(removalForKey).toORMultiMap + } } } @@ -333,8 +328,11 @@ private[typed] object ClusterReceptionist extends ReceptionistBehaviorProvider { case ReceptionistMessages.Register(key, serviceInstance, maybeReplyTo) => if (serviceInstance.path.address.hasLocalScope) { val entry = Entry(serviceInstance, setup.selfSystemUid)(System.currentTimeMillis()) - ctx.log - .debugN("ClusterReceptionist [{}] - Actor was registered: [{}] [{}]", cluster.selfAddress, key, entry) + ctx.log.debugN( + "ClusterReceptionist [{}] - Actor was registered: [{}] [{}]", + cluster.selfAddress, + key, + entry) // actor already watched after one service key registration if (!state.servicesPerActor.contains(serviceInstance)) ctx.watchWith(serviceInstance, LocalServiceActorTerminated(serviceInstance)) diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionistSettings.scala b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionistSettings.scala index ebf805f5fb0..6f5f4ab27e4 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionistSettings.scala +++ 
b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionistSettings.scala @@ -17,9 +17,7 @@ import akka.cluster.ddata.Replicator.WriteConsistency import akka.cluster.ddata.ReplicatorSettings import akka.util.Helpers.toRootLowerCase -/** - * Internal API - */ +/** Internal API */ @InternalApi private[akka] object ClusterReceptionistSettings { def apply(system: ActorSystem[_]): ClusterReceptionistSettings = @@ -52,9 +50,7 @@ private[akka] object ClusterReceptionistSettings { } } -/** - * Internal API - */ +/** Internal API */ @InternalApi private[akka] case class ClusterReceptionistSettings( writeConsistency: WriteConsistency, diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/Registry.scala b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/Registry.scala index 2a7467c3d6d..04c044aa9ba 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/Registry.scala +++ b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/Registry.scala @@ -12,9 +12,7 @@ import akka.cluster.UniqueAddress import akka.cluster.ddata.{ ORMultiMap, ORMultiMapKey, SelfUniqueAddress } import akka.cluster.typed.internal.receptionist.ClusterReceptionist.{ DDataKey, EmptyORMultiMap, Entry } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ShardedServiceRegistry { def apply(numberOfKeys: Int): ShardedServiceRegistry = { val emptyRegistries = (0 until numberOfKeys).map { n => @@ -31,9 +29,6 @@ import akka.cluster.typed.internal.receptionist.ClusterReceptionist.{ DDataKey, * * Two level structure for keeping service registry to be able to shard entries over multiple ddata keys (to not * get too large ddata messages) - * - - * */ @InternalApi private[akka] final case class ShardedServiceRegistry( serviceRegistries: Map[DDataKey, ServiceRegistry], @@ -59,9 +54,7 @@ import 
akka.cluster.typed.internal.receptionist.ClusterReceptionist.{ DDataKey, serviceRegistries(ddataKey).actorRefsFor(key) } - /** - * @return keys that has a registered service instance on the given `address` - */ + /** @return keys that has a registered service instance on the given `address` */ def keysFor(address: UniqueAddress)(implicit node: SelfUniqueAddress): Set[AbstractServiceKey] = serviceRegistries.valuesIterator.flatMap(_.keysFor(address)).toSet @@ -82,11 +75,10 @@ import akka.cluster.typed.internal.receptionist.ClusterReceptionist.{ DDataKey, def entriesPerDdataKey( entries: Map[AbstractServiceKey, Set[Entry]]): Map[DDataKey, Map[AbstractServiceKey, Set[Entry]]] = - entries.foldLeft(Map.empty[DDataKey, Map[AbstractServiceKey, Set[Entry]]]) { - case (acc, (key, entries)) => - val ddataKey = ddataKeyFor(key.asServiceKey) - val updated = acc.getOrElse(ddataKey, Map.empty) + (key -> entries) - acc + (ddataKey -> updated) + entries.foldLeft(Map.empty[DDataKey, Map[AbstractServiceKey, Set[Entry]]]) { case (acc, (key, entries)) => + val ddataKey = ddataKeyFor(key.asServiceKey) + val updated = acc.getOrElse(ddataKey, Map.empty) + (key -> entries) + acc + (ddataKey -> updated) } def addNode(node: UniqueAddress): ShardedServiceRegistry = @@ -103,9 +95,7 @@ import akka.cluster.typed.internal.receptionist.ClusterReceptionist.{ DDataKey, } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class ServiceRegistry(entries: ORMultiMap[ServiceKey[_], Entry]) extends AnyVal { // let's hide all the ugly casts we can in here @@ -128,12 +118,10 @@ import akka.cluster.typed.internal.receptionist.ClusterReceptionist.{ DDataKey, copy(entries = entries.removeBinding(node, key, value)) def removeAll(entries: Map[AbstractServiceKey, Set[Entry]])(implicit node: SelfUniqueAddress): ServiceRegistry = { - entries.foldLeft(this) { - case (acc, (key, entries)) => - entries.foldLeft(acc) { - case (innerAcc, entry) => - 
innerAcc.removeBinding[key.Protocol](key.asServiceKey, entry) - } + entries.foldLeft(this) { case (acc, (key, entries)) => + entries.foldLeft(acc) { case (innerAcc, entry) => + innerAcc.removeBinding[key.Protocol](key.asServiceKey, entry) + } } } @@ -141,9 +129,7 @@ import akka.cluster.typed.internal.receptionist.ClusterReceptionist.{ DDataKey, } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ServiceRegistry { final val Empty = ServiceRegistry(EmptyORMultiMap) diff --git a/akka-cluster-typed/src/multi-jvm/scala/akka/cluster/typed/ChunkLargeMessageSpec.scala b/akka-cluster-typed/src/multi-jvm/scala/akka/cluster/typed/ChunkLargeMessageSpec.scala index edc0cac5d67..5d780fe385f 100644 --- a/akka-cluster-typed/src/multi-jvm/scala/akka/cluster/typed/ChunkLargeMessageSpec.scala +++ b/akka-cluster-typed/src/multi-jvm/scala/akka/cluster/typed/ChunkLargeMessageSpec.scala @@ -26,14 +26,17 @@ object ChunkLargeMessageSpec extends MultiNodeConfig { val first = role("first") val second = role("second") - commonConfig(ConfigFactory.parseString(""" + commonConfig( + ConfigFactory + .parseString(""" akka.loglevel = INFO #akka.serialization.jackson.verbose-debug-logging = on akka.remote.artery { advanced.inbound-lanes = 1 advanced.maximum-frame-size = 2 MB } - """).withFallback(MultiNodeClusterSpec.clusterConfig)) + """) + .withFallback(MultiNodeClusterSpec.clusterConfig)) object Producer { sealed trait Command @@ -86,18 +89,17 @@ object ChunkLargeMessageSpec extends MultiNodeConfig { Behaviors.stopped } } - .receiveSignal { - case (context, PostStop) => - if (histogram.getTotalCount > 0) { - context.log.info( - s"=== Latency for [${context.self.path.name}] " + - f"50%%ile: ${percentile(50.0)}%.0f µs, " + - f"90%%ile: ${percentile(90.0)}%.0f µs, " + - f"99%%ile: ${percentile(99.0)}%.0f µs") - println(s"Histogram for [${context.self.path.name}] of RTT latencies in microseconds.") - histogram.outputPercentileDistribution(System.out, 1000.0) - } - 
Behaviors.same + .receiveSignal { case (context, PostStop) => + if (histogram.getTotalCount > 0) { + context.log.info( + s"=== Latency for [${context.self.path.name}] " + + f"50%%ile: ${percentile(50.0)}%.0f µs, " + + f"90%%ile: ${percentile(90.0)}%.0f µs, " + + f"99%%ile: ${percentile(99.0)}%.0f µs") + println(s"Histogram for [${context.self.path.name}] of RTT latencies in microseconds.") + histogram.outputPercentileDistribution(System.out, 1000.0) + } + Behaviors.same } } diff --git a/akka-cluster-typed/src/multi-jvm/scala/akka/cluster/typed/MultiDcClusterSingletonSpec.scala b/akka-cluster-typed/src/multi-jvm/scala/akka/cluster/typed/MultiDcClusterSingletonSpec.scala index 7ae0495f0a1..1a8cb23761b 100644 --- a/akka-cluster-typed/src/multi-jvm/scala/akka/cluster/typed/MultiDcClusterSingletonSpec.scala +++ b/akka-cluster-typed/src/multi-jvm/scala/akka/cluster/typed/MultiDcClusterSingletonSpec.scala @@ -19,9 +19,12 @@ object MultiDcClusterSingletonSpecConfig extends MultiNodeConfig { val second = role("second") val third = role("third") - commonConfig(ConfigFactory.parseString(""" + commonConfig( + ConfigFactory + .parseString(""" akka.loglevel = DEBUG - """).withFallback(MultiNodeClusterSpec.clusterConfig)) + """) + .withFallback(MultiNodeClusterSpec.clusterConfig)) nodeConfig(first)(ConfigFactory.parseString(""" akka.cluster.multi-data-center.self-data-center = "dc1" diff --git a/akka-cluster-typed/src/multi-jvm/scala/akka/cluster/typed/PubSubSpec.scala b/akka-cluster-typed/src/multi-jvm/scala/akka/cluster/typed/PubSubSpec.scala index f52e569972e..7cab48f7464 100644 --- a/akka-cluster-typed/src/multi-jvm/scala/akka/cluster/typed/PubSubSpec.scala +++ b/akka-cluster-typed/src/multi-jvm/scala/akka/cluster/typed/PubSubSpec.scala @@ -21,9 +21,12 @@ object PubSubSpecConfig extends MultiNodeConfig { val second = role("second") val third = role("third") - commonConfig(ConfigFactory.parseString(""" + commonConfig( + ConfigFactory + .parseString(""" akka.loglevel = INFO - 
""").withFallback(MultiNodeClusterSpec.clusterConfig)) + """) + .withFallback(MultiNodeClusterSpec.clusterConfig)) nodeConfig(first)(ConfigFactory.parseString(""" akka.cluster.multi-data-center.self-data-center = "dc1" @@ -66,10 +69,10 @@ abstract class PubSubSpec extends MultiNodeSpec(PubSubSpecConfig) with MultiNode "see nodes with subscribers registered" in { val statsProbe = TestProbe[Topic.TopicStats]() - statsProbe.awaitAssert({ + statsProbe.awaitAssert { topic ! Topic.GetTopicStats[Message](statsProbe.ref) statsProbe.receiveMessage().topicInstanceCount should ===(3) - }) + } enterBarrier("topic instances with subscribers seen") } @@ -91,10 +94,10 @@ abstract class PubSubSpec extends MultiNodeSpec(PubSubSpecConfig) with MultiNode topic ! Topic.Unsubscribe(topicProbe.ref) // unsubscribe does not need to be gossiped before it is effective val statsProbe = TestProbe[Topic.TopicStats]() - statsProbe.awaitAssert({ + statsProbe.awaitAssert { topic ! Topic.GetTopicStats[Message](statsProbe.ref) statsProbe.receiveMessage().topicInstanceCount should ===(2) - }) + } } enterBarrier("unsubscribed") Thread.sleep(200) // but it needs to reach the topic diff --git a/akka-cluster-typed/src/multi-jvm/scala/akka/cluster/typed/internal/ClusterReceptionistUnreachabilitySpec.scala b/akka-cluster-typed/src/multi-jvm/scala/akka/cluster/typed/internal/ClusterReceptionistUnreachabilitySpec.scala index 16fdce7c9d6..675b1625f50 100644 --- a/akka-cluster-typed/src/multi-jvm/scala/akka/cluster/typed/internal/ClusterReceptionistUnreachabilitySpec.scala +++ b/akka-cluster-typed/src/multi-jvm/scala/akka/cluster/typed/internal/ClusterReceptionistUnreachabilitySpec.scala @@ -26,9 +26,12 @@ object ClusterReceptionistUnreachabilitySpecConfig extends MultiNodeConfig { val second = role("second") val third = role("third") - commonConfig(ConfigFactory.parseString(""" + commonConfig( + ConfigFactory + .parseString(""" akka.loglevel = INFO - """).withFallback(MultiNodeClusterSpec.clusterConfig)) + 
""") + .withFallback(MultiNodeClusterSpec.clusterConfig)) testTransport(on = true) } @@ -67,22 +70,25 @@ abstract class ClusterReceptionistUnreachabilitySpec } "register a service" in { - val localServiceRef = spawn(Behaviors.receiveMessage[String] { - case msg => + val localServiceRef = spawn( + Behaviors.receiveMessage[String] { case msg => probe.ref ! msg Behaviors.same - }, "my-service") + }, + "my-service") typedSystem.receptionist ! Receptionist.Register(MyServiceKey, localServiceRef) enterBarrier("all registered") } "see registered services" in { - awaitAssert({ - val listing = receptionistProbe.expectMessageType[Receptionist.Listing] - listing.serviceInstances(MyServiceKey) should have size (3) - listing.allServiceInstances(MyServiceKey) should have size (3) - listing.servicesWereAddedOrRemoved should ===(true) - }, 20.seconds) + awaitAssert( + { + val listing = receptionistProbe.expectMessageType[Receptionist.Listing] + listing.serviceInstances(MyServiceKey) should have size 3 + listing.allServiceInstances(MyServiceKey) should have size 3 + listing.servicesWereAddedOrRemoved should ===(true) + }, + 20.seconds) enterBarrier("all seen registered") } @@ -96,21 +102,25 @@ abstract class ClusterReceptionistUnreachabilitySpec runOn(first, third) { // assert service on 2 is not in listing but in all and flag is false - awaitAssert({ - val listing = receptionistProbe.expectMessageType[Receptionist.Listing] - listing.serviceInstances(MyServiceKey) should have size (2) - listing.allServiceInstances(MyServiceKey) should have size (3) - listing.servicesWereAddedOrRemoved should ===(false) - }, 20.seconds) + awaitAssert( + { + val listing = receptionistProbe.expectMessageType[Receptionist.Listing] + listing.serviceInstances(MyServiceKey) should have size 2 + listing.allServiceInstances(MyServiceKey) should have size 3 + listing.servicesWereAddedOrRemoved should ===(false) + }, + 20.seconds) } runOn(second) { // assert service on 1 and 3 is not in listing but in all and 
flag is false - awaitAssert({ - val listing = receptionistProbe.expectMessageType[Receptionist.Listing] - listing.serviceInstances(MyServiceKey) should have size (1) - listing.allServiceInstances(MyServiceKey) should have size (3) - listing.servicesWereAddedOrRemoved should ===(false) - }, 20.seconds) + awaitAssert( + { + val listing = receptionistProbe.expectMessageType[Receptionist.Listing] + listing.serviceInstances(MyServiceKey) should have size 1 + listing.allServiceInstances(MyServiceKey) should have size 3 + listing.servicesWereAddedOrRemoved should ===(false) + }, + 20.seconds) } enterBarrier("all seen unreachable") } @@ -122,12 +132,12 @@ abstract class ClusterReceptionistUnreachabilitySpec testConductor.passThrough(third, second, Direction.Both).await } - awaitAssert({ + awaitAssert { val listing = receptionistProbe.expectMessageType[Receptionist.Listing] - listing.serviceInstances(MyServiceKey) should have size (3) - listing.allServiceInstances(MyServiceKey) should have size (3) + listing.serviceInstances(MyServiceKey) should have size 3 + listing.allServiceInstances(MyServiceKey) should have size 3 listing.servicesWereAddedOrRemoved should ===(false) - }) + } enterBarrier("all seen reachable-again") } diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/ddata/typed/scaladsl/ReplicatorCompileOnlyTest.scala b/akka-cluster-typed/src/test/scala/akka/cluster/ddata/typed/scaladsl/ReplicatorCompileOnlyTest.scala index 9798d1ffeec..6e0c0c6e6dd 100644 --- a/akka-cluster-typed/src/test/scala/akka/cluster/ddata/typed/scaladsl/ReplicatorCompileOnlyTest.scala +++ b/akka-cluster-typed/src/test/scala/akka/cluster/ddata/typed/scaladsl/ReplicatorCompileOnlyTest.scala @@ -50,7 +50,7 @@ object ReplicatorCompileOnlyTest { val replyTo: ActorRef[Int] = ??? 
val key = GCounterKey("counter") - //#curried-update + // #curried-update // alternative way to define the `createRequest` function // Replicator.Update instance has a curried `apply` method replicatorAdapter.askUpdate( @@ -61,9 +61,9 @@ object ReplicatorCompileOnlyTest { replicatorAdapter.askUpdate( askReplyTo => Replicator.Update(key, GCounter.empty, Replicator.WriteLocal, askReplyTo)(_ :+ 1), InternalUpdateResponse.apply) - //#curried-update + // #curried-update - //#curried-get + // #curried-get // alternative way to define the `createRequest` function // Replicator.Get instance has a curried `apply` method replicatorAdapter.askGet(Replicator.Get(key, Replicator.ReadLocal), value => InternalGetResponse(value, replyTo)) @@ -72,7 +72,7 @@ object ReplicatorCompileOnlyTest { replicatorAdapter.askGet( askReplyTo => Replicator.Get(key, Replicator.ReadLocal, askReplyTo), value => InternalGetResponse(value, replyTo)) - //#curried-get + // #curried-get } def shouldHaveUnapplyForResponseTypes(): Unit = { diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/typed/ActorSystemSpec.scala b/akka-cluster-typed/src/test/scala/akka/cluster/typed/ActorSystemSpec.scala index bbe8f819c5a..8d1f1da21fc 100644 --- a/akka-cluster-typed/src/test/scala/akka/cluster/typed/ActorSystemSpec.scala +++ b/akka-cluster-typed/src/test/scala/akka/cluster/typed/ActorSystemSpec.scala @@ -109,10 +109,13 @@ class ActorSystemSpec "An ActorSystem" must { "start the guardian actor and terminate when it terminates" in { - withSystem("a", Behaviors.receiveMessage[Probe] { p => - p.replyTo ! p.message - Behaviors.stopped - }, doTerminate = false) { sys => + withSystem( + "a", + Behaviors.receiveMessage[Probe] { p => + p.replyTo ! p.message + Behaviors.stopped + }, + doTerminate = false) { sys => val inbox = TestInbox[String]("a") sys ! 
Probe("hello", inbox.ref) eventually { @@ -138,14 +141,15 @@ class ActorSystemSpec "terminate the guardian actor" in { val inbox = TestInbox[String]("terminate") - val sys = system(Behaviors.setup[Any] { _ => - inbox.ref ! "started" - Behaviors.receiveSignal { - case (_, PostStop) => + val sys = system( + Behaviors.setup[Any] { _ => + inbox.ref ! "started" + Behaviors.receiveSignal { case (_, PostStop) => inbox.ref ! "done" Behaviors.same - } - }, "terminate") + } + }, + "terminate") eventually { inbox.hasMessages should ===(true) @@ -160,9 +164,11 @@ class ActorSystemSpec } "be able to terminate immediately" in { - val sys = system(Behaviors.receiveMessage[Probe] { _ => - Behaviors.unhandled - }, "terminate") + val sys = system( + Behaviors.receiveMessage[Probe] { _ => + Behaviors.unhandled + }, + "terminate") // for this case the guardian might not have been started before // the system terminates and then it will not receive PostStop, which // is OK since it wasn't really started yet diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterDispatcherSelectorSpec.scala b/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterDispatcherSelectorSpec.scala index b3c119e74b6..98828b4c1d7 100644 --- a/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterDispatcherSelectorSpec.scala +++ b/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterDispatcherSelectorSpec.scala @@ -9,7 +9,9 @@ import com.typesafe.config.ConfigFactory import akka.actor.typed.scaladsl.DispatcherSelectorSpec class ClusterDispatcherSelectorSpec - extends DispatcherSelectorSpec(ConfigFactory.parseString(""" + extends DispatcherSelectorSpec( + ConfigFactory + .parseString(""" akka.actor.provider = cluster """).withFallback(DispatcherSelectorSpec.config)) { diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterSingletonApiSpec.scala b/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterSingletonApiSpec.scala index debeb7f624e..981791d2ec6 
100644 --- a/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterSingletonApiSpec.scala +++ b/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterSingletonApiSpec.scala @@ -58,9 +58,11 @@ class ClusterSingletonApiSpec val system2 = akka.actor.ActorSystem( system.name, - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" akka.cluster.roles = ["singleton"] - """).withFallback(system.settings.config)) + """) + .withFallback(system.settings.config)) val adaptedSystem2 = system2.toTyped val clusterNode2 = Cluster(adaptedSystem2) @@ -95,15 +97,19 @@ class ClusterSingletonApiSpec val node1PongProbe = TestProbe[Pong.type]()(system) val node2PongProbe = TestProbe[Pong.type]()(adaptedSystem2) - node1PongProbe.awaitAssert({ - node1ref ! Ping(node1PongProbe.ref) - node1PongProbe.expectMessage(Pong) - }, 3.seconds) - - node2PongProbe.awaitAssert({ - node2ref ! Ping(node2PongProbe.ref) - node2PongProbe.expectMessage(Pong) - }, 3.seconds) + node1PongProbe.awaitAssert( + { + node1ref ! Ping(node1PongProbe.ref) + node1PongProbe.expectMessage(Pong) + }, + 3.seconds) + + node2PongProbe.awaitAssert( + { + node2ref ! Ping(node2PongProbe.ref) + node2PongProbe.expectMessage(Pong) + }, + 3.seconds) } } diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterSingletonPoisonPillSpec.scala b/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterSingletonPoisonPillSpec.scala index 0f320349f07..9445e0e1172 100644 --- a/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterSingletonPoisonPillSpec.scala +++ b/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterSingletonPoisonPillSpec.scala @@ -20,10 +20,9 @@ import akka.cluster.typed.ClusterSingletonPoisonPillSpec.GetSelf object ClusterSingletonPoisonPillSpec { final case class GetSelf(replyTo: ActorRef[ActorRef[Any]]) - val sneakyBehavior: Behavior[GetSelf] = Behaviors.receive { - case (ctx, GetSelf(replyTo)) => - replyTo ! 
ctx.self.unsafeUpcast[Any] - Behaviors.same + val sneakyBehavior: Behavior[GetSelf] = Behaviors.receive { case (ctx, GetSelf(replyTo)) => + replyTo ! ctx.self.unsafeUpcast[Any] + Behaviors.same } } diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/typed/GroupRouterSpec.scala b/akka-cluster-typed/src/test/scala/akka/cluster/typed/GroupRouterSpec.scala index 364fe50a315..f889e9a6b76 100644 --- a/akka-cluster-typed/src/test/scala/akka/cluster/typed/GroupRouterSpec.scala +++ b/akka-cluster-typed/src/test/scala/akka/cluster/typed/GroupRouterSpec.scala @@ -39,10 +39,9 @@ object GroupRouterSpec { case class Pong(workerActor: ActorRef[_]) extends CborSerializable case class Ping(replyTo: ActorRef[Pong]) extends CborSerializable - def apply(): Behavior[Ping] = Behaviors.receive { - case (ctx, Ping(replyTo)) => - replyTo ! Pong(ctx.self) - Behaviors.same + def apply(): Behavior[Ping] = Behaviors.receive { case (ctx, Ping(replyTo)) => + replyTo ! Pong(ctx.self) + Behaviors.same } } @@ -55,23 +54,22 @@ object GroupRouterSpec { requestedPings: Int, tellMeWhenDone: ActorRef[DonePinging]): Behavior[Command] = Behaviors.setup { ctx => - val pongAdapter = ctx.messageAdapter[PingActor.Pong] { - case PingActor.Pong(ref) => SawPong(ref) + val pongAdapter = ctx.messageAdapter[PingActor.Pong] { case PingActor.Pong(ref) => + SawPong(ref) } (0 to requestedPings).foreach(_ => router ! PingActor.Ping(pongAdapter)) var pongs = 0 var uniqueWorkers = Set.empty[ActorRef[_]] - Behaviors.receiveMessage { - case SawPong(worker) => - pongs += 1 - uniqueWorkers += worker - if (pongs >= requestedPings) { - tellMeWhenDone ! DonePinging(pongs, uniqueWorkers) - Behaviors.stopped - } else { - Behaviors.same - } + Behaviors.receiveMessage { case SawPong(worker) => + pongs += 1 + uniqueWorkers += worker + if (pongs >= requestedPings) { + tellMeWhenDone ! 
DonePinging(pongs, uniqueWorkers) + Behaviors.stopped + } else { + Behaviors.same + } } } } @@ -87,15 +85,13 @@ object GroupRouterSpec { class GroupRouterSpec extends ScalaTestWithActorTestKit(GroupRouterSpec.config) with AnyWordSpecLike with LogCapturing { import GroupRouterSpec._ - /** - * Starts a new pair of nodes, forms a cluster, runs a ping-pong session and hands the result to the test for verification - */ + /** Starts a new pair of nodes, forms a cluster, runs a ping-pong session and hands the result to the test for verification */ def checkGroupRouterBehavior(groupRouter: GroupRouter[PingActor.Ping], settings: GroupRouterSpecSettings): Result = { val resultProbe = testKit.createTestProbe[Pinger.DonePinging]() - val system1 = ActorSystem(Behaviors.setup[Receptionist.Listing] { - ctx => + val system1 = ActorSystem( + Behaviors.setup[Receptionist.Listing] { ctx => (0 until settings.node1WorkerCount).foreach { i => val worker = ctx.spawn(PingActor(), s"ping-pong-$i") ctx.system.receptionist ! Receptionist.Register(pingPongKey, worker) @@ -113,15 +109,20 @@ class GroupRouterSpec extends ScalaTestWithActorTestKit(GroupRouterSpec.config) case _ => Behaviors.same } - }, system.name, config) + }, + system.name, + config) - val system2 = ActorSystem(Behaviors.setup[Unit] { ctx => - (0 until settings.node2WorkerCount).foreach { i => - val worker = ctx.spawn(PingActor(), s"ping-pong-$i") - ctx.system.receptionist ! Receptionist.Register(pingPongKey, worker) - } - Behaviors.empty - }, system.name, config) + val system2 = ActorSystem( + Behaviors.setup[Unit] { ctx => + (0 until settings.node2WorkerCount).foreach { i => + val worker = ctx.spawn(PingActor(), s"ping-pong-$i") + ctx.system.receptionist ! 
Receptionist.Register(pingPongKey, worker) + } + Behaviors.empty + }, + system.name, + config) try { val node1 = Cluster(system1) @@ -145,33 +146,31 @@ class GroupRouterSpec extends ScalaTestWithActorTestKit(GroupRouterSpec.config) // default is to not preferLocalRoutees List( "random" -> Routers.group(pingPongKey).withRandomRouting(), // default group is same as this - "round robin" -> Routers.group(pingPongKey).withRoundRobinRouting()).foreach { - case (strategy, groupRouter) => - s"use all reachable routees if preferLocalRoutees is not enabled, strategy $strategy" in { - val settings = GroupRouterSpecSettings(node1WorkerCount = 2, node2WorkerCount = 2, messageCount = 100) - val result = checkGroupRouterBehavior(groupRouter, settings) - result.actorsInSystem1 should ===(settings.node1WorkerCount) - result.actorsInSystem2 should ===(settings.node2WorkerCount) - } + "round robin" -> Routers.group(pingPongKey).withRoundRobinRouting()).foreach { case (strategy, groupRouter) => + s"use all reachable routees if preferLocalRoutees is not enabled, strategy $strategy" in { + val settings = GroupRouterSpecSettings(node1WorkerCount = 2, node2WorkerCount = 2, messageCount = 100) + val result = checkGroupRouterBehavior(groupRouter, settings) + result.actorsInSystem1 should ===(settings.node1WorkerCount) + result.actorsInSystem2 should ===(settings.node2WorkerCount) + } } List( "random" -> Routers.group(pingPongKey).withRandomRouting(true), - "round robin" -> Routers.group(pingPongKey).withRoundRobinRouting(true)).foreach { - case (strategy, groupRouter) => - s"only use local routees if preferLocalRoutees is enabled and there are local routees, strategy $strategy" in { - val settings = GroupRouterSpecSettings(node1WorkerCount = 2, node2WorkerCount = 2, messageCount = 100) - val result = checkGroupRouterBehavior(groupRouter, settings) - result.actorsInSystem1 should ===(settings.node1WorkerCount) - result.actorsInSystem2 should ===(0) - } + "round robin" -> 
Routers.group(pingPongKey).withRoundRobinRouting(true)).foreach { case (strategy, groupRouter) => + s"only use local routees if preferLocalRoutees is enabled and there are local routees, strategy $strategy" in { + val settings = GroupRouterSpecSettings(node1WorkerCount = 2, node2WorkerCount = 2, messageCount = 100) + val result = checkGroupRouterBehavior(groupRouter, settings) + result.actorsInSystem1 should ===(settings.node1WorkerCount) + result.actorsInSystem2 should ===(0) + } - s"use remote routees if preferLocalRoutees is enabled but there is no local routees, strategy $strategy" in { - val settings = GroupRouterSpecSettings(node1WorkerCount = 0, node2WorkerCount = 2, messageCount = 100) - val result = checkGroupRouterBehavior(groupRouter, settings) - result.actorsInSystem1 should ===(0) - result.actorsInSystem2 should ===(settings.node2WorkerCount) - } + s"use remote routees if preferLocalRoutees is enabled but there is no local routees, strategy $strategy" in { + val settings = GroupRouterSpecSettings(node1WorkerCount = 0, node2WorkerCount = 2, messageCount = 100) + val result = checkGroupRouterBehavior(groupRouter, settings) + result.actorsInSystem1 should ===(0) + result.actorsInSystem2 should ===(settings.node2WorkerCount) + } } } } diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/typed/RemoteDeployNotAllowedSpec.scala b/akka-cluster-typed/src/test/scala/akka/cluster/typed/RemoteDeployNotAllowedSpec.scala index 63a2b56c869..d924cb57aaf 100644 --- a/akka-cluster-typed/src/test/scala/akka/cluster/typed/RemoteDeployNotAllowedSpec.scala +++ b/akka-cluster-typed/src/test/scala/akka/cluster/typed/RemoteDeployNotAllowedSpec.scala @@ -31,13 +31,15 @@ object RemoteDeployNotAllowedSpec { } """) - def configWithRemoteDeployment(otherSystemPort: Int) = ConfigFactory.parseString(s""" + def configWithRemoteDeployment(otherSystemPort: Int) = ConfigFactory + .parseString(s""" akka.actor.deployment { "/*" { remote = 
"akka://sampleActorSystem@127.0.0.1:$otherSystemPort" } } - """).withFallback(config) + """) + .withFallback(config) } class RemoteDeployNotAllowedSpec @@ -61,9 +63,11 @@ class RemoteDeployNotAllowedSpec case SpawnChild(name) => // this should throw try { - ctx.spawn(Behaviors.setup[AnyRef] { _ => - Behaviors.empty - }, name) + ctx.spawn( + Behaviors.setup[AnyRef] { _ => + Behaviors.empty + }, + name) } catch { case ex: Exception => probe.ref ! ex } diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/typed/RemoteMessageSpec.scala b/akka-cluster-typed/src/test/scala/akka/cluster/typed/RemoteMessageSpec.scala index 2081870ec3d..15426e50528 100644 --- a/akka-cluster-typed/src/test/scala/akka/cluster/typed/RemoteMessageSpec.scala +++ b/akka-cluster-typed/src/test/scala/akka/cluster/typed/RemoteMessageSpec.scala @@ -66,10 +66,12 @@ class RemoteMessageSpec extends AkkaSpec(RemoteMessageSpec.config) { ActorRefResolver(typedSystem2).resolveActorRef[Ping](remoteRefStr) val pongPromise = Promise[Done]() - val recipient = system2.spawn(Behaviors.receive[String] { (_, _) => - pongPromise.success(Done) - Behaviors.stopped - }, "recipient") + val recipient = system2.spawn( + Behaviors.receive[String] { (_, _) => + pongPromise.success(Done) + Behaviors.stopped + }, + "recipient") remoteRef ! 
Ping(recipient) pingPromise.future.futureValue should ===(Done) diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/typed/internal/delivery/ReliableDeliverySerializerSpec.scala b/akka-cluster-typed/src/test/scala/akka/cluster/typed/internal/delivery/ReliableDeliverySerializerSpec.scala index 6cbb17e740d..b40a5da2516 100644 --- a/akka-cluster-typed/src/test/scala/akka/cluster/typed/internal/delivery/ReliableDeliverySerializerSpec.scala +++ b/akka-cluster-typed/src/test/scala/akka/cluster/typed/internal/delivery/ReliableDeliverySerializerSpec.scala @@ -65,16 +65,15 @@ class ReliableDeliverySerializerSpec extends ScalaTestWithActorTestKit with AnyW ChunkedMessage(ByteString.fromString("abc"), true, true, 20, ""), false, "", - timestamp)).foreach { - case (scenario, item) => - s"resolve serializer for $scenario" in { - val serializer = SerializationExtension(classicSystem) - serializer.serializerFor(item.getClass).getClass should be(classOf[ReliableDeliverySerializer]) - } + timestamp)).foreach { case (scenario, item) => + s"resolve serializer for $scenario" in { + val serializer = SerializationExtension(classicSystem) + serializer.serializerFor(item.getClass).getClass should be(classOf[ReliableDeliverySerializer]) + } - s"serialize and de-serialize $scenario" in { - verifySerialization(item) - } + s"serialize and de-serialize $scenario" in { + verifySerialization(item) + } } } diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionistSpec.scala b/akka-cluster-typed/src/test/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionistSpec.scala index c032c1fa1f1..296d3458d65 100644 --- a/akka-cluster-typed/src/test/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionistSpec.scala +++ b/akka-cluster-typed/src/test/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionistSpec.scala @@ -197,10 +197,12 @@ class ClusterReceptionistSpec extends AnyWordSpec with Matchers with LogCapturin 
clusterNode1.manager ! Leave(clusterNode2.selfMember.address) } - regProbe1.awaitAssert({ - // we will also potentially get an update that the service was unreachable before the expected one - regProbe1.expectMessage(10.seconds, Listing(PingKey, Set(service1))) - }, 10.seconds) + regProbe1.awaitAssert( + { + // we will also potentially get an update that the service was unreachable before the expected one + regProbe1.expectMessage(10.seconds, Listing(PingKey, Set(service1))) + }, + 10.seconds) // register another after removal val service1b = testKit1.spawn(pingPongBehavior) @@ -252,10 +254,12 @@ class ClusterReceptionistSpec extends AnyWordSpec with Matchers with LogCapturin clusterNode2.manager ! Down(clusterNode1.selfMember.address) // service1 removed - regProbe2.awaitAssert({ - // we will also potentially get an update that the service was unreachable before the expected one - regProbe2.expectMessage(10.seconds, Listing(PingKey, Set(service2))) - }, 10.seconds) + regProbe2.awaitAssert( + { + // we will also potentially get an update that the service was unreachable before the expected one + regProbe2.expectMessage(10.seconds, Listing(PingKey, Set(service2))) + }, + 10.seconds) } finally { testKit1.shutdownTestKit() testKit2.shutdownTestKit() @@ -310,11 +314,13 @@ class ClusterReceptionistSpec extends AnyWordSpec with Matchers with LogCapturin system2.terminate() Await.ready(system2.whenTerminated, 10.seconds) clusterNode1.manager ! 
Down(clusterNode2.selfMember.address) - regProbe1.awaitAssert({ + regProbe1.awaitAssert( + { - // we will also potentially get an update that the service was unreachable before the expected one - regProbe1.expectMessage(10.seconds, Listing(PingKey, Set.empty[ActorRef[PingProtocol]])) - }, 10.seconds) + // we will also potentially get an update that the service was unreachable before the expected one + regProbe1.expectMessage(10.seconds, Listing(PingKey, Set.empty[ActorRef[PingProtocol]])) + }, + 10.seconds) } finally { testKit1.shutdownTestKit() testKit2.shutdownTestKit() @@ -358,11 +364,13 @@ class ClusterReceptionistSpec extends AnyWordSpec with Matchers with LogCapturin val testKit3 = ActorTestKit( system1.name, - ConfigFactory.parseString(s""" + ConfigFactory + .parseString(s""" akka.remote.artery.canonical.port = ${clusterNode2.selfMember.address.port.get} # retry joining when existing member removed akka.cluster.retry-unsuccessful-join-after = 1s - """).withFallback(config)) + """) + .withFallback(config)) try { val system3 = testKit3.system @@ -384,11 +392,10 @@ class ClusterReceptionistSpec extends AnyWordSpec with Matchers with LogCapturin // make sure it joined fine and node1 has upped it regProbe1.awaitAssert( { - clusterNode1.state.members.exists( - m => - m.uniqueAddress == clusterNode3.selfMember.uniqueAddress && - m.status == MemberStatus.Up && - !clusterNode1.state.unreachable(m)) should ===(true) + clusterNode1.state.members.exists(m => + m.uniqueAddress == clusterNode3.selfMember.uniqueAddress && + m.status == MemberStatus.Up && + !clusterNode1.state.unreachable(m)) should ===(true) }, 10.seconds) @@ -426,7 +433,8 @@ class ClusterReceptionistSpec extends AnyWordSpec with Matchers with LogCapturin GHExcludeAeronTest) in { val testKit1 = ActorTestKit( "ClusterReceptionistSpec-test-7", - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" akka.cluster { failure-detector.acceptable-heartbeat-pause = 20s } @@ -434,7 +442,8 @@ class 
ClusterReceptionistSpec extends AnyWordSpec with Matchers with LogCapturin # it can be stressed more by using all write-consistency = all } - """).withFallback(ClusterReceptionistSpec.config)) + """) + .withFallback(ClusterReceptionistSpec.config)) val system1 = testKit1.system val testKit2 = ActorTestKit(system1.name, system1.settings.config) val system2 = testKit2.system @@ -473,9 +482,11 @@ class ClusterReceptionistSpec extends AnyWordSpec with Matchers with LogCapturin val testKit3 = ActorTestKit( system1.name, - ConfigFactory.parseString(s""" + ConfigFactory + .parseString(s""" akka.remote.artery.canonical.port = ${clusterNode2.selfMember.address.port.get} - """).withFallback(config)) + """) + .withFallback(config)) try { val system3 = testKit3.system @@ -527,11 +538,13 @@ class ClusterReceptionistSpec extends AnyWordSpec with Matchers with LogCapturin } "not lose removals on concurrent updates to same key".taggedAs(LongRunningTest, GHExcludeAeronTest) in { - val config = ConfigFactory.parseString(""" + val config = ConfigFactory + .parseString(""" # disable delta propagation so we can have repeatable concurrent writes # without delta reaching between nodes already akka.cluster.distributed-data.delta-crdt.enabled=false - """).withFallback(ClusterReceptionistSpec.config) + """) + .withFallback(ClusterReceptionistSpec.config) val testKit1 = ActorTestKit("ClusterReceptionistSpec-test-8", config) val system1 = testKit1.system val testKit2 = ActorTestKit(system1.name, system1.settings.config) @@ -550,12 +563,14 @@ class ClusterReceptionistSpec extends AnyWordSpec with Matchers with LogCapturin regProbe1.awaitAssert(clusterNode1.state.members.count(_.status == MemberStatus.Up) should ===(2)) // one actor on each node up front - val actor1 = testKit1.spawn(Behaviors.receive[AnyRef] { - case (ctx, "stop") => - ctx.log.info("Stopping") - Behaviors.stopped - case _ => Behaviors.same - }, "actor1") + val actor1 = testKit1.spawn( + Behaviors.receive[AnyRef] { + case (ctx, 
"stop") => + ctx.log.info("Stopping") + Behaviors.stopped + case _ => Behaviors.same + }, + "actor1") val actor2 = testKit2.spawn(Behaviors.empty[AnyRef], "actor2") system1.receptionist ! Register(TheKey, actor1) @@ -716,7 +731,9 @@ class ClusterReceptionistSpec extends AnyWordSpec with Matchers with LogCapturin } - "handle concurrent unregistration and registration on different nodes".taggedAs(LongRunningTest, GHExcludeAeronTest) in { + "handle concurrent unregistration and registration on different nodes".taggedAs( + LongRunningTest, + GHExcludeAeronTest) in { // this covers the fact that with ddata a removal can be lost val testKit1 = ActorTestKit("ClusterReceptionistSpec-test-12", ClusterReceptionistSpec.config) val system1 = testKit1.system @@ -778,13 +795,15 @@ class ClusterReceptionistSpec extends AnyWordSpec with Matchers with LogCapturin "notify subscribers when registering and joining simultaneously".taggedAs(LongRunningTest, GHExcludeAeronTest) in { // failing test reproducer for issue #28792 // It's possible that the registry entry from the ddata update arrives before MemberJoined. 
- val config = ConfigFactory.parseString(""" + val config = ConfigFactory + .parseString(""" # quick dissemination to increase the chance of the race condition akka.cluster.typed.receptionist.distributed-data.write-consistency = all akka.cluster.typed.receptionist.distributed-data.gossip-interval = 500ms # run the RemoveTick cleanup often to exercise that scenario akka.cluster.typed.receptionist.pruning-interval = 50ms - """).withFallback(ClusterReceptionistSpec.config) + """) + .withFallback(ClusterReceptionistSpec.config) val numberOfNodes = 6 // use 9 or more to stress it more val testKits = Vector.fill(numberOfNodes)(ActorTestKit("ClusterReceptionistSpec-13", config)) try { diff --git a/akka-cluster-typed/src/test/scala/docs/akka/cluster/ddata/typed/scaladsl/ReplicatorDocSpec.scala b/akka-cluster-typed/src/test/scala/docs/akka/cluster/ddata/typed/scaladsl/ReplicatorDocSpec.scala index 8df0c11b8bd..1de4a5bb023 100644 --- a/akka-cluster-typed/src/test/scala/docs/akka/cluster/ddata/typed/scaladsl/ReplicatorDocSpec.scala +++ b/akka-cluster-typed/src/test/scala/docs/akka/cluster/ddata/typed/scaladsl/ReplicatorDocSpec.scala @@ -45,16 +45,16 @@ object ReplicatorDocSpec { def apply(key: GCounterKey): Behavior[Command] = Behaviors.setup[Command] { context => - //#selfUniqueAddress + // #selfUniqueAddress implicit val node: SelfUniqueAddress = DistributedData(context.system).selfUniqueAddress - //#selfUniqueAddress + // #selfUniqueAddress // adapter that turns the response messages from the replicator into our own protocol DistributedData.withReplicatorMessageAdapter[Command, GCounter] { replicatorAdapter => - //#subscribe + // #subscribe // Subscribe to changes of the given `key`. 
replicatorAdapter.subscribe(key, InternalSubscribeResponse.apply) - //#subscribe + // #subscribe def updated(cachedValue: Int): Behavior[Command] = { Behaviors.receiveMessage[Command] { diff --git a/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/BasicClusterExampleSpec.scala b/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/BasicClusterExampleSpec.scala index bfdea5fda24..91a1c686160 100644 --- a/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/BasicClusterExampleSpec.scala +++ b/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/BasicClusterExampleSpec.scala @@ -50,14 +50,16 @@ akka { #config-seeds """) - val configSystem2 = ConfigFactory.parseString(""" + val configSystem2 = ConfigFactory + .parseString(""" akka.remote.artery.canonical.port = 0 - """).withFallback(configSystem1) + """) + .withFallback(configSystem1) def illustrateJoinSeedNodes(): Unit = { val system: ActorSystem[_] = ??? - //#join-seed-nodes + // #join-seed-nodes import akka.actor.Address import akka.actor.AddressFromURIString import akka.cluster.typed.JoinSeedNodes @@ -65,7 +67,7 @@ akka { val seedNodes: List[Address] = List("akka://ClusterSystem@127.0.0.1:2551", "akka://ClusterSystem@127.0.0.1:2552").map(AddressFromURIString.parse) Cluster(system).manager ! JoinSeedNodes(seedNodes) - //#join-seed-nodes + // #join-seed-nodes } object Backend { @@ -79,21 +81,21 @@ akka { def illustrateRoles(): Unit = { val context: ActorContext[_] = ??? - //#hasRole + // #hasRole val selfMember = Cluster(context.system).selfMember if (selfMember.hasRole("backend")) { context.spawn(Backend(), "back") } else if (selfMember.hasRole("frontend")) { context.spawn(Frontend(), "front") } - //#hasRole + // #hasRole } @nowarn("msg=never used") def illustrateDcAccess(): Unit = { val system: ActorSystem[_] = ??? 
- //#dcAccess + // #dcAccess val cluster = Cluster(system) // this node's data center val dc = cluster.selfMember.dataCenter @@ -102,7 +104,7 @@ akka { // a specific member's data center val aMember = cluster.state.members.head val aDc = aMember.dataCenter - //#dcAccess + // #dcAccess } } @@ -177,14 +179,14 @@ class BasicClusterManualSpec extends AnyWordSpec with ScalaFutures with Eventual val system2 = ActorSystem[Nothing](Behaviors.empty[Nothing], "ClusterSystem", noPort.withFallback(clusterConfig)) try { - //#cluster-create + // #cluster-create val cluster = Cluster(system) - //#cluster-create + // #cluster-create val cluster2 = Cluster(system2) - //#cluster-join + // #cluster-join cluster.manager ! Join(cluster.selfMember.address) - //#cluster-join + // #cluster-join cluster2.manager ! Join(cluster.selfMember.address) eventually { @@ -192,9 +194,9 @@ class BasicClusterManualSpec extends AnyWordSpec with ScalaFutures with Eventual cluster2.state.members.toList.map(_.status) shouldEqual List(MemberStatus.up, MemberStatus.up) } - //#cluster-leave + // #cluster-leave cluster2.manager ! Leave(cluster2.selfMember.address) - //#cluster-leave + // #cluster-leave eventually { cluster.state.members.toList.map(_.status) shouldEqual List(MemberStatus.up) @@ -220,9 +222,9 @@ class BasicClusterManualSpec extends AnyWordSpec with ScalaFutures with Eventual val probe1 = TestProbe[MemberEvent]()(system1) val subscriber = probe1.ref - //#cluster-subscribe + // #cluster-subscribe cluster.subscriptions ! Subscribe(subscriber, classOf[MemberEvent]) - //#cluster-subscribe + // #cluster-subscribe cluster1.manager ! Join(cluster1.selfMember.address) eventually { @@ -255,10 +257,10 @@ class BasicClusterManualSpec extends AnyWordSpec with ScalaFutures with Eventual } val anotherMemberAddress = cluster2.selfMember.address - //#cluster-leave-example + // #cluster-leave-example cluster.manager ! 
Leave(anotherMemberAddress) // subscriber will receive events MemberLeft, MemberExited and MemberRemoved - //#cluster-leave-example + // #cluster-leave-example probe1.within(10.seconds) { probe1.expectMessageType[MemberLeft].member.address shouldEqual cluster2.selfMember.address probe1.expectMessageType[MemberExited].member.address shouldEqual cluster2.selfMember.address diff --git a/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/DistributedPubSubExample.scala b/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/DistributedPubSubExample.scala index 36e19488084..937637a3c5e 100644 --- a/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/DistributedPubSubExample.scala +++ b/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/DistributedPubSubExample.scala @@ -77,13 +77,11 @@ object Publisher { import Ontology._ - /** - * Handles new data type setup: validates schema, registers valid data types, publishes new ones to subscribers - */ + /** Handles new data type setup: validates schema, registers valid data types, publishes new ones to subscribers */ object RegistrationService { def apply(): Behavior[AnyRef] = { - //#publisher + // #publisher Behaviors.setup[AnyRef] { context => import akka.cluster.pubsub.DistributedPubSub import akka.cluster.pubsub.DistributedPubSubMediator @@ -121,7 +119,7 @@ object Publisher { Behaviors.unhandled } } - //#publisher + // #publisher } } @@ -132,7 +130,7 @@ object Ingestion { import Ontology._ def apply(dt: DataType, mediator: akka.actor.ActorRef): Behavior[DataEvent] = { - //#destination + // #destination Behaviors.setup { context => // register to the path import akka.actor.typed.scaladsl.adapter._ @@ -140,7 +138,7 @@ object Ingestion { idle(dt, mediator) } - //#destination + // #destination } private def idle(dt: DataType, mediator: akka.actor.ActorRef): Behavior[DataEvent] = @@ -160,7 +158,7 @@ object Ingestion { /** Would normally be typed more specifically. 
*/ private def active(key: DataKey, sink: Option[DataSink], mediator: akka.actor.ActorRef): Behavior[DataEvent] = - //#publisher + // #publisher Behaviors.setup { context => Behaviors.receiveMessagePartial[DataEvent] { case e: DataEnvelope if e.key == key => @@ -175,7 +173,7 @@ object Ingestion { Behaviors.stopped } } - //#publisher + // #publisher } @@ -184,7 +182,7 @@ object Subscriber { def apply(key: DataKey, mediator: akka.actor.ActorRef): Behavior[DataEvent] = { - //#subscriber + // #subscriber Behaviors.setup[DataEvent] { context => import akka.actor.typed.scaladsl.adapter._ @@ -200,7 +198,7 @@ object Subscriber { wonderland() case IngestionStarted(k, path) if k == key => - //#send + // #send // simulate data sent from various data sources: (1 to 100).foreach { n => mediator ! DistributedPubSubMediator.Send( @@ -208,12 +206,12 @@ object Subscriber { msg = DataEnvelope(key, s"hello-$key-$n"), localAffinity = true) } - //#send + // #send andThen(key, mediator) } } - //#subscriber + // #subscriber } private def wonderland(): Behavior[DataEvent] = { @@ -261,9 +259,9 @@ object DataPlatform { def apply(): Behavior[ProvisionCommand] = { Behaviors.setup { context => - //#mediator + // #mediator val mediator = DistributedPubSub(context.system).mediator - //#mediator + // #mediator val service = context.spawn(DataService(mediator), "data") Behaviors.receiveMessagePartial { diff --git a/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/ReceptionistExample.scala b/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/ReceptionistExample.scala index 79770822d39..31d09e7c218 100644 --- a/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/ReceptionistExample.scala +++ b/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/ReceptionistExample.scala @@ -13,7 +13,7 @@ import akka.actor.typed.scaladsl.Behaviors //#import object PingPongExample { - //#ping-service + // #ping-service object PingService { val PingServiceKey = 
ServiceKey[Ping]("pingService") @@ -24,18 +24,17 @@ object PingPongExample { Behaviors.setup { context => context.system.receptionist ! Receptionist.Register(PingServiceKey, context.self) - Behaviors.receiveMessage { - case Ping(replyTo) => - context.log.info("Pinged by {}", replyTo) - replyTo ! Pong - Behaviors.same + Behaviors.receiveMessage { case Ping(replyTo) => + context.log.info("Pinged by {}", replyTo) + replyTo ! Pong + Behaviors.same } } } } - //#ping-service + // #ping-service - //#pinger + // #pinger object Pinger { def apply(pingService: ActorRef[PingService.Ping]): Behavior[PingService.Pong.type] = { Behaviors.setup { context => @@ -48,9 +47,9 @@ object PingPongExample { } } } - //#pinger + // #pinger - //#pinger-guardian + // #pinger-guardian object Guardian { def apply(): Behavior[Nothing] = { Behaviors @@ -58,18 +57,17 @@ object PingPongExample { context.spawnAnonymous(PingService()) context.system.receptionist ! Receptionist.Subscribe(PingService.PingServiceKey, context.self) - Behaviors.receiveMessagePartial[Receptionist.Listing] { - case PingService.PingServiceKey.Listing(listings) => - listings.foreach(ps => context.spawnAnonymous(Pinger(ps))) - Behaviors.same + Behaviors.receiveMessagePartial[Receptionist.Listing] { case PingService.PingServiceKey.Listing(listings) => + listings.foreach(ps => context.spawnAnonymous(Pinger(ps))) + Behaviors.same } } .narrow } } - //#pinger-guardian + // #pinger-guardian - //#find + // #find object PingManager { sealed trait Command case object PingAll extends Command @@ -92,12 +90,12 @@ object PingPongExample { } } } - //#find + // #find Behaviors.setup[PingService.Ping] { context => - //#deregister + // #deregister context.system.receptionist ! 
Receptionist.Deregister(PingService.PingServiceKey, context.self) - //#deregister + // #deregister Behaviors.empty } } diff --git a/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/SingletonCompileOnlySpec.scala b/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/SingletonCompileOnlySpec.scala index 5bda61154f0..876955104c8 100644 --- a/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/SingletonCompileOnlySpec.scala +++ b/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/SingletonCompileOnlySpec.scala @@ -14,7 +14,7 @@ object SingletonCompileOnlySpec { val system = ActorSystem(Behaviors.empty, "Singleton") - //#counter + // #counter object Counter { sealed trait Command case object Increment extends Command @@ -38,9 +38,9 @@ object SingletonCompileOnlySpec { updated(0) } } - //#counter + // #counter - //#singleton + // #singleton import akka.cluster.typed.ClusterSingleton import akka.cluster.typed.SingletonActor @@ -50,24 +50,24 @@ object SingletonCompileOnlySpec { SingletonActor(Behaviors.supervise(Counter()).onFailure[Exception](SupervisorStrategy.restart), "GlobalCounter")) proxy ! 
Counter.Increment - //#singleton + // #singleton - //#stop-message + // #stop-message val singletonActor = SingletonActor(Counter(), "GlobalCounter").withStopMessage(Counter.GoodByeCounter) singletonManager.init(singletonActor) - //#stop-message + // #stop-message - //#backoff + // #backoff val proxyBackOff: ActorRef[Counter.Command] = singletonManager.init( SingletonActor( Behaviors .supervise(Counter()) .onFailure[Exception](SupervisorStrategy.restartWithBackoff(1.second, 10.seconds, 0.2)), "GlobalCounter")) - //#backoff + // #backoff - //#create-singleton-proxy-dc + // #create-singleton-proxy-dc val singletonProxy: ActorRef[Counter.Command] = ClusterSingleton(system).init( SingletonActor(Counter(), "GlobalCounter").withSettings(ClusterSingletonSettings(system).withDataCenter("dc2"))) - //#create-singleton-proxy-dc + // #create-singleton-proxy-dc } diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index e41bf948bfb..54a5d1571e4 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -35,9 +35,7 @@ import akka.pattern._ import akka.remote.{ UniqueAddress => _, _ } import akka.util.Version -/** - * Cluster Extension Id and factory for creating Cluster extension. - */ +/** Cluster Extension Id and factory for creating Cluster extension. 
*/ object Cluster extends ExtensionId[Cluster] with ExtensionIdProvider { override def get(system: ActorSystem): Cluster = super.get(system) @@ -47,9 +45,7 @@ object Cluster extends ExtensionId[Cluster] with ExtensionIdProvider { override def createExtension(system: ExtendedActorSystem): Cluster = new Cluster(system) - /** - * INTERNAL API - */ + /** INTERNAL API */ private[cluster] final val isAssertInvariantsEnabled: Boolean = System.getProperty("akka.cluster.assert", "off").toLowerCase match { case "on" | "true" => true @@ -89,22 +85,16 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { s"ActorSystem [${system}] needs to have 'akka.actor.provider' set to 'cluster' in the configuration, currently uses [${other.getClass.getName}]") } - /** - * The address of this cluster member. - */ + /** The address of this cluster member. */ def selfAddress: Address = selfUniqueAddress.address /** Data center to which this node belongs to (defaults to "default" if not configured explicitly) */ def selfDataCenter: DataCenter = settings.SelfDataCenter - /** - * roles that this member has - */ + /** roles that this member has */ def selfRoles: Set[String] = settings.Roles - /** - * Java API: roles that this member has - */ + /** Java API: roles that this member has */ @nowarn("msg=deprecated") def getSelfRoles: java.util.Set[String] = scala.collection.JavaConverters.setAsJavaSetConverter(selfRoles).asJava @@ -141,8 +131,8 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { private def checkAutoDownUsage(): Unit = { if (settings.DowningProviderClassName == "akka.cluster.AutoDowning" || - (settings.config.hasPath("auto-down-unreachable-after") && settings.config.getString( - "auto-down-unreachable-after") != "off")) + (settings.config.hasPath("auto-down-unreachable-after") && settings.config.getString( + "auto-down-unreachable-after") != "off")) logWarning( "auto-down has been removed in Akka 2.6.0. 
See " + "https://doc.akka.io/docs/akka/current/typed/cluster.html#downing for alternatives.") @@ -152,9 +142,7 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { // ===================== WORK DAEMONS ===================== // ======================================================== - /** - * INTERNAL API - */ + /** INTERNAL API */ private[cluster] val scheduler: Scheduler = { if (system.scheduler.maxFrequency < 1.second / SchedulerTickDuration) { logInfo( @@ -185,12 +173,12 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { override def maxFrequency: Double = systemScheduler.maxFrequency @nowarn("msg=deprecated") - override def schedule(initialDelay: FiniteDuration, interval: FiniteDuration, runnable: Runnable)( - implicit executor: ExecutionContext): Cancellable = + override def schedule(initialDelay: FiniteDuration, interval: FiniteDuration, runnable: Runnable)(implicit + executor: ExecutionContext): Cancellable = systemScheduler.schedule(initialDelay, interval, runnable) - override def scheduleOnce(delay: FiniteDuration, runnable: Runnable)( - implicit executor: ExecutionContext): Cancellable = + override def scheduleOnce(delay: FiniteDuration, runnable: Runnable)(implicit + executor: ExecutionContext): Cancellable = systemScheduler.scheduleOnce(delay, runnable) } } @@ -203,9 +191,7 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { name = "cluster") } - /** - * INTERNAL API - */ + /** INTERNAL API */ private[cluster] val clusterCore: ActorRef = { implicit val timeout = system.settings.CreationTimeout try { @@ -238,19 +224,13 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { // ===================== PUBLIC API ===================== // ====================================================== - /** - * Returns true if this cluster instance has be shutdown. - */ + /** Returns true if this cluster instance has be shutdown. 
*/ def isTerminated: Boolean = _isTerminated.get - /** - * Current snapshot state of the cluster. - */ + /** Current snapshot state of the cluster. */ def state: CurrentClusterState = readView.state - /** - * Current snapshot of the member itself - */ + /** Current snapshot of the member itself */ def selfMember: Member = readView.self /** @@ -287,9 +267,7 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { clusterCore ! InternalClusterAction.Subscribe(subscriber, initialStateMode, to.toSet) } - /** - * Unsubscribe to all cluster domain events. - */ + /** Unsubscribe to all cluster domain events. */ def unsubscribe(subscriber: ActorRef): Unit = clusterCore ! InternalClusterAction.Unsubscribe(subscriber, None) @@ -325,9 +303,7 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { clusterCore ! ClusterUserAction.JoinTo(fillLocal(address)) } - /** - * Change the state of every member in preparation for a full cluster shutdown. - */ + /** Change the state of every member in preparation for a full cluster shutdown. */ def prepareForFullClusterShutdown(): Unit = { clusterCore ! 
ClusterUserAction.PrepareForShutdown } @@ -509,14 +485,10 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { case _ => // ignore, this is fine } - /** - * INTERNAL API - */ + /** INTERNAL API */ private[cluster] object ClusterLogger extends ClusterLogger(log) - /** - * INTERNAL API - */ + /** INTERNAL API */ private[cluster] class ClusterLogger(log: MarkerLoggingAdapter) { def isDebugEnabled: Boolean = log.isDebugEnabled diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala index db1962bd9e8..36f6f2477e1 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala @@ -91,7 +91,7 @@ private[akka] class ClusterDeployer(_settings: ActorSystem.Settings, _pm: Dynami // user has not specified nr-of-instances val config2 = if (config.hasPath("cluster.enabled") && config.getBoolean("cluster.enabled") && !config.hasPath( - "nr-of-instances")) { + "nr-of-instances")) { val maxTotalNrOfInstances = config.withFallback(default).getInt("cluster.max-total-nr-of-instances") ConfigFactory.parseString("nr-of-instances=" + maxTotalNrOfInstances).withFallback(config) } else config @@ -132,14 +132,10 @@ private[akka] class ClusterDeployer(_settings: ActorSystem.Settings, _pm: Dynami @SerialVersionUID(1L) abstract class ClusterScope extends Scope -/** - * Cluster aware scope of a [[akka.actor.Deploy]] - */ +/** Cluster aware scope of a [[akka.actor.Deploy]] */ case object ClusterScope extends ClusterScope { - /** - * Java API: get the singleton instance - */ + /** Java API: get the singleton instance */ def getInstance = this def withFallback(other: Scope): Scope = this diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala index 8843e4f0529..0122da658ba 100644 --- 
a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala @@ -29,9 +29,7 @@ import akka.remote.artery.QuarantinedEvent import akka.util.Timeout import akka.util.Version -/** - * Base trait for all cluster messages. All ClusterMessage's are serializable. - */ +/** Base trait for all cluster messages. All ClusterMessage's are serializable. */ trait ClusterMessage extends Serializable /** @@ -50,21 +48,15 @@ private[cluster] object ClusterUserAction { @SerialVersionUID(1L) final case class JoinTo(address: Address) - /** - * Command to leave the cluster. - */ + /** Command to leave the cluster. */ @SerialVersionUID(1L) final case class Leave(address: Address) extends ClusterMessage - /** - * Command to mark node as temporary down. - */ + /** Command to mark node as temporary down. */ @SerialVersionUID(1L) final case class Down(address: Address) extends ClusterMessage - /** - * Command to mark all nodes as shutting down - */ + /** Command to mark all nodes as shutting down */ @SerialVersionUID(1L) case object PrepareForShutdown extends ClusterMessage @@ -76,15 +68,11 @@ private[cluster] object ClusterUserAction { */ case object SetAppVersionLater - /** - * Command to set the `appVersion` after system startup but before joining. - */ + /** Command to set the `appVersion` after system startup but before joining. 
*/ final case class SetAppVersion(appVersion: Version) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[cluster] object InternalClusterAction { @@ -133,31 +121,23 @@ private[cluster] object InternalClusterAction { final case class CompatibleConfig(clusterConfig: Config) extends ConfigCheck - /** - * see JoinSeedNode - */ + /** see JoinSeedNode */ @SerialVersionUID(1L) case class InitJoin(configOfJoiningNode: Config) extends ClusterMessage with DeadLetterSuppression - /** - * see JoinSeedNode - */ + /** see JoinSeedNode */ @SerialVersionUID(1L) final case class InitJoinAck(address: Address, configCheck: ConfigCheck) extends ClusterMessage with DeadLetterSuppression - /** - * see JoinSeedNode - */ + /** see JoinSeedNode */ @SerialVersionUID(1L) final case class InitJoinNack(address: Address) extends ClusterMessage with DeadLetterSuppression final case class ExitingConfirmed(node: UniqueAddress) extends ClusterMessage with DeadLetterSuppression - /** - * Marker interface for periodic tick messages - */ + /** Marker interface for periodic tick messages */ sealed trait Tick case object GossipTick extends Tick @@ -189,9 +169,7 @@ private[cluster] object InternalClusterAction { extends SubscriptionMessage with DeadLetterSuppression - /** - * @param receiver [[akka.cluster.ClusterEvent.CurrentClusterState]] will be sent to the `receiver` - */ + /** @param receiver [[akka.cluster.ClusterEvent.CurrentClusterState]] will be sent to the `receiver` */ final case class SendCurrentClusterState(receiver: ActorRef) extends SubscriptionMessage sealed trait PublishMessage @@ -297,26 +275,22 @@ private[cluster] final class ClusterCoreSupervisor(joinConfigCompatChecker: Join } override val supervisorStrategy = - OneForOneStrategy() { - case NonFatal(e) => - Cluster(context.system).ClusterLogger.logError(e, "crashed, [{}] - shutting down...", e.getMessage) - self ! 
PoisonPill - Stop + OneForOneStrategy() { case NonFatal(e) => + Cluster(context.system).ClusterLogger.logError(e, "crashed, [{}] - shutting down...", e.getMessage) + self ! PoisonPill + Stop } override def postStop(): Unit = Cluster(context.system).shutdown() - def receive = { - case InternalClusterAction.GetClusterCoreRef => - if (coreDaemon.isEmpty) - createChildren() - coreDaemon.foreach(sender() ! _) + def receive = { case InternalClusterAction.GetClusterCoreRef => + if (coreDaemon.isEmpty) + createChildren() + coreDaemon.foreach(sender() ! _) } } -/** - * INTERNAL API. - */ +/** INTERNAL API. */ @InternalApi private[cluster] object ClusterCoreDaemon { val NumberOfGossipsBeforeShutdownWhenLeaderExits = 5 @@ -325,9 +299,7 @@ private[cluster] object ClusterCoreDaemon { } -/** - * INTERNAL API. - */ +/** INTERNAL API. */ @InternalApi private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatChecker: JoinConfigCompatChecker) extends Actor @@ -400,9 +372,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh var laterAppVersion: Option[Promise[Version]] = None - /** - * Looks up and returns the remote cluster command connection for the specific address. - */ + /** Looks up and returns the remote cluster command connection for the specific address. */ private def clusterCore(address: Address): ActorSelection = context.actorSelection(RootActorPath(address) / "system" / "cluster" / "core" / "daemon") @@ -614,10 +584,9 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh case ExitingConfirmed(address) => receiveExitingConfirmed(address) }: Actor.Receive).orElse(receiveExitingCompleted) - def receiveExitingCompleted: Actor.Receive = { - case ExitingCompleted => - exitingCompleted() - sender() ! Done // reply to ask + def receiveExitingCompleted: Actor.Receive = { case ExitingCompleted => + exitingCompleted() + sender() ! 
Done // reply to ask } def receive = uninitialized @@ -765,8 +734,8 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh address) import akka.pattern.pipe // easiest to just try again via JoinTo when the promise has been completed - val pipeMessage = promise.future.map(_ => ClusterUserAction.JoinTo(address)).recover { - case _ => ClusterUserAction.JoinTo(address) + val pipeMessage = promise.future.map(_ => ClusterUserAction.JoinTo(address)).recover { case _ => + ClusterUserAction.JoinTo(address) } pipe(pipeMessage).to(self) None @@ -879,9 +848,9 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh promise.future.value.get.get } val newMembers = localMembers + Member(joiningNode, roles, appVersion) + Member( - selfUniqueAddress, - cluster.selfRoles, - selfAppVersion) + selfUniqueAddress, + cluster.selfRoles, + selfAppVersion) val newGossip = latestGossip.copy(members = newMembers) updateLatestGossip(newGossip) @@ -913,9 +882,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh } } - /** - * Accept reply from Join request. - */ + /** Accept reply from Join request. */ def welcome(joinWith: Address, from: UniqueAddress, gossip: Gossip): Unit = { require(latestGossip.members.isEmpty, "Join can only be done from empty state") if (joinWith != from.address) @@ -1029,9 +996,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh exitingConfirmed = exitingConfirmed.filter(n => latestGossip.members.exists(_.uniqueAddress == n)) } - /** - * This method is called when a member sees itself as Exiting or Down. - */ + /** This method is called when a member sees itself as Exiting or Down. */ def shutdown(): Unit = cluster.shutdown() /** @@ -1121,9 +1086,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh } } - /** - * The types of gossip actions that receive gossip has performed. 
- */ + /** The types of gossip actions that receive gossip has performed. */ sealed trait ReceiveGossipType case object Ignored extends ReceiveGossipType case object Older extends ReceiveGossipType @@ -1131,19 +1094,18 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh case object Same extends ReceiveGossipType case object Merge extends ReceiveGossipType - /** - * Receive new gossip. - */ + /** Receive new gossip. */ def receiveGossip(envelope: GossipEnvelope): ReceiveGossipType = { val from = envelope.from - val remoteGossip = try { - envelope.gossip - } catch { - case NonFatal(t) => - gossipLogger.logWarning("Invalid Gossip. This should only happen during a rolling upgrade. {}", t.getMessage) - Gossip.empty + val remoteGossip = + try { + envelope.gossip + } catch { + case NonFatal(t) => + gossipLogger.logWarning("Invalid Gossip. This should only happen during a rolling upgrade. {}", t.getMessage) + Gossip.empty - } + } val localGossip = latestGossip if (remoteGossip eq Gossip.empty) { @@ -1206,10 +1168,9 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh // Don't mark gossip state as seen while exiting is in progress, e.g. // shutting down singleton actors. This delays removal of the member until // the exiting tasks have been completed. - membershipState = membershipState.copy( - latestGossip = - if (exitingTasksInProgress) winningGossip - else winningGossip.seen(selfUniqueAddress)) + membershipState = membershipState.copy(latestGossip = + if (exitingTasksInProgress) winningGossip + else winningGossip.seen(selfUniqueAddress)) assertLatestGossip() // for all new nodes we remove them from the failure detector @@ -1289,18 +1250,14 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh } } - /** - * Sends full gossip to `n` other random members. - */ + /** Sends full gossip to `n` other random members. 
*/ def gossipRandomN(n: Int): Unit = { if (!isSingletonCluster && n > 0) { gossipTargetSelector.randomNodesForFullGossip(membershipState, n).foreach(gossipTo) } } - /** - * Initiates a new round of gossip. - */ + /** Initiates a new round of gossip. */ def gossip(): Unit = if (!isSingletonCluster) { gossipTargetSelector.gossipTarget(membershipState) match { @@ -1317,9 +1274,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh } } - /** - * Runs periodic leader actions, such as member status transitions, assigning partitions etc. - */ + /** Runs periodic leader actions, such as member status transitions, assigning partitions etc. */ def leaderActions(): Unit = { if (membershipState.isLeader(selfUniqueAddress)) { // only run the leader actions if we are the LEADER of the data center @@ -1362,8 +1317,9 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh } def checkForPrepareForShutdown(): Unit = { - if (MembershipState.allowedToPrepareToShutdown(latestGossip.member(selfUniqueAddress).status) && latestGossip.members - .exists(m => MembershipState.prepareForShutdownStates(m.status))) { + if (MembershipState.allowedToPrepareToShutdown( + latestGossip.member(selfUniqueAddress).status) && latestGossip.members.exists(m => + MembershipState.prepareForShutdownStates(m.status))) { logDebug("Detected full cluster shutdown") self ! ClusterUserAction.PrepareForShutdown } @@ -1375,8 +1331,8 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh // status Down. The down commands should spread before we shutdown. 
val unreachable = membershipState.dcReachability.allUnreachableOrTerminated val downed = membershipState.dcMembers.collect { case m if m.status == Down => m.uniqueAddress } - if (selfDownCounter >= MaxTicksBeforeShuttingDownMyself || downed.forall( - node => unreachable(node) || latestGossip.seenByNode(node))) { + if (selfDownCounter >= MaxTicksBeforeShuttingDownMyself || downed.forall(node => + unreachable(node) || latestGossip.seenByNode(node))) { // the reason for not shutting down immediately is to give the gossip a chance to spread // the downing information to other downed nodes, so that they can shutdown themselves logInfo("Node has been marked as DOWN. Shutting down myself") @@ -1391,8 +1347,8 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh } def isMinNrOfMembersFulfilled: Boolean = { - latestGossip.members.size >= MinNrOfMembers && MinNrOfMembersOfRole.forall { - case (role, threshold) => latestGossip.members.count(_.hasRole(role)) >= threshold + latestGossip.members.size >= MinNrOfMembers && MinNrOfMembersOfRole.forall { case (role, threshold) => + latestGossip.members.count(_.hasRole(role)) >= threshold } } @@ -1465,7 +1421,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh val updatedGossip: Gossip = if (removedUnreachable.nonEmpty || removedExitingConfirmed.nonEmpty || changedMembers.nonEmpty || - removedOtherDc.nonEmpty) { + removedOtherDc.nonEmpty) { // replace changed members val removed = removedUnreachable @@ -1530,9 +1486,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh } } - /** - * Gossip the Exiting change to the two oldest nodes for quick dissemination to potential Singleton nodes - */ + /** Gossip the Exiting change to the two oldest nodes for quick dissemination to potential Singleton nodes */ private def gossipExitingMembersToOldest(exitingMembers: Set[Member]): Unit = { val targets = 
membershipState.gossipTargetsForExitingMembers(exitingMembers) if (targets.nonEmpty) { @@ -1579,9 +1533,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh } - /** - * Reaps the unreachable members according to the failure detector's verdict. - */ + /** Reaps the unreachable members according to the failure detector's verdict. */ def reapUnreachableMembers(): Unit = { if (!isSingletonCluster) { // only scrutinize if we are a non-singleton cluster @@ -1645,15 +1597,12 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh // needed for tests def sendGossipTo(address: Address): Unit = { - latestGossip.members.foreach( - m => - if (m.address == address) - gossipTo(m.uniqueAddress)) + latestGossip.members.foreach(m => + if (m.address == address) + gossipTo(m.uniqueAddress)) } - /** - * Gossips latest gossip to a node. - */ + /** Gossips latest gossip to a node. */ def gossipTo(node: UniqueAddress): Unit = if (membershipState.validNodeForGossip(node)) clusterCore(node.address) ! 
GossipEnvelope(selfUniqueAddress, node, latestGossip) @@ -1764,9 +1713,7 @@ private[cluster] class OnMemberStatusChangedListener(callback: Runnable, status: } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @SerialVersionUID(1L) private[cluster] final case class GossipStats( @@ -1808,9 +1755,7 @@ private[cluster] final case class GossipStats( } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @SerialVersionUID(1L) private[cluster] final case class VectorClockStats(versionSize: Int = 0, seenLatest: Int = 0) diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala index 9142dc96432..e1090505e02 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala @@ -46,14 +46,10 @@ object ClusterEvent { */ case object InitialStateAsEvents extends SubscriptionInitialStateMode - /** - * Java API - */ + /** Java API */ def initialStateAsSnapshot = InitialStateAsSnapshot - /** - * Java API - */ + /** Java API */ def initialStateAsEvents = InitialStateAsEvents /** @@ -133,43 +129,31 @@ object ClusterEvent { roleLeaderMap: Map[String, Option[Address]] = Map.empty) = this(members, unreachable, seenBy, leader, roleLeaderMap, Set.empty, Set.empty) - /** - * Java API: get current member list. - */ + /** Java API: get current member list. */ def getMembers: java.lang.Iterable[Member] = { import akka.util.ccompat.JavaConverters._ members.asJava } - /** - * Java API: get current unreachable set. - */ + /** Java API: get current unreachable set. 
*/ @nowarn("msg=deprecated") def getUnreachable: java.util.Set[Member] = scala.collection.JavaConverters.setAsJavaSetConverter(unreachable).asJava - /** - * Java API: All data centers in the cluster - */ + /** Java API: All data centers in the cluster */ @nowarn("msg=deprecated") def getUnreachableDataCenters: java.util.Set[String] = scala.collection.JavaConverters.setAsJavaSetConverter(unreachableDataCenters).asJava - /** - * Java API: get current “seen-by” set. - */ + /** Java API: get current “seen-by” set. */ @nowarn("msg=deprecated") def getSeenBy: java.util.Set[Address] = scala.collection.JavaConverters.setAsJavaSetConverter(seenBy).asJava - /** - * Java API: get address of current data center leader, or null if none - */ + /** Java API: get address of current data center leader, or null if none */ def getLeader: Address = leader orNull - /** - * get address of current leader, if any, within the data center that has the given role - */ + /** get address of current leader, if any, within the data center that has the given role */ def roleLeader(role: String): Option[Address] = roleLeaderMap.getOrElse(role, None) /** @@ -178,33 +162,23 @@ object ClusterEvent { */ def getRoleLeader(role: String): Address = roleLeaderMap.get(role).flatten.orNull - /** - * All node roles in the cluster - */ + /** All node roles in the cluster */ def allRoles: Set[String] = roleLeaderMap.keySet - /** - * Java API: All node roles in the cluster - */ + /** Java API: All node roles in the cluster */ @nowarn("msg=deprecated") def getAllRoles: java.util.Set[String] = scala.collection.JavaConverters.setAsJavaSetConverter(allRoles).asJava - /** - * All data centers in the cluster - */ + /** All data centers in the cluster */ def allDataCenters: Set[String] = members.iterator.map(_.dataCenter).to(immutable.Set) - /** - * Java API: All data centers in the cluster - */ + /** Java API: All data centers in the cluster */ @nowarn("msg=deprecated") def getAllDataCenters: java.util.Set[String] = 
scala.collection.JavaConverters.setAsJavaSetConverter(allDataCenters).asJava - /** - * Replace the set of unreachable datacenters with the given set - */ + /** Replace the set of unreachable datacenters with the given set */ def withUnreachableDataCenters(unreachableDataCenters: Set[DataCenter]): CurrentClusterState = new CurrentClusterState( members, @@ -287,9 +261,7 @@ object ClusterEvent { def member: Member } - /** - * Member status changed to Joining. - */ + /** Member status changed to Joining. */ final case class MemberJoined(member: Member) extends MemberEvent { if (member.status != Joining) throw new IllegalArgumentException("Expected Joining status, got: " + member) } @@ -304,16 +276,12 @@ object ClusterEvent { if (member.status != WeaklyUp) throw new IllegalArgumentException("Expected WeaklyUp status, got: " + member) } - /** - * Member status changed to Up. - */ + /** Member status changed to Up. */ final case class MemberUp(member: Member) extends MemberEvent { if (member.status != Up) throw new IllegalArgumentException("Expected Up status, got: " + member) } - /** - * Member status changed to Leaving. - */ + /** Member status changed to Leaving. */ final case class MemberLeft(member: Member) extends MemberEvent { if (member.status != Leaving) throw new IllegalArgumentException("Expected Leaving status, got: " + member) } @@ -388,9 +356,7 @@ object ClusterEvent { */ case object ClusterShuttingDown extends ClusterDomainEvent - /** - * Java API: get the singleton instance of `ClusterShuttingDown` event - */ + /** Java API: get the singleton instance of `ClusterShuttingDown` event */ def getClusterShuttingDownInstance = ClusterShuttingDown /** @@ -401,9 +367,7 @@ object ClusterEvent { def member: Member } - /** - * A member is considered as unreachable by the failure detector. - */ + /** A member is considered as unreachable by the failure detector. 
*/ final case class UnreachableMember(member: Member) extends ReachabilityEvent /** @@ -419,14 +383,10 @@ object ClusterEvent { */ sealed trait DataCenterReachabilityEvent extends ClusterDomainEvent - /** - * A data center is considered as unreachable when any members from the data center are unreachable - */ + /** A data center is considered as unreachable when any members from the data center are unreachable */ final case class UnreachableDataCenter(dataCenter: DataCenter) extends DataCenterReachabilityEvent - /** - * A data center is considered reachable when all members from the data center are reachable - */ + /** A data center is considered reachable when all members from the data center are reachable */ final case class ReachableDataCenter(dataCenter: DataCenter) extends DataCenterReachabilityEvent /** @@ -437,28 +397,20 @@ object ClusterEvent { @ccompatUsedUntil213 private[cluster] final case class SeenChanged(convergence: Boolean, seenBy: Set[Address]) extends ClusterDomainEvent - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[cluster] final case class ReachabilityChanged(reachability: Reachability) extends ClusterDomainEvent - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[cluster] final case class CurrentInternalStats(gossipStats: GossipStats, vclockStats: VectorClockStats) extends ClusterDomainEvent - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[cluster] final case class MemberTombstonesChanged(tombstones: Set[UniqueAddress]) extends ClusterDomainEvent - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[cluster] def diffUnreachable( oldState: MembershipState, @@ -475,9 +427,7 @@ object ClusterEvent { .to(immutable.IndexedSeq) } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[cluster] def diffReachable( oldState: MembershipState, @@ -488,15 +438,14 @@ object ClusterEvent { oldState.dcReachabilityNoOutsideNodes.allUnreachable.iterator .collect { 
case node - if newGossip.hasMember(node) && newState.dcReachabilityNoOutsideNodes.isReachable(node) && node != newState.selfUniqueAddress => + if newGossip.hasMember(node) && newState.dcReachabilityNoOutsideNodes.isReachable( + node) && node != newState.selfUniqueAddress => ReachableMember(newGossip.member(node)) } .to(immutable.IndexedSeq) } - /** - * Internal API - */ + /** Internal API */ @InternalApi private[cluster] def isDataCenterReachable(state: MembershipState)(otherDc: DataCenter): Boolean = { val unrelatedDcNodes = state.latestGossip.members.collect { @@ -507,17 +456,15 @@ object ClusterEvent { reachabilityForOtherDc.allUnreachable.isEmpty } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[cluster] def diffUnreachableDataCenter( oldState: MembershipState, newState: MembershipState): immutable.Seq[UnreachableDataCenter] = { if (newState eq oldState) Nil else { - val otherDcs = (oldState.latestGossip.allDataCenters - .union(newState.latestGossip.allDataCenters)) - newState.selfDc + val otherDcs = + (oldState.latestGossip.allDataCenters.union(newState.latestGossip.allDataCenters)) - newState.selfDc val oldUnreachableDcs = otherDcs.filterNot(isDataCenterReachable(oldState)) val currentUnreachableDcs = otherDcs.filterNot(isDataCenterReachable(newState)) @@ -526,17 +473,15 @@ object ClusterEvent { } } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[cluster] def diffReachableDataCenter( oldState: MembershipState, newState: MembershipState): immutable.Seq[ReachableDataCenter] = { if (newState eq oldState) Nil else { - val otherDcs = (oldState.latestGossip.allDataCenters - .union(newState.latestGossip.allDataCenters)) - newState.selfDc + val otherDcs = + (oldState.latestGossip.allDataCenters.union(newState.latestGossip.allDataCenters)) - newState.selfDc val oldUnreachableDcs = otherDcs.filterNot(isDataCenterReachable(oldState)) val currentUnreachableDcs = otherDcs.filterNot(isDataCenterReachable(newState)) @@ -545,9 
+490,7 @@ object ClusterEvent { } } - /** - * INTERNAL API. - */ + /** INTERNAL API. */ @InternalApi private[cluster] def diffMemberEvents( oldState: MembershipState, @@ -581,9 +524,7 @@ object ClusterEvent { (new VectorBuilder[MemberEvent]() ++= removedEvents ++= memberEvents).result() } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[cluster] def diffLeader( oldState: MembershipState, @@ -593,9 +534,7 @@ object ClusterEvent { else Nil } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[cluster] def diffRolesLeader(oldState: MembershipState, newState: MembershipState): Set[RoleLeaderChanged] = { for { @@ -605,9 +544,7 @@ object ClusterEvent { } yield RoleLeaderChanged(role, newLeader.map(_.address)) } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[cluster] def diffSeen(oldState: MembershipState, newState: MembershipState): immutable.Seq[SeenChanged] = if (oldState eq newState) Nil @@ -619,9 +556,7 @@ object ClusterEvent { else Nil } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[cluster] def diffReachability( oldState: MembershipState, @@ -629,9 +564,7 @@ object ClusterEvent { if (newState.overview.reachability eq oldState.overview.reachability) Nil else List(ReachabilityChanged(newState.overview.reachability)) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[cluster] def diffTombstones( oldState: MembershipState, @@ -639,9 +572,7 @@ object ClusterEvent { if (newState.latestGossip.tombstones == oldState.latestGossip.tombstones) Nil else MemberTombstonesChanged(newState.latestGossip.tombstones.keySet) :: Nil - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[cluster] def publishDiff(oldState: MembershipState, newState: MembershipState, pub: AnyRef => Unit): Unit = { diffTombstones(oldState, newState).foreach(pub) diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala 
b/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala index 63149550fe4..d6ecfc42b86 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala @@ -44,11 +44,10 @@ private[cluster] final class ClusterHeartbeatReceiver(getCluster: () => Cluster) new cluster.ClusterLogger( Logging.withMarker(context.system, ActorWithLogClass(this, ClusterLogClass.ClusterHeartbeat))) - def receive: Receive = { - case hb: Heartbeat => - // TODO log the sequence nr once serializer is enabled - if (verboseHeartbeat) clusterLogger.logDebug("Heartbeat #{} from [{}]", hb.sequenceNr, hb.from) - sender() ! HeartbeatRsp(cluster.selfUniqueAddress, hb.sequenceNr, hb.creationTimeNanos) + def receive: Receive = { case hb: Heartbeat => + // TODO log the sequence nr once serializer is enabled + if (verboseHeartbeat) clusterLogger.logDebug("Heartbeat #{} from [{}]", hb.sequenceNr, hb.from) + sender() ! HeartbeatRsp(cluster.selfUniqueAddress, hb.sequenceNr, hb.creationTimeNanos) } } @@ -63,22 +62,16 @@ private[cluster] object ClusterHeartbeatReceiver { RootActorPath(address) / "system" / "cluster" / name } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[cluster] object ClusterHeartbeatSender { - /** - * Sent at regular intervals for failure detection. - */ + /** Sent at regular intervals for failure detection. */ final case class Heartbeat(from: Address, sequenceNr: Long, creationTimeNanos: Long) extends ClusterMessage with HeartbeatMessage with DeadLetterSuppression - /** - * Sent as reply to [[Heartbeat]] messages. - */ + /** Sent as reply to [[Heartbeat]] messages. 
*/ final case class HeartbeatRsp(from: UniqueAddress, sequenceNr: Long, creationTimeNanos: Long) extends ClusterMessage with HeartbeatMessage @@ -151,9 +144,7 @@ private[cluster] class ClusterHeartbeatSender extends Actor { cluster.unsubscribe(self) } - /** - * Looks up and returns the remote cluster heartbeat connection for the specific address. - */ + /** Looks up and returns the remote cluster heartbeat connection for the specific address. */ def heartbeatReceiver(address: Address): ActorSelection = context.actorSelection(ClusterHeartbeatReceiver.path(address)) @@ -178,16 +169,16 @@ private[cluster] class ClusterHeartbeatSender extends Actor { } def init(snapshot: CurrentClusterState): Unit = { - val nodes = snapshot.members.collect { case m if filterInternalClusterMembers(m) => m.uniqueAddress } + val nodes = snapshot.members.collect { case m if filterInternalClusterMembers(m) => m.uniqueAddress } val unreachable = snapshot.unreachable.collect { case m if filterInternalClusterMembers(m) => m.uniqueAddress } state = state.init(nodes, unreachable) } def addMember(m: Member): Unit = if (m.uniqueAddress != selfUniqueAddress && // is not self - !state.contains(m.uniqueAddress) && // not already added - filterInternalClusterMembers(m) // should be watching members from this DC (internal / external) - ) { + !state.contains(m.uniqueAddress) && // not already added + filterInternalClusterMembers(m) // should be watching members from this DC (internal / external) + ) { state = state.addMember(m.uniqueAddress) } @@ -351,16 +342,12 @@ private[cluster] final case class HeartbeatNodeRing( immutable.SortedSet().union(nodes) } - /** - * Receivers for `selfAddress`. Cached for subsequent access. - */ + /** Receivers for `selfAddress`. Cached for subsequent access. */ lazy val myReceivers: Set[UniqueAddress] = receivers(selfAddress) private val useAllAsReceivers = monitoredByNrOfMembers >= (nodeRing.size - 1) - /** - * The receivers to use from a specified sender. 
- */ + /** The receivers to use from a specified sender. */ def receivers(sender: UniqueAddress): Set[UniqueAddress] = if (useAllAsReceivers) nodeRing - sender @@ -398,14 +385,10 @@ private[cluster] final case class HeartbeatNodeRing( slice } - /** - * Add a node to the ring. - */ + /** Add a node to the ring. */ def :+(node: UniqueAddress): HeartbeatNodeRing = if (nodes contains node) this else copy(nodes = nodes + node) - /** - * Remove a node from the ring. - */ + /** Remove a node from the ring. */ def :-(node: UniqueAddress): HeartbeatNodeRing = if (nodes.contains(node) || unreachable.contains(node)) copy(nodes = nodes - node, unreachable = unreachable - node) diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterJmx.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterJmx.scala index 9dfee50ef39..9befa0d398e 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterJmx.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterJmx.scala @@ -13,14 +13,10 @@ import javax.management.StandardMBean import akka.actor.AddressFromURIString import akka.event.LoggingAdapter -/** - * Interface for the cluster JMX MBean. - */ +/** Interface for the cluster JMX MBean. */ trait ClusterNodeMBean { - /** - * Member status for this node. - */ + /** Member status for this node. */ def getMemberStatus: String /** @@ -101,9 +97,7 @@ trait ClusterNodeMBean { */ def getLeader: String - /** - * Does the cluster consist of only one member? - */ + /** Does the cluster consist of only one member? 
*/ def isSingleton: Boolean /** @@ -132,9 +126,7 @@ trait ClusterNodeMBean { def down(address: String): Unit } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] class ClusterJmx(cluster: Cluster, log: LoggingAdapter) { private val mBeanServer = ManagementFactory.getPlatformMBeanServer @@ -147,9 +139,7 @@ private[akka] class ClusterJmx(cluster: Cluster, log: LoggingAdapter) { private def clusterView = cluster.readView import cluster.ClusterLogger._ - /** - * Creates the cluster JMX MBean and registers it in the MBean server. - */ + /** Creates the cluster JMX MBean and registers it in the MBean server. */ def createMBean() = { val mbean = new StandardMBean(classOf[ClusterNodeMBean]) with ClusterNodeMBean { @@ -162,7 +152,7 @@ private[akka] class ClusterJmx(cluster: Cluster, log: LoggingAdapter) { s"""{ | "address": "${m.address}", | "roles": [${if (m.roles.isEmpty) "" - else m.roles.toList.sorted.map("\"" + _ + "\"").mkString("\n ", ",\n ", "\n ")}], + else m.roles.toList.sorted.map("\"" + _ + "\"").mkString("\n ", ",\n ", "\n ")}], | "status": "${m.status}", | "app-version": "${m.appVersion}" | }""".stripMargin @@ -177,7 +167,7 @@ private[akka] class ClusterJmx(cluster: Cluster, log: LoggingAdapter) { s"""{ | "node": "${subject.address}", | "observed-by": [${if (observerAddresses.isEmpty) "" - else observerAddresses.mkString("\n ", ",\n ", "\n ")}] + else observerAddresses.mkString("\n ", ",\n ", "\n ")}] | }""".stripMargin } @@ -230,9 +220,7 @@ private[akka] class ClusterJmx(cluster: Cluster, log: LoggingAdapter) { } } - /** - * Unregisters the cluster JMX MBean from MBean server. - */ + /** Unregisters the cluster JMX MBean from MBean server. 
*/ def unregisterMBean(): Unit = { try { mBeanServer.unregisterMBean(clusterMBeanName) diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterLogClass.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterLogClass.scala index b2cceda93b4..a937f7b079c 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterLogClass.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterLogClass.scala @@ -6,9 +6,7 @@ package akka.cluster import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ClusterLogClass { val ClusterCore: Class[Cluster] = classOf[Cluster] @@ -17,12 +15,8 @@ import akka.annotation.InternalApi } -/** - * INTERNAL API: Logger class for (verbose) heartbeat logging. - */ +/** INTERNAL API: Logger class for (verbose) heartbeat logging. */ @InternalApi private[akka] class ClusterHeartbeat -/** - * INTERNAL API: Logger class for (verbose) gossip logging. - */ +/** INTERNAL API: Logger class for (verbose) gossip logging. */ @InternalApi private[akka] class ClusterGossip diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterLogMarker.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterLogMarker.scala index 4aa2712f8f0..80abbb1e4a9 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterLogMarker.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterLogMarker.scala @@ -16,9 +16,7 @@ import akka.event.LogMarker */ object ClusterLogMarker { - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] object Properties { val MemberStatus = "akkaMemberStatus" val SbrDecision = "akkaSbrDecision" @@ -38,9 +36,7 @@ object ClusterLogMarker { def reachable(node: Address): LogMarker = LogMarker("akkaReachable", Map(LogMarker.Properties.RemoteAddress -> node)) - /** - * Marker "akkaHeartbeatStarvation" of log event when scheduled heartbeat was delayed. - */ + /** Marker "akkaHeartbeatStarvation" of log event when scheduled heartbeat was delayed. 
*/ val heartbeatStarvation: LogMarker = LogMarker("akkaHeartbeatStarvation") @@ -51,15 +47,11 @@ object ClusterLogMarker { val leaderIncapacitated: LogMarker = LogMarker("akkaClusterLeaderIncapacitated") - /** - * Marker "akkaClusterLeaderRestored" of log event when leader can perform its duties again. - */ + /** Marker "akkaClusterLeaderRestored" of log event when leader can perform its duties again. */ val leaderRestored: LogMarker = LogMarker("akkaClusterLeaderRestored") - /** - * Marker "akkaJoinFailed" of log event when node couldn't join seed nodes. - */ + /** Marker "akkaJoinFailed" of log event when node couldn't join seed nodes. */ val joinFailed: LogMarker = LogMarker("akkaJoinFailed") @@ -134,9 +126,7 @@ object ClusterLogMarker { def sbrLeaseDenied(reverseDecision: DowningStrategy.Decision): LogMarker = LogMarker("akkaSbrLeaseDenied", Map(Properties.SbrDecision -> reverseDecision)) - /** - * Marker "akkaSbrLeaseReleased" of log event when Split Brain Resolver has released the lease. - */ + /** Marker "akkaSbrLeaseReleased" of log event when Split Brain Resolver has released the lease. 
*/ val sbrLeaseReleased: LogMarker = LogMarker("akkaSbrLeaseReleased") diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala index 1ec2da00c0e..c4318b402cd 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala @@ -20,9 +20,7 @@ import akka.cluster.ClusterEvent._ import akka.dispatch.RequiresMessageQueue import akka.dispatch.UnboundedMessageQueueSemantics -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ClusterReadView { final case class State( clusterState: CurrentClusterState, @@ -42,23 +40,21 @@ import akka.dispatch.UnboundedMessageQueueSemantics import ClusterReadView.State import cluster.ClusterLogger._ - /** - * State for synchronous read access via [[Cluster]] extension. Only updated from the `eventBusListener` actor. - */ + /** State for synchronous read access via [[Cluster]] extension. Only updated from the `eventBusListener` actor. 
*/ private val _state: AtomicReference[State] = new AtomicReference[State]( State( clusterState = CurrentClusterState(), reachability = Reachability.empty, - selfMember = Member(cluster.selfUniqueAddress, cluster.selfRoles, cluster.settings.AppVersion) - .copy(status = MemberStatus.Removed), + selfMember = Member(cluster.selfUniqueAddress, cluster.selfRoles, cluster.settings.AppVersion).copy(status = + MemberStatus.Removed), latestStats = CurrentInternalStats(GossipStats(), VectorClockStats()))) val selfAddress: Address = cluster.selfAddress // create actor that subscribes to the cluster eventBus to update current read view state private val eventBusListener: ActorRef = { - cluster.system - .systemActorOf(Props(new Actor with RequiresMessageQueue[UnboundedMessageQueueSemantics] { + cluster.system.systemActorOf( + Props(new Actor with RequiresMessageQueue[UnboundedMessageQueueSemantics] { override def preStart(): Unit = cluster.subscribe(this.self, classOf[ClusterDomainEvent]) // make sure that final state has member status Removed @@ -89,19 +85,18 @@ import akka.dispatch.UnboundedMessageQueueSemantics selfRemoved() case MemberRemoved(member, _) => _state.set( - oldState.copy( - clusterState = oldClusterState.copy( - members = oldClusterState.members - member, - unreachable = oldClusterState.unreachable - member))) + oldState.copy(clusterState = oldClusterState.copy( + members = oldClusterState.members - member, + unreachable = oldClusterState.unreachable - member))) case UnreachableMember(member) => // replace current member with new member (might have different status, only address is used in equals) _state.set( - oldState.copy( - clusterState = oldClusterState.copy(unreachable = oldClusterState.unreachable - member + member))) + oldState.copy(clusterState = + oldClusterState.copy(unreachable = oldClusterState.unreachable - member + member))) case ReachableMember(member) => _state.set( - oldState.copy( - clusterState = oldClusterState.copy(unreachable = 
oldClusterState.unreachable - member))) + oldState.copy(clusterState = + oldClusterState.copy(unreachable = oldClusterState.unreachable - member))) case event: MemberEvent => val member = event.member // replace current member with new member (might have different status, only address is used in equals) @@ -134,7 +129,9 @@ import akka.dispatch.UnboundedMessageQueueSemantics case MemberTombstonesChanged(tombstones) => _state.set(oldState.copy(clusterState = oldClusterState.withMemberTombstones(tombstones))) case unexpected => - throw new IllegalArgumentException(s"Unexpected cluster event type ${unexpected.getClass}") // compiler exhaustiveness check pleaser + throw new IllegalArgumentException( + s"Unexpected cluster event type ${unexpected.getClass}" + ) // compiler exhaustiveness check pleaser } // once captured, optional verbose logging of event @@ -146,26 +143,21 @@ import akka.dispatch.UnboundedMessageQueueSemantics s.members.find(_.uniqueAddress == cluster.selfUniqueAddress).getOrElse(oldState.selfMember) _state.set(oldState.copy(clusterState = s, selfMember = newSelfMember)) } - }).withDispatcher(cluster.settings.UseDispatcher).withDeploy(Deploy.local), name = "clusterEventBusListener") + }).withDispatcher(cluster.settings.UseDispatcher).withDeploy(Deploy.local), + name = "clusterEventBusListener") } def state: CurrentClusterState = _state.get().clusterState def self: Member = _state.get().selfMember - /** - * Returns true if this cluster instance has be shutdown. - */ + /** Returns true if this cluster instance has be shutdown. */ def isTerminated: Boolean = cluster.isTerminated - /** - * Current cluster members, sorted by address. - */ + /** Current cluster members, sorted by address. */ def members: immutable.SortedSet[Member] = _state.get().clusterState.members - /** - * Members that has been detected as unreachable. - */ + /** Members that has been detected as unreachable. 
*/ def unreachableMembers: Set[Member] = _state.get().clusterState.unreachable /** @@ -177,19 +169,13 @@ import akka.dispatch.UnboundedMessageQueueSemantics */ def status: MemberStatus = self.status - /** - * Is this node the current data center leader - */ + /** Is this node the current data center leader */ def isLeader: Boolean = leader.contains(selfAddress) - /** - * Get the address of the current data center leader - */ + /** Get the address of the current data center leader */ def leader: Option[Address] = _state.get().clusterState.leader - /** - * Does the cluster consist of only one member? - */ + /** Does the cluster consist of only one member? */ def isSingletonCluster: Boolean = members.size == 1 /** @@ -211,9 +197,7 @@ import akka.dispatch.UnboundedMessageQueueSemantics */ private[cluster] def seenBy: Set[Address] = _state.get().clusterState.seenBy - /** - * INTERNAL API - */ + /** INTERNAL API */ private[cluster] def latestStats: CurrentInternalStats = _state.get().latestStats private def logInfoVerbose(event: ClusterDomainEvent): Unit = { @@ -240,9 +224,7 @@ import akka.dispatch.UnboundedMessageQueueSemantics } } - /** - * Unsubscribe to cluster events. - */ + /** Unsubscribe to cluster events. */ def close(): Unit = { if (!eventBusListener.isTerminated) eventBusListener ! PoisonPill diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterRemoteWatcher.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterRemoteWatcher.scala index 5f02b7a8fd6..2b74735e5dc 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterRemoteWatcher.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterRemoteWatcher.scala @@ -22,14 +22,10 @@ import akka.remote.FailureDetectorRegistry import akka.remote.RemoteSettings import akka.remote.RemoteWatcher -/** - * INTERNAL API - */ +/** INTERNAL API */ private[cluster] object ClusterRemoteWatcher { - /** - * Factory method for `ClusterRemoteWatcher` [[akka.actor.Props]]. 
- */ + /** Factory method for `ClusterRemoteWatcher` [[akka.actor.Props]]. */ def props(failureDetector: FailureDetectorRegistry[Address], settings: RemoteSettings): Props = Props( new ClusterRemoteWatcher( diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala index 7ccc32e7228..f80b2300968 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala @@ -20,15 +20,11 @@ import akka.util.Version object ClusterSettings { type DataCenter = String - /** - * INTERNAL API. - */ + /** INTERNAL API. */ @InternalApi private[akka] val DcRolePrefix = "dc-" - /** - * INTERNAL API. - */ + /** INTERNAL API. */ @InternalApi private[akka] val DefaultDataCenter: DataCenter = "default" @@ -111,9 +107,7 @@ final class ClusterSettings(val config: Config, val systemName: String) { } } - /** - * Is in fact always a `FiniteDuration` but needs to stay `Duration` for binary compatibility - */ + /** Is in fact always a `FiniteDuration` but needs to stay `Duration` for binary compatibility */ val PruneGossipTombstonesAfter: Duration = { val key = "prune-gossip-tombstones-after" cc.getMillisDuration(key).requiring(_ >= Duration.Zero, key + " >= 0s") @@ -169,8 +163,8 @@ final class ClusterSettings(val config: Config, val systemName: String) { cc.getConfig("role") .root .asScala - .collect { - case (key, value: ConfigObject) => key -> value.toConfig.getInt("min-nr-of-members") + .collect { case (key, value: ConfigObject) => + key -> value.toConfig.getInt("min-nr-of-members") } .toMap } diff --git a/akka-cluster/src/main/scala/akka/cluster/CoordinatedShutdownLeave.scala b/akka-cluster/src/main/scala/akka/cluster/CoordinatedShutdownLeave.scala index 03768142684..eba5715d44b 100644 --- a/akka-cluster/src/main/scala/akka/cluster/CoordinatedShutdownLeave.scala +++ 
b/akka-cluster/src/main/scala/akka/cluster/CoordinatedShutdownLeave.scala @@ -11,18 +11,14 @@ import akka.actor.Props import akka.cluster.ClusterEvent._ import akka.cluster.MemberStatus._ -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] object CoordinatedShutdownLeave { def props(): Props = Props[CoordinatedShutdownLeave]() case object LeaveReq } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] class CoordinatedShutdownLeave extends Actor { import CoordinatedShutdownLeave.LeaveReq @@ -32,12 +28,11 @@ private[akka] class CoordinatedShutdownLeave extends Actor { cluster.unsubscribe(self) } - def receive = { - case LeaveReq => - // MemberRemoved is needed in case it was downed instead - cluster.leave(cluster.selfAddress) - cluster.subscribe(self, classOf[MemberLeft], classOf[MemberRemoved]) - context.become(waitingLeaveCompleted(sender())) + def receive = { case LeaveReq => + // MemberRemoved is needed in case it was downed instead + cluster.leave(cluster.selfAddress) + cluster.subscribe(self, classOf[MemberLeft], classOf[MemberRemoved]) + context.become(waitingLeaveCompleted(sender())) } def waitingLeaveCompleted(replyTo: ActorRef): Receive = { @@ -46,8 +41,8 @@ private[akka] class CoordinatedShutdownLeave extends Actor { // not joined yet done(replyTo) } else if (s.members.exists(m => - m.uniqueAddress == cluster.selfUniqueAddress && - (m.status == Leaving || m.status == Exiting || m.status == Down))) { + m.uniqueAddress == cluster.selfUniqueAddress && + (m.status == Leaving || m.status == Exiting || m.status == Down))) { done(replyTo) } case MemberLeft(m) => diff --git a/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala b/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala index 06d389cde30..c5b7208571f 100644 --- a/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala +++ b/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala @@ -94,9 +94,7 @@ private[cluster] 
class CrossDcHeartbeatSender extends Actor { cluster.unsubscribe(self) } - /** - * Looks up and returns the remote cluster heartbeat connection for the specific address. - */ + /** Looks up and returns the remote cluster heartbeat connection for the specific address. */ def heartbeatReceiver(address: Address): ActorSelection = context.actorSelection(ClusterHeartbeatReceiver.path(address)) @@ -128,12 +126,11 @@ private[cluster] class CrossDcHeartbeatSender extends Actor { case ClusterHeartbeatSender.ExpectedFirstHeartbeat(from) => triggerFirstHeartbeat(from) } - def introspecting: Actor.Receive = { - case ReportStatus() => - sender() ! { - if (activelyMonitoring) CrossDcHeartbeatSender.MonitoringActive(dataCentersState) - else CrossDcHeartbeatSender.MonitoringDormant() - } + def introspecting: Actor.Receive = { case ReportStatus() => + sender() ! { + if (activelyMonitoring) CrossDcHeartbeatSender.MonitoringActive(dataCentersState) + else CrossDcHeartbeatSender.MonitoringDormant() + } } def init(snapshot: CurrentClusterState): Unit = { @@ -336,21 +333,24 @@ private[cluster] object CrossDcHeartbeatingState { crossDcFailureDetector: FailureDetectorRegistry[Address], nrOfMonitoredNodesPerDc: Int, members: immutable.SortedSet[Member]): CrossDcHeartbeatingState = { - new CrossDcHeartbeatingState(selfDataCenter, crossDcFailureDetector, nrOfMonitoredNodesPerDc, state = { - // TODO unduplicate this with the logic in MembershipState.ageSortedTopOldestMembersPerDc - val groupedByDc = members.filter(atLeastInUpState).groupBy(_.dataCenter) - - if (members.ordering == Member.ageOrdering) { - // we already have the right ordering - groupedByDc - } else { - // we need to enforce the ageOrdering for the SortedSet in each DC - groupedByDc.map { - case (dc, ms) => + new CrossDcHeartbeatingState( + selfDataCenter, + crossDcFailureDetector, + nrOfMonitoredNodesPerDc, + state = { + // TODO unduplicate this with the logic in MembershipState.ageSortedTopOldestMembersPerDc + val groupedByDc 
= members.filter(atLeastInUpState).groupBy(_.dataCenter) + + if (members.ordering == Member.ageOrdering) { + // we already have the right ordering + groupedByDc + } else { + // we need to enforce the ageOrdering for the SortedSet in each DC + groupedByDc.map { case (dc, ms) => dc -> immutable.SortedSet.empty[Member](Member.ageOrdering).union(ms) + } } - } - }) + }) } } diff --git a/akka-cluster/src/main/scala/akka/cluster/DowningProvider.scala b/akka-cluster/src/main/scala/akka/cluster/DowningProvider.scala index c924b1648a0..9b3902001e3 100644 --- a/akka-cluster/src/main/scala/akka/cluster/DowningProvider.scala +++ b/akka-cluster/src/main/scala/akka/cluster/DowningProvider.scala @@ -9,9 +9,7 @@ import scala.concurrent.duration.FiniteDuration import akka.ConfigurationException import akka.actor.{ ActorSystem, ExtendedActorSystem, Props } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[cluster] object DowningProvider { /** @@ -23,8 +21,8 @@ private[cluster] object DowningProvider { val eas = system.asInstanceOf[ExtendedActorSystem] eas.dynamicAccess .createInstanceFor[DowningProvider](fqcn, List((classOf[ActorSystem], system))) - .recover { - case e => throw new ConfigurationException(s"Could not create cluster downing provider [$fqcn]", e) + .recover { case e => + throw new ConfigurationException(s"Could not create cluster downing provider [$fqcn]", e) } .get } @@ -66,9 +64,7 @@ abstract class DowningProvider { } -/** - * Default downing provider used when no provider is configured. - */ +/** Default downing provider used when no provider is configured. 
*/ final class NoDowning(system: ActorSystem) extends DowningProvider { override def downRemovalMargin: FiniteDuration = Cluster(system).settings.DownRemovalMargin override val downingActorProps: Option[Props] = None diff --git a/akka-cluster/src/main/scala/akka/cluster/Gossip.scala b/akka-cluster/src/main/scala/akka/cluster/Gossip.scala index c25911ad514..b2d4dae0460 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Gossip.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Gossip.scala @@ -15,9 +15,7 @@ import MemberStatus._ import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ private[cluster] object Gossip { type Timestamp = Long val emptyMembers: immutable.SortedSet[Member] = immutable.SortedSet.empty @@ -112,60 +110,42 @@ private[cluster] final case class Gossip( members.exists(_.dataCenter != dc1) } - /** - * Increments the version for this 'Node'. - */ + /** Increments the version for this 'Node'. */ def :+(node: VectorClock.Node): Gossip = copy(version = version :+ node) - /** - * Adds a member to the member node ring. - */ + /** Adds a member to the member node ring. 
*/ def :+(member: Member): Gossip = { if (members contains member) this else this.copy(members = members + member) } - /** - * Marks the gossip as seen by this node (address) by updating the address entry in the 'gossip.overview.seen' - */ + /** Marks the gossip as seen by this node (address) by updating the address entry in the 'gossip.overview.seen' */ def seen(node: UniqueAddress): Gossip = { if (seenByNode(node)) this else this.copy(overview = overview.copy(seen = overview.seen + node)) } - /** - * Marks the gossip as seen by only this node (address) by replacing the 'gossip.overview.seen' - */ + /** Marks the gossip as seen by only this node (address) by replacing the 'gossip.overview.seen' */ def onlySeen(node: UniqueAddress): Gossip = { this.copy(overview = overview.copy(seen = Set(node))) } - /** - * Remove all seen entries - */ + /** Remove all seen entries */ def clearSeen(): Gossip = { this.copy(overview = overview.copy(seen = Set.empty)) } - /** - * The nodes that have seen the current version of the Gossip. - */ + /** The nodes that have seen the current version of the Gossip. */ def seenBy: Set[UniqueAddress] = overview.seen - /** - * Has this Gossip been seen by this node. - */ + /** Has this Gossip been seen by this node. */ def seenByNode(node: UniqueAddress): Boolean = overview.seen(node) - /** - * Merges the seen table of two Gossip instances. - */ + /** Merges the seen table of two Gossip instances. */ def mergeSeen(that: Gossip): Gossip = this.copy(overview = overview.copy(seen = overview.seen.union(that.overview.seen))) - /** - * Merges two Gossip instances including membership tables, tombstones, and the VectorClock histories. - */ + /** Merges two Gossip instances including membership tables, tombstones, and the VectorClock histories. */ def merge(that: Gossip): Gossip = { // 1. 
merge sets of tombstones diff --git a/akka-cluster/src/main/scala/akka/cluster/JoinConfigCompatCheckCluster.scala b/akka-cluster/src/main/scala/akka/cluster/JoinConfigCompatCheckCluster.scala index 907f1826fec..8848ee5930a 100644 --- a/akka-cluster/src/main/scala/akka/cluster/JoinConfigCompatCheckCluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/JoinConfigCompatCheckCluster.scala @@ -11,9 +11,7 @@ import com.typesafe.config.Config import akka.annotation.InternalApi import akka.cluster.sbr.SplitBrainResolverProvider -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object JoinConfigCompatCheckCluster { private val DowningProviderPath = "akka.cluster.downing-provider-class" private val SbrStrategyPath = "akka.cluster.split-brain-resolver.active-strategy" @@ -22,9 +20,7 @@ import akka.cluster.sbr.SplitBrainResolverProvider private val LightbendSbrProviderClass = "com.lightbend.akka.sbr.SplitBrainResolverProvider" } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi final class JoinConfigCompatCheckCluster extends JoinConfigCompatChecker { import JoinConfigCompatCheckCluster._ @@ -36,8 +32,8 @@ final class JoinConfigCompatCheckCluster extends JoinConfigCompatChecker { val actualDowningProvider = actualConfig.getString(DowningProviderPath) val downingProviderResult = if (toCheckDowningProvider == actualDowningProvider || Set(toCheckDowningProvider, actualDowningProvider) == Set( - AkkaSbrProviderClass, - LightbendSbrProviderClass)) + AkkaSbrProviderClass, + LightbendSbrProviderClass)) Valid else JoinConfigCompatChecker.checkEquality(List(DowningProviderPath), toCheck, actualConfig) diff --git a/akka-cluster/src/main/scala/akka/cluster/JoinConfigCompatChecker.scala b/akka-cluster/src/main/scala/akka/cluster/JoinConfigCompatChecker.scala index 3b19852e9f5..5ca489e9bf6 100644 --- a/akka-cluster/src/main/scala/akka/cluster/JoinConfigCompatChecker.scala +++ b/akka-cluster/src/main/scala/akka/cluster/JoinConfigCompatChecker.scala 
@@ -63,9 +63,7 @@ object JoinConfigCompatChecker { exists(requiredKeys, toCheck) ++ checkEquality(requiredKeys, toCheck, actualConfig) } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def checkEquality( keys: im.Seq[String], toCheck: Config, @@ -179,9 +177,7 @@ sealed trait ConfigValidation { case object Valid extends ConfigValidation { - /** - * Java API: get the singleton instance - */ + /** Java API: get the singleton instance */ def getInstance = this } diff --git a/akka-cluster/src/main/scala/akka/cluster/Member.scala b/akka-cluster/src/main/scala/akka/cluster/Member.scala index 5c2cd9f7f3f..e9c7a558f7b 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Member.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Member.scala @@ -42,14 +42,12 @@ class Member private[cluster] ( } override def toString: String = { s"Member($address, $status${if (dataCenter == ClusterSettings.DefaultDataCenter) "" else s", $dataCenter"}${if (appVersion == Version.Zero) "" - else s", $appVersion"})" + else s", $appVersion"})" } def hasRole(role: String): Boolean = roles.contains(role) - /** - * Java API - */ + /** Java API */ @nowarn("msg=deprecated") def getRoles: java.util.Set[String] = scala.collection.JavaConverters.setAsJavaSetConverter(roles).asJava @@ -92,9 +90,7 @@ class Member private[cluster] ( } } -/** - * Module with factory and ordering methods for Member instances. - */ +/** Module with factory and ordering methods for Member instances. 
*/ object Member { val none = Set.empty[Member] @@ -107,15 +103,11 @@ object Member { private[akka] def apply(uniqueAddress: UniqueAddress, roles: Set[String], appVersion: Version): Member = new Member(uniqueAddress, Int.MaxValue, Joining, roles, appVersion) - /** - * INTERNAL API - */ + /** INTERNAL API */ private[cluster] def removed(node: UniqueAddress): Member = new Member(node, Int.MaxValue, Removed, Set(ClusterSettings.DcRolePrefix + "-N/A"), Version.Zero) - /** - * `Address` ordering type class, sorts addresses by host and port. - */ + /** `Address` ordering type class, sorts addresses by host and port. */ implicit val addressOrdering: Ordering[Address] = Ordering.fromLessThan[Address] { (a, b) => // cluster node identifier is the host and port of the address; protocol and system is assumed to be the same if (a eq b) false @@ -144,9 +136,7 @@ object Member { } } - /** - * `Member` ordering type class, sorts members by host and port. - */ + /** `Member` ordering type class, sorts members by host and port. */ implicit val ordering: Ordering[Member] = new Ordering[Member] { def compare(a: Member, b: Member): Int = { a.uniqueAddress.compare(b.uniqueAddress) @@ -165,16 +155,12 @@ object Member { a.isOlderThan(b) } - /** - * INTERNAL API. - */ + /** INTERNAL API. */ @InternalApi private[akka] def pickHighestPriority(a: Set[Member], b: Set[Member]): Set[Member] = pickHighestPriority(a, b, Map.empty) - /** - * INTERNAL API. - */ + /** INTERNAL API. 
*/ @InternalApi private[akka] def pickHighestPriority( a: Set[Member], @@ -183,15 +169,14 @@ object Member { // group all members by Address => Seq[Member] val groupedByAddress = (a.toSeq ++ b.toSeq).groupBy(_.uniqueAddress) // pick highest MemberStatus - groupedByAddress.foldLeft(Member.none) { - case (acc, (_, members)) => - if (members.size == 2) acc + members.reduceLeft(highestPriorityOf) - else { - val m = members.head - if (tombstones.contains(m.uniqueAddress) || MembershipState.removeUnreachableWithMemberStatus(m.status)) - acc // removed - else acc + m - } + groupedByAddress.foldLeft(Member.none) { case (acc, (_, members)) => + if (members.size == 2) acc + members.reduceLeft(highestPriorityOf) + else { + val m = members.head + if (tombstones.contains(m.uniqueAddress) || MembershipState.removeUnreachableWithMemberStatus(m.status)) + acc // removed + else acc + m + } } } @@ -245,54 +230,34 @@ object MemberStatus { @SerialVersionUID(1L) case object PreparingForShutdown extends MemberStatus @SerialVersionUID(1L) case object ReadyForShutdown extends MemberStatus - /** - * Java API: retrieve the `Joining` status singleton - */ + /** Java API: retrieve the `Joining` status singleton */ def joining: MemberStatus = Joining - /** - * Java API: retrieve the `WeaklyUp` status singleton. - */ + /** Java API: retrieve the `WeaklyUp` status singleton. 
*/ def weaklyUp: MemberStatus = WeaklyUp - /** - * Java API: retrieve the `Up` status singleton - */ + /** Java API: retrieve the `Up` status singleton */ def up: MemberStatus = Up - /** - * Java API: retrieve the `Leaving` status singleton - */ + /** Java API: retrieve the `Leaving` status singleton */ def leaving: MemberStatus = Leaving - /** - * Java API: retrieve the `Exiting` status singleton - */ + /** Java API: retrieve the `Exiting` status singleton */ def exiting: MemberStatus = Exiting - /** - * Java API: retrieve the `Down` status singleton - */ + /** Java API: retrieve the `Down` status singleton */ def down: MemberStatus = Down - /** - * Java API: retrieve the `Removed` status singleton - */ + /** Java API: retrieve the `Removed` status singleton */ def removed: MemberStatus = Removed - /** - * Java API: retrieve the `ShuttingDown` status singleton - */ + /** Java API: retrieve the `ShuttingDown` status singleton */ def shuttingDown: MemberStatus = PreparingForShutdown - /** - * Java API: retrieve the `ShutDown` status singleton - */ + /** Java API: retrieve the `ShutDown` status singleton */ def shutDown: MemberStatus = ReadyForShutdown - /** - * INTERNAL API - */ + /** INTERNAL API */ private[cluster] val allowedTransitions: Map[MemberStatus, Set[MemberStatus]] = Map( Joining -> Set(WeaklyUp, Up, Leaving, Down, Removed), diff --git a/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala b/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala index c9aba6790a1..0ad0ea8a203 100644 --- a/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala +++ b/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala @@ -16,9 +16,7 @@ import akka.cluster.ClusterSettings.DataCenter import akka.cluster.MemberStatus._ import akka.util.ccompat._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @ccompatUsedUntil213 @InternalApi private[akka] object MembershipState { import MemberStatus._ @@ -31,9 +29,7 @@ import akka.util.ccompat._ val 
prepareForShutdownStates = Set[MemberStatus](PreparingForShutdown, ReadyForShutdown) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class MembershipState( latestGossip: Gossip, selfUniqueAddress: UniqueAddress, @@ -69,11 +65,10 @@ import akka.util.ccompat._ // only assigned once. def memberHinderingConvergenceExists = { val memberStatus = if (firstMemberInDc) convergenceMemberStatus + Joining + WeaklyUp else convergenceMemberStatus - members.exists( - member => - (firstMemberInDc || member.dataCenter == selfDc) && - memberStatus(member.status) && - !(latestGossip.seenByNode(member.uniqueAddress) || exitingConfirmed(member.uniqueAddress))) + members.exists(member => + (firstMemberInDc || member.dataCenter == selfDc) && + memberStatus(member.status) && + !(latestGossip.seenByNode(member.uniqueAddress) || exitingConfirmed(member.uniqueAddress))) } // Find cluster members in the data center that are unreachable from other members of the data center @@ -104,9 +99,7 @@ import akka.util.ccompat._ latestGossip.member(r.subject).dataCenter != selfDc } - /** - * @return reachability for data center nodes, with observations from outside the data center or from downed nodes filtered out - */ + /** @return reachability for data center nodes, with observations from outside the data center or from downed nodes filtered out */ lazy val dcReachabilityExcludingDownedObservers: Reachability = { val membersToExclude = members.collect { case m if m.status == Down || m.dataCenter != selfDc => m.uniqueAddress } overview.reachability @@ -117,9 +110,7 @@ import akka.util.ccompat._ lazy val dcReachabilityNoOutsideNodes: Reachability = overview.reachability.remove(members.collect { case m if m.dataCenter != selfDc => m.uniqueAddress }) - /** - * @return Up to `crossDcConnections` oldest members for each DC - */ + /** @return Up to `crossDcConnections` oldest members for each DC */ lazy val ageSortedTopOldestMembersPerDc: Map[DataCenter, 
immutable.SortedSet[Member]] = { latestGossip.members.foldLeft(Map.empty[DataCenter, immutable.SortedSet[Member]]) { (acc, member) => acc.get(member.dataCenter) match { @@ -174,11 +165,10 @@ import akka.util.ccompat._ val reachableMembersInDc = if (reachability.isAllReachable) mbrs.filter(m => m.dataCenter == selfDc && m.status != Down) else - mbrs.filter( - m => - m.dataCenter == selfDc && - m.status != Down && - (reachability.isReachable(m.uniqueAddress) || m.uniqueAddress == selfUniqueAddress)) + mbrs.filter(m => + m.dataCenter == selfDc && + m.status != Down && + (reachability.isReachable(m.uniqueAddress) || m.uniqueAddress == selfUniqueAddress)) if (reachableMembersInDc.isEmpty) None else reachableMembersInDc @@ -204,9 +194,7 @@ import akka.util.ccompat._ mbrs.maxBy(m => if (m.upNumber == Int.MaxValue) 0 else m.upNumber) } - /** - * The Exiting change is gossiped to the two oldest nodes for quick dissemination to potential Singleton nodes - */ + /** The Exiting change is gossiped to the two oldest nodes for quick dissemination to potential Singleton nodes */ def gossipTargetsForExitingMembers(exitingMembers: Set[Member]): Set[Member] = { if (exitingMembers.nonEmpty) { val roles = exitingMembers.flatten(_.roles).filterNot(_.startsWith(ClusterSettings.DcRolePrefix)) @@ -232,9 +220,7 @@ import akka.util.ccompat._ } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class GossipTargetSelector( reduceGossipDifferentViewProbability: Double, crossDcGossipProbability: Double) { @@ -247,9 +233,7 @@ import akka.util.ccompat._ if (state.latestGossip.isMultiDc) multiDcGossipTargets(state) else localDcGossipTargets(state) - /** - * Select `n` random nodes to gossip to (used to quickly inform the rest of the cluster when leaving for example) - */ + /** Select `n` random nodes to gossip to (used to quickly inform the rest of the cluster when leaving for example) */ def randomNodesForFullGossip(state: MembershipState, n: Int): Vector[UniqueAddress] = 
if (state.latestGossip.isMultiDc && state.ageSortedTopOldestMembersPerDc(state.selfDc).contains(state.selfMember)) { // this node is one of the N oldest in the cluster, gossip to one cross-dc but mostly locally @@ -316,9 +300,7 @@ import akka.util.ccompat._ } - /** - * Choose cross-dc nodes if this one of the N oldest nodes, and if not fall back to gossip locally in the dc - */ + /** Choose cross-dc nodes if this one of the N oldest nodes, and if not fall back to gossip locally in the dc */ protected def multiDcGossipTargets(state: MembershipState): Vector[UniqueAddress] = { // only a fraction of the time across data centers if (selectDcLocalNodes(state)) diff --git a/akka-cluster/src/main/scala/akka/cluster/Reachability.scala b/akka-cluster/src/main/scala/akka/cluster/Reachability.scala index 3fa3fff7486..5496f63a89a 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Reachability.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Reachability.scala @@ -9,9 +9,7 @@ import scala.collection.immutable import akka.annotation.InternalApi import akka.util.ccompat._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @ccompatUsedUntil213 private[cluster] object Reachability { val empty = new Reachability(Vector.empty, Map.empty) @@ -146,9 +144,9 @@ private[cluster] class Reachability private ( if (oldRecord.status == Terminated || oldRecord.status == status) this else { - if (status == Reachable && oldObserverRows.forall { - case (_, r) => r.status == Reachable || r.subject == subject - }) { + if (status == Reachable && oldObserverRows.forall { case (_, r) => + r.status == Reachable || r.subject == subject + }) { // all Reachable, prune by removing the records of the observer, and bump the version new Reachability(records.filterNot(_.observer == observer), newVersions) } else { @@ -234,9 +232,7 @@ private[cluster] class Reachability private ( */ def isReachable(node: UniqueAddress): Boolean = isAllReachable || !allUnreachableOrTerminated.contains(node) - /** - * @return 
true if the given observer node can reach the subject node. - */ + /** @return true if the given observer node can reach the subject node. */ def isReachable(observer: UniqueAddress, subject: UniqueAddress): Boolean = status(observer, subject) == Reachable diff --git a/akka-cluster/src/main/scala/akka/cluster/SeedNodeProcess.scala b/akka-cluster/src/main/scala/akka/cluster/SeedNodeProcess.scala index 38d5aaca9d7..ffd1f6a26de 100644 --- a/akka-cluster/src/main/scala/akka/cluster/SeedNodeProcess.scala +++ b/akka-cluster/src/main/scala/akka/cluster/SeedNodeProcess.scala @@ -11,9 +11,7 @@ import akka.actor.{ Actor, ActorRef, Address, CoordinatedShutdown, ReceiveTimeou import akka.annotation.{ InternalApi, InternalStableApi } import akka.util.unused -/** - * INTERNAL API. - */ +/** INTERNAL API. */ @InternalApi private[cluster] abstract class SeedNodeProcess(joinConfigCompatChecker: JoinConfigCompatChecker) extends Actor { import ClusterUserAction.JoinTo @@ -280,7 +278,6 @@ private[cluster] final class JoinSeedNodeProcess( receiveInitJoinAckIncompatibleConfig(joinTo = address, origin = sender(), behavior = Some(done)) case InitJoinNack(_) => // that seed was uninitialized - case ReceiveTimeout => if (attempt >= 2) logWarning( diff --git a/akka-cluster/src/main/scala/akka/cluster/VectorClock.scala b/akka-cluster/src/main/scala/akka/cluster/VectorClock.scala index 19efb191a05..0518a0ff1e3 100644 --- a/akka-cluster/src/main/scala/akka/cluster/VectorClock.scala +++ b/akka-cluster/src/main/scala/akka/cluster/VectorClock.scala @@ -16,9 +16,7 @@ import scala.collection.immutable.TreeMap */ private[cluster] object VectorClock { - /** - * Hash representation of a versioned node name. - */ + /** Hash representation of a versioned node name. 
*/ type Node = String object Node { @@ -31,7 +29,7 @@ private[cluster] object VectorClock { val digester = MessageDigest.getInstance("MD5") digester.update(name.getBytes("UTF-8")) digester.digest.map { h => - "%02x".format(0xFF & h) + "%02x".format(0xff & h) }.mkString } } @@ -47,14 +45,10 @@ private[cluster] object VectorClock { case object Same extends Ordering case object Concurrent extends Ordering - /** - * Marker to ensure that we do a full order comparison instead of bailing out early. - */ + /** Marker to ensure that we do a full order comparison instead of bailing out early. */ private case object FullOrder extends Ordering - /** - * Marker to signal that we have reached the end of a vector clock. - */ + /** Marker to signal that we have reached the end of a vector clock. */ private val cmpEndMarker = (VectorClock.Node("endmarker"), Timestamp.EndMarker) } @@ -74,32 +68,22 @@ final case class VectorClock(versions: TreeMap[VectorClock.Node, Long] = TreeMap import VectorClock._ - /** - * Increment the version for the node passed as argument. Returns a new VectorClock. - */ + /** Increment the version for the node passed as argument. Returns a new VectorClock. */ def :+(node: Node): VectorClock = { val currentTimestamp = versions.getOrElse(node, Timestamp.Zero) copy(versions = versions.updated(node, currentTimestamp + 1)) } - /** - * Returns true if this and that are concurrent else false. - */ + /** Returns true if this and that are concurrent else false. */ def <>(that: VectorClock): Boolean = compareOnlyTo(that, Concurrent) eq Concurrent - /** - * Returns true if this is before that else false. - */ + /** Returns true if this is before that else false. */ def <(that: VectorClock): Boolean = compareOnlyTo(that, Before) eq Before - /** - * Returns true if this is after that else false. - */ + /** Returns true if this is after that else false. 
*/ def >(that: VectorClock): Boolean = compareOnlyTo(that, After) eq After - /** - * Returns true if this VectorClock has the same history as the 'that' VectorClock else false. - */ + /** Returns true if this VectorClock has the same history as the 'that' VectorClock else false. */ def ==(that: VectorClock): Boolean = compareOnlyTo(that, Same) eq Same /** @@ -176,9 +160,7 @@ final case class VectorClock(versions: TreeMap[VectorClock.Node, Long] = TreeMap compareOnlyTo(that, FullOrder) } - /** - * Merges this VectorClock with another VectorClock. E.g. merges its versioned history. - */ + /** Merges this VectorClock with another VectorClock. E.g. merges its versioned history. */ def merge(that: VectorClock): VectorClock = { var mergedVersions = that.versions for ((node, time) <- versions) { diff --git a/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala b/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala index a56ff118c38..0983b563f00 100644 --- a/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala +++ b/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala @@ -28,9 +28,7 @@ import akka.util.Version import akka.util.ccompat._ import akka.util.ccompat.JavaConverters._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @ccompatUsedUntil213 private[akka] object ClusterMessageSerializer { @@ -69,9 +67,7 @@ private[akka] object ClusterMessageSerializer { private final val BufferSize = 1024 * 4 } -/** - * Protobuf serializer of cluster messages. - */ +/** Protobuf serializer of cluster messages. 
*/ final class ClusterMessageSerializer(val system: ExtendedActorSystem) extends SerializerWithStringManifest with BaseSerializer { @@ -358,13 +354,15 @@ final class ClusterMessageSerializer(val system: ExtendedActorSystem) private def uniqueAddressFromProto(uniqueAddress: cm.UniqueAddress): UniqueAddress = { - UniqueAddress(addressFromProto(uniqueAddress.getAddress), if (uniqueAddress.hasUid2) { - // new remote node join the two parts of the long uid back - (uniqueAddress.getUid2.toLong << 32) | (uniqueAddress.getUid & 0XFFFFFFFFL) - } else { - // old remote node - uniqueAddress.getUid.toLong - }) + UniqueAddress( + addressFromProto(uniqueAddress.getAddress), + if (uniqueAddress.hasUid2) { + // new remote node join the two parts of the long uid back + (uniqueAddress.getUid2.toLong << 32) | (uniqueAddress.getUid & 0xffffffffL) + } else { + // old remote node + uniqueAddress.getUid.toLong + }) } private val memberStatusToInt = scala.collection.immutable.HashMap[MemberStatus, Int]( @@ -465,22 +463,20 @@ final class ClusterMessageSerializer(val system: ExtendedActorSystem) .setAppVersionIndex(mapAppVersion(member.appVersion)) def reachabilityToProto(reachability: Reachability): Iterable[cm.ObserverReachability.Builder] = { - reachability.versions.map { - case (observer, version) => - val subjectReachability = reachability - .recordsFrom(observer) - .map( - r => - cm.SubjectReachability - .newBuilder() - .setAddressIndex(mapUniqueAddress(r.subject)) - .setStatus(cm.ReachabilityStatus.forNumber(reachabilityStatusToInt(r.status))) - .setVersion(r.version)) - cm.ObserverReachability - .newBuilder() - .setAddressIndex(mapUniqueAddress(observer)) - .setVersion(version) - .addAllSubjectReachability(subjectReachability.map(_.build).asJava) + reachability.versions.map { case (observer, version) => + val subjectReachability = reachability + .recordsFrom(observer) + .map(r => + cm.SubjectReachability + .newBuilder() + .setAddressIndex(mapUniqueAddress(r.subject)) + 
.setStatus(cm.ReachabilityStatus.forNumber(reachabilityStatusToInt(r.status))) + .setVersion(r.version)) + cm.ObserverReachability + .newBuilder() + .setAddressIndex(mapUniqueAddress(observer)) + .setVersion(version) + .addAllSubjectReachability(subjectReachability.map(_.build).asJava) } } @@ -507,9 +503,8 @@ final class ClusterMessageSerializer(val system: ExtendedActorSystem) } private def vectorClockToProto(version: VectorClock, hashMapping: Map[String, Int]): cm.VectorClock.Builder = { - val versions: Iterable[cm.VectorClock.Version.Builder] = version.versions.map { - case (n, t) => - cm.VectorClock.Version.newBuilder().setHashIndex(mapWithErrorMessage(hashMapping, n, "hash")).setTimestamp(t) + val versions: Iterable[cm.VectorClock.Version.Builder] = version.versions.map { case (n, t) => + cm.VectorClock.Version.newBuilder().setHashIndex(mapWithErrorMessage(hashMapping, n, "hash")).setTimestamp(t) } cm.VectorClock.newBuilder().setTimestamp(0).addAllVersions(versions.map(_.build).asJava) } diff --git a/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala b/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala index f37e59bbd8e..ef82ca2d323 100644 --- a/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala +++ b/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala @@ -50,7 +50,7 @@ object ClusterRouterGroupSettings { routeesPaths = immutableSeq(config.getStringList("routees.paths")), allowLocalRoutees = config.getBoolean("cluster.allow-local-routees"), useRoles = config.getStringList("cluster.use-roles").asScala.toSet ++ ClusterRouterSettingsBase.useRoleOption( - config.getString("cluster.use-role"))) + config.getString("cluster.use-role"))) def apply( totalInstances: Int, @@ -63,9 +63,7 @@ object ClusterRouterGroupSettings { Some((settings.totalInstances, settings.routeesPaths, settings.allowLocalRoutees, settings.useRoles)) } -/** - * `totalInstances` of cluster router must be 
> 0 - */ +/** `totalInstances` of cluster router must be > 0 */ @SerialVersionUID(1L) final class ClusterRouterGroupSettings( val totalInstances: Int, @@ -107,9 +105,7 @@ final class ClusterRouterGroupSettings( override def toString: String = s"ClusterRouterGroupSettings($totalInstances,$routeesPaths,$allowLocalRoutees,$useRoles)" - /** - * Java API - */ + /** Java API */ def this( totalInstances: Int, routeesPaths: java.lang.Iterable[String], @@ -134,9 +130,7 @@ final class ClusterRouterGroupSettings( def withUseRoles(useRoles: String*): ClusterRouterGroupSettings = new ClusterRouterGroupSettings(totalInstances, routeesPaths, allowLocalRoutees, useRoles.toSet) - /** - * Java API - */ + /** Java API */ def withUseRoles(useRoles: java.util.Set[String]): ClusterRouterGroupSettings = new ClusterRouterGroupSettings(totalInstances, routeesPaths, allowLocalRoutees, useRoles.asScala.toSet) } @@ -165,7 +159,7 @@ object ClusterRouterPoolSettings { maxInstancesPerNode = config.getInt("cluster.max-nr-of-instances-per-node"), allowLocalRoutees = config.getBoolean("cluster.allow-local-routees"), useRoles = config.getStringList("cluster.use-roles").asScala.toSet ++ ClusterRouterSettingsBase.useRoleOption( - config.getString("cluster.use-role"))) + config.getString("cluster.use-role"))) def unapply(settings: ClusterRouterPoolSettings): Option[(Int, Int, Boolean, Set[String])] = Some((settings.totalInstances, settings.maxInstancesPerNode, settings.allowLocalRoutees, settings.useRoles)) @@ -217,9 +211,7 @@ final class ClusterRouterPoolSettings( override def toString: String = s"ClusterRouterPoolSettings($totalInstances,$maxInstancesPerNode,$allowLocalRoutees,$useRoles)" - /** - * Java API - */ + /** Java API */ def this(totalInstances: Int, maxInstancesPerNode: Int, allowLocalRoutees: Boolean, useRoles: java.util.Set[String]) = this(totalInstances, maxInstancesPerNode, allowLocalRoutees, useRoles.asScala.toSet) @@ -233,16 +225,12 @@ final class ClusterRouterPoolSettings( def 
withUseRoles(useRoles: String*): ClusterRouterPoolSettings = new ClusterRouterPoolSettings(totalInstances, maxInstancesPerNode, allowLocalRoutees, useRoles.toSet) - /** - * Java API - */ + /** Java API */ def withUseRoles(useRoles: java.util.Set[String]): ClusterRouterPoolSettings = new ClusterRouterPoolSettings(totalInstances, maxInstancesPerNode, allowLocalRoutees, useRoles.asScala.toSet) } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] object ClusterRouterSettingsBase { def useRoleOption(role: String): Option[String] = role match { case null | "" => None @@ -262,9 +250,7 @@ private[akka] object ClusterRouterSettingsBase { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] trait ClusterRouterSettingsBase { def totalInstances: Int def allowLocalRoutees: Boolean @@ -295,9 +281,7 @@ final case class ClusterRouterGroup(local: Group, settings: ClusterRouterGroupSe settings.routeesPaths } else Nil - /** - * INTERNAL API - */ + /** INTERNAL API */ override private[akka] def createRouterActor(): RouterActor = new ClusterRouterGroupActor(settings) override def withFallback(other: RouterConfig): RouterConfig = other match { @@ -326,9 +310,7 @@ final case class ClusterRouterPool(local: Pool, settings: ClusterRouterPoolSetti @transient private val childNameCounter = new AtomicInteger - /** - * INTERNAL API - */ + /** INTERNAL API */ override private[akka] def newRoutee(routeeProps: Props, context: ActorContext): Routee = { val name = "c" + childNameCounter.incrementAndGet val ref = context @@ -337,9 +319,7 @@ final case class ClusterRouterPool(local: Pool, settings: ClusterRouterPoolSetti ActorRefRoutee(ref) } - /** - * Initial number of routee instances - */ + /** Initial number of routee instances */ override def nrOfInstances(sys: ActorSystem): Int = if (settings.allowLocalRoutees && settings.useRoles.nonEmpty) { if (settings.useRoles.subsetOf(Cluster(sys).selfRoles)) { @@ -351,9 +331,7 @@ final case class ClusterRouterPool(local: Pool, 
settings: ClusterRouterPoolSetti override def resizer: Option[Resizer] = local.resizer - /** - * INTERNAL API - */ + /** INTERNAL API */ override private[akka] def createRouterActor(): RouterActor = new ClusterRouterPoolActor(local.supervisorStrategy, settings) @@ -370,9 +348,7 @@ final case class ClusterRouterPool(local: Pool, settings: ClusterRouterPoolSetti } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] trait ClusterRouterConfigBase extends RouterConfig { def local: RouterConfig def settings: ClusterRouterSettingsBase @@ -387,9 +363,7 @@ private[akka] trait ClusterRouterConfigBase extends RouterConfig { msg.isInstanceOf[ClusterDomainEvent] || msg.isInstanceOf[CurrentClusterState] || super.isManagementMessage(msg) } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] class ClusterRouterPoolActor( supervisorStrategy: SupervisorStrategy, val settings: ClusterRouterPoolSettings) @@ -398,9 +372,7 @@ private[akka] class ClusterRouterPoolActor( override def receive = clusterReceive.orElse(super.receive) - /** - * Adds routees based on totalInstances and maxInstancesPerNode settings - */ + /** Adds routees based on totalInstances and maxInstancesPerNode settings */ override def addRoutees(): Unit = { @tailrec def doAddRoutees(): Unit = selectDeploymentTarget match { @@ -442,9 +414,7 @@ private[akka] class ClusterRouterPoolActor( } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] class ClusterRouterGroupActor(val settings: ClusterRouterGroupSettings) extends RouterActor with ClusterRouterActor { @@ -463,9 +433,7 @@ private[akka] class ClusterRouterGroupActor(val settings: ClusterRouterGroupSett else Map.empty - /** - * Adds routees based on totalInstances and maxInstancesPerNode settings - */ + /** Adds routees based on totalInstances and maxInstancesPerNode settings */ override def addRoutees(): Unit = { @tailrec def doAddRoutees(): Unit = selectDeploymentTarget match { @@ -553,14 +521,12 @@ private[akka] trait ClusterRouterActor 
{ this: RouterActor => nodes } - /** - * Fills in self address for local ActorRef - */ + /** Fills in self address for local ActorRef */ def fullAddress(routee: Routee): Address = { val address = routee match { case ActorRefRoutee(ref) => ref.path.address case ActorSelectionRoutee(sel) => sel.anchor.path.address - case unknown => throw new IllegalArgumentException(s"Unsupported routee type: ${unknown.getClass}") + case unknown => throw new IllegalArgumentException(s"Unsupported routee type: ${unknown.getClass}") } address match { case Address(_, _, None, None) => cluster.selfAddress @@ -568,9 +534,7 @@ private[akka] trait ClusterRouterActor { this: RouterActor => } } - /** - * Adds routees based on settings - */ + /** Adds routees based on settings */ def addRoutees(): Unit def addMember(member: Member): Unit = { diff --git a/akka-cluster/src/main/scala/akka/cluster/sbr/DowningStrategy.scala b/akka-cluster/src/main/scala/akka/cluster/sbr/DowningStrategy.scala index e6cd850c7c8..613f87d8538 100644 --- a/akka-cluster/src/main/scala/akka/cluster/sbr/DowningStrategy.scala +++ b/akka-cluster/src/main/scala/akka/cluster/sbr/DowningStrategy.scala @@ -18,9 +18,7 @@ import akka.cluster.Reachability import akka.cluster.UniqueAddress import akka.coordination.lease.scaladsl.Lease -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object DowningStrategy { sealed trait Decision { def isIndirectlyConnected: Boolean @@ -54,9 +52,7 @@ import akka.coordination.lease.scaladsl.Lease } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] abstract class DowningStrategy(val selfDc: DataCenter, selfUniqueAddress: UniqueAddress) { import DowningStrategy._ @@ -87,9 +83,7 @@ import akka.coordination.lease.scaladsl.Lease @InternalStableApi def allMembersInDC: immutable.SortedSet[Member] = _allMembers - /** - * All members in self DC, but doesn't contain Joining, WeaklyUp, Down and Exiting. 
- */ + /** All members in self DC, but doesn't contain Joining, WeaklyUp, Down and Exiting. */ @InternalStableApi def members: immutable.SortedSet[Member] = members(includingPossiblyUp = false, excludingPossiblyExiting = false) @@ -104,13 +98,12 @@ import akka.coordination.lease.scaladsl.Lease * changed to Exiting on the other side of the partition. */ def members(includingPossiblyUp: Boolean, excludingPossiblyExiting: Boolean): immutable.SortedSet[Member] = - _allMembers.filterNot( - m => - (!includingPossiblyUp && m.status == MemberStatus.Joining) || - (!includingPossiblyUp && m.status == MemberStatus.WeaklyUp) || - (excludingPossiblyExiting && m.status == MemberStatus.Leaving) || - m.status == MemberStatus.Down || - m.status == MemberStatus.Exiting) + _allMembers.filterNot(m => + (!includingPossiblyUp && m.status == MemberStatus.Joining) || + (!includingPossiblyUp && m.status == MemberStatus.WeaklyUp) || + (excludingPossiblyExiting && m.status == MemberStatus.Leaving) || + m.status == MemberStatus.Down || + m.status == MemberStatus.Exiting) def membersWithRole: immutable.SortedSet[Member] = membersWithRole(includingPossiblyUp = false, excludingPossiblyExiting = false) @@ -210,10 +203,9 @@ import akka.coordination.lease.scaladsl.Lease private[sbr] def setReachability(r: Reachability): Unit = { // skip records with Reachability.Reachable, and skip records related to other DC - _reachability = r.filterRecords( - record => - (record.status == Reachability.Unreachable || record.status == Reachability.Terminated) && - isInSelfDc(record.observer) && isInSelfDc(record.subject)) + _reachability = r.filterRecords(record => + (record.status == Reachability.Unreachable || record.status == Reachability.Terminated) && + isInSelfDc(record.observer) && isInSelfDc(record.subject)) } def seenBy: Set[Address] = diff --git a/akka-cluster/src/main/scala/akka/cluster/sbr/SplitBrainResolver.scala b/akka-cluster/src/main/scala/akka/cluster/sbr/SplitBrainResolver.scala index 
39149833473..ed99d4947b1 100644 --- a/akka-cluster/src/main/scala/akka/cluster/sbr/SplitBrainResolver.scala +++ b/akka-cluster/src/main/scala/akka/cluster/sbr/SplitBrainResolver.scala @@ -32,9 +32,7 @@ import akka.event.Logging import akka.pattern.pipe import akka.remote.artery.ThisActorSystemQuarantinedEvent -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[sbr] object SplitBrainResolver { def props(stableAfter: FiniteDuration, strategy: DowningStrategy): Props = @@ -42,19 +40,13 @@ import akka.remote.artery.ThisActorSystemQuarantinedEvent case object Tick - /** - * Response (result) of the acquire lease request. - */ + /** Response (result) of the acquire lease request. */ final case class AcquireLeaseResult(holdingLease: Boolean) - /** - * Response (result) of the release lease request. - */ + /** Response (result) of the release lease request. */ final case class ReleaseLeaseResult(released: Boolean) - /** - * For delayed acquire of the lease. - */ + /** For delayed acquire of the lease. 
*/ case object AcquireLease sealed trait ReleaseLeaseCondition @@ -369,10 +361,9 @@ import akka.remote.artery.ThisActorSystemQuarantinedEvent implicit val ec: ExecutionContext = internalDispatcher strategy.lease.foreach( _.acquire() - .recover { - case t => - log.error(t, "SBR acquire of lease failed") - false + .recover { case t => + log.error(t, "SBR acquire of lease failed") + false } .map(AcquireLeaseResult.apply) .pipeTo(self)) @@ -427,9 +418,7 @@ import akka.remote.artery.ThisActorSystemQuarantinedEvent } } - /** - * @return the nodes that were downed - */ + /** @return the nodes that were downed */ def actOnDecision(decision: Decision): Set[UniqueAddress] = { val nodesToDown = try { @@ -475,7 +464,7 @@ import akka.remote.artery.ThisActorSystemQuarantinedEvent log.warning( ClusterLogMarker.sbrDowning(decision), s"SBR took decision $decision and is downing [${nodesToDown.map(_.address).mkString(", ")}]${if (downMyself) " including myself" - else ""}, " + + else ""}, " + s"[${strategy.unreachable.size}] unreachable of [${strategy.members.size}] members" + indirectlyConnectedLogMessage + s", all members in DC [${strategy.allMembersInDC.mkString(", ")}], full reachability status: [${strategy.reachability}]" + diff --git a/akka-cluster/src/main/scala/akka/cluster/sbr/SplitBrainResolverSettings.scala b/akka-cluster/src/main/scala/akka/cluster/sbr/SplitBrainResolverSettings.scala index 7211796ba28..e4adfc17c7a 100644 --- a/akka-cluster/src/main/scala/akka/cluster/sbr/SplitBrainResolverSettings.scala +++ b/akka-cluster/src/main/scala/akka/cluster/sbr/SplitBrainResolverSettings.scala @@ -16,9 +16,7 @@ import akka.annotation.InternalApi import akka.util.Helpers import akka.util.Helpers.Requiring -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[sbr] object SplitBrainResolverSettings { final val KeepMajorityName = "keep-majority" final val LeaseMajorityName = "lease-majority" @@ -30,9 +28,7 @@ import akka.util.Helpers.Requiring Set(KeepMajorityName, 
LeaseMajorityName, StaticQuorumName, KeepOldestName, DownAllName) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[sbr] final class SplitBrainResolverSettings(config: Config) { import SplitBrainResolverSettings._ @@ -117,19 +113,13 @@ import akka.util.Helpers.Requiring } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[sbr] final case class StaticQuorumSettings(size: Int, role: Option[String]) -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[sbr] final case class KeepOldestSettings(downIfAlone: Boolean, role: Option[String]) -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[sbr] final case class LeaseMajoritySettings( leaseImplementation: String, acquireLeaseDelayForMinority: FiniteDuration, diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/AttemptSysMsgRedeliverySpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/AttemptSysMsgRedeliverySpec.scala index 8bf3771cbb2..21ff891392c 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/AttemptSysMsgRedeliverySpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/AttemptSysMsgRedeliverySpec.scala @@ -27,8 +27,8 @@ object AttemptSysMsgRedeliveryMultiJvmSpec extends MultiNodeConfig { testTransport(on = true) class Echo extends Actor { - def receive = { - case m => sender() ! m + def receive = { case m => + sender() ! 
m } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterDeathWatchSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterDeathWatchSpec.scala index b3302a9e59c..edd57fe50e0 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterDeathWatchSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterDeathWatchSpec.scala @@ -86,20 +86,22 @@ abstract class ClusterDeathWatchSpec val path2 = RootActorPath(second) / "user" / "subject" val path3 = RootActorPath(third) / "user" / "subject" val watchEstablished = TestLatch(2) - system.actorOf(Props(new Actor { - context.actorSelection(path2) ! Identify(path2) - context.actorSelection(path3) ! Identify(path3) - - def receive = { - case ActorIdentity(`path2`, Some(ref)) => - context.watch(ref) - watchEstablished.countDown() - case ActorIdentity(`path3`, Some(ref)) => - context.watch(ref) - watchEstablished.countDown() - case Terminated(actor) => testActor ! actor.path - } - }).withDeploy(Deploy.local), name = "observer1") + system.actorOf( + Props(new Actor { + context.actorSelection(path2) ! Identify(path2) + context.actorSelection(path3) ! Identify(path3) + + def receive = { + case ActorIdentity(`path2`, Some(ref)) => + context.watch(ref) + watchEstablished.countDown() + case ActorIdentity(`path3`, Some(ref)) => + context.watch(ref) + watchEstablished.countDown() + case Terminated(actor) => testActor ! actor.path + } + }).withDeploy(Deploy.local), + name = "observer1") watchEstablished.await enterBarrier("watch-established") @@ -221,8 +223,8 @@ abstract class ClusterDeathWatchSpec // subject5 is not in RemoteWatcher.watching, the terminated for subject5 is from testActor.watch. 
// You can not verify that it is the testActor receiving it, though the remoteWatcher stats proves // it above - receiveWhile(messages = 2) { - case Terminated(ref) => ref.path.name + receiveWhile(messages = 2) { case Terminated(ref) => + ref.path.name }.toSet shouldEqual Set("subject5", "subject6") awaitAssert { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterShutdownSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterShutdownSpec.scala index 15b8b4dfb1c..b9f3cdb50f7 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterShutdownSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterShutdownSpec.scala @@ -42,11 +42,13 @@ abstract class ClusterShutdownSpec extends MultiNodeClusterSpec(ClusterShutdownS } runOn(first, second, third) { - awaitAssert({ - withClue("members: " + Cluster(system).readView.members) { - Cluster(system).selfMember.status shouldEqual MemberStatus.ReadyForShutdown - } - }, 10.seconds) + awaitAssert( + { + withClue("members: " + Cluster(system).readView.members) { + Cluster(system).selfMember.status shouldEqual MemberStatus.ReadyForShutdown + } + }, + 10.seconds) } } "spread around the cluster" in { @@ -73,26 +75,30 @@ abstract class ClusterShutdownSpec extends MultiNodeClusterSpec(ClusterShutdownS runOn(first) { Cluster(system).leave(address(first)) } - awaitAssert({ - withClue("members: " + Cluster(system).readView.members) { - runOn(second, third) { - Cluster(system).readView.members.size shouldEqual 2 - } - runOn(first) { - Cluster(system).selfMember.status shouldEqual Removed + awaitAssert( + { + withClue("members: " + Cluster(system).readView.members) { + runOn(second, third) { + Cluster(system).readView.members.size shouldEqual 2 + } + runOn(first) { + Cluster(system).selfMember.status shouldEqual Removed + } } - } - }, 10.seconds) + }, + 10.seconds) enterBarrier("first-gone") runOn(second) { Cluster(system).leave(address(second)) Cluster(system).leave(address(third)) } - 
awaitAssert({ - withClue("self member: " + Cluster(system).selfMember) { - Cluster(system).selfMember.status shouldEqual Removed - } - }, 10.seconds) + awaitAssert( + { + withClue("self member: " + Cluster(system).selfMember) { + Cluster(system).selfMember.status shouldEqual Removed + } + }, + 10.seconds) enterBarrier("all-gone") } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala index 912beb93211..6723ae4922b 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala @@ -20,10 +20,12 @@ final case class ConvergenceMultiNodeConfig(failureDetectorPuppet: Boolean) exte val fourth = role("fourth") commonConfig( - debugConfig(on = false).withFallback(ConfigFactory.parseString(""" + debugConfig(on = false) + .withFallback(ConfigFactory.parseString(""" akka.cluster.failure-detector.threshold = 4 akka.cluster.allow-weakly-up-members = off - """)).withFallback(MultiNodeClusterSpec.clusterConfig(failureDetectorPuppet))) + """)) + .withFallback(MultiNodeClusterSpec.clusterConfig(failureDetectorPuppet))) } class ConvergenceWithFailureDetectorPuppetMultiJvmNode1 extends ConvergenceSpec(failureDetectorPuppet = true) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/DowningWhenOtherHasQuarantinedThisActorSystemSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/DowningWhenOtherHasQuarantinedThisActorSystemSpec.scala index c21dafbc22f..a5d440773c0 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/DowningWhenOtherHasQuarantinedThisActorSystemSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/DowningWhenOtherHasQuarantinedThisActorSystemSpec.scala @@ -29,8 +29,7 @@ object DowningWhenOtherHasQuarantinedThisActorSystemSpec extends MultiNodeConfig commonConfig( debugConfig(on = false) .withFallback(MultiNodeClusterSpec.clusterConfig) - .withFallback( - 
ConfigFactory.parseString(""" + .withFallback(ConfigFactory.parseString(""" akka.remote.artery.enabled = on akka.cluster.downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider" akka.cluster.split-brain-resolver.stable-after = 10s diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/InitialHeartbeatSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/InitialHeartbeatSpec.scala index 08978aaeec5..0670971b93d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/InitialHeartbeatSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/InitialHeartbeatSpec.scala @@ -20,8 +20,10 @@ object InitialHeartbeatMultiJvmSpec extends MultiNodeConfig { val second = role("second") commonConfig( - debugConfig(on = false).withFallback(ConfigFactory.parseString(""" - akka.cluster.failure-detector.threshold = 4""")).withFallback(MultiNodeClusterSpec.clusterConfig)) + debugConfig(on = false) + .withFallback(ConfigFactory.parseString(""" + akka.cluster.failure-detector.threshold = 4""")) + .withFallback(MultiNodeClusterSpec.clusterConfig)) testTransport(on = true) } @@ -45,19 +47,23 @@ abstract class InitialHeartbeatSpec extends MultiNodeClusterSpec(InitialHeartbea runOn(first) { within(10 seconds) { - awaitAssert({ - cluster.sendCurrentClusterState(testActor) - expectMsgType[CurrentClusterState].members.map(_.address) should contain(secondAddress) - }, interval = 50.millis) + awaitAssert( + { + cluster.sendCurrentClusterState(testActor) + expectMsgType[CurrentClusterState].members.map(_.address) should contain(secondAddress) + }, + interval = 50.millis) } } runOn(second) { cluster.join(first) within(10 seconds) { - awaitAssert({ - cluster.sendCurrentClusterState(testActor) - expectMsgType[CurrentClusterState].members.map(_.address) should contain(firstAddress) - }, interval = 50.millis) + awaitAssert( + { + cluster.sendCurrentClusterState(testActor) + expectMsgType[CurrentClusterState].members.map(_.address) should contain(firstAddress) + }, + 
interval = 50.millis) } } enterBarrier("second-joined") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/InitialMembersOfNewDcSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/InitialMembersOfNewDcSpec.scala index 334d3bb3b3e..302899e4a73 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/InitialMembersOfNewDcSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/InitialMembersOfNewDcSpec.scala @@ -78,9 +78,9 @@ abstract class InitialMembersOfNewDcSpec "see all dc1 nodes see each other as up" in { runOn(two, three) { within(20.seconds) { - awaitAssert({ + awaitAssert { cluster.state.members.filter(_.status == MemberStatus.Up) should have size 3 - }) + } } } enterBarrier("dc1 fully up") @@ -97,9 +97,9 @@ abstract class InitialMembersOfNewDcSpec // Check how long it takes for all other nodes to see every node as up runOn(one, two, three, four) { within(20.seconds) { - awaitAssert({ + awaitAssert { cluster.state.members.filter(_.status == MemberStatus.Up) should have size 4 - }) + } } val totalTime = System.nanoTime() - startTime log.info("Can see new node (and all others as up): {}ms", totalTime.nanos.toMillis) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinInProgressSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinInProgressSpec.scala index bad60f600bd..27f66c62561 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinInProgressSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinInProgressSpec.scala @@ -16,7 +16,9 @@ object JoinInProgressMultiJvmSpec extends MultiNodeConfig { val second = role("second") commonConfig( - debugConfig(on = false).withFallback(ConfigFactory.parseString(""" + debugConfig(on = false).withFallback( + ConfigFactory + .parseString(""" akka.cluster { # simulate delay in gossip by turning it off gossip-interval = 300 s @@ -24,7 +26,8 @@ object JoinInProgressMultiJvmSpec extends MultiNodeConfig { threshold = 4 acceptable-heartbeat-pause = 1 second } - 
}""").withFallback(MultiNodeClusterSpec.clusterConfig))) + }""") + .withFallback(MultiNodeClusterSpec.clusterConfig))) } class JoinInProgressMultiJvmNode1 extends JoinInProgressSpec diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeMessageClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeMessageClusterSpec.scala index 429cbace838..43fecf5a24c 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeMessageClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeMessageClusterSpec.scala @@ -156,7 +156,7 @@ abstract class LargeMessageClusterSpec // for non Aeron transport we use the Slow message and SlowSerializer to slow down // to not completely overload the machine/network, see issue #24576 val arterySettings = ArterySettings(system.settings.config.getConfig("akka.remote.artery")) - val aeronUdpEnabled = (arterySettings.Enabled && arterySettings.Transport == ArterySettings.AeronUpd) + val aeronUdpEnabled = arterySettings.Enabled && arterySettings.Transport == ArterySettings.AeronUpd runOn(second) { val largeEcho2 = identify(second, "largeEcho") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala index e54dbe51af0..240a9ebc1cd 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala @@ -58,7 +58,7 @@ abstract class LeaderElectionSpec(multiNodeConfig: LeaderElectionMultiNodeConfig def shutdownLeaderAndVerifyNewLeader(alreadyShutdown: Int): Unit = { val currentRoles = sortedRoles.drop(alreadyShutdown) - currentRoles.size should be >= (2) + currentRoles.size should be >= 2 val leader = currentRoles.head val aUser = currentRoles.last val remainingRoles = currentRoles.tail @@ -87,7 +87,7 @@ abstract class LeaderElectionSpec(multiNodeConfig: LeaderElectionMultiNodeConfig // user marks the shutdown leader as 
DOWN cluster.down(leaderAddress) // removed - awaitAssert(clusterView.unreachableMembers.map(_.address) should not contain (leaderAddress)) + awaitAssert(clusterView.unreachableMembers.map(_.address) should not contain leaderAddress) enterBarrier("after-down" + n, "completed" + n) case _ if remainingRoles.contains(myself) => @@ -116,7 +116,8 @@ abstract class LeaderElectionSpec(multiNodeConfig: LeaderElectionMultiNodeConfig enterBarrier("after-2") } - "be able to 're-elect' a single leader after leader has left (again)" taggedAs LongRunningTest in within(30 seconds) { + "be able to 're-elect' a single leader after leader has left (again)" taggedAs LongRunningTest in within( + 30 seconds) { shutdownLeaderAndVerifyNewLeader(alreadyShutdown = 1) enterBarrier("after-3") } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala index f1e8e7f1be7..c113f40f3c4 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala @@ -62,15 +62,17 @@ abstract class LeaderLeavingSpec extends MultiNodeClusterSpec(LeaderLeavingMulti val exitingLatch = TestLatch() - cluster.subscribe(system.actorOf(Props(new Actor { - def receive = { - case state: CurrentClusterState => - if (state.members.exists(m => m.address == oldLeaderAddress && m.status == Exiting)) - exitingLatch.countDown() - case MemberExited(m) if m.address == oldLeaderAddress => exitingLatch.countDown() - case _ => // ignore - } - }).withDeploy(Deploy.local)), classOf[MemberEvent]) + cluster.subscribe( + system.actorOf(Props(new Actor { + def receive = { + case state: CurrentClusterState => + if (state.members.exists(m => m.address == oldLeaderAddress && m.status == Exiting)) + exitingLatch.countDown() + case MemberExited(m) if m.address == oldLeaderAddress => exitingLatch.countDown() + case _ => // ignore + } + 
}).withDeploy(Deploy.local)), + classOf[MemberEvent]) enterBarrier("registered-listener") enterBarrier("leader-left") @@ -82,13 +84,13 @@ abstract class LeaderLeavingSpec extends MultiNodeClusterSpec(LeaderLeavingMulti markNodeAsUnavailable(oldLeaderAddress) // verify that the LEADER is no longer part of the 'members' set - awaitAssert(clusterView.members.map(_.address) should not contain (oldLeaderAddress)) + awaitAssert(clusterView.members.map(_.address) should not contain oldLeaderAddress) // verify that the LEADER is not part of the 'unreachable' set - awaitAssert(clusterView.unreachableMembers.map(_.address) should not contain (oldLeaderAddress)) + awaitAssert(clusterView.unreachableMembers.map(_.address) should not contain oldLeaderAddress) // verify that we have a new LEADER - awaitAssert(clusterView.leader should not be (oldLeaderAddress)) + awaitAssert(clusterView.leader should not be oldLeaderAddress) } enterBarrier("finished") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MBeanSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MBeanSpec.scala index 96f0b0a242a..971f2fa7925 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MBeanSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MBeanSpec.scala @@ -22,11 +22,14 @@ object MBeanMultiJvmSpec extends MultiNodeConfig { val third = role("third") val fourth = role("fourth") - commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" + commonConfig( + debugConfig(on = false) + .withFallback(ConfigFactory.parseString(""" akka.cluster.jmx.enabled = on akka.cluster.roles = [testNode] akka.cluster.app-version = "1.2.3" - """)).withFallback(MultiNodeClusterSpec.clusterConfig)) + """)) + .withFallback(MultiNodeClusterSpec.clusterConfig)) } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MemberWeaklyUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MemberWeaklyUpSpec.scala index c4c1a010cd0..f3604a603d9 100644 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/MemberWeaklyUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MemberWeaklyUpSpec.scala @@ -21,9 +21,12 @@ object MemberWeaklyUpSpec extends MultiNodeConfig { val fourth = role("fourth") val fifth = role("fifth") - commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" + commonConfig( + debugConfig(on = false) + .withFallback(ConfigFactory.parseString(""" akka.cluster.allow-weakly-up-members = 3 s - """)).withFallback(MultiNodeClusterSpec.clusterConfig)) + """)) + .withFallback(MultiNodeClusterSpec.clusterConfig)) testTransport(on = true) } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala index a436b637687..460898be30a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala @@ -36,17 +36,19 @@ abstract class MembershipChangeListenerUpSpec extends MultiNodeClusterSpec(Membe runOn(first, second) { val latch = TestLatch() val expectedAddresses = Set(first, second).map(address) - cluster.subscribe(system.actorOf(Props(new Actor { - var members = Set.empty[Member] - def receive = { - case state: CurrentClusterState => members = state.members - case MemberUp(m) => - members = members - m + m - if (members.map(_.address) == expectedAddresses) - latch.countDown() - case _ => // ignore - } - }).withDeploy(Deploy.local)), classOf[MemberEvent]) + cluster.subscribe( + system.actorOf(Props(new Actor { + var members = Set.empty[Member] + def receive = { + case state: CurrentClusterState => members = state.members + case MemberUp(m) => + members = members - m + m + if (members.map(_.address) == expectedAddresses) + latch.countDown() + case _ => // ignore + } + }).withDeploy(Deploy.local)), + classOf[MemberEvent]) 
enterBarrier("listener-1-registered") cluster.join(first) latch.await @@ -63,17 +65,19 @@ abstract class MembershipChangeListenerUpSpec extends MultiNodeClusterSpec(Membe val latch = TestLatch() val expectedAddresses = Set(first, second, third).map(address) - cluster.subscribe(system.actorOf(Props(new Actor { - var members = Set.empty[Member] - def receive = { - case state: CurrentClusterState => members = state.members - case MemberUp(m) => - members = members - m + m - if (members.map(_.address) == expectedAddresses) - latch.countDown() - case _ => // ignore - } - }).withDeploy(Deploy.local)), classOf[MemberEvent]) + cluster.subscribe( + system.actorOf(Props(new Actor { + var members = Set.empty[Member] + def receive = { + case state: CurrentClusterState => members = state.members + case MemberUp(m) => + members = members - m + m + if (members.map(_.address) == expectedAddresses) + latch.countDown() + case _ => // ignore + } + }).withDeploy(Deploy.local)), + classOf[MemberEvent]) enterBarrier("listener-2-registered") runOn(third) { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcClusterSpec.scala index 90bb9ab02ed..223a74406c7 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcClusterSpec.scala @@ -19,9 +19,12 @@ class MultiDcSpecConfig(crossDcConnections: Int = 5) extends MultiNodeConfig { val fourth = role("fourth") val fifth = role("fifth") - commonConfig(ConfigFactory.parseString(s""" + commonConfig( + ConfigFactory + .parseString(s""" akka.cluster.multi-data-center.cross-data-center-connections = $crossDcConnections - """).withFallback(MultiNodeClusterSpec.clusterConfig)) + """) + .withFallback(MultiNodeClusterSpec.clusterConfig)) nodeConfig(first, second)(ConfigFactory.parseString(""" akka.cluster.multi-data-center.self-data-center = "dc1" diff --git 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcHeartbeatTakingOverSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcHeartbeatTakingOverSpec.scala index fbdd4669737..e2b4ba0a2eb 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcHeartbeatTakingOverSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcHeartbeatTakingOverSpec.scala @@ -90,7 +90,8 @@ abstract class MultiDcHeartbeatTakingOverSpec extends MultiNodeClusterSpec(Multi expectedBetaHeartbeaterNodes = takeNOldestMembers(dataCenter = "beta", 2) expectedBetaHeartbeaterRoles = membersAsRoles(expectedBetaHeartbeaterNodes) - expectedNoActiveHeartbeatSenderRoles = roles.toSet -- (expectedAlphaHeartbeaterRoles ++ expectedBetaHeartbeaterRoles) + expectedNoActiveHeartbeatSenderRoles = + roles.toSet -- (expectedAlphaHeartbeaterRoles ++ expectedBetaHeartbeaterRoles) } "collect information on oldest nodes" taggedAs LongRunningTest in { @@ -155,7 +156,7 @@ abstract class MultiDcHeartbeatTakingOverSpec extends MultiNodeClusterSpec(Multi implicit val sender: ActorRef = observer.ref val expectedAlphaMonitoringNodesAfterLeaving = - (takeNOldestMembers(dataCenter = "alpha", 3).filterNot(_.status == MemberStatus.Exiting)) + takeNOldestMembers(dataCenter = "alpha", 3).filterNot(_.status == MemberStatus.Exiting) runOn(membersAsRoles(expectedAlphaMonitoringNodesAfterLeaving).toList: _*) { awaitAssert( { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcJoin2Spec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcJoin2Spec.scala index 977a9cfd0ea..0a63d2f3741 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcJoin2Spec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcJoin2Spec.scala @@ -82,8 +82,8 @@ abstract class MultiDcJoin2Spec extends MultiNodeClusterSpec(MultiDcJoin2MultiJv Cluster(system).join(first) within(20.seconds) { awaitAssert { - Cluster(system).state.members - .exists(m => m.address == address(fourth) && 
m.status == MemberStatus.Up) should ===(true) + Cluster(system).state.members.exists(m => + m.address == address(fourth) && m.status == MemberStatus.Up) should ===(true) } } } @@ -93,8 +93,8 @@ abstract class MultiDcJoin2Spec extends MultiNodeClusterSpec(MultiDcJoin2MultiJv Cluster(system).join(second) within(20.seconds) { awaitAssert { - Cluster(system).state.members - .exists(m => m.address == address(fifth) && m.status == MemberStatus.Up) should ===(true) + Cluster(system).state.members.exists(m => + m.address == address(fifth) && m.status == MemberStatus.Up) should ===(true) } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcJoinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcJoinSpec.scala index f68819d7913..dfa119e7c13 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcJoinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcJoinSpec.scala @@ -86,8 +86,8 @@ abstract class MultiDcJoinSpec extends MultiNodeClusterSpec(MultiDcJoinMultiJvmS Cluster(system).join(first) within(20.seconds) { awaitAssert { - Cluster(system).state.members - .exists(m => m.address == beta2Address && m.status == MemberStatus.Up) should ===(true) + Cluster(system).state.members.exists(m => + m.address == beta2Address && m.status == MemberStatus.Up) should ===(true) } } } @@ -97,8 +97,8 @@ abstract class MultiDcJoinSpec extends MultiNodeClusterSpec(MultiDcJoinMultiJvmS Cluster(system).join(second) within(10.seconds) { awaitAssert { - Cluster(system).state.members - .exists(m => m.address == beta1Address && m.status == MemberStatus.Up) should ===(true) + Cluster(system).state.members.exists(m => + m.address == beta1Address && m.status == MemberStatus.Up) should ===(true) } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcLastNodeSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcLastNodeSpec.scala index d16e3b403a6..c1940b36e31 100644 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcLastNodeSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcLastNodeSpec.scala @@ -15,9 +15,12 @@ object MultiDcLastNodeSpec extends MultiNodeConfig { val second = role("second") val third = role("third") - commonConfig(ConfigFactory.parseString(""" + commonConfig( + ConfigFactory + .parseString(""" akka.loglevel = INFO - """).withFallback(MultiNodeClusterSpec.clusterConfig)) + """) + .withFallback(MultiNodeClusterSpec.clusterConfig)) nodeConfig(first, second)(ConfigFactory.parseString(""" akka.cluster.multi-data-center.self-data-center = "dc1" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala index ba012b11b5c..081bf4e8ba0 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala @@ -23,7 +23,9 @@ object MultiDcSplitBrainMultiJvmSpec extends MultiNodeConfig { val fourth = role("fourth") val fifth = role("fifth") - commonConfig(ConfigFactory.parseString(""" + commonConfig( + ConfigFactory + .parseString(""" akka.loglevel = DEBUG # issue #24955 akka.cluster.debug.verbose-heartbeat-logging = on akka.cluster.debug.verbose-gossip-logging = on @@ -39,7 +41,8 @@ object MultiDcSplitBrainMultiJvmSpec extends MultiNodeConfig { downing-provider-class = akka.cluster.testkit.AutoDowning testkit.auto-down-unreachable-after = 1s } - """).withFallback(MultiNodeClusterSpec.clusterConfig)) + """) + .withFallback(MultiNodeClusterSpec.clusterConfig)) nodeConfig(first, second)(ConfigFactory.parseString(""" akka.cluster.multi-data-center.self-data-center = "dc1" @@ -251,10 +254,12 @@ abstract class MultiDcSplitBrainSpec extends MultiNodeClusterSpec(MultiDcSplitBr val port = Cluster(system).selfAddress.port.get val restartedSystem = ActorSystem( system.name, - ConfigFactory.parseString(s""" + 
ConfigFactory + .parseString(s""" akka.remote.artery.canonical.port = $port akka.coordinated-shutdown.terminate-actor-system = on - """).withFallback(system.settings.config)) + """) + .withFallback(system.settings.config)) Cluster(restartedSystem).join(thirdAddress) Await.ready(restartedSystem.whenTerminated, remaining) } @@ -297,9 +302,9 @@ abstract class MultiDcSplitBrainSpec extends MultiNodeClusterSpec(MultiDcSplitBr } runOn(first, second, third) { - awaitAssert({ + awaitAssert { clusterView.members.map(_.address) should ===(Set(address(first), address(second), address(third))) - }) + } } runOn(remainingRoles: _*) { enterBarrier("restarted-fifth-removed") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala index a0a26d114b5..93a6f57698a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala @@ -79,8 +79,8 @@ abstract class MultiDcSunnyWeatherSpec extends MultiNodeClusterSpec(MultiDcSunny val expectedBetaHeartbeaterNodes = takeNOldestMembers(dataCenter = "beta", 2) val expectedBetaHeartbeaterRoles = membersAsRoles(expectedBetaHeartbeaterNodes) - val expectedNoActiveHeartbeatSenderRoles = roles.toSet -- (expectedAlphaHeartbeaterRoles.union( - expectedBetaHeartbeaterRoles)) + val expectedNoActiveHeartbeatSenderRoles = + roles.toSet -- (expectedAlphaHeartbeaterRoles.union(expectedBetaHeartbeaterRoles)) enterBarrier("found-expectations") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index 590101663ad..6c74df57942 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -187,14 +187,10 @@ abstract class 
MultiNodeClusterSpec(multiNodeconfig: MultiNodeConfig) def clusterView: ClusterReadView = cluster.readView - /** - * Get the cluster node to use. - */ + /** Get the cluster node to use. */ def cluster: Cluster = Cluster(system) - /** - * Use this method for the initial startup of the cluster node. - */ + /** Use this method for the initial startup of the cluster node. */ def startClusterNode(): Unit = { if (clusterView.members.isEmpty) { cluster.join(myself) @@ -238,7 +234,7 @@ abstract class MultiNodeClusterSpec(multiNodeconfig: MultiNodeConfig) awaitCond( { if (memberInState(joinNode, List(MemberStatus.Up)) && - memberInState(myself, List(MemberStatus.Joining, MemberStatus.Up))) + memberInState(myself, List(MemberStatus.Joining, MemberStatus.Up))) true else { cluster.join(joinNode) @@ -283,7 +279,7 @@ abstract class MultiNodeClusterSpec(multiNodeconfig: MultiNodeConfig) */ def assertLeaderIn(nodesInCluster: immutable.Seq[RoleName]): Unit = if (nodesInCluster.contains(myself)) { - nodesInCluster.length should not be (0) + nodesInCluster.length should not be 0 val expectedLeader = roleOfLeader(nodesInCluster) val leader = clusterView.leader val isLeader = leader == Some(clusterView.selfAddress) @@ -303,7 +299,7 @@ abstract class MultiNodeClusterSpec(multiNodeconfig: MultiNodeConfig) timeout: FiniteDuration = 25.seconds): Unit = { within(timeout) { if (!canNotBePartOfMemberRing.isEmpty) // don't run this on an empty set - awaitAssert(canNotBePartOfMemberRing.foreach(a => clusterView.members.map(_.address) should not contain (a))) + awaitAssert(canNotBePartOfMemberRing.foreach(a => clusterView.members.map(_.address) should not contain a)) awaitAssert(clusterView.members.size should ===(numberOfMembers)) awaitAssert(clusterView.members.unsorted.map(_.status) should ===(Set(MemberStatus.Up))) // clusterView.leader is updated by LeaderChanged, await that to be updated also @@ -358,9 +354,7 @@ abstract class MultiNodeClusterSpec(multiNodeconfig: MultiNodeConfig) def 
awaitAllReachable(): Unit = awaitAssert(clusterView.unreachableMembers should ===(Set.empty)) - /** - * Wait until the specified nodes have seen the same gossip overview. - */ + /** Wait until the specified nodes have seen the same gossip overview. */ def awaitSeenSameState(addresses: Address*): Unit = awaitAssert((addresses.toSet.diff(clusterView.seenBy)) should ===(Set.empty)) @@ -373,13 +367,11 @@ abstract class MultiNodeClusterSpec(multiNodeconfig: MultiNodeConfig) * be determined from the `RoleName`. */ def roleOfLeader(nodesInCluster: immutable.Seq[RoleName] = roles): RoleName = { - nodesInCluster.length should not be (0) + nodesInCluster.length should not be 0 nodesInCluster.sorted.head } - /** - * Sort the roles in the address order used by the cluster node ring. - */ + /** Sort the roles in the address order used by the cluster node ring. */ implicit val clusterOrdering: Ordering[RoleName] = new Ordering[RoleName] { import Member.addressOrdering def compare(x: RoleName, y: RoleName) = addressOrdering.compare(address(x), address(y)) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala index cbbe309913f..5705e3ff4b0 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala @@ -37,16 +37,17 @@ abstract class NodeLeavingAndExitingSpec extends MultiNodeClusterSpec(NodeLeavin runOn(first, third) { val secondAddess = address(second) val exitingLatch = TestLatch() - cluster.subscribe(system.actorOf(Props(new Actor { - def receive = { - case state: CurrentClusterState => - if (state.members.exists(m => m.address == secondAddess && m.status == Exiting)) - exitingLatch.countDown() - case MemberExited(m) if m.address == secondAddess => exitingLatch.countDown() - case _: MemberRemoved => // not tested here - - } - 
}).withDeploy(Deploy.local)), classOf[MemberEvent]) + cluster.subscribe( + system.actorOf(Props(new Actor { + def receive = { + case state: CurrentClusterState => + if (state.members.exists(m => m.address == secondAddess && m.status == Exiting)) + exitingLatch.countDown() + case MemberExited(m) if m.address == secondAddess => exitingLatch.countDown() + case _: MemberRemoved => // not tested here + } + }).withDeploy(Deploy.local)), + classOf[MemberEvent]) enterBarrier("registered-listener") runOn(third) { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala index 318e1a3eac8..c2a42050bea 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala @@ -51,13 +51,15 @@ abstract class NodeUpSpec extends MultiNodeClusterSpec(NodeUpMultiJvmSpec) { "be unaffected when joining again" in { val unexpected = new AtomicReference[SortedSet[Member]](SortedSet.empty) - cluster.subscribe(system.actorOf(Props(new Actor { - def receive = { - case event: MemberEvent => - unexpected.set(unexpected.get + event.member) - case _: CurrentClusterState => // ignore - } - })), classOf[MemberEvent]) + cluster.subscribe( + system.actorOf(Props(new Actor { + def receive = { + case event: MemberEvent => + unexpected.set(unexpected.get + event.member) + case _: CurrentClusterState => // ignore + } + })), + classOf[MemberEvent]) enterBarrier("listener-registered") runOn(second) { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/QuickRestartSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/QuickRestartSpec.scala index d28d68e06c7..5ddd14f4721 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/QuickRestartSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/QuickRestartSpec.scala @@ -68,10 +68,12 @@ abstract class QuickRestartSpec extends MultiNodeClusterSpec(QuickRestartMultiJv ActorSystem( 
system.name, // use the same port - ConfigFactory.parseString(s""" + ConfigFactory + .parseString(s""" akka.cluster.roles = [round-$n] akka.remote.artery.canonical.port = ${Cluster(restartingSystem).selfAddress.port.get} - """).withFallback(system.settings.config)) + """) + .withFallback(system.settings.config)) } log.info("Restarting node has address: {}", Cluster(restartingSystem).selfUniqueAddress) Cluster(restartingSystem).joinSeedNodes(seedNodes) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/RemoteFeaturesWithClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/RemoteFeaturesWithClusterSpec.scala index 9fd77d32da3..ef79819721d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/RemoteFeaturesWithClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/RemoteFeaturesWithClusterSpec.scala @@ -27,10 +27,12 @@ object ClusterRemoteFeaturesConfig extends MultiNodeConfig { val third = role("third") private val baseConfig = { - ConfigFactory.parseString(s""" + ConfigFactory + .parseString(s""" akka.remote.artery.canonical.port = ${MultiNodeSpec.selfPort} akka.log-dead-letters-during-shutdown = off - """).withFallback(MultiNodeClusterSpec.clusterConfig) + """) + .withFallback(MultiNodeClusterSpec.clusterConfig) } commonConfig(debugConfig(on = false).withFallback(baseConfig)) @@ -43,8 +45,8 @@ object ClusterRemoteFeaturesConfig extends MultiNodeConfig { object ClusterRemoteFeatures { class AddressPing extends Actor { - def receive: Receive = { - case "ping" => sender() ! self + def receive: Receive = { case "ping" => + sender() ! 
self } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartFirstSeedNodeSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartFirstSeedNodeSpec.scala index bf641a96174..d0b07c2af2d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartFirstSeedNodeSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartFirstSeedNodeSpec.scala @@ -60,9 +60,11 @@ abstract class RestartFirstSeedNodeSpec lazy val restartedSeed1System = ActorSystem( system.name, - ConfigFactory.parseString(s""" + ConfigFactory + .parseString(s""" akka.remote.artery.canonical.port = ${seedNodes.head.port.get} - """).withFallback(system.settings.config)) + """) + .withFallback(system.settings.config)) override def afterAll(): Unit = { runOn(seed1) { @@ -77,13 +79,14 @@ abstract class RestartFirstSeedNodeSpec // seed1System is a separate ActorSystem, to be able to simulate restart // we must transfer its address to seed2 and seed3 runOn(seed2, seed3) { - system.actorOf(Props(new Actor { - def receive = { - case a: Address => + system.actorOf( + Props(new Actor { + def receive = { case a: Address => seedNode1Address = a sender() ! 
"ok" - } - }).withDeploy(Deploy.local), name = "address-receiver") + } + }).withDeploy(Deploy.local), + name = "address-receiver") enterBarrier("seed1-address-receiver-ready") } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode2Spec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode2Spec.scala index eee3e210c69..882cb36fc22 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode2Spec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode2Spec.scala @@ -60,9 +60,11 @@ abstract class RestartNode2SpecSpec extends MultiNodeClusterSpec(RestartNode2Spe // this is the node that will attempt to re-join, keep gate times low so it can retry quickly lazy val restartedSeed1System = ActorSystem( system.name, - ConfigFactory.parseString(s""" + ConfigFactory + .parseString(s""" akka.remote.artery.canonical.port = ${seedNodes.head.port.get} - """).withFallback(system.settings.config)) + """) + .withFallback(system.settings.config)) override def afterAll(): Unit = { runOn(seed1) { @@ -76,13 +78,14 @@ abstract class RestartNode2SpecSpec extends MultiNodeClusterSpec(RestartNode2Spe // seed1System is a separate ActorSystem, to be able to simulate restart // we must transfer its address to seed2 runOn(seed2) { - system.actorOf(Props(new Actor { - def receive = { - case a: Address => + system.actorOf( + Props(new Actor { + def receive = { case a: Address => seedNode1Address = a sender() ! 
"ok" - } - }).withDeploy(Deploy.local), name = "address-receiver") + } + }).withDeploy(Deploy.local), + name = "address-receiver") enterBarrier("seed1-address-receiver-ready") } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode3Spec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode3Spec.scala index 93c17a5e56e..6cf7810c8d4 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode3Spec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode3Spec.scala @@ -61,9 +61,11 @@ abstract class RestartNode3Spec extends MultiNodeClusterSpec(RestartNode3MultiJv lazy val restartedSecondSystem = ActorSystem( system.name, - ConfigFactory.parseString(s""" + ConfigFactory + .parseString(s""" akka.remote.artery.canonical.port = ${secondUniqueAddress.address.port.get} - """).withFallback(system.settings.config)) + """) + .withFallback(system.settings.config)) override def afterAll(): Unit = { runOn(second) { @@ -82,13 +84,14 @@ abstract class RestartNode3Spec extends MultiNodeClusterSpec(RestartNode3MultiJv // secondSystem is a separate ActorSystem, to be able to simulate restart // we must transfer its address to first runOn(first, third) { - system.actorOf(Props(new Actor { - def receive = { - case a: UniqueAddress => + system.actorOf( + Props(new Actor { + def receive = { case a: UniqueAddress => secondUniqueAddress = a sender() ! 
"ok" - } - }).withDeploy(Deploy.local), name = "address-receiver") + } + }).withDeploy(Deploy.local), + name = "address-receiver") enterBarrier("second-address-receiver-ready") } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNodeSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNodeSpec.scala index c2656002b67..3ac699fe2cb 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNodeSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNodeSpec.scala @@ -81,9 +81,11 @@ abstract class RestartNodeSpec extends MultiNodeClusterSpec(RestartNodeMultiJvmS lazy val restartedSecondSystem = ActorSystem( system.name, - ConfigFactory.parseString(s""" + ConfigFactory + .parseString(s""" akka.remote.artery.canonical.port = ${secondUniqueAddress.address.port.get} - """).withFallback(system.settings.config)) + """) + .withFallback(system.settings.config)) override def afterAll(): Unit = { runOn(second) { @@ -100,13 +102,14 @@ abstract class RestartNodeSpec extends MultiNodeClusterSpec(RestartNodeMultiJvmS // secondSystem is a separate ActorSystem, to be able to simulate restart // we must transfer its address to first runOn(first, third) { - system.actorOf(Props(new Actor { - def receive = { - case a: UniqueAddress => + system.actorOf( + Props(new Actor { + def receive = { case a: UniqueAddress => secondUniqueAddress = a sender() ! 
"ok" - } - }).withDeploy(Deploy.local), name = "address-receiver") + } + }).withDeploy(Deploy.local), + name = "address-receiver") enterBarrier("second-address-receiver-ready") } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SharedMediaDriverSupport.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SharedMediaDriverSupport.scala index 1fbc9b5685f..a302181d7a7 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SharedMediaDriverSupport.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SharedMediaDriverSupport.scala @@ -43,16 +43,21 @@ object SharedMediaDriverSupport { @tailrec def isDriverInactive(i: Int): Boolean = { if (i < 0) true else { - val active = try CommonContext.isDriverActive(new File(aeronDir), 5000, new Consumer[String] { - override def accept(msg: String): Unit = { - println(msg) + val active = + try + CommonContext.isDriverActive( + new File(aeronDir), + 5000, + new Consumer[String] { + override def accept(msg: String): Unit = { + println(msg) + } + }) + catch { + case NonFatal(e) => + println("Exception checking isDriverActive: " + e.getMessage) + false } - }) - catch { - case NonFatal(e) => - println("Exception checking isDriverActive: " + e.getMessage) - false - } if (active) false else { Thread.sleep(500) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SplitBrainQuarantineSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SplitBrainQuarantineSpec.scala index 3e22a4512e0..0177eade900 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SplitBrainQuarantineSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SplitBrainQuarantineSpec.scala @@ -25,8 +25,7 @@ object SplitBrainQuarantineSpec extends MultiNodeConfig { commonConfig( debugConfig(on = true) .withFallback(MultiNodeClusterSpec.clusterConfig) - .withFallback(ConfigFactory.parseString( - """ + .withFallback(ConfigFactory.parseString(""" akka.remote.artery.enabled = on akka.cluster.downing-provider-class = 
"akka.cluster.sbr.SplitBrainResolverProvider" # we dont really want this to hit, but we need the sbr enabled to know the quarantining diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/StreamRefSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/StreamRefSpec.scala index 6a94ef6a51f..a2b6c9ecab7 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/StreamRefSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/StreamRefSpec.scala @@ -58,34 +58,33 @@ object StreamRefSpec extends MultiNodeConfig { import context.dispatcher implicit val mat: Materializer = Materializer(context) - def receive = { - case RequestLogs(streamId) => - // materialize the SourceRef: - val (done: Future[Done], ref: SourceRef[String]) = - Source - .fromIterator(() => Iterator.from(1)) - .map(n => s"elem-$n") - .watchTermination()(Keep.right) - .toMat(StreamRefs.sourceRef())(Keep.both) - .mapMaterializedValue { m => - streamLifecycleProbe ! s"started-$streamId" - m - } - .run() - - done.onComplete { - case Success(_) => - streamLifecycleProbe ! s"completed-$streamId" - case Failure(ex) => - log.info("Source stream completed with failure: {}", ex) - streamLifecycleProbe ! s"failed-$streamId" - } - - // wrap the SourceRef in some domain message, such that the sender knows what source it is - val reply = LogsOffer(streamId, ref) - - // reply to sender - sender() ! reply + def receive = { case RequestLogs(streamId) => + // materialize the SourceRef: + val (done: Future[Done], ref: SourceRef[String]) = + Source + .fromIterator(() => Iterator.from(1)) + .map(n => s"elem-$n") + .watchTermination()(Keep.right) + .toMat(StreamRefs.sourceRef())(Keep.both) + .mapMaterializedValue { m => + streamLifecycleProbe ! s"started-$streamId" + m + } + .run() + + done.onComplete { + case Success(_) => + streamLifecycleProbe ! s"completed-$streamId" + case Failure(ex) => + log.info("Source stream completed with failure: {}", ex) + streamLifecycleProbe ! 
s"failed-$streamId" + } + + // wrap the SourceRef in some domain message, such that the sender knows what source it is + val reply = LogsOffer(streamId, ref) + + // reply to sender + sender() ! reply } } @@ -104,32 +103,31 @@ object StreamRefSpec extends MultiNodeConfig { import context.dispatcher implicit val mat: Materializer = Materializer(context) - def receive = { - case PrepareUpload(nodeId) => - // materialize the SinkRef (the remote is like a source of data for us): - val (ref: SinkRef[String], done: Future[Done]) = - StreamRefs - .sinkRef[String]() - .throttle(1, 1.second) - .toMat(Sink.ignore)(Keep.both) - .mapMaterializedValue { m => - streamLifecycleProbe ! s"started-$nodeId" - m - } - .run() - - done.onComplete { - case Success(_) => streamLifecycleProbe ! s"completed-$nodeId" - case Failure(ex) => - log.info("Sink stream completed with failure: {}", ex) - streamLifecycleProbe ! s"failed-$nodeId" - } - - // wrap the SinkRef in some domain message, such that the sender knows what source it is - val reply = MeasurementsSinkReady(nodeId, ref) - - // reply to sender - sender() ! reply + def receive = { case PrepareUpload(nodeId) => + // materialize the SinkRef (the remote is like a source of data for us): + val (ref: SinkRef[String], done: Future[Done]) = + StreamRefs + .sinkRef[String]() + .throttle(1, 1.second) + .toMat(Sink.ignore)(Keep.both) + .mapMaterializedValue { m => + streamLifecycleProbe ! s"started-$nodeId" + m + } + .run() + + done.onComplete { + case Success(_) => streamLifecycleProbe ! s"completed-$nodeId" + case Failure(ex) => + log.info("Sink stream completed with failure: {}", ex) + streamLifecycleProbe ! s"failed-$nodeId" + } + + // wrap the SinkRef in some domain message, such that the sender knows what source it is + val reply = MeasurementsSinkReady(nodeId, ref) + + // reply to sender + sender() ! 
reply } } @@ -255,8 +253,8 @@ abstract class StreamRefSpec extends MultiNodeClusterSpec(StreamRefSpec) with Im // and it triggered the subscription timeout. Therefore we must wait more than the // the subscription timeout for a failure val timeout = system.settings.config - .getDuration("akka.stream.materializer.stream-ref.subscription-timeout") - .asScala + 2.seconds + .getDuration("akka.stream.materializer.stream-ref.subscription-timeout") + .asScala + 2.seconds streamLifecycle3.expectMsg(timeout, "failed-system-42-tmp") } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala index 34595c9462f..d74c2c421ae 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala @@ -135,8 +135,8 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig { val numberOfNodesJoiningToOneNode = getInt("nr-of-nodes-joining-to-one") * nFactor // remaining will join to seed nodes val numberOfNodesJoiningToSeedNodes = (totalNumberOfNodes - numberOfSeedNodes - - numberOfNodesJoiningToSeedNodesInitially - numberOfNodesJoiningOneByOneSmall - - numberOfNodesJoiningOneByOneLarge - numberOfNodesJoiningToOneNode) + numberOfNodesJoiningToSeedNodesInitially - numberOfNodesJoiningOneByOneSmall - + numberOfNodesJoiningOneByOneLarge - numberOfNodesJoiningToOneNode) .requiring(_ >= 0, s"too many configured nr-of-nodes-joining-*, total should be <= ${totalNumberOfNodes}") val numberOfNodesLeavingOneByOneSmall = getInt("nr-of-nodes-leaving-one-by-one-small") * nFactor val numberOfNodesLeavingOneByOneLarge = getInt("nr-of-nodes-leaving-one-by-one-large") * nFactor @@ -270,10 +270,9 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig { class ClusterResultHistory extends Actor with ActorLogging { var history = Vector.empty[AggregatedClusterResult] - def receive = { - case result: AggregatedClusterResult 
=> - history :+= result - log.info("Cluster result history\n" + formatHistory) + def receive = { case result: AggregatedClusterResult => + history :+= result + log.info("Cluster result history\n" + formatHistory) } def formatHistory: String = @@ -385,9 +384,7 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig { } } - /** - * Used for remote death watch testing - */ + /** Used for remote death watch testing */ class Watchee extends Actor { def receive = Actor.emptyBehavior } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala index 6cc7363c05d..2287e8ff947 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala @@ -64,14 +64,16 @@ abstract class SunnyWeatherSpec extends MultiNodeClusterSpec(SunnyWeatherMultiJv log.debug("5 joined") val unexpected = new AtomicReference[SortedSet[Member]](SortedSet.empty) - cluster.subscribe(system.actorOf(Props(new Actor { - def receive = { - case event: MemberEvent => - // we don't expected any changes to the cluster - unexpected.set(unexpected.get + event.member) - case _: CurrentClusterState => // ignore - } - })), classOf[MemberEvent]) + cluster.subscribe( + system.actorOf(Props(new Actor { + def receive = { + case event: MemberEvent => + // we don't expected any changes to the cluster + unexpected.set(unexpected.get + event.member) + case _: CurrentClusterState => // ignore + } + })), + classOf[MemberEvent]) for (n <- 1 to 30) { enterBarrier("period-" + n) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SurviveNetworkInstabilitySpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SurviveNetworkInstabilitySpec.scala index bad58517a59..3e5786208e8 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SurviveNetworkInstabilitySpec.scala +++ 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/SurviveNetworkInstabilitySpec.scala @@ -44,8 +44,8 @@ object SurviveNetworkInstabilityMultiJvmSpec extends MultiNodeConfig { testTransport(on = true) class Echo extends Actor { - def receive = { - case m => sender() ! m + def receive = { case m => + sender() ! m } } @@ -225,7 +225,7 @@ abstract class SurviveNetworkInstabilitySpec val joining = Vector(sixth, seventh) val others = Vector(second, third, fourth, fifth) runOn(first) { - for (role1 <- (joining :+ first); role2 <- others) { + for (role1 <- joining :+ first; role2 <- others) { testConductor.blackhole(role1, role2, Direction.Both).await } } @@ -252,7 +252,7 @@ abstract class SurviveNetworkInstabilitySpec enterBarrier("more-unreachable-5") runOn(first) { - for (role1 <- (joining :+ first); role2 <- others) { + for (role1 <- joining :+ first; role2 <- others) { testConductor.passThrough(role1, role2, Direction.Both).await } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala index f88485f2780..62d992f33f9 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala @@ -21,8 +21,7 @@ object TransitionMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) - .withFallback( - ConfigFactory.parseString(""" + .withFallback(ConfigFactory.parseString(""" akka.cluster.periodic-tasks-initial-delay = 300 s # turn off all periodic tasks akka.cluster.publish-stats-interval = 0 s # always, when it happens """)) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeJoinsAgainSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeJoinsAgainSpec.scala index 956edea2085..96f5718e07a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeJoinsAgainSpec.scala +++ 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeJoinsAgainSpec.scala @@ -155,12 +155,14 @@ abstract class UnreachableNodeJoinsAgainSpec extends MultiNodeClusterSpec(Unreac runOn(victim) { val victimAddress = system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress val freshConfig = - ConfigFactory.parseString(s""" + ConfigFactory + .parseString(s""" akka.remote.artery.canonical { hostname = ${victimAddress.host.get} port = ${victimAddress.port.get} } - """).withFallback(system.settings.config) + """) + .withFallback(system.settings.config) Await.ready(system.whenTerminated, 10 seconds) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingGroupSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingGroupSpec.scala index 1e537dbf316..22f499653fe 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingGroupSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingGroupSpec.scala @@ -57,8 +57,8 @@ abstract class ClusterConsistentHashingGroupSpec } "send to same destinations from different nodes" taggedAs LongRunningTest in { - def hashMapping: ConsistentHashMapping = { - case s: String => s + def hashMapping: ConsistentHashMapping = { case s: String => + s } val paths = List("/user/dest") val router = system.actorOf( diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingRouterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingRouterSpec.scala index e52b1deb387..179be9b192a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingRouterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingRouterSpec.scala @@ -27,8 +27,8 @@ import akka.testkit._ object ClusterConsistentHashingRouterMultiJvmSpec extends MultiNodeConfig { class Echo extends Actor { - def 
receive = { - case _ => sender() ! self + def receive = { case _ => + sender() ! self } } @@ -36,7 +36,9 @@ object ClusterConsistentHashingRouterMultiJvmSpec extends MultiNodeConfig { val second = role("second") val third = role("third") - commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" + commonConfig( + debugConfig(on = false) + .withFallback(ConfigFactory.parseString(s""" common-router-settings = { router = consistent-hashing-pool cluster { @@ -51,7 +53,8 @@ object ClusterConsistentHashingRouterMultiJvmSpec extends MultiNodeConfig { /router3 = $${common-router-settings} /router4 = $${common-router-settings} } - """)).withFallback(MultiNodeClusterSpec.clusterConfig)) + """)) + .withFallback(MultiNodeClusterSpec.clusterConfig)) } @@ -70,9 +73,7 @@ abstract class ClusterConsistentHashingRouterSpec def currentRoutees(router: ActorRef) = Await.result(router ? GetRoutees, timeout.duration).asInstanceOf[Routees].routees - /** - * Fills in self address for local ActorRef - */ + /** Fills in self address for local ActorRef */ private def fullAddress(actorRef: ActorRef): Address = actorRef.path.address match { case Address(_, _, None, None) => cluster.selfAddress case a => a @@ -124,7 +125,8 @@ abstract class ClusterConsistentHashingRouterSpec val router2 = system.actorOf( ClusterRouterPool( local = ConsistentHashingPool(nrOfInstances = 0), - settings = ClusterRouterPoolSettings(totalInstances = 10, maxInstancesPerNode = 2, allowLocalRoutees = true)) + settings = + ClusterRouterPoolSettings(totalInstances = 10, maxInstancesPerNode = 2, allowLocalRoutees = true)) .props(Props[Echo]()), "router2") // it may take some time until router receives cluster member events @@ -138,8 +140,8 @@ abstract class ClusterConsistentHashingRouterSpec "handle combination of configured router and programatically defined hashMapping" in { runOn(first) { - def hashMapping: ConsistentHashMapping = { - case s: String => s + def hashMapping: ConsistentHashMapping = 
{ case s: String => + s } val router3 = @@ -155,8 +157,8 @@ abstract class ClusterConsistentHashingRouterSpec "handle combination of configured router and programatically defined hashMapping and ClusterRouterConfig" in { runOn(first) { - def hashMapping: ConsistentHashMapping = { - case s: String => s + def hashMapping: ConsistentHashMapping = { case s: String => + s } val router4 = diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterRoundRobinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterRoundRobinSpec.scala index 77c4bd004cd..f7332f8b9ed 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterRoundRobinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterRoundRobinSpec.scala @@ -33,8 +33,8 @@ object ClusterRoundRobinMultiJvmSpec extends MultiNodeConfig { class SomeActor(routeeType: RouteeType) extends Actor { def this() = this(PoolRoutee) - def receive = { - case "hit" => sender() ! Reply(routeeType, self) + def receive = { case "hit" => + sender() ! 
Reply(routeeType, self) } } @@ -49,7 +49,9 @@ object ClusterRoundRobinMultiJvmSpec extends MultiNodeConfig { val third = role("third") val fourth = role("fourth") - commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" + commonConfig( + debugConfig(on = false) + .withFallback(ConfigFactory.parseString(s""" akka.actor { serialization-bindings { "akka.cluster.routing.ClusterRoundRobinMultiJvmSpec$$Reply" = java-test @@ -89,7 +91,8 @@ object ClusterRoundRobinMultiJvmSpec extends MultiNodeConfig { } } } - """)).withFallback(MultiNodeClusterSpec.clusterConfig)) + """)) + .withFallback(MultiNodeClusterSpec.clusterConfig)) nodeConfig(first, second)(ConfigFactory.parseString("""akka.cluster.roles =["a", "c"]""")) nodeConfig(third)(ConfigFactory.parseString("""akka.cluster.roles =["b", "c"]""")) @@ -122,16 +125,14 @@ abstract class ClusterRoundRobinSpec def receiveReplies(routeeType: RouteeType, expectedReplies: Int): Map[Address, Int] = { val zero = Map.empty[Address, Int] ++ roles.map(address(_) -> 0) - receiveWhile(5 seconds, messages = expectedReplies) { - case Reply(`routeeType`, ref) => fullAddress(ref) - }.foldLeft(zero) { - case (replyMap, address) => replyMap + (address -> (replyMap(address) + 1)) + receiveWhile(5 seconds, messages = expectedReplies) { case Reply(`routeeType`, ref) => + fullAddress(ref) + }.foldLeft(zero) { case (replyMap, address) => + replyMap + (address -> (replyMap(address) + 1)) } } - /** - * Fills in self address for local ActorRef - */ + /** Fills in self address for local ActorRef */ private def fullAddress(actorRef: ActorRef): Address = actorRef.path.address match { case Address(_, _, None, None) => cluster.selfAddress case a => a diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/UseRoleIgnoredSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/UseRoleIgnoredSpec.scala index da3296ecfc6..f85c201ca99 100644 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/UseRoleIgnoredSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/UseRoleIgnoredSpec.scala @@ -29,10 +29,9 @@ object UseRoleIgnoredMultiJvmSpec extends MultiNodeConfig { def this() = this(PoolRoutee) - def receive = { - case msg => - log.info("msg = {}", msg) - sender() ! Reply(routeeType, self) + def receive = { case msg => + log.info("msg = {}", msg) + sender() ! Reply(routeeType, self) } } @@ -65,16 +64,14 @@ abstract class UseRoleIgnoredSpec def receiveReplies(routeeType: RouteeType, expectedReplies: Int): Map[Address, Int] = { val zero = Map.empty[Address, Int] ++ roles.map(address(_) -> 0) - (receiveWhile(5 seconds, messages = expectedReplies) { - case Reply(`routeeType`, ref) => fullAddress(ref) - }).foldLeft(zero) { - case (replyMap, address) => replyMap + (address -> (replyMap(address) + 1)) + receiveWhile(5 seconds, messages = expectedReplies) { case Reply(`routeeType`, ref) => + fullAddress(ref) + }.foldLeft(zero) { case (replyMap, address) => + replyMap + (address -> (replyMap(address) + 1)) } } - /** - * Fills in self address for local ActorRef - */ + /** Fills in self address for local ActorRef */ private def fullAddress(actorRef: ActorRef): Address = actorRef.path.address match { case Address(_, _, None, None) => cluster.selfAddress case a => a diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/sbr/LeaseMajority5NodeSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/sbr/LeaseMajority5NodeSpec.scala index 32745998011..36c7b586fa6 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/sbr/LeaseMajority5NodeSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/sbr/LeaseMajority5NodeSpec.scala @@ -76,9 +76,7 @@ class LeaseMajority5NodeSpec extends MultiNodeClusterSpec(LeaseMajority5NodeSpec def sortByAddress(roles: RoleName*): List[RoleName] = { - /** - * Sort the roles in the address order used by the cluster node ring. 
- */ + /** Sort the roles in the address order used by the cluster node ring. */ implicit val clusterOrdering: Ordering[RoleName] = new Ordering[RoleName] { import akka.cluster.Member.addressOrdering def compare(x: RoleName, y: RoleName): Int = addressOrdering.compare(node(x).address, node(y).address) diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala index a852603edb7..aeb13109610 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala @@ -60,14 +60,16 @@ class ClusterConfigSpec extends AkkaSpec { "be able to parse non-default cluster config elements" in { val settings = new ClusterSettings( - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" |akka { | cluster { | roles = [ "hamlet" ] | multi-data-center.self-data-center = "blue" | } |} - """.stripMargin).withFallback(ConfigFactory.load()), + """.stripMargin) + .withFallback(ConfigFactory.load()), system.name) import settings._ Roles should ===(Set("hamlet", ClusterSettings.DcRolePrefix + "blue")) diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterDeathWatchNotificationSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterDeathWatchNotificationSpec.scala index c65713e92f1..3d2831cbadb 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterDeathWatchNotificationSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterDeathWatchNotificationSpec.scala @@ -15,7 +15,8 @@ import akka.testkit._ object ClusterDeathWatchNotificationSpec { - val config = ConfigFactory.parseString(""" + val config = ConfigFactory + .parseString(""" akka { loglevel = INFO actor { @@ -23,7 +24,8 @@ object ClusterDeathWatchNotificationSpec { } } akka.remote.artery.canonical.port = 0 - """).withFallback(ArterySpecSupport.defaultConfig) + """) + .withFallback(ArterySpecSupport.defaultConfig) object Sender { def 
props(receiver: ActorRef, sendOnStop: Vector[String]): Props = @@ -31,8 +33,8 @@ object ClusterDeathWatchNotificationSpec { } class Sender(receiver: ActorRef, sendOnStop: Vector[String]) extends Actor { - override def receive: Receive = { - case msg => sender() ! msg + override def receive: Receive = { case msg => + sender() ! msg } override def postStop(): Unit = { diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterDeployerSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterDeployerSpec.scala index d7fd920c0a4..a8ae8b42365 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterDeployerSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterDeployerSpec.scala @@ -53,7 +53,7 @@ class ClusterDeployerSpec extends AkkaSpec(ClusterDeployerSpec.deployerConf) { "be able to parse 'akka.actor.deployment._' with specified cluster pool" in { val service = "/user/service1" val deployment = system.asInstanceOf[ActorSystemImpl].provider.deployer.lookup(service.split("/").drop(1)) - deployment should not be (None) + deployment should not be None deployment should ===( Some(Deploy( @@ -70,7 +70,7 @@ class ClusterDeployerSpec extends AkkaSpec(ClusterDeployerSpec.deployerConf) { "be able to parse 'akka.actor.deployment._' with specified cluster group" in { val service = "/user/service2" val deployment = system.asInstanceOf[ActorSystemImpl].provider.deployer.lookup(service.split("/").drop(1)) - deployment should not be (None) + deployment should not be None deployment should ===( Some(Deploy( diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala index 736851ebc41..863a58b7393 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala @@ -48,8 +48,8 @@ class ClusterDomainEventSpec extends AnyWordSpec with Matchers with BeforeAndAft } private[cluster] def 
converge(gossip: Gossip): (Gossip, Set[UniqueAddress]) = - gossip.members.foldLeft((gossip, Set.empty[UniqueAddress])) { - case ((gs, as), m) => (gs.seen(m.uniqueAddress), as + m.uniqueAddress) + gossip.members.foldLeft((gossip, Set.empty[UniqueAddress])) { case ((gs, as), m) => + (gs.seen(m.uniqueAddress), as + m.uniqueAddress) } private def state(g: Gossip): MembershipState = diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterHeartbeatReceiverSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterHeartbeatReceiverSpec.scala index a2fb3fa15d7..3fe78d918c1 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterHeartbeatReceiverSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterHeartbeatReceiverSpec.scala @@ -7,9 +7,11 @@ package akka.cluster import akka.cluster.ClusterHeartbeatSender.{ Heartbeat, HeartbeatRsp } import akka.testkit.{ AkkaSpec, ImplicitSender } -class ClusterHeartbeatReceiverSpec extends AkkaSpec(""" +class ClusterHeartbeatReceiverSpec + extends AkkaSpec(""" akka.actor.provider = cluster - """.stripMargin) with ImplicitSender { + """.stripMargin) + with ImplicitSender { "ClusterHeartbeatReceiver" should { "respond to heartbeats with the same sequenceNr and sendTime" in { val heartBeater = system.actorOf(ClusterHeartbeatReceiver.props(() => Cluster(system))) diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterHeartbeatSenderSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterHeartbeatSenderSpec.scala index 15b3ec7a618..9d851e55c54 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterHeartbeatSenderSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterHeartbeatSenderSpec.scala @@ -23,12 +23,14 @@ object ClusterHeartbeatSenderSpec { } } -class ClusterHeartbeatSenderSpec extends AkkaSpec(""" +class ClusterHeartbeatSenderSpec + extends AkkaSpec(""" akka.loglevel = DEBUG akka.actor.provider = cluster akka.cluster.failure-detector.heartbeat-interval = 0.2s 
akka.remote.artery.canonical.port = 0 - """.stripMargin) with ImplicitSender { + """.stripMargin) + with ImplicitSender { "ClusterHeartBeatSender" must { "increment heart beat sequence nr" in { @@ -36,8 +38,8 @@ class ClusterHeartbeatSenderSpec extends AkkaSpec(""" val underTest = system.actorOf(Props(new TestClusterHeartBeatSender(probe))) underTest ! CurrentClusterState() underTest ! MemberUp( - Member(UniqueAddress(Address("akka", system.name), 1L), Set("dc-default"), Version.Zero) - .copy(status = MemberStatus.Up)) + Member(UniqueAddress(Address("akka", system.name), 1L), Set("dc-default"), Version.Zero).copy(status = + MemberStatus.Up)) probe.expectMsgType[Heartbeat].sequenceNr shouldEqual 1 probe.expectMsgType[Heartbeat].sequenceNr shouldEqual 2 diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterHeartbeatSenderStateSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterHeartbeatSenderStateSpec.scala index a4f47ae6507..a4fe57fd6ce 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterHeartbeatSenderStateSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterHeartbeatSenderStateSpec.scala @@ -182,13 +182,13 @@ class ClusterHeartbeatSenderStateSpec extends AnyWordSpec with Matchers { val oldUnreachable = state.oldReceiversNowUnreachable state = state.removeMember(node) // keep unreachable, unless it was the removed - if (oldUnreachable(node))(oldUnreachable.diff(state.activeReceivers)) should ===(Set(node)) + if (oldUnreachable(node)) (oldUnreachable.diff(state.activeReceivers)) should ===(Set(node)) else (oldUnreachable.diff(state.activeReceivers)) should ===(Set.empty) state.failureDetector.isMonitoring(node.address) should ===(false) state.failureDetector.isAvailable(node.address) should ===(true) - state.activeReceivers should not contain (node) + state.activeReceivers should not contain node } case Unreachable => @@ -207,7 +207,7 @@ class ClusterHeartbeatSenderStateSpec extends AnyWordSpec with Matchers { state = 
state.heartbeatRsp(node) if (oldUnreachable(node)) - state.oldReceiversNowUnreachable should not contain (node) + state.oldReceiversNowUnreachable should not contain node if (oldUnreachable(node) && !oldRingReceivers(node)) state.failureDetector.isMonitoring(node.address) should ===(false) diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala index 4a5a3aa5347..b7028e80c05 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala @@ -65,8 +65,8 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender { "register jmx mbean" in { val name = new ObjectName("akka:type=Cluster") val info = ManagementFactory.getPlatformMBeanServer.getMBeanInfo(name) - info.getAttributes.length should be > (0) - info.getOperations.length should be > (0) + info.getAttributes.length should be > 0 + info.getOperations.length should be > 0 } "reply with InitJoinNack for InitJoin before joining" in { @@ -333,10 +333,13 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender { } "register multiple cluster JMX MBeans with akka.cluster.jmx.multi-mbeans-in-same-jvm = on" in { - def getConfig = (port: Int) => ConfigFactory.parseString(s""" + def getConfig = (port: Int) => + ConfigFactory + .parseString(s""" akka.cluster.jmx.multi-mbeans-in-same-jvm = on akka.remote.artery.canonical.port = $port - """).withFallback(ConfigFactory.parseString(ClusterSpec.config)) + """) + .withFallback(ConfigFactory.parseString(ClusterSpec.config)) val sys1 = ActorSystem("ClusterSpec4", getConfig(2552)) val sys2 = ActorSystem("ClusterSpec4", getConfig(2553)) @@ -347,13 +350,13 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender { val name1 = new ObjectName(s"akka:type=Cluster,port=2552") val info1 = ManagementFactory.getPlatformMBeanServer.getMBeanInfo(name1) - info1.getAttributes.length should 
be > (0) - info1.getOperations.length should be > (0) + info1.getAttributes.length should be > 0 + info1.getOperations.length should be > 0 val name2 = new ObjectName(s"akka:type=Cluster,port=2553") val info2 = ManagementFactory.getPlatformMBeanServer.getMBeanInfo(name2) - info2.getAttributes.length should be > (0) - info2.getOperations.length should be > (0) + info2.getAttributes.length should be > 0 + info2.getOperations.length should be > 0 } finally { shutdown(sys1) shutdown(sys2) diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterTestKit.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterTestKit.scala index f98373e7730..f9f69433b38 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterTestKit.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterTestKit.scala @@ -29,9 +29,7 @@ trait ClusterTestKit extends TestKitBase { private var actorSystems: List[ActorSystem] = List.empty - /** - * Register an [[ActorSystem]]. - */ + /** Register an [[ActorSystem]]. */ def register(actorSystem: ActorSystem) = { actorSystems = actorSystems :+ actorSystem actorSystem @@ -47,9 +45,7 @@ trait ClusterTestKit extends TestKitBase { actorSystem } - /** - * Creates a new [[ActorSystem]] using the passed [[Config]] and register it. - */ + /** Creates a new [[ActorSystem]] using the passed [[Config]] and register it. */ def newActorSystem(config: Config): ActorSystem = register(ActorSystem(name, config)) @@ -135,22 +131,20 @@ trait ClusterTestKit extends TestKitBase { // remove from internal list actorSystems = actorSystems.filterNot(_ == actorSystem) - val newConfig = ConfigFactory.parseString(s""" + val newConfig = ConfigFactory + .parseString(s""" akka.remote.artery.canonical.port = $port - """).withFallback(config) + """) + .withFallback(config) if (firstSeedNode) newActorSystemAsFirst(newConfig) else newActorSystem(newConfig) } - /** - * Returns true if the cluster instance for the provided [[ActorSystem]] is [[MemberStatus.Up]]. 
- */ + /** Returns true if the cluster instance for the provided [[ActorSystem]] is [[MemberStatus.Up]]. */ def isMemberUp(system: ActorSystem): Boolean = Cluster(system).selfMember.status == MemberStatus.Up - /** - * Returns true if the cluster instance for the provided [[ActorSystem]] has be shutdown. - */ + /** Returns true if the cluster instance for the provided [[ActorSystem]] has be shutdown. */ def isTerminated(system: ActorSystem): Boolean = Cluster(system).isTerminated } diff --git a/akka-cluster/src/test/scala/akka/cluster/CrossDcHeartbeatSenderSpec.scala b/akka-cluster/src/test/scala/akka/cluster/CrossDcHeartbeatSenderSpec.scala index c61fbc7a3c8..90b86e9c83b 100644 --- a/akka-cluster/src/test/scala/akka/cluster/CrossDcHeartbeatSenderSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/CrossDcHeartbeatSenderSpec.scala @@ -29,7 +29,8 @@ object CrossDcHeartbeatSenderSpec { } } -class CrossDcHeartbeatSenderSpec extends AkkaSpec(""" +class CrossDcHeartbeatSenderSpec + extends AkkaSpec(""" akka.loglevel = DEBUG akka.actor.provider = cluster # should not be used here @@ -39,7 +40,8 @@ class CrossDcHeartbeatSenderSpec extends AkkaSpec(""" failure-detector.heartbeat-interval = 0.2s } akka.remote.artery.canonical.port = 0 - """) with ImplicitSender { + """) + with ImplicitSender { "CrossDcHeartBeatSender" should { "increment heart beat sequence nr" in { @@ -48,11 +50,10 @@ class CrossDcHeartbeatSenderSpec extends AkkaSpec(""" awaitAssert(Cluster(system).selfMember.status == MemberStatus.Up) val underTest = system.actorOf(Props(new TestCrossDcHeartbeatSender(heartbeatProbe))) - underTest ! CurrentClusterState( - members = SortedSet( - Cluster(system).selfMember, - Member(UniqueAddress(Address("akka", system.name), 2L), Set("dc-dc2"), Version.Zero) - .copy(status = MemberStatus.Up))) + underTest ! 
CurrentClusterState(members = SortedSet( + Cluster(system).selfMember, + Member(UniqueAddress(Address("akka", system.name), 2L), Set("dc-dc2"), Version.Zero).copy(status = + MemberStatus.Up))) awaitAssert { underTest ! ReportStatus() diff --git a/akka-cluster/src/test/scala/akka/cluster/DowningProviderSpec.scala b/akka-cluster/src/test/scala/akka/cluster/DowningProviderSpec.scala index 5ae1fca0006..9589093f4d1 100644 --- a/akka-cluster/src/test/scala/akka/cluster/DowningProviderSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/DowningProviderSpec.scala @@ -39,7 +39,8 @@ class DummyDowningProvider(@unused system: ActorSystem) extends DowningProvider class DowningProviderSpec extends AnyWordSpec with Matchers { - val baseConf = ConfigFactory.parseString(""" + val baseConf = ConfigFactory + .parseString(""" akka { loglevel = WARNING actor.provider = "cluster" @@ -50,7 +51,8 @@ class DowningProviderSpec extends AnyWordSpec with Matchers { } } } - """).withFallback(ConfigFactory.load()) + """) + .withFallback(ConfigFactory.load()) "The downing provider mechanism" should { @@ -63,9 +65,11 @@ class DowningProviderSpec extends AnyWordSpec with Matchers { "use the specified downing provider" in { val system = ActorSystem( "auto-downing", - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" akka.cluster.downing-provider-class="akka.cluster.DummyDowningProvider" - """).withFallback(baseConf)) + """) + .withFallback(baseConf)) Cluster(system).downingProvider shouldBe a[DummyDowningProvider] awaitCond(Cluster(system).downingProvider.asInstanceOf[DummyDowningProvider].actorPropsAccessed.get(), 3.seconds) @@ -76,18 +80,20 @@ class DowningProviderSpec extends AnyWordSpec with Matchers { // race condition where the downing provider failure can be detected and trigger // graceful shutdown fast enough that creating the actor system throws on constructing // thread (or slow enough that we have time to try join the cluster before noticing) - val maybeSystem = try 
{ - Some( - ActorSystem( - "auto-downing", - ConfigFactory.parseString(""" + val maybeSystem = + try { + Some( + ActorSystem( + "auto-downing", + ConfigFactory + .parseString(""" akka.cluster.downing-provider-class="akka.cluster.FailingDowningProvider" """).withFallback(baseConf))) - } catch { - case NonFatal(_) => - // expected to sometimes happen - None - } + } catch { + case NonFatal(_) => + // expected to sometimes happen + None + } maybeSystem.foreach { system => val cluster = Cluster(system) diff --git a/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala b/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala index a5ff9d0831f..41aa9111573 100644 --- a/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala +++ b/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala @@ -12,9 +12,7 @@ import akka.event.EventStream import akka.remote.FailureDetector import akka.util.unused -/** - * User controllable "puppet" failure detector. - */ +/** User controllable "puppet" failure detector. 
*/ class FailureDetectorPuppet(@unused config: Config, @unused ev: EventStream) extends FailureDetector { sealed trait Status diff --git a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala index 2d3325cdb46..6f569379ccd 100644 --- a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala @@ -96,7 +96,7 @@ class GossipSpec extends AnyWordSpec with Matchers { "not reach convergence when unreachable" in { val r1 = Reachability.empty.unreachable(b1.uniqueAddress, a1.uniqueAddress) - val g1 = (Gossip(members = SortedSet(a1, b1), overview = GossipOverview(reachability = r1))) + val g1 = Gossip(members = SortedSet(a1, b1), overview = GossipOverview(reachability = r1)) .seen(a1.uniqueAddress) .seen(b1.uniqueAddress) state(g1, b1).convergence(Set.empty) should ===(false) @@ -107,7 +107,7 @@ class GossipSpec extends AnyWordSpec with Matchers { "reach convergence when downed node has observed unreachable" in { // e3 is Down val r1 = Reachability.empty.unreachable(e3.uniqueAddress, a1.uniqueAddress) - val g1 = (Gossip(members = SortedSet(a1, b1, e3), overview = GossipOverview(reachability = r1))) + val g1 = Gossip(members = SortedSet(a1, b1, e3), overview = GossipOverview(reachability = r1)) .seen(a1.uniqueAddress) .seen(b1.uniqueAddress) .seen(e3.uniqueAddress) @@ -219,10 +219,9 @@ class GossipSpec extends AnyWordSpec with Matchers { state(g1).youngestMember should ===(b1) val g2 = Gossip( members = SortedSet(a2, b1.copyUp(3), e1), - overview = GossipOverview( - reachability = Reachability.empty - .unreachable(a2.uniqueAddress, b1.uniqueAddress) - .unreachable(a2.uniqueAddress, e1.uniqueAddress))) + overview = GossipOverview(reachability = Reachability.empty + .unreachable(a2.uniqueAddress, b1.uniqueAddress) + .unreachable(a2.uniqueAddress, e1.uniqueAddress))) state(g2).youngestMember should ===(b1) val g3 = Gossip(members = SortedSet(a2, 
b1.copyUp(3), e2.copyUp(4))) state(g3).youngestMember should ===(e2) @@ -391,10 +390,9 @@ class GossipSpec extends AnyWordSpec with Matchers { "clear out a bunch of stuff when removing a node" in { val g = Gossip( members = SortedSet(dc1a1, dc1b1, dc2d2), - overview = GossipOverview( - reachability = Reachability.empty - .unreachable(dc1b1.uniqueAddress, dc2d2.uniqueAddress) - .unreachable(dc2d2.uniqueAddress, dc1b1.uniqueAddress))) + overview = GossipOverview(reachability = Reachability.empty + .unreachable(dc1b1.uniqueAddress, dc2d2.uniqueAddress) + .unreachable(dc2d2.uniqueAddress, dc1b1.uniqueAddress))) .:+(VectorClock.Node(Gossip.vclockName(dc1b1.uniqueAddress))) .:+(VectorClock.Node(Gossip.vclockName(dc2d2.uniqueAddress))) .remove(dc1b1.uniqueAddress, System.currentTimeMillis()) @@ -422,7 +420,7 @@ class GossipSpec extends AnyWordSpec with Matchers { .remove(dc2d1.uniqueAddress, System.currentTimeMillis()) gdc2.tombstones.keys should contain(dc2d1.uniqueAddress) - gdc2.members should not contain (dc2d1) + gdc2.members should not contain dc2d1 gdc2.overview.reachability.records.filter(r => r.subject == dc2d1.uniqueAddress || r.observer == dc2d1.uniqueAddress) should be(empty) gdc2.overview.reachability.versions.keys should not contain (dc2d1.uniqueAddress) @@ -432,7 +430,7 @@ class GossipSpec extends AnyWordSpec with Matchers { merged1.members should ===(SortedSet(dc1a1, dc1b1, dc2c1)) merged1.tombstones.keys should contain(dc2d1.uniqueAddress) - merged1.members should not contain (dc2d1) + merged1.members should not contain dc2d1 merged1.overview.reachability.records.filter(r => r.subject == dc2d1.uniqueAddress || r.observer == dc2d1.uniqueAddress) should be(empty) merged1.overview.reachability.versions.keys should not contain (dc2d1.uniqueAddress) diff --git a/akka-cluster/src/test/scala/akka/cluster/GossipTargetSelectorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/GossipTargetSelectorSpec.scala index 19b5aca878a..cc578f63c75 100644 --- 
a/akka-cluster/src/test/scala/akka/cluster/GossipTargetSelectorSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/GossipTargetSelectorSpec.scala @@ -182,7 +182,8 @@ class GossipTargetSelectorSpec extends AnyWordSpec with Matchers { Gossip(members = SortedSet(aDc1, bDc1, cDc1)), aDc1, aDc1.dataCenter, - crossDcConnections = 1) // means only a e and g are oldest + crossDcConnections = 1 + ) // means only a e and g are oldest val randomNodes = defaultSelector.randomNodesForFullGossip(state, 3) @@ -194,7 +195,8 @@ class GossipTargetSelectorSpec extends AnyWordSpec with Matchers { Gossip(members = SortedSet(aDc1, bDc1, cDc1, eDc2, fDc2, gDc3, hDc3)), bDc1, bDc1.dataCenter, - crossDcConnections = 1) // means only a, e and g are oldest + crossDcConnections = 1 + ) // means only a, e and g are oldest val randomNodes = defaultSelector.randomNodesForFullGossip(state, 3) @@ -206,7 +208,8 @@ class GossipTargetSelectorSpec extends AnyWordSpec with Matchers { Gossip(members = SortedSet(aDc1, bDc1, cDc1, eDc2, fDc2)), aDc1, aDc1.dataCenter, - crossDcConnections = 1) // means only a and e are oldest + crossDcConnections = 1 + ) // means only a and e are oldest val randomNodes = defaultSelector.randomNodesForFullGossip(state, 3) diff --git a/akka-cluster/src/test/scala/akka/cluster/HeartbeatNodeRingSpec.scala b/akka-cluster/src/test/scala/akka/cluster/HeartbeatNodeRingSpec.scala index 44bb5d1bab2..6552cf6eef5 100644 --- a/akka-cluster/src/test/scala/akka/cluster/HeartbeatNodeRingSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/HeartbeatNodeRingSpec.scala @@ -29,7 +29,7 @@ class HeartbeatNodeRingSpec extends AnyWordSpec with Matchers { nodes.foreach { n => val receivers = ring.receivers(n) receivers.size should ===(3) - receivers should not contain (n) + receivers should not contain n } } diff --git a/akka-cluster/src/test/scala/akka/cluster/JoinConfigCompatCheckClusterSpec.scala b/akka-cluster/src/test/scala/akka/cluster/JoinConfigCompatCheckClusterSpec.scala index 
bbbd6f86367..90cb0675010 100644 --- a/akka-cluster/src/test/scala/akka/cluster/JoinConfigCompatCheckClusterSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/JoinConfigCompatCheckClusterSpec.scala @@ -52,24 +52,32 @@ class JoinConfigCompatCheckClusterSpec extends AkkaSpec { "JoinConfigCompatCheckCluster" must { "be valid when no downing-provider" in { - val oldConfig = ConfigFactory.parseString(""" + val oldConfig = ConfigFactory + .parseString(""" akka.cluster.downing-provider-class = "" - """).withFallback(system.settings.config) - val newConfig = ConfigFactory.parseString(""" + """) + .withFallback(system.settings.config) + val newConfig = ConfigFactory + .parseString(""" akka.cluster.downing-provider-class = "" - """).withFallback(system.settings.config) + """) + .withFallback(system.settings.config) checkInitJoin(oldConfig, newConfig) should ===(Valid) } "be valid when same downing-provider" in { val oldConfig = - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" akka.cluster.downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider" - """).withFallback(system.settings.config) + """) + .withFallback(system.settings.config) val newConfig = - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" akka.cluster.downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider" - """).withFallback(system.settings.config) + """) + .withFallback(system.settings.config) checkInitJoin(oldConfig, newConfig) should ===(Valid) } @@ -81,35 +89,45 @@ class JoinConfigCompatCheckClusterSpec extends AkkaSpec { """) .withFallback(system.settings.config) val newConfig = - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" akka.cluster.downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider" - """).withFallback(system.settings.config) + """) + .withFallback(system.settings.config) checkInitJoin(oldConfig, newConfig) should ===(Valid) } "be invalid when different downing-provider" in { val oldConfig = - 
ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" akka.cluster.downing-provider-class = "akka.cluster.testkit.AutoDowning" - """).withFallback(system.settings.config) + """) + .withFallback(system.settings.config) val newConfig = - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" akka.cluster.downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider" - """).withFallback(system.settings.config) + """) + .withFallback(system.settings.config) checkInitJoin(oldConfig, newConfig).getClass should ===(classOf[Invalid]) } "be invalid when different sbr strategy" in { val oldConfig = - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" akka.cluster.downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider" akka.cluster.split-brain-resolver.active-strategy = keep-majority - """).withFallback(system.settings.config) + """) + .withFallback(system.settings.config) val newConfig = - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" akka.cluster.downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider" akka.cluster.split-brain-resolver.active-strategy = keep-oldest - """).withFallback(system.settings.config) + """) + .withFallback(system.settings.config) checkInitJoin(oldConfig, newConfig).getClass should ===(classOf[Invalid]) checkInitJoinAck(oldConfig, newConfig).getClass should ===(classOf[Invalid]) } diff --git a/akka-cluster/src/test/scala/akka/cluster/JoinConfigCompatCheckerRollingUpdateSpec.scala b/akka-cluster/src/test/scala/akka/cluster/JoinConfigCompatCheckerRollingUpdateSpec.scala index 14d59385b19..2ba9775accb 100644 --- a/akka-cluster/src/test/scala/akka/cluster/JoinConfigCompatCheckerRollingUpdateSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/JoinConfigCompatCheckerRollingUpdateSpec.scala @@ -14,7 +14,8 @@ import akka.testkit.LongRunningTest object JoinConfigCompatCheckerRollingUpdateSpec { - val baseConfig = ConfigFactory.parseString(""" + val baseConfig = 
ConfigFactory + .parseString(""" akka.log-dead-letters = off akka.log-dead-letters-during-shutdown = off akka.cluster.downing-provider-class = akka.cluster.testkit.AutoDowning @@ -27,7 +28,8 @@ object JoinConfigCompatCheckerRollingUpdateSpec { periodic-tasks-initial-delay = 300 ms publish-stats-interval = 0 s # always, when it happens } - """).withFallback(JoinConfigCompatCheckerSpec.baseConfig) + """) + .withFallback(JoinConfigCompatCheckerSpec.baseConfig) val v1Config: Config = baseConfig.withFallback(JoinConfigCompatCheckerSpec.configWithChecker) diff --git a/akka-cluster/src/test/scala/akka/cluster/JoinConfigCompatCheckerSpec.scala b/akka-cluster/src/test/scala/akka/cluster/JoinConfigCompatCheckerSpec.scala index b05b8539233..44c1c1cce50 100644 --- a/akka-cluster/src/test/scala/akka/cluster/JoinConfigCompatCheckerSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/JoinConfigCompatCheckerSpec.scala @@ -23,7 +23,8 @@ object JoinConfigCompatCheckerSpec { """) val configWithChecker: Config = - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" akka.cluster { config-compat-test = "test" sensitive.properties { @@ -41,7 +42,8 @@ object JoinConfigCompatCheckerSpec { } } } - """).withFallback(baseConfig) + """) + .withFallback(baseConfig) } class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { diff --git a/akka-cluster/src/test/scala/akka/cluster/MemberOrderingSpec.scala b/akka-cluster/src/test/scala/akka/cluster/MemberOrderingSpec.scala index 9563a1b87a4..28615b89f23 100644 --- a/akka-cluster/src/test/scala/akka/cluster/MemberOrderingSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/MemberOrderingSpec.scala @@ -62,15 +62,15 @@ class MemberOrderingSpec extends AnyWordSpec with Matchers { m1 should ===(m2) m1.hashCode should ===(m2.hashCode) - m3 should not be (m2) - m3 should not be (m1) + m3 should not be m2 + m3 should not be m1 m11 should ===(m22) m11.hashCode should ===(m22.hashCode) // different uid - m1 should not be 
(m11) - m2 should not be (m22) + m1 should not be m11 + m2 should not be m22 } "have consistent ordering and equals" in { diff --git a/akka-cluster/src/test/scala/akka/cluster/ReachabilityPerfSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ReachabilityPerfSpec.scala index de5b4167a15..87aa7a5df2e 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ReachabilityPerfSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ReachabilityPerfSpec.scala @@ -21,21 +21,19 @@ class ReachabilityPerfSpec extends AnyWordSpec with Matchers { val node = Address("akka", "sys", "a", 2552) private def createReachabilityOfSize(base: Reachability, size: Int): Reachability = - (1 to size).foldLeft(base) { - case (r, i) => - val observer = UniqueAddress(address.copy(host = Some("node-" + i)), i.toLong) - val j = if (i == size) 1 else i + 1 - val subject = UniqueAddress(address.copy(host = Some("node-" + j)), j.toLong) - r.unreachable(observer, subject).reachable(observer, subject) + (1 to size).foldLeft(base) { case (r, i) => + val observer = UniqueAddress(address.copy(host = Some("node-" + i)), i.toLong) + val j = if (i == size) 1 else i + 1 + val subject = UniqueAddress(address.copy(host = Some("node-" + j)), j.toLong) + r.unreachable(observer, subject).reachable(observer, subject) } @nowarn private def addUnreachable(base: Reachability, count: Int): Reachability = { val observers = base.versions.keySet.take(count) val subjects = Stream.continually(base.versions.keySet).flatten.iterator - observers.foldLeft(base) { - case (r, o) => - (1 to 5).foldLeft(r) { case (r, _) => r.unreachable(o, subjects.next()) } + observers.foldLeft(base) { case (r, o) => + (1 to 5).foldLeft(r) { case (r, _) => r.unreachable(o, subjects.next()) } } } diff --git a/akka-cluster/src/test/scala/akka/cluster/ResetSystemMessageSeqNrSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ResetSystemMessageSeqNrSpec.scala index 2af25dc988b..dff26e0619f 100644 --- 
a/akka-cluster/src/test/scala/akka/cluster/ResetSystemMessageSeqNrSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ResetSystemMessageSeqNrSpec.scala @@ -13,14 +13,14 @@ import akka.remote.artery.ArteryMultiNodeSpec import akka.testkit.ImplicitSender import akka.testkit.TestActors -/** - * Reproducer for issue #24847 - */ -class ResetSystemMessageSeqNrSpec extends ArteryMultiNodeSpec(""" +/** Reproducer for issue #24847 */ +class ResetSystemMessageSeqNrSpec + extends ArteryMultiNodeSpec(""" akka.loglevel = INFO akka.actor.provider=cluster akka.cluster.jmx.multi-mbeans-in-same-jvm = on - """) with ImplicitSender { + """) + with ImplicitSender { "System messages sequence numbers" should { diff --git a/akka-cluster/src/test/scala/akka/cluster/StartupWithOneThreadSpec.scala b/akka-cluster/src/test/scala/akka/cluster/StartupWithOneThreadSpec.scala index c30da1ec8d5..792aba1b09d 100644 --- a/akka-cluster/src/test/scala/akka/cluster/StartupWithOneThreadSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/StartupWithOneThreadSpec.scala @@ -34,8 +34,8 @@ object StartupWithOneThreadSpec { Props(new Actor with ActorLogging { val cluster = Cluster(context.system) log.debug(s"started ${cluster.selfAddress} ${Thread.currentThread().getName}") - def receive = { - case msg => sender() ! msg + def receive = { case msg => + sender() ! 
msg } }) } diff --git a/akka-cluster/src/test/scala/akka/cluster/VectorClockPerfSpec.scala b/akka-cluster/src/test/scala/akka/cluster/VectorClockPerfSpec.scala index f7fe26c754c..4f77adba7f5 100644 --- a/akka-cluster/src/test/scala/akka/cluster/VectorClockPerfSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/VectorClockPerfSpec.scala @@ -13,15 +13,14 @@ object VectorClockPerfSpec { import VectorClock._ def createVectorClockOfSize(size: Int): (VectorClock, SortedSet[Node]) = - (1 to size).foldLeft((VectorClock(), SortedSet.empty[Node])) { - case ((vc, nodes), i) => - val node = Node(i.toString) - (vc :+ node, nodes + node) + (1 to size).foldLeft((VectorClock(), SortedSet.empty[Node])) { case ((vc, nodes), i) => + val node = Node(i.toString) + (vc :+ node, nodes + node) } def copyVectorClock(vc: VectorClock): VectorClock = { - val versions = vc.versions.foldLeft(TreeMap.empty[Node, Long]) { - case (versions, (n, t)) => versions.updated(Node.fromHash(n), t) + val versions = vc.versions.foldLeft(TreeMap.empty[Node, Long]) { case (versions, (n, t)) => + versions.updated(Node.fromHash(n), t) } vc.copy(versions = versions) } diff --git a/akka-cluster/src/test/scala/akka/cluster/routing/ClusterRouterSupervisorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/routing/ClusterRouterSupervisorSpec.scala index 4fee3357181..eb4cc5b1b81 100644 --- a/akka-cluster/src/test/scala/akka/cluster/routing/ClusterRouterSupervisorSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/routing/ClusterRouterSupervisorSpec.scala @@ -13,9 +13,8 @@ object ClusterRouterSupervisorSpec { class KillableActor() extends Actor { - def receive = { - case "go away" => - throw new IllegalArgumentException("Goodbye then!") + def receive = { case "go away" => + throw new IllegalArgumentException("Goodbye then!") } } @@ -34,11 +33,12 @@ class ClusterRouterSupervisorSpec extends AkkaSpec(""" "use provided supervisor strategy" in { val router = system.actorOf( ClusterRouterPool( - 
RoundRobinPool(nrOfInstances = 1, supervisorStrategy = OneForOneStrategy(loggingEnabled = false) { - case _ => + RoundRobinPool( + nrOfInstances = 1, + supervisorStrategy = OneForOneStrategy(loggingEnabled = false) { case _ => testActor ! "supervised" SupervisorStrategy.Stop - }), + }), ClusterRouterPoolSettings(totalInstances = 1, maxInstancesPerNode = 1, allowLocalRoutees = true)) .props(Props(classOf[KillableActor])), name = "therouter") diff --git a/akka-cluster/src/test/scala/akka/cluster/sbr/LeaseMajoritySpec.scala b/akka-cluster/src/test/scala/akka/cluster/sbr/LeaseMajoritySpec.scala index 83527f94d28..c4f59b6497d 100644 --- a/akka-cluster/src/test/scala/akka/cluster/sbr/LeaseMajoritySpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/sbr/LeaseMajoritySpec.scala @@ -12,19 +12,22 @@ import akka.testkit.AkkaSpec class LeaseMajoritySpec extends AkkaSpec() with Eventually { val default = ConfigFactory - .parseString( - """ + .parseString(""" akka.cluster.split-brain-resolver.lease-majority.lease-implementation = "akka.coordination.lease.kubernetes" """) .withFallback(ConfigFactory.load()) - val blank = ConfigFactory.parseString(""" + val blank = ConfigFactory + .parseString(""" akka.cluster.split-brain-resolver.lease-majority { lease-name = " " - }""").withFallback(default) - val named = ConfigFactory.parseString(""" + }""") + .withFallback(default) + val named = ConfigFactory + .parseString(""" akka.cluster.split-brain-resolver.lease-majority { lease-name = "shopping-cart-akka-sbr" - }""").withFallback(default) + }""") + .withFallback(default) "Split Brain Resolver Lease Majority provider" must { diff --git a/akka-cluster/src/test/scala/akka/cluster/sbr/SplitBrainResolverSpec.scala b/akka-cluster/src/test/scala/akka/cluster/sbr/SplitBrainResolverSpec.scala index fc0b63e5fa1..6df28ac7a86 100644 --- a/akka-cluster/src/test/scala/akka/cluster/sbr/SplitBrainResolverSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/sbr/SplitBrainResolverSpec.scala @@ 
-117,11 +117,13 @@ class SplitBrainResolverSpec new LeaseSettings("akka-sbr", "test", new TimeoutSettings(1.second, 2.minutes, 3.seconds), ConfigFactory.empty) def createReachability(unreachability: Seq[(Member, Member)]): Reachability = { - Reachability(unreachability.map { - case (from, to) => Reachability.Record(from.uniqueAddress, to.uniqueAddress, Reachability.Unreachable, 1) - }.toIndexedSeq, unreachability.map { - case (from, _) => from.uniqueAddress -> 1L - }.toMap) + Reachability( + unreachability.map { case (from, to) => + Reachability.Record(from.uniqueAddress, to.uniqueAddress, Reachability.Unreachable, 1) + }.toIndexedSeq, + unreachability.map { case (from, _) => + from.uniqueAddress -> 1L + }.toMap) } def extSystem: ExtendedActorSystem = system.asInstanceOf[ExtendedActorSystem] @@ -475,8 +477,8 @@ class SplitBrainResolverSpec assertDowningSide(side1, Set(memberA, memberB, memberC)) } - "down indirectly connected when combined with clean partition: {A, (B, C)} | {D, E} => {A}" in new Setup2( - role = None) { + "down indirectly connected when combined with clean partition: {A, (B, C)} | {D, E} => {A}" in new Setup2(role = + None) { side1 = Set(memberA, memberB, memberC) side2 = Set(memberD, memberE) indirectlyConnected = List(memberB -> memberC, memberC -> memberB) @@ -1330,7 +1332,10 @@ class SplitBrainResolverSpec stop() } - "down minority partition" in new SetupKeepMajority(stableAfter = Duration.Zero, memberA.uniqueAddress, role = None) { + "down minority partition" in new SetupKeepMajority( + stableAfter = Duration.Zero, + memberA.uniqueAddress, + role = None) { memberUp(memberA, memberB, memberC, memberD, memberE) leader(memberA) reachabilityChanged(memberA -> memberB, memberC -> memberD) diff --git a/akka-cluster/src/test/scala/akka/cluster/sbr/TestAddresses.scala b/akka-cluster/src/test/scala/akka/cluster/sbr/TestAddresses.scala index be92ee2b059..40e3c785b69 100644 --- a/akka-cluster/src/test/scala/akka/cluster/sbr/TestAddresses.scala +++ 
b/akka-cluster/src/test/scala/akka/cluster/sbr/TestAddresses.scala @@ -13,9 +13,7 @@ import akka.cluster.MemberStatus.WeaklyUp import akka.cluster.UniqueAddress import akka.util.Version -/** - * Needed since the Member constructor is akka private - */ +/** Needed since the Member constructor is akka private */ object TestAddresses { private def dcRole(dc: ClusterSettings.DataCenter): String = ClusterSettings.DcRolePrefix + dc diff --git a/akka-cluster/src/test/scala/akka/cluster/testkit/AutoDown.scala b/akka-cluster/src/test/scala/akka/cluster/testkit/AutoDown.scala index 0cd963fe787..3fb5a09b24c 100644 --- a/akka-cluster/src/test/scala/akka/cluster/testkit/AutoDown.scala +++ b/akka-cluster/src/test/scala/akka/cluster/testkit/AutoDown.scala @@ -64,7 +64,7 @@ final class AutoDowning(system: ActorSystem) extends DowningProvider { if (clusterSettings.config.hasPath(key)) { toRootLowerCase(clusterSettings.config.getString(key)) match { case "off" => Duration.Undefined - case _ => clusterSettings.config.getMillisDuration(key).requiring(_ >= Duration.Zero, key + " >= 0s, or off") + case _ => clusterSettings.config.getMillisDuration(key).requiring(_ >= Duration.Zero, key + " >= 0s, or off") } } else Duration.Undefined @@ -79,9 +79,7 @@ final class AutoDowning(system: ActorSystem) extends DowningProvider { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[cluster] object AutoDown { def props(autoDownUnreachableAfter: FiniteDuration): Props = @@ -182,7 +180,6 @@ private[cluster] abstract class AutoDownBase(autoDownUnreachableAfter: FiniteDur } case _: ClusterDomainEvent => // not interested in other events - } def unreachableMember(m: Member): Unit = diff --git a/akka-coordination/src/main/scala/akka/coordination/lease/TimeoutSettings.scala b/akka-coordination/src/main/scala/akka/coordination/lease/TimeoutSettings.scala index bd0fc00cd33..d053827cf54 100644 --- a/akka-coordination/src/main/scala/akka/coordination/lease/TimeoutSettings.scala +++ 
b/akka-coordination/src/main/scala/akka/coordination/lease/TimeoutSettings.scala @@ -29,38 +29,26 @@ final class TimeoutSettings( val heartbeatTimeout: FiniteDuration, val operationTimeout: FiniteDuration) { - /** - * Java API - */ + /** Java API */ def getHeartbeatInterval(): java.time.Duration = heartbeatInterval.asJava - /** - * Java API - */ + /** Java API */ def getHeartbeatTimeout(): java.time.Duration = heartbeatTimeout.asJava - /** - * Java API - */ + /** Java API */ def getOperationTimeout(): java.time.Duration = operationTimeout.asJava - /** - * Java API - */ + /** Java API */ def withHeartbeatInterval(heartbeatInterval: java.time.Duration): TimeoutSettings = { copy(heartbeatInterval = heartbeatInterval.asScala) } - /** - * Java API - */ + /** Java API */ def withHeartbeatTimeout(heartbeatTimeout: java.time.Duration): TimeoutSettings = { copy(heartbeatTimeout = heartbeatTimeout.asScala) } - /** - * Java API - */ + /** Java API */ def withOperationTimeout(operationTimeout: java.time.Duration): TimeoutSettings = { copy(operationTimeout = operationTimeout.asScala) } diff --git a/akka-coordination/src/main/scala/akka/coordination/lease/internal/LeaseAdapter.scala b/akka-coordination/src/main/scala/akka/coordination/lease/internal/LeaseAdapter.scala index b722e397fdd..a61861b2e98 100644 --- a/akka-coordination/src/main/scala/akka/coordination/lease/internal/LeaseAdapter.scala +++ b/akka-coordination/src/main/scala/akka/coordination/lease/internal/LeaseAdapter.scala @@ -18,9 +18,7 @@ import akka.coordination.lease.LeaseSettings import akka.coordination.lease.javadsl.{ Lease => JavaLease } import akka.coordination.lease.scaladsl.{ Lease => ScalaLease } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi final private[akka] class LeaseAdapter(delegate: ScalaLease)(implicit val ec: ExecutionContext) extends JavaLease { @@ -35,9 +33,7 @@ final private[akka] class LeaseAdapter(delegate: ScalaLease)(implicit val ec: Ex override def getSettings(): 
LeaseSettings = delegate.settings } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi final private[akka] class LeaseAdapterToScala(val delegate: JavaLease)(implicit val ec: ExecutionContext) extends ScalaLease(delegate.getSettings()) { diff --git a/akka-coordination/src/main/scala/akka/coordination/lease/javadsl/Lease.scala b/akka-coordination/src/main/scala/akka/coordination/lease/javadsl/Lease.scala index 706786c2cca..5135c3a6463 100644 --- a/akka-coordination/src/main/scala/akka/coordination/lease/javadsl/Lease.scala +++ b/akka-coordination/src/main/scala/akka/coordination/lease/javadsl/Lease.scala @@ -40,9 +40,7 @@ abstract class Lease() { */ def acquire(leaseLostCallback: java.util.function.Consumer[Optional[Throwable]]): CompletionStage[java.lang.Boolean] - /** - * Release the lease so some other owner can acquire it. - */ + /** Release the lease so some other owner can acquire it. */ def release(): CompletionStage[java.lang.Boolean] /** diff --git a/akka-coordination/src/main/scala/akka/coordination/lease/scaladsl/Lease.scala b/akka-coordination/src/main/scala/akka/coordination/lease/scaladsl/Lease.scala index 5aa48fb4aee..225656c0836 100644 --- a/akka-coordination/src/main/scala/akka/coordination/lease/scaladsl/Lease.scala +++ b/akka-coordination/src/main/scala/akka/coordination/lease/scaladsl/Lease.scala @@ -37,9 +37,7 @@ abstract class Lease(val settings: LeaseSettings) { */ def acquire(leaseLostCallback: Option[Throwable] => Unit): Future[Boolean] - /** - * Release the lease so some other owner can acquire it. - */ + /** Release the lease so some other owner can acquire it. 
*/ def release(): Future[Boolean] /** diff --git a/akka-coordination/src/main/scala/akka/coordination/lease/scaladsl/LeaseProvider.scala b/akka-coordination/src/main/scala/akka/coordination/lease/scaladsl/LeaseProvider.scala index 68e562d7909..6c0132f2474 100644 --- a/akka-coordination/src/main/scala/akka/coordination/lease/scaladsl/LeaseProvider.scala +++ b/akka-coordination/src/main/scala/akka/coordination/lease/scaladsl/LeaseProvider.scala @@ -66,11 +66,10 @@ final class LeaseProvider(system: ExtendedActorSystem) extends Extension { // Try and load a scala implementation val lease: Try[Lease] = - loadLease[Lease](settings).recoverWith { - case _: ClassCastException => - // Try and load a java implementation - loadLease[akka.coordination.lease.javadsl.Lease](settings).map(javaLease => - new LeaseAdapterToScala(javaLease)(system.dispatchers.internalDispatcher)) + loadLease[Lease](settings).recoverWith { case _: ClassCastException => + // Try and load a java implementation + loadLease[akka.coordination.lease.javadsl.Lease](settings).map(javaLease => + new LeaseAdapterToScala(javaLease)(system.dispatchers.internalDispatcher)) } lease match { diff --git a/akka-discovery/src/main/scala/akka/discovery/Discovery.scala b/akka-discovery/src/main/scala/akka/discovery/Discovery.scala index 0f39ffa58d4..4f4990eeac9 100644 --- a/akka-discovery/src/main/scala/akka/discovery/Discovery.scala +++ b/akka-discovery/src/main/scala/akka/discovery/Discovery.scala @@ -33,9 +33,7 @@ final class Discovery(implicit system: ExtendedActorSystem) extends Extension { private lazy val defaultImpl = loadServiceDiscovery(_defaultImplMethod) - /** - * Default [[ServiceDiscovery]] as configured in `akka.discovery.method`. - */ + /** Default [[ServiceDiscovery]] as configured in `akka.discovery.method`. 
*/ @throws[IllegalArgumentException] def discovery: ServiceDiscovery = defaultImpl @@ -51,9 +49,7 @@ final class Discovery(implicit system: ExtendedActorSystem) extends Extension { implementations.computeIfAbsent(method, factory) } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private def createServiceDiscovery(method: String): ServiceDiscovery = { val config = system.settings.config @@ -68,13 +64,11 @@ final class Discovery(implicit system: ExtendedActorSystem) extends Extension { def create(clazzName: String): Try[ServiceDiscovery] = { dynamic .createInstanceFor[ServiceDiscovery](clazzName, (classOf[ExtendedActorSystem] -> system) :: Nil) - .recoverWith { - case _: ClassNotFoundException | _: NoSuchMethodException => - dynamic.createInstanceFor[ServiceDiscovery](clazzName, (classOf[ActorSystem] -> system) :: Nil) + .recoverWith { case _: ClassNotFoundException | _: NoSuchMethodException => + dynamic.createInstanceFor[ServiceDiscovery](clazzName, (classOf[ActorSystem] -> system) :: Nil) } - .recoverWith { - case _: ClassNotFoundException | _: NoSuchMethodException => - dynamic.createInstanceFor[ServiceDiscovery](clazzName, Nil) + .recoverWith { case _: ClassNotFoundException | _: NoSuchMethodException => + dynamic.createInstanceFor[ServiceDiscovery](clazzName, Nil) } } @@ -107,9 +101,7 @@ object Discovery extends ExtensionId[Discovery] with ExtensionIdProvider { override def createExtension(system: ExtendedActorSystem): Discovery = new Discovery()(system) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def checkClassPathForOldDiscovery(system: ExtendedActorSystem): Unit = { try { diff --git a/akka-discovery/src/main/scala/akka/discovery/ServiceDiscovery.scala b/akka-discovery/src/main/scala/akka/discovery/ServiceDiscovery.scala index ae245b1d274..34cd0da8bf2 100644 --- a/akka-discovery/src/main/scala/akka/discovery/ServiceDiscovery.scala +++ b/akka-discovery/src/main/scala/akka/discovery/ServiceDiscovery.scala @@ -42,9 
+42,7 @@ object ServiceDiscovery { extends DeadLetterSuppression with NoSerializationVerificationNeeded { - /** - * Java API - */ + /** Java API */ def getAddresses: java.util.List[ResolvedTarget] = { import akka.util.ccompat.JavaConverters._ addresses.asJava @@ -96,15 +94,11 @@ object ServiceDiscovery { final class ResolvedTarget(val host: String, val port: Option[Int], val address: Option[InetAddress]) extends NoSerializationVerificationNeeded { - /** - * Java API - */ + /** Java API */ def getPort: Optional[Int] = port.asJava - /** - * Java API - */ + /** Java API */ def getAddress: Optional[InetAddress] = address.asJava @@ -153,15 +147,11 @@ final class Lookup(val serviceName: String, val portName: Option[String], val pr */ def withProtocol(value: String): Lookup = copy(protocol = Some(value)) - /** - * Java API - */ + /** Java API */ def getPortName: Optional[String] = portName.asJava - /** - * Java API - */ + /** Java API */ def getProtocol: Optional[String] = protocol.asJava @@ -197,9 +187,7 @@ case object Lookup { */ def apply(serviceName: String): Lookup = new Lookup(serviceName, None, None) - /** - * Create a service Lookup with `serviceName`, optional `portName` and optional `protocol`. - */ + /** Create a service Lookup with `serviceName`, optional `portName` and optional `protocol`. */ def apply(serviceName: String, portName: Option[String], protocol: Option[String]): Lookup = new Lookup(serviceName, portName, protocol) @@ -265,9 +253,7 @@ case object Lookup { throw new IllegalArgumentException(s"Unable to create Lookup from passed SRV string, invalid format: $str") } - /** - * Returns true if passed string conforms with SRV format. Otherwise returns false. - */ + /** Returns true if passed string conforms with SRV format. Otherwise returns false. 
*/ def isValidSrv(srv: String): Boolean = srv match { case SrvQuery(_, _, serviceName) => validDomainName(serviceName) @@ -279,10 +265,7 @@ case object Lookup { } -/** - * Implement to provide a service discovery method - * - */ +/** Implement to provide a service discovery method */ abstract class ServiceDiscovery { import ServiceDiscovery._ @@ -319,7 +302,6 @@ abstract class ServiceDiscovery { * eagerness to wait for a result for this specific lookup. * * The returned future should be failed once resolveTimeout has passed with a [[DiscoveryTimeoutException]]. - * */ def lookup(query: Lookup, resolveTimeout: java.time.Duration): CompletionStage[Resolved] = { import scala.compat.java8.FutureConverters._ diff --git a/akka-discovery/src/main/scala/akka/discovery/aggregate/AggregateServiceDiscovery.scala b/akka-discovery/src/main/scala/akka/discovery/aggregate/AggregateServiceDiscovery.scala index 376a38e678b..460aecf6c6f 100644 --- a/akka-discovery/src/main/scala/akka/discovery/aggregate/AggregateServiceDiscovery.scala +++ b/akka-discovery/src/main/scala/akka/discovery/aggregate/AggregateServiceDiscovery.scala @@ -20,9 +20,7 @@ import akka.event.Logging import akka.util.Helpers.Requiring import akka.util.ccompat.JavaConverters._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private final class AggregateServiceDiscoverySettings(config: Config) { @@ -34,17 +32,13 @@ private final class AggregateServiceDiscoverySettings(config: Config) { } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private object AggregateServiceDiscovery { type Methods = List[(String, ServiceDiscovery)] } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class AggregateServiceDiscovery(system: ExtendedActorSystem) extends ServiceDiscovery { @@ -59,9 +53,7 @@ private[akka] final class AggregateServiceDiscovery(system: ExtendedActorSystem) } private implicit val ec: MessageDispatcher = system.dispatchers.internalDispatcher - /** - * Each 
discovery method is given the resolveTimeout rather than reducing it each time between methods. - */ + /** Each discovery method is given the resolveTimeout rather than reducing it each time between methods. */ override def lookup(lookup: Lookup, resolveTimeout: FiniteDuration): Future[Resolved] = resolve(methods, lookup, resolveTimeout) @@ -82,10 +74,9 @@ private[akka] final class AggregateServiceDiscovery(system: ExtendedActorSystem) } else Future.successful(resolved) } - .recoverWith { - case NonFatal(t) => - log.error(t, "[{}] Service discovery failed. Trying next discovery method", method) - resolve(tail, query, resolveTimeout) + .recoverWith { case NonFatal(t) => + log.error(t, "[{}] Service discovery failed. Trying next discovery method", method) + resolve(tail, query, resolveTimeout) } case Nil => // this is checked in `discoveryMethods`, but silence compiler warning diff --git a/akka-discovery/src/main/scala/akka/discovery/config/ConfigServiceDiscovery.scala b/akka-discovery/src/main/scala/akka/discovery/config/ConfigServiceDiscovery.scala index 1ac9ed708a3..fa2043cca2d 100644 --- a/akka-discovery/src/main/scala/akka/discovery/config/ConfigServiceDiscovery.scala +++ b/akka-discovery/src/main/scala/akka/discovery/config/ConfigServiceDiscovery.scala @@ -16,9 +16,7 @@ import akka.discovery.ServiceDiscovery.{ Resolved, ResolvedTarget } import akka.event.Logging import akka.util.ccompat.JavaConverters._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private object ConfigServicesParser { def parse(config: Config): Map[String, Resolved] = { @@ -31,22 +29,19 @@ private object ConfigServicesParser { } .toMap - byService.map { - case (serviceName, full) => - val endpoints = full.getConfigList("endpoints").asScala.toList - val resolvedTargets = endpoints.map { c => - val host = c.getString("host") - val port = if (c.hasPath("port")) Some(c.getInt("port")) else None - ResolvedTarget(host = host, port = port, address = None) - } - (serviceName, 
Resolved(serviceName, resolvedTargets)) + byService.map { case (serviceName, full) => + val endpoints = full.getConfigList("endpoints").asScala.toList + val resolvedTargets = endpoints.map { c => + val host = c.getString("host") + val port = if (c.hasPath("port")) Some(c.getInt("port")) else None + ResolvedTarget(host = host, port = port, address = None) + } + (serviceName, Resolved(serviceName, resolvedTargets)) } } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class ConfigServiceDiscovery(system: ExtendedActorSystem) extends ServiceDiscovery { diff --git a/akka-discovery/src/main/scala/akka/discovery/dns/DnsServiceDiscovery.scala b/akka-discovery/src/main/scala/akka/discovery/dns/DnsServiceDiscovery.scala index af219cac891..0603c67ec6b 100644 --- a/akka-discovery/src/main/scala/akka/discovery/dns/DnsServiceDiscovery.scala +++ b/akka-discovery/src/main/scala/akka/discovery/dns/DnsServiceDiscovery.scala @@ -30,9 +30,7 @@ import akka.pattern.ask import akka.util.OptionVal import akka.util.Timeout -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private object DnsServiceDiscovery { def srvRecordsToResolved(srvRequest: String, resolved: DnsProtocol.Resolved): Resolved = { @@ -62,9 +60,7 @@ private object DnsServiceDiscovery { } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class DnsServiceDiscovery(system: ExtendedActorSystem) extends ServiceDiscovery { diff --git a/akka-discovery/src/test/scala/akka/discovery/DiscoveryConfigurationSpec.scala b/akka-discovery/src/test/scala/akka/discovery/DiscoveryConfigurationSpec.scala index fdcd3be9fc3..5bacaea43fc 100644 --- a/akka-discovery/src/test/scala/akka/discovery/DiscoveryConfigurationSpec.scala +++ b/akka-discovery/src/test/scala/akka/discovery/DiscoveryConfigurationSpec.scala @@ -33,7 +33,8 @@ class DiscoveryConfigurationSpec extends AnyWordSpec with Matchers { val sys = ActorSystem( "DiscoveryConfigurationSpec", - ConfigFactory.parseString(s""" + 
ConfigFactory + .parseString(s""" akka.discovery { method = akka-mock-inside @@ -41,7 +42,8 @@ class DiscoveryConfigurationSpec extends AnyWordSpec with Matchers { class = $className } } - """).withFallback(ConfigFactory.load())) + """) + .withFallback(ConfigFactory.load())) try Discovery(sys).discovery.getClass.getCanonicalName should ===(className) finally TestKit.shutdownActorSystem(sys) @@ -53,7 +55,8 @@ class DiscoveryConfigurationSpec extends AnyWordSpec with Matchers { val sys = ActorSystem( "DiscoveryConfigurationSpec", - ConfigFactory.parseString(s""" + ConfigFactory + .parseString(s""" akka.discovery { method = mock1 @@ -64,7 +67,8 @@ class DiscoveryConfigurationSpec extends AnyWordSpec with Matchers { class = $className2 } } - """).withFallback(ConfigFactory.load())) + """) + .withFallback(ConfigFactory.load())) try { Discovery(sys).discovery.getClass.getCanonicalName should ===(className1) @@ -78,7 +82,8 @@ class DiscoveryConfigurationSpec extends AnyWordSpec with Matchers { val sys = ActorSystem( "DiscoveryConfigurationSpec", - ConfigFactory.parseString(s""" + ConfigFactory + .parseString(s""" akka.discovery { method = mock1 @@ -89,7 +94,8 @@ class DiscoveryConfigurationSpec extends AnyWordSpec with Matchers { class = $className2 } } - """).withFallback(ConfigFactory.load())) + """) + .withFallback(ConfigFactory.load())) try { (Discovery(sys).loadServiceDiscovery("mock2") should be) @@ -104,14 +110,16 @@ class DiscoveryConfigurationSpec extends AnyWordSpec with Matchers { val sys = ActorSystem( "DiscoveryConfigurationSpec", - ConfigFactory.parseString(s""" + ConfigFactory + .parseString(s""" akka.discovery { method = "mock1" mock1 { class = $className } } - """).withFallback(ConfigFactory.load())) + """) + .withFallback(ConfigFactory.load())) try { an[DiscoveryException] should be thrownBy Discovery(sys).discovery @@ -123,11 +131,13 @@ class DiscoveryConfigurationSpec extends AnyWordSpec with Matchers { val sys = ActorSystem( 
"DiscoveryConfigurationSpec", - ConfigFactory.parseString(s""" + ConfigFactory + .parseString(s""" akka.discovery { method = "$className" } - """).withFallback(ConfigFactory.load())) + """) + .withFallback(ConfigFactory.load())) try { an[IllegalArgumentException] should be thrownBy Discovery(sys).discovery diff --git a/akka-discovery/src/test/scala/akka/discovery/dns/DnsDiscoverySpec.scala b/akka-discovery/src/test/scala/akka/discovery/dns/DnsDiscoverySpec.scala index 5549131d316..6c55d2a2cba 100644 --- a/akka-discovery/src/test/scala/akka/discovery/dns/DnsDiscoverySpec.scala +++ b/akka-discovery/src/test/scala/akka/discovery/dns/DnsDiscoverySpec.scala @@ -33,9 +33,11 @@ object DnsDiscoverySpec { lazy val dockerDnsServerPort = SocketUtil.temporaryLocalPort() - val configWithAsyncDnsResolverAsDefault = ConfigFactory.parseString(""" + val configWithAsyncDnsResolverAsDefault = ConfigFactory + .parseString(""" akka.io.dns.resolver = "async-dns" - """).withFallback(config) + """) + .withFallback(config) } diff --git a/akka-discovery/src/test/scala/doc/akka/discovery/CompileOnlySpec.scala b/akka-discovery/src/test/scala/doc/akka/discovery/CompileOnlySpec.scala index 849fd7b75f2..dfe41036647 100644 --- a/akka-discovery/src/test/scala/doc/akka/discovery/CompileOnlySpec.scala +++ b/akka-discovery/src/test/scala/doc/akka/discovery/CompileOnlySpec.scala @@ -12,28 +12,28 @@ import scala.concurrent.ExecutionContext.Implicits.global object CompileOnlySpec { - //#loading + // #loading import akka.discovery.Discovery val system = ActorSystem() val serviceDiscovery = Discovery(system).discovery - //#loading + // #loading - //#basic + // #basic import akka.discovery.Lookup serviceDiscovery.lookup(Lookup("akka.io"), 1.second) // Convenience for a Lookup with only a serviceName serviceDiscovery.lookup("akka.io", 1.second) - //#basic + // #basic - //#full + // #full import akka.discovery.Lookup import akka.discovery.ServiceDiscovery.Resolved val lookup: Future[Resolved] = 
serviceDiscovery.lookup(Lookup("akka.io").withPortName("remoting").withProtocol("tcp"), 1.second) - //#full + // #full // compiler lookup.foreach(println) diff --git a/akka-distributed-data/src/main/scala-2/akka/cluster/ddata/GSet.scala b/akka-distributed-data/src/main/scala-2/akka/cluster/ddata/GSet.scala index 12ecfa1011a..2865fe64b64 100644 --- a/akka-distributed-data/src/main/scala-2/akka/cluster/ddata/GSet.scala +++ b/akka-distributed-data/src/main/scala-2/akka/cluster/ddata/GSet.scala @@ -10,9 +10,7 @@ object GSet { def apply(): GSet[Any] = _empty private[akka] def apply[A](set: Set[A]): GSet[A] = new GSet(set)(None) - /** - * Java API - */ + /** Java API */ def create[A](): GSet[A] = empty[A] // unapply from case class @@ -39,9 +37,7 @@ final case class GSet[A] private (elements: Set[A])(override val delta: Option[G type T = GSet[A] type D = GSet[A] - /** - * Java API - */ + /** Java API */ def getElements(): java.util.Set[A] = { import akka.util.ccompat.JavaConverters._ elements.asJava @@ -53,14 +49,10 @@ final case class GSet[A] private (elements: Set[A])(override val delta: Option[G def size: Int = elements.size - /** - * Adds an element to the set - */ + /** Adds an element to the set */ def +(element: A): GSet[A] = add(element) - /** - * Adds an element to the set - */ + /** Adds an element to the set */ def add(element: A): GSet[A] = { val newDelta = delta match { case Some(e) => Some(new GSet(e.elements + element)(None)) diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/DeltaPropagationSelector.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/DeltaPropagationSelector.scala index 2e52134aaf1..4c094cc5936 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/DeltaPropagationSelector.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/DeltaPropagationSelector.scala @@ -97,41 +97,40 @@ private[akka] trait DeltaPropagationSelector { // collect the deltas that have not already been sent to the 
node and merge // them into a delta group var deltas = Map.empty[KeyId, (ReplicatedData, Long, Long)] - deltaEntries.foreach { - case (key, entries) => - val deltaSentToNodeForKey = deltaSentToNode.getOrElse(key, TreeMap.empty[UniqueAddress, Long]) - val j = deltaSentToNodeForKey.getOrElse(node, 0L) - val deltaEntriesAfterJ = deltaEntriesAfter(entries, j) - if (deltaEntriesAfterJ.nonEmpty) { - val fromSeqNr = deltaEntriesAfterJ.head._1 - val toSeqNr = deltaEntriesAfterJ.last._1 - // in most cases the delta group merging will be the same for each node, - // so we cache the merged results - val cacheKey = (key, fromSeqNr, toSeqNr) - val deltaGroup = cache.get(cacheKey) match { - case None => - val group = deltaEntriesAfterJ.valuesIterator.reduceLeft { (d1, d2) => - val merged = d2 match { - case NoDeltaPlaceholder => NoDeltaPlaceholder - case _ => - // this is fine also if d1 is a NoDeltaPlaceholder - d1.merge(d2.asInstanceOf[d1.T]) - } - merged match { - case s: ReplicatedDeltaSize if s.deltaSize >= maxDeltaSize => - // discard too large deltas - NoDeltaPlaceholder - case _ => merged - } + deltaEntries.foreach { case (key, entries) => + val deltaSentToNodeForKey = deltaSentToNode.getOrElse(key, TreeMap.empty[UniqueAddress, Long]) + val j = deltaSentToNodeForKey.getOrElse(node, 0L) + val deltaEntriesAfterJ = deltaEntriesAfter(entries, j) + if (deltaEntriesAfterJ.nonEmpty) { + val fromSeqNr = deltaEntriesAfterJ.head._1 + val toSeqNr = deltaEntriesAfterJ.last._1 + // in most cases the delta group merging will be the same for each node, + // so we cache the merged results + val cacheKey = (key, fromSeqNr, toSeqNr) + val deltaGroup = cache.get(cacheKey) match { + case None => + val group = deltaEntriesAfterJ.valuesIterator.reduceLeft { (d1, d2) => + val merged = d2 match { + case NoDeltaPlaceholder => NoDeltaPlaceholder + case _ => + // this is fine also if d1 is a NoDeltaPlaceholder + d1.merge(d2.asInstanceOf[d1.T]) } - cache = cache.updated(cacheKey, group) - group - 
case Some(group) => group - } - deltas = deltas.updated(key, (deltaGroup, fromSeqNr, toSeqNr)) - deltaSentToNode = - deltaSentToNode.updated(key, deltaSentToNodeForKey.updated(node, deltaEntriesAfterJ.lastKey)) + merged match { + case s: ReplicatedDeltaSize if s.deltaSize >= maxDeltaSize => + // discard too large deltas + NoDeltaPlaceholder + case _ => merged + } + } + cache = cache.updated(cacheKey, group) + group + case Some(group) => group } + deltas = deltas.updated(key, (deltaGroup, fromSeqNr, toSeqNr)) + deltaSentToNode = + deltaSentToNode.updated(key, deltaSentToNodeForKey.updated(node, deltaEntriesAfterJ.lastKey)) + } } if (deltas.nonEmpty) { @@ -175,23 +174,21 @@ private[akka] trait DeltaPropagationSelector { if (all.isEmpty) deltaEntries = Map.empty else { - deltaEntries = deltaEntries.map { - case (key, entries) => - val minVersion = findSmallestVersionPropagatedToAllNodes(key, all) + deltaEntries = deltaEntries.map { case (key, entries) => + val minVersion = findSmallestVersionPropagatedToAllNodes(key, all) - val deltaEntriesAfterMin = deltaEntriesAfter(entries, minVersion) + val deltaEntriesAfterMin = deltaEntriesAfter(entries, minVersion) - // TODO perhaps also remove oldest when deltaCounter is too far ahead (e.g. 10 cycles) + // TODO perhaps also remove oldest when deltaCounter is too far ahead (e.g. 
10 cycles) - key -> deltaEntriesAfterMin + key -> deltaEntriesAfterMin } } } def cleanupRemovedNode(address: UniqueAddress): Unit = { - deltaSentToNode = deltaSentToNode.map { - case (key, deltaSentToNodeForKey) => - key -> (deltaSentToNodeForKey - address) + deltaSentToNode = deltaSentToNode.map { case (key, deltaSentToNodeForKey) => + key -> (deltaSentToNodeForKey - address) } } } diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/DistributedData.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/DistributedData.scala index b1d08a753a8..a4248a90775 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/DistributedData.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/DistributedData.scala @@ -35,9 +35,7 @@ class DistributedData(system: ExtendedActorSystem) extends Extension { implicit val selfUniqueAddress: SelfUniqueAddress = SelfUniqueAddress(Cluster(system).selfUniqueAddress) - /** - * `ActorRef` of the [[Replicator]] . - */ + /** `ActorRef` of the [[Replicator]] . */ val replicator: ActorRef = if (isTerminated) { val log = Logging(system, classOf[DistributedData]) @@ -63,8 +61,6 @@ class DistributedData(system: ExtendedActorSystem) extends Extension { } -/** - * Cluster non-specific (typed vs classic) wrapper for [[akka.cluster.UniqueAddress]]. - */ +/** Cluster non-specific (typed vs classic) wrapper for [[akka.cluster.UniqueAddress]]. 
*/ @SerialVersionUID(1L) final case class SelfUniqueAddress(uniqueAddress: UniqueAddress) diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/DurableStore.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/DurableStore.scala index 6593971dc50..9c9c1ba0d7b 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/DurableStore.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/DurableStore.scala @@ -79,9 +79,7 @@ object DurableStore { def this(message: String) = this(message, null) } - /** - * Request to expire (remove) entries. - */ + /** Request to expire (remove) entries. */ final case class Expire(keys: Set[KeyId]) /** @@ -203,50 +201,46 @@ final class LmdbDurableStore(config: Config) extends Actor with ActorLogging { def receive: Receive = init - def init: Receive = { - case LoadAll => - if (dir.exists && dir.list().length > 0) { - val debugEnabled = log.isDebugEnabled - val t0 = if (debugEnabled) System.nanoTime() else 0L - val l = lmdb() - val tx = l.env.txnRead() + def init: Receive = { case LoadAll => + if (dir.exists && dir.list().length > 0) { + val debugEnabled = log.isDebugEnabled + val t0 = if (debugEnabled) System.nanoTime() else 0L + val l = lmdb() + val tx = l.env.txnRead() + try { + val iter = l.db.iterate(tx) try { - val iter = l.db.iterate(tx) - try { - var n = 0 - val loadData = LoadData(iter.asScala.map { entry => - n += 1 - val keyArray = new Array[Byte](entry.key.remaining) - entry.key.get(keyArray) - val key = new String(keyArray, ByteString.UTF_8) - val valArray = new Array[Byte](entry.`val`.remaining) - entry.`val`.get(valArray) - val envelope = serializer.fromBinary(valArray, manifest).asInstanceOf[DurableDataEnvelope] - key -> envelope - }.toMap) - if (loadData.data.nonEmpty) - sender() ! loadData - sender() ! 
LoadAllCompleted - if (debugEnabled) - log.debug( - "load all of [{}] entries took [{} ms]", - n, - TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - t0)) - context.become(active) - } finally { - Try(iter.close()) - } - } catch { - case NonFatal(e) => - throw new LoadFailed("failed to load durable distributed-data", e) + var n = 0 + val loadData = LoadData(iter.asScala.map { entry => + n += 1 + val keyArray = new Array[Byte](entry.key.remaining) + entry.key.get(keyArray) + val key = new String(keyArray, ByteString.UTF_8) + val valArray = new Array[Byte](entry.`val`.remaining) + entry.`val`.get(valArray) + val envelope = serializer.fromBinary(valArray, manifest).asInstanceOf[DurableDataEnvelope] + key -> envelope + }.toMap) + if (loadData.data.nonEmpty) + sender() ! loadData + sender() ! LoadAllCompleted + if (debugEnabled) + log.debug("load all of [{}] entries took [{} ms]", n, TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - t0)) + context.become(active) } finally { - Try(tx.close()) + Try(iter.close()) } - } else { - // no files to load - sender() ! LoadAllCompleted - context.become(active) + } catch { + case NonFatal(e) => + throw new LoadFailed("failed to load durable distributed-data", e) + } finally { + Try(tx.close()) } + } else { + // no files to load + sender() ! LoadAllCompleted + context.become(active) + } } def active: Receive = { diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Flag.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Flag.scala index 915da67a6a2..3f7fd3055a7 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Flag.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Flag.scala @@ -6,26 +6,18 @@ package akka.cluster.ddata object Flag { - /** - * `Flag` that is initialized to `false`. - */ + /** `Flag` that is initialized to `false`. */ val empty: Flag = new Flag(false) - /** - * `Flag` that is initialized to `false`. - */ + /** `Flag` that is initialized to `false`. 
*/ val Disabled: Flag = empty - /** - * `Flag` that is initialized to `true`. - */ + /** `Flag` that is initialized to `true`. */ val Enabled: Flag = new Flag(true) def apply(): Flag = Disabled - /** - * Java API: `Flag` that is initialized to `false`. - */ + /** Java API: `Flag` that is initialized to `false`. */ def create(): Flag = Disabled // unapply from case class diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/GCounter.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/GCounter.scala index d192ccb7920..b397ae13ead 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/GCounter.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/GCounter.scala @@ -13,14 +13,10 @@ object GCounter { val empty: GCounter = new GCounter def apply(): GCounter = empty - /** - * Java API - */ + /** Java API */ def create(): GCounter = empty - /** - * Extract the [[GCounter#value]]. - */ + /** Extract the [[GCounter#value]]. */ def unapply(c: GCounter): Option[BigInt] = Some(c.value) private val Zero = BigInt(0) @@ -55,16 +51,12 @@ final class GCounter private[akka] ( type T = GCounter type D = GCounter - /** - * Scala API: Current total value of the counter. - */ + /** Scala API: Current total value of the counter. */ def value: BigInt = state.values.foldLeft(Zero) { (acc, v) => acc + v } - /** - * Java API: Current total value of the counter. - */ + /** Java API: Current total value of the counter. 
*/ def getValue: BigInteger = value.bigInteger /** @@ -79,14 +71,10 @@ final class GCounter private[akka] ( */ def increment(node: SelfUniqueAddress, n: Long): GCounter = increment(node.uniqueAddress, n) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def increment(key: UniqueAddress): GCounter = increment(key, 1) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def increment(key: UniqueAddress, n: BigInt): GCounter = { require(n >= 0, "Can't decrement a GCounter") if (n == 0) this diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Key.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Key.scala index e89cc9de102..184f5349ae2 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Key.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Key.scala @@ -8,9 +8,7 @@ import akka.cluster.ddata.Key.UnspecificKey object Key { - /** - * Extract the [[Key#id]]. - */ + /** Extract the [[Key#id]]. 
*/ def unapply(k: Key[_]): Option[String] = Some(k.id) private[akka] type KeyR = Key[ReplicatedData] diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWMap.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWMap.scala index cdcf65bb15f..5d0fb432185 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWMap.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWMap.scala @@ -10,9 +10,7 @@ import akka.cluster.ddata.ORMap.ZeroTag object LWWMap { - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] case object LWWMapTag extends ZeroTag { override def zero: DeltaReplicatedData = LWWMap.empty override final val value: Int = 4 @@ -22,14 +20,10 @@ object LWWMap { def empty[A, B]: LWWMap[A, B] = _empty.asInstanceOf[LWWMap[A, B]] def apply(): LWWMap[Any, Any] = _empty - /** - * Java API - */ + /** Java API */ def create[A, B](): LWWMap[A, B] = empty - /** - * Extract the [[LWWMap#entries]]. - */ + /** Extract the [[LWWMap#entries]]. */ def unapply[A, B](m: LWWMap[A, B]): Option[Map[A, B]] = Some(m.entries) } @@ -65,14 +59,10 @@ final class LWWMap[A, B] private[akka] (private[akka] val underlying: ORMap[A, L type T = LWWMap[A, B] type D = ORMap.DeltaOp - /** - * Scala API: All entries of the map. - */ + /** Scala API: All entries of the map. */ def entries: Map[A, B] = underlying.entries.map { case (k, r) => k -> r.value } - /** - * Java API: All entries of the map. - */ + /** Java API: All entries of the map. 
*/ def getEntries(): java.util.Map[A, B] = { import akka.util.ccompat.JavaConverters._ entries.asJava @@ -86,17 +76,13 @@ final class LWWMap[A, B] private[akka] (private[akka] val underlying: ORMap[A, L def size: Int = underlying.size - /** - * Adds an entry to the map - */ + /** Adds an entry to the map */ def :+(entry: (A, B))(implicit node: SelfUniqueAddress): LWWMap[A, B] = { val (key, value) = entry put(node, key, value) } - /** - * Adds an entry to the map - */ + /** Adds an entry to the map */ def put(node: SelfUniqueAddress, key: A, value: B): LWWMap[A, B] = put(node.uniqueAddress, key, value, defaultClock[B]) @@ -111,9 +97,7 @@ final class LWWMap[A, B] private[akka] (private[akka] val underlying: ORMap[A, L def put(node: SelfUniqueAddress, key: A, value: B, clock: Clock[B]): LWWMap[A, B] = put(node.uniqueAddress, key, value, clock) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def put(node: UniqueAddress, key: A, value: B, clock: Clock[B]): LWWMap[A, B] = { val newRegister = underlying.get(key) match { case Some(r) => r.withValue(node, value, clock) @@ -130,9 +114,7 @@ final class LWWMap[A, B] private[akka] (private[akka] val underlying: ORMap[A, L def remove(node: SelfUniqueAddress, key: A): LWWMap[A, B] = remove(node.uniqueAddress, key) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def remove(node: UniqueAddress, key: A): LWWMap[A, B] = new LWWMap(underlying.remove(node, key)) @@ -161,7 +143,7 @@ final class LWWMap[A, B] private[akka] (private[akka] val underlying: ORMap[A, L // this class cannot be a `case class` because we need different `unapply` - override def toString: String = s"LWW$entries" //e.g. LWWMap(a -> 1, b -> 2) + override def toString: String = s"LWW$entries" // e.g. 
LWWMap(a -> 1, b -> 2) override def equals(o: Any): Boolean = o match { case other: LWWMap[_, _] => underlying == other.underlying diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWRegister.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWRegister.scala index b8649b32556..a18c5d22c8f 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWRegister.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWRegister.scala @@ -42,9 +42,7 @@ object LWWRegister { */ def reverseClock[A]: Clock[A] = _reverseClock.asInstanceOf[Clock[A]] - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def apply[A](node: UniqueAddress, initialValue: A, clock: Clock[A]): LWWRegister[A] = new LWWRegister(node, initialValue, clock(0L, initialValue)) @@ -61,21 +59,15 @@ object LWWRegister { def create[A](initialValue: A)(implicit node: SelfUniqueAddress, clock: Clock[A] = defaultClock[A]): LWWRegister[A] = apply(node.uniqueAddress, initialValue, clock) - /** - * Java API - */ + /** Java API */ def create[A](node: SelfUniqueAddress, initialValue: A, clock: Clock[A]): LWWRegister[A] = apply(node.uniqueAddress, initialValue, clock) - /** - * Java API - */ + /** Java API */ def create[A](node: SelfUniqueAddress, initialValue: A): LWWRegister[A] = apply(node.uniqueAddress, initialValue, defaultClock[A]) - /** - * Extract the [[LWWRegister#value]]. - */ + /** Extract the [[LWWRegister#value]]. 
*/ def unapply[A](c: LWWRegister[A]): Option[A] = Some(c.value) } @@ -117,9 +109,7 @@ final class LWWRegister[A] private[akka] (private[akka] val node: UniqueAddress, type T = LWWRegister[A] - /** - * Java API - */ + /** Java API */ def getValue(): A = value /** @@ -133,9 +123,7 @@ final class LWWRegister[A] private[akka] (private[akka] val node: UniqueAddress, def withValue(node: SelfUniqueAddress, value: A, clock: Clock[A]): LWWRegister[A] = withValue(node.uniqueAddress, value, clock) - /** - * Change the value of the register. - */ + /** Change the value of the register. */ def withValue(node: SelfUniqueAddress, value: A): LWWRegister[A] = withValue(node, value, defaultClock[A]) @@ -150,14 +138,10 @@ final class LWWRegister[A] private[akka] (private[akka] val node: UniqueAddress, def withValueOf(value: A)(implicit node: SelfUniqueAddress, clock: Clock[A] = defaultClock[A]): LWWRegister[A] = withValue(node, value, clock) - /** - * The current `value` was set by this node. - */ + /** The current `value` was set by this node. */ def updatedBy: UniqueAddress = node - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def withValue(node: UniqueAddress, value: A, clock: Clock[A]): LWWRegister[A] = new LWWRegister(node, value, clock(timestamp, value)) diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMap.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMap.scala index f2e0bed6850..7c6c7931578 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMap.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMap.scala @@ -17,14 +17,10 @@ object ORMap { def empty[A, B <: ReplicatedData]: ORMap[A, B] = _empty.asInstanceOf[ORMap[A, B]] def apply(): ORMap[Any, ReplicatedData] = _empty - /** - * Java API - */ + /** Java API */ def create[A, B <: ReplicatedData](): ORMap[A, B] = empty[A, B] - /** - * Extract the [[ORMap#entries]]. - */ + /** Extract the [[ORMap#entries]]. 
*/ def unapply[A, B <: ReplicatedData](m: ORMap[A, B]): Option[Map[A, B]] = Some(m.entries) sealed trait DeltaOp extends ReplicatedDelta with RequiresCausalDeliveryOfDeltas with ReplicatedDataSerialization { @@ -42,17 +38,13 @@ object ORMap { def value: Int } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] case object VanillaORMapTag extends ZeroTag { override def zero: DeltaReplicatedData = ORMap.empty override final val value: Int = 0 } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] sealed abstract class AtomicDeltaOp[A, B <: ReplicatedData] extends DeltaOp with ReplicatedDeltaSize { @@ -191,14 +183,10 @@ final class ORMap[A, B <: ReplicatedData] private[akka] ( type T = ORMap[A, B] type D = ORMap.DeltaOp - /** - * Scala API: All entries of the map. - */ + /** Scala API: All entries of the map. */ def entries: Map[A, B] = values - /** - * Java API: All entries of the map. - */ + /** Java API: All entries of the map. */ def getEntries(): java.util.Map[A, B] = { import akka.util.ccompat.JavaConverters._ entries.asJava @@ -243,9 +231,7 @@ final class ORMap[A, B <: ReplicatedData] private[akka] ( */ def put(node: SelfUniqueAddress, key: A, value: B): ORMap[A, B] = put(node.uniqueAddress, key, value) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def put(node: UniqueAddress, key: A, value: B): ORMap[A, B] = if (value.isInstanceOf[ORSet[_]] && values.contains(key)) throw new IllegalArgumentException( @@ -288,9 +274,7 @@ final class ORMap[A, B <: ReplicatedData] private[akka] ( def update(node: SelfUniqueAddress, key: A, initial: B, modify: java.util.function.Function[B, B]): ORMap[A, B] = updated(node.uniqueAddress, key, initial)(value => modify.apply(value)) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def updated(node: UniqueAddress, key: A, initial: B, valueDeltas: Boolean = false)( modify: B => B): ORMap[A, B] = { val (oldValue, hasOldValue) = 
values.get(key) match { @@ -335,9 +319,7 @@ final class ORMap[A, B <: ReplicatedData] private[akka] ( */ def remove(node: SelfUniqueAddress, key: A): ORMap[A, B] = remove(node.uniqueAddress, key) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def remove(node: UniqueAddress, key: A): ORMap[A, B] = { // for removals the delta values map emitted will be empty val newKeys = keys.resetDelta.remove(node, key) @@ -412,22 +394,23 @@ final class ORMap[A, B <: ReplicatedData] private[akka] ( } var mergedKeys: ORSet[A] = this.keys - var (mergedValues, tombstonedVals): (Map[A, B], Map[A, B]) = this.values.partition { - case (k, _) => this.keys.contains(k) + var (mergedValues, tombstonedVals): (Map[A, B], Map[A, B]) = this.values.partition { case (k, _) => + this.keys.contains(k) } val processDelta: PartialFunction[ORMap.DeltaOp, Unit] = { case putOp: PutDeltaOp[_, _] => val keyDelta = putOp.underlying mergedKeys = mergedKeys.mergeDelta(keyDelta) - mergedValues = mergedValues + putOp + mergedValues = + mergedValues + putOp .asInstanceOf[PutDeltaOp[A, B]] .value // put is destructive and propagates only full values of B! 
case removeOp: RemoveDeltaOp[_, _] => val removedKey = removeOp.underlying match { // if op is RemoveDeltaOp then it must have exactly one element in the elements case op: ORSet.RemoveDeltaOp[_] => op.underlying.elements.head.asInstanceOf[A] - case _ => throw new IllegalArgumentException("ORMap.RemoveDeltaOp must contain ORSet.RemoveDeltaOp inside") + case _ => throw new IllegalArgumentException("ORMap.RemoveDeltaOp must contain ORSet.RemoveDeltaOp inside") } mergedValues = mergedValues - removedKey mergedKeys = mergedKeys.mergeDelta(removeOp.underlying) @@ -436,41 +419,38 @@ final class ORMap[A, B <: ReplicatedData] private[akka] ( case removeKeyOp: RemoveKeyDeltaOp[_, _] => // removeKeyOp tombstones values for later use if (mergedValues.contains(removeKeyOp.asInstanceOf[RemoveKeyDeltaOp[A, B]].removedKey)) { - tombstonedVals = tombstonedVals + (removeKeyOp - .asInstanceOf[RemoveKeyDeltaOp[A, B]] - .removedKey -> mergedValues(removeKeyOp.asInstanceOf[RemoveKeyDeltaOp[A, B]].removedKey)) + tombstonedVals = + tombstonedVals + (removeKeyOp.asInstanceOf[RemoveKeyDeltaOp[A, B]].removedKey -> mergedValues( + removeKeyOp.asInstanceOf[RemoveKeyDeltaOp[A, B]].removedKey)) } mergedValues = mergedValues - removeKeyOp.asInstanceOf[RemoveKeyDeltaOp[A, B]].removedKey mergedKeys = mergedKeys.mergeDelta(removeKeyOp.underlying) case updateOp: UpdateDeltaOp[_, _] => mergedKeys = mergedKeys.mergeDelta(updateOp.underlying) - updateOp.asInstanceOf[UpdateDeltaOp[A, B]].values.foreach { - case (key, value) => - if (mergedKeys.contains(key)) { - if (mergedValues.contains(key)) { - mergedValues = mergedValues + (key -> mergeValue(mergedValues(key), value)) - } else if (tombstonedVals.contains(key)) { - mergedValues = mergedValues + (key -> mergeValue(tombstonedVals(key), value)) - } else { - value match { - case _: ReplicatedDelta => - mergedValues = mergedValues + (key -> mergeValue(value.asInstanceOf[ReplicatedDelta].zero, value)) - case _ => - mergedValues = mergedValues + (key -> 
value.asInstanceOf[B]) - } + updateOp.asInstanceOf[UpdateDeltaOp[A, B]].values.foreach { case (key, value) => + if (mergedKeys.contains(key)) { + if (mergedValues.contains(key)) { + mergedValues = mergedValues + (key -> mergeValue(mergedValues(key), value)) + } else if (tombstonedVals.contains(key)) { + mergedValues = mergedValues + (key -> mergeValue(tombstonedVals(key), value)) + } else { + value match { + case _: ReplicatedDelta => + mergedValues = mergedValues + (key -> mergeValue(value.asInstanceOf[ReplicatedDelta].zero, value)) + case _ => + mergedValues = mergedValues + (key -> value.asInstanceOf[B]) } } + } } } - val processNestedDelta: PartialFunction[ORMap.DeltaOp, Unit] = { - case ORMap.DeltaGroup(ops) => - ops.foreach { - processDelta.orElse { - case ORMap.DeltaGroup(_) => - throw new IllegalStateException("Cannot nest DeltaGroups") - } + val processNestedDelta: PartialFunction[ORMap.DeltaOp, Unit] = { case ORMap.DeltaGroup(ops) => + ops.foreach { + processDelta.orElse { case ORMap.DeltaGroup(_) => + throw new IllegalStateException("Cannot nest DeltaGroups") } + } } processDelta.orElse(processNestedDelta)(thatDelta) diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMultiMap.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMultiMap.scala index 61f5ddef51b..19e72816b58 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMultiMap.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMultiMap.scala @@ -10,17 +10,13 @@ import akka.cluster.ddata.ORMap._ object ORMultiMap { - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] case object ORMultiMapTag extends ZeroTag { override def zero: DeltaReplicatedData = ORMultiMap.empty override final val value: Int = 2 } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] case object ORMultiMapWithValueDeltasTag extends ZeroTag { override def zero: DeltaReplicatedData = 
ORMultiMap.emptyWithValueDeltas override final val value: Int = 3 @@ -30,26 +26,18 @@ object ORMultiMap { val _emptyWithValueDeltas: ORMultiMap[Any, Any] = new ORMultiMap(new ORMap(ORSet.empty, Map.empty, zeroTag = ORMultiMapWithValueDeltasTag), true) - /** - * Provides an empty multimap. - */ + /** Provides an empty multimap. */ def empty[A, B]: ORMultiMap[A, B] = _empty.asInstanceOf[ORMultiMap[A, B]] def emptyWithValueDeltas[A, B]: ORMultiMap[A, B] = _emptyWithValueDeltas.asInstanceOf[ORMultiMap[A, B]] def apply(): ORMultiMap[Any, Any] = _empty - /** - * Java API - */ + /** Java API */ def create[A, B](): ORMultiMap[A, B] = empty[A, B] - /** - * Extract the [[ORMultiMap#entries]]. - */ + /** Extract the [[ORMultiMap#entries]]. */ def unapply[A, B](m: ORMultiMap[A, B]): Option[Map[A, Set[B]]] = Some(m.entries) - /** - * Extract the [[ORMultiMap#entries]] of an `ORMultiMap`. - */ + /** Extract the [[ORMultiMap#entries]] of an `ORMultiMap`. */ def unapply[A, B <: ReplicatedData](value: Any): Option[Map[A, Set[B]]] = value match { case m: ORMultiMap[A, B] @unchecked => Some(m.entries) case _ => None @@ -80,8 +68,8 @@ final class ORMultiMap[A, B] private[akka] ( if (withValueDeltas) { val newUnderlying = underlying.mergeRetainingDeletedValues(that.underlying) // Garbage collect the tombstones we no longer need, i.e. those that have Set() as a value. 
- val newValues = newUnderlying.values.filterNot { - case (key, value) => !newUnderlying.keys.contains(key) && value.isEmpty + val newValues = newUnderlying.values.filterNot { case (key, value) => + !newUnderlying.keys.contains(key) && value.isEmpty } new ORMultiMap[A, B]( new ORMap(newUnderlying.keys, newValues, newUnderlying.zeroTag, newUnderlying.delta), @@ -90,31 +78,27 @@ final class ORMultiMap[A, B] private[akka] ( new ORMultiMap(underlying.merge(that.underlying), withValueDeltas) } else throw new IllegalArgumentException("Trying to merge two ORMultiMaps of different map sub-type") - /** - * Scala API: All entries of a multimap where keys are strings and values are sets. - */ + /** Scala API: All entries of a multimap where keys are strings and values are sets. */ def entries: Map[A, Set[B]] = if (withValueDeltas) - underlying.entries.collect { case (k, v) if underlying.keys.elements.contains(k) => k -> v.elements } else - underlying.entries.map { case (k, v) => k -> v.elements } + underlying.entries.collect { case (k, v) if underlying.keys.elements.contains(k) => k -> v.elements } + else + underlying.entries.map { case (k, v) => k -> v.elements } - /** - * Java API: All entries of a multimap where keys are strings and values are sets. - */ + /** Java API: All entries of a multimap where keys are strings and values are sets. */ def getEntries(): java.util.Map[A, java.util.Set[B]] = { import akka.util.ccompat.JavaConverters._ val result = new java.util.HashMap[A, java.util.Set[B]] if (withValueDeltas) - underlying.entries.foreach { - case (k, v) => if (underlying.keys.elements.contains(k)) result.put(k, v.elements.asJava) - } else + underlying.entries.foreach { case (k, v) => + if (underlying.keys.elements.contains(k)) result.put(k, v.elements.asJava) + } + else underlying.entries.foreach { case (k, v) => result.put(k, v.elements.asJava) } result } - /** - * Get the set associated with the key if there is one. 
- */ + /** Get the set associated with the key if there is one. */ def get(key: A): Option[Set[B]] = if (withValueDeltas && !underlying.keys.elements.contains(key)) None @@ -159,9 +143,7 @@ final class ORMultiMap[A, B] private[akka] ( put(node.uniqueAddress, key, value.asScala.toSet) } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def put(node: UniqueAddress, key: A, value: Set[B]): ORMultiMap[A, B] = { val newUnderlying = underlying.updated(node, key, ORSet.empty[B], valueDeltas = withValueDeltas) { existing => value.foldLeft(existing.clear()) { (s, element) => @@ -183,9 +165,7 @@ final class ORMultiMap[A, B] private[akka] ( */ def remove(node: SelfUniqueAddress, key: A): ORMultiMap[A, B] = remove(node.uniqueAddress, key) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def remove(node: UniqueAddress, key: A): ORMultiMap[A, B] = { if (withValueDeltas) { val u = underlying.updated(node, key, ORSet.empty[B], valueDeltas = true) { existing => @@ -207,9 +187,7 @@ final class ORMultiMap[A, B] private[akka] ( def addBindingBy(key: A, element: B)(implicit node: SelfUniqueAddress): ORMultiMap[A, B] = addBinding(node, key, element) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def addBinding(node: UniqueAddress, key: A, element: B): ORMultiMap[A, B] = { val newUnderlying = underlying.updated(node, key, ORSet.empty[B], valueDeltas = withValueDeltas)(_.add(node, element)) @@ -227,9 +205,7 @@ final class ORMultiMap[A, B] private[akka] ( def removeBindingBy(key: A, element: B)(implicit node: SelfUniqueAddress): ORMultiMap[A, B] = removeBinding(node, key, element) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def removeBinding(node: UniqueAddress, key: A, element: B): ORMultiMap[A, B] = { val newUnderlying = { val u = underlying.updated(node, key, ORSet.empty[B], valueDeltas = withValueDeltas)(_.remove(node, element)) @@ -256,9 +232,7 @@ final class 
ORMultiMap[A, B] private[akka] ( def replaceBindingBy(key: A, oldElement: B, newElement: B)(implicit node: SelfUniqueAddress): ORMultiMap[A, B] = replaceBinding(node, key, oldElement, newElement) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def replaceBinding( node: UniqueAddress, key: A, @@ -278,8 +252,8 @@ final class ORMultiMap[A, B] private[akka] ( if (withValueDeltas) { val newUnderlying = underlying.mergeDeltaRetainingDeletedValues(thatDelta) // Garbage collect the tombstones we no longer need, i.e. those that have Set() as a value. - val newValues = newUnderlying.values.filterNot { - case (key, value) => !newUnderlying.keys.contains(key) && value.isEmpty + val newValues = newUnderlying.values.filterNot { case (key, value) => + !newUnderlying.keys.contains(key) && value.isEmpty } new ORMultiMap[A, B]( new ORMap(newUnderlying.keys, newValues, newUnderlying.zeroTag, newUnderlying.delta), diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORSet.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORSet.scala index 2ee63194fec..1fe2fe0aab9 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORSet.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORSet.scala @@ -16,36 +16,26 @@ object ORSet { def empty[A]: ORSet[A] = _empty.asInstanceOf[ORSet[A]] def apply(): ORSet[Any] = _empty - /** - * Java API - */ + /** Java API */ def create[A](): ORSet[A] = empty[A] - /** - * Extract the [[ORSet#elements]]. - */ + /** Extract the [[ORSet#elements]]. */ def unapply[A](s: ORSet[A]): Option[Set[A]] = Some(s.elements) - /** - * Extract the [[ORSet#elements]] of an `ORSet`. - */ + /** Extract the [[ORSet#elements]] of an `ORSet`. 
*/ def unapply(a: ReplicatedData): Option[Set[Any]] = a match { case s: ORSet[Any] @unchecked => Some(s.elements) case _ => None } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] type Dot = VersionVector sealed trait DeltaOp extends ReplicatedDelta with RequiresCausalDeliveryOfDeltas with ReplicatedDataSerialization { type T = DeltaOp } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] sealed abstract class AtomicDeltaOp[A] extends DeltaOp with ReplicatedDeltaSize { def underlying: ORSet[A] override def zero: ORSet[A] = ORSet.empty @@ -92,9 +82,7 @@ object ORSet { } } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] final case class DeltaGroup[A](ops: immutable.IndexedSeq[DeltaOp]) extends DeltaOp with ReplicatedDeltaSize { @@ -167,64 +155,63 @@ object ORSet { mergeCommonKeys(commonKeys.iterator, lhs, rhs) private def mergeCommonKeys[A](commonKeys: Iterator[A], lhs: ORSet[A], rhs: ORSet[A]): Map[A, ORSet.Dot] = { - commonKeys.foldLeft(Map.empty[A, ORSet.Dot]) { - case (acc, k) => - val lhsDots = lhs.elementsMap(k) - val rhsDots = rhs.elementsMap(k) - (lhsDots, rhsDots) match { - case (OneVersionVector(n1, v1), OneVersionVector(n2, v2)) => - if (n1 == n2 && v1 == v2) - // one single common dot - acc.updated(k, lhsDots) - else { - // no common, lhsUniqueDots == lhsDots, rhsUniqueDots == rhsDots - val lhsKeep = ORSet.subtractDots(lhsDots, rhs.vvector) - val rhsKeep = ORSet.subtractDots(rhsDots, lhs.vvector) - val merged = lhsKeep.merge(rhsKeep) - // Perfectly possible that an item in both sets should be dropped - if (merged.isEmpty) acc - else acc.updated(k, merged) - } - case (ManyVersionVector(lhsVs), ManyVersionVector(rhsVs)) => - val commonDots = lhsVs.filter { - case (thisDotNode, v) => rhsVs.get(thisDotNode).contains(v) - } - val commonDotsKeys = commonDots.keys - val lhsUniqueDots = lhsVs -- commonDotsKeys - val rhsUniqueDots = rhsVs -- commonDotsKeys - val lhsKeep = 
ORSet.subtractDots(VersionVector(lhsUniqueDots), rhs.vvector) - val rhsKeep = ORSet.subtractDots(VersionVector(rhsUniqueDots), lhs.vvector) - val merged = lhsKeep.merge(rhsKeep).merge(VersionVector(commonDots)) + commonKeys.foldLeft(Map.empty[A, ORSet.Dot]) { case (acc, k) => + val lhsDots = lhs.elementsMap(k) + val rhsDots = rhs.elementsMap(k) + (lhsDots, rhsDots) match { + case (OneVersionVector(n1, v1), OneVersionVector(n2, v2)) => + if (n1 == n2 && v1 == v2) + // one single common dot + acc.updated(k, lhsDots) + else { + // no common, lhsUniqueDots == lhsDots, rhsUniqueDots == rhsDots + val lhsKeep = ORSet.subtractDots(lhsDots, rhs.vvector) + val rhsKeep = ORSet.subtractDots(rhsDots, lhs.vvector) + val merged = lhsKeep.merge(rhsKeep) // Perfectly possible that an item in both sets should be dropped if (merged.isEmpty) acc else acc.updated(k, merged) - case (ManyVersionVector(lhsVs), OneVersionVector(n2, v2)) => - val commonDots = lhsVs.filter { - case (n1, v1) => v1 == v2 && n1 == n2 - } - val commonDotsKeys = commonDots.keys - val lhsUniqueDots = lhsVs -- commonDotsKeys - val rhsUnique = if (commonDotsKeys.isEmpty) rhsDots else VersionVector.empty - val lhsKeep = ORSet.subtractDots(VersionVector(lhsUniqueDots), rhs.vvector) - val rhsKeep = ORSet.subtractDots(rhsUnique, lhs.vvector) - val merged = lhsKeep.merge(rhsKeep).merge(VersionVector(commonDots)) - // Perfectly possible that an item in both sets should be dropped - if (merged.isEmpty) acc - else acc.updated(k, merged) - case (OneVersionVector(n1, v1), ManyVersionVector(rhsVs)) => - val commonDots = rhsVs.filter { - case (n2, v2) => v1 == v2 && n1 == n2 - } - val commonDotsKeys = commonDots.keys - val lhsUnique = if (commonDotsKeys.isEmpty) lhsDots else VersionVector.empty - val rhsUniqueDots = rhsVs -- commonDotsKeys - val lhsKeep = ORSet.subtractDots(lhsUnique, rhs.vvector) - val rhsKeep = ORSet.subtractDots(VersionVector(rhsUniqueDots), lhs.vvector) - val merged = 
lhsKeep.merge(rhsKeep).merge(VersionVector(commonDots)) - // Perfectly possible that an item in both sets should be dropped - if (merged.isEmpty) acc - else acc.updated(k, merged) - } + } + case (ManyVersionVector(lhsVs), ManyVersionVector(rhsVs)) => + val commonDots = lhsVs.filter { case (thisDotNode, v) => + rhsVs.get(thisDotNode).contains(v) + } + val commonDotsKeys = commonDots.keys + val lhsUniqueDots = lhsVs -- commonDotsKeys + val rhsUniqueDots = rhsVs -- commonDotsKeys + val lhsKeep = ORSet.subtractDots(VersionVector(lhsUniqueDots), rhs.vvector) + val rhsKeep = ORSet.subtractDots(VersionVector(rhsUniqueDots), lhs.vvector) + val merged = lhsKeep.merge(rhsKeep).merge(VersionVector(commonDots)) + // Perfectly possible that an item in both sets should be dropped + if (merged.isEmpty) acc + else acc.updated(k, merged) + case (ManyVersionVector(lhsVs), OneVersionVector(n2, v2)) => + val commonDots = lhsVs.filter { case (n1, v1) => + v1 == v2 && n1 == n2 + } + val commonDotsKeys = commonDots.keys + val lhsUniqueDots = lhsVs -- commonDotsKeys + val rhsUnique = if (commonDotsKeys.isEmpty) rhsDots else VersionVector.empty + val lhsKeep = ORSet.subtractDots(VersionVector(lhsUniqueDots), rhs.vvector) + val rhsKeep = ORSet.subtractDots(rhsUnique, lhs.vvector) + val merged = lhsKeep.merge(rhsKeep).merge(VersionVector(commonDots)) + // Perfectly possible that an item in both sets should be dropped + if (merged.isEmpty) acc + else acc.updated(k, merged) + case (OneVersionVector(n1, v1), ManyVersionVector(rhsVs)) => + val commonDots = rhsVs.filter { case (n2, v2) => + v1 == v2 && n1 == n2 + } + val commonDotsKeys = commonDots.keys + val lhsUnique = if (commonDotsKeys.isEmpty) lhsDots else VersionVector.empty + val rhsUniqueDots = rhsVs -- commonDotsKeys + val lhsKeep = ORSet.subtractDots(lhsUnique, rhs.vvector) + val rhsKeep = ORSet.subtractDots(VersionVector(rhsUniqueDots), lhs.vvector) + val merged = lhsKeep.merge(rhsKeep).merge(VersionVector(commonDots)) + // Perfectly 
possible that an item in both sets should be dropped + if (merged.isEmpty) acc + else acc.updated(k, merged) + } } } @@ -244,16 +231,15 @@ object ORSet { elementsMap: Map[A, ORSet.Dot], vvector: VersionVector, accumulator: Map[A, ORSet.Dot]): Map[A, ORSet.Dot] = { - keys.foldLeft(accumulator) { - case (acc, k) => - val dots = elementsMap(k) - if (vvector > dots || vvector == dots) - acc - else { - // Optimise the set of stored dots to include only those unseen - val newDots = subtractDots(dots, vvector) - acc.updated(k, newDots) - } + keys.foldLeft(accumulator) { case (acc, k) => + val dots = elementsMap(k) + if (vvector > dots || vvector == dots) + acc + else { + // Optimise the set of stored dots to include only those unseen + val newDots = subtractDots(dots, vvector) + acc.updated(k, newDots) + } } } } @@ -300,14 +286,10 @@ final class ORSet[A] private[akka] ( type T = ORSet[A] type D = ORSet.DeltaOp - /** - * Scala API - */ + /** Scala API */ def elements: Set[A] = elementsMap.keySet - /** - * Java API - */ + /** Java API */ def getElements(): java.util.Set[A] = { import akka.util.ccompat.JavaConverters._ elements.asJava @@ -325,9 +307,7 @@ final class ORSet[A] private[akka] ( /** Adds an element to the set. 
*/ def add(node: SelfUniqueAddress, element: A): ORSet[A] = add(node.uniqueAddress, element) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def add(node: UniqueAddress, element: A): ORSet[A] = { val newVvector = vvector + node val newDot = VersionVector(node, newVvector.versionAt(node)) @@ -354,9 +334,7 @@ final class ORSet[A] private[akka] ( */ def remove(node: SelfUniqueAddress, element: A): ORSet[A] = remove(node.uniqueAddress, element) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def remove(node: UniqueAddress, element: A): ORSet[A] = { val deltaDot = VersionVector(node, vvector.versionAt(node)) val rmOp = ORSet.RemoveDeltaOp(new ORSet(Map(element -> deltaDot), vvector)) @@ -375,9 +353,7 @@ final class ORSet[A] private[akka] ( */ def clear(@unused node: SelfUniqueAddress): ORSet[A] = clear() - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def clear(): ORSet[A] = { val newFullState = new ORSet[A](elementsMap = Map.empty, vvector) val clearOp = ORSet.FullStateDeltaOp(newFullState) @@ -417,7 +393,8 @@ final class ORSet[A] private[akka] ( val entries00 = ORSet.mergeCommonKeys(commonKeys, this, that) val entries0 = if (addDeltaOp) - entries00 ++ this.elementsMap.filter { case (elem, _) => !that.elementsMap.contains(elem) } else { + entries00 ++ this.elementsMap.filter { case (elem, _) => !that.elementsMap.contains(elem) } + else { val thisUniqueKeys = this.elementsMap.keysIterator.filterNot(that.elementsMap.contains) ORSet.mergeDisjointKeys(thisUniqueKeys, this.elementsMap, that.vvector, entries00) } @@ -455,12 +432,11 @@ final class ORSet[A] private[akka] ( def deleteDotsNodes = deleteDots.map { case (dotNode, _) => dotNode } val newElementsMap = { val thisDotOption = this.elementsMap.get(elem) - val deleteDotsAreGreater = deleteDots.forall { - case (dotNode, dotV) => - thisDotOption match { - case Some(thisDot) => thisDot.versionAt(dotNode) <= dotV - case None => false - } + 
val deleteDotsAreGreater = deleteDots.forall { case (dotNode, dotV) => + thisDotOption match { + case Some(thisDot) => thisDot.versionAt(dotNode) <= dotV + case None => false + } } if (deleteDotsAreGreater) { thisDotOption match { @@ -491,27 +467,25 @@ final class ORSet[A] private[akka] ( vvector.needPruningFrom(removedNode) override def prune(removedNode: UniqueAddress, collapseInto: UniqueAddress): ORSet[A] = { - val pruned = elementsMap.foldLeft(Map.empty[A, ORSet.Dot]) { - case (acc, (elem, dot)) => - if (dot.needPruningFrom(removedNode)) acc.updated(elem, dot.prune(removedNode, collapseInto)) - else acc + val pruned = elementsMap.foldLeft(Map.empty[A, ORSet.Dot]) { case (acc, (elem, dot)) => + if (dot.needPruningFrom(removedNode)) acc.updated(elem, dot.prune(removedNode, collapseInto)) + else acc } if (pruned.isEmpty) copy(vvector = vvector.prune(removedNode, collapseInto)) else { // re-add elements that were pruned, to bump dots to right vvector val newSet = new ORSet(elementsMap = elementsMap ++ pruned, vvector = vvector.prune(removedNode, collapseInto)) - pruned.keys.foldLeft(newSet) { - case (s, elem) => s.add(collapseInto, elem) + pruned.keys.foldLeft(newSet) { case (s, elem) => + s.add(collapseInto, elem) } } } override def pruningCleanup(removedNode: UniqueAddress): ORSet[A] = { - val updated = elementsMap.foldLeft(elementsMap) { - case (acc, (elem, dot)) => - if (dot.needPruningFrom(removedNode)) acc.updated(elem, dot.pruningCleanup(removedNode)) - else acc + val updated = elementsMap.foldLeft(elementsMap) { case (acc, (elem, dot)) => + if (dot.needPruningFrom(removedNode)) acc.updated(elem, dot.pruningCleanup(removedNode)) + else acc } new ORSet(updated, vvector.pruningCleanup(removedNode)) } diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounter.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounter.scala index f4e3e94529e..4779f8b1887 100644 --- 
a/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounter.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounter.scala @@ -14,14 +14,10 @@ object PNCounter { val empty: PNCounter = new PNCounter(GCounter.empty, GCounter.empty) def apply(): PNCounter = empty - /** - * Java API - */ + /** Java API */ def create(): PNCounter = empty - /** - * Extract the [[GCounter#value]]. - */ + /** Extract the [[GCounter#value]]. */ def unapply(c: PNCounter): Option[BigInt] = Some(c.value) } @@ -49,14 +45,10 @@ final class PNCounter private[akka] (private[akka] val increments: GCounter, pri type T = PNCounter type D = PNCounter - /** - * Scala API: Current total value of the counter. - */ + /** Scala API: Current total value of the counter. */ def value: BigInt = increments.value - decrements.value - /** - * Java API: Current total value of the counter. - */ + /** Java API: Current total value of the counter. */ def getValue: BigInteger = value.bigInteger /** diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounterMap.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounterMap.scala index 393cd505cfe..28ff8318459 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounterMap.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounterMap.scala @@ -13,9 +13,7 @@ import akka.cluster.ddata.ORMap._ object PNCounterMap { - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] case object PNCounterMapTag extends ZeroTag { override def zero: DeltaReplicatedData = PNCounterMap.empty override final val value: Int = 1 @@ -24,14 +22,10 @@ object PNCounterMap { def empty[A]: PNCounterMap[A] = new PNCounterMap(new ORMap(ORSet.empty, Map.empty, zeroTag = PNCounterMapTag)) def apply[A](): PNCounterMap[A] = empty - /** - * Java API - */ + /** Java API */ def create[A](): PNCounterMap[A] = empty - /** - * Extract the [[PNCounterMap#entries]]. 
- */ + /** Extract the [[PNCounterMap#entries]]. */ def unapply[A](m: PNCounterMap[A]): Option[Map[A, BigInt]] = Some(m.entries) } @@ -58,14 +52,10 @@ final class PNCounterMap[A] private[akka] (private[akka] val underlying: ORMap[A underlying.entries.map { case (k, c) => k -> c.value.bigInteger }.asJava } - /** - * Scala API: The count for a key - */ + /** Scala API: The count for a key */ def get(key: A): Option[BigInt] = underlying.get(key).map(_.value) - /** - * Java API: The count for a key, or `null` if it doesn't exist - */ + /** Java API: The count for a key, or `null` if it doesn't exist */ def getValue(key: A): BigInteger = underlying.get(key).map(_.value.bigInteger).orNull def contains(key: A): Boolean = underlying.contains(key) @@ -95,9 +85,7 @@ final class PNCounterMap[A] private[akka] (private[akka] val underlying: ORMap[A def increment(node: SelfUniqueAddress, key: A, delta: Long): PNCounterMap[A] = increment(node.uniqueAddress, key, delta) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def increment(node: UniqueAddress, key: A, delta: Long): PNCounterMap[A] = new PNCounterMap(underlying.updated(node, key, PNCounter())(_.increment(node, delta))) @@ -117,9 +105,7 @@ final class PNCounterMap[A] private[akka] (private[akka] val underlying: ORMap[A def decrement(node: SelfUniqueAddress, key: A, delta: Long): PNCounterMap[A] = decrement(node.uniqueAddress, key, delta) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def decrement(node: UniqueAddress, key: A, delta: Long): PNCounterMap[A] = { new PNCounterMap(underlying.updated(node, key, PNCounter())(_.decrement(node, delta))) } @@ -132,9 +118,7 @@ final class PNCounterMap[A] private[akka] (private[akka] val underlying: ORMap[A def remove(key: A)(implicit node: SelfUniqueAddress): PNCounterMap[A] = remove(node.uniqueAddress, key) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def remove(node: UniqueAddress, key: A): 
PNCounterMap[A] = new PNCounterMap(underlying.remove(node, key)) diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/PruningState.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/PruningState.scala index b0c4fe40aa1..73e1b35ef5b 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/PruningState.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/PruningState.scala @@ -10,9 +10,7 @@ import akka.cluster.Member import akka.cluster.UniqueAddress import akka.util.unused -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object PruningState { final case class PruningInitialized(owner: UniqueAddress, seen: Set[Address]) extends PruningState { override def addSeen(node: Address): PruningState = { @@ -28,9 +26,7 @@ import akka.util.unused } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] sealed trait PruningState { import PruningState._ diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ReplicatedData.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ReplicatedData.scala index 5aeb7f1d9c2..17f388a6675 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ReplicatedData.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ReplicatedData.scala @@ -39,9 +39,7 @@ trait ReplicatedData { */ type T <: ReplicatedData - /** - * Monotonic merge function. - */ + /** Monotonic merge function. */ def merge(that: T): T } @@ -96,9 +94,7 @@ trait DeltaReplicatedData extends ReplicatedData { } -/** - * The delta must implement this type. - */ +/** The delta must implement this type. */ trait ReplicatedDelta extends ReplicatedData { /** @@ -142,15 +138,11 @@ abstract class AbstractReplicatedData[A <: AbstractReplicatedData[A]] extends Re override type T = ReplicatedData - /** - * Delegates to [[#mergeData]], which must be implemented by subclass. 
- */ + /** Delegates to [[#mergeData]], which must be implemented by subclass. */ final override def merge(that: ReplicatedData): ReplicatedData = mergeData(that.asInstanceOf[A]) - /** - * Java API: Monotonic merge function. - */ + /** Java API: Monotonic merge function. */ def mergeData(that: A): A } @@ -168,9 +160,7 @@ abstract class AbstractDeltaReplicatedData[A <: AbstractDeltaReplicatedData[A, B override type D = ReplicatedDelta - /** - * Delegates to [[#deltaData]], which must be implemented by subclass. - */ + /** Delegates to [[#deltaData]], which must be implemented by subclass. */ final override def delta: Option[ReplicatedDelta] = deltaData.asScala @@ -185,9 +175,7 @@ abstract class AbstractDeltaReplicatedData[A <: AbstractDeltaReplicatedData[A, B */ def deltaData: Optional[B] - /** - * Delegates to [[#mergeDeltaData]], which must be implemented by subclass. - */ + /** Delegates to [[#mergeDeltaData]], which must be implemented by subclass. */ final override def mergeDelta(that: ReplicatedDelta): ReplicatedData = mergeDeltaData(that.asInstanceOf[B]) diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala index c7efd94e9a7..e174175b236 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala @@ -108,9 +108,7 @@ object ReplicatorSettings { expiryKeys = parseExpiry(config)) } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def roleOption(role: String): Option[String] = if (role == "") None else Option(role) @@ -123,9 +121,7 @@ object ReplicatorSettings { modifier.map(s => s + name.take(1).toUpperCase + name.drop(1)).getOrElse(name) } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def parseExpiry(config: Config): Map[KeyId, FiniteDuration] = { import akka.util.ccompat.JavaConverters._ val 
expiryConfig = config.getConfig("expire-keys-after-inactivity") @@ -413,9 +409,7 @@ final class ReplicatorSettings( @varargs def withRoles(roles: String*): ReplicatorSettings = copy(roles = roles.toSet) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def withRoles(roles: Set[String]): ReplicatorSettings = copy(roles = roles) // for backwards compatibility @@ -451,15 +445,11 @@ final class ReplicatorSettings( def withDurableStoreProps(durableStoreProps: Props): ReplicatorSettings = copy(durableStoreProps = Right(durableStoreProps)) - /** - * Scala API - */ + /** Scala API */ def withDurableKeys(durableKeys: Set[KeyId]): ReplicatorSettings = copy(durableKeys = durableKeys) - /** - * Java API - */ + /** Java API */ def withDurableKeys(durableKeys: java.util.Set[String]): ReplicatorSettings = { import akka.util.ccompat.JavaConverters._ withDurableKeys(durableKeys.asScala.toSet) @@ -477,15 +467,11 @@ final class ReplicatorSettings( def withLogDataSizeExceeding(logDataSizeExceeding: Int): ReplicatorSettings = copy(logDataSizeExceeding = Some(logDataSizeExceeding)) - /** - * Scala API - */ + /** Scala API */ def withExpiryKeys(expiryKeys: Map[KeyId, FiniteDuration]): ReplicatorSettings = copy(expiryKeys = expiryKeys) - /** - * Java API - */ + /** Java API */ def withExpiryKeys(expiryKeys: java.util.Map[String, java.time.Duration]): ReplicatorSettings = { import akka.util.ccompat.JavaConverters._ withExpiryKeys(expiryKeys.asScala.iterator.map { case (key, value) => key -> value.asScala }.toMap) @@ -530,9 +516,7 @@ final class ReplicatorSettings( object Replicator { private type Timestamp = Long - /** - * Factory method for the [[akka.actor.Props]] of the [[Replicator]] actor. - */ + /** Factory method for the [[akka.actor.Props]] of the [[Replicator]] actor. 
*/ def props(settings: ReplicatorSettings): Props = { require( settings.durableKeys.isEmpty || (settings.durableStoreProps != Right(Props.empty)), @@ -551,17 +535,13 @@ object Replicator { final case class ReadFrom(n: Int, timeout: FiniteDuration) extends ReadConsistency { require(n >= 2, "ReadFrom n must be >= 2, use ReadLocal for n=1") - /** - * Java API - */ + /** Java API */ def this(n: Int, timeout: java.time.Duration) = this(n, timeout.asScala) } final case class ReadMajority(timeout: FiniteDuration, minCap: Int = DefaultMajorityMinCap) extends ReadConsistency { def this(timeout: FiniteDuration) = this(timeout, DefaultMajorityMinCap) - /** - * Java API - */ + /** Java API */ def this(timeout: java.time.Duration) = this(timeout.asScala, DefaultMajorityMinCap) } @@ -573,16 +553,12 @@ object Replicator { final case class ReadMajorityPlus(timeout: FiniteDuration, additional: Int, minCap: Int = DefaultMajorityMinCap) extends ReadConsistency { - /** - * Java API - */ + /** Java API */ def this(timeout: java.time.Duration, additional: Int) = this(timeout.asScala, additional, DefaultMajorityMinCap) } final case class ReadAll(timeout: FiniteDuration) extends ReadConsistency { - /** - * Java API - */ + /** Java API */ def this(timeout: java.time.Duration) = this(timeout.asScala) } @@ -595,18 +571,14 @@ object Replicator { final case class WriteTo(n: Int, timeout: FiniteDuration) extends WriteConsistency { require(n >= 2, "WriteTo n must be >= 2, use WriteLocal for n=1") - /** - * Java API - */ + /** Java API */ def this(n: Int, timeout: java.time.Duration) = this(n, timeout.asScala) } final case class WriteMajority(timeout: FiniteDuration, minCap: Int = DefaultMajorityMinCap) extends WriteConsistency { def this(timeout: FiniteDuration) = this(timeout, DefaultMajorityMinCap) - /** - * Java API - */ + /** Java API */ def this(timeout: java.time.Duration) = this(timeout.asScala, DefaultMajorityMinCap) } @@ -618,42 +590,28 @@ object Replicator { final case class 
WriteMajorityPlus(timeout: FiniteDuration, additional: Int, minCap: Int = DefaultMajorityMinCap) extends WriteConsistency { - /** - * Java API - */ + /** Java API */ def this(timeout: java.time.Duration, additional: Int) = this(timeout.asScala, additional, DefaultMajorityMinCap) } final case class WriteAll(timeout: FiniteDuration) extends WriteConsistency { - /** - * Java API - */ + /** Java API */ def this(timeout: java.time.Duration) = this(timeout.asScala) } - /** - * Java API: The `ReadLocal` instance - */ + /** Java API: The `ReadLocal` instance */ def readLocal = ReadLocal - /** - * Java API: The `WriteLocal` instance - */ + /** Java API: The `WriteLocal` instance */ def writeLocal = WriteLocal - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] case object GetKeyIds - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] final case class GetKeyIdsResult(keyIds: Set[KeyId]) { - /** - * Java API - */ + /** Java API */ def getKeyIds: java.util.Set[String] = { import akka.util.ccompat.JavaConverters._ keyIds.asJava @@ -676,14 +634,10 @@ object Replicator { extends Command[A] with ReplicatorMessage { - /** - * Java API: `Get` value from local `Replicator`, i.e. `ReadLocal` consistency. - */ + /** Java API: `Get` value from local `Replicator`, i.e. `ReadLocal` consistency. */ def this(key: Key[A], consistency: ReadConsistency) = this(key, consistency, None) - /** - * Java API: `Get` value from local `Replicator`, i.e. `ReadLocal` consistency. - */ + /** Java API: `Get` value from local `Replicator`, i.e. `ReadLocal` consistency. */ def this(key: Key[A], consistency: ReadConsistency, request: Optional[Any]) = this(key, consistency, Option(request.orElse(null))) @@ -696,9 +650,7 @@ object Replicator { def getRequest: Optional[Any] = Optional.ofNullable(request.orNull) } - /** - * Reply from `Get`. The data value is retrieved with [[#get]] using the typed key. - */ + /** Reply from `Get`. 
The data value is retrieved with [[#get]] using the typed key. */ final case class GetSuccess[A <: ReplicatedData](key: Key[A], request: Option[Any])(data: A) extends GetResponse[A] with ReplicatorMessage { @@ -712,9 +664,7 @@ object Replicator { data.asInstanceOf[T] } - /** - * The data value. Use [[#get]] to get the fully typed value. - */ + /** The data value. Use [[#get]] to get the fully typed value. */ def dataValue: A = data } final case class NotFound[A <: ReplicatedData](key: Key[A], request: Option[Any]) @@ -729,9 +679,7 @@ object Replicator { extends GetResponse[A] with ReplicatorMessage - /** - * The [[Get]] request couldn't be performed because the entry has been deleted. - */ + /** The [[Get]] request couldn't be performed because the entry has been deleted. */ final case class GetDataDeleted[A <: ReplicatedData](key: Key[A], request: Option[Any]) extends GetResponse[A] /** @@ -764,9 +712,7 @@ object Replicator { */ final case class Unsubscribe[A <: ReplicatedData](key: Key[A], subscriber: ActorRef) extends ReplicatorMessage - /** - * @see [[Replicator.Subscribe]] - */ + /** @see [[Replicator.Subscribe]] */ sealed trait SubscribeResponse[A <: ReplicatedData] extends NoSerializationVerificationNeeded { def key: Key[A] } @@ -789,20 +735,14 @@ object Replicator { data.asInstanceOf[T] } - /** - * The data value. Use [[#get]] to get the fully typed value. - */ + /** The data value. Use [[#get]] to get the fully typed value. 
*/ def dataValue: A = data } - /** - * @see [[Replicator.Subscribe]] - */ + /** @see [[Replicator.Subscribe]] */ final case class Deleted[A <: ReplicatedData](key: Key[A]) extends SubscribeResponse[A] - /** - * @see [[Replicator.Subscribe]] - */ + /** @see [[Replicator.Subscribe]] */ final case class Expired[A <: ReplicatedData](key: Key[A]) extends SubscribeResponse[A] object Update { @@ -907,9 +847,7 @@ object Replicator { */ final case class UpdateTimeout[A <: ReplicatedData](key: Key[A], request: Option[Any]) extends UpdateFailure[A] - /** - * The [[Update]] couldn't be performed because the entry has been deleted. - */ + /** The [[Update]] couldn't be performed because the entry has been deleted. */ final case class UpdateDataDeleted[A <: ReplicatedData](key: Key[A], request: Option[Any]) extends UpdateResponse[A] /** @@ -965,7 +903,7 @@ object Replicator { def key: Key[A] def request: Option[Any] - /** Java API*/ + /** Java API */ def getRequest: Optional[Any] = Optional.ofNullable(request.orNull) } final case class DeleteSuccess[A <: ReplicatedData](key: Key[A], request: Option[Any]) extends DeleteResponse[A] @@ -984,14 +922,10 @@ object Replicator { */ case object GetReplicaCount - /** - * Java API: The `GetReplicaCount` instance - */ + /** Java API: The `GetReplicaCount` instance */ def getReplicaCount = GetReplicaCount - /** - * Current number of replicas. Reply to `GetReplicaCount`. - */ + /** Current number of replicas. Reply to `GetReplicaCount`. 
*/ final case class ReplicaCount(n: Int) /** @@ -1000,9 +934,7 @@ object Replicator { */ case object FlushChanges - /** - * Java API: The `FlushChanges` instance - */ + /** Java API: The `FlushChanges` instance */ def flushChanges = FlushChanges /** @@ -1011,9 +943,7 @@ object Replicator { */ trait ReplicatorMessage extends Serializable - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] object Internal { case object GossipTick @@ -1047,9 +977,7 @@ object Replicator { val LazyDigest: Digest = ByteString(0) val NotFoundDigest: Digest = ByteString(-1) - /** - * The `DataEnvelope` wraps a data entry and carries state of the pruning process for the entry. - */ + /** The `DataEnvelope` wraps a data entry and carries state of the pruning process for the entry. */ final case class DataEnvelope( data: ReplicatedData, pruning: Map[UniqueAddress, PruningState] = Map.empty, @@ -1104,14 +1032,13 @@ object Replicator { if (other.data == DeletedData) DeletedEnvelope else { val mergedPruning = - pruning.foldLeft(other.pruning) { - case (acc, (key, thisValue)) => - acc.get(key) match { - case None => - acc.updated(key, thisValue) - case Some(thatValue) => - acc.updated(key, thisValue.merge(thatValue)) - } + pruning.foldLeft(other.pruning) { case (acc, (key, thisValue)) => + acc.get(key) match { + case None => + acc.updated(key, thisValue) + case Some(thatValue) => + acc.updated(key, thisValue.merge(thatValue)) + } } val filteredMergedPruning = { if (mergedPruning.isEmpty) mergedPruning @@ -1168,11 +1095,10 @@ object Replicator { def addSeen(node: Address): DataEnvelope = { var changed = false - val newRemovedNodePruning = pruning.map { - case (removed, pruningState) => - val newPruningState = pruningState.addSeen(node) - changed = (newPruningState ne pruningState) || changed - (removed, newPruningState) + val newRemovedNodePruning = pruning.map { case (removed, pruningState) => + val newPruningState = pruningState.addSeen(node) + changed = (newPruningState 
ne pruningState) || changed + (removed, newPruningState) } if (changed) copy(pruning = newRemovedNodePruning) else this @@ -1199,10 +1125,10 @@ object Replicator { extends ReplicatorMessage with DestinationSystemUid { override def toString: String = - (digests - .map { - case (key, (bytes, _)) => key + " -> " + bytes.map(byte => f"$byte%02x").mkString("") - }) + digests + .map { case (key, (bytes, _)) => + key + " -> " + bytes.map(byte => f"$byte%02x").mkString("") + } .mkString("Status(", ", ", ")") } final case class Gossip( @@ -1455,8 +1381,8 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog require(!cluster.isTerminated, "Cluster node must not be terminated") require( roles.subsetOf(cluster.selfRoles), - s"This cluster member [$selfAddress] with roles [${cluster.selfRoles - .mkString(", ")}] doesn't have all the roles [${roles.mkString(", ")}]") + s"This cluster member [$selfAddress] with roles [${cluster.selfRoles.mkString( + ", ")}] doesn't have all the roles [${roles.mkString(", ")}]") private val payloadSizeAggregator = { val sizeExceeding = settings.logDataSizeExceeding.getOrElse(Int.MaxValue) @@ -1466,7 +1392,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog new PayloadSizeAggregator(log, sizeExceeding, maxFrameSize) } - //Start periodic gossip to random nodes in cluster + // Start periodic gossip to random nodes in cluster import context.dispatcher val gossipTask = context.system.scheduler.scheduleWithFixedDelay(gossipInterval, gossipInterval, self, GossipTick) val notifyTask = @@ -1668,10 +1594,9 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog def unstashAll(): Unit = { val originalReplyTo = replyTo - stash.foreach { - case (msg, snd) => - replyTo = snd - normalReceive.applyOrElse(msg, unhandled) + stash.foreach { case (msg, snd) => + replyTo = snd + normalReceive.applyOrElse(msg, unhandled) } stash = Vector.empty replyTo = originalReplyTo @@ 
-1680,14 +1605,13 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog { case LoadData(data) => count += data.size - data.foreach { - case (key, d) => - write(key, d.dataEnvelope) match { - case Some(newEnvelope) => - if (newEnvelope ne d.dataEnvelope) - durableStore ! Store(key, new DurableDataEnvelope(newEnvelope), None) - case None => - } + data.foreach { case (key, d) => + write(key, d.dataEnvelope) match { + case Some(newEnvelope) => + if (newEnvelope ne d.dataEnvelope) + durableStore ! Store(key, new DurableDataEnvelope(newEnvelope), None) + case None => + } } case LoadAllCompleted => log.debug( @@ -2094,9 +2018,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog } } - /** - * @return SHA-1 digest of the serialized data, and the size of the serialized data - */ + /** @return SHA-1 digest of the serialized data, and the size of the serialized data */ def digest(envelope: DataEnvelope): (Digest, Int) = if (envelope.data == DeletedData) (DeletedDigest, 0) else { @@ -2150,10 +2072,9 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog def updateUsedTimestamp(key: KeyId, timestamp: Timestamp): Unit = { if (expiryEnabled && timestamp != 0) { - dataEntries.get(key).foreach { - case (existingEnvelope, existingDigest, existingTimestamp) => - if (timestamp > existingTimestamp) - dataEntries = dataEntries.updated(key, (existingEnvelope, existingDigest, timestamp)) + dataEntries.get(key).foreach { case (existingEnvelope, existingDigest, existingTimestamp) => + if (timestamp > existingTimestamp) + dataEntries = dataEntries.updated(key, (existingEnvelope, existingDigest, timestamp)) } } } @@ -2183,7 +2104,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog case (k, r) if isWildcard(k) && keyId.startsWith(dropWildcard(k)) => r.withId(keyId) } .getOrElse(throw new IllegalStateException(s"Subscription notification of [$keyId], but no matching " + - 
s"subscription key in [${subscriptionKeys.keysIterator.mkString(", ")}]")) + s"subscription key in [${subscriptionKeys.keysIterator.mkString(", ")}]")) } getData(keyId) match { case Some(envelope) => @@ -2231,11 +2152,10 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog } def receiveDeltaPropagationTick(): Unit = { - deltaPropagationSelector.collectPropagations().foreach { - case (node, deltaPropagation) => - // TODO split it to several DeltaPropagation if too many entries - if (deltaPropagation.deltas.nonEmpty) - replica(node) ! deltaPropagation + deltaPropagationSelector.collectPropagations().foreach { case (node, deltaPropagation) => + // TODO split it to several DeltaPropagation if too many entries + if (deltaPropagation.deltas.nonEmpty) + replica(node) ! deltaPropagation } if (deltaPropagationSelector.propagationCount % deltaPropagationSelector.gossipIntervalDivisor == 0) @@ -2316,9 +2236,14 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog val to = replica(address) val toSystemUid = Some(address.longUid) if (dataEntries.size <= maxDeltaElements) { - val status = Status(dataEntries.map { - case (key, (_, _, usedTimestamp)) => (key, (getDigest(key), usedTimestamp)) - }, chunk = 0, totChunks = 1, toSystemUid, selfFromSystemUid) + val status = Status( + dataEntries.map { case (key, (_, _, usedTimestamp)) => + (key, (getDigest(key), usedTimestamp)) + }, + chunk = 0, + totChunks = 1, + toSystemUid, + selfFromSystemUid) to ! 
status } else { val totChunks = dataEntries.size / maxDeltaElements @@ -2330,10 +2255,15 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog statusTotChunks = totChunks } val chunk = (statusCount % totChunks).toInt - val status = Status(dataEntries.collect { - case (key, (_, _, usedTimestamp)) if math.abs(key.hashCode % totChunks) == chunk => - (key, (getDigest(key), usedTimestamp)) - }, chunk, totChunks, toSystemUid, selfFromSystemUid) + val status = Status( + dataEntries.collect { + case (key, (_, _, usedTimestamp)) if math.abs(key.hashCode % totChunks) == chunk => + (key, (getDigest(key), usedTimestamp)) + }, + chunk, + totChunks, + toSystemUid, + selfFromSystemUid) to ! status } } @@ -2354,16 +2284,15 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog log.debug( "Received gossip status from [{}], chunk [{}] of [{}] containing [{}].", replyTo.path.address, - (chunk + 1), + chunk + 1, totChunks, otherDigests.keys.mkString(", ")) // update the usedTimestamp when needed if (expiryEnabled) { - otherDigests.foreach { - case (key, (_, usedTimestamp)) => - updateUsedTimestamp(key, usedTimestamp) - // if we don't have the key it will be updated with the full Gossip + otherDigests.foreach { case (key, (_, usedTimestamp)) => + updateUsedTimestamp(key, usedTimestamp) + // if we don't have the key it will be updated with the full Gossip } } @@ -2466,20 +2395,19 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog if (log.isDebugEnabled) log.debug("Received gossip from [{}], containing [{}].", replyTo.path.address, updatedData.keys.mkString(", ")) var replyKeys = Set.empty[KeyId] - updatedData.foreach { - case (key, (envelope, usedTimestamp)) => - if (!isExpired(key, usedTimestamp)) { - val hadData = dataEntries.contains(key) - writeAndStore(key, envelope, reply = false) - updateUsedTimestamp(key, usedTimestamp) - - if (sendBack) getData(key) match { - case Some(d) => - if 
(hadData || d.pruning.nonEmpty) - replyKeys += key - case None => - } + updatedData.foreach { case (key, (envelope, usedTimestamp)) => + if (!isExpired(key, usedTimestamp)) { + val hadData = dataEntries.contains(key) + writeAndStore(key, envelope, reply = false) + updateUsedTimestamp(key, usedTimestamp) + + if (sendBack) getData(key) match { + case Some(d) => + if (hadData || d.pruning.nonEmpty) + replyKeys += key + case None => } + } } if (sendBack && replyKeys.nonEmpty) { createGossipMessages(replyKeys, sendBack = false, fromSystemUid).foreach { g => @@ -2508,13 +2436,13 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog } def hasSubscriber(subscriber: ActorRef): Boolean = - subscribers.exists { case (_, s) => s.contains(subscriber) } || + subscribers.exists { case (_, s) => s.contains(subscriber) } || wildcardSubscribers.exists { case (_, s) => s.contains(subscriber) } || - newSubscribers.exists { case (_, s) => s.contains(subscriber) } + newSubscribers.exists { case (_, s) => s.contains(subscriber) } private def hasSubscriber(keyId: KeyId): Boolean = - subscribers.contains(keyId) || (wildcardSubscribers.nonEmpty && wildcardSubscribers.exists { - case (k, _) => keyId.startsWith(k) + subscribers.contains(keyId) || (wildcardSubscribers.nonEmpty && wildcardSubscribers.exists { case (k, _) => + keyId.startsWith(k) }) private def getSubscribersIterator(keyId: KeyId): Iterator[ActorRef] = { @@ -2768,9 +2696,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ReadWriteAggregator { case object SendToSecondary val MaxSecondaryNodes = 10 @@ -2781,9 +2707,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] abstract class ReadWriteAggregator extends Actor { import ReadWriteAggregator._ @@ -2830,9 +2754,7 @@ final 
class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object WriteAggregator { def props( key: KeyR, @@ -2861,9 +2783,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog durable)).withDeploy(Deploy.local) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class WriteAggregator( key: KeyR, envelope: Replicator.Internal.DataEnvelope, @@ -2988,9 +2908,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ReadAggregator { def props( key: KeyR, @@ -3008,9 +2926,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class ReadAggregator( key: KeyR, consistency: Replicator.ReadConsistency, @@ -3101,7 +3017,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog replyTo.tell(replyMsg, context.parent) context.stop(self) case _: ReadResult => - //collect late replies + // collect late replies remaining -= sender().path.address case SendToSecondary => case ReceiveTimeout => diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/VersionVector.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/VersionVector.scala index e7a5e6e1d90..0afd20326ff 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/VersionVector.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/VersionVector.scala @@ -12,9 +12,7 @@ import scala.collection.immutable.TreeMap import akka.annotation.InternalApi import akka.cluster.UniqueAddress -/** - * VersionVector module with helper classes and methods. - */ +/** VersionVector module with helper classes and methods. 
*/ object VersionVector { private val emptyVersions: TreeMap[UniqueAddress, Long] = TreeMap.empty @@ -35,9 +33,7 @@ object VersionVector { else if (versions.tail.isEmpty) apply(versions.head._1, versions.head._2) else apply(emptyVersions ++ versions) - /** - * Java API - */ + /** Java API */ def create(): VersionVector = empty sealed trait Ordering @@ -46,29 +42,19 @@ object VersionVector { case object Same extends Ordering case object Concurrent extends Ordering - /** - * Marker to ensure that we do a full order comparison instead of bailing out early. - */ + /** Marker to ensure that we do a full order comparison instead of bailing out early. */ private case object FullOrder extends Ordering - /** - * Java API: The `VersionVector.After` instance - */ + /** Java API: The `VersionVector.After` instance */ def AfterInstance = After - /** - * Java API: The `VersionVector.Before` instance - */ + /** Java API: The `VersionVector.Before` instance */ def BeforeInstance = Before - /** - * Java API: The `VersionVector.Same` instance - */ + /** Java API: The `VersionVector.Same` instance */ def SameInstance = Same - /** - * Java API: The `VersionVector.Concurrent` instance - */ + /** Java API: The `VersionVector.Concurrent` instance */ def ConcurrentInstance = Concurrent /** INTERNAL API */ @@ -78,9 +64,7 @@ object VersionVector { val counter = new AtomicLong(1L) } - /** - * Marker to signal that we have reached the end of a version vector. - */ + /** Marker to signal that we have reached the end of a version vector. */ private val cmpEndMarker = (null, Timestamp.EndMarker) } @@ -104,9 +88,7 @@ sealed abstract class VersionVector extends ReplicatedData with ReplicatedDataSe import VersionVector._ - /** - * Increment the version for the node passed as argument. Returns a new VersionVector. - */ + /** Increment the version for the node passed as argument. Returns a new VersionVector. 
*/ def :+(node: SelfUniqueAddress): VersionVector = increment(node) /** @@ -115,16 +97,12 @@ sealed abstract class VersionVector extends ReplicatedData with ReplicatedDataSe */ @InternalApi private[akka] def +(node: UniqueAddress): VersionVector = increment(node) - /** - * Increment the version for the node passed as argument. Returns a new VersionVector. - */ + /** Increment the version for the node passed as argument. Returns a new VersionVector. */ def increment(node: SelfUniqueAddress): VersionVector = increment(node.uniqueAddress) def isEmpty: Boolean - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def size: Int /** @@ -133,34 +111,22 @@ sealed abstract class VersionVector extends ReplicatedData with ReplicatedDataSe */ @InternalApi private[akka] def increment(node: UniqueAddress): VersionVector - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def versionAt(node: UniqueAddress): Long - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def contains(node: UniqueAddress): Boolean - /** - * Returns true if this and that are concurrent else false. - */ + /** Returns true if this and that are concurrent else false. */ def <>(that: VersionVector): Boolean = compareOnlyTo(that, Concurrent) eq Concurrent - /** - * Returns true if this is before that else false. - */ + /** Returns true if this is before that else false. */ def <(that: VersionVector): Boolean = compareOnlyTo(that, Before) eq Before - /** - * Returns true if this is after that else false. - */ + /** Returns true if this is after that else false. */ def >(that: VersionVector): Boolean = compareOnlyTo(that, After) eq After - /** - * Returns true if this VersionVector has the same history as the 'that' VersionVector else false. - */ + /** Returns true if this VersionVector has the same history as the 'that' VersionVector else false. 
*/ def ==(that: VersionVector): Boolean = compareOnlyTo(that, Same) eq Same /** @@ -226,9 +192,7 @@ sealed abstract class VersionVector extends ReplicatedData with ReplicatedDataSe else compare(this.versionsIterator, that.versionsIterator, if (order eq Concurrent) FullOrder else order) } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def versionsIterator: Iterator[(UniqueAddress, Long)] /** @@ -245,9 +209,7 @@ sealed abstract class VersionVector extends ReplicatedData with ReplicatedDataSe compareOnlyTo(that, FullOrder) } - /** - * Merges this VersionVector with another VersionVector. E.g. merges its versioned history. - */ + /** Merges this VersionVector with another VersionVector. E.g. merges its versioned history. */ def merge(that: VersionVector): VersionVector override def needPruningFrom(removedNode: UniqueAddress): Boolean diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializer.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializer.scala index a7402969849..9fd1b0d6b6e 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializer.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializer.scala @@ -256,9 +256,7 @@ private object ReplicatedDataSerializer { } -/** - * Protobuf serializer of ReplicatedData. - */ +/** Protobuf serializer of ReplicatedData. 
*/ class ReplicatedDataSerializer(val system: ExtendedActorSystem) extends SerializerWithStringManifest with SerializationSupport @@ -621,13 +619,12 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem) def gcounterToProto(gcounter: GCounter): rd.GCounter = { val b = rd.GCounter.newBuilder() - gcounter.state.toVector.sortBy { case (address, _) => address }.foreach { - case (address, value) => - b.addEntries( - rd.GCounter.Entry - .newBuilder() - .setNode(uniqueAddressToProto(address)) - .setValue(ByteStringUtils.toProtoByteStringUnsafe(value.toByteArray))) + gcounter.state.toVector.sortBy { case (address, _) => address }.foreach { case (address, value) => + b.addEntries( + rd.GCounter.Entry + .newBuilder() + .setNode(uniqueAddressToProto(address)) + .setValue(ByteStringUtils.toProtoByteStringUnsafe(value.toByteArray))) } b.build() } @@ -636,10 +633,9 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem) gcounterFromProto(rd.GCounter.parseFrom(bytes)) def gcounterFromProto(gcounter: rd.GCounter): GCounter = { - new GCounter( - state = gcounter.getEntriesList.asScala.iterator - .map(entry => uniqueAddressFromProto(entry.getNode) -> BigInt(entry.getValue.toByteArray)) - .toMap) + new GCounter(state = gcounter.getEntriesList.asScala.iterator + .map(entry => uniqueAddressFromProto(entry.getNode) -> BigInt(entry.getValue.toByteArray)) + .toMap) } def pncounterToProto(pncounter: PNCounter): rd.PNCounter = @@ -669,8 +665,8 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem) PValue <: GeneratedMessageV3]( input: Map[IKey, IValue], createBuilder: () => EntryBuilder, - valueConverter: IValue => PValue)( - implicit comparator: Comparator[PEntry], + valueConverter: IValue => PValue)(implicit + comparator: Comparator[PEntry], eh: ProtoMapEntryWriter[PEntry, EntryBuilder, PValue]): java.lang.Iterable[PEntry] = { // The resulting Iterable needs to be ordered deterministically in order to create same signature upon serializing same data val 
protoEntries = new util.TreeSet[PEntry](comparator) diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala index c86caabd4b2..5cfa2bc14c7 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala @@ -36,9 +36,7 @@ import akka.util.{ ByteString => AkkaByteString } import akka.util.ccompat._ import akka.util.ccompat.JavaConverters._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @ccompatUsedUntil213 @InternalApi private[akka] object ReplicatorMessageSerializer { @@ -66,9 +64,7 @@ private[akka] object ReplicatorMessageSerializer { // so we use non-volatile private var lastUsed = System.nanoTime() - /** - * Get value from cache or `null` if it doesn't exist. - */ + /** Get value from cache or `null` if it doesn't exist. */ def get(a: A): B = get(a, n.get) private def get(a: A, startPos: Int): B = { @@ -147,9 +143,7 @@ private[akka] object ReplicatorMessageSerializer { } } -/** - * Protobuf serializer of ReplicatorMessage messages. - */ +/** Protobuf serializer of ReplicatorMessage messages. 
*/ class ReplicatorMessageSerializer(val system: ExtendedActorSystem) extends SerializerWithStringManifest with SerializationSupport @@ -265,16 +259,15 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem) private def statusToProto(status: Status): dm.Status = { val b = dm.Status.newBuilder() b.setChunk(status.chunk).setTotChunks(status.totChunks) - status.digests.foreach { - case (key, (digest, usedTimestamp)) => - val entryBuilder = - dm.Status.Entry - .newBuilder() - .setKey(key) - .setDigest(ByteStringUtils.toProtoByteStringUnsafe(digest.toArrayUnsafe())) - if (usedTimestamp != 0L) - entryBuilder.setUsedTimestamp(usedTimestamp) - b.addEntries(entryBuilder) + status.digests.foreach { case (key, (digest, usedTimestamp)) => + val entryBuilder = + dm.Status.Entry + .newBuilder() + .setKey(key) + .setDigest(ByteStringUtils.toProtoByteStringUnsafe(digest.toArrayUnsafe())) + if (usedTimestamp != 0L) + entryBuilder.setUsedTimestamp(usedTimestamp) + b.addEntries(entryBuilder) } status.toSystemUid.foreach(b.setToSystemUid) // can be None when sending back to a node of version 2.5.21 b.setFromSystemUid(status.fromSystemUid.get) @@ -297,13 +290,12 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem) private def gossipToProto(gossip: Gossip): dm.Gossip = { val b = dm.Gossip.newBuilder().setSendBack(gossip.sendBack) - gossip.updatedData.foreach { - case (key, (data, usedTimestamp)) => - val entryBuilder = - dm.Gossip.Entry.newBuilder().setKey(key).setEnvelope(dataEnvelopeToProto(data)) - if (usedTimestamp != 0L) - entryBuilder.setUsedTimestamp(usedTimestamp) - b.addEntries(entryBuilder) + gossip.updatedData.foreach { case (key, (data, usedTimestamp)) => + val entryBuilder = + dm.Gossip.Entry.newBuilder().setKey(key).setEnvelope(dataEnvelopeToProto(data)) + if (usedTimestamp != 0L) + entryBuilder.setUsedTimestamp(usedTimestamp) + b.addEntries(entryBuilder) } gossip.toSystemUid.foreach(b.setToSystemUid) // can be None when sending back to a 
node of version 2.5.21 b.setFromSystemUid(gossip.fromSystemUid.get) @@ -327,16 +319,15 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem) val b = dm.DeltaPropagation.newBuilder().setFromNode(uniqueAddressToProto(deltaPropagation._fromNode)) if (deltaPropagation.reply) b.setReply(deltaPropagation.reply) - deltaPropagation.deltas.foreach { - case (key, Delta(data, fromSeqNr, toSeqNr)) => - val b2 = dm.DeltaPropagation.Entry - .newBuilder() - .setKey(key) - .setEnvelope(dataEnvelopeToProto(data)) - .setFromSeqNr(fromSeqNr) - if (toSeqNr != fromSeqNr) - b2.setToSeqNr(toSeqNr) - b.addEntries(b2) + deltaPropagation.deltas.foreach { case (key, Delta(data, fromSeqNr, toSeqNr)) => + val b2 = dm.DeltaPropagation.Entry + .newBuilder() + .setKey(key) + .setEnvelope(dataEnvelopeToProto(data)) + .setFromSeqNr(fromSeqNr) + if (toSeqNr != fromSeqNr) + b2.setToSeqNr(toSeqNr) + b.addEntries(b2) } b.build() } @@ -356,7 +347,7 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem) private def getToProto(get: Get[_]): dm.Get = { val timoutInMillis = get.consistency.timeout.toMillis - require(timoutInMillis <= 0XFFFFFFFFL, "Timeouts must fit in a 32-bit unsigned int") + require(timoutInMillis <= 0xffffffffL, "Timeouts must fit in a 32-bit unsigned int") val b = dm.Get.newBuilder().setKey(otherMessageToProto(get.key)).setTimeout(timoutInMillis.toInt) @@ -489,23 +480,22 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem) } private def pruningToProto(entries: Map[UniqueAddress, PruningState]): Iterable[dm.DataEnvelope.PruningEntry] = { - entries.map { - case (removedAddress, state) => - val b = dm.DataEnvelope.PruningEntry.newBuilder().setRemovedAddress(uniqueAddressToProto(removedAddress)) - state match { - case PruningState.PruningInitialized(owner, seen) => - seen.toVector.sorted(Member.addressOrdering).map(addressToProto).foreach { a => - b.addSeen(a) - } - b.setOwnerAddress(uniqueAddressToProto(owner)) - b.setPerformed(false) - case 
PruningState.PruningPerformed(obsoleteTime) => - b.setPerformed(true).setObsoleteTime(obsoleteTime) - // TODO ownerAddress is only needed for PruningInitialized, but kept here for - // wire backwards compatibility with 2.4.16 (required field) - b.setOwnerAddress(uniqueAddressToProto(dummyAddress)) - } - b.build() + entries.map { case (removedAddress, state) => + val b = dm.DataEnvelope.PruningEntry.newBuilder().setRemovedAddress(uniqueAddressToProto(removedAddress)) + state match { + case PruningState.PruningInitialized(owner, seen) => + seen.toVector.sorted(Member.addressOrdering).map(addressToProto).foreach { a => + b.addSeen(a) + } + b.setOwnerAddress(uniqueAddressToProto(owner)) + b.setPerformed(false) + case PruningState.PruningPerformed(obsoleteTime) => + b.setPerformed(true).setObsoleteTime(obsoleteTime) + // TODO ownerAddress is only needed for PruningInitialized, but kept here for + // wire backwards compatibility with 2.4.16 (required field) + b.setOwnerAddress(uniqueAddressToProto(dummyAddress)) + } + b.build() } } diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/SerializationSupport.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/SerializationSupport.scala index 4215db66b0d..28c9b33cd17 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/SerializationSupport.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/SerializationSupport.scala @@ -25,9 +25,7 @@ import akka.serialization._ import akka.util.ccompat._ import akka.util.ccompat.JavaConverters._ -/** - * Some useful serialization helper methods. - */ +/** Some useful serialization helper methods. 
*/ @ccompatUsedUntil213 trait SerializationSupport { @@ -101,19 +99,20 @@ trait SerializationSupport { .setUid2((uniqueAddress.longUid >> 32).toInt) def uniqueAddressFromProto(uniqueAddress: dm.UniqueAddress): UniqueAddress = - UniqueAddress(addressFromProto(uniqueAddress.getAddress), if (uniqueAddress.hasUid2) { - // new remote node join the two parts of the long uid back - (uniqueAddress.getUid2.toLong << 32) | (uniqueAddress.getUid & 0XFFFFFFFFL) - } else { - // old remote node - uniqueAddress.getUid.toLong - }) + UniqueAddress( + addressFromProto(uniqueAddress.getAddress), + if (uniqueAddress.hasUid2) { + // new remote node join the two parts of the long uid back + (uniqueAddress.getUid2.toLong << 32) | (uniqueAddress.getUid & 0xffffffffL) + } else { + // old remote node + uniqueAddress.getUid.toLong + }) def versionVectorToProto(versionVector: VersionVector): dm.VersionVector = { val b = dm.VersionVector.newBuilder() - versionVector.versionsIterator.foreach { - case (node, value) => - b.addEntries(dm.VersionVector.Entry.newBuilder().setNode(uniqueAddressToProto(node)).setVersion(value)) + versionVector.versionsIterator.foreach { case (node, value) => + b.addEntries(dm.VersionVector.Entry.newBuilder().setNode(uniqueAddressToProto(node)).setVersion(value)) } b.build() } @@ -175,7 +174,5 @@ trait SerializationSupport { } -/** - * Java API - */ +/** Java API */ abstract class AbstractSerializationSupport extends JSerializer with SerializationSupport diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurableDataSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurableDataSpec.scala index 387f06a87a3..b3c7e660300 100644 --- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurableDataSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurableDataSpec.scala @@ -56,7 +56,8 @@ object DurableDataSpec { if (failStore) reply match { case Some(StoreReply(_, failureMsg, replyTo)) => 
replyTo ! failureMsg case None => - } else + } + else reply match { case Some(StoreReply(successMsg, _, replyTo)) => replyTo ! successMsg case None => @@ -345,9 +346,11 @@ abstract class DurableDataSpec(multiNodeConfig: DurableDataSpecConfig) val sys2 = ActorSystem( "AdditionalSys", // use the same port - ConfigFactory.parseString(s""" + ConfigFactory + .parseString(s""" akka.remote.artery.canonical.port = ${address.port.get} - """).withFallback(system.settings.config)) + """) + .withFallback(system.settings.config)) try { Cluster(sys2).join(address) new TestKit(sys2) with ImplicitSender { diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurablePruningSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurablePruningSpec.scala index 6f9a14276e3..6f1e048aa4c 100644 --- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurablePruningSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurablePruningSpec.scala @@ -81,12 +81,14 @@ class DurablePruningSpec extends MultiNodeSpec(DurablePruningSpec) with STMultiN val replicator2 = startReplicator(sys2) val probe2 = TestProbe()(sys2) Cluster(sys2).join(node(first).address) - awaitAssert({ - Cluster(system).state.members.size should ===(4) - Cluster(system).state.members.unsorted.map(_.status) should ===(Set(MemberStatus.Up)) - Cluster(sys2).state.members.size should ===(4) - Cluster(sys2).state.members.unsorted.map(_.status) should ===(Set(MemberStatus.Up)) - }, 10.seconds) + awaitAssert( + { + Cluster(system).state.members.size should ===(4) + Cluster(system).state.members.unsorted.map(_.status) should ===(Set(MemberStatus.Up)) + Cluster(sys2).state.members.size should ===(4) + Cluster(sys2).state.members.unsorted.map(_.status) should ===(Set(MemberStatus.Up)) + }, + 10.seconds) enterBarrier("joined") within(5.seconds) { @@ -160,18 +162,22 @@ class DurablePruningSpec extends MultiNodeSpec(DurablePruningSpec) with STMultiN val address = 
cluster2.selfAddress val sys3 = ActorSystem( system.name, - ConfigFactory.parseString(s""" + ConfigFactory + .parseString(s""" akka.remote.artery.canonical.port = ${address.port.get} - """).withFallback(system.settings.config)) + """) + .withFallback(system.settings.config)) val cluster3 = Cluster(sys3) val replicator3 = startReplicator(sys3) val probe3 = TestProbe()(sys3) cluster3.join(node(first).address) - awaitAssert({ - cluster.state.members.exists(m => - m.uniqueAddress == cluster3.selfUniqueAddress && m.status == MemberStatus.Up) should ===(true) - }, 10.seconds) + awaitAssert( + { + cluster.state.members.exists(m => + m.uniqueAddress == cluster3.selfUniqueAddress && m.status == MemberStatus.Up) should ===(true) + }, + 10.seconds) within(10.seconds) { var values = Set.empty[Int] @@ -187,10 +193,12 @@ class DurablePruningSpec extends MultiNodeSpec(DurablePruningSpec) with STMultiN } // all must at least have seen it as joining - awaitAssert({ - cluster3.state.members.size should ===(4) - cluster3.state.members.unsorted.map(_.status) should ===(Set(MemberStatus.Up)) - }, 10.seconds) + awaitAssert( + { + cluster3.state.members.size should ===(4) + cluster3.state.members.unsorted.map(_.status) should ===(Set(MemberStatus.Up)) + }, + 10.seconds) // after merging with others replicator3 ! 
Get(KeyA, ReadAll(remainingOrDefault)) diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/JepsenInspiredInsertSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/JepsenInspiredInsertSpec.scala index 7260716ac0f..ff2cd66f6af 100644 --- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/JepsenInspiredInsertSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/JepsenInspiredInsertSpec.scala @@ -125,7 +125,7 @@ class JepsenInspiredInsertSpec writeProbe.receiveOne(3.seconds) } val successWriteAcks = writeAcks.collect { case success: UpdateSuccess[_] => success } - val failureWriteAcks = writeAcks.collect { case fail: UpdateFailure[_] => fail } + val failureWriteAcks = writeAcks.collect { case fail: UpdateFailure[_] => fail } successWriteAcks.map(_.request.get).toSet should be(myData.toSet) successWriteAcks.size should be(myData.size) failureWriteAcks should be(Nil) @@ -158,7 +158,7 @@ class JepsenInspiredInsertSpec writeProbe.receiveOne(timeout + 1.second) } val successWriteAcks = writeAcks.collect { case success: UpdateSuccess[_] => success } - val failureWriteAcks = writeAcks.collect { case fail: UpdateFailure[_] => fail } + val failureWriteAcks = writeAcks.collect { case fail: UpdateFailure[_] => fail } successWriteAcks.map(_.request.get).toSet should be(myData.toSet) successWriteAcks.size should be(myData.size) failureWriteAcks should be(Nil) @@ -170,7 +170,7 @@ class JepsenInspiredInsertSpec val readProbe = TestProbe() replicator.tell(Get(key, readMajority), readProbe.ref) val result = readProbe.expectMsgPF() { case g @ GetSuccess(`key`, _) => g.get(key) } - //val survivors = result.elements.size + // val survivors = result.elements.size result.elements should be(expectedData) } @@ -202,7 +202,7 @@ class JepsenInspiredInsertSpec writeProbe.receiveOne(3.seconds) } val successWriteAcks = writeAcks.collect { case success: UpdateSuccess[_] => success } - val failureWriteAcks = 
writeAcks.collect { case fail: UpdateFailure[_] => fail } + val failureWriteAcks = writeAcks.collect { case fail: UpdateFailure[_] => fail } successWriteAcks.map(_.request.get).toSet should be(myData.toSet) successWriteAcks.size should be(myData.size) failureWriteAcks should be(Nil) @@ -247,7 +247,7 @@ class JepsenInspiredInsertSpec writeProbe.receiveOne(timeout + 1.second) } val successWriteAcks = writeAcks.collect { case success: UpdateSuccess[_] => success } - val failureWriteAcks = writeAcks.collect { case fail: UpdateFailure[_] => fail } + val failureWriteAcks = writeAcks.collect { case fail: UpdateFailure[_] => fail } runOn(n1, n4, n5) { successWriteAcks.map(_.request.get).toSet should be(myData.toSet) successWriteAcks.size should be(myData.size) @@ -256,7 +256,7 @@ class JepsenInspiredInsertSpec runOn(n2, n3) { // without delays all could theoretically have been written before the blackhole if (delayMillis != 0) - failureWriteAcks should not be (Nil) + failureWriteAcks should not be Nil } (successWriteAcks.size + failureWriteAcks.size) should be(myData.size) @@ -267,7 +267,7 @@ class JepsenInspiredInsertSpec val readProbe = TestProbe() replicator.tell(Get(key, readMajority), readProbe.ref) val result = readProbe.expectMsgPF() { case g @ GetSuccess(`key`, _) => g.get(key) } - //val survivors = result.elements.size + // val survivors = result.elements.size result.elements should be(expectedData) } // but on the 3 node side, read from majority doesn't mean that we are guaranteed to see diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/PerformanceSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/PerformanceSpec.scala index ddd8965710e..a99c6b359cf 100644 --- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/PerformanceSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/PerformanceSpec.scala @@ -47,11 +47,10 @@ object PerformanceSpec extends MultiNodeConfig { def 
countDownProps(latch: TestLatch): Props = Props(new CountDown(latch)).withDeploy(Deploy.local) class CountDown(latch: TestLatch) extends Actor { - def receive = { - case _ => - latch.countDown() - if (latch.isOpen) - context.stop(self) + def receive = { case _ => + latch.countDown() + if (latch.isOpen) + context.stop(self) } } @@ -163,9 +162,11 @@ class PerformanceSpec extends MultiNodeSpec(PerformanceSpec) with STMultiNodeSpe val keys = (1 to repeatCount).map(n => ORSetKey[Int]("A" + n)) val n = 1000 * factor val expectedData = (0 until n).toSet - repeat("ORSet Update WriteLocal", keys, n)({ (key, i, replyTo) => - replicator.tell(Update(key, ORSet(), WriteLocal)(_ :+ i), replyTo) - }, key => awaitReplicated(key, expectedData)) + repeat("ORSet Update WriteLocal", keys, n)( + { (key, i, replyTo) => + replicator.tell(Update(key, ORSet(), WriteLocal)(_ :+ i), replyTo) + }, + key => awaitReplicated(key, expectedData)) enterBarrier("after-1") } diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorChaosSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorChaosSpec.scala index d723b91a76c..1eb88127bb4 100644 --- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorChaosSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorChaosSpec.scala @@ -70,15 +70,14 @@ class ReplicatorChaosSpec extends MultiNodeSpec(ReplicatorChaosSpec) with STMult within(10.seconds) { awaitAssert { replicator ! 
Get(key, ReadLocal) - val value = expectMsgPF() { - case g @ GetSuccess(`key`, _) => - g.dataValue match { - case c: GCounter => c.value - case c: PNCounter => c.value - case c: GSet[_] => c.elements - case c: ORSet[_] => c.elements - case _ => fail() - } + val value = expectMsgPF() { case g @ GetSuccess(`key`, _) => + g.dataValue match { + case c: GCounter => c.value + case c: PNCounter => c.value + case c: GSet[_] => c.elements + case c: ORSet[_] => c.elements + case _ => fail() + } } value should be(expected) } diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorMapDeltaSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorMapDeltaSpec.scala index a688a2c13c3..5c37ad788f1 100644 --- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorMapDeltaSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorMapDeltaSpec.scala @@ -167,12 +167,12 @@ object ReplicatorMapDeltaSpec extends MultiNodeConfig { }.toVector } - def addElementToORMap(om: ORMap[String, ORSet[String]], key: String, element: String)( - implicit node: SelfUniqueAddress) = + def addElementToORMap(om: ORMap[String, ORSet[String]], key: String, element: String)(implicit + node: SelfUniqueAddress) = om.updated(node, key, ORSet.empty[String])(_ :+ element) - def removeElementFromORMap(om: ORMap[String, ORSet[String]], key: String, element: String)( - implicit node: SelfUniqueAddress) = + def removeElementFromORMap(om: ORMap[String, ORSet[String]], key: String, element: String)(implicit + node: SelfUniqueAddress) = om.updated(node, key, ORSet.empty[String])(_.remove(element)) } diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorORSetDeltaSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorORSetDeltaSpec.scala index f6fd96670ec..d65ef76313c 100644 --- 
a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorORSetDeltaSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorORSetDeltaSpec.scala @@ -63,12 +63,11 @@ class ReplicatorORSetDeltaSpec within(10.seconds) { awaitAssert { replicator ! Get(key, ReadLocal) - val value = expectMsgPF() { - case g @ GetSuccess(`key`, _) => - g.dataValue match { - case c: ORSet[_] => c.elements - case _ => fail() - } + val value = expectMsgPF() { case g @ GetSuccess(`key`, _) => + g.dataValue match { + case c: ORSet[_] => c.elements + case _ => fail() + } } value should be(expected) } diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorPruningSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorPruningSpec.scala index eab419216d5..64e2403ea38 100644 --- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorPruningSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorPruningSpec.scala @@ -165,12 +165,11 @@ class ReplicatorPruningSpec extends MultiNodeSpec(ReplicatorPruningSpec) with ST var values = Set.empty[Int] awaitAssert { replicator ! Get(KeyA, ReadLocal) - expectMsgPF() { - case g @ GetSuccess(KeyA, _) => - val value = g.get(KeyA).value.toInt - values += value - value should be(9) - g.get(KeyA).needPruningFrom(thirdUniqueAddress) should be(false) + expectMsgPF() { case g @ GetSuccess(KeyA, _) => + val value = g.get(KeyA).value.toInt + values += value + value should be(9) + g.get(KeyA).needPruningFrom(thirdUniqueAddress) should be(false) } } values should ===(Set(9)) @@ -178,39 +177,35 @@ class ReplicatorPruningSpec extends MultiNodeSpec(ReplicatorPruningSpec) with ST within(5.seconds) { awaitAssert { replicator ! 
Get(KeyB, ReadLocal) - expectMsgPF() { - case g @ GetSuccess(KeyB, _) => - g.get(KeyB).elements should be(Set("a", "b", "c")) - g.get(KeyB).needPruningFrom(thirdUniqueAddress) should be(false) + expectMsgPF() { case g @ GetSuccess(KeyB, _) => + g.get(KeyB).elements should be(Set("a", "b", "c")) + g.get(KeyB).needPruningFrom(thirdUniqueAddress) should be(false) } } } within(5.seconds) { awaitAssert { replicator ! Get(KeyC, ReadLocal) - expectMsgPF() { - case g @ GetSuccess(KeyC, _) => - g.get(KeyC).entries should be(Map("x" -> 3L, "y" -> 3L)) - g.get(KeyC).needPruningFrom(thirdUniqueAddress) should be(false) + expectMsgPF() { case g @ GetSuccess(KeyC, _) => + g.get(KeyC).entries should be(Map("x" -> 3L, "y" -> 3L)) + g.get(KeyC).needPruningFrom(thirdUniqueAddress) should be(false) } } } within(5.seconds) { awaitAssert { replicator ! Get(KeyD, ReadLocal) - expectMsgPF() { - case g @ GetSuccess(KeyD, _) => - g.get(KeyD).entries("a") should be(Set("A")) - g.get(KeyD).needPruningFrom(thirdUniqueAddress) should be(false) + expectMsgPF() { case g @ GetSuccess(KeyD, _) => + g.get(KeyD).entries("a") should be(Set("A")) + g.get(KeyD).needPruningFrom(thirdUniqueAddress) should be(false) } } } within(5.seconds) { awaitAssert { replicator ! Get(KeyE, ReadLocal) - expectMsgPF() { - case g @ GetSuccess(KeyE, _) => - g.get(KeyE).needPruningFrom(thirdUniqueAddress) should be(false) + expectMsgPF() { case g @ GetSuccess(KeyE, _) => + g.get(KeyE).needPruningFrom(thirdUniqueAddress) should be(false) } } } @@ -223,19 +218,17 @@ class ReplicatorPruningSpec extends MultiNodeSpec(ReplicatorPruningSpec) with ST // inject data from removed node to simulate bad data existing.merge(oldCounter) :+ 1 } - expectMsgPF() { - case UpdateSuccess(KeyA, _) => - replicator ! Get(KeyA, ReadLocal) - val retrieved = expectMsgType[GetSuccess[GCounter]].dataValue - retrieved.value should be(expectedValue) + expectMsgPF() { case UpdateSuccess(KeyA, _) => + replicator ! 
Get(KeyA, ReadLocal) + val retrieved = expectMsgType[GetSuccess[GCounter]].dataValue + retrieved.value should be(expectedValue) } } def checkValuePropagated(expectedValue: Int): Unit = awaitAssert { replicator ! Get(KeyA, ReadLocal) - expectMsgPF() { - case g @ GetSuccess(KeyA, _) => - g.get(KeyA).value.toInt should be(expectedValue) + expectMsgPF() { case g @ GetSuccess(KeyA, _) => + g.get(KeyA).value.toInt should be(expectedValue) } } diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorSpec.scala index 5598d955dea..0fadf19ff80 100644 --- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorSpec.scala @@ -450,10 +450,12 @@ class ReplicatorSpec extends MultiNodeSpec(ReplicatorSpec) with STMultiNodeSpec val probe2 = TestProbe() replicator.tell(Get(KeyE, readMajority), probe2.ref) probe2.expectMsgType[GetSuccess[_]] - replicator.tell(Update(KeyE, GCounter(), writeMajority, None) { data => - probe1.ref ! data.value - data :+ 1 - }, probe2.ref) + replicator.tell( + Update(KeyE, GCounter(), writeMajority, None) { data => + probe1.ref ! 
data.value + data :+ 1 + }, + probe2.ref) // verify read your own writes, without waiting for the UpdateSuccess reply // note that the order of the replies are not defined, and therefore we use separate probes val probe3 = TestProbe() diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/STMultiNodeSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/STMultiNodeSpec.scala index c0e6006d8ad..6eb9620f9e0 100644 --- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/STMultiNodeSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/STMultiNodeSpec.scala @@ -10,9 +10,7 @@ import org.scalatest.wordspec.AnyWordSpecLike import akka.remote.testkit.MultiNodeSpecCallbacks -/** - * Hooks up MultiNodeSpec with ScalaTest - */ +/** Hooks up MultiNodeSpec with ScalaTest */ trait STMultiNodeSpec extends MultiNodeSpecCallbacks with AnyWordSpecLike with Matchers with BeforeAndAfterAll { override def beforeAll() = multiNodeSpecBeforeAll() diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/DeltaPropagationSelectorSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/DeltaPropagationSelectorSpec.scala index d02efc75a44..0a72c255df0 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/DeltaPropagationSelectorSpec.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/DeltaPropagationSelectorSpec.scala @@ -21,9 +21,12 @@ object DeltaPropagationSelectorSpec { extends DeltaPropagationSelector { override val gossipIntervalDivisor = 5 override def createDeltaPropagation(deltas: Map[KeyId, (ReplicatedData, Long, Long)]): DeltaPropagation = - DeltaPropagation(selfUniqueAddress, false, deltas.map { - case (key, (d, fromSeqNr, toSeqNr)) => (key, Delta(DataEnvelope(d), fromSeqNr, toSeqNr)) - }) + DeltaPropagation( + selfUniqueAddress, + false, + deltas.map { case (key, (d, fromSeqNr, toSeqNr)) => + (key, Delta(DataEnvelope(d), fromSeqNr, toSeqNr)) + }) override 
def maxDeltaSize: Int = 10 } diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/LWWRegisterSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/LWWRegisterSpec.scala index f8b13d9b8a4..09f9048eafa 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/LWWRegisterSpec.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/LWWRegisterSpec.scala @@ -19,10 +19,9 @@ class LWWRegisterSpec extends AnyWordSpec with Matchers { "A LWWRegister" must { "use latest of successive assignments" in { - val r = (1 to 100).foldLeft(LWWRegister(node1, 0, defaultClock[Int])) { - case (r, n) => - r.value should be(n - 1) - r.withValue(node1, n, defaultClock[Int]) + val r = (1 to 100).foldLeft(LWWRegister(node1, 0, defaultClock[Int])) { case (r, n) => + r.value should be(n - 1) + r.withValue(node1, n, defaultClock[Int]) } r.value should be(100) } @@ -59,12 +58,11 @@ class LWWRegisterSpec extends AnyWordSpec with Matchers { "use monotonically increasing defaultClock" in { implicit val node = SelfUniqueAddress(node1) - (1 to 100).foldLeft(LWWRegister.create(0)) { - case (r, n) => - r.value should be(n - 1) - val r2 = r.withValueOf(n) - r2.timestamp should be > r.timestamp - r2 + (1 to 100).foldLeft(LWWRegister.create(0)) { case (r, n) => + r.value should be(n - 1) + val r2 = r.withValueOf(n) + r2.timestamp should be > r.timestamp + r2 } } @@ -92,12 +90,11 @@ class LWWRegisterSpec extends AnyWordSpec with Matchers { "can be used as first-write-wins-register" in { import LWWRegister.reverseClock - val r = (1 to 100).foldLeft(LWWRegister(node1, 0, reverseClock[Int])) { - case (r, n) => - r.value should be(0) - val newRegister = r.merge(r.withValue(node1, n, reverseClock[Int])) - newRegister should be(r) - newRegister + val r = (1 to 100).foldLeft(LWWRegister(node1, 0, reverseClock[Int])) { case (r, n) => + r.value should be(0) + val newRegister = r.merge(r.withValue(node1, n, reverseClock[Int])) + newRegister should be(r) + 
newRegister } r.value should be(0) } diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/LocalConcurrencySpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/LocalConcurrencySpec.scala index ee9e70c195b..947b9d84780 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/LocalConcurrencySpec.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/LocalConcurrencySpec.scala @@ -30,10 +30,9 @@ object LocalConcurrencySpec { val replicator = DistributedData(context.system).replicator - def receive = { - case s: String => - val update = Replicator.Update(Updater.key, ORSet.empty[String], Replicator.WriteLocal)(_ :+ s) - replicator ! update + def receive = { case s: String => + val update = Replicator.Update(Updater.key, ORSet.empty[String], Replicator.WriteLocal)(_ :+ s) + replicator ! update } } } diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/LotsOfDataBot.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/LotsOfDataBot.scala index a0834baeb33..c4da7e272e7 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/LotsOfDataBot.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/LotsOfDataBot.scala @@ -33,8 +33,7 @@ object LotsOfDataBot { // Override the configuration of the port val config = ConfigFactory .parseString("akka.remote.artery.canonical.port=" + port) - .withFallback( - ConfigFactory.load(ConfigFactory.parseString(""" + .withFallback(ConfigFactory.load(ConfigFactory.parseString(""" passive = off max-entries = 100000 akka.actor.provider = "cluster" @@ -113,7 +112,6 @@ class LotsOfDataBot extends Actor with ActorLogging { } case _: UpdateResponse[_] => // ignore - case c @ Changed(ORSetKey(id)) => val elements = c.dataValue match { case ORSet(e) => e diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMapSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMapSpec.scala index 6b0e9c4c5ee..5db20877a33 
100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMapSpec.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMapSpec.scala @@ -69,7 +69,7 @@ class ORMapSpec extends AnyWordSpec with Matchers { "be able to remove entry" in { val m = ORMap().put(node1, "a", GSet() + "A").put(node1, "b", GSet() + "B").remove(node1, "a") - m.entries.keySet should not contain ("a") + m.entries.keySet should not contain "a" m.entries.keySet should contain("b") } @@ -83,13 +83,13 @@ class ORMapSpec extends AnyWordSpec with Matchers { m1.entries.keySet should contain("a") val m2 = m1.mergeDelta(removeDelta) - m2.entries.keySet should not contain ("a") + m2.entries.keySet should not contain "a" m2.entries.keySet should contain("b") } "be able to add removed" in { val m = ORMap().put(node1, "a", GSet() + "A").put(node1, "b", GSet() + "B").remove(node1, "a") - m.entries.keySet should not contain ("a") + m.entries.keySet should not contain "a" m.entries.keySet should contain("b") val m2 = m.put(node1, "a", GSet() + "C") m2.entries.keySet should contain("a") @@ -366,7 +366,7 @@ class ORMapSpec extends AnyWordSpec with Matchers { .updated(node1, "b", ORSet.empty[String])(_.add(node1, "B3")) .updated(node2, "b", ORSet.empty[String])(_.add(node2, "B4")) - val merged1 = (m1.merge(m2d)).mergeDelta(m2u.delta.get) + val merged1 = m1.merge(m2d).mergeDelta(m2u.delta.get) merged1.entries("a").elements should be(Set("A")) // note that B1 is lost as it was added and removed earlier in timeline than B2 @@ -391,7 +391,7 @@ class ORMapSpec extends AnyWordSpec with Matchers { merged2.entries("b").elements should be(Set("B3")) merged2.entries("c").elements should be(Set("C")) - val merged3 = (merged1.mergeDelta(m3.delta.get)).mergeDelta(m4.delta.get) + val merged3 = merged1.mergeDelta(m3.delta.get).mergeDelta(m4.delta.get) merged3.entries("a").elements should be(Set("A")) merged3.entries("b").elements should be(Set("B3")) @@ -418,7 +418,7 @@ class ORMapSpec extends 
AnyWordSpec with Matchers { merged2.entries("b").elements should be(Set("B2", "B3")) merged2.entries("c").elements should be(Set("C")) - val merged3 = (merged1.mergeDelta(m3.delta.get)).mergeDelta(m4.delta.get) + val merged3 = merged1.mergeDelta(m3.delta.get).mergeDelta(m4.delta.get) merged3.entries("a").elements should be(Set("A")) merged3.entries("b").elements should be(Set("B2", "B3")) diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMultiMapSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMultiMapSpec.scala index a8e02eb4653..e7437f8766f 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMultiMapSpec.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMultiMapSpec.scala @@ -485,10 +485,12 @@ class ORMultiMapSpec extends AnyWordSpec with Matchers { val m3 = m1.mergeDelta(m2.delta.get) val m4 = m1.merge(m2) - m3.underlying.values - .contains("a") should be(false) // tombstone for 'a' has been optimized away at the end of the mergeDelta - m4.underlying.values - .contains("a") should be(false) // tombstone for 'a' has been optimized away at the end of the merge + m3.underlying.values.contains("a") should be( + false + ) // tombstone for 'a' has been optimized away at the end of the mergeDelta + m4.underlying.values.contains("a") should be( + false + ) // tombstone for 'a' has been optimized away at the end of the merge val m5 = ORMultiMap.emptyWithValueDeltas[String, String].put(node1, "a", Set("A1")) m3.mergeDelta(m5.delta.get).entries("a") should ===(Set("A1")) @@ -509,10 +511,12 @@ class ORMultiMapSpec extends AnyWordSpec with Matchers { val um3 = um1.mergeDelta(um2.delta.get) val um4 = um1.merge(um2) - um3.underlying.values - .contains("a") should be(false) // tombstone for 'a' has been optimized away at the end of the mergeDelta - um4.underlying.values - .contains("a") should be(false) // tombstone for 'a' has been optimized away at the end of the merge + 
um3.underlying.values.contains("a") should be( + false + ) // tombstone for 'a' has been optimized away at the end of the mergeDelta + um4.underlying.values.contains("a") should be( + false + ) // tombstone for 'a' has been optimized away at the end of the merge val um5 = ORMultiMap.emptyWithValueDeltas[String, String].addBinding(node1, "a", "A1") um3.mergeDelta(um5.delta.get).entries("a") should ===(Set("A1")) @@ -542,9 +546,9 @@ class ORMultiMapSpec extends AnyWordSpec with Matchers { ORMultiMap.emptyWithValueDeltas[String, String].addBinding(node1, "a", "A").underlying.remove(node1, "a"), true) tm3.underlying.contains("a") should ===(false) // no tombstone, because remove not removeKey - tm3 - .mergeDelta(tm2.delta.get) - .entries should ===(Map.empty[String, String]) // no tombstone - update delta could not be applied + tm3.mergeDelta(tm2.delta.get).entries should ===( + Map.empty[String, String] + ) // no tombstone - update delta could not be applied tm3.merge(tm2).entries should ===(Map.empty[String, String]) // The only valid value for tombstone created by means of either API call or application of delta propagation is Set() diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORSetSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORSetSpec.scala index 74f7b66da10..cade8f8e288 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORSetSpec.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORSetSpec.scala @@ -59,16 +59,16 @@ class ORSetSpec extends AnyWordSpec with Matchers { val c4 = c3.remove(node1, user2) val c5 = c4.remove(node1, user1) - c5.elements should not contain (user1) - c5.elements should not contain (user2) + c5.elements should not contain user1 + c5.elements should not contain user2 val c6 = c3.merge(c5) - c6.elements should not contain (user1) - c6.elements should not contain (user2) + c6.elements should not contain user1 + c6.elements should not contain user2 val c7 = 
c5.merge(c3) - c7.elements should not contain (user1) - c7.elements should not contain (user2) + c7.elements should not contain user1 + c7.elements should not contain user2 } "be able to add removed" in { @@ -77,7 +77,7 @@ class ORSetSpec extends AnyWordSpec with Matchers { val c3 = c2.add(node1, user1) c3.elements should contain(user1) val c4 = c3.remove(node1, user1) - c4.elements should not contain (user1) + c4.elements should not contain user1 val c5 = c4.add(node1, user1) c5.elements should contain(user1) } @@ -88,7 +88,7 @@ class ORSetSpec extends AnyWordSpec with Matchers { val c2 = c1.add(node1, user1) val c3 = c2.add(node1, user2) val c4 = c3.remove(node1, user1) - c4.elements should not contain (user1) + c4.elements should not contain user1 c4.elements should contain(user2) val c5 = c4.add(node1, user1) @@ -99,7 +99,7 @@ class ORSetSpec extends AnyWordSpec with Matchers { val c7 = c6.remove(node1, user1) val c8 = c7.add(node1, user2) val c9 = c8.remove(node1, user1) - c9.elements should not contain (user1) + c9.elements should not contain user1 c9.elements should contain(user2) } @@ -112,20 +112,20 @@ class ORSetSpec extends AnyWordSpec with Matchers { // set 2 val c2 = ORSet().add(node2, user3).add(node2, user4).remove(node2, user3) - c2.elements should not contain (user3) + c2.elements should not contain user3 c2.elements should contain(user4) // merge both ways val merged1 = c1.merge(c2) merged1.elements should contain(user1) merged1.elements should contain(user2) - merged1.elements should not contain (user3) + merged1.elements should not contain user3 merged1.elements should contain(user4) val merged2 = c2.merge(c1) merged2.elements should contain(user1) merged2.elements should contain(user2) - merged2.elements should not contain (user3) + merged2.elements should not contain user3 merged2.elements should contain(user4) } @@ -133,29 +133,29 @@ class ORSetSpec extends AnyWordSpec with Matchers { // set 1 val c1 = ORSet().add(node1, user1).add(node1, 
user2).add(node1, user3).remove(node1, user1).remove(node1, user3) - c1.elements should not contain (user1) + c1.elements should not contain user1 c1.elements should contain(user2) - c1.elements should not contain (user3) + c1.elements should not contain user3 // set 2 val c2 = ORSet().add(node2, user1).add(node2, user2).add(node2, user3).add(node2, user4).remove(node2, user3) c2.elements should contain(user1) c2.elements should contain(user2) - c2.elements should not contain (user3) + c2.elements should not contain user3 c2.elements should contain(user4) // merge both ways val merged1 = c1.merge(c2) merged1.elements should contain(user1) merged1.elements should contain(user2) - merged1.elements should not contain (user3) + merged1.elements should not contain user3 merged1.elements should contain(user4) val merged2 = c2.merge(c1) merged2.elements should contain(user1) merged2.elements should contain(user2) - merged2.elements should not contain (user3) + merged2.elements should not contain user3 merged2.elements should contain(user4) } @@ -169,19 +169,19 @@ class ORSetSpec extends AnyWordSpec with Matchers { val c2 = c1.add(node2, user1).remove(node2, user2).remove(node2, user3) c2.elements should contain(user1) - c2.elements should not contain (user2) - c2.elements should not contain (user3) + c2.elements should not contain user2 + c2.elements should not contain user3 // merge both ways val merged1 = c1.merge(c2) merged1.elements should contain(user1) - merged1.elements should not contain (user2) - merged1.elements should not contain (user3) + merged1.elements should not contain user2 + merged1.elements should not contain user3 val merged2 = c2.merge(c1) merged2.elements should contain(user1) - merged2.elements should not contain (user2) - merged2.elements should not contain (user3) + merged2.elements should not contain user2 + merged2.elements should not contain user3 val c3 = c1.add(node1, user4).remove(node1, user3).add(node1, user2) @@ -189,13 +189,13 @@ class 
ORSetSpec extends AnyWordSpec with Matchers { val merged3 = c2.merge(c3) merged3.elements should contain(user1) merged3.elements should contain(user2) - merged3.elements should not contain (user3) + merged3.elements should not contain user3 merged3.elements should contain(user4) val merged4 = c3.merge(c2) merged4.elements should contain(user1) merged4.elements should contain(user2) - merged4.elements should not contain (user3) + merged4.elements should not contain user3 merged4.elements should contain(user4) } @@ -206,23 +206,23 @@ class ORSetSpec extends AnyWordSpec with Matchers { // merge both ways val merged1 = c1.merge(c2) merged1.elements should contain(user1) - merged1.elements should not contain (user2) + merged1.elements should not contain user2 val merged2 = c2.merge(c1) merged2.elements should contain(user1) - merged2.elements should not contain (user2) + merged2.elements should not contain user2 val c3 = c1.add(node1, user3) // merge both ways val merged3 = c3.merge(c2) merged3.elements should contain(user1) - merged3.elements should not contain (user2) + merged3.elements should not contain user2 merged3.elements should contain(user3) val merged4 = c2.merge(c3) merged4.elements should contain(user1) - merged4.elements should not contain (user2) + merged4.elements should not contain user2 merged4.elements should contain(user3) } @@ -521,7 +521,11 @@ class ORSetSpec extends AnyWordSpec with Matchers { "K5" -> VersionVector(nodeA, 2L)) val vvector = VersionVector(TreeMap(nodeA -> 3L, nodeD -> 7L)) val acc: Map[Any, VersionVector] = Map("K1" -> VersionVector(nodeA, 3L)) - val expectedDots = acc ++ Map("K3" -> VersionVector(nodeA, 4L), "K4" -> VersionVector(nodeD, 8L)) // "a" -> 3 removed, optimized to include only those unseen + val expectedDots = + acc ++ Map( + "K3" -> VersionVector(nodeA, 4L), + "K4" -> VersionVector(nodeD, 8L) + ) // "a" -> 3 removed, optimized to include only those unseen ORSet.mergeDisjointKeys(keys, elements, vvector, acc) should 
be(expectedDots) } @@ -599,7 +603,9 @@ class ORSetSpec extends AnyWordSpec with Matchers { val a2 = ORSet().mergeDelta(a.delta.get).mergeDelta(a1.delta.get) a2.elements should be(Set.empty) - a2.vvector.contains(nodeB) should be(false) // a2 should not be polluted by the nodeB dot, as all operations on it pertained only to elements from nodeA + a2.vvector.contains(nodeB) should be( + false + ) // a2 should not be polluted by the nodeB dot, as all operations on it pertained only to elements from nodeA } "have unapply extractor" in { diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ReplicatorSettingsSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ReplicatorSettingsSpec.scala index fa08c8fdbd6..ddcbd03cd9d 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ReplicatorSettingsSpec.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ReplicatorSettingsSpec.scala @@ -35,12 +35,15 @@ class ReplicatorSettingsSpec } "be able to configure expiry for certain keys" in { - val settings = ReplicatorSettings(ConfigFactory.parseString(""" + val settings = ReplicatorSettings( + ConfigFactory + .parseString(""" expire-keys-after-inactivity { "key-1" = 10 minutes "cache-*" = 2 minutes } - """).withFallback(system.settings.config.getConfig("akka.cluster.distributed-data"))) + """) + .withFallback(system.settings.config.getConfig("akka.cluster.distributed-data"))) settings.expiryKeys("key-1") should ===(10.minutes) settings.expiryKeys("cache-*") should ===(2.minutes) settings.expiryKeys.size should ===(2) diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/WriteAggregatorSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/WriteAggregatorSpec.scala index f4d293b64dc..4d93fff68a5 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/WriteAggregatorSpec.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/WriteAggregatorSpec.scala @@ -127,14 +127,16 @@ 
object WriteAggregatorSpec { } } -class WriteAggregatorSpec extends AkkaSpec(s""" +class WriteAggregatorSpec + extends AkkaSpec(s""" akka.actor.provider = "cluster" akka.remote.artery.canonical.port = 0 akka.cluster.distributed-data.durable.lmdb { dir = target/WriteAggregatorSpec-${System.currentTimeMillis}-ddata map-size = 10 MiB } - """) with ImplicitSender { + """) + with ImplicitSender { import WriteAggregatorSpec._ val protocol = "akka" @@ -157,9 +159,7 @@ class WriteAggregatorSpec extends AkkaSpec(s""" def probes(probe: ActorRef): Map[UniqueAddress, ActorRef] = nodes.toSeq.map(_ -> system.actorOf(WriteAggregatorSpec.writeAckAdapterProps(probe))).toMap - /** - * Create a tuple for each node with the WriteAckAdapter and the TestProbe - */ + /** Create a tuple for each node with the WriteAckAdapter and the TestProbe */ def probes(): Map[UniqueAddress, TestMock] = { nodes.toSeq.map(_ -> TestMock()).toMap } diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializerSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializerSpec.scala index d9d62a08d69..5b7cb650322 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializerSpec.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializerSpec.scala @@ -151,28 +151,27 @@ class ReplicatedDataSerializerSpec } "serialize large GSet" in { - val largeSet = (10000 until 20000).foldLeft(GSet.empty[String]) { - case (acc, n) => acc.resetDelta.add(n.toString) + val largeSet = (10000 until 20000).foldLeft(GSet.empty[String]) { case (acc, n) => + acc.resetDelta.add(n.toString) } val numberOfBytes = checkSerialization(largeSet) info(s"size of GSet with ${largeSet.size} elements: $numberOfBytes bytes") - numberOfBytes should be <= (80000) + numberOfBytes should be <= 80000 } "serialize large ORSet" in { - val largeSet = (10000 until 
20000).foldLeft(ORSet.empty[String]) { - case (acc, n) => - val address = (n % 3) match { - case 0 => address1 - case 1 => address2 - case 2 => address3 - } - acc.resetDelta.add(address, n.toString) + val largeSet = (10000 until 20000).foldLeft(ORSet.empty[String]) { case (acc, n) => + val address = (n % 3) match { + case 0 => address1 + case 1 => address2 + case 2 => address3 + } + acc.resetDelta.add(address, n.toString) } val numberOfBytes = checkSerialization(largeSet) // note that ORSet is compressed, and therefore smaller than GSet info(s"size of ORSet with ${largeSet.size} elements: $numberOfBytes bytes") - numberOfBytes should be <= (50000) + numberOfBytes should be <= 50000 } "serialize Flag" in { diff --git a/akka-docs/src/main/scala/docs/persistence/state/MyStateStore.scala b/akka-docs/src/main/scala/docs/persistence/state/MyStateStore.scala index 073dda56acb..8d03da78cad 100644 --- a/akka-docs/src/main/scala/docs/persistence/state/MyStateStore.scala +++ b/akka-docs/src/main/scala/docs/persistence/state/MyStateStore.scala @@ -41,19 +41,13 @@ class MyStateStore[A](system: ExtendedActorSystem, config: Config, cfgPath: Stri */ override def upsertObject(persistenceId: String, revision: Long, value: A, tag: String): Future[Done] = ??? - /** - * Deprecated. Use the deleteObject overload with revision instead. - */ + /** Deprecated. Use the deleteObject overload with revision instead. */ override def deleteObject(persistenceId: String): Future[Done] = deleteObject(persistenceId, 0) - /** - * Will delete the state by setting it to the empty state and the revision number will be incremented by 1. - */ + /** Will delete the state by setting it to the empty state and the revision number will be incremented by 1. */ override def deleteObject(persistenceId: String, revision: Long): Future[Done] = ??? - /** - * Returns the current state for the given persistence id. - */ + /** Returns the current state for the given persistence id. 
*/ override def getObject(persistenceId: String): Future[GetObjectResult[A]] = ??? } //#plugin-api diff --git a/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala b/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala index 2c3522966b1..cc5bd836758 100644 --- a/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala +++ b/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala @@ -42,11 +42,11 @@ final case class Message(s: String) //#context-actorOf class FirstActor extends Actor { val child = context.actorOf(Props[MyActor](), name = "myChild") - //#plus-some-behavior - def receive = { - case x => sender() ! x + // #plus-some-behavior + def receive = { case x => + sender() ! x } - //#plus-some-behavior + // #plus-some-behavior } //#context-actorOf @@ -68,7 +68,7 @@ object ValueClassActor { //#actor-with-value-class-argument class DemoActorWrapper extends Actor { - //#props-factory + // #props-factory object DemoActor { /** @@ -82,8 +82,8 @@ class DemoActorWrapper extends Actor { } class DemoActor(magicNumber: Int) extends Actor { - def receive = { - case x: Int => sender() ! (x + magicNumber) + def receive = { case x: Int => + sender() ! (x + magicNumber) } } @@ -91,19 +91,18 @@ class DemoActorWrapper extends Actor { // Props(new DemoActor(42)) would not be safe context.actorOf(DemoActor.props(42), "demo") // ... 
- //#props-factory - def receive = { - case msg => + // #props-factory + def receive = { case msg => } - //#props-factory + // #props-factory } - //#props-factory + // #props-factory def receive = Actor.emptyBehavior } class ActorWithMessagesWrapper { - //#messages-in-companion + // #messages-in-companion object MyActor { case class Greeting(from: String) case object Goodbye @@ -115,41 +114,40 @@ class ActorWithMessagesWrapper { case Goodbye => log.info("Someone said goodbye to me.") } } - //#messages-in-companion + // #messages-in-companion def receive = Actor.emptyBehavior } class Hook extends Actor { var child: ActorRef = _ - //#preStart + // #preStart override def preStart(): Unit = { child = context.actorOf(Props[MyActor](), "child") } - //#preStart + // #preStart def receive = Actor.emptyBehavior - //#postStop + // #postStop override def postStop(): Unit = { - //#clean-up-some-resources + // #clean-up-some-resources () - //#clean-up-some-resources + // #clean-up-some-resources } - //#postStop + // #postStop } class ReplyException extends Actor { - def receive = { - case _ => - //#reply-exception - try { - val result = operation() - sender() ! result - } catch { - case e: Exception => - sender() ! akka.actor.Status.Failure(e) - throw e - } - //#reply-exception + def receive = { case _ => + // #reply-exception + try { + val result = operation() + sender() ! result + } catch { + case e: Exception => + sender() ! akka.actor.Status.Failure(e) + throw e + } + // #reply-exception } def operation(): String = { "Hi" } @@ -157,7 +155,7 @@ class ReplyException extends Actor { } class StoppingActorsWrapper { - //#stoppingActors-actor + // #stoppingActors-actor class MyActor extends Actor { val child: ActorRef = ??? 
@@ -172,7 +170,7 @@ class StoppingActorsWrapper { } - //#stoppingActors-actor + // #stoppingActors-actor } //#gracefulStop-actor @@ -200,8 +198,7 @@ class Manager extends Actor { //#gracefulStop-actor class Cruncher extends Actor { - def receive = { - case "crunch" => // crunch... + def receive = { case "crunch" => // crunch... } } @@ -211,14 +208,15 @@ class Swapper extends Actor { import context._ val log = Logging(system, this) - def receive = { - case Swap => - log.info("Hi") - become({ - case Swap => - log.info("Ho") - unbecome() // resets the latest 'become' (just for fun) - }, discardOld = false) // push on top instead of replace + def receive = { case Swap => + log.info("Hi") + become( + { case Swap => + log.info("Ho") + unbecome() // resets the latest 'become' (just for fun) + }, + discardOld = false + ) // push on top instead of replace } } @@ -239,9 +237,8 @@ object SwapperApp extends App { trait ProducerBehavior { this: Actor => - val producerBehavior: Receive = { - case GiveMeThings => - sender() ! Give("thing") + val producerBehavior: Receive = { case GiveMeThings => + sender() ! Give("thing") } } @@ -290,15 +287,15 @@ class ActorDocSpec extends AkkaSpec(""" "import context" in { new AnyRef { - //#import-context + // #import-context class FirstActor extends Actor { import context._ val myActor = actorOf(Props[MyActor](), name = "myactor") - def receive = { - case x => myActor ! x + def receive = { case x => + myActor ! 
x } } - //#import-context + // #import-context val first = system.actorOf(Props(classOf[FirstActor], this), name = "first") system.stop(first) @@ -331,46 +328,46 @@ class ActorDocSpec extends AkkaSpec(""" } "instantiates a case class" in { - //#immutable-message-instantiation + // #immutable-message-instantiation val user = User("Mike") // create a new case class message val message = Register(user) - //#immutable-message-instantiation + // #immutable-message-instantiation } "use poison pill" in { val victim = system.actorOf(Props[MyActor]()) - //#poison-pill + // #poison-pill watch(victim) victim ! PoisonPill - //#poison-pill + // #poison-pill expectTerminated(victim) } "creating a Props config" in { - //#creating-props + // #creating-props import akka.actor.Props val props1 = Props[MyActor]() val props2 = Props(new ActorWithArgs("arg")) // careful, see below val props3 = Props(classOf[ActorWithArgs], "arg") // no support for value class arguments - //#creating-props + // #creating-props - //#creating-props-deprecated + // #creating-props-deprecated // NOT RECOMMENDED within another actor: // encourages to close over enclosing class val props7 = Props(new MyActor) - //#creating-props-deprecated + // #creating-props-deprecated } "creating actor with Props" in { - //#system-actorOf + // #system-actorOf import akka.actor.ActorSystem // ActorSystem is a heavy object: create only one per application val system = ActorSystem("mySystem") val myActor = system.actorOf(Props[MyActor](), "myactor2") - //#system-actorOf + // #system-actorOf shutdown(system) } private abstract class DummyActorProxy { @@ -383,9 +380,9 @@ class ActorDocSpec extends AkkaSpec(""" case n: Int => sender() ! 
name case message => val target = testActor - //#forward + // #forward target.forward(message) - //#forward + // #forward } } @@ -393,32 +390,32 @@ class ActorDocSpec extends AkkaSpec(""" val a: DummyActorProxy = new DummyActorProxy() { val applicationContext = this - //#creating-indirectly + // #creating-indirectly import akka.actor.IndirectActorProducer class DependencyInjector(applicationContext: AnyRef, beanName: String) extends IndirectActorProducer { override def actorClass = classOf[Actor] override def produce() = - //#obtain-fresh-Actor-instance-from-DI-framework + // #obtain-fresh-Actor-instance-from-DI-framework new Echo(beanName) def this(beanName: String) = this("", beanName) - //#obtain-fresh-Actor-instance-from-DI-framework + // #obtain-fresh-Actor-instance-from-DI-framework } val actorRef: ActorRef = system.actorOf(Props(classOf[DependencyInjector], applicationContext, "hello"), "helloBean") - //#creating-indirectly + // #creating-indirectly } val actorRef = a.actorRef val message = 42 implicit val self = testActor - //#tell + // #tell actorRef ! message - //#tell + // #tell expectMsg("hello") actorRef ! "huhu" expectMsg("huhu") @@ -426,29 +423,29 @@ class ActorDocSpec extends AkkaSpec(""" "using implicit timeout" in { val myActor = system.actorOf(Props[FirstActor]()) - //#using-implicit-timeout + // #using-implicit-timeout import scala.concurrent.duration._ import akka.util.Timeout import akka.pattern.ask implicit val timeout: Timeout = 5.seconds val future = myActor ? 
"hello" - //#using-implicit-timeout + // #using-implicit-timeout Await.result(future, timeout.duration) should be("hello") } "using explicit timeout" in { val myActor = system.actorOf(Props[FirstActor]()) - //#using-explicit-timeout + // #using-explicit-timeout import scala.concurrent.duration._ import akka.pattern.ask val future = myActor.ask("hello")(5 seconds) - //#using-explicit-timeout + // #using-explicit-timeout Await.result(future, 5 seconds) should be("hello") } "using receiveTimeout" in { - //#receive-timeout + // #receive-timeout import akka.actor.ReceiveTimeout import scala.concurrent.duration._ class MyActor extends Actor { @@ -464,10 +461,10 @@ class ActorDocSpec extends AkkaSpec(""" throw new RuntimeException("Receive timed out") } } - //#receive-timeout + // #receive-timeout } - //#hot-swap-actor + // #hot-swap-actor class HotSwapActor extends Actor { import context._ def angry: Receive = { @@ -485,35 +482,38 @@ class ActorDocSpec extends AkkaSpec(""" case "bar" => become(happy) } } - //#hot-swap-actor + // #hot-swap-actor "using hot-swap" in { val actor = system.actorOf(Props(classOf[HotSwapActor], this), name = "hot") } "using Stash" in { - //#stash + // #stash import akka.actor.Stash class ActorWithProtocol extends Actor with Stash { def receive = { case "open" => unstashAll() - context.become({ - case "write" => // do writing... - case "close" => - unstashAll() - context.unbecome() - case msg => stash() - }, discardOld = false) // stack on top instead of replacing + context.become( + { + case "write" => // do writing... + case "close" => + unstashAll() + context.unbecome() + case msg => stash() + }, + discardOld = false + ) // stack on top instead of replacing case msg => stash() } } - //#stash + // #stash } "using watch" in { new AnyRef { - //#watch + // #watch import akka.actor.{ Actor, Props, Terminated } class WatchActor extends Actor { @@ -529,7 +529,7 @@ class ActorDocSpec extends AkkaSpec(""" lastSender ! 
"finished" } } - //#watch + // #watch val victim = system.actorOf(Props(classOf[WatchActor], this)) victim.tell("kill", testActor) @@ -542,7 +542,7 @@ class ActorDocSpec extends AkkaSpec(""" implicit val sender = testActor val context = this - //#kill + // #kill context.watch(victim) // watch the Actor to receive Terminated message once it dies victim ! Kill @@ -550,31 +550,31 @@ class ActorDocSpec extends AkkaSpec(""" expectMsgPF(hint = "expecting victim to terminate") { case Terminated(v) if v == victim => v // the Actor has indeed terminated } - //#kill + // #kill } "demonstrate ActorSelection" in { val context = system - //#selection-local + // #selection-local // will look up this absolute path context.actorSelection("/user/serviceA/aggregator") // will look up sibling beneath same supervisor context.actorSelection("../joe") - //#selection-local - //#selection-wildcard + // #selection-local + // #selection-wildcard // will look all children to serviceB with names starting with worker context.actorSelection("/user/serviceB/worker*") // will look up all siblings beneath same supervisor context.actorSelection("../*") - //#selection-wildcard - //#selection-remote + // #selection-wildcard + // #selection-remote context.actorSelection("akka://app@otherhost:1234/user/serviceB") - //#selection-remote + // #selection-remote } "using Identify" in { new AnyRef { - //#identify + // #identify import akka.actor.{ Actor, ActorIdentity, Identify, Props, Terminated } class Follower extends Actor { @@ -589,11 +589,11 @@ class ActorDocSpec extends AkkaSpec(""" } - def active(another: ActorRef): Actor.Receive = { - case Terminated(`another`) => context.stop(self) + def active(another: ActorRef): Actor.Receive = { case Terminated(`another`) => + context.stop(self) } } - //#identify + // #identify val a = system.actorOf(Props.empty) val b = system.actorOf(Props(classOf[Follower], this)) @@ -605,7 +605,7 @@ class ActorDocSpec extends AkkaSpec(""" "using pattern gracefulStop" in { 
val actorRef = system.actorOf(Props[Manager]()) - //#gracefulStop + // #gracefulStop import akka.pattern.gracefulStop import scala.concurrent.Await @@ -617,12 +617,12 @@ class ActorDocSpec extends AkkaSpec(""" // the actor wasn't stopped within 5 seconds case e: akka.pattern.AskTimeoutException => } - //#gracefulStop + // #gracefulStop } "using pattern ask / pipeTo" in { val actorA, actorB, actorC, actorD = system.actorOf(Props.empty) - //#ask-pipeTo + // #ask-pipeTo import akka.pattern.{ ask, pipe } import system.dispatcher // The ExecutionContext that will be used final case class Result(x: Int, s: String, d: Double) @@ -639,20 +639,20 @@ class ActorDocSpec extends AkkaSpec(""" f.pipeTo(actorD) // .. or .. pipe(f) to actorD - //#ask-pipeTo + // #ask-pipeTo } class Replier extends Actor { def receive = { case ref: ActorRef => - //#reply-with-sender + // #reply-with-sender sender().tell("reply", context.parent) // replies will go back to parent sender().!("reply")(context.parent) // alternative syntax - //#reply-with-sender + // #reply-with-sender case x => - //#reply-without-sender + // #reply-without-sender sender() ! x // replies will go to this actor - //#reply-without-sender + // #reply-without-sender } } @@ -674,13 +674,13 @@ class ActorDocSpec extends AkkaSpec(""" { // https://github.com/akka/akka/issues/29056 val someActor = system.actorOf(Props(classOf[Replier], this)) someActor ! 
PoisonPill - //#coordinated-shutdown-addActorTerminationTask + // #coordinated-shutdown-addActorTerminationTask CoordinatedShutdown(system).addActorTerminationTask( CoordinatedShutdown.PhaseBeforeServiceUnbind, "someTaskName", someActor, Some("stop")) - //#coordinated-shutdown-addActorTerminationTask + // #coordinated-shutdown-addActorTerminationTask } } diff --git a/akka-docs/src/test/scala/docs/actor/ByteBufferSerializerDocSpec.scala b/akka-docs/src/test/scala/docs/actor/ByteBufferSerializerDocSpec.scala index 086b8312f3e..83dec6e0acc 100644 --- a/akka-docs/src/test/scala/docs/actor/ByteBufferSerializerDocSpec.scala +++ b/akka-docs/src/test/scala/docs/actor/ByteBufferSerializerDocSpec.scala @@ -13,7 +13,7 @@ import akka.serialization.SerializerWithStringManifest class ByteBufferSerializerDocSpec { - //#bytebufserializer-with-manifest + // #bytebufserializer-with-manifest class ExampleByteBufSerializer extends SerializerWithStringManifest with ByteBufferSerializer { override def identifier: Int = 1337 override def manifest(o: AnyRef): String = "naive-toStringImpl" @@ -38,6 +38,6 @@ class ByteBufferSerializerDocSpec { override def toBinary(o: AnyRef, buf: ByteBuffer): Unit = ??? // implement actual logic here override def fromBinary(buf: ByteBuffer, manifest: String): AnyRef = ??? 
// implement actual logic here } - //#bytebufserializer-with-manifest + // #bytebufserializer-with-manifest } diff --git a/akka-docs/src/test/scala/docs/actor/FSMDocSpec.scala b/akka-docs/src/test/scala/docs/actor/FSMDocSpec.scala index 00e4ead9a39..c11d00f590f 100644 --- a/akka-docs/src/test/scala/docs/actor/FSMDocSpec.scala +++ b/akka-docs/src/test/scala/docs/actor/FSMDocSpec.scala @@ -15,9 +15,9 @@ import scala.collection.immutable object FSMDocSpec { // messages and data types - //#test-code + // #test-code import akka.actor.ActorRef - //#simple-events + // #simple-events // received events final case class SetTarget(ref: ActorRef) final case class Queue(obj: Any) @@ -25,8 +25,8 @@ object FSMDocSpec { // sent events final case class Batch(obj: immutable.Seq[Any]) - //#simple-events - //#simple-state + // #simple-events + // #simple-state // states sealed trait State case object Idle extends State @@ -35,49 +35,46 @@ object FSMDocSpec { sealed trait Data case object Uninitialized extends Data final case class Todo(target: ActorRef, queue: immutable.Seq[Any]) extends Data - //#simple-state - //#test-code + // #simple-state + // #test-code } class FSMDocSpec extends MyFavoriteTestFrameWorkPlusAkkaTestKit { import FSMDocSpec._ - //#fsm-code-elided - //#simple-imports + // #fsm-code-elided + // #simple-imports import akka.actor.{ ActorRef, FSM } import scala.concurrent.duration._ - //#simple-imports - //#simple-fsm + // #simple-imports + // #simple-fsm class Buncher extends FSM[State, Data] { - //#fsm-body + // #fsm-body startWith(Idle, Uninitialized) - //#when-syntax - when(Idle) { - case Event(SetTarget(ref), Uninitialized) => - stay().using(Todo(ref, Vector.empty)) + // #when-syntax + when(Idle) { case Event(SetTarget(ref), Uninitialized) => + stay().using(Todo(ref, Vector.empty)) } - //#when-syntax - - //#transition-elided - onTransition { - case Active -> Idle => - stateData match { - case Todo(ref, queue) => ref ! 
Batch(queue) - case _ => // nothing to do - } + // #when-syntax + + // #transition-elided + onTransition { case Active -> Idle => + stateData match { + case Todo(ref, queue) => ref ! Batch(queue) + case _ => // nothing to do + } } - //#transition-elided - //#when-syntax + // #transition-elided + // #when-syntax - when(Active, stateTimeout = 1 second) { - case Event(Flush | StateTimeout, t: Todo) => - goto(Idle).using(t.copy(queue = Vector.empty)) + when(Active, stateTimeout = 1 second) { case Event(Flush | StateTimeout, t: Todo) => + goto(Idle).using(t.copy(queue = Vector.empty)) } - //#when-syntax + // #when-syntax - //#unhandled-elided + // #unhandled-elided whenUnhandled { // common code for both states case Event(Queue(obj), t @ Todo(_, v)) => @@ -87,12 +84,12 @@ class FSMDocSpec extends MyFavoriteTestFrameWorkPlusAkkaTestKit { log.warning("received unhandled request {} in state {}/{}", e, stateName, s) stay() } - //#unhandled-elided - //#fsm-body + // #unhandled-elided + // #fsm-body initialize() } - //#simple-fsm + // #simple-fsm object DemoCode { trait StateType case object SomeState extends StateType @@ -107,66 +104,64 @@ class FSMDocSpec extends MyFavoriteTestFrameWorkPlusAkkaTestKit { object WillDo object Tick - //#modifier-syntax - when(SomeState) { - case Event(msg, _) => - goto(Processing).using(newData).forMax(5 seconds).replying(WillDo) + // #modifier-syntax + when(SomeState) { case Event(msg, _) => + goto(Processing).using(newData).forMax(5 seconds).replying(WillDo) } - //#modifier-syntax + // #modifier-syntax - //#transition-syntax + // #transition-syntax onTransition { case Idle -> Active => startTimerWithFixedDelay("timeout", Tick, 1 second) case Active -> _ => cancelTimer("timeout") case x -> Idle => log.info("entering Idle from " + x) } - //#transition-syntax + // #transition-syntax - //#alt-transition-syntax + // #alt-transition-syntax onTransition(handler _) def handler(from: StateType, to: StateType): Unit = { // handle it here ... 
} - //#alt-transition-syntax + // #alt-transition-syntax - //#stop-syntax - when(Error) { - case Event("stop", _) => - // do cleanup ... - stop() + // #stop-syntax + when(Error) { case Event("stop", _) => + // do cleanup ... + stop() } - //#stop-syntax + // #stop-syntax - //#transform-syntax - when(SomeState)(transform { - case Event(bytes: ByteString, read) => stay().using(read + bytes.length) + // #transform-syntax + when(SomeState)(transform { case Event(bytes: ByteString, read) => + stay().using(read + bytes.length) }.using { case s @ FSM.State(state, read, timeout, stopReason, replies) if read > 1000 => goto(Processing) }) - //#transform-syntax + // #transform-syntax - //#alt-transform-syntax + // #alt-transform-syntax val processingTrigger: PartialFunction[State, State] = { case s @ FSM.State(state, read, timeout, stopReason, replies) if read > 1000 => goto(Processing) } - when(SomeState)(transform { - case Event(bytes: ByteString, read) => stay().using(read + bytes.length) + when(SomeState)(transform { case Event(bytes: ByteString, read) => + stay().using(read + bytes.length) }.using(processingTrigger)) - //#alt-transform-syntax + // #alt-transform-syntax - //#termination-syntax + // #termination-syntax onTermination { case StopEvent(FSM.Normal, state, data) => // ... case StopEvent(FSM.Shutdown, state, data) => // ... case StopEvent(FSM.Failure(cause), state, data) => // ... 
} - //#termination-syntax + // #termination-syntax - //#unhandled-syntax + // #unhandled-syntax whenUnhandled { case Event(x: X, data) => log.info("Received unhandled event: " + x) @@ -175,38 +170,37 @@ class FSMDocSpec extends MyFavoriteTestFrameWorkPlusAkkaTestKit { log.warning("Received unknown event: " + msg) goto(Error) } - //#unhandled-syntax + // #unhandled-syntax } - //#logging-fsm + // #logging-fsm import akka.actor.LoggingFSM class MyFSM extends LoggingFSM[StateType, Data] { - //#body-elided + // #body-elided override def logDepth = 12 - onTermination { - case StopEvent(FSM.Failure(_), state, data) => - val lastEvents = getLog.mkString("\n\t") - log.warning( - "Failure in state " + state + " with data " + data + "\n" + - "Events leading up to this point:\n\t" + lastEvents) + onTermination { case StopEvent(FSM.Failure(_), state, data) => + val lastEvents = getLog.mkString("\n\t") + log.warning( + "Failure in state " + state + " with data " + data + "\n" + + "Events leading up to this point:\n\t" + lastEvents) } // ... 
- //#body-elided + // #body-elided } - //#logging-fsm + // #logging-fsm } - //#fsm-code-elided + // #fsm-code-elided "simple finite state machine" must { "demonstrate NullFunction" in { class A extends FSM[Int, Null] { val SomeState = 0 - //#NullFunction + // #NullFunction when(SomeState)(FSM.NullFunction) - //#NullFunction + // #NullFunction } } diff --git a/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSample.scala b/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSample.scala index 0a8c110271f..a017ea546b9 100644 --- a/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSample.scala +++ b/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSample.scala @@ -17,9 +17,7 @@ import akka.pattern.{ ask, pipe } import com.typesafe.config.ConfigFactory //#imports -/** - * Runs the sample - */ +/** Runs the sample */ object FaultHandlingDocSample extends App { import Worker._ @@ -83,8 +81,8 @@ class Worker extends Actor with ActorLogging { implicit val askTimeout: Timeout = Timeout(5 seconds) // Stop the CounterService child if it throws ServiceUnavailable - override val supervisorStrategy = OneForOneStrategy() { - case _: CounterService.ServiceUnavailable => Stop + override val supervisorStrategy = OneForOneStrategy() { case _: CounterService.ServiceUnavailable => + Stop } // The sender of the initial Start message will continuously be notified @@ -106,8 +104,8 @@ class Worker extends Actor with ActorLogging { // Send current progress to the initial sender (counterService ? 
GetCurrentCount) - .map { - case CurrentCount(_, count) => Progress(100.0 * count / totalCount) + .map { case CurrentCount(_, count) => + Progress(100.0 * count / totalCount) } .pipeTo(progressListener.get) } diff --git a/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSpec.scala b/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSpec.scala index bda0fbba33d..740d681c7c2 100644 --- a/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSpec.scala +++ b/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSpec.scala @@ -17,13 +17,13 @@ import akka.testkit.{ EventFilter, ImplicitSender, TestKit } //#testkit object FaultHandlingDocSpec { - //#supervisor - //#child + // #supervisor + // #child import akka.actor.Actor - //#child + // #child class Supervisor extends Actor { - //#strategy + // #strategy import akka.actor.OneForOneStrategy import akka.actor.SupervisorStrategy._ import scala.concurrent.duration._ @@ -35,17 +35,17 @@ object FaultHandlingDocSpec { case _: IllegalArgumentException => Stop case _: Exception => Escalate } - //#strategy + // #strategy - def receive = { - case p: Props => sender() ! context.actorOf(p) + def receive = { case p: Props => + sender() ! context.actorOf(p) } } - //#supervisor + // #supervisor - //#supervisor2 + // #supervisor2 class Supervisor2 extends Actor { - //#strategy2 + // #strategy2 import akka.actor.OneForOneStrategy import akka.actor.SupervisorStrategy._ import scala.concurrent.duration._ @@ -57,18 +57,18 @@ object FaultHandlingDocSpec { case _: IllegalArgumentException => Stop case _: Exception => Escalate } - //#strategy2 + // #strategy2 - def receive = { - case p: Props => sender() ! context.actorOf(p) + def receive = { case p: Props => + sender() ! 
context.actorOf(p) } // override default to kill all children during restart override def preRestart(cause: Throwable, msg: Option[Any]): Unit = {} } - //#supervisor2 + // #supervisor2 class Supervisor3 extends Actor { - //#default-strategy-fallback + // #default-strategy-fallback import akka.actor.OneForOneStrategy import akka.actor.SupervisorStrategy._ import scala.concurrent.duration._ @@ -79,12 +79,12 @@ object FaultHandlingDocSpec { case t => super.supervisorStrategy.decider.applyOrElse(t, (_: Any) => Escalate) } - //#default-strategy-fallback + // #default-strategy-fallback def receive = Actor.emptyBehavior } - //#child + // #child class Child extends Actor { var state = 0 def receive = { @@ -93,7 +93,7 @@ object FaultHandlingDocSpec { case "get" => sender() ! state } } - //#child + // #child val testConf: Config = ConfigFactory.parseString(""" akka { @@ -126,16 +126,16 @@ class FaultHandlingDocSpec(_system: ActorSystem) "A supervisor" must { "apply the chosen strategy for its child" in { - //#testkit + // #testkit - //#create + // #create val supervisor = system.actorOf(Props[Supervisor](), "supervisor") supervisor ! Props[Child]() val child = expectMsgType[ActorRef] // retrieve answer from TestKit’s testActor - //#create + // #create EventFilter.warning(occurrences = 1).intercept { - //#resume + // #resume child ! 42 // set state to 42 child ! "get" expectMsg(42) @@ -143,24 +143,24 @@ class FaultHandlingDocSpec(_system: ActorSystem) child ! new ArithmeticException // crash it child ! "get" expectMsg(42) - //#resume + // #resume } EventFilter[NullPointerException](occurrences = 1).intercept { - //#restart + // #restart child ! new NullPointerException // crash it harder child ! "get" expectMsg(0) - //#restart + // #restart } EventFilter[IllegalArgumentException](occurrences = 1).intercept { - //#stop + // #stop watch(child) // have testActor watch “child” child ! 
new IllegalArgumentException // break it expectMsgPF() { case Terminated(`child`) => () } - //#stop + // #stop } EventFilter[Exception]("CRASH", occurrences = 2).intercept { - //#escalate-kill + // #escalate-kill supervisor ! Props[Child]() // create new child val child2 = expectMsgType[ActorRef] watch(child2) @@ -171,8 +171,8 @@ class FaultHandlingDocSpec(_system: ActorSystem) expectMsgPF() { case t @ Terminated(`child2`) if t.existenceConfirmed => () } - //#escalate-kill - //#escalate-restart + // #escalate-kill + // #escalate-restart val supervisor2 = system.actorOf(Props[Supervisor2](), "supervisor2") supervisor2 ! Props[Child]() @@ -185,9 +185,9 @@ class FaultHandlingDocSpec(_system: ActorSystem) child3 ! new Exception("CRASH") child3 ! "get" expectMsg(0) - //#escalate-restart + // #escalate-restart } - //#testkit + // #testkit // code here } } diff --git a/akka-docs/src/test/scala/docs/actor/InitializationDocSpec.scala b/akka-docs/src/test/scala/docs/actor/InitializationDocSpec.scala index c93e092e86c..fc997baa5d8 100644 --- a/akka-docs/src/test/scala/docs/actor/InitializationDocSpec.scala +++ b/akka-docs/src/test/scala/docs/actor/InitializationDocSpec.scala @@ -10,11 +10,10 @@ import akka.testkit.{ AkkaSpec, ImplicitSender } object InitializationDocSpec { class PreStartInitExample extends Actor { - override def receive = { - case _ => // Ignore + override def receive = { case _ => // Ignore } - //#preStartInit + // #preStartInit override def preStart(): Unit = { // Initialize children here } @@ -30,24 +29,23 @@ object InitializationDocSpec { // Keep the call to postStop(), but no stopping of children postStop() } - //#preStartInit + // #preStartInit } class MessageInitExample extends Actor { - //#messageInit + // #messageInit var initializeMe: Option[String] = None - override def receive = { - case "init" => - initializeMe = Some("Up and running") - context.become(initialized, discardOld = true) + override def receive = { case "init" => + initializeMe = 
Some("Up and running") + context.become(initialized, discardOld = true) } - def initialized: Receive = { - case "U OK?" => initializeMe.foreach { sender() ! _ } + def initialized: Receive = { case "U OK?" => + initializeMe.foreach { sender() ! _ } } - //#messageInit + // #messageInit } } diff --git a/akka-docs/src/test/scala/docs/actor/PropsEdgeCaseSpec.scala b/akka-docs/src/test/scala/docs/actor/PropsEdgeCaseSpec.scala index 9657abd175a..61c2d3745fd 100644 --- a/akka-docs/src/test/scala/docs/actor/PropsEdgeCaseSpec.scala +++ b/akka-docs/src/test/scala/docs/actor/PropsEdgeCaseSpec.scala @@ -15,31 +15,31 @@ case class MyValueClass(v: Int) extends AnyVal class PropsEdgeCaseSpec extends AnyWordSpec with CompileOnlySpec { "value-class-edge-case-example" in compileOnlySpec { - //#props-edge-cases-value-class-example + // #props-edge-cases-value-class-example class ValueActor(value: MyValueClass) extends Actor { - def receive = { - case multiplier: Long => sender() ! (value.v * multiplier) + def receive = { case multiplier: Long => + sender() ! (value.v * multiplier) } } val valueClassProp = Props(classOf[ValueActor], MyValueClass(5)) // Unsupported - //#props-edge-cases-value-class-example + // #props-edge-cases-value-class-example - //#props-edge-cases-default-values + // #props-edge-cases-default-values class DefaultValueActor(a: Int, b: Int = 5) extends Actor { - def receive = { - case x: Int => sender() ! ((a + x) * b) + def receive = { case x: Int => + sender() ! ((a + x) * b) } } val defaultValueProp1 = Props(classOf[DefaultValueActor], 2.0) // Unsupported class DefaultValueActor2(b: Int = 5) extends Actor { - def receive = { - case x: Int => sender() ! (x * b) + def receive = { case x: Int => + sender() ! 
(x * b) } } val defaultValueProp2 = Props[DefaultValueActor2]() // Unsupported val defaultValueProp3 = Props(classOf[DefaultValueActor2]) // Unsupported - //#props-edge-cases-default-values + // #props-edge-cases-default-values } } diff --git a/akka-docs/src/test/scala/docs/actor/SchedulerDocSpec.scala b/akka-docs/src/test/scala/docs/actor/SchedulerDocSpec.scala index 4e9196b0f56..543089366e8 100644 --- a/akka-docs/src/test/scala/docs/actor/SchedulerDocSpec.scala +++ b/akka-docs/src/test/scala/docs/actor/SchedulerDocSpec.scala @@ -17,46 +17,45 @@ import akka.testkit._ class SchedulerDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { "schedule a one-off task" in { - //#schedule-one-off-message - //Use the system's dispatcher as ExecutionContext + // #schedule-one-off-message + // Use the system's dispatcher as ExecutionContext import system.dispatcher - //Schedules to send the "foo"-message to the testActor after 50ms + // Schedules to send the "foo"-message to the testActor after 50ms system.scheduler.scheduleOnce(50 milliseconds, testActor, "foo") - //#schedule-one-off-message + // #schedule-one-off-message expectMsg(1 second, "foo") - //#schedule-one-off-thunk - //Schedules a function to be executed (send a message to the testActor) after 50ms + // #schedule-one-off-thunk + // Schedules a function to be executed (send a message to the testActor) after 50ms system.scheduler.scheduleOnce(50 milliseconds) { testActor ! 
System.currentTimeMillis } - //#schedule-one-off-thunk + // #schedule-one-off-thunk } "schedule a recurring task" in { new AnyRef { - //#schedule-recurring + // #schedule-recurring val Tick = "tick" class TickActor extends Actor { - def receive = { - case Tick => //Do something + def receive = { case Tick => // Do something } } val tickActor = system.actorOf(Props(classOf[TickActor], this)) - //Use system's dispatcher as ExecutionContext + // Use system's dispatcher as ExecutionContext import system.dispatcher - //This will schedule to send the Tick-message - //to the tickActor after 0ms repeating every 50ms + // This will schedule to send the Tick-message + // to the tickActor after 0ms repeating every 50ms val cancellable = system.scheduler.scheduleWithFixedDelay(Duration.Zero, 50.milliseconds, tickActor, Tick) - //This cancels further Ticks to be sent + // This cancels further Ticks to be sent cancellable.cancel() - //#schedule-recurring + // #schedule-recurring system.stop(tickActor) } } diff --git a/akka-docs/src/test/scala/docs/actor/SharedMutableStateDocSpec.scala b/akka-docs/src/test/scala/docs/actor/SharedMutableStateDocSpec.scala index f607e6e9d22..1fdecc4bed8 100644 --- a/akka-docs/src/test/scala/docs/actor/SharedMutableStateDocSpec.scala +++ b/akka-docs/src/test/scala/docs/actor/SharedMutableStateDocSpec.scala @@ -6,7 +6,7 @@ package docs.actor class SharedMutableStateDocSpec { - //#mutable-state + // #mutable-state import akka.actor.{ Actor, ActorRef } import akka.pattern.ask import akka.util.Timeout @@ -18,14 +18,14 @@ class SharedMutableStateDocSpec { case class Message(msg: String) class EchoActor extends Actor { - def receive = { - case msg => sender() ! msg + def receive = { case msg => + sender() ! 
msg } } class CleanUpActor extends Actor { - def receive = { - case set: mutable.Set[_] => set.clear() + def receive = { case set: mutable.Set[_] => + set.clear() } } @@ -43,38 +43,37 @@ class SharedMutableStateDocSpec { "Meaning of life is 42" } - def receive = { - case _ => - implicit val ec = context.dispatcher - implicit val timeout: Timeout = 5.seconds // needed for `?` below + def receive = { case _ => + implicit val ec = context.dispatcher + implicit val timeout: Timeout = 5.seconds // needed for `?` below - // Example of incorrect approach - // Very bad: shared mutable state will cause your - // application to break in weird ways - Future { state = "This will race" } - ((echoActor ? Message("With this other one")).mapTo[Message]).foreach { received => - state = received.msg - } + // Example of incorrect approach + // Very bad: shared mutable state will cause your + // application to break in weird ways + Future { state = "This will race" } + (echoActor ? Message("With this other one")).mapTo[Message].foreach { received => + state = received.msg + } - // Very bad: shared mutable object allows - // the other actor to mutate your own state, - // or worse, you might get weird race conditions - cleanUpActor ! mySet + // Very bad: shared mutable object allows + // the other actor to mutate your own state, + // or worse, you might get weird race conditions + cleanUpActor ! mySet - // Very bad: "sender" changes for every message, - // shared mutable state bug - Future { expensiveCalculation(sender()) } + // Very bad: "sender" changes for every message, + // shared mutable state bug + Future { expensiveCalculation(sender()) } - // Example of correct approach - // Completely safe: "self" is OK to close over - // and it's an ActorRef, which is thread-safe - Future { expensiveCalculation() }.foreach { self ! 
_ } + // Example of correct approach + // Completely safe: "self" is OK to close over + // and it's an ActorRef, which is thread-safe + Future { expensiveCalculation() }.foreach { self ! _ } - // Completely safe: we close over a fixed value - // and it's an ActorRef, which is thread-safe - val currentSender = sender() - Future { expensiveCalculation(currentSender) } + // Completely safe: we close over a fixed value + // and it's an ActorRef, which is thread-safe + val currentSender = sender() + Future { expensiveCalculation(currentSender) } } } - //#mutable-state + // #mutable-state } diff --git a/akka-docs/src/test/scala/docs/actor/TimerDocSpec.scala b/akka-docs/src/test/scala/docs/actor/TimerDocSpec.scala index 1db489f4d70..8be441a210d 100644 --- a/akka-docs/src/test/scala/docs/actor/TimerDocSpec.scala +++ b/akka-docs/src/test/scala/docs/actor/TimerDocSpec.scala @@ -5,7 +5,7 @@ package docs.actor object TimerDocSpec { - //#timers + // #timers import scala.concurrent.duration._ import akka.actor.Actor @@ -29,5 +29,5 @@ object TimerDocSpec { // do something useful here } } - //#timers + // #timers } diff --git a/akka-docs/src/test/scala/docs/actor/UnnestedReceives.scala b/akka-docs/src/test/scala/docs/actor/UnnestedReceives.scala index 05d00f679a1..f4d82421166 100644 --- a/akka-docs/src/test/scala/docs/actor/UnnestedReceives.scala +++ b/akka-docs/src/test/scala/docs/actor/UnnestedReceives.scala @@ -16,36 +16,35 @@ import scala.collection.mutable.ListBuffer */ class UnnestedReceives extends Actor { import context.become - //If you need to store sender/senderFuture you can change it to ListBuffer[(Any, Channel)] + // If you need to store sender/senderFuture you can change it to ListBuffer[(Any, Channel)] val queue = new ListBuffer[Any]() - //This message processes a message/event + // This message processes a message/event def process(msg: Any): Unit = println("processing: " + msg) - //This method subscribes the actor to the event bus - def subscribe(): Unit = {} 
//Your external stuff - //This method retrieves all prior messages/events + // This method subscribes the actor to the event bus + def subscribe(): Unit = {} // Your external stuff + // This method retrieves all prior messages/events def allOldMessages() = List() override def preStart(): Unit = { - //We override preStart to be sure that the first message the actor gets is - //Replay, that message will start to be processed _after_ the actor is started + // We override preStart to be sure that the first message the actor gets is + // Replay, that message will start to be processed _after_ the actor is started self ! "Replay" - //Then we subscribe to the stream of messages/events + // Then we subscribe to the stream of messages/events subscribe() } - def receive = { - case "Replay" => //Our first message should be a Replay message, all others are invalid - allOldMessages().foreach(process) //Process all old messages/events - become { //Switch behavior to look for the GoAhead signal - case "GoAhead" => //When we get the GoAhead signal we process all our buffered messages/events - queue.foreach(process) - queue.clear() - become { //Then we change behavior to process incoming messages/events as they arrive - case msg => process(msg) - } - case msg => //While we haven't gotten the GoAhead signal, buffer all incoming messages - queue += msg //Here you have full control, you can handle overflow etc - } + def receive = { case "Replay" => // Our first message should be a Replay message, all others are invalid + allOldMessages().foreach(process) // Process all old messages/events + become { // Switch behavior to look for the GoAhead signal + case "GoAhead" => // When we get the GoAhead signal we process all our buffered messages/events + queue.foreach(process) + queue.clear() + become { // Then we change behavior to process incoming messages/events as they arrive + case msg => process(msg) + } + case msg => // While we haven't gotten the GoAhead signal, buffer all incoming 
messages + queue += msg // Here you have full control, you can handle overflow etc + } } } diff --git a/akka-docs/src/test/scala/docs/actor/io/dns/DnsCompileOnlyDocSpec.scala b/akka-docs/src/test/scala/docs/actor/io/dns/DnsCompileOnlyDocSpec.scala index cf27c07acc0..81bd3c29fdb 100644 --- a/akka-docs/src/test/scala/docs/actor/io/dns/DnsCompileOnlyDocSpec.scala +++ b/akka-docs/src/test/scala/docs/actor/io/dns/DnsCompileOnlyDocSpec.scala @@ -20,30 +20,30 @@ object DnsCompileOnlyDocSpec { implicit val timeout: Timeout = Timeout(1.second) val actorRef: ActorRef = ??? - //#resolve + // #resolve val initial: Option[Dns.Resolved] = Dns(system).cache.resolve("google.com")(system, actorRef) val cached: Option[Dns.Resolved] = Dns(system).cache.cached("google.com") - //#resolve + // #resolve { - //#actor-api-inet-address + // #actor-api-inet-address val resolved: Future[Dns.Resolved] = (IO(Dns) ? Dns.Resolve("google.com")).mapTo[Dns.Resolved] - //#actor-api-inet-address + // #actor-api-inet-address } { - //#actor-api-async + // #actor-api-async val resolved: Future[DnsProtocol.Resolved] = (IO(Dns) ? DnsProtocol.Resolve("google.com")).mapTo[DnsProtocol.Resolved] - //#actor-api-async + // #actor-api-async } { - //#srv + // #srv val resolved: Future[DnsProtocol.Resolved] = (IO(Dns) ? DnsProtocol.Resolve("your-service", Srv)).mapTo[DnsProtocol.Resolved] - //#srv + // #srv } } diff --git a/akka-docs/src/test/scala/docs/actor/typed/BlockingActor.scala b/akka-docs/src/test/scala/docs/actor/typed/BlockingActor.scala index 772e9e1792c..803a8043162 100644 --- a/akka-docs/src/test/scala/docs/actor/typed/BlockingActor.scala +++ b/akka-docs/src/test/scala/docs/actor/typed/BlockingActor.scala @@ -14,7 +14,7 @@ object BlockingActor { // DO NOT DO THIS HERE: this is an example of incorrect code, // better alternatives are described further on. 
- //block for 5 seconds, representing blocking I/O, etc + // block for 5 seconds, representing blocking I/O, etc Thread.sleep(5000) println(s"Blocking operation finished: $i") Behaviors.same diff --git a/akka-docs/src/test/scala/docs/actor/typed/BlockingDispatcherSample.scala b/akka-docs/src/test/scala/docs/actor/typed/BlockingDispatcherSample.scala index b529642f503..69b09be42f2 100644 --- a/akka-docs/src/test/scala/docs/actor/typed/BlockingDispatcherSample.scala +++ b/akka-docs/src/test/scala/docs/actor/typed/BlockingDispatcherSample.scala @@ -25,7 +25,7 @@ object BlockingFutureActor { def triggerFutureBlockingOperation(i: Int)(implicit ec: ExecutionContext): Future[Unit] = { println(s"Calling blocking Future: $i") Future { - Thread.sleep(5000) //block for 5 seconds + Thread.sleep(5000) // block for 5 seconds println(s"Blocking future finished $i") } } @@ -48,7 +48,7 @@ object SeparateDispatcherFutureActor { def triggerFutureBlockingOperation(i: Int)(implicit ec: ExecutionContext): Future[Unit] = { println(s"Calling blocking Future: $i") Future { - Thread.sleep(5000) //block for 5 seconds + Thread.sleep(5000) // block for 5 seconds println(s"Blocking future finished $i") } } diff --git a/akka-docs/src/test/scala/docs/actor/typed/CoordinatedActorShutdownSpec.scala b/akka-docs/src/test/scala/docs/actor/typed/CoordinatedActorShutdownSpec.scala index 2e9a3bdae32..be52230eb71 100644 --- a/akka-docs/src/test/scala/docs/actor/typed/CoordinatedActorShutdownSpec.scala +++ b/akka-docs/src/test/scala/docs/actor/typed/CoordinatedActorShutdownSpec.scala @@ -16,7 +16,7 @@ import scala.concurrent.duration._ class CoordinatedActorShutdownSpec { - //#coordinated-shutdown-addTask + // #coordinated-shutdown-addTask object MyActor { trait Messages @@ -33,19 +33,19 @@ class CoordinatedActorShutdownSpec { } } - //#coordinated-shutdown-addTask + // #coordinated-shutdown-addTask trait Message def root: Behavior[Message] = Behaviors.setup[Message] { context => implicit val system = 
context.system val myActor = context.spawn(MyActor.behavior, "my-actor") - //#coordinated-shutdown-addTask + // #coordinated-shutdown-addTask CoordinatedShutdown(context.system).addTask(CoordinatedShutdown.PhaseBeforeServiceUnbind, "someTaskName") { () => implicit val timeout: Timeout = 5.seconds myActor.ask(MyActor.Stop(_)) } - //#coordinated-shutdown-addTask + // #coordinated-shutdown-addTask Behaviors.empty @@ -56,7 +56,7 @@ class CoordinatedActorShutdownSpec { def cleanup(): Unit = {} import system.executionContext - //#coordinated-shutdown-cancellable + // #coordinated-shutdown-cancellable val c: Cancellable = CoordinatedShutdown(system).addCancellableTask(CoordinatedShutdown.PhaseBeforeServiceUnbind, "cleanup") { () => Future { @@ -67,17 +67,17 @@ class CoordinatedActorShutdownSpec { // much later... c.cancel() - //#coordinated-shutdown-cancellable + // #coordinated-shutdown-cancellable - //#coordinated-shutdown-jvm-hook + // #coordinated-shutdown-jvm-hook CoordinatedShutdown(system).addJvmShutdownHook { println("custom JVM shutdown hook...") } - //#coordinated-shutdown-jvm-hook + // #coordinated-shutdown-jvm-hook // don't run this def dummy(): Unit = { - //#coordinated-shutdown-run + // #coordinated-shutdown-run // shut down with `ActorSystemTerminateReason` system.terminate() @@ -85,7 +85,7 @@ class CoordinatedActorShutdownSpec { case object UserInitiatedShutdown extends CoordinatedShutdown.Reason val done: Future[Done] = CoordinatedShutdown(system).run(UserInitiatedShutdown) - //#coordinated-shutdown-run + // #coordinated-shutdown-run } } } diff --git a/akka-docs/src/test/scala/docs/actor/typed/DispatcherDocSpec.scala b/akka-docs/src/test/scala/docs/actor/typed/DispatcherDocSpec.scala index 33c630fc36a..27f39afce59 100644 --- a/akka-docs/src/test/scala/docs/actor/typed/DispatcherDocSpec.scala +++ b/akka-docs/src/test/scala/docs/actor/typed/DispatcherDocSpec.scala @@ -11,26 +11,26 @@ object DispatcherDocSpec { val context: ActorContext[Integer] = ??? 
{ - //#defining-dispatcher-in-code + // #defining-dispatcher-in-code import akka.actor.typed.DispatcherSelector val myActor = context.spawn(PrintActor(), "PrintActor", DispatcherSelector.fromConfig("PrintActor")) - //#defining-dispatcher-in-code + // #defining-dispatcher-in-code } { - //#defining-fixed-pool-size-dispatcher + // #defining-fixed-pool-size-dispatcher import akka.actor.typed.DispatcherSelector val myActor = context.spawn(PrintActor(), "PrintActor", DispatcherSelector.fromConfig("blocking-io-dispatcher")) - //#defining-fixed-pool-size-dispatcher + // #defining-fixed-pool-size-dispatcher } { - //#lookup + // #lookup // for use with Futures, Scheduler, etc. import akka.actor.typed.DispatcherSelector implicit val executionContext = context.system.dispatchers.lookup(DispatcherSelector.fromConfig("my-dispatcher")) - //#lookup + // #lookup } } diff --git a/akka-docs/src/test/scala/docs/actor/typed/SharedMutableStateDocSpec.scala b/akka-docs/src/test/scala/docs/actor/typed/SharedMutableStateDocSpec.scala index 7300bf02f03..efd0c5b35fc 100644 --- a/akka-docs/src/test/scala/docs/actor/typed/SharedMutableStateDocSpec.scala +++ b/akka-docs/src/test/scala/docs/actor/typed/SharedMutableStateDocSpec.scala @@ -28,7 +28,7 @@ class SharedMutableStateDocSpec { new MyActor(context) } } - //#mutable-state + // #mutable-state class MyActor(context: ActorContext[MyActor.Command]) extends AbstractBehavior[MyActor.Command](context) { import MyActor._ @@ -90,5 +90,5 @@ class SharedMutableStateDocSpec { this } } - //#mutable-state + // #mutable-state } diff --git a/akka-docs/src/test/scala/docs/circuitbreaker/CircuitBreakerDocSpec.scala b/akka-docs/src/test/scala/docs/circuitbreaker/CircuitBreakerDocSpec.scala index d2a1a315116..ae732f88270 100644 --- a/akka-docs/src/test/scala/docs/circuitbreaker/CircuitBreakerDocSpec.scala +++ b/akka-docs/src/test/scala/docs/circuitbreaker/CircuitBreakerDocSpec.scala @@ -27,9 +27,9 @@ class DangerousActor extends Actor with ActorLogging { def 
notifyMeOnOpen(): Unit = log.warning("My CircuitBreaker is now open, and will not close for one minute") - //#circuit-breaker-initialization + // #circuit-breaker-initialization - //#circuit-breaker-usage + // #circuit-breaker-usage def dangerousCall: String = "This really isn't that dangerous of a call after all" def receive = { @@ -38,7 +38,7 @@ class DangerousActor extends Actor with ActorLogging { case "block for me" => sender() ! breaker.withSyncCircuitBreaker(dangerousCall) } - //#circuit-breaker-usage + // #circuit-breaker-usage } @@ -52,7 +52,7 @@ class TellPatternActor(recipient: ActorRef) extends Actor with ActorLogging { def notifyMeOnOpen(): Unit = log.warning("My CircuitBreaker is now open, and will not close for one minute") - //#circuit-breaker-tell-pattern + // #circuit-breaker-tell-pattern import akka.actor.ReceiveTimeout def receive = { @@ -69,12 +69,12 @@ class TellPatternActor(recipient: ActorRef) extends Actor with ActorLogging { breaker.fail() } } - //#circuit-breaker-tell-pattern + // #circuit-breaker-tell-pattern } class EvenNoFailureActor extends Actor { import context.dispatcher - //#even-no-as-failure + // #even-no-as-failure def luckyNumber(): Future[Int] = { val evenNumberAsFailure: Try[Int] => Boolean = { case Success(n) => n % 2 == 0 @@ -87,9 +87,8 @@ class EvenNoFailureActor extends Actor { // this call will return 8888 and increase failure count at the same time breaker.withCircuitBreaker(Future(8888), evenNumberAsFailure) } - //#even-no-as-failure + // #even-no-as-failure - override def receive = { - case x: Int => + override def receive = { case x: Int => } } diff --git a/akka-docs/src/test/scala/docs/cluster/ClusterDocSpec.scala b/akka-docs/src/test/scala/docs/cluster/ClusterDocSpec.scala index d4792842627..4c86fccfb0c 100644 --- a/akka-docs/src/test/scala/docs/cluster/ClusterDocSpec.scala +++ b/akka-docs/src/test/scala/docs/cluster/ClusterDocSpec.scala @@ -20,15 +20,15 @@ object ClusterDocSpec { class ClusterDocSpec extends 
AkkaSpec(ClusterDocSpec.config) with CompileOnlySpec { "demonstrate leave" in compileOnlySpec { - //#leave + // #leave val cluster = Cluster(system) cluster.leave(cluster.selfAddress) - //#leave + // #leave } "demonstrate data center" in compileOnlySpec { { - //#dcAccess + // #dcAccess val cluster = Cluster(system) // this node's data center val dc = cluster.selfDataCenter @@ -37,19 +37,19 @@ class ClusterDocSpec extends AkkaSpec(ClusterDocSpec.config) with CompileOnlySpe // a specific member's data center val aMember = cluster.state.members.head val aDc = aMember.dataCenter - //#dcAccess + // #dcAccess } } "demonstrate programmatic joining to seed nodes" in compileOnlySpec { - //#join-seed-nodes + // #join-seed-nodes import akka.actor.Address import akka.cluster.Cluster val cluster = Cluster(system) - val list: List[Address] = ??? //your method to dynamically get seed nodes + val list: List[Address] = ??? // your method to dynamically get seed nodes cluster.joinSeedNodes(list) - //#join-seed-nodes + // #join-seed-nodes } } diff --git a/akka-docs/src/test/scala/docs/cluster/FactorialBackend.scala b/akka-docs/src/test/scala/docs/cluster/FactorialBackend.scala index 7bb2af82cc4..8028c03a318 100644 --- a/akka-docs/src/test/scala/docs/cluster/FactorialBackend.scala +++ b/akka-docs/src/test/scala/docs/cluster/FactorialBackend.scala @@ -18,13 +18,12 @@ class FactorialBackend extends Actor with ActorLogging { import context.dispatcher - def receive = { - case (n: Int) => - Future(factorial(n)) - .map { result => - (n, result) - } - .pipeTo(sender()) + def receive = { case (n: Int) => + Future(factorial(n)) + .map { result => + (n, result) + } + .pipeTo(sender()) } def factorial(n: Int): BigInt = { diff --git a/akka-docs/src/test/scala/docs/cluster/FactorialFrontend.scala b/akka-docs/src/test/scala/docs/cluster/FactorialFrontend.scala index 5735e4887ae..e965d765eb9 100644 --- a/akka-docs/src/test/scala/docs/cluster/FactorialFrontend.scala +++ 
b/akka-docs/src/test/scala/docs/cluster/FactorialFrontend.scala @@ -56,18 +56,18 @@ object FactorialFrontend { val system = ActorSystem("ClusterSystem", config) system.log.info("Factorials will start when 2 backend members in the cluster.") - //#registerOnUp + // #registerOnUp Cluster(system).registerOnMemberUp { system.actorOf(Props(classOf[FactorialFrontend], upToN, true), name = "factorialFrontend") } - //#registerOnUp + // #registerOnUp } } // not used, only for documentation abstract class FactorialFrontend2 extends Actor { - //#router-lookup-in-code + // #router-lookup-in-code import akka.cluster.routing.ClusterRouterGroup import akka.cluster.routing.ClusterRouterGroupSettings import akka.cluster.metrics.AdaptiveLoadBalancingGroup @@ -83,12 +83,12 @@ abstract class FactorialFrontend2 extends Actor { useRoles = Set("backend"))).props(), name = "factorialBackendRouter2") - //#router-lookup-in-code + // #router-lookup-in-code } // not used, only for documentation abstract class FactorialFrontend3 extends Actor { - //#router-deploy-in-code + // #router-deploy-in-code import akka.cluster.routing.ClusterRouterPool import akka.cluster.routing.ClusterRouterPoolSettings import akka.cluster.metrics.AdaptiveLoadBalancingPool @@ -103,5 +103,5 @@ abstract class FactorialFrontend3 extends Actor { allowLocalRoutees = false, useRoles = Set("backend"))).props(Props[FactorialBackend]()), name = "factorialBackendRouter3") - //#router-deploy-in-code + // #router-deploy-in-code } diff --git a/akka-docs/src/test/scala/docs/cluster/SimpleClusterListener.scala b/akka-docs/src/test/scala/docs/cluster/SimpleClusterListener.scala index e3b6a9c3ecb..bbb11a46567 100644 --- a/akka-docs/src/test/scala/docs/cluster/SimpleClusterListener.scala +++ b/akka-docs/src/test/scala/docs/cluster/SimpleClusterListener.scala @@ -15,9 +15,9 @@ class SimpleClusterListener extends Actor with ActorLogging { // subscribe to cluster changes, re-subscribe when restart override def preStart(): Unit = { - 
//#subscribe + // #subscribe cluster.subscribe(self, initialStateMode = InitialStateAsEvents, classOf[MemberEvent], classOf[UnreachableMember]) - //#subscribe + // #subscribe } override def postStop(): Unit = cluster.unsubscribe(self) diff --git a/akka-docs/src/test/scala/docs/cluster/SimpleClusterListener2.scala b/akka-docs/src/test/scala/docs/cluster/SimpleClusterListener2.scala index 7d932601cb9..93cd225271b 100644 --- a/akka-docs/src/test/scala/docs/cluster/SimpleClusterListener2.scala +++ b/akka-docs/src/test/scala/docs/cluster/SimpleClusterListener2.scala @@ -10,25 +10,25 @@ import akka.cluster.ClusterEvent._ class SimpleClusterListener2 extends Actor with ActorLogging { - //#join + // #join val cluster = Cluster(context.system) - //#join + // #join // subscribe to cluster changes, re-subscribe when restart override def preStart(): Unit = { - //#join + // #join cluster.join(cluster.selfAddress) - //#join + // #join - //#subscribe + // #subscribe cluster.subscribe(self, classOf[MemberEvent], classOf[UnreachableMember]) - //#subscribe + // #subscribe - //#register-on-memberup + // #register-on-memberup cluster.registerOnMemberUp { cluster.subscribe(self, classOf[MemberEvent], classOf[UnreachableMember]) } - //#register-on-memberup + // #register-on-memberup } override def postStop(): Unit = cluster.unsubscribe(self) diff --git a/akka-docs/src/test/scala/docs/cluster/singleton/ClusterSingletonSupervision.scala b/akka-docs/src/test/scala/docs/cluster/singleton/ClusterSingletonSupervision.scala index 58651abf8aa..1e6ab674979 100644 --- a/akka-docs/src/test/scala/docs/cluster/singleton/ClusterSingletonSupervision.scala +++ b/akka-docs/src/test/scala/docs/cluster/singleton/ClusterSingletonSupervision.scala @@ -9,8 +9,8 @@ import akka.actor.{ Actor, Props, SupervisorStrategy } class SupervisorActor(childProps: Props, override val supervisorStrategy: SupervisorStrategy) extends Actor { val child = context.actorOf(childProps, "supervised-child") - def receive = { - 
case msg => child.forward(msg) + def receive = { case msg => + child.forward(msg) } } //#singleton-supervisor-actor @@ -19,7 +19,7 @@ import akka.actor.Actor abstract class ClusterSingletonSupervision extends Actor { import akka.actor.{ ActorRef, Props, SupervisorStrategy } def createSingleton(name: String, props: Props, supervisorStrategy: SupervisorStrategy): ActorRef = { - //#singleton-supervisor-actor-usage + // #singleton-supervisor-actor-usage import akka.actor.{ PoisonPill, Props } import akka.cluster.singleton.{ ClusterSingletonManager, ClusterSingletonManagerSettings } context.system.actorOf( @@ -28,6 +28,6 @@ abstract class ClusterSingletonSupervision extends Actor { terminationMessage = PoisonPill, settings = ClusterSingletonManagerSettings(context.system)), name = name) - //#singleton-supervisor-actor-usage + // #singleton-supervisor-actor-usage } } diff --git a/akka-docs/src/test/scala/docs/config/ConfigDocSpec.scala b/akka-docs/src/test/scala/docs/config/ConfigDocSpec.scala index f1a5374ff0e..3cac64df642 100644 --- a/akka-docs/src/test/scala/docs/config/ConfigDocSpec.scala +++ b/akka-docs/src/test/scala/docs/config/ConfigDocSpec.scala @@ -18,14 +18,14 @@ class ConfigDocSpec extends AnyWordSpec with Matchers { val rootBehavior = Behaviors.empty[String] def compileOnlyCustomConfig(): Unit = { - //#custom-config + // #custom-config val customConf = ConfigFactory.parseString(""" akka.log-config-on-start = on """) // ConfigFactory.load sandwiches customConfig between default reference // config and default overrides, and then resolves it. 
val system = ActorSystem(rootBehavior, "MySystem", ConfigFactory.load(customConf)) - //#custom-config + // #custom-config } def compileOnlyPrintConfig(): Unit = { diff --git a/akka-docs/src/test/scala/docs/coordination/LeaseDocSpec.scala b/akka-docs/src/test/scala/docs/coordination/LeaseDocSpec.scala index f3fb565531b..e74792c4c17 100644 --- a/akka-docs/src/test/scala/docs/coordination/LeaseDocSpec.scala +++ b/akka-docs/src/test/scala/docs/coordination/LeaseDocSpec.scala @@ -68,20 +68,20 @@ class LeaseDocSpec extends AkkaSpec(LeaseDocSpec.config) { "A docs lease" should { "scala lease be loadable from scala" in { - //#lease-usage + // #lease-usage val lease = LeaseProvider(system).getLease("", "docs-lease", "owner") val acquired: Future[Boolean] = lease.acquire() val stillAcquired: Boolean = lease.checkLease() val released: Future[Boolean] = lease.release() - //#lease-usage + // #lease-usage - //#lost-callback + // #lost-callback lease.acquire(leaseLostReason => doSomethingImportant(leaseLostReason)) - //#lost-callback + // #lost-callback - //#cluster-owner + // #cluster-owner val owner = Cluster(system).selfAddress.hostPort - //#cluster-owner + // #cluster-owner // remove compiler warnings blackhole(acquired, stillAcquired, released, owner) diff --git a/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala b/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala index ea7f35b2f89..27bd7b98383 100644 --- a/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala +++ b/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala @@ -44,7 +44,7 @@ object DistributedDataDocSpec { #//#japi-serializer-config """ - //#data-bot + // #data-bot import java.util.concurrent.ThreadLocalRandom import akka.actor.Actor import akka.actor.ActorLogging @@ -86,7 +86,6 @@ object DistributedDataDocSpec { } case _: UpdateResponse[_] => // ignore - case c @ Changed(DataKey) => val data = c.get(DataKey) log.info("Current elements: {}", data.elements) @@ 
-95,7 +94,7 @@ object DistributedDataDocSpec { override def postStop(): Unit = tickTask.cancel() } - //#data-bot + // #data-bot } @@ -106,7 +105,7 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { val probe = TestProbe() implicit val self: ActorRef = probe.ref - //#update + // #update implicit val node: SelfUniqueAddress = DistributedData(system).selfUniqueAddress val replicator = DistributedData(system).replicator @@ -125,21 +124,21 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { val writeAll = WriteAll(timeout = 5.seconds) replicator ! Update(ActiveFlagKey, Flag.Disabled, writeAll)(_.switchOn) - //#update + // #update probe.expectMsgType[UpdateResponse[_]] match { - //#update-response1 + // #update-response1 case UpdateSuccess(Counter1Key, req) => // ok - //#update-response1 + // #update-response1 case unexpected => fail("Unexpected response: " + unexpected) } probe.expectMsgType[UpdateResponse[_]] match { - //#update-response2 + // #update-response2 case UpdateSuccess(Set1Key, req) => // ok case UpdateTimeout(Set1Key, req) => // write to 3 nodes failed within 1.second - //#update-response2 + // #update-response2 case UpdateSuccess(Set2Key, None) => case unexpected => fail("Unexpected response: " + unexpected) } @@ -151,7 +150,7 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { implicit val self: ActorRef = probe.ref def sender() = self - //#update-request-context + // #update-request-context implicit val node = DistributedData(system).selfUniqueAddress val replicator = DistributedData(system).replicator val writeTwo = WriteTo(n = 2, timeout = 3.second) @@ -168,14 +167,14 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { case UpdateTimeout(Counter1Key, Some(replyTo: ActorRef)) => replyTo ! 
"nack" } - //#update-request-context + // #update-request-context } "demonstrate get" in { val probe = TestProbe() implicit val self: ActorRef = probe.ref - //#get + // #get val replicator = DistributedData(system).replicator val Counter1Key = PNCounterKey("counter1") val Set1Key = GSetKey[String]("set1") @@ -192,25 +191,25 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { val readAll = ReadAll(timeout = 5.seconds) replicator ! Get(ActiveFlagKey, readAll) - //#get + // #get probe.expectMsgType[GetResponse[_]] match { - //#get-response1 + // #get-response1 case g @ GetSuccess(Counter1Key, req) => val value = g.get(Counter1Key).value case NotFound(Counter1Key, req) => // key counter1 does not exist - //#get-response1 + // #get-response1 case unexpected => fail("Unexpected response: " + unexpected) } probe.expectMsgType[GetResponse[_]] match { - //#get-response2 + // #get-response2 case g @ GetSuccess(Set1Key, req) => val elements = g.get(Set1Key).elements case GetFailure(Set1Key, req) => // read from 3 nodes failed within 1.second case NotFound(Set1Key, req) => // key set1 does not exist - //#get-response2 + // #get-response2 case g @ GetSuccess(Set2Key, None) => val elements = g.get(Set2Key).elements case unexpected => fail("Unexpected response: " + unexpected) @@ -223,7 +222,7 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { implicit val self: ActorRef = probe.ref def sender() = self - //#get-request-context + // #get-request-context implicit val node = DistributedData(system).selfUniqueAddress val replicator = DistributedData(system).replicator val readTwo = ReadFrom(n = 2, timeout = 3.second) @@ -242,7 +241,7 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { case NotFound(Counter1Key, Some(replyTo: ActorRef)) => replyTo ! 
0L } - //#get-request-context + // #get-request-context } "demonstrate subscribe" in { @@ -251,7 +250,7 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { implicit val self: ActorRef = probe.ref def sender() = self - //#subscribe + // #subscribe val replicator = DistributedData(system).replicator val Counter1Key = PNCounterKey("counter1") // subscribe to changes of the Counter1Key value @@ -265,14 +264,14 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { // incoming request to retrieve current value of the counter sender() ! currentValue } - //#subscribe + // #subscribe } "demonstrate delete" in { val probe = TestProbe() implicit val self: ActorRef = probe.ref - //#delete + // #delete val replicator = DistributedData(system).replicator val Counter1Key = PNCounterKey("counter1") val Set2Key = ORSetKey[String]("set2") @@ -281,12 +280,12 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { val writeMajority = WriteMajority(timeout = 5.seconds) replicator ! 
Delete(Set2Key, writeMajority) - //#delete + // #delete } "demonstrate PNCounter" in { def println(o: Any): Unit = () - //#pncounter + // #pncounter implicit val node = DistributedData(system).selfUniqueAddress val c0 = PNCounter.empty @@ -294,12 +293,12 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { val c2 = c1 :+ 7 val c3: PNCounter = c2.decrement(2) println(c3.value) // 6 - //#pncounter + // #pncounter } "demonstrate PNCounterMap" in { def println(o: Any): Unit = () - //#pncountermap + // #pncountermap implicit val node = DistributedData(system).selfUniqueAddress val m0 = PNCounterMap.empty[String] val m1 = m0.increment(node, "a", 7) @@ -307,35 +306,35 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { val m3 = m2.increment(node, "b", 1) println(m3.get("a")) // 5 m3.entries.foreach { case (key, value) => println(s"$key -> $value") } - //#pncountermap + // #pncountermap } "demonstrate GSet" in { def println(o: Any): Unit = () - //#gset + // #gset val s0 = GSet.empty[String] val s1 = s0 + "a" val s2 = s1 + "b" + "c" if (s2.contains("a")) println(s2.elements) // a, b, c - //#gset + // #gset } "demonstrate ORSet" in { def println(o: Any): Unit = () - //#orset + // #orset implicit val node = DistributedData(system).selfUniqueAddress val s0 = ORSet.empty[String] val s1 = s0 :+ "a" val s2 = s1 :+ "b" val s3 = s2.remove("a") println(s3.elements) // b - //#orset + // #orset } "demonstrate ORMultiMap" in { def println(o: Any): Unit = () - //#ormultimap + // #ormultimap implicit val node = DistributedData(system).selfUniqueAddress val m0 = ORMultiMap.empty[String, Int] val m1 = m0 :+ ("a" -> Set(1, 2, 3)) @@ -343,32 +342,32 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { val m3 = m2.removeBinding(node, "a", 2) val m4 = m3.addBinding(node, "b", 1) println(m4.entries) - //#ormultimap + // #ormultimap } "demonstrate Flag" in { def println(o: Any): Unit = () - //#flag + // #flag 
val f0 = Flag.Disabled val f1 = f0.switchOn println(f1.enabled) - //#flag + // #flag } "demonstrate LWWRegister" in { def println(o: Any): Unit = () - //#lwwregister + // #lwwregister implicit val node = DistributedData(system).selfUniqueAddress val r1 = LWWRegister.create("Hello") val r2 = r1.withValueOf("Hi") println(s"${r1.value} by ${r1.updatedBy} at ${r1.timestamp}") - //#lwwregister + // #lwwregister r2.value should be("Hi") } "demonstrate LWWRegister with custom clock" in { def println(o: Any): Unit = () - //#lwwregister-custom-clock + // #lwwregister-custom-clock case class Record(version: Int, name: String, address: String) implicit val node = DistributedData(system).selfUniqueAddress @@ -385,7 +384,7 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { val r3 = r1.merge(r2) println(r3.value) - //#lwwregister-custom-clock + // #lwwregister-custom-clock r3.value.address should be("Madison Square") } diff --git a/akka-docs/src/test/scala/docs/ddata/ShoppingCart.scala b/akka-docs/src/test/scala/docs/ddata/ShoppingCart.scala index f4baefb89ee..2dc435454fc 100644 --- a/akka-docs/src/test/scala/docs/ddata/ShoppingCart.scala +++ b/akka-docs/src/test/scala/docs/ddata/ShoppingCart.scala @@ -26,11 +26,11 @@ object ShoppingCart { final case class Cart(items: Set[LineItem]) final case class LineItem(productId: String, title: String, quantity: Int) - //#read-write-majority + // #read-write-majority private val timeout = 3.seconds private val readMajority = ReadMajority(timeout) private val writeMajority = WriteMajority(timeout) - //#read-write-majority + // #read-write-majority } @@ -49,7 +49,7 @@ class ShoppingCart(userId: String) extends Actor { .orElse[Any, Unit](receiveRemoveItem) .orElse[Any, Unit](receiveOther) - //#get-cart + // #get-cart def receiveGetCart: Receive = { case GetCart => replicator ! 
Get(DataKey, readMajority, Some(sender())) @@ -66,17 +66,16 @@ class ShoppingCart(userId: String) extends Actor { // ReadMajority failure, try again with local read replicator ! Get(DataKey, ReadLocal, Some(replyTo)) } - //#get-cart + // #get-cart - //#add-item - def receiveAddItem: Receive = { - case cmd @ AddItem(item) => - val update = Update(DataKey, LWWMap.empty[String, LineItem], writeMajority, Some(cmd)) { cart => - updateCart(cart, item) - } - replicator ! update + // #add-item + def receiveAddItem: Receive = { case cmd @ AddItem(item) => + val update = Update(DataKey, LWWMap.empty[String, LineItem], writeMajority, Some(cmd)) { cart => + updateCart(cart, item) + } + replicator ! update } - //#add-item + // #add-item def updateCart(data: LWWMap[String, LineItem], item: LineItem): LWWMap[String, LineItem] = data.get(item.productId) match { @@ -85,7 +84,7 @@ class ShoppingCart(userId: String) extends Actor { case None => data :+ (item.productId -> item) } - //#remove-item + // #remove-item def receiveRemoveItem: Receive = { case cmd @ RemoveItem(productId) => // Try to fetch latest from a majority of nodes first, since ORMap @@ -106,7 +105,7 @@ class ShoppingCart(userId: String) extends Actor { case NotFound(DataKey, Some(RemoveItem(productId))) => // nothing to remove } - //#remove-item + // #remove-item def receiveOther: Receive = { case _: UpdateSuccess[_] | _: UpdateTimeout[_] => diff --git a/akka-docs/src/test/scala/docs/ddata/protobuf/TwoPhaseSetSerializer.scala b/akka-docs/src/test/scala/docs/ddata/protobuf/TwoPhaseSetSerializer.scala index f9a586f0d25..29d8e192f48 100644 --- a/akka-docs/src/test/scala/docs/ddata/protobuf/TwoPhaseSetSerializer.scala +++ b/akka-docs/src/test/scala/docs/ddata/protobuf/TwoPhaseSetSerializer.scala @@ -62,7 +62,7 @@ class TwoPhaseSetSerializer(val system: ExtendedActorSystem) extends Serializer //#serializer class TwoPhaseSetSerializerWithCompression(system: ExtendedActorSystem) extends TwoPhaseSetSerializer(system) { - 
//#compression + // #compression override def toBinary(obj: AnyRef): Array[Byte] = obj match { case m: TwoPhaseSet => compress(twoPhaseSetToProto(m)) case _ => throw new IllegalArgumentException(s"Can't serialize object of type ${obj.getClass}") @@ -71,5 +71,5 @@ class TwoPhaseSetSerializerWithCompression(system: ExtendedActorSystem) extends override def fromBinary(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = { twoPhaseSetFromBinary(decompress(bytes)) } - //#compression + // #compression } diff --git a/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala b/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala index 7e926d5ea11..1f1079cf16c 100644 --- a/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala +++ b/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala @@ -229,7 +229,7 @@ object DispatcherDocSpec { """ - //#prio-mailbox + // #prio-mailbox import akka.dispatch.PriorityGenerator import akka.dispatch.UnboundedStablePriorityMailbox import com.typesafe.config.Config @@ -252,37 +252,35 @@ object DispatcherDocSpec { // We default to 1, which is in between high and low case otherwise => 1 }) - //#prio-mailbox + // #prio-mailbox - //#control-aware-mailbox-messages + // #control-aware-mailbox-messages import akka.dispatch.ControlMessage case object MyControlMessage extends ControlMessage - //#control-aware-mailbox-messages + // #control-aware-mailbox-messages class MyActor extends Actor { - def receive = { - case x => + def receive = { case x => } } - //#required-mailbox-class + // #required-mailbox-class import akka.dispatch.RequiresMessageQueue import akka.dispatch.BoundedMessageQueueSemantics class MyBoundedActor extends MyActor with RequiresMessageQueue[BoundedMessageQueueSemantics] - //#required-mailbox-class + // #required-mailbox-class - //#require-mailbox-on-actor + // #require-mailbox-on-actor class MySpecialActor extends Actor with RequiresMessageQueue[MyUnboundedMessageQueueSemantics] { - 
//#require-mailbox-on-actor - def receive = { - case _ => + // #require-mailbox-on-actor + def receive = { case _ => } - //#require-mailbox-on-actor + // #require-mailbox-on-actor // ... } - //#require-mailbox-on-actor + // #require-mailbox-on-actor } class DispatcherDocSpec extends AkkaSpec(DispatcherDocSpec.config) { @@ -291,19 +289,19 @@ class DispatcherDocSpec extends AkkaSpec(DispatcherDocSpec.config) { "defining dispatcher in config" in { val context = system - //#defining-dispatcher-in-config + // #defining-dispatcher-in-config import akka.actor.Props val myActor = context.actorOf(Props[MyActor](), "myactor") - //#defining-dispatcher-in-config + // #defining-dispatcher-in-config } "defining dispatcher in code" in { val context = system - //#defining-dispatcher-in-code + // #defining-dispatcher-in-code import akka.actor.Props val myActor = context.actorOf(Props[MyActor]().withDispatcher("my-dispatcher"), "myactor1") - //#defining-dispatcher-in-code + // #defining-dispatcher-in-code } "defining dispatcher with bounded queue" in { @@ -312,49 +310,49 @@ class DispatcherDocSpec extends AkkaSpec(DispatcherDocSpec.config) { "defining fixed-pool-size dispatcher" in { val context = system - //#defining-fixed-pool-size-dispatcher + // #defining-fixed-pool-size-dispatcher val myActor = context.actorOf(Props[MyActor]().withDispatcher("blocking-io-dispatcher"), "myactor2") - //#defining-fixed-pool-size-dispatcher + // #defining-fixed-pool-size-dispatcher } "defining pinned dispatcher" in { val context = system - //#defining-pinned-dispatcher + // #defining-pinned-dispatcher val myActor = context.actorOf(Props[MyActor]().withDispatcher("my-pinned-dispatcher"), "myactor3") - //#defining-pinned-dispatcher + // #defining-pinned-dispatcher } "defining affinity-pool dispatcher" in { val context = system - //#defining-affinity-pool-dispatcher + // #defining-affinity-pool-dispatcher val myActor = context.actorOf(Props[MyActor]().withDispatcher("affinity-pool-dispatcher"), 
"myactor4") - //#defining-affinity-pool-dispatcher + // #defining-affinity-pool-dispatcher } "looking up a dispatcher" in { - //#lookup + // #lookup // for use with Futures, Scheduler, etc. implicit val executionContext = system.dispatchers.lookup("my-dispatcher") - //#lookup + // #lookup } "defining mailbox in config" in { val context = system - //#defining-mailbox-in-config + // #defining-mailbox-in-config import akka.actor.Props val myActor = context.actorOf(Props[MyActor](), "priomailboxactor") - //#defining-mailbox-in-config + // #defining-mailbox-in-config } "defining mailbox in code" in { val context = system - //#defining-mailbox-in-code + // #defining-mailbox-in-code import akka.actor.Props val myActor = context.actorOf(Props[MyActor]().withMailbox("prio-mailbox")) - //#defining-mailbox-in-code + // #defining-mailbox-in-code } "using a required mailbox" in { @@ -364,7 +362,7 @@ class DispatcherDocSpec extends AkkaSpec(DispatcherDocSpec.config) { "defining priority dispatcher" in { new AnyRef { - //#prio-dispatcher + // #prio-dispatcher // We create a new Actor that just prints out what it processes class Logger extends Actor { @@ -380,8 +378,8 @@ class DispatcherDocSpec extends AkkaSpec(DispatcherDocSpec.config) { self ! 
PoisonPill - def receive = { - case x => log.info(x.toString) + def receive = { case x => + log.info(x.toString) } } val a = system.actorOf(Props(classOf[Logger], this).withDispatcher("prio-dispatcher")) @@ -396,7 +394,7 @@ class DispatcherDocSpec extends AkkaSpec(DispatcherDocSpec.config) { * lowpriority * lowpriority */ - //#prio-dispatcher + // #prio-dispatcher watch(a) expectMsgPF() { case Terminated(`a`) => () } @@ -405,7 +403,7 @@ class DispatcherDocSpec extends AkkaSpec(DispatcherDocSpec.config) { "defining control aware dispatcher" in { new AnyRef { - //#control-aware-dispatcher + // #control-aware-dispatcher // We create a new Actor that just prints out what it processes class Logger extends Actor { @@ -416,8 +414,8 @@ class DispatcherDocSpec extends AkkaSpec(DispatcherDocSpec.config) { self ! MyControlMessage self ! PoisonPill - def receive = { - case x => log.info(x.toString) + def receive = { case x => + log.info(x.toString) } } val a = system.actorOf(Props(classOf[Logger], this).withDispatcher("control-aware-dispatcher")) @@ -428,7 +426,7 @@ class DispatcherDocSpec extends AkkaSpec(DispatcherDocSpec.config) { * foo * bar */ - //#control-aware-dispatcher + // #control-aware-dispatcher watch(a) expectMsgPF() { case Terminated(`a`) => () } diff --git a/akka-docs/src/test/scala/docs/duration/Sample.scala b/akka-docs/src/test/scala/docs/duration/Sample.scala index fb054553886..f1e0e33fd48 100644 --- a/akka-docs/src/test/scala/docs/duration/Sample.scala +++ b/akka-docs/src/test/scala/docs/duration/Sample.scala @@ -7,7 +7,7 @@ package docs.duration import language.postfixOps object Scala { - //#dsl + // #dsl import scala.concurrent.duration._ val fivesec = 5.seconds @@ -16,11 +16,11 @@ object Scala { assert(diff < fivesec) val fourmillis = threemillis * 4 / 3 // you cannot write it the other way around val n = threemillis / (1 millisecond) - //#dsl + // #dsl - //#deadline + // #deadline val deadline = 10.seconds.fromNow // do something val rest = 
deadline.timeLeft - //#deadline + // #deadline } diff --git a/akka-docs/src/test/scala/docs/event/EventBusDocSpec.scala b/akka-docs/src/test/scala/docs/event/EventBusDocSpec.scala index 9ae56814f49..886eca1ef9f 100644 --- a/akka-docs/src/test/scala/docs/event/EventBusDocSpec.scala +++ b/akka-docs/src/test/scala/docs/event/EventBusDocSpec.scala @@ -11,7 +11,7 @@ import akka.testkit.TestProbe object EventBusDocSpec { - //#lookup-bus + // #lookup-bus import akka.event.EventBus import akka.event.LookupClassification @@ -46,9 +46,9 @@ object EventBusDocSpec { } - //#lookup-bus + // #lookup-bus - //#subchannel-bus + // #subchannel-bus import akka.util.Subclassification class StartsWithSubclassification extends Subclassification[String] { @@ -84,9 +84,9 @@ object EventBusDocSpec { subscriber ! event.payload } } - //#subchannel-bus + // #subchannel-bus - //#scanning-bus + // #scanning-bus import akka.event.ScanningClassification /** @@ -118,9 +118,9 @@ object EventBusDocSpec { subscriber ! event } } - //#scanning-bus + // #scanning-bus - //#actor-bus + // #actor-bus import akka.event.ActorEventBus import akka.event.ManagedActorClassification import akka.event.ActorClassifier @@ -140,7 +140,7 @@ object EventBusDocSpec { // used internally (i.e. 
the expected number of different classifiers) override protected def mapSize: Int = 128 } - //#actor-bus + // #actor-bus } @@ -148,17 +148,17 @@ class EventBusDocSpec extends AkkaSpec { import EventBusDocSpec._ "demonstrate LookupClassification" in { - //#lookup-bus-test + // #lookup-bus-test val lookupBus = new LookupBusImpl lookupBus.subscribe(testActor, "greetings") lookupBus.publish(MsgEnvelope("time", System.currentTimeMillis())) lookupBus.publish(MsgEnvelope("greetings", "hello")) expectMsg("hello") - //#lookup-bus-test + // #lookup-bus-test } "demonstrate SubchannelClassification" in { - //#subchannel-bus-test + // #subchannel-bus-test val subchannelBus = new SubchannelBusImpl subchannelBus.subscribe(testActor, "abc") subchannelBus.publish(MsgEnvelope("xyzabc", "x")) @@ -167,11 +167,11 @@ class EventBusDocSpec extends AkkaSpec { expectMsg("c") subchannelBus.publish(MsgEnvelope("abcdef", "d")) expectMsg("d") - //#subchannel-bus-test + // #subchannel-bus-test } "demonstrate ScanningClassification" in { - //#scanning-bus-test + // #scanning-bus-test val scanningBus = new ScanningBusImpl scanningBus.subscribe(testActor, 3) scanningBus.publish("xyzabc") @@ -179,11 +179,11 @@ class EventBusDocSpec extends AkkaSpec { expectMsg("ab") scanningBus.publish("abc") expectMsg("abc") - //#scanning-bus-test + // #scanning-bus-test } "demonstrate ManagedActorClassification" in { - //#actor-bus-test + // #actor-bus-test val observer1 = TestProbe().ref val observer2 = TestProbe().ref val probe1 = TestProbe() @@ -200,6 +200,6 @@ class EventBusDocSpec extends AkkaSpec { actorBus.publish(Notification(observer2, 101)) probe2.expectMsg(Notification(observer2, 101)) probe1.expectNoMessage(500.millis) - //#actor-bus-test + // #actor-bus-test } } diff --git a/akka-docs/src/test/scala/docs/event/LoggingDocSpec.scala b/akka-docs/src/test/scala/docs/event/LoggingDocSpec.scala index 91a9a53835f..be2ef2d4baa 100644 --- a/akka-docs/src/test/scala/docs/event/LoggingDocSpec.scala +++ 
b/akka-docs/src/test/scala/docs/event/LoggingDocSpec.scala @@ -9,7 +9,7 @@ import akka.testkit.AkkaSpec object LoggingDocSpec { - //#my-actor + // #my-actor import akka.event.Logging class MyActor extends Actor { @@ -25,7 +25,7 @@ object LoggingDocSpec { case x => log.warning("Received unknown message: {}", x) } } - //#my-actor + // #my-actor import akka.event.Logging @@ -34,7 +34,7 @@ object LoggingDocSpec { def receive = { case _ => { - //#mdc + // #mdc val mdc = Map("requestId" -> 1234, "visitorId" -> 5678) log.mdc(mdc) @@ -42,12 +42,12 @@ object LoggingDocSpec { log.info("Starting new request") log.clearMDC() - //#mdc + // #mdc } } } - //#mdc-actor + // #mdc-actor import Logging.MDC final case class Req(work: String, visitorId: Int) @@ -72,9 +72,9 @@ object LoggingDocSpec { } } - //#mdc-actor + // #mdc-actor - //#my-event-listener + // #my-event-listener import akka.event.Logging.Debug import akka.event.Logging.Error import akka.event.Logging.Info @@ -91,9 +91,9 @@ object LoggingDocSpec { case Debug(logSource, logClass, message) => // ... 
} } - //#my-event-listener + // #my-event-listener - //#my-source + // #my-source import akka.actor.ActorSystem import akka.event.LogSource @@ -110,22 +110,22 @@ object LoggingDocSpec { val log = Logging(system, this) } - //#my-source + // #my-source object Listeners { def println(s: Any) = () - //#deadletters + // #deadletters import akka.actor.{ Actor, DeadLetter, Props } class DeadLetterListener extends Actor { - def receive = { - case d: DeadLetter => println(d) + def receive = { case d: DeadLetter => + println(d) } } - //#deadletters + // #deadletters - //#superclass-subscription-eventstream + // #superclass-subscription-eventstream abstract class AllKindsOfMusic { def artist: String } case class Jazz(artist: String) extends AllKindsOfMusic case class Electronic(artist: String) extends AllKindsOfMusic @@ -136,7 +136,7 @@ object LoggingDocSpec { case m: Electronic => println(s"${self.path.name} is listening to: ${m.artist}") } } - //#superclass-subscription-eventstream + // #superclass-subscription-eventstream } } @@ -162,16 +162,16 @@ class LoggingDocSpec extends AkkaSpec { "allow registration to dead letters" in { import LoggingDocSpec.Listeners._ - //#deadletters + // #deadletters val listener = system.actorOf(Props[DeadLetterListener]()) system.eventStream.subscribe(listener, classOf[DeadLetter]) - //#deadletters + // #deadletters } "demonstrate superclass subscriptions on eventStream" in { import LoggingDocSpec.Listeners._ - //#superclass-subscription-eventstream + // #superclass-subscription-eventstream val jazzListener = system.actorOf(Props[Listener]()) val musicListener = system.actorOf(Props[Listener]()) @@ -183,29 +183,29 @@ class LoggingDocSpec extends AkkaSpec { // jazzListener and musicListener will be notified about Jazz: system.eventStream.publish(Jazz("Sonny Rollins")) - //#superclass-subscription-eventstream + // #superclass-subscription-eventstream } "allow registration to suppressed dead letters" in { import akka.actor.Props val listener = 
system.actorOf(Props[MyActor]()) - //#suppressed-deadletters + // #suppressed-deadletters import akka.actor.SuppressedDeadLetter system.eventStream.subscribe(listener, classOf[SuppressedDeadLetter]) - //#suppressed-deadletters + // #suppressed-deadletters - //#all-deadletters + // #all-deadletters import akka.actor.AllDeadLetters system.eventStream.subscribe(listener, classOf[AllDeadLetters]) - //#all-deadletters + // #all-deadletters } "demonstrate logging more arguments" in { - //#array + // #array val args = Array("The", "brown", "fox", "jumps", 42) system.log.debug("five parameters: {}, {}, {}, {}, {}", args) - //#array + // #array } } diff --git a/akka-docs/src/test/scala/docs/extension/ExtensionDocSpec.scala b/akka-docs/src/test/scala/docs/extension/ExtensionDocSpec.scala index e055cf4195c..df74631fecc 100644 --- a/akka-docs/src/test/scala/docs/extension/ExtensionDocSpec.scala +++ b/akka-docs/src/test/scala/docs/extension/ExtensionDocSpec.scala @@ -14,11 +14,11 @@ import akka.testkit.AkkaSpec import akka.actor.Extension class CountExtensionImpl extends Extension { - //Since this Extension is a shared instance + // Since this Extension is a shared instance // per ActorSystem we need to be threadsafe private val counter = new AtomicLong(0) - //This is the operation this Extension provides + // This is the operation this Extension provides def increment() = counter.incrementAndGet() } //#extension @@ -30,19 +30,17 @@ import akka.actor.ExtensionIdProvider import akka.actor.ExtendedActorSystem object CountExtension extends ExtensionId[CountExtensionImpl] with ExtensionIdProvider { - //The lookup method is required by ExtensionIdProvider, + // The lookup method is required by ExtensionIdProvider, // so we return ourselves here, this allows us // to configure our extension to be loaded when // the ActorSystem starts up override def lookup = CountExtension - //This method will be called by Akka + // This method will be called by Akka // to instantiate our Extension 
override def createExtension(system: ExtendedActorSystem) = new CountExtensionImpl - /** - * Java API: retrieve the Count extension for the given system. - */ + /** Java API: retrieve the Count extension for the given system. */ override def get(system: ActorSystem): CountExtensionImpl = super.get(system) override def get(system: ClassicActorSystemProvider): CountExtensionImpl = super.get(system) } @@ -58,41 +56,40 @@ object ExtensionDocSpec { //#config """ - //#extension-usage-actor + // #extension-usage-actor class MyActor extends Actor { - def receive = { - case someMessage => - CountExtension(context.system).increment() + def receive = { case someMessage => + CountExtension(context.system).increment() } } - //#extension-usage-actor + // #extension-usage-actor - //#extension-usage-actor-trait + // #extension-usage-actor-trait trait Counting { self: Actor => def increment() = CountExtension(context.system).increment() } class MyCounterActor extends Actor with Counting { - def receive = { - case someMessage => increment() + def receive = { case someMessage => + increment() } } - //#extension-usage-actor-trait + // #extension-usage-actor-trait } class ExtensionDocSpec extends AkkaSpec(ExtensionDocSpec.config) { "demonstrate how to create an extension in Scala" in { - //#extension-usage + // #extension-usage CountExtension(system).increment() - //#extension-usage + // #extension-usage } "demonstrate how to lookup a configured extension in Scala" in { - //#extension-lookup + // #extension-lookup system.extension(CountExtension) - //#extension-lookup + // #extension-lookup } } diff --git a/akka-docs/src/test/scala/docs/extension/SettingsExtensionDocSpec.scala b/akka-docs/src/test/scala/docs/extension/SettingsExtensionDocSpec.scala index 8f49b309f4d..2a08bf6541d 100644 --- a/akka-docs/src/test/scala/docs/extension/SettingsExtensionDocSpec.scala +++ b/akka-docs/src/test/scala/docs/extension/SettingsExtensionDocSpec.scala @@ -38,9 +38,7 @@ object Settings extends 
ExtensionId[SettingsImpl] with ExtensionIdProvider { override def createExtension(system: ExtendedActorSystem) = new SettingsImpl(system.settings.config) - /** - * Java API: retrieve the Settings extension for the given system. - */ + /** Java API: retrieve the Settings extension for the given system. */ override def get(system: ActorSystem): SettingsImpl = super.get(system) override def get(system: ClassicActorSystemProvider): SettingsImpl = super.get(system) } @@ -61,15 +59,14 @@ object SettingsExtensionDocSpec { //#config """ - //#extension-usage-actor + // #extension-usage-actor class MyActor extends Actor { val settings = Settings(context.system) val connection = connect(settings.DbUri, settings.CircuitBreakerTimeout) - //#extension-usage-actor - def receive = { - case someMessage => + // #extension-usage-actor + def receive = { case someMessage => } def connect(dbUri: String, circuitBreakerTimeout: Duration) = { @@ -82,10 +79,10 @@ object SettingsExtensionDocSpec { class SettingsExtensionDocSpec extends AkkaSpec(SettingsExtensionDocSpec.config) { "demonstrate how to create application specific settings extension in Scala" in { - //#extension-usage + // #extension-usage val dbUri = Settings(system).DbUri val circuitBreakerTimeout = Settings(system).CircuitBreakerTimeout - //#extension-usage + // #extension-usage } } diff --git a/akka-docs/src/test/scala/docs/faq/Faq.scala b/akka-docs/src/test/scala/docs/faq/Faq.scala index 213e3083a56..3114d202733 100644 --- a/akka-docs/src/test/scala/docs/faq/Faq.scala +++ b/akka-docs/src/test/scala/docs/faq/Faq.scala @@ -20,16 +20,15 @@ object MyActor { class MyActor extends Actor { import MyActor._ - def receive = { - case message: Message => - message match { - case BarMessage(bar) => sender() ! BazMessage("Got " + bar) - // warning here: - // "match may not be exhaustive. 
It would fail on the following input: FooMessage(_)" - //#exhaustiveness-check - case FooMessage(_) => // avoid the warning in our build logs - //#exhaustiveness-check - } + def receive = { case message: Message => + message match { + case BarMessage(bar) => sender() ! BazMessage("Got " + bar) + // warning here: + // "match may not be exhaustive. It would fail on the following input: FooMessage(_)" + // #exhaustiveness-check + case FooMessage(_) => // avoid the warning in our build logs + // #exhaustiveness-check + } } } //#exhaustiveness-check diff --git a/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala b/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala index f297954d51a..21ec3a81afe 100644 --- a/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala +++ b/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala @@ -31,14 +31,13 @@ object FutureDocSpec { class OddActor extends Actor { var n = 1 - def receive = { - case GetNext => - sender() ! n - n += 2 + def receive = { case GetNext => + sender() ! n + n += 2 } } - //#pipe-to-usage + // #pipe-to-usage class ActorUsingPipeTo(target: ActorRef) extends Actor { // akka.pattern.pipe needs to be imported import akka.pattern.{ ask, pipe } @@ -46,38 +45,36 @@ object FutureDocSpec { implicit val ec: ExecutionContext = context.dispatcher implicit val timeout: Timeout = 5.seconds - def receive = { - case _ => - val future = target ? "some message" - future.pipeTo(sender()) // use the pipe pattern + def receive = { case _ => + val future = target ? 
"some message" + future.pipeTo(sender()) // use the pipe pattern } } - //#pipe-to-usage + // #pipe-to-usage - //#pipe-to-returned-data + // #pipe-to-returned-data case class UserData(data: String) case class UserActivity(activity: String) - //#pipe-to-returned-data + // #pipe-to-returned-data - //#pipe-to-user-data-actor + // #pipe-to-user-data-actor class UserDataActor extends Actor { import UserDataActor._ - //holds the user data internally + // holds the user data internally var internalData: UserData = UserData("initial data") - def receive = { - case Get => - sender() ! internalData + def receive = { case Get => + sender() ! internalData } } object UserDataActor { case object Get } - //#pipe-to-user-data-actor + // #pipe-to-user-data-actor - //#pipe-to-user-activity-actor + // #pipe-to-user-activity-actor trait UserActivityRepository { def queryHistoricalActivities(userId: String): Future[List[UserActivity]] } @@ -87,20 +84,19 @@ object FutureDocSpec { import UserActivityActor._ implicit val ec: ExecutionContext = context.dispatcher - def receive = { - case Get => - // user's historical activities are retrieved - // via the separate repository - repository.queryHistoricalActivities(userId).pipeTo(sender()) + def receive = { case Get => + // user's historical activities are retrieved + // via the separate repository + repository.queryHistoricalActivities(userId).pipeTo(sender()) } } object UserActivityActor { case object Get } - //#pipe-to-user-activity-actor + // #pipe-to-user-activity-actor - //#pipe-to-proxy-actor + // #pipe-to-proxy-actor class UserProxyActor(userData: ActorRef, userActivities: ActorRef) extends Actor { import UserProxyActor._ import akka.pattern.{ ask, pipe } @@ -115,15 +111,15 @@ object FutureDocSpec { (userActivities ? 
UserActivityActor.Get).pipeTo(sender()) } } - //#pipe-to-proxy-actor + // #pipe-to-proxy-actor - //#pipe-to-proxy-messages + // #pipe-to-proxy-messages object UserProxyActor { sealed trait Message case object GetUserData extends Message case object GetUserActivities extends Message } - //#pipe-to-proxy-messages + // #pipe-to-proxy-messages } @@ -135,7 +131,7 @@ class FutureDocSpec extends AkkaSpec { "demonstrate usage custom ExecutionContext" in { val yourExecutorServiceGoesHere = java.util.concurrent.Executors.newSingleThreadExecutor() - //#diy-execution-context + // #diy-execution-context import scala.concurrent.{ ExecutionContext, Promise } implicit val ec = ExecutionContext.fromExecutorService(yourExecutorServiceGoesHere) @@ -146,13 +142,13 @@ class FutureDocSpec extends AkkaSpec { // Then shut your ExecutionContext down at some // appropriate place in your program/application ec.shutdown() - //#diy-execution-context + // #diy-execution-context } "demonstrate usage of blocking from actor" in { val actor = system.actorOf(Props[MyActor]()) val msg = "hello" - //#ask-blocking + // #ask-blocking import scala.concurrent.Await import akka.pattern.ask import akka.util.Timeout @@ -161,7 +157,7 @@ class FutureDocSpec extends AkkaSpec { implicit val timeout: Timeout = 5.seconds val future = actor ? 
msg // enabled by the “ask” import val result = Await.result(future, timeout.duration).asInstanceOf[String] - //#ask-blocking + // #ask-blocking result should be("HELLO") } @@ -170,17 +166,17 @@ class FutureDocSpec extends AkkaSpec { val actor = system.actorOf(Props[MyActor]()) val msg = "hello" implicit val timeout: Timeout = 5.seconds - //#map-to + // #map-to import scala.concurrent.Future import akka.pattern.ask val future: Future[String] = ask(actor, msg).mapTo[String] - //#map-to + // #map-to Await.result(future, timeout.duration) should be("HELLO") } "demonstrate usage of simple future eval" in { - //#future-eval + // #future-eval import scala.concurrent.Await import scala.concurrent.Future import scala.concurrent.duration._ @@ -189,12 +185,12 @@ class FutureDocSpec extends AkkaSpec { "Hello" + "World" } future.foreach(println) - //#future-eval + // #future-eval Await.result(future, 3 seconds) should be("HelloWorld") } "demonstrate usage of map" in { - //#map + // #map val f1 = Future { "Hello" + "World" } @@ -202,14 +198,14 @@ class FutureDocSpec extends AkkaSpec { x.length } f2.foreach(println) - //#map + // #map val result = Await.result(f2, 3 seconds) result should be(10) f1.value should be(Some(Success("HelloWorld"))) } "demonstrate wrong usage of nested map" in { - //#wrong-nested-map + // #wrong-nested-map val f1 = Future { "Hello" + "World" } @@ -220,12 +216,12 @@ class FutureDocSpec extends AkkaSpec { } } f3.foreach(println) - //#wrong-nested-map + // #wrong-nested-map Await.ready(f3, 3 seconds) } "demonstrate usage of flatMap" in { - //#flat-map + // #flat-map val f1 = Future { "Hello" + "World" } @@ -236,13 +232,13 @@ class FutureDocSpec extends AkkaSpec { } } f3.foreach(println) - //#flat-map + // #flat-map val result = Await.result(f3, 3 seconds) result should be(30) } "demonstrate usage of filter" in { - //#filter + // #filter val future1 = Future.successful(4) val future2 = future1.filter(_ % 2 == 0) @@ -254,15 +250,15 @@ class FutureDocSpec 
extends AkkaSpec { } failedFilter.foreach(println) - //#filter + // #filter val result = Await.result(future2, 3 seconds) result should be(4) val result2 = Await.result(failedFilter, 3 seconds) - result2 should be(0) //Can only be 0 when there was a MatchError + result2 should be(0) // Can only be 0 when there was a MatchError } "demonstrate usage of for comprehension" in { - //#for-comprehension + // #for-comprehension val f = for { a <- Future(10 / 2) // 10 / 2 = 5 b <- Future(a + 1) // 5 + 1 = 6 @@ -274,7 +270,7 @@ class FutureDocSpec extends AkkaSpec { // are not done in parallel. f.foreach(println) - //#for-comprehension + // #for-comprehension val result = Await.result(f, 3 seconds) result should be(24) } @@ -288,7 +284,7 @@ class FutureDocSpec extends AkkaSpec { implicit val timeout: Timeout = 5.seconds import scala.concurrent.Await import akka.pattern.ask - //#composing-wrong + // #composing-wrong val f1 = ask(actor1, msg1) val f2 = ask(actor2, msg2) @@ -296,10 +292,10 @@ class FutureDocSpec extends AkkaSpec { val a = Await.result(f1, 3 seconds).asInstanceOf[Int] val b = Await.result(f2, 3 seconds).asInstanceOf[Int] - val f3 = ask(actor3, (a + b)) + val f3 = ask(actor3, a + b) val result = Await.result(f3, 3 seconds).asInstanceOf[Int] - //#composing-wrong + // #composing-wrong result should be(3) } @@ -312,7 +308,7 @@ class FutureDocSpec extends AkkaSpec { implicit val timeout: Timeout = 5.seconds import scala.concurrent.Await import akka.pattern.ask - //#composing + // #composing val f1 = ask(actor1, msg1) val f2 = ask(actor2, msg2) @@ -320,11 +316,11 @@ class FutureDocSpec extends AkkaSpec { val f3 = for { a <- f1.mapTo[Int] b <- f2.mapTo[Int] - c <- ask(actor3, (a + b)).mapTo[Int] + c <- ask(actor3, a + b).mapTo[Int] } yield c f3.foreach(println) - //#composing + // #composing val result = Await.result(f3, 3 seconds).asInstanceOf[Int] result should be(3) } @@ -332,7 +328,7 @@ class FutureDocSpec extends AkkaSpec { "demonstrate usage of sequence with 
actors" in { implicit val timeout: Timeout = 5.seconds val oddActor = system.actorOf(Props[OddActor]()) - //#sequence-ask + // #sequence-ask // oddActor returns odd numbers sequentially from 1 as a List[Future[Int]] val listOfFutures = List.fill(100)(akka.pattern.ask(oddActor, GetNext).mapTo[Int]) @@ -342,45 +338,45 @@ class FutureDocSpec extends AkkaSpec { // Find the sum of the odd numbers val oddSum = futureList.map(_.sum) oddSum.foreach(println) - //#sequence-ask + // #sequence-ask Await.result(oddSum, 3 seconds).asInstanceOf[Int] should be(10000) } "demonstrate usage of sequence" in { - //#sequence + // #sequence val futureList = Future.sequence((1 to 100).toList.map(x => Future(x * 2 - 1))) val oddSum = futureList.map(_.sum) oddSum.foreach(println) - //#sequence + // #sequence Await.result(oddSum, 3 seconds).asInstanceOf[Int] should be(10000) } "demonstrate usage of traverse" in { - //#traverse + // #traverse val futureList = Future.traverse((1 to 100).toList)(x => Future(x * 2 - 1)) val oddSum = futureList.map(_.sum) oddSum.foreach(println) - //#traverse + // #traverse Await.result(oddSum, 3 seconds).asInstanceOf[Int] should be(10000) } "demonstrate usage of fold" in { - //#fold + // #fold // Create a sequence of Futures val futures = for (i <- 1 to 1000) yield Future(i * 2) val futureSum = Future.foldLeft(futures)(0)(_ + _) futureSum.foreach(println) - //#fold + // #fold Await.result(futureSum, 3 seconds) should be(1001000) } "demonstrate usage of reduce" in { - //#reduce + // #reduce // Create a sequence of Futures val futures = for (i <- 1 to 1000) yield Future(i * 2) val futureSum = Future.reduceLeft(futures)(_ + _) futureSum.foreach(println) - //#reduce + // #reduce Await.result(futureSum, 3 seconds) should be(1001000) } @@ -388,12 +384,12 @@ class FutureDocSpec extends AkkaSpec { implicit val timeout: Timeout = 5.seconds val actor = system.actorOf(Props[MyActor]()) val msg1 = -1 - //#recover - val future = akka.pattern.ask(actor, msg1).recover { - case 
e: ArithmeticException => 0 + // #recover + val future = akka.pattern.ask(actor, msg1).recover { case e: ArithmeticException => + 0 } future.foreach(println) - //#recover + // #recover Await.result(future, 3.seconds) should be(0) } @@ -401,24 +397,24 @@ class FutureDocSpec extends AkkaSpec { implicit val timeout: Timeout = 5.seconds val actor = system.actorOf(Props[MyActor]()) val msg1 = -1 - //#try-recover + // #try-recover val future = akka.pattern.ask(actor, msg1).recoverWith { case e: ArithmeticException => Future.successful(0) case foo: IllegalArgumentException => Future.failed[Int](new IllegalStateException("All br0ken!")) } future.foreach(println) - //#try-recover + // #try-recover Await.result(future, 3 seconds) should be(0) } "demonstrate usage of zip" in { val future1 = Future { "foo" } val future2 = Future { "bar" } - //#zip + // #zip val future3 = future1.zip(future2).map { case (a, b) => a + " " + b } future3.foreach(println) - //#zip + // #zip Await.result(future3, 3 seconds) should be("foo bar") } @@ -427,16 +423,16 @@ class FutureDocSpec extends AkkaSpec { val url = "foo bar" def log(cause: Throwable) = () def watchSomeTV(): Unit = () - //#and-then + // #and-then val result = Future { loadPage(url) } - .andThen { - case Failure(exception) => log(exception) + .andThen { case Failure(exception) => + log(exception) } - .andThen { - case _ => watchSomeTV() + .andThen { case _ => + watchSomeTV() } result.foreach(println) - //#and-then + // #and-then Await.result(result, 3 seconds) should be("foo bar") } @@ -444,10 +440,10 @@ class FutureDocSpec extends AkkaSpec { val future1 = Future { "foo" } val future2 = Future { "bar" } val future3 = Future { "pigdog" } - //#fallback-to + // #fallback-to val future4 = future1.fallbackTo(future2).fallbackTo(future3) future4.foreach(println) - //#fallback-to + // #fallback-to Await.result(future4, 3 seconds) should be("foo") } @@ -455,27 +451,27 @@ class FutureDocSpec extends AkkaSpec { val future = Future { "foo" } 
def doSomethingOnSuccess(r: String) = () def doSomethingOnFailure(t: Throwable) = () - //#onComplete + // #onComplete future.onComplete { case Success(result) => doSomethingOnSuccess(result) case Failure(failure) => doSomethingOnFailure(failure) } - //#onComplete + // #onComplete Await.result(future, 3 seconds) should be("foo") } "demonstrate usage of Future.successful & Future.failed & Future.promise" in { - //#successful + // #successful val future = Future.successful("Yay!") - //#successful - //#failed + // #successful + // #failed val otherFuture = Future.failed[String](new IllegalArgumentException("Bang!")) - //#failed - //#promise + // #failed + // #promise val promise = Promise[String]() val theFuture = promise.future promise.success("hello") - //#promise + // #promise Await.result(future, 3 seconds) should be("Yay!") intercept[IllegalArgumentException] { Await.result(otherFuture, 3 seconds) } Await.result(theFuture, 3 seconds) should be("hello") @@ -484,25 +480,25 @@ class FutureDocSpec extends AkkaSpec { "demonstrate usage of pattern.after" in { import akka.actor.typed.scaladsl.adapter.ClassicActorSystemOps implicit val system: ActorSystem[Nothing] = this.system.toTyped - //#after + // #after val delayed = akka.pattern.after(200.millis)(Future.failed(new IllegalStateException("OHNOES"))) val future = Future { Thread.sleep(1000); "foo" } val result = Future.firstCompletedOf(Seq(future, delayed)) - //#after + // #after intercept[IllegalStateException] { Await.result(result, 2 second) } } "demonstrate pattern.retry" in { import akka.actor.typed.scaladsl.adapter.ClassicActorSystemOps val system: ActorSystem[Nothing] = this.system.toTyped - //#retry + // #retry import akka.actor.typed.scaladsl.adapter._ implicit val scheduler: akka.actor.Scheduler = system.scheduler.toClassic implicit val ec: ExecutionContext = system.executionContext - //Given some future that will succeed eventually + // Given some future that will succeed eventually @volatile var failCount = 
0 def futureToAttempt() = { if (failCount < 5) { @@ -511,25 +507,25 @@ class FutureDocSpec extends AkkaSpec { } else Future.successful(5) } - //Return a new future that will retry up to 10 times + // Return a new future that will retry up to 10 times val retried: Future[Int] = akka.pattern.retry(() => futureToAttempt(), attempts = 10, 100 milliseconds) - //#retry + // #retry Await.result(retried, 1 second) should ===(5) } "demonstrate context.dispatcher" in { - //#context-dispatcher + // #context-dispatcher class A extends Actor { import context.dispatcher val f = Future("hello") def receive = { - //#receive-omitted + // #receive-omitted case _ => - //#receive-omitted + // #receive-omitted } } - //#context-dispatcher + // #context-dispatcher } } diff --git a/akka-docs/src/test/scala/docs/io/EchoServer.scala b/akka-docs/src/test/scala/docs/io/EchoServer.scala index 5c0074d3370..375f54b484c 100644 --- a/akka-docs/src/test/scala/docs/io/EchoServer.scala +++ b/akka-docs/src/test/scala/docs/io/EchoServer.scala @@ -50,12 +50,12 @@ class EchoManager(handlerClass: Class[_]) extends Actor with ActorLogging { log.warning(s"cannot bind to [$local]") context.stop(self) - //#echo-manager + // #echo-manager case Connected(remote, local) => log.info("received connection from {}", remote) val handler = context.actorOf(Props(handlerClass, sender(), remote)) sender() ! Register(handler, keepOpenOnPeerClosed = true) - //#echo-manager + // #echo-manager } } @@ -79,7 +79,7 @@ class EchoHandler(connection: ActorRef, remote: InetSocketAddress) extends Actor // start out in optimistic write-through mode def receive = writing - //#writing + // #writing def writing: Receive = { case Received(data) => connection ! 
Write(data, Ack(currentOffset)) @@ -96,9 +96,9 @@ class EchoHandler(connection: ActorRef, remote: InetSocketAddress) extends Actor if (storage.isEmpty) context.stop(self) else context.become(closing) } - //#writing + // #writing - //#buffering + // #buffering def buffering(nack: Int): Receive = { var toAck = 10 var peerClosed = false @@ -124,33 +124,35 @@ class EchoHandler(connection: ActorRef, remote: InetSocketAddress) extends Actor else context.become(writing) } } - //#buffering + // #buffering - //#closing + // #closing def closing: Receive = { case CommandFailed(_: Write) => connection ! ResumeWriting - context.become({ + context.become( + { - case WritingResumed => - writeAll() - context.unbecome() + case WritingResumed => + writeAll() + context.unbecome() - case ack: Int => acknowledge(ack) + case ack: Int => acknowledge(ack) - }, discardOld = false) + }, + discardOld = false) case Ack(ack) => acknowledge(ack) if (storage.isEmpty) context.stop(self) } - //#closing + // #closing override def postStop(): Unit = { log.info(s"transferred $transferred bytes from/to [$remote]") } - //#storage-omitted + // #storage-omitted private var storageOffset = 0 private var storage = Vector.empty[ByteString] private var stored = 0L @@ -163,7 +165,7 @@ class EchoHandler(connection: ActorRef, remote: InetSocketAddress) extends Actor private def currentOffset = storageOffset + storage.size - //#helpers + // #helpers private def buffer(data: ByteString): Unit = { storage :+= data stored += data.size @@ -196,7 +198,7 @@ class EchoHandler(connection: ActorRef, remote: InetSocketAddress) extends Actor suspended = false } } - //#helpers + // #helpers private def writeFirst(): Unit = { connection ! 
Write(storage(0), Ack(storageOffset)) @@ -208,7 +210,7 @@ class EchoHandler(connection: ActorRef, remote: InetSocketAddress) extends Actor } } - //#storage-omitted + // #storage-omitted } //#echo-handler @@ -227,16 +229,18 @@ class SimpleEchoHandler(connection: ActorRef, remote: InetSocketAddress) extends buffer(data) connection ! Write(data, Ack) - context.become({ - case Received(data) => buffer(data) - case Ack => acknowledge() - case PeerClosed => closing = true - }, discardOld = false) + context.become( + { + case Received(data) => buffer(data) + case Ack => acknowledge() + case PeerClosed => closing = true + }, + discardOld = false) case PeerClosed => context.stop(self) } - //#storage-omitted + // #storage-omitted override def postStop(): Unit = { log.info(s"transferred $transferred bytes from/to [$remote]") } @@ -251,7 +255,7 @@ class SimpleEchoHandler(connection: ActorRef, remote: InetSocketAddress) extends val lowWatermark = maxStored * 3 / 10 var suspended = false - //#simple-helpers + // #simple-helpers private def buffer(data: ByteString): Unit = { storage :+= data stored += data.size @@ -287,7 +291,7 @@ class SimpleEchoHandler(connection: ActorRef, remote: InetSocketAddress) extends else context.unbecome() } else connection ! 
Write(storage(0), Ack) } - //#simple-helpers - //#storage-omitted + // #simple-helpers + // #storage-omitted } //#simple-echo-handler diff --git a/akka-docs/src/test/scala/docs/io/IODocSpec.scala b/akka-docs/src/test/scala/docs/io/IODocSpec.scala index 1297d47c938..d5bae70c89e 100644 --- a/akka-docs/src/test/scala/docs/io/IODocSpec.scala +++ b/akka-docs/src/test/scala/docs/io/IODocSpec.scala @@ -15,12 +15,12 @@ import akka.testkit.AkkaSpec import scala.concurrent.duration._ class DemoActor extends Actor { - //#manager + // #manager import akka.io.{ IO, Tcp } import context.system // implicitly used by IO(Tcp) val manager = IO(Tcp) - //#manager + // #manager def receive = Actor.emptyBehavior } @@ -35,16 +35,16 @@ class Server extends Actor { def receive = { case b @ Bound(localAddress) => - //#do-some-logging-or-setup + // #do-some-logging-or-setup context.parent ! b - //#do-some-logging-or-setup + // #do-some-logging-or-setup case CommandFailed(_: Bind) => context.stop(self) case c @ Connected(remote, local) => - //#server + // #server context.parent ! c - //#server + // #server val handler = context.actorOf(Props[SimplisticHandler]()) val connection = sender() connection ! Register(handler) @@ -107,8 +107,8 @@ class IODocSpec extends AkkaSpec { class Parent extends Actor { context.actorOf(Props[Server](), "server") - def receive = { - case msg => testActor.forward(msg) + def receive = { case msg => + testActor.forward(msg) } } diff --git a/akka-docs/src/test/scala/docs/io/ReadBackPressure.scala b/akka-docs/src/test/scala/docs/io/ReadBackPressure.scala index d4ae99d3688..4678aa1e28d 100644 --- a/akka-docs/src/test/scala/docs/io/ReadBackPressure.scala +++ b/akka-docs/src/test/scala/docs/io/ReadBackPressure.scala @@ -21,28 +21,27 @@ object PullReadingExample { import context.system override def preStart(): Unit = - //#pull-mode-bind + // #pull-mode-bind IO(Tcp) ! 
Bind(self, new InetSocketAddress("localhost", 0), pullMode = true) - //#pull-mode-bind + // #pull-mode-bind def receive = { - //#pull-accepting + // #pull-accepting case Bound(localAddress) => // Accept connections one by one sender() ! ResumeAccepting(batchSize = 1) context.become(listening(sender())) - //#pull-accepting + // #pull-accepting monitor ! localAddress } - //#pull-accepting-cont - def listening(listener: ActorRef): Receive = { - case Connected(remote, local) => - val handler = context.actorOf(Props(classOf[PullEcho], sender())) - sender() ! Register(handler, keepOpenOnPeerClosed = true) - listener ! ResumeAccepting(batchSize = 1) + // #pull-accepting-cont + def listening(listener: ActorRef): Receive = { case Connected(remote, local) => + val handler = context.actorOf(Props(classOf[PullEcho], sender())) + sender() ! Register(handler, keepOpenOnPeerClosed = true) + listener ! ResumeAccepting(batchSize = 1) } - //#pull-accepting-cont + // #pull-accepting-cont } @@ -50,14 +49,14 @@ object PullReadingExample { class PullEcho(connection: ActorRef) extends Actor { - //#pull-reading-echo + // #pull-reading-echo override def preStart(): Unit = connection ! ResumeReading def receive = { case Received(data) => connection ! Write(data, Ack) case Ack => connection ! ResumeReading } - //#pull-reading-echo + // #pull-reading-echo } } @@ -69,9 +68,9 @@ class PullReadingSpec extends AkkaSpec with ImplicitSender { system.actorOf(Props(classOf[PullReadingExample.Listener], probe.ref), "server") val listenAddress = probe.expectMsgType[InetSocketAddress] - //#pull-mode-connect + // #pull-mode-connect IO(Tcp) ! 
Connect(listenAddress, pullMode = true) - //#pull-mode-connect + // #pull-mode-connect expectMsgType[Connected] val connection = lastSender diff --git a/akka-docs/src/test/scala/docs/io/ScalaUdpMulticast.scala b/akka-docs/src/test/scala/docs/io/ScalaUdpMulticast.scala index fbb426521de..34ea6cbef4a 100644 --- a/akka-docs/src/test/scala/docs/io/ScalaUdpMulticast.scala +++ b/akka-docs/src/test/scala/docs/io/ScalaUdpMulticast.scala @@ -31,16 +31,16 @@ final case class MulticastGroup(address: String, interface: String) extends Sock //#multicast-group class Listener(iface: String, group: String, port: Int, sink: ActorRef) extends Actor with ActorLogging { - //#bind + // #bind import context.system val opts = List(Inet6ProtocolFamily(), MulticastGroup(group, iface)) IO(Udp) ! Udp.Bind(self, new InetSocketAddress(port), opts) - //#bind + // #bind def receive = { case b @ Udp.Bound(to) => log.info("Bound to {}", to) - sink ! (b) + sink ! b case Udp.Received(data, remote) => val msg = data.decodeString("utf-8") log.info("Received '{}' from {}", msg, remote) diff --git a/akka-docs/src/test/scala/docs/io/ScalaUdpMulticastSpec.scala b/akka-docs/src/test/scala/docs/io/ScalaUdpMulticastSpec.scala index 10eb133905b..6ce6ec1ef81 100644 --- a/akka-docs/src/test/scala/docs/io/ScalaUdpMulticastSpec.scala +++ b/akka-docs/src/test/scala/docs/io/ScalaUdpMulticastSpec.scala @@ -28,11 +28,10 @@ class ScalaUdpMulticastSpec "listener" should { "send message back to sink" in { val ipv6ifaces = - NetworkInterface.getNetworkInterfaces.asScala.toSeq.filter( - iface => - iface.supportsMulticast && - iface.isUp && - iface.getInetAddresses.asScala.exists(_.isInstanceOf[Inet6Address])) + NetworkInterface.getNetworkInterfaces.asScala.toSeq.filter(iface => + iface.supportsMulticast && + iface.isUp && + iface.getInetAddresses.asScala.exists(_.isInstanceOf[Inet6Address])) if (ipv6ifaces.isEmpty) { // IPv6 not supported for any interface on this platform diff --git 
a/akka-docs/src/test/scala/docs/io/UdpDocSpec.scala b/akka-docs/src/test/scala/docs/io/UdpDocSpec.scala index 72521fe3824..f044f4afd7c 100644 --- a/akka-docs/src/test/scala/docs/io/UdpDocSpec.scala +++ b/akka-docs/src/test/scala/docs/io/UdpDocSpec.scala @@ -19,76 +19,72 @@ import akka.io.UdpConnected object ScalaUdpDocSpec { - //#sender + // #sender class SimpleSender(remote: InetSocketAddress) extends Actor { import context.system IO(Udp) ! Udp.SimpleSender - def receive = { - case Udp.SimpleSenderReady => - context.become(ready(sender())) - //#sender - sender() ! Udp.Send(ByteString("hello"), remote) - //#sender + def receive = { case Udp.SimpleSenderReady => + context.become(ready(sender())) + // #sender + sender() ! Udp.Send(ByteString("hello"), remote) + // #sender } - def ready(send: ActorRef): Receive = { - case msg: String => - send ! Udp.Send(ByteString(msg), remote) - //#sender - if (msg == "world") send ! PoisonPill - //#sender + def ready(send: ActorRef): Receive = { case msg: String => + send ! Udp.Send(ByteString(msg), remote) + // #sender + if (msg == "world") send ! PoisonPill + // #sender } } - //#sender + // #sender - //#listener + // #listener class Listener(nextActor: ActorRef) extends Actor { import context.system IO(Udp) ! Udp.Bind(self, new InetSocketAddress("localhost", 0)) - def receive = { - case Udp.Bound(local) => - //#listener - nextActor.forward(local) - //#listener - context.become(ready(sender())) + def receive = { case Udp.Bound(local) => + // #listener + nextActor.forward(local) + // #listener + context.become(ready(sender())) } def ready(socket: ActorRef): Receive = { case Udp.Received(data, remote) => val processed = // parse data etc., e.g. using PipelineStage - //#listener + // #listener data.utf8String - //#listener + // #listener socket ! Udp.Send(data, remote) // example server echoes back nextActor ! processed case Udp.Unbind => socket ! 
Udp.Unbind case Udp.Unbound => context.stop(self) } } - //#listener + // #listener - //#connected + // #connected class Connected(remote: InetSocketAddress) extends Actor { import context.system IO(UdpConnected) ! UdpConnected.Connect(self, remote) - def receive = { - case UdpConnected.Connected => - context.become(ready(sender())) - //#connected - sender() ! UdpConnected.Send(ByteString("hello")) - //#connected + def receive = { case UdpConnected.Connected => + context.become(ready(sender())) + // #connected + sender() ! UdpConnected.Send(ByteString("hello")) + // #connected } def ready(connection: ActorRef): Receive = { case UdpConnected.Received(data) => // process data, send it on, etc. - //#connected + // #connected if (data.utf8String == "hello") connection ! UdpConnected.Send(ByteString("world")) - //#connected + // #connected case msg: String => connection ! UdpConnected.Send(ByteString(msg)) case UdpConnected.Disconnect => @@ -96,7 +92,7 @@ object ScalaUdpDocSpec { case UdpConnected.Disconnected => context.stop(self) } } - //#connected + // #connected } diff --git a/akka-docs/src/test/scala/docs/pattern/BackoffSupervisorDocSpec.scala b/akka-docs/src/test/scala/docs/pattern/BackoffSupervisorDocSpec.scala index 455736737ff..31f66494be7 100644 --- a/akka-docs/src/test/scala/docs/pattern/BackoffSupervisorDocSpec.scala +++ b/akka-docs/src/test/scala/docs/pattern/BackoffSupervisorDocSpec.scala @@ -15,7 +15,7 @@ class BackoffSupervisorDocSpec { val system: ActorSystem = ??? import scala.concurrent.duration._ - //#backoff-stop + // #backoff-stop val childProps = Props(classOf[EchoActor]) val supervisor = BackoffSupervisor.props( @@ -28,14 +28,14 @@ class BackoffSupervisorDocSpec { )) system.actorOf(supervisor, name = "echoSupervisor") - //#backoff-stop + // #backoff-stop } class BackoffSupervisorDocSpecExampleFail { val system: ActorSystem = ??? 
import scala.concurrent.duration._ - //#backoff-fail + // #backoff-fail val childProps = Props(classOf[EchoActor]) val supervisor = BackoffSupervisor.props( @@ -48,7 +48,7 @@ class BackoffSupervisorDocSpec { )) system.actorOf(supervisor, name = "echoSupervisor") - //#backoff-fail + // #backoff-fail } class BackoffSupervisorDocSpecExampleStopOptions { @@ -57,7 +57,7 @@ class BackoffSupervisorDocSpec { val childProps = Props(classOf[EchoActor]) - //#backoff-custom-stop + // #backoff-custom-stop val supervisor = BackoffSupervisor.props( BackoffOpts .onStop( @@ -70,7 +70,7 @@ class BackoffSupervisorDocSpec { .withManualReset // the child must send BackoffSupervisor.Reset to its parent .withDefaultStoppingStrategy // Stop at any Exception thrown ) - //#backoff-custom-stop + // #backoff-custom-stop system.actorOf(supervisor, name = "echoSupervisor") } @@ -81,7 +81,7 @@ class BackoffSupervisorDocSpec { val childProps = Props(classOf[EchoActor]) - //#backoff-custom-fail + // #backoff-custom-fail val supervisor = BackoffSupervisor.props( BackoffOpts .onFailure( @@ -96,7 +96,7 @@ class BackoffSupervisorDocSpec { case _: MyException => SupervisorStrategy.Restart case _ => SupervisorStrategy.Escalate })) - //#backoff-custom-fail + // #backoff-custom-fail system.actorOf(supervisor, name = "echoSupervisor") } @@ -112,15 +112,15 @@ class BackoffSupervisorDocSpec { val childProps = Props(classOf[EchoActor]) - //#backoff-sharded + // #backoff-sharded val supervisor = BackoffSupervisor.props( BackoffOpts .onStop(childProps, childName = "myEcho", minBackoff = 3.seconds, maxBackoff = 30.seconds, randomFactor = 0.2) .withFinalStopMessage(_ == StopMessage)) - //#backoff-sharded + // #backoff-sharded - //#backoff-sharded-passivation + // #backoff-sharded-passivation context.parent ! 
Passivate(StopMessage) - //#backoff-sharded-passivation + // #backoff-sharded-passivation } } diff --git a/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala b/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala index fb8dedffecd..f27d4181e82 100644 --- a/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala +++ b/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala @@ -29,63 +29,60 @@ object PersistenceDocSpec { object RecoverySample { trait MyPersistentActor1 extends PersistentActor { - //#recovery-disabled + // #recovery-disabled override def recovery = Recovery.none - //#recovery-disabled + // #recovery-disabled } trait MyPersistentActor2 extends PersistentActor { - //#recovery-custom + // #recovery-custom override def recovery = Recovery(toSequenceNr = 457L) - //#recovery-custom + // #recovery-custom } class MyPersistentActor4 extends PersistentActor { override def persistenceId = "my-stable-persistence-id" - //#recovery-completed + // #recovery-completed override def receiveRecover: Receive = { case RecoveryCompleted => // perform init after recovery, before any other messages - //... - case evt => //... + // ... + case evt => // ... } - override def receiveCommand: Receive = { - case msg => //... + override def receiveCommand: Receive = { case msg => // ... 
} - //#recovery-completed + // #recovery-completed } trait MyPersistentActor5 extends PersistentActor { - //#recovery-no-snap + // #recovery-no-snap override def recovery = Recovery(fromSnapshot = SnapshotSelectionCriteria.None) - //#recovery-no-snap + // #recovery-no-snap } } object PersistenceId { trait PersistentActorMethods { - //#persistence-id + // #persistence-id def persistenceId: String - //#persistence-id - //#recovery-status + // #persistence-id + // #recovery-status def recoveryRunning: Boolean def recoveryFinished: Boolean - //#recovery-status + // #recovery-status } class MyPersistentActor1 extends PersistentActor with PersistentActorMethods { - //#persistence-id-override + // #persistence-id-override override def persistenceId = "my-stable-persistence-id" - //#persistence-id-override + // #persistence-id-override - override def receiveRecover: Receive = { - case _ => + override def receiveRecover: Receive = { case _ => } - override def receiveCommand: Receive = { - case _ => + override def receiveCommand: Receive = { case _ => } } } @@ -93,18 +90,18 @@ object PersistenceDocSpec { object BackoffOnStop { abstract class MyActor extends Actor { import PersistAsync.MyPersistentActor - //#backoff + // #backoff val childProps = Props[MyPersistentActor]() val props = BackoffSupervisor.props(BackoffOpts .onStop(childProps, childName = "myActor", minBackoff = 3.seconds, maxBackoff = 30.seconds, randomFactor = 0.2)) context.actorOf(props, name = "mySupervisor") - //#backoff + // #backoff } } object AtLeastOnce { - //#at-least-once-example + // #at-least-once-example import akka.actor.{ Actor, ActorSelection } import akka.persistence.AtLeastOnceDelivery @@ -124,8 +121,8 @@ object PersistenceDocSpec { case Confirm(deliveryId) => persist(MsgConfirmed(deliveryId))(updateState) } - override def receiveRecover: Receive = { - case evt: Evt => updateState(evt) + override def receiveRecover: Receive = { case evt: Evt => + updateState(evt) } def updateState(evt: Evt): 
Unit = evt match { @@ -137,13 +134,12 @@ object PersistenceDocSpec { } class MyDestination extends Actor { - def receive = { - case Msg(deliveryId, s) => - // ... - sender() ! Confirm(deliveryId) + def receive = { case Msg(deliveryId, s) => + // ... + sender() ! Confirm(deliveryId) } } - //#at-least-once-example + // #at-least-once-example } object SaveSnapshot { @@ -153,7 +149,7 @@ object PersistenceDocSpec { def updateState(event: String): Unit = {} - //#save-snapshot + // #save-snapshot var state: Any = _ val snapShotInterval = 1000 @@ -167,7 +163,7 @@ object PersistenceDocSpec { saveSnapshot(state) } } - //#save-snapshot + // #save-snapshot override def receiveRecover: Receive = ??? } @@ -177,13 +173,13 @@ object PersistenceDocSpec { class MyPersistentActor extends PersistentActor { override def persistenceId = "my-stable-persistence-id" - //#snapshot-criteria + // #snapshot-criteria override def recovery = - Recovery( - fromSnapshot = SnapshotSelectionCriteria(maxSequenceNr = 457L, maxTimestamp = System.currentTimeMillis)) - //#snapshot-criteria + Recovery(fromSnapshot = + SnapshotSelectionCriteria(maxSequenceNr = 457L, maxTimestamp = System.currentTimeMillis)) + // #snapshot-criteria - //#snapshot-offer + // #snapshot-offer var state: Any = _ override def receiveRecover: Receive = { @@ -191,7 +187,7 @@ object PersistenceDocSpec { case RecoveryCompleted => case event => // ... } - //#snapshot-offer + // #snapshot-offer override def receiveCommand: Receive = ??? 
} @@ -200,13 +196,12 @@ object PersistenceDocSpec { object PersistAsync { - //#persist-async + // #persist-async class MyPersistentActor extends PersistentActor { override def persistenceId = "my-stable-persistence-id" - override def receiveRecover: Receive = { - case _ => // handle recovery here + override def receiveRecover: Receive = { case _ => // handle recovery here } override def receiveCommand: Receive = { @@ -234,18 +229,17 @@ object PersistenceDocSpec { // evt-b-1 // evt-b-2 - //#persist-async + // #persist-async } object Defer { - //#defer + // #defer class MyPersistentActor extends PersistentActor { override def persistenceId = "my-stable-persistence-id" - override def receiveRecover: Receive = { - case _ => // handle recovery here + override def receiveRecover: Receive = { case _ => // handle recovery here } override def receiveCommand: Receive = { @@ -263,9 +257,9 @@ object PersistenceDocSpec { } } } - //#defer + // #defer - //#defer-caller + // #defer-caller persistentActor ! "a" persistentActor ! 
"b" @@ -279,17 +273,16 @@ object PersistenceDocSpec { // evt-b-2 // evt-b-3 - //#defer-caller + // #defer-caller } object DeferWithPersist { - //#defer-with-persist + // #defer-with-persist class MyPersistentActor extends PersistentActor { override def persistenceId = "my-stable-persistence-id" - override def receiveRecover: Receive = { - case _ => // handle recovery here + override def receiveRecover: Receive = { case _ => // handle recovery here } override def receiveCommand: Receive = { @@ -307,7 +300,7 @@ object PersistenceDocSpec { } } } - //#defer-with-persist + // #defer-with-persist } object NestedPersists { @@ -315,33 +308,31 @@ object PersistenceDocSpec { class MyPersistentActor extends PersistentActor { override def persistenceId = "my-stable-persistence-id" - override def receiveRecover: Receive = { - case _ => // handle recovery here + override def receiveRecover: Receive = { case _ => // handle recovery here } - //#nested-persist-persist - override def receiveCommand: Receive = { - case c: String => - sender() ! c + // #nested-persist-persist + override def receiveCommand: Receive = { case c: String => + sender() ! c - persist(s"$c-1-outer") { outer1 => - sender() ! outer1 - persist(s"$c-1-inner") { inner1 => - sender() ! inner1 - } + persist(s"$c-1-outer") { outer1 => + sender() ! outer1 + persist(s"$c-1-inner") { inner1 => + sender() ! inner1 } + } - persist(s"$c-2-outer") { outer2 => - sender() ! outer2 - persist(s"$c-2-inner") { inner2 => - sender() ! inner2 - } + persist(s"$c-2-outer") { outer2 => + sender() ! outer2 + persist(s"$c-2-inner") { inner2 => + sender() ! inner2 } + } } - //#nested-persist-persist + // #nested-persist-persist } - //#nested-persist-persist-caller + // #nested-persist-persist-caller persistentActor ! "a" persistentActor ! 
"b" @@ -358,36 +349,34 @@ object PersistenceDocSpec { // b-inner-1 // b-inner-2 - //#nested-persist-persist-caller + // #nested-persist-persist-caller class MyPersistAsyncActor extends PersistentActor { override def persistenceId = "my-stable-persistence-id" - override def receiveRecover: Receive = { - case _ => // handle recovery here + override def receiveRecover: Receive = { case _ => // handle recovery here } - //#nested-persistAsync-persistAsync - override def receiveCommand: Receive = { - case c: String => - sender() ! c - persistAsync(c + "-outer-1") { outer => - sender() ! outer - persistAsync(c + "-inner-1") { inner => - sender() ! inner - } + // #nested-persistAsync-persistAsync + override def receiveCommand: Receive = { case c: String => + sender() ! c + persistAsync(c + "-outer-1") { outer => + sender() ! outer + persistAsync(c + "-inner-1") { inner => + sender() ! inner } - persistAsync(c + "-outer-2") { outer => - sender() ! outer - persistAsync(c + "-inner-2") { inner => - sender() ! inner - } + } + persistAsync(c + "-outer-2") { outer => + sender() ! outer + persistAsync(c + "-inner-2") { inner => + sender() ! inner } + } } - //#nested-persistAsync-persistAsync + // #nested-persistAsync-persistAsync } - //#nested-persistAsync-persistAsync-caller + // #nested-persistAsync-persistAsync-caller persistentActor ! "a" persistentActor ! 
"b" @@ -407,12 +396,12 @@ object PersistenceDocSpec { // a -> a-outer-1 -> a-outer-2 -> a-inner-1 -> a-inner-2 // b -> b-outer-1 -> b-outer-2 -> b-inner-1 -> b-inner-2 - //#nested-persistAsync-persistAsync-caller + // #nested-persistAsync-persistAsync-caller } object AvoidPoisonPill { - //#safe-shutdown + // #safe-shutdown /** Explicit shutdown message */ case object Shutdown @@ -427,13 +416,12 @@ object PersistenceDocSpec { context.stop(self) } - override def receiveRecover: Receive = { - case _ => // handle recovery here + override def receiveRecover: Receive = { case _ => // handle recovery here } } - //#safe-shutdown + // #safe-shutdown - //#safe-shutdown-example-bad + // #safe-shutdown-example-bad // UN-SAFE, due to PersistentActor's command stashing: persistentActor ! "a" persistentActor ! "b" @@ -444,9 +432,9 @@ object PersistenceDocSpec { // PoisonPill is an AutoReceivedMessage, is handled automatically // !! stop !! // Actor is stopped without handling `b` nor the `a` handler! - //#safe-shutdown-example-bad + // #safe-shutdown-example-bad - //#safe-shutdown-example-good + // #safe-shutdown-example-good // SAFE: persistentActor ! "a" persistentActor ! "b" @@ -462,7 +450,7 @@ object PersistenceDocSpec { // # unstashing; internal-stash = [] // Shutdown // -- stop -- - //#safe-shutdown-example-good + // #safe-shutdown-example-good } } diff --git a/akka-docs/src/test/scala/docs/persistence/PersistenceEventAdapterDocSpec.scala b/akka-docs/src/test/scala/docs/persistence/PersistenceEventAdapterDocSpec.scala index 2609e11c51c..63b5d4c27ca 100644 --- a/akka-docs/src/test/scala/docs/persistence/PersistenceEventAdapterDocSpec.scala +++ b/akka-docs/src/test/scala/docs/persistence/PersistenceEventAdapterDocSpec.scala @@ -79,11 +79,10 @@ class PersistenceEventAdapterDocSpec(config: String) extends AkkaSpec(config) { case e => p.ref ! e } - override def receiveCommand: Receive = { - case c => - persist(c) { e => - p.ref ! 
e - } + override def receiveCommand: Receive = { case c => + persist(c) { e => + p.ref ! e + } } }) @@ -114,11 +113,10 @@ class PersistenceEventAdapterDocSpec(config: String) extends AkkaSpec(config) { case e => p.ref ! e } - override def receiveCommand: Receive = { - case c => - persist(c) { e => - p.ref ! e - } + override def receiveCommand: Receive = { case c => + persist(c) { e => + p.ref ! e + } } }) diff --git a/akka-docs/src/test/scala/docs/persistence/PersistenceMultiDocSpec.scala b/akka-docs/src/test/scala/docs/persistence/PersistenceMultiDocSpec.scala index c3a83805c58..1ba7c9f1343 100644 --- a/akka-docs/src/test/scala/docs/persistence/PersistenceMultiDocSpec.scala +++ b/akka-docs/src/test/scala/docs/persistence/PersistenceMultiDocSpec.scala @@ -17,12 +17,12 @@ object PersistenceMultiDocSpec { //#default-config """ - //#default-plugins + // #default-plugins trait ActorWithDefaultPlugins extends PersistentActor { override def persistenceId = "123" } - //#default-plugins + // #default-plugins val OverrideConfig = s""" @@ -44,7 +44,7 @@ object PersistenceMultiDocSpec { //#override-config """ - //#override-plugins + // #override-plugins trait ActorWithOverridePlugins extends PersistentActor { override def persistenceId = "123" @@ -55,9 +55,9 @@ object PersistenceMultiDocSpec { override def snapshotPluginId = "akka.persistence.chronicle.snapshot-store" } - //#override-plugins + // #override-plugins - //#runtime-config + // #runtime-config trait ActorWithRuntimePluginConfig extends PersistentActor with RuntimePluginConfig { // Variable that is retrieved at runtime, from an external service for instance. 
val runtimeDistinction = "foo" @@ -92,5 +92,5 @@ object PersistenceMultiDocSpec { } - //#runtime-config + // #runtime-config } diff --git a/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala b/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala index 641409d9b58..724eff93a70 100644 --- a/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala +++ b/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala @@ -125,18 +125,17 @@ object SharedLeveldbPluginDocSpec { //#event-adapter-config """ - //#shared-store-usage + // #shared-store-usage trait SharedStoreUsage extends Actor { override def preStart(): Unit = { context.actorSelection("akka://example@127.0.0.1:2552/user/store") ! Identify(1) } - def receive = { - case ActorIdentity(1, Some(store)) => - SharedLeveldbJournal.setStore(store, context.system) + def receive = { case ActorIdentity(1, Some(store)) => + SharedLeveldbJournal.setStore(store, context.system) } } - //#shared-store-usage + // #shared-store-usage } trait SharedLeveldbPluginDocSpec { @@ -144,22 +143,22 @@ trait SharedLeveldbPluginDocSpec { { import akka.actor._ - //#shared-store-creation + // #shared-store-creation import akka.persistence.journal.leveldb.SharedLeveldbStore val store = system.actorOf(Props[SharedLeveldbStore](), "store") - //#shared-store-creation + // #shared-store-creation } } class MyJournal extends AsyncWriteJournal { - //#sync-journal-plugin-api + // #sync-journal-plugin-api def asyncWriteMessages(messages: immutable.Seq[AtomicWrite]): Future[immutable.Seq[Try[Unit]]] = Future.fromTry(Try { // blocking call here ??? }) - //#sync-journal-plugin-api + // #sync-journal-plugin-api def asyncDeleteMessagesTo(persistenceId: String, toSequenceNr: Long): Future[Unit] = ??? 
def asyncReplayMessages(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)( @@ -184,10 +183,10 @@ object PersistenceTCKDoc { object example1 { import akka.persistence.journal.JournalSpec - //#journal-tck-scala + // #journal-tck-scala class MyJournalSpec - extends JournalSpec( - config = ConfigFactory.parseString("""akka.persistence.journal.plugin = "my.journal.plugin"""")) { + extends JournalSpec(config = + ConfigFactory.parseString("""akka.persistence.journal.plugin = "my.journal.plugin"""")) { override def supportsRejectingNonSerializableObjects: CapabilityFlag = false // or CapabilityFlag.off @@ -195,22 +194,20 @@ object PersistenceTCKDoc { override def supportsSerialization: CapabilityFlag = true // or CapabilityFlag.on } - //#journal-tck-scala + // #journal-tck-scala } object example2 { import akka.persistence.snapshot.SnapshotStoreSpec - //#snapshot-store-tck-scala - class MySnapshotStoreSpec - extends SnapshotStoreSpec( - config = ConfigFactory.parseString(""" + // #snapshot-store-tck-scala + class MySnapshotStoreSpec extends SnapshotStoreSpec(config = ConfigFactory.parseString(""" akka.persistence.snapshot-store.plugin = "my.snapshot-store.plugin" """)) { override def supportsSerialization: CapabilityFlag = true // or CapabilityFlag.on } - //#snapshot-store-tck-scala + // #snapshot-store-tck-scala } object example3 { import java.io.File @@ -218,9 +215,8 @@ object PersistenceTCKDoc { import akka.persistence.journal.JournalSpec import org.iq80.leveldb.util.FileUtils - //#journal-tck-before-after-scala - class MyJournalSpec - extends JournalSpec(config = ConfigFactory.parseString(""" + // #journal-tck-before-after-scala + class MyJournalSpec extends JournalSpec(config = ConfigFactory.parseString(""" akka.persistence.journal.plugin = "my.journal.plugin" """)) { @@ -242,6 +238,6 @@ object PersistenceTCKDoc { } } - //#journal-tck-before-after-scala + // #journal-tck-before-after-scala } } diff --git 
a/akka-docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala b/akka-docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala index b7cc3ca3255..6c11ac12773 100644 --- a/akka-docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala +++ b/akka-docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala @@ -47,7 +47,7 @@ class PersistenceSchemaEvolutionDocSpec extends AnyWordSpec { class ProtobufReadOptional { - //#protobuf-read-optional-model + // #protobuf-read-optional-model sealed abstract class SeatType { def code: String } object SeatType { def fromString(s: String) = s match { @@ -64,9 +64,9 @@ class ProtobufReadOptional { } case class SeatReserved(letter: String, row: Int, seatType: SeatType) - //#protobuf-read-optional-model + // #protobuf-read-optional-model - //#protobuf-read-optional + // #protobuf-read-optional /** * Example serializer impl which uses protocol buffers generated classes (proto.*) * to perform the to/from binary marshalling. @@ -107,7 +107,7 @@ class ProtobufReadOptional { if (p.hasSeatType) SeatType.fromString(p.getSeatType) else SeatType.Unknown } - //#protobuf-read-optional + // #protobuf-read-optional } class ProtoBufRename { @@ -127,7 +127,7 @@ class ProtoBufRename { } class RenamePlainJson { - //#rename-plain-json + // #rename-plain-json class JsonRenamedFieldAdapter extends EventAdapter { val marshaller = new ExampleJsonMarshaller @@ -159,16 +159,16 @@ class RenamePlainJson { } } - //#rename-plain-json + // #rename-plain-json } object SimplestCustomSerializer { - //#simplest-custom-serializer-model + // #simplest-custom-serializer-model final case class Person(name: String, surname: String) - //#simplest-custom-serializer-model + // #simplest-custom-serializer-model - //#simplest-custom-serializer + // #simplest-custom-serializer /** * Simplest possible serializer, uses a string representation of the Person class. 
* @@ -208,7 +208,7 @@ object SimplestCustomSerializer { } - //#simplest-custom-serializer + // #simplest-custom-serializer } class PersonSerializerSettingsBox { @@ -268,7 +268,8 @@ class RemovedEventsAwareSerializer extends SerializerWithStringManifest { val utf8 = Charset.forName("UTF-8") override def identifier: Int = 8337 - val SkipEventManifestsEvents = Set("docs.persistence.CustomerBlinked" // ... + val SkipEventManifestsEvents = Set( + "docs.persistence.CustomerBlinked" // ... ) override def manifest(o: AnyRef): String = o.getClass.getName diff --git a/akka-docs/src/test/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala b/akka-docs/src/test/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala index e6febbcd9a3..3dddd5e71da 100644 --- a/akka-docs/src/test/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala +++ b/akka-docs/src/test/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala @@ -11,7 +11,7 @@ import akka.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal import akka.stream.scaladsl.Source object LeveldbPersistenceQueryDocSpec { - //#tagger + // #tagger import akka.persistence.journal.WriteEventAdapter import akka.persistence.journal.Tagged @@ -29,7 +29,7 @@ object LeveldbPersistenceQueryDocSpec { override def manifest(event: Any): String = "" } - //#tagger + // #tagger } class LeveldbPersistenceQueryDocSpec @@ -37,40 +37,40 @@ class LeveldbPersistenceQueryDocSpec "LeveldbPersistentQuery" must { "demonstrate how get ReadJournal" in { - //#get-read-journal + // #get-read-journal import akka.persistence.query.PersistenceQuery import akka.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal val queries = PersistenceQuery(system).readJournalFor[LeveldbReadJournal](LeveldbReadJournal.Identifier) - //#get-read-journal + // #get-read-journal } "demonstrate EventsByPersistenceId" in { - //#EventsByPersistenceId + // #EventsByPersistenceId val queries = 
PersistenceQuery(system).readJournalFor[LeveldbReadJournal](LeveldbReadJournal.Identifier) val src: Source[EventEnvelope, NotUsed] = queries.eventsByPersistenceId("some-persistence-id", 0L, Long.MaxValue) val events: Source[Any, NotUsed] = src.map(_.event) - //#EventsByPersistenceId + // #EventsByPersistenceId } "demonstrate AllPersistenceIds" in { - //#AllPersistenceIds + // #AllPersistenceIds val queries = PersistenceQuery(system).readJournalFor[LeveldbReadJournal](LeveldbReadJournal.Identifier) val src: Source[String, NotUsed] = queries.persistenceIds() - //#AllPersistenceIds + // #AllPersistenceIds } "demonstrate EventsByTag" in { - //#EventsByTag + // #EventsByTag val queries = PersistenceQuery(system).readJournalFor[LeveldbReadJournal](LeveldbReadJournal.Identifier) val src: Source[EventEnvelope, NotUsed] = queries.eventsByTag(tag = "green", offset = Sequence(0L)) - //#EventsByTag + // #EventsByTag } } diff --git a/akka-docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala b/akka-docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala index 6b77841e640..ed46864ce00 100644 --- a/akka-docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala +++ b/akka-docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala @@ -26,14 +26,14 @@ object PersistenceQueryDocSpec { implicit val timeout: Timeout = Timeout(3.seconds) - //#advanced-journal-query-types + // #advanced-journal-query-types final case class RichEvent(tags: Set[String], payload: Any) // a plugin can provide: case class QueryMetadata(deterministicOrder: Boolean, infinite: Boolean) - //#advanced-journal-query-types + // #advanced-journal-query-types - //#my-read-journal + // #my-read-journal class MyReadJournalProvider(system: ExtendedActorSystem, config: Config) extends ReadJournalProvider { private val readJournal: MyScaladslReadJournal = @@ -70,7 +70,7 @@ object PersistenceQueryDocSpec { override def eventsByTag(tag: String, offset: Offset): 
Source[EventEnvelope, NotUsed] = offset match { case Sequence(offsetValue) => Source.fromGraph(new MyEventsByTagSource(tag, offsetValue, refreshInterval)) - case NoOffset => eventsByTag(tag, Sequence(0L)) //recursive + case NoOffset => eventsByTag(tag, Sequence(0L)) // recursive case _ => throw new IllegalArgumentException("MyJournal does not support " + offset.getClass.getName + " offsets") } @@ -95,9 +95,9 @@ object PersistenceQueryDocSpec { // possibility to add more plugin specific queries - //#advanced-journal-query-definition + // #advanced-journal-query-definition def byTagsWithMeta(tags: Set[String]): Source[RichEvent, QueryMetadata] = { - //#advanced-journal-query-definition + // #advanced-journal-query-definition // implement in a similar way as eventsByTag ??? } @@ -134,7 +134,7 @@ object PersistenceQueryDocSpec { } } - //#my-read-journal + // #my-read-journal case class ComplexState() { def readyToSave = false @@ -152,7 +152,7 @@ object PersistenceQueryDocSpec { def batchWriter: Subscriber[immutable.Seq[Any]] = ??? 
} - //#projection-into-different-store-rs + // #projection-into-different-store-rs implicit val system: ActorSystem = ActorSystem() val readJournal = @@ -167,7 +167,7 @@ object PersistenceQueryDocSpec { .map(convertToReadSideTypes) // convert to datatype .grouped(20) // batch inserts into groups of 20 .runWith(Sink.fromSubscriber(dbBatchWriter)) // write batches to read-side database - //#projection-into-different-store-rs + // #projection-into-different-store-rs } } @@ -184,7 +184,7 @@ class PersistenceQueryDocSpec(s: String) extends AkkaSpec(s) { """) class BasicUsage { - //#basic-usage + // #basic-usage // obtain read journal by plugin id val readJournal = PersistenceQuery(system).readJournalFor[MyScaladslReadJournal]("akka.persistence.query.my-read-journal") @@ -197,19 +197,19 @@ class PersistenceQueryDocSpec(s: String) extends AkkaSpec(s) { source.runForeach { event => println("Event: " + event) } - //#basic-usage + // #basic-usage - //#all-persistence-ids-live + // #all-persistence-ids-live readJournal.persistenceIds() - //#all-persistence-ids-live + // #all-persistence-ids-live - //#all-persistence-ids-snap + // #all-persistence-ids-snap readJournal.currentPersistenceIds() - //#all-persistence-ids-snap + // #all-persistence-ids-snap trait OrderCompleted - //#events-by-tag + // #events-by-tag // assuming journal is able to work with numeric offsets we can: val completedOrders: Source[EventEnvelope, NotUsed] = @@ -225,14 +225,14 @@ class PersistenceQueryDocSpec(s: String) extends AkkaSpec(s) { // start another query, from the known offset val furtherOrders = readJournal.eventsByTag("order-completed", offset = Sequence(10)) - //#events-by-tag + // #events-by-tag - //#events-by-persistent-id + // #events-by-persistent-id readJournal.eventsByPersistenceId("user-us-1337", fromSequenceNr = 0L, toSequenceNr = Long.MaxValue) - //#events-by-persistent-id + // #events-by-persistent-id - //#advanced-journal-query-usage + // #advanced-journal-query-usage val query: 
Source[RichEvent, QueryMetadata] = readJournal.byTagsWithMeta(Set("red", "blue")) @@ -248,27 +248,27 @@ class PersistenceQueryDocSpec(s: String) extends AkkaSpec(s) { } .runWith(Sink.ignore) - //#advanced-journal-query-usage + // #advanced-journal-query-usage } - //#projection-into-different-store + // #projection-into-different-store class MyResumableProjection(name: String) { def saveProgress(offset: Offset): Future[Long] = ??? def latestOffset: Future[Long] = ??? } - //#projection-into-different-store + // #projection-into-different-store class RunWithAsyncFunction { val readJournal = PersistenceQuery(system).readJournalFor[MyScaladslReadJournal]("akka.persistence.query.my-read-journal") - //#projection-into-different-store-simple-classes + // #projection-into-different-store-simple-classes trait ExampleStore { def save(event: Any): Future[Unit] } - //#projection-into-different-store-simple-classes + // #projection-into-different-store-simple-classes - //#projection-into-different-store-simple + // #projection-into-different-store-simple val store: ExampleStore = ??? 
readJournal @@ -277,7 +277,7 @@ class PersistenceQueryDocSpec(s: String) extends AkkaSpec(s) { store.save(e) } .runWith(Sink.ignore) - //#projection-into-different-store-simple + // #projection-into-different-store-simple } } diff --git a/akka-docs/src/test/scala/docs/persistence/testkit/Configuration.scala b/akka-docs/src/test/scala/docs/persistence/testkit/Configuration.scala index 73bb439ab83..b82b5b5a98c 100644 --- a/akka-docs/src/test/scala/docs/persistence/testkit/Configuration.scala +++ b/akka-docs/src/test/scala/docs/persistence/testkit/Configuration.scala @@ -11,7 +11,7 @@ import com.typesafe.config.ConfigFactory object TestKitTypedConf { - //#testkit-typed-conf + // #testkit-typed-conf val yourConfiguration = ConfigFactory.defaultApplication() @@ -20,13 +20,13 @@ object TestKitTypedConf { val testKit = PersistenceTestKit(system) - //#testkit-typed-conf + // #testkit-typed-conf } object SnapshotTypedConf { - //#snapshot-typed-conf + // #snapshot-typed-conf val yourConfiguration = ConfigFactory.defaultApplication() @@ -37,6 +37,6 @@ object SnapshotTypedConf { val testKit = SnapshotTestKit(system) - //#snapshot-typed-conf + // #snapshot-typed-conf } diff --git a/akka-docs/src/test/scala/docs/persistence/testkit/PersistenceInitSpec.scala b/akka-docs/src/test/scala/docs/persistence/testkit/PersistenceInitSpec.scala index e4e73b31c1d..f35575be64a 100644 --- a/akka-docs/src/test/scala/docs/persistence/testkit/PersistenceInitSpec.scala +++ b/akka-docs/src/test/scala/docs/persistence/testkit/PersistenceInitSpec.scala @@ -19,19 +19,21 @@ import scala.concurrent.duration._ //#imports -class PersistenceInitSpec extends ScalaTestWithActorTestKit(s""" +class PersistenceInitSpec + extends ScalaTestWithActorTestKit(s""" akka.persistence.journal.plugin = "akka.persistence.journal.inmem" akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" akka.persistence.snapshot-store.local.dir = "target/snapshot-${UUID.randomUUID().toString}" - """) with 
AnyWordSpecLike { + """) + with AnyWordSpecLike { "PersistenceInit" should { "initialize plugins" in { - //#init + // #init val timeout = 5.seconds val done: Future[Done] = PersistenceInit.initializeDefaultPlugins(system, timeout) Await.result(done, timeout) - //#init + // #init } } } diff --git a/akka-docs/src/test/scala/docs/persistence/testkit/TestKitExamples.scala b/akka-docs/src/test/scala/docs/persistence/testkit/TestKitExamples.scala index 7436c0737dd..c565e7aa512 100644 --- a/akka-docs/src/test/scala/docs/persistence/testkit/TestKitExamples.scala +++ b/akka-docs/src/test/scala/docs/persistence/testkit/TestKitExamples.scala @@ -68,14 +68,14 @@ import akka.persistence.testkit._ class SampleEventStoragePolicy extends EventStorage.JournalPolicies.PolicyType { - //you can use internal state, it does not need to be thread safe + // you can use internal state, it does not need to be thread safe var count = 1 override def tryProcess(persistenceId: String, processingUnit: JournalOperation): ProcessingResult = if (count < 10) { count += 1 - //check the type of operation and react with success or with reject or with failure. - //if you return ProcessingSuccess the operation will be performed, otherwise not. + // check the type of operation and react with success or with reject or with failure. + // if you return ProcessingSuccess the operation will be performed, otherwise not. 
processingUnit match { case ReadEvents(batch) if batch.nonEmpty => ProcessingSuccess case WriteEvents(batch) if batch.size > 1 => @@ -94,14 +94,14 @@ class SampleEventStoragePolicy extends EventStorage.JournalPolicies.PolicyType { //#set-snapshot-storage-policy class SampleSnapshotStoragePolicy extends SnapshotStorage.SnapshotPolicies.PolicyType { - //you can use internal state, it does not need to be thread safe + // you can use internal state, it does not need to be thread safe var count = 1 override def tryProcess(persistenceId: String, processingUnit: SnapshotOperation): ProcessingResult = if (count < 10) { count += 1 - //check the type of operation and react with success or with reject or with failure. - //if you return ProcessingSuccess the operation will be performed, otherwise not. + // check the type of operation and react with success or with reject or with failure. + // if you return ProcessingSuccess the operation will be performed, otherwise not. processingUnit match { case ReadSnapshot(_, payload) if payload.nonEmpty => ProcessingSuccess diff --git a/akka-docs/src/test/scala/docs/remoting/RemoteDeploymentDocSpec.scala b/akka-docs/src/test/scala/docs/remoting/RemoteDeploymentDocSpec.scala index 9828a80214e..173191eedec 100644 --- a/akka-docs/src/test/scala/docs/remoting/RemoteDeploymentDocSpec.scala +++ b/akka-docs/src/test/scala/docs/remoting/RemoteDeploymentDocSpec.scala @@ -19,11 +19,13 @@ object RemoteDeploymentDocSpec { } -class RemoteDeploymentDocSpec extends AkkaSpec(""" +class RemoteDeploymentDocSpec + extends AkkaSpec(""" akka.actor.provider = remote akka.remote.artery.canonical.port = 0 akka.remote.use-unsafe-remote-features-outside-cluster = on -""") with ImplicitSender { +""") + with ImplicitSender { import RemoteDeploymentDocSpec._ @@ -34,35 +36,35 @@ class RemoteDeploymentDocSpec extends AkkaSpec(""" override def afterTermination(): Unit = { shutdown(other) } "demonstrate programmatic deployment" in { - //#deploy + // #deploy val ref = 
system.actorOf(Props[SampleActor]().withDeploy(Deploy(scope = RemoteScope(address)))) - //#deploy + // #deploy ref.path.address should be(address) ref ! "test" expectMsgType[ActorRef].path.address should be(address) } def makeAddress(): Unit = { - //#make-address-artery + // #make-address-artery val one = AddressFromURIString("akka://sys@host:1234") val two = Address("akka", "sys", "host", 1234) // this gives the same - //#make-address-artery + // #make-address-artery } "demonstrate address extractor" in { - //#make-address + // #make-address val one = AddressFromURIString("akka://sys@host:1234") val two = Address("akka", "sys", "host", 1234) // this gives the same - //#make-address + // #make-address one should be(two) } "demonstrate sampleActor" in { - //#sample-actor + // #sample-actor val actor = system.actorOf(Props[SampleActor](), "sampleActor") actor ! "Pretty slick" - //#sample-actor + // #sample-actor } } diff --git a/akka-docs/src/test/scala/docs/routing/ConsistentHashingRouterDocSpec.scala b/akka-docs/src/test/scala/docs/routing/ConsistentHashingRouterDocSpec.scala index 055498b0471..9b46e503b82 100644 --- a/akka-docs/src/test/scala/docs/routing/ConsistentHashingRouterDocSpec.scala +++ b/akka-docs/src/test/scala/docs/routing/ConsistentHashingRouterDocSpec.scala @@ -11,7 +11,7 @@ import akka.actor.ActorRef object ConsistentHashingRouterDocSpec { - //#cache-actor + // #cache-actor import akka.actor.Actor import akka.routing.ConsistentHashingRouter.ConsistentHashable @@ -32,7 +32,7 @@ object ConsistentHashingRouterDocSpec { } final case class Entry(key: String, value: String) - //#cache-actor + // #cache-actor } @@ -44,14 +44,14 @@ class ConsistentHashingRouterDocSpec extends AkkaSpec with ImplicitSender { def context = system - //#consistent-hashing-router + // #consistent-hashing-router import akka.actor.Props import akka.routing.ConsistentHashingPool import akka.routing.ConsistentHashingRouter.ConsistentHashMapping import 
akka.routing.ConsistentHashingRouter.ConsistentHashableEnvelope - def hashMapping: ConsistentHashMapping = { - case Evict(key) => key + def hashMapping: ConsistentHashMapping = { case Evict(key) => + key } val cache: ActorRef = @@ -70,7 +70,7 @@ class ConsistentHashingRouterDocSpec extends AkkaSpec with ImplicitSender { cache ! Get("hi") expectMsg(None) - //#consistent-hashing-router + // #consistent-hashing-router } diff --git a/akka-docs/src/test/scala/docs/routing/CustomRouterDocSpec.scala b/akka-docs/src/test/scala/docs/routing/CustomRouterDocSpec.scala index 99eb5bbe282..9b8cafb8c36 100644 --- a/akka-docs/src/test/scala/docs/routing/CustomRouterDocSpec.scala +++ b/akka-docs/src/test/scala/docs/routing/CustomRouterDocSpec.scala @@ -40,7 +40,7 @@ akka.actor.deployment { #//#jconfig """ - //#routing-logic + // #routing-logic import scala.collection.immutable import java.util.concurrent.ThreadLocalRandom import akka.routing.RoundRobinRoutingLogic @@ -55,20 +55,20 @@ akka.actor.deployment { SeveralRoutees(targets) } } - //#routing-logic + // #routing-logic class Storage extends Actor { - def receive = { - case x => sender() ! x + def receive = { case x => + sender() ! 
x } } - //#unit-test-logic + // #unit-test-logic final case class TestRoutee(n: Int) extends Routee { override def send(message: Any, sender: ActorRef): Unit = () } - //#unit-test-logic + // #unit-test-logic } //#group @@ -98,7 +98,7 @@ class CustomRouterDocSpec extends AkkaSpec(CustomRouterDocSpec.config) with Impl import akka.routing.SeveralRoutees "unit test routing logic" in { - //#unit-test-logic + // #unit-test-logic val logic = new RedundancyRoutingLogic(nbrCopies = 3) val routees = for (n <- 1 to 7) yield TestRoutee(n) @@ -111,26 +111,26 @@ class CustomRouterDocSpec extends AkkaSpec(CustomRouterDocSpec.config) with Impl val r3 = logic.select("msg", routees) r3.asInstanceOf[SeveralRoutees].routees should be(Vector(TestRoutee(7), TestRoutee(1), TestRoutee(2))) - //#unit-test-logic + // #unit-test-logic } "demonstrate usage of custom router" in { - //#usage-1 + // #usage-1 for (n <- 1 to 10) system.actorOf(Props[Storage](), "s" + n) - val paths = for (n <- 1 to 10) yield ("/user/s" + n) + val paths = for (n <- 1 to 10) yield "/user/s" + n val redundancy1: ActorRef = system.actorOf(RedundancyGroup(paths, nbrCopies = 3).props(), name = "redundancy1") redundancy1 ! "important" - //#usage-1 + // #usage-1 for (_ <- 1 to 3) expectMsg("important") - //#usage-2 + // #usage-2 val redundancy2: ActorRef = system.actorOf(FromConfig.props(), name = "redundancy2") redundancy2 ! 
"very important" - //#usage-2 + // #usage-2 for (_ <- 1 to 5) expectMsg("very important") diff --git a/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala b/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala index 623f2e029df..2e2cf0b8403 100644 --- a/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala +++ b/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala @@ -293,7 +293,7 @@ router-dispatcher {} final case class Work(payload: String) - //#router-in-actor + // #router-in-actor import akka.routing.{ ActorRefRoutee, RoundRobinRoutingLogic, Router } class Master extends Actor { @@ -316,87 +316,85 @@ router-dispatcher {} router = router.addRoutee(r) } } - //#router-in-actor + // #router-in-actor class Worker extends Actor { - def receive = { - case _ => + def receive = { case _ => } } - //#create-worker-actors + // #create-worker-actors class Workers extends Actor { context.actorOf(Props[Worker](), name = "w1") context.actorOf(Props[Worker](), name = "w2") context.actorOf(Props[Worker](), name = "w3") // ... 
- //#create-worker-actors + // #create-worker-actors - def receive = { - case _ => + def receive = { case _ => } } class Parent extends Actor { - //#paths + // #paths val paths = List("/user/workers/w1", "/user/workers/w2", "/user/workers/w3") - //#paths + // #paths - //#round-robin-pool-1 + // #round-robin-pool-1 val router1: ActorRef = context.actorOf(FromConfig.props(Props[Worker]()), "router1") - //#round-robin-pool-1 + // #round-robin-pool-1 - //#round-robin-pool-2 + // #round-robin-pool-2 val router2: ActorRef = context.actorOf(RoundRobinPool(5).props(Props[Worker]()), "router2") - //#round-robin-pool-2 + // #round-robin-pool-2 - //#round-robin-group-1 + // #round-robin-group-1 val router3: ActorRef = context.actorOf(FromConfig.props(), "router3") - //#round-robin-group-1 + // #round-robin-group-1 - //#round-robin-group-2 + // #round-robin-group-2 val router4: ActorRef = context.actorOf(RoundRobinGroup(paths).props(), "router4") - //#round-robin-group-2 + // #round-robin-group-2 - //#random-pool-1 + // #random-pool-1 val router5: ActorRef = context.actorOf(FromConfig.props(Props[Worker]()), "router5") - //#random-pool-1 + // #random-pool-1 - //#random-pool-2 + // #random-pool-2 val router6: ActorRef = context.actorOf(RandomPool(5).props(Props[Worker]()), "router6") - //#random-pool-2 + // #random-pool-2 - //#random-group-1 + // #random-group-1 val router7: ActorRef = context.actorOf(FromConfig.props(), "router7") - //#random-group-1 + // #random-group-1 - //#random-group-2 + // #random-group-2 val router8: ActorRef = context.actorOf(RandomGroup(paths).props(), "router8") - //#random-group-2 + // #random-group-2 - //#balancing-pool-1 + // #balancing-pool-1 val router9: ActorRef = context.actorOf(FromConfig.props(Props[Worker]()), "router9") - //#balancing-pool-1 + // #balancing-pool-1 - //#balancing-pool-2 + // #balancing-pool-2 val router10: ActorRef = context.actorOf(BalancingPool(5).props(Props[Worker]()), "router10") - //#balancing-pool-2 + // 
#balancing-pool-2 // #balancing-pool-3 val router10b: ActorRef = context.actorOf(BalancingPool(20).props(Props[Worker]()), "router10b") - //#balancing-pool-3 + // #balancing-pool-3 for (i <- 1 to 100) router10b ! i val threads10b = Thread.getAllStackTraces.keySet.asScala.filter { _.getName contains "router10b" } val threads10bNr = threads10b.size @@ -404,121 +402,120 @@ router-dispatcher {} threads10bNr == 5, s"Expected 5 threads for router10b, had $threads10bNr! Got: ${threads10b.map(_.getName)}") - //#smallest-mailbox-pool-1 + // #smallest-mailbox-pool-1 val router11: ActorRef = context.actorOf(FromConfig.props(Props[Worker]()), "router11") - //#smallest-mailbox-pool-1 + // #smallest-mailbox-pool-1 - //#smallest-mailbox-pool-2 + // #smallest-mailbox-pool-2 val router12: ActorRef = context.actorOf(SmallestMailboxPool(5).props(Props[Worker]()), "router12") - //#smallest-mailbox-pool-2 + // #smallest-mailbox-pool-2 - //#broadcast-pool-1 + // #broadcast-pool-1 val router13: ActorRef = context.actorOf(FromConfig.props(Props[Worker]()), "router13") - //#broadcast-pool-1 + // #broadcast-pool-1 - //#broadcast-pool-2 + // #broadcast-pool-2 val router14: ActorRef = context.actorOf(BroadcastPool(5).props(Props[Worker]()), "router14") - //#broadcast-pool-2 + // #broadcast-pool-2 - //#broadcast-group-1 + // #broadcast-group-1 val router15: ActorRef = context.actorOf(FromConfig.props(), "router15") - //#broadcast-group-1 + // #broadcast-group-1 - //#broadcast-group-2 + // #broadcast-group-2 val router16: ActorRef = context.actorOf(BroadcastGroup(paths).props(), "router16") - //#broadcast-group-2 + // #broadcast-group-2 - //#scatter-gather-pool-1 + // #scatter-gather-pool-1 val router17: ActorRef = context.actorOf(FromConfig.props(Props[Worker]()), "router17") - //#scatter-gather-pool-1 + // #scatter-gather-pool-1 - //#scatter-gather-pool-2 + // #scatter-gather-pool-2 val router18: ActorRef = context.actorOf(ScatterGatherFirstCompletedPool(5, within = 
10.seconds).props(Props[Worker]()), "router18") - //#scatter-gather-pool-2 + // #scatter-gather-pool-2 - //#scatter-gather-group-1 + // #scatter-gather-group-1 val router19: ActorRef = context.actorOf(FromConfig.props(), "router19") - //#scatter-gather-group-1 + // #scatter-gather-group-1 - //#scatter-gather-group-2 + // #scatter-gather-group-2 val router20: ActorRef = context.actorOf(ScatterGatherFirstCompletedGroup(paths, within = 10.seconds).props(), "router20") - //#scatter-gather-group-2 + // #scatter-gather-group-2 - //#tail-chopping-pool-1 + // #tail-chopping-pool-1 val router21: ActorRef = context.actorOf(FromConfig.props(Props[Worker]()), "router21") - //#tail-chopping-pool-1 + // #tail-chopping-pool-1 - //#tail-chopping-pool-2 + // #tail-chopping-pool-2 val router22: ActorRef = context.actorOf(TailChoppingPool(5, within = 10.seconds, interval = 20.millis).props(Props[Worker]()), "router22") - //#tail-chopping-pool-2 + // #tail-chopping-pool-2 - //#tail-chopping-group-1 + // #tail-chopping-group-1 val router23: ActorRef = context.actorOf(FromConfig.props(), "router23") - //#tail-chopping-group-1 + // #tail-chopping-group-1 - //#tail-chopping-group-2 + // #tail-chopping-group-2 val router24: ActorRef = context.actorOf(TailChoppingGroup(paths, within = 10.seconds, interval = 20.millis).props(), "router24") - //#tail-chopping-group-2 + // #tail-chopping-group-2 - //#consistent-hashing-pool-1 + // #consistent-hashing-pool-1 val router25: ActorRef = context.actorOf(FromConfig.props(Props[Worker]()), "router25") - //#consistent-hashing-pool-1 + // #consistent-hashing-pool-1 - //#consistent-hashing-pool-2 + // #consistent-hashing-pool-2 val router26: ActorRef = context.actorOf(ConsistentHashingPool(5).props(Props[Worker]()), "router26") - //#consistent-hashing-pool-2 + // #consistent-hashing-pool-2 - //#consistent-hashing-group-1 + // #consistent-hashing-group-1 val router27: ActorRef = context.actorOf(FromConfig.props(), "router27") - 
//#consistent-hashing-group-1 + // #consistent-hashing-group-1 - //#consistent-hashing-group-2 + // #consistent-hashing-group-2 val router28: ActorRef = context.actorOf(ConsistentHashingGroup(paths).props(), "router28") - //#consistent-hashing-group-2 + // #consistent-hashing-group-2 - //#resize-pool-1 + // #resize-pool-1 val router29: ActorRef = context.actorOf(FromConfig.props(Props[Worker]()), "router29") - //#resize-pool-1 + // #resize-pool-1 - //#resize-pool-2 + // #resize-pool-2 val resizer = DefaultResizer(lowerBound = 2, upperBound = 15) val router30: ActorRef = context.actorOf(RoundRobinPool(5, Some(resizer)).props(Props[Worker]()), "router30") - //#resize-pool-2 + // #resize-pool-2 - //#optimal-size-exploring-resize-pool + // #optimal-size-exploring-resize-pool val router31: ActorRef = context.actorOf(FromConfig.props(Props[Worker]()), "router31") - //#optimal-size-exploring-resize-pool + // #optimal-size-exploring-resize-pool - def receive = { - case _ => + def receive = { case _ => } } class Echo extends Actor { - def receive = { - case m => sender() ! m + def receive = { case m => + sender() ! 
m } } } @@ -527,89 +524,89 @@ class RouterDocSpec extends AkkaSpec(RouterDocSpec.config) with ImplicitSender { import RouterDocSpec._ - //#create-workers + // #create-workers system.actorOf(Props[Workers](), "workers") - //#create-workers + // #create-workers - //#create-parent + // #create-parent system.actorOf(Props[Parent](), "parent") - //#create-parent + // #create-parent "demonstrate dispatcher" in { - //#dispatchers + // #dispatchers val router: ActorRef = system.actorOf( // “head” router actor will run on "router-dispatcher" dispatcher // Worker routees will run on "pool-dispatcher" dispatcher RandomPool(5, routerDispatcher = "router-dispatcher").props(Props[Worker]()), name = "poolWithDispatcher") - //#dispatchers + // #dispatchers } "demonstrate broadcast" in { val router = system.actorOf(RoundRobinPool(nrOfInstances = 5).props(Props[Echo]())) - //#broadcastDavyJonesWarning + // #broadcastDavyJonesWarning import akka.routing.Broadcast router ! Broadcast("Watch out for Davy Jones' locker") - //#broadcastDavyJonesWarning + // #broadcastDavyJonesWarning (receiveN(5, 5.seconds.dilated) should have).length(5) } "demonstrate PoisonPill" in { val router = watch(system.actorOf(RoundRobinPool(nrOfInstances = 5).props(Props[Echo]()))) - //#poisonPill + // #poisonPill import akka.actor.PoisonPill router ! PoisonPill - //#poisonPill + // #poisonPill expectTerminated(router) } "demonstrate broadcast of PoisonPill" in { val router = watch(system.actorOf(RoundRobinPool(nrOfInstances = 5).props(Props[Echo]()))) - //#broadcastPoisonPill + // #broadcastPoisonPill import akka.actor.PoisonPill import akka.routing.Broadcast router ! Broadcast(PoisonPill) - //#broadcastPoisonPill + // #broadcastPoisonPill expectTerminated(router) } "demonstrate Kill" in { val router = watch(system.actorOf(RoundRobinPool(nrOfInstances = 5).props(Props[Echo]()))) - //#kill + // #kill import akka.actor.Kill router ! 
Kill - //#kill + // #kill expectTerminated(router) } "demonstrate broadcast of Kill" in { val router = watch(system.actorOf(RoundRobinPool(nrOfInstances = 5).props(Props[Echo]()))) - //#broadcastKill + // #broadcastKill import akka.actor.Kill import akka.routing.Broadcast router ! Broadcast(Kill) - //#broadcastKill + // #broadcastKill expectTerminated(router) } "demonstrate remote deploy" in { - //#remoteRoutees + // #remoteRoutees import akka.actor.{ Address, AddressFromURIString } import akka.remote.routing.RemoteRouterConfig val addresses = Seq(Address("akka", "remotesys", "otherhost", 1234), AddressFromURIString("akka://othersys@anotherhost:1234")) val routerRemote = system.actorOf(RemoteRouterConfig(RoundRobinPool(5), addresses).props(Props[Echo]())) - //#remoteRoutees + // #remoteRoutees } // only compile test def demonstrateRemoteDeployWithArtery(): Unit = { - //#remoteRoutees-artery + // #remoteRoutees-artery import akka.actor.{ Address, AddressFromURIString } import akka.remote.routing.RemoteRouterConfig val addresses = Seq(Address("akka", "remotesys", "otherhost", 1234), AddressFromURIString("akka://othersys@anotherhost:1234")) val routerRemote = system.actorOf(RemoteRouterConfig(RoundRobinPool(5), addresses).props(Props[Echo]())) - //#remoteRoutees-artery + // #remoteRoutees-artery } } diff --git a/akka-docs/src/test/scala/docs/serialization/SerializationDocSpec.scala b/akka-docs/src/test/scala/docs/serialization/SerializationDocSpec.scala index 86fb77a1a85..9d6f897dbd0 100644 --- a/akka-docs/src/test/scala/docs/serialization/SerializationDocSpec.scala +++ b/akka-docs/src/test/scala/docs/serialization/SerializationDocSpec.scala @@ -4,26 +4,26 @@ package docs.serialization { - //#imports + // #imports import akka.actor._ import akka.actor.typed.scaladsl.Behaviors import akka.cluster.Cluster import akka.serialization._ - //#imports + // #imports import akka.testkit._ import com.typesafe.config.ConfigFactory import akka.actor.ExtendedActorSystem import 
java.nio.charset.StandardCharsets - //#marker-interface + // #marker-interface import akka.serialization.jackson.JsonSerializable final case class MyMessage(name: String, nr: Int) extends JsonSerializable - //#marker-interface + // #marker-interface - //#my-own-serializer + // #my-own-serializer class MyOwnSerializer extends Serializer { // If you need logging here, introduce a constructor that takes an ExtendedActorSystem. @@ -42,23 +42,23 @@ package docs.serialization { // "toBinary" serializes the given object to an Array of Bytes def toBinary(obj: AnyRef): Array[Byte] = { // Put the code that serializes the object here - //#... + // #... Array[Byte]() - //#... + // #... } // "fromBinary" deserializes the given array, // using the type hint (if any, see "includeManifest" above) def fromBinary(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = { // Put your code that deserializes here - //#... + // #... null - //#... + // #... } } - //#my-own-serializer + // #my-own-serializer - //#my-own-serializer2 + // #my-own-serializer2 class MyOwnSerializer2 extends SerializerWithStringManifest { val CustomerManifest = "customer" @@ -99,7 +99,7 @@ package docs.serialization { } } } - //#my-own-serializer2 + // #my-own-serializer2 trait MyOwnSerializable final case class Customer(name: String) extends MyOwnSerializable @@ -196,7 +196,7 @@ package docs.serialization { } "demonstrate the programmatic API" in { - //#programmatic + // #programmatic val system = ActorSystem("example") // Get the Serialization Extension @@ -212,7 +212,7 @@ package docs.serialization { // Turn it back into an object val back = serialization.deserialize(bytes, serializerId, manifest).get - //#programmatic + // #programmatic // Voilá! 
back should be(original) @@ -221,21 +221,21 @@ package docs.serialization { } def demonstrateTypedActorSystem(): Unit = { - //#programmatic-typed + // #programmatic-typed import akka.actor.typed.ActorSystem val system = ActorSystem(Behaviors.empty, "example") // Get the Serialization Extension val serialization = SerializationExtension(system) - //#programmatic-typed + // #programmatic-typed } def demonstrateSerializationOfActorRefs(): Unit = { val theActorRef: ActorRef = system.deadLetters val extendedSystem: ExtendedActorSystem = system.asInstanceOf[ExtendedActorSystem] - //#actorref-serializer + // #actorref-serializer // Serialize // (beneath toBinary) val serializedRef: String = Serialization.serializedActorPath(theActorRef) @@ -246,18 +246,18 @@ package docs.serialization { // (beneath fromBinary) val deserializedRef = extendedSystem.provider.resolveActorRef(serializedRef) // Then use the ActorRef - //#actorref-serializer + // #actorref-serializer } def demonstrateSerializationOfActorRefs2(): Unit = { val theActorRef: ActorRef = system.deadLetters - //#external-address-default + // #external-address-default val selfAddress = Cluster(system).selfAddress val serializedRef: String = theActorRef.path.toSerializationFormatWithAddress(selfAddress) - //#external-address-default + // #external-address-default } } } diff --git a/akka-docs/src/test/scala/docs/stream/BidiFlowDocSpec.scala b/akka-docs/src/test/scala/docs/stream/BidiFlowDocSpec.scala index 3448d4d30d2..89f795900a4 100644 --- a/akka-docs/src/test/scala/docs/stream/BidiFlowDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/BidiFlowDocSpec.scala @@ -14,24 +14,24 @@ import scala.concurrent.duration._ import scala.concurrent.Await object BidiFlowDocSpec { - //#codec + // #codec trait Message case class Ping(id: Int) extends Message case class Pong(id: Int) extends Message - //#codec-impl + // #codec-impl def toBytes(msg: Message): ByteString = { - //#implementation-details-elided + // 
#implementation-details-elided implicit val order = ByteOrder.LITTLE_ENDIAN msg match { case Ping(id) => ByteString.newBuilder.putByte(1).putInt(id).result() case Pong(id) => ByteString.newBuilder.putByte(2).putInt(id).result() } - //#implementation-details-elided + // #implementation-details-elided } def fromBytes(bytes: ByteString): Message = { - //#implementation-details-elided + // #implementation-details-elided implicit val order = ByteOrder.LITTLE_ENDIAN val it = bytes.iterator it.getByte match { @@ -39,9 +39,9 @@ object BidiFlowDocSpec { case 2 => Pong(it.getInt) case other => throw new RuntimeException(s"parse error: expected 1|2 got $other") } - //#implementation-details-elided + // #implementation-details-elided } - //#codec-impl + // #codec-impl val codecVerbose = BidiFlow.fromGraph(GraphDSL.create() { b => // construct and add the top flow, going outbound @@ -54,9 +54,9 @@ object BidiFlowDocSpec { // this is the same as the above val codec = BidiFlow.fromFunctions(toBytes _, fromBytes _) - //#codec + // #codec - //#framing + // #framing val framing = BidiFlow.fromGraph(GraphDSL.create() { b => implicit val order = ByteOrder.LITTLE_ENDIAN @@ -78,28 +78,32 @@ object BidiFlowDocSpec { // this holds the current message length or -1 if at a boundary var needed = -1 - setHandler(out, new OutHandler { - override def onPull(): Unit = { - if (isClosed(in)) run() - else pull(in) - } - }) - setHandler(in, new InHandler { - override def onPush(): Unit = { - val bytes = grab(in) - stash = stash ++ bytes - run() - } + setHandler( + out, + new OutHandler { + override def onPull(): Unit = { + if (isClosed(in)) run() + else pull(in) + } + }) + setHandler( + in, + new InHandler { + override def onPush(): Unit = { + val bytes = grab(in) + stash = stash ++ bytes + run() + } - override def onUpstreamFinish(): Unit = { - // either we are done - if (stash.isEmpty) completeStage() - // or we still have bytes to emit - // wait with completion and let run() complete when the - 
// rest of the stash has been sent downstream - else if (isAvailable(out)) run() - } - }) + override def onUpstreamFinish(): Unit = { + // either we are done + if (stash.isEmpty) completeStage() + // or we still have bytes to emit + // wait with completion and let run() complete when the + // rest of the stash has been sent downstream + else if (isAvailable(out)) run() + } + }) private def run(): Unit = { if (needed == -1) { @@ -132,7 +136,7 @@ object BidiFlowDocSpec { val inbound = b.add(Flow[ByteString].via(new FrameParser)) BidiShape.fromFlows(outbound, inbound) }) - //#framing + // #framing val chopUp = BidiFlow.fromGraph(GraphDSL.create() { b => val f = Flow[ByteString].mapConcat(_.map(ByteString(_))) @@ -151,7 +155,7 @@ class BidiFlowDocSpec extends AkkaSpec { "A BidiFlow" must { "compose" in { - //#compose + // #compose /* construct protocol stack * +------------------------------------+ * | stack | @@ -170,7 +174,7 @@ class BidiFlowDocSpec extends AkkaSpec { val flow = stack.atop(stack.reversed).join(pingpong) val result = Source((0 to 9).map(Ping(_))).via(flow).limit(20).runWith(Sink.seq) Await.result(result, 1.second) should ===((0 to 9).map(Pong(_))) - //#compose + // #compose } "work when chopped up" in { diff --git a/akka-docs/src/test/scala/docs/stream/CompositionDocSpec.scala b/akka-docs/src/test/scala/docs/stream/CompositionDocSpec.scala index d5ee7b8b4ac..cbc3bf66d84 100644 --- a/akka-docs/src/test/scala/docs/stream/CompositionDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/CompositionDocSpec.scala @@ -19,15 +19,15 @@ class CompositionDocSpec extends AkkaSpec { implicit val ec: ExecutionContext = system.dispatcher "nonnested flow" in { - //#non-nested-flow + // #non-nested-flow Source.single(0).map(_ + 1).filter(_ != 0).map(_ - 2).to(Sink.fold(0)(_ + _)) // ... where is the nesting? 
- //#non-nested-flow + // #non-nested-flow } "nested flow" in { - //#nested-flow + // #nested-flow val nestedSource = Source .single(0) // An atomic source @@ -47,7 +47,7 @@ class CompositionDocSpec extends AkkaSpec { // Create a RunnableGraph val runnableGraph = nestedSource.to(nestedSink) - //#nested-flow + // #nested-flow } "reusing components" in { @@ -68,13 +68,13 @@ class CompositionDocSpec extends AkkaSpec { .to(Sink.fold(0)(_ + _)) // wire an atomic sink to the nestedFlow .named("nestedSink") // wrap it up - //#reuse + // #reuse // Create a RunnableGraph from our components val runnableGraph = nestedSource.to(nestedSink) // Usage is uniform, no matter if modules are composite or atomic val runnableGraph2 = Source.single(0).to(Sink.fold(0)(_ + _)) - //#reuse + // #reuse } "complex graph" in { @@ -136,9 +136,9 @@ class CompositionDocSpec extends AkkaSpec { //#partial-graph // format: ON - //#partial-use + // #partial-use Source.single(0).via(partial).to(Sink.ignore) - //#partial-use + // #partial-use // format: OFF //#partial-flow-dsl @@ -169,18 +169,18 @@ class CompositionDocSpec extends AkkaSpec { } "closed graph" in { - //#embed-closed + // #embed-closed val closed1 = Source.single(0).to(Sink.foreach(println)) val closed2 = RunnableGraph.fromGraph(GraphDSL.create() { implicit builder => val embeddedClosed: ClosedShape = builder.add(closed1) // … embeddedClosed }) - //#embed-closed + // #embed-closed } "materialized values" in { - //#mat-combine-1 + // #mat-combine-1 // Materializes to Promise[Option[Int]] (red) val source: Source[Int, Promise[Option[Int]]] = Source.maybe[Int] @@ -190,9 +190,9 @@ class CompositionDocSpec extends AkkaSpec { // Materializes to Promise[Int] (red) val nestedSource: Source[Int, Promise[Option[Int]]] = source.viaMat(flow1)(Keep.left).named("nestedSource") - //#mat-combine-1 + // #mat-combine-1 - //#mat-combine-2 + // #mat-combine-2 // Materializes to NotUsed (orange) val flow2: Flow[Int, ByteString, NotUsed] = Flow[Int].map { i 
=> ByteString(i.toString) @@ -205,18 +205,18 @@ class CompositionDocSpec extends AkkaSpec { // Materializes to Future[OutgoingConnection] (yellow) val nestedFlow: Flow[Int, ByteString, Future[OutgoingConnection]] = flow2.viaMat(flow3)(Keep.right).named("nestedFlow") - //#mat-combine-2 + // #mat-combine-2 - //#mat-combine-3 + // #mat-combine-3 // Materializes to Future[String] (green) val sink: Sink[ByteString, Future[String]] = Sink.fold("")(_ + _.utf8String) // Materializes to (Future[OutgoingConnection], Future[String]) (blue) val nestedSink: Sink[Int, (Future[OutgoingConnection], Future[String])] = nestedFlow.toMat(sink)(Keep.both) - //#mat-combine-3 + // #mat-combine-3 - //#mat-combine-4 + // #mat-combine-4 case class MyClass(private val p: Promise[Option[Int]], conn: OutgoingConnection) { def close() = p.trySuccess(None) } @@ -230,11 +230,11 @@ class CompositionDocSpec extends AkkaSpec { // Materializes to Future[MyClass] (purple) val runnableGraph: RunnableGraph[Future[MyClass]] = nestedSource.toMat(nestedSink)(f) - //#mat-combine-4 + // #mat-combine-4 } "attributes" in { - //#attributes-inheritance + // #attributes-inheritance import Attributes._ val nestedSource = Source.single(0).map(_ + 1).named("nestedSource") // Wrap, no inputBuffer set @@ -249,6 +249,6 @@ class CompositionDocSpec extends AkkaSpec { nestedFlow .to(Sink.fold(0)(_ + _)) // wire an atomic sink to the nestedFlow .withAttributes(name("nestedSink") and inputBuffer(3, 3)) // override - //#attributes-inheritance + // #attributes-inheritance } } diff --git a/akka-docs/src/test/scala/docs/stream/FlowDocSpec.scala b/akka-docs/src/test/scala/docs/stream/FlowDocSpec.scala index 3fbda80e4ef..fed528bca68 100644 --- a/akka-docs/src/test/scala/docs/stream/FlowDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/FlowDocSpec.scala @@ -23,18 +23,18 @@ class FlowDocSpec extends AkkaSpec with CompileOnlySpec { implicit val ec: ExecutionContext = system.dispatcher "source is immutable" in { - 
//#source-immutable + // #source-immutable val source = Source(1 to 10) source.map(_ => 0) // has no effect on source, since it's immutable source.runWith(Sink.fold(0)(_ + _)) // 55 val zeroes = source.map(_ => 0) // returns new Source[Int], with `map()` appended zeroes.runWith(Sink.fold(0)(_ + _)) // 0 - //#source-immutable + // #source-immutable } "materialization in steps" in { - //#materialization-in-steps + // #materialization-in-steps val source = Source(1 to 10) val sink = Sink.fold[Int, Int](0)(_ + _) @@ -44,21 +44,21 @@ class FlowDocSpec extends AkkaSpec with CompileOnlySpec { // materialize the flow and get the value of the sink val sum: Future[Int] = runnable.run() - //#materialization-in-steps + // #materialization-in-steps } "materialization runWith" in { - //#materialization-runWith + // #materialization-runWith val source = Source(1 to 10) val sink = Sink.fold[Int, Int](0)(_ + _) // materialize the flow, getting the Sink's materialized value val sum: Future[Int] = source.runWith(sink) - //#materialization-runWith + // #materialization-runWith } "materialization is unique" in { - //#stream-reuse + // #stream-reuse // connect the Source to the Sink, obtaining a RunnableGraph val sink = Sink.fold[Int, Int](0)(_ + _) val runnable: RunnableGraph[Future[Int]] = @@ -69,7 +69,7 @@ class FlowDocSpec extends AkkaSpec with CompileOnlySpec { val sum2: Future[Int] = runnable.run() // sum1 and sum2 are different Futures! 
- //#stream-reuse + // #stream-reuse } "compound source cannot be used as key" in { @@ -91,7 +91,7 @@ class FlowDocSpec extends AkkaSpec with CompileOnlySpec { } "creating sources, sinks" in { - //#source-sink + // #source-sink // Create a source from an Iterable Source(List(1, 2, 3)) @@ -117,11 +117,11 @@ class FlowDocSpec extends AkkaSpec with CompileOnlySpec { // A Sink that executes a side-effecting call for every element of the stream Sink.foreach[String](println(_)) - //#source-sink + // #source-sink } "various ways of connecting source, sink, flow" in { - //#flow-connecting + // #flow-connecting // Explicitly creating and wiring up a Source, Sink and Flow Source(1 to 6).via(Flow[Int].map(_ * 2)).to(Sink.foreach(println(_))) @@ -138,21 +138,21 @@ class FlowDocSpec extends AkkaSpec with CompileOnlySpec { Flow[Int].alsoTo(Sink.foreach(println(_))).to(Sink.ignore) Source(1 to 6).to(otherSink) - //#flow-connecting + // #flow-connecting } "various ways of transforming materialized values" in { import scala.concurrent.duration._ - val throttler = Flow.fromGraph(GraphDSL.createGraph(Source.tick(1.second, 1.second, "test")) { - implicit builder => tickSource => + val throttler = + Flow.fromGraph(GraphDSL.createGraph(Source.tick(1.second, 1.second, "test")) { implicit builder => tickSource => import GraphDSL.Implicits._ val zip = builder.add(ZipWith[String, Int, Int](Keep.right)) tickSource ~> zip.in0 FlowShape(zip.in1, zip.out) - }) + }) - //#flow-mat-combine + // #flow-mat-combine // A source that can be signalled explicitly from the outside val source: Source[Int, Promise[Option[Int]]] = Source.maybe[Int] @@ -192,9 +192,8 @@ class FlowDocSpec extends AkkaSpec with CompileOnlySpec { // It is also possible to map over the materialized values. 
In r9 we had a // doubly nested pair, but we want to flatten it out val r11: RunnableGraph[(Promise[Option[Int]], Cancellable, Future[Int])] = - r9.mapMaterializedValue { - case ((promise, cancellable), future) => - (promise, cancellable, future) + r9.mapMaterializedValue { case ((promise, cancellable), future) => + (promise, cancellable, future) } // Now we can use pattern matching to get the resulting materialized values @@ -213,17 +212,17 @@ class FlowDocSpec extends AkkaSpec with CompileOnlySpec { ClosedShape }) - //#flow-mat-combine + // #flow-mat-combine } "defining asynchronous boundaries" in { - //#flow-async + // #flow-async Source(List(1, 2, 3)).map(_ + 1).async.map(_ * 2).to(Sink.ignore) - //#flow-async + // #flow-async } "source pre-materialization" in { - //#source-prematerialization + // #source-prematerialization val completeWithDone: PartialFunction[Any, CompletionStrategy] = { case Done => CompletionStrategy.immediately } val matValuePoweredSource = Source.actorRef[String]( @@ -238,13 +237,13 @@ class FlowDocSpec extends AkkaSpec with CompileOnlySpec { // pass source around for materialization source.runWith(Sink.foreach(println)) - //#source-prematerialization + // #source-prematerialization } } object FlowDocSpec { - //#materializer-from-actor-context + // #materializer-from-actor-context final class RunWithMyself extends Actor { implicit val mat: Materializer = Materializer(context) @@ -253,14 +252,13 @@ object FlowDocSpec { case Failure(ex) => println(s"Failed: ${ex.getMessage}") }) - def receive = { - case "boom" => - context.stop(self) // will also terminate the stream + def receive = { case "boom" => + context.stop(self) // will also terminate the stream } } - //#materializer-from-actor-context + // #materializer-from-actor-context - //#materializer-from-system-in-actor + // #materializer-from-system-in-actor final class RunForever(implicit val mat: Materializer) extends Actor { Source.maybe.runWith(Sink.onComplete { @@ -268,11 +266,10 @@ 
object FlowDocSpec { case Failure(ex) => println(s"Failed: ${ex.getMessage}") }) - def receive = { - case "boom" => - context.stop(self) // will NOT terminate the stream (it's bound to the system!) + def receive = { case "boom" => + context.stop(self) // will NOT terminate the stream (it's bound to the system!) } } - //#materializer-from-system-in-actor + // #materializer-from-system-in-actor } diff --git a/akka-docs/src/test/scala/docs/stream/FlowErrorDocSpec.scala b/akka-docs/src/test/scala/docs/stream/FlowErrorDocSpec.scala index c9b6c139eae..ca6ec34b818 100644 --- a/akka-docs/src/test/scala/docs/stream/FlowErrorDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/FlowErrorDocSpec.scala @@ -14,12 +14,12 @@ import scala.concurrent.duration._ class FlowErrorDocSpec extends AkkaSpec { "demonstrate fail stream" in { - //#stop + // #stop val source = Source(0 to 5).map(100 / _) val result = source.runWith(Sink.fold(0)(_ + _)) // division by zero will fail the stream and the // result here will be a Future completed with Failure(ArithmeticException) - //#stop + // #stop intercept[ArithmeticException] { Await.result(result, 3.seconds) @@ -27,7 +27,7 @@ class FlowErrorDocSpec extends AkkaSpec { } "demonstrate resume stream" in { - //#resume + // #resume val decider: Supervision.Decider = { case _: ArithmeticException => Supervision.Resume case _ => Supervision.Stop @@ -41,13 +41,13 @@ class FlowErrorDocSpec extends AkkaSpec { val result = withCustomSupervision.run() // the element causing division by zero will be dropped // result here will be a Future completed with Success(228) - //#resume + // #resume Await.result(result, 3.seconds) should be(228) } "demonstrate resume section" in { - //#resume-section + // #resume-section val decider: Supervision.Decider = { case _: ArithmeticException => Supervision.Resume case _ => Supervision.Stop @@ -61,13 +61,13 @@ class FlowErrorDocSpec extends AkkaSpec { val result = source.runWith(Sink.fold(0)(_ + _)) // the elements 
causing division by zero will be dropped // result here will be a Future completed with Success(150) - //#resume-section + // #resume-section Await.result(result, 3.seconds) should be(150) } "demonstrate restart section" in { - //#restart-section + // #restart-section val decider: Supervision.Decider = { case _: IllegalArgumentException => Supervision.Restart case _ => Supervision.Stop @@ -83,24 +83,23 @@ class FlowErrorDocSpec extends AkkaSpec { // the negative element cause the scan stage to be restarted, // i.e. start from 0 again // result here will be a Future completed with Success(Vector(0, 1, 4, 0, 5, 12)) - //#restart-section + // #restart-section Await.result(result, 3.seconds) should be(Vector(0, 1, 4, 0, 5, 12)) } "demonstrate recover" in { - //#recover + // #recover Source(0 to 6) - .map( - n => - // assuming `4` and `5` are unexpected values that could throw exception - if (List(4, 5).contains(n)) throw new RuntimeException(s"Boom! Bad value found: $n") - else n.toString) - .recover { - case e: RuntimeException => e.getMessage + .map(n => + // assuming `4` and `5` are unexpected values that could throw exception + if (List(4, 5).contains(n)) throw new RuntimeException(s"Boom! Bad value found: $n") + else n.toString) + .recover { case e: RuntimeException => + e.getMessage } .runForeach(println) - //#recover + // #recover /* Output: @@ -111,22 +110,24 @@ Output: 3 // last element before failure Boom! 
Bad value found: 4 // first element on failure //#recover-output - */ + */ } "demonstrate recoverWithRetries" in { - //#recoverWithRetries + // #recoverWithRetries val planB = Source(List("five", "six", "seven", "eight")) Source(0 to 10) .map(n => if (n < 5) n.toString else throw new RuntimeException("Boom!")) - .recoverWithRetries(attempts = 1, { - case _: RuntimeException => planB - }) + .recoverWithRetries( + attempts = 1, + { case _: RuntimeException => + planB + }) .runForeach(println) - //#recoverWithRetries + // #recoverWithRetries /* Output: @@ -141,7 +142,7 @@ six seven eight //#recoverWithRetries-output - */ + */ } } diff --git a/akka-docs/src/test/scala/docs/stream/FlowParallelismDocSpec.scala b/akka-docs/src/test/scala/docs/stream/FlowParallelismDocSpec.scala index de3e2348cbf..768a6736c20 100644 --- a/akka-docs/src/test/scala/docs/stream/FlowParallelismDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/FlowParallelismDocSpec.scala @@ -34,16 +34,16 @@ class FlowParallelismDocSpec extends AkkaSpec { //format: ON "Demonstrate pipelining" in { - //#pipelining + // #pipelining // With the two frying pans we can fully cook pancakes val pancakeChef: Flow[ScoopOfBatter, Pancake, NotUsed] = Flow[ScoopOfBatter].via(fryingPan1.async).via(fryingPan2.async) - //#pipelining + // #pipelining } "Demonstrate parallel processing" in { - //#parallelism + // #parallelism val fryingPan: Flow[ScoopOfBatter, Pancake, NotUsed] = Flow[ScoopOfBatter].map { batter => Pancake() @@ -64,11 +64,11 @@ class FlowParallelismDocSpec extends AkkaSpec { FlowShape(dispatchBatter.in, mergePancakes.out) }) - //#parallelism + // #parallelism } "Demonstrate parallelized pipelines" in { - //#parallel-pipeline + // #parallel-pipeline val pancakeChef: Flow[ScoopOfBatter, Pancake, NotUsed] = Flow.fromGraph(GraphDSL.create() { implicit builder => val dispatchBatter = builder.add(Balance[ScoopOfBatter](2)) @@ -81,11 +81,11 @@ class FlowParallelismDocSpec extends AkkaSpec { 
FlowShape(dispatchBatter.in, mergePancakes.out) }) - //#parallel-pipeline + // #parallel-pipeline } "Demonstrate pipelined parallel processing" in { - //#pipelined-parallel + // #pipelined-parallel val pancakeChefs1: Flow[ScoopOfBatter, HalfCookedPancake, NotUsed] = Flow.fromGraph(GraphDSL.create() { implicit builder => val dispatchBatter = builder.add(Balance[ScoopOfBatter](2)) @@ -113,7 +113,7 @@ class FlowParallelismDocSpec extends AkkaSpec { }) val kitchen: Flow[ScoopOfBatter, Pancake, NotUsed] = pancakeChefs1.via(pancakeChefs2) - //#pipelined-parallel + // #pipelined-parallel } diff --git a/akka-docs/src/test/scala/docs/stream/FlowStreamRefsDocSpec.scala b/akka-docs/src/test/scala/docs/stream/FlowStreamRefsDocSpec.scala index 987b0223827..4947def9a22 100644 --- a/akka-docs/src/test/scala/docs/stream/FlowStreamRefsDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/FlowStreamRefsDocSpec.scala @@ -13,7 +13,7 @@ import docs.CompileOnlySpec class FlowStreamRefsDocSpec extends AkkaSpec with CompileOnlySpec { "offer a source ref" in compileOnlySpec { - //#offer-source + // #offer-source import akka.stream.SourceRef import akka.pattern.pipe @@ -22,26 +22,25 @@ class FlowStreamRefsDocSpec extends AkkaSpec with CompileOnlySpec { class DataSource extends Actor { - def receive = { - case RequestLogs(streamId) => - // obtain the source you want to offer: - val source: Source[String, NotUsed] = streamLogs(streamId) + def receive = { case RequestLogs(streamId) => + // obtain the source you want to offer: + val source: Source[String, NotUsed] = streamLogs(streamId) - // materialize the SourceRef: - val ref: SourceRef[String] = source.runWith(StreamRefs.sourceRef()) + // materialize the SourceRef: + val ref: SourceRef[String] = source.runWith(StreamRefs.sourceRef()) - // wrap the SourceRef in some domain message, such that the sender knows what source it is - val reply = LogsOffer(streamId, ref) + // wrap the SourceRef in some domain message, such that the sender knows 
what source it is + val reply = LogsOffer(streamId, ref) - // reply to sender - sender() ! reply + // reply to sender + sender() ! reply } def streamLogs(streamId: Long): Source[String, NotUsed] = ??? } - //#offer-source + // #offer-source - //#offer-source-use + // #offer-source-use val sourceActor = system.actorOf(Props[DataSource](), "dataSource") sourceActor ! RequestLogs(1337) @@ -52,11 +51,11 @@ class FlowStreamRefsDocSpec extends AkkaSpec with CompileOnlySpec { // alternatively explicitly obtain Source from SourceRef: // offer.sourceRef.source.runWith(Sink.foreach(println)) - //#offer-source-use + // #offer-source-use } "offer a sink ref" in compileOnlySpec { - //#offer-sink + // #offer-sink import akka.stream.SinkRef case class PrepareUpload(id: String) @@ -64,29 +63,28 @@ class FlowStreamRefsDocSpec extends AkkaSpec with CompileOnlySpec { class DataReceiver extends Actor { - def receive = { - case PrepareUpload(nodeId) => - // obtain the source you want to offer: - val sink: Sink[String, NotUsed] = logsSinkFor(nodeId) + def receive = { case PrepareUpload(nodeId) => + // obtain the source you want to offer: + val sink: Sink[String, NotUsed] = logsSinkFor(nodeId) - // materialize the SinkRef (the remote is like a source of data for us): - val ref: SinkRef[String] = StreamRefs.sinkRef[String]().to(sink).run() + // materialize the SinkRef (the remote is like a source of data for us): + val ref: SinkRef[String] = StreamRefs.sinkRef[String]().to(sink).run() - // wrap the SinkRef in some domain message, such that the sender knows what source it is - val reply = MeasurementsSinkReady(nodeId, ref) + // wrap the SinkRef in some domain message, such that the sender knows what source it is + val reply = MeasurementsSinkReady(nodeId, ref) - // reply to sender - sender() ! reply + // reply to sender + sender() ! reply } def logsSinkFor(nodeId: String): Sink[String, NotUsed] = ??? 
} - //#offer-sink + // #offer-sink def localMetrics(): Source[String, NotUsed] = Source.single("") - //#offer-sink-use + // #offer-sink-use val receiver = system.actorOf(Props[DataReceiver](), "receiver") receiver ! PrepareUpload("system-42-tmp") @@ -94,11 +92,11 @@ class FlowStreamRefsDocSpec extends AkkaSpec with CompileOnlySpec { // stream local metrics to Sink's origin: localMetrics().runWith(ready.sinkRef) - //#offer-sink-use + // #offer-sink-use } "show how to configure timeouts with attrs" in compileOnlySpec { - //#attr-sub-timeout + // #attr-sub-timeout // configure the timeout for source import scala.concurrent.duration._ import akka.stream.StreamRefAttributes @@ -113,7 +111,7 @@ class FlowStreamRefsDocSpec extends AkkaSpec with CompileOnlySpec { .sinkRef() .addAttributes(StreamRefAttributes.subscriptionTimeout(5.seconds)) .runWith(Sink.ignore) // not very interesting Sink, just an example - //#attr-sub-timeout + // #attr-sub-timeout } } diff --git a/akka-docs/src/test/scala/docs/stream/GraphDSLDocSpec.scala b/akka-docs/src/test/scala/docs/stream/GraphDSLDocSpec.scala index b376745944d..8d8de77b0d1 100644 --- a/akka-docs/src/test/scala/docs/stream/GraphDSLDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/GraphDSLDocSpec.scala @@ -38,14 +38,14 @@ class GraphDSLDocSpec extends AkkaSpec { //#simple-graph-dsl //format: ON - //#simple-graph-run + // #simple-graph-run g.run() - //#simple-graph-run + // #simple-graph-run } "flow connection errors" in { intercept[IllegalStateException] { - //#simple-graph + // #simple-graph RunnableGraph.fromGraph(GraphDSL.create() { implicit builder => import GraphDSL.Implicits._ val source1 = Source(1 to 10) @@ -58,18 +58,18 @@ class GraphDSLDocSpec extends AkkaSpec { // unconnected zip.out (!) 
=> "must have at least 1 outgoing edge" ClosedShape }) - //#simple-graph + // #simple-graph }.getMessage should include("ZipWith2.out") } "reusing a flow in a graph" in { - //#graph-dsl-reusing-a-flow + // #graph-dsl-reusing-a-flow val topHeadSink = Sink.head[Int] val bottomHeadSink = Sink.head[Int] val sharedDoubler = Flow[Int].map(_ * 2) - //#graph-dsl-reusing-a-flow + // #graph-dsl-reusing-a-flow // format: OFF val g = @@ -93,7 +93,7 @@ class GraphDSLDocSpec extends AkkaSpec { "building a reusable component" in { - //#graph-dsl-components-shape + // #graph-dsl-components-shape // A shape represents the input and output ports of a reusable // processing module case class PriorityWorkerPoolShape[In, Out](jobsIn: Inlet[In], priorityJobsIn: Inlet[In], resultsOut: Outlet[Out]) @@ -112,9 +112,9 @@ class GraphDSLDocSpec extends AkkaSpec { PriorityWorkerPoolShape(jobsIn.carbonCopy(), priorityJobsIn.carbonCopy(), resultsOut.carbonCopy()) } - //#graph-dsl-components-shape + // #graph-dsl-components-shape - //#graph-dsl-components-create + // #graph-dsl-components-create object PriorityWorkerPool { def apply[In, Out]( worker: Flow[In, Out, Any], @@ -147,11 +147,11 @@ class GraphDSLDocSpec extends AkkaSpec { } } - //#graph-dsl-components-create + // #graph-dsl-components-create def println(s: Any): Unit = () - //#graph-dsl-components-use + // #graph-dsl-components-use val worker1 = Flow[String].map("step 1 " + _) val worker2 = Flow[String].map("step 2 " + _) @@ -172,9 +172,9 @@ class GraphDSLDocSpec extends AkkaSpec { ClosedShape }) .run() - //#graph-dsl-components-use + // #graph-dsl-components-use - //#graph-dsl-components-shape2 + // #graph-dsl-components-shape2 import FanInShape.{ Init, Name } class PriorityWorkerPoolShape2[In, Out](_init: Init[Out] = Name("PriorityWorkerPool")) @@ -185,22 +185,22 @@ class GraphDSLDocSpec extends AkkaSpec { val priorityJobsIn = newInlet[In]("priorityJobsIn") // Outlet[Out] with name "out" is automatically created } - 
//#graph-dsl-components-shape2 + // #graph-dsl-components-shape2 } "access to materialized value" in { - //#graph-dsl-matvalue + // #graph-dsl-matvalue import GraphDSL.Implicits._ - val foldFlow: Flow[Int, Int, Future[Int]] = Flow.fromGraph(GraphDSL.createGraph(Sink.fold[Int, Int](0)(_ + _)) { - implicit builder => fold => + val foldFlow: Flow[Int, Int, Future[Int]] = + Flow.fromGraph(GraphDSL.createGraph(Sink.fold[Int, Int](0)(_ + _)) { implicit builder => fold => FlowShape(fold.in, builder.materializedValue.mapAsync(4)(identity).outlet) - }) - //#graph-dsl-matvalue + }) + // #graph-dsl-matvalue Await.result(Source(1 to 10).via(foldFlow).runWith(Sink.head), 3.seconds) should ===(55) - //#graph-dsl-matvalue-cycle + // #graph-dsl-matvalue-cycle import GraphDSL.Implicits._ // This cannot produce any value: val cyclicFold: Source[Int, Future[Int]] = @@ -213,7 +213,7 @@ class GraphDSLDocSpec extends AkkaSpec { builder.materializedValue.mapAsync(4)(identity) ~> fold SourceShape(builder.materializedValue.mapAsync(4)(identity).outlet) }) - //#graph-dsl-matvalue-cycle + // #graph-dsl-matvalue-cycle } } diff --git a/akka-docs/src/test/scala/docs/stream/GraphStageDocSpec.scala b/akka-docs/src/test/scala/docs/stream/GraphStageDocSpec.scala index 172be5e8272..3c3f6cc35af 100644 --- a/akka-docs/src/test/scala/docs/stream/GraphStageDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/GraphStageDocSpec.scala @@ -19,7 +19,7 @@ import scala.collection.immutable.Iterable class GraphStageDocSpec extends AkkaSpec { "Demonstrate creation of GraphStage boilerplate" in { - //#boilerplate-example + // #boilerplate-example import akka.stream.SourceShape import akka.stream.stage.GraphStage @@ -32,12 +32,12 @@ class GraphStageDocSpec extends AkkaSpec { // This is where the actual (possibly stateful) logic will live override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = ??? 
} - //#boilerplate-example + // #boilerplate-example } "Demonstrate creation of GraphStage Source" in { - //#custom-source-example + // #custom-source-example import akka.stream.Attributes import akka.stream.Outlet import akka.stream.SourceShape @@ -58,17 +58,19 @@ class GraphStageDocSpec extends AkkaSpec { // registered handlers. private var counter = 1 - setHandler(out, new OutHandler { - override def onPull(): Unit = { - push(out, counter) - counter += 1 - } - }) + setHandler( + out, + new OutHandler { + override def onPull(): Unit = { + push(out, counter) + counter += 1 + } + }) } } - //#custom-source-example + // #custom-source-example - //#simple-source-usage + // #simple-source-usage // A GraphStage is a proper Graph, just like what GraphDSL.create would return val sourceGraph: Graph[SourceShape[Int], NotUsed] = new NumbersSource @@ -80,14 +82,14 @@ class GraphStageDocSpec extends AkkaSpec { // The source is reusable. This returns 5050 val result2: Future[Int] = mySource.take(100).runFold(0)(_ + _) - //#simple-source-usage + // #simple-source-usage Await.result(result1, 3.seconds) should ===(55) Await.result(result2, 3.seconds) should ===(5050) } "Demonstrate creation of GraphStage Sink" in { - //#custom-sink-example + // #custom-sink-example import akka.stream.Attributes import akka.stream.Inlet import akka.stream.SinkShape @@ -105,20 +107,22 @@ class GraphStageDocSpec extends AkkaSpec { // This requests one element at the Sink startup. 
override def preStart(): Unit = pull(in) - setHandler(in, new InHandler { - override def onPush(): Unit = { - println(grab(in)) - pull(in) - } - }) + setHandler( + in, + new InHandler { + override def onPush(): Unit = { + println(grab(in)) + pull(in) + } + }) } } - //#custom-sink-example + // #custom-sink-example Source(List(0, 1, 2)).runWith(Sink.fromGraph(new StdoutSink)) } - //#one-to-one + // #one-to-one class Map[A, B](f: A => B) extends GraphStage[FlowShape[A, B]] { val in = Inlet[A]("Map.in") @@ -128,19 +132,23 @@ class GraphStageDocSpec extends AkkaSpec { override def createLogic(attr: Attributes): GraphStageLogic = new GraphStageLogic(shape) { - setHandler(in, new InHandler { - override def onPush(): Unit = { - push(out, f(grab(in))) - } - }) - setHandler(out, new OutHandler { - override def onPull(): Unit = { - pull(in) - } - }) + setHandler( + in, + new InHandler { + override def onPush(): Unit = { + push(out, f(grab(in))) + } + }) + setHandler( + out, + new OutHandler { + override def onPull(): Unit = { + pull(in) + } + }) } } - //#one-to-one + // #one-to-one "Demonstrate a one to one element GraphStage" in { // tests: @@ -152,7 +160,7 @@ class GraphStageDocSpec extends AkkaSpec { Await.result(result, 3.seconds) should ===(Seq(3, 3, 5)) } - //#many-to-one + // #many-to-one class Filter[A](p: A => Boolean) extends GraphStage[FlowShape[A, A]] { val in = Inlet[A]("Filter.in") @@ -162,21 +170,25 @@ class GraphStageDocSpec extends AkkaSpec { override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { - setHandler(in, new InHandler { - override def onPush(): Unit = { - val elem = grab(in) - if (p(elem)) push(out, elem) - else pull(in) - } - }) - setHandler(out, new OutHandler { - override def onPull(): Unit = { - pull(in) - } - }) + setHandler( + in, + new InHandler { + override def onPush(): Unit = { + val elem = grab(in) + if (p(elem)) push(out, elem) + else pull(in) + } + }) + setHandler( + out, + new 
OutHandler { + override def onPull(): Unit = { + pull(in) + } + }) } } - //#many-to-one + // #many-to-one "Demonstrate a many to one element GraphStage" in { @@ -189,7 +201,7 @@ class GraphStageDocSpec extends AkkaSpec { Await.result(result, 3.seconds) should ===(Seq(2, 4, 6)) } - //#one-to-many + // #one-to-many class Duplicator[A] extends GraphStage[FlowShape[A, A]] { val in = Inlet[A]("Duplicator.in") @@ -203,32 +215,36 @@ class GraphStageDocSpec extends AkkaSpec { // MUST be inside the GraphStageLogic var lastElem: Option[A] = None - setHandler(in, new InHandler { - override def onPush(): Unit = { - val elem = grab(in) - lastElem = Some(elem) - push(out, elem) - } + setHandler( + in, + new InHandler { + override def onPush(): Unit = { + val elem = grab(in) + lastElem = Some(elem) + push(out, elem) + } - override def onUpstreamFinish(): Unit = { - if (lastElem.isDefined) emit(out, lastElem.get) - complete(out) - } + override def onUpstreamFinish(): Unit = { + if (lastElem.isDefined) emit(out, lastElem.get) + complete(out) + } - }) - setHandler(out, new OutHandler { - override def onPull(): Unit = { - if (lastElem.isDefined) { - push(out, lastElem.get) - lastElem = None - } else { - pull(in) + }) + setHandler( + out, + new OutHandler { + override def onPull(): Unit = { + if (lastElem.isDefined) { + push(out, lastElem.get) + lastElem = None + } else { + pull(in) + } } - } - }) + }) } } - //#one-to-many + // #one-to-many "Demonstrate a one to many element GraphStage" in { // tests: @@ -241,7 +257,7 @@ class GraphStageDocSpec extends AkkaSpec { } "Demonstrate a simpler one to many stage" in { - //#simpler-one-to-many + // #simpler-one-to-many class Duplicator[A] extends GraphStage[FlowShape[A, A]] { val in = Inlet[A]("Duplicator.in") @@ -252,22 +268,26 @@ class GraphStageDocSpec extends AkkaSpec { override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { - setHandler(in, new InHandler { - override def onPush(): Unit = 
{ - val elem = grab(in) - // this will temporarily suspend this handler until the two elems - // are emitted and then reinstates it - emitMultiple(out, Iterable(elem, elem)) - } - }) - setHandler(out, new OutHandler { - override def onPull(): Unit = { - pull(in) - } - }) + setHandler( + in, + new InHandler { + override def onPush(): Unit = { + val elem = grab(in) + // this will temporarily suspend this handler until the two elems + // are emitted and then reinstates it + emitMultiple(out, Iterable(elem, elem)) + } + }) + setHandler( + out, + new OutHandler { + override def onPull(): Unit = { + pull(in) + } + }) } } - //#simpler-one-to-many + // #simpler-one-to-many // tests: val duplicator = Flow.fromGraph(new Duplicator[Int]) @@ -282,18 +302,18 @@ class GraphStageDocSpec extends AkkaSpec { "Demonstrate chaining of graph stages" in { val sink = Sink.fold[List[Int], Int](List.empty[Int])((acc, n) => acc :+ n) - //#graph-operator-chain + // #graph-operator-chain val resultFuture = Source(1 to 5).via(new Filter(_ % 2 == 0)).via(new Duplicator()).via(new Map(_ / 2)).runWith(sink) - //#graph-operator-chain + // #graph-operator-chain Await.result(resultFuture, 3.seconds) should ===(List(1, 1, 2, 2)) } "Demonstrate an asynchronous side channel" in { import system.dispatcher - //#async-side-channel + // #async-side-channel // will close upstream in all materializations of the graph stage instance // when the future completes class KillSwitch[A](switch: Future[Unit]) extends GraphStage[FlowShape[A, A]] { @@ -307,21 +327,25 @@ class GraphStageDocSpec extends AkkaSpec { new GraphStageLogic(shape) { override def preStart(): Unit = { - val callback = getAsyncCallback[Unit] { (_) => + val callback = getAsyncCallback[Unit] { _ => completeStage() } switch.foreach(callback.invoke) } - setHandler(in, new InHandler { - override def onPush(): Unit = { push(out, grab(in)) } - }) - setHandler(out, new OutHandler { - override def onPull(): Unit = { pull(in) } - }) + setHandler( + in, + 
new InHandler { + override def onPush(): Unit = { push(out, grab(in)) } + }) + setHandler( + out, + new OutHandler { + override def onPull(): Unit = { pull(in) } + }) } } - //#async-side-channel + // #async-side-channel // tests: @@ -354,7 +378,7 @@ class GraphStageDocSpec extends AkkaSpec { "Demonstrate a graph stage with a timer" in { - //#timed + // #timed // each time an event is pushed through it will trigger a period of silence class TimedGate[A](silencePeriod: FiniteDuration) extends GraphStage[FlowShape[A, A]] { @@ -368,27 +392,31 @@ class GraphStageDocSpec extends AkkaSpec { var open = false - setHandler(in, new InHandler { - override def onPush(): Unit = { - val elem = grab(in) - if (open) pull(in) - else { - push(out, elem) - open = true - scheduleOnce(None, silencePeriod) + setHandler( + in, + new InHandler { + override def onPush(): Unit = { + val elem = grab(in) + if (open) pull(in) + else { + push(out, elem) + open = true + scheduleOnce(None, silencePeriod) + } } - } - }) - setHandler(out, new OutHandler { - override def onPull(): Unit = { pull(in) } - }) + }) + setHandler( + out, + new OutHandler { + override def onPull(): Unit = { pull(in) } + }) override protected def onTimer(timerKey: Any): Unit = { open = false } } } - //#timed + // #timed // tests: val result = @@ -402,7 +430,7 @@ class GraphStageDocSpec extends AkkaSpec { "Demonstrate a custom materialized value" in { - //#materialized + // #materialized class FirstValue[A] extends GraphStageWithMaterializedValue[FlowShape[A, A], Future[A]] { val in = Inlet[A]("FirstValue.in") @@ -414,33 +442,39 @@ class GraphStageDocSpec extends AkkaSpec { val promise = Promise[A]() val logic = new GraphStageLogic(shape) { - setHandler(in, new InHandler { - override def onPush(): Unit = { - val elem = grab(in) - promise.success(elem) - push(out, elem) + setHandler( + in, + new InHandler { + override def onPush(): Unit = { + val elem = grab(in) + promise.success(elem) + push(out, elem) - // replace handler 
with one that only forwards elements - setHandler(in, new InHandler { - override def onPush(): Unit = { - push(out, grab(in)) - } - }) - } - }) + // replace handler with one that only forwards elements + setHandler( + in, + new InHandler { + override def onPush(): Unit = { + push(out, grab(in)) + } + }) + } + }) - setHandler(out, new OutHandler { - override def onPull(): Unit = { - pull(in) - } - }) + setHandler( + out, + new OutHandler { + override def onPull(): Unit = { + pull(in) + } + }) } (logic, promise.future) } } - //#materialized + // #materialized // tests: val flow = Source(Vector(1, 2, 3)).viaMat(new FirstValue)(Keep.right).to(Sink.ignore) @@ -453,7 +487,7 @@ class GraphStageDocSpec extends AkkaSpec { "Demonstrate a detached graph stage" in { - //#detached + // #detached class TwoBuffer[A] extends GraphStage[FlowShape[A, A]] { val in = Inlet[A]("TwoBuffer.in") @@ -517,7 +551,7 @@ class GraphStageDocSpec extends AkkaSpec { } } - //#detached + // #detached // tests: val result1 = Source(Vector(1, 2, 3)).via(new TwoBuffer).runFold(Vector.empty[Int])((acc, n) => acc :+ n) @@ -542,7 +576,7 @@ class GraphStageDocSpec extends AkkaSpec { "Demonstrate stream extension" when { "targeting a Source" in { - //#extending-source + // #extending-source implicit class SourceDuplicator[Out, Mat](s: Source[Out, Mat]) { def duplicateElements: Source[Out, Mat] = s.via(new Duplicator) } @@ -550,11 +584,11 @@ class GraphStageDocSpec extends AkkaSpec { val s = Source(1 to 3).duplicateElements s.runWith(Sink.seq).futureValue should ===(Seq(1, 1, 2, 2, 3, 3)) - //#extending-source + // #extending-source } "targeting a Flow" in { - //#extending-flow + // #extending-flow implicit class FlowDuplicator[In, Out, Mat](s: Flow[In, Out, Mat]) { def duplicateElements: Flow[In, Out, Mat] = s.via(new Duplicator) } @@ -562,7 +596,7 @@ class GraphStageDocSpec extends AkkaSpec { val f = Flow[Int].duplicateElements Source(1 to 3).via(f).runWith(Sink.seq).futureValue should ===(Seq(1, 1, 2, 2, 
3, 3)) - //#extending-flow + // #extending-flow } } diff --git a/akka-docs/src/test/scala/docs/stream/GraphStageLoggingDocSpec.scala b/akka-docs/src/test/scala/docs/stream/GraphStageLoggingDocSpec.scala index 7e77ca213aa..3ff42c727da 100644 --- a/akka-docs/src/test/scala/docs/stream/GraphStageLoggingDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/GraphStageLoggingDocSpec.scala @@ -15,7 +15,7 @@ class GraphStageLoggingDocSpec extends AkkaSpec("akka.loglevel = DEBUG") { implicit val ec: ExecutionContext = system.dispatcher - //#operator-with-logging + // #operator-with-logging import akka.stream.stage.{ GraphStage, GraphStageLogic, OutHandler, StageLogging } final class RandomLettersSource extends GraphStage[SourceShape[String]] { @@ -24,22 +24,24 @@ class GraphStageLoggingDocSpec extends AkkaSpec("akka.loglevel = DEBUG") { override def createLogic(inheritedAttributes: Attributes) = new GraphStageLogic(shape) with StageLogging { - setHandler(out, new OutHandler { - override def onPull(): Unit = { - val c = nextChar() // ASCII lower case letters - - // `log` is obtained from materializer automatically (via StageLogging) - log.debug("Randomly generated: [{}]", c) - - push(out, c.toString) - } - }) + setHandler( + out, + new OutHandler { + override def onPull(): Unit = { + val c = nextChar() // ASCII lower case letters + + // `log` is obtained from materializer automatically (via StageLogging) + log.debug("Randomly generated: [{}]", c) + + push(out, c.toString) + } + }) } def nextChar(): Char = ThreadLocalRandom.current().nextInt('a', 'z'.toInt + 1).toChar } - //#operator-with-logging + // #operator-with-logging "demonstrate logging in custom graphstage" in { val n = 10 diff --git a/akka-docs/src/test/scala/docs/stream/HubsDocSpec.scala b/akka-docs/src/test/scala/docs/stream/HubsDocSpec.scala index ca21e5ba965..dbea62c34fc 100644 --- a/akka-docs/src/test/scala/docs/stream/HubsDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/HubsDocSpec.scala @@ -20,7 
+20,7 @@ class HubsDocSpec extends AkkaSpec with CompileOnlySpec { "demonstrate creating a dynamic merge" in { def println(s: String) = testActor ! s - //#merge-hub + // #merge-hub // A simple consumer that will print to the console for now val consumer = Sink.foreach(println) @@ -38,13 +38,13 @@ class HubsDocSpec extends AkkaSpec with CompileOnlySpec { // Feeding two independent sources into the hub. Source.single("Hello!").runWith(toConsumer) Source.single("Hub!").runWith(toConsumer) - //#merge-hub + // #merge-hub expectMsgAllOf("Hello!", "Hub!") } "demonstrate creating a dynamic broadcast" in compileOnlySpec { - //#broadcast-hub + // #broadcast-hub // A simple producer that publishes a new "message" every second val producer = Source.tick(1.second, 1.second, "New message") @@ -62,26 +62,26 @@ class HubsDocSpec extends AkkaSpec with CompileOnlySpec { // Print out messages from the producer in two independent consumers fromProducer.runForeach(msg => println("consumer1: " + msg)) fromProducer.runForeach(msg => println("consumer2: " + msg)) - //#broadcast-hub + // #broadcast-hub } "demonstrate combination" in { def println(s: String) = testActor ! s - //#pub-sub-1 + // #pub-sub-1 // Obtain a Sink and Source which will publish and receive from the "bus" respectively. val (sink, source) = MergeHub.source[String](perProducerBufferSize = 16).toMat(BroadcastHub.sink(bufferSize = 256))(Keep.both).run() - //#pub-sub-1 + // #pub-sub-1 - //#pub-sub-2 + // #pub-sub-2 // Ensure that the Broadcast output is dropped if there are no listening parties. // If this dropping Sink is not attached, then the broadcast hub will not drop any // elements itself when there are no subscribers, backpressuring the producer instead. source.runWith(Sink.ignore) - //#pub-sub-2 + // #pub-sub-2 - //#pub-sub-3 + // #pub-sub-3 // We create now a Flow that represents a publish-subscribe channel using the above // started stream as its "topic". 
We add two more features, external cancellation of // the registration and automatic cleanup for very slow subscribers. @@ -90,19 +90,19 @@ class HubsDocSpec extends AkkaSpec with CompileOnlySpec { .fromSinkAndSource(sink, source) .joinMat(KillSwitches.singleBidi[String, String])(Keep.right) .backpressureTimeout(3.seconds) - //#pub-sub-3 + // #pub-sub-3 - //#pub-sub-4 + // #pub-sub-4 val switch: UniqueKillSwitch = Source.repeat("Hello world!").viaMat(busFlow)(Keep.right).to(Sink.foreach(println)).run() // Shut down externally switch.shutdown() - //#pub-sub-4 + // #pub-sub-4 } "demonstrate creating a dynamic partition hub" in compileOnlySpec { - //#partition-hub + // #partition-hub // A simple producer that publishes a new "message-" every second val producer = Source.tick(1.second, 1.second, "message").zipWith(Source(1 to 100))((a, b) => s"$a-$b") @@ -124,11 +124,11 @@ class HubsDocSpec extends AkkaSpec with CompileOnlySpec { // Print out messages from the producer in two independent consumers fromProducer.runForeach(msg => println("consumer1: " + msg)) fromProducer.runForeach(msg => println("consumer2: " + msg)) - //#partition-hub + // #partition-hub } "demonstrate creating a dynamic stateful partition hub" in compileOnlySpec { - //#partition-hub-stateful + // #partition-hub-stateful // A simple producer that publishes a new "message-" every second val producer = Source.tick(1.second, 1.second, "message").zipWith(Source(1 to 100))((a, b) => s"$a-$b") @@ -158,11 +158,11 @@ class HubsDocSpec extends AkkaSpec with CompileOnlySpec { // Print out messages from the producer in two independent consumers fromProducer.runForeach(msg => println("consumer1: " + msg)) fromProducer.runForeach(msg => println("consumer2: " + msg)) - //#partition-hub-stateful + // #partition-hub-stateful } "demonstrate creating a dynamic partition hub routing to fastest consumer" in compileOnlySpec { - //#partition-hub-fastest + // #partition-hub-fastest val producer = Source(0 until 100) // 
ConsumerInfo.queueSize is the approximate number of buffered elements for a consumer. @@ -178,7 +178,7 @@ class HubsDocSpec extends AkkaSpec with CompileOnlySpec { fromProducer.runForeach(msg => println("consumer1: " + msg)) fromProducer.throttle(10, 100.millis).runForeach(msg => println("consumer2: " + msg)) - //#partition-hub-fastest + // #partition-hub-fastest } } diff --git a/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala b/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala index 4fe782b2cdb..c8d2d30c299 100644 --- a/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala @@ -42,21 +42,21 @@ object IntegrationDocSpec { """) class AddressSystem { - //#email-address-lookup + // #email-address-lookup def lookupEmail(handle: String): Future[Option[String]] = - //#email-address-lookup + // #email-address-lookup Future.successful(Some(handle + "@somewhere.com")) - //#phone-lookup + // #phone-lookup def lookupPhoneNumber(handle: String): Future[Option[String]] = - //#phone-lookup + // #phone-lookup Future.successful(Some(handle.hashCode.toString)) } class AddressSystem2 { - //#email-address-lookup2 + // #email-address-lookup2 def lookupEmail(handle: String): Future[String] = - //#email-address-lookup2 + // #email-address-lookup2 Future.successful(handle + "@somewhere.com") } @@ -64,44 +64,43 @@ object IntegrationDocSpec { final case class TextMessage(to: String, body: String) class EmailServer(probe: ActorRef) { - //#email-server-send + // #email-server-send def send(email: Email): Future[Unit] = { // ... - //#email-server-send + // #email-server-send probe ! email.to Future.successful(()) - //#email-server-send + // #email-server-send } - //#email-server-send + // #email-server-send } class SmsServer(probe: ActorRef) { - //#sms-server-send + // #sms-server-send def send(text: TextMessage): Unit = { // ... - //#sms-server-send + // #sms-server-send probe ! 
text.to - //#sms-server-send + // #sms-server-send } - //#sms-server-send + // #sms-server-send } final case class Save(tweet: Tweet) case object SaveDone class DatabaseService(probe: ActorRef) extends Actor { - override def receive = { - case Save(tweet: Tweet) => - probe ! tweet.author.handle - sender() ! SaveDone + override def receive = { case Save(tweet: Tweet) => + probe ! tweet.author.handle + sender() ! SaveDone } } - //#sometimes-slow-service + // #sometimes-slow-service class SometimesSlowService(implicit ec: ExecutionContext) { - //#sometimes-slow-service + // #sometimes-slow-service def println(s: String): Unit = () - //#sometimes-slow-service + // #sometimes-slow-service private val runningCount = new AtomicInteger @@ -117,18 +116,17 @@ object IntegrationDocSpec { } } } - //#sometimes-slow-service + // #sometimes-slow-service - //#ask-actor + // #ask-actor class Translator extends Actor { - def receive = { - case word: String => - // ... process message - val reply = word.toUpperCase - sender() ! reply // reply to the ask + def receive = { case word: String => + // ... process message + val reply = word.toUpperCase + sender() ! 
reply // reply to the ask } } - //#ask-actor + // #ask-actor } @@ -139,7 +137,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { val ref: ActorRef = system.actorOf(Props[Translator]()) "ask" in { - //#ask + // #ask implicit val askTimeout: Timeout = 5.seconds val words: Source[String, NotUsed] = Source(List("hello", "hi")) @@ -149,7 +147,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { // continue processing of the replies from the actor .map(_.toLowerCase) .runWith(Sink.ignore) - //#ask + // #ask } "calling external service with mapAsync" in { @@ -157,19 +155,19 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { val addressSystem = new AddressSystem val emailServer = new EmailServer(probe.ref) - //#tweet-authors + // #tweet-authors val authors: Source[Author, NotUsed] = tweets.filter(_.hashtags.contains(akkaTag)).map(_.author) - //#tweet-authors + // #tweet-authors - //#email-addresses-mapAsync + // #email-addresses-mapAsync val emailAddresses: Source[String, NotUsed] = - authors.mapAsync(4)(author => addressSystem.lookupEmail(author.handle)).collect { - case Some(emailAddress) => emailAddress + authors.mapAsync(4)(author => addressSystem.lookupEmail(author.handle)).collect { case Some(emailAddress) => + emailAddress } - //#email-addresses-mapAsync + // #email-addresses-mapAsync - //#send-emails + // #send-emails val sendEmails: RunnableGraph[NotUsed] = emailAddresses .mapAsync(4)(address => { @@ -178,7 +176,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { .to(Sink.ignore) sendEmails.run() - //#send-emails + // #send-emails probe.expectMsg("rolandkuhn@somewhere.com") probe.expectMsg("patriknw@somewhere.com") @@ -190,7 +188,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { } "actorRefWithBackpressure" in { - //#actorRefWithBackpressure + // #actorRefWithBackpressure val words: Source[String, NotUsed] = Source(List("hello", "hi")) @@ 
-217,10 +215,10 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { probe.expectMsg("hello") probe.expectMsg("hi") probe.expectMsg("Stream completed!") - //#actorRefWithBackpressure + // #actorRefWithBackpressure } - //#actorRefWithBackpressure-actor + // #actorRefWithBackpressure-actor object AckingReceiver { case object Ack @@ -250,14 +248,14 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { log.error(ex, "Stream failed!") } } - //#actorRefWithBackpressure-actor + // #actorRefWithBackpressure-actor "lookup email with mapAsync and supervision" in { val addressSystem = new AddressSystem2 val authors: Source[Author, NotUsed] = tweets.filter(_.hashtags.contains(akkaTag)).map(_.author) - //#email-addresses-mapAsync-supervision + // #email-addresses-mapAsync-supervision import ActorAttributes.supervisionStrategy import Supervision.resumingDecider @@ -266,7 +264,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { Flow[Author] .mapAsync(4)(author => addressSystem.lookupEmail(author.handle)) .withAttributes(supervisionStrategy(resumingDecider))) - //#email-addresses-mapAsync-supervision + // #email-addresses-mapAsync-supervision } "calling external service with mapAsyncUnordered" in { @@ -274,7 +272,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { val addressSystem = new AddressSystem val emailServer = new EmailServer(probe.ref) - //#external-service-mapAsyncUnordered + // #external-service-mapAsyncUnordered val authors: Source[Author, NotUsed] = tweets.filter(_.hashtags.contains(akkaTag)).map(_.author) @@ -291,7 +289,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { .to(Sink.ignore) sendEmails.run() - //#external-service-mapAsyncUnordered + // #external-service-mapAsyncUnordered probe.receiveN(7).toSet should be( Set( @@ -312,11 +310,11 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { val authors = 
tweets.filter(_.hashtags.contains(akkaTag)).map(_.author) val phoneNumbers = - authors.mapAsync(4)(author => addressSystem.lookupPhoneNumber(author.handle)).collect { - case Some(phoneNo) => phoneNo + authors.mapAsync(4)(author => addressSystem.lookupPhoneNumber(author.handle)).collect { case Some(phoneNo) => + phoneNo } - //#blocking-mapAsync + // #blocking-mapAsync val blockingExecutionContext = system.dispatchers.lookup("blocking-dispatcher") val sendTextMessages: RunnableGraph[NotUsed] = @@ -329,7 +327,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { .to(Sink.ignore) sendTextMessages.run() - //#blocking-mapAsync + // #blocking-mapAsync probe.receiveN(7).toSet should be( Set( @@ -350,11 +348,11 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { val authors = tweets.filter(_.hashtags.contains(akkaTag)).map(_.author) val phoneNumbers = - authors.mapAsync(4)(author => addressSystem.lookupPhoneNumber(author.handle)).collect { - case Some(phoneNo) => phoneNo + authors.mapAsync(4)(author => addressSystem.lookupPhoneNumber(author.handle)).collect { case Some(phoneNo) => + phoneNo } - //#blocking-map + // #blocking-map val send = Flow[String] .map { phoneNo => smsServer.send(TextMessage(to = phoneNo, body = "I like your tweet")) @@ -364,7 +362,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { phoneNumbers.via(send).to(Sink.ignore) sendTextMessages.run() - //#blocking-map + // #blocking-map probe.expectMsg("rolandkuhn".hashCode.toString) probe.expectMsg("patriknw".hashCode.toString) @@ -379,7 +377,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { val probe = TestProbe() val database = system.actorOf(Props(classOf[DatabaseService], probe.ref), "db") - //#save-tweets + // #save-tweets import akka.pattern.ask val akkaTweets: Source[Tweet, NotUsed] = tweets.filter(_.hashtags.contains(akkaTag)) @@ -387,7 +385,7 @@ class IntegrationDocSpec extends 
AkkaSpec(IntegrationDocSpec.config) { implicit val timeout: Timeout = 3.seconds val saveTweets: RunnableGraph[NotUsed] = akkaTweets.mapAsync(4)(tweet => database ? Save(tweet)).to(Sink.ignore) - //#save-tweets + // #save-tweets saveTweets.run() @@ -407,7 +405,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { probe.ref ! s } - //#sometimes-slow-mapAsync + // #sometimes-slow-mapAsync implicit val blockingExecutionContext = system.dispatchers.lookup("blocking-dispatcher") val service = new SometimesSlowService @@ -417,7 +415,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { .to(Sink.foreach(elem => println(s"after: $elem"))) .withAttributes(Attributes.inputBuffer(initial = 4, max = 4)) .run() - //#sometimes-slow-mapAsync + // #sometimes-slow-mapAsync probe.expectMsg("after: A") probe.expectMsg("after: B") @@ -438,7 +436,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { probe.ref ! s } - //#sometimes-slow-mapAsyncUnordered + // #sometimes-slow-mapAsyncUnordered implicit val blockingExecutionContext = system.dispatchers.lookup("blocking-dispatcher") val service = new SometimesSlowService @@ -448,7 +446,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { .to(Sink.foreach(elem => println(s"after: $elem"))) .withAttributes(Attributes.inputBuffer(initial = 4, max = 4)) .run() - //#sometimes-slow-mapAsyncUnordered + // #sometimes-slow-mapAsyncUnordered probe.receiveN(10).toSet should be( Set( @@ -465,7 +463,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { } "illustrate use of source queue" in { - //#source-queue + // #source-queue val bufferSize = 10 val elementsToProcess = 5 @@ -489,14 +487,14 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { } }) .runWith(Sink.ignore) - //#source-queue + // #source-queue } "illustrate use of synchronous source queue" in { - //#source-queue-synchronous + // #source-queue-synchronous val 
bufferSize = 1000 - //#source-queue-synchronous + // #source-queue-synchronous // format: OFF //#source-queue-synchronous val queue = Source diff --git a/akka-docs/src/test/scala/docs/stream/MigrationsScala.scala b/akka-docs/src/test/scala/docs/stream/MigrationsScala.scala index 6c5554f2956..63b8f58bade 100644 --- a/akka-docs/src/test/scala/docs/stream/MigrationsScala.scala +++ b/akka-docs/src/test/scala/docs/stream/MigrationsScala.scala @@ -12,23 +12,23 @@ class MigrationsScala extends AkkaSpec { "Examples in migration guide" must { "compile" in { lazy val dontExecuteMe = { - //#expand-continually + // #expand-continually Flow[Int].expand(Iterator.continually(_)) - //#expand-continually - //#expand-state + // #expand-continually + // #expand-state Flow[Int].expand(i => { var state = 0 - Iterator.continually({ + Iterator.continually { state += 1 (i, state) - }) + } }) - //#expand-state + // #expand-state - //#async + // #async val flow = Flow[Int].map(_ + 1) Source(1 to 10).via(flow.async) - //#async + // #async } } } diff --git a/akka-docs/src/test/scala/docs/stream/QuickStartDocSpec.scala b/akka-docs/src/test/scala/docs/stream/QuickStartDocSpec.scala index 3f19b07f87d..7ed74d66277 100644 --- a/akka-docs/src/test/scala/docs/stream/QuickStartDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/QuickStartDocSpec.scala @@ -37,48 +37,48 @@ class QuickStartDocSpec extends AnyWordSpec with BeforeAndAfterAll with ScalaFut "demonstrate Source" in { implicit val system = ActorSystem("QuickStart") - //#create-source + // #create-source val source: Source[Int, NotUsed] = Source(1 to 100) - //#create-source + // #create-source - //#run-source + // #run-source source.runForeach(i => println(i)) - //#run-source + // #run-source - //#transform-source + // #transform-source val factorials = source.scan(BigInt(1))((acc, next) => acc * next) val result: Future[IOResult] = factorials.map(num => ByteString(s"$num\n")).runWith(FileIO.toPath(Paths.get("factorials.txt"))) - 
//#transform-source + // #transform-source - //#use-transformed-sink + // #use-transformed-sink factorials.map(_.toString).runWith(lineSink("factorial2.txt")) - //#use-transformed-sink + // #use-transformed-sink - //#add-streams + // #add-streams factorials .zipWith(Source(0 to 100))((num, idx) => s"$idx! = $num") .throttle(1, 1.second) - //#add-streams + // #add-streams .take(3) - //#add-streams + // #add-streams .runForeach(println) - //#add-streams + // #add-streams - //#run-source-and-terminate + // #run-source-and-terminate val done: Future[Done] = source.runForeach(i => println(i)) implicit val ec = system.dispatcher done.onComplete(_ => system.terminate()) - //#run-source-and-terminate + // #run-source-and-terminate done.futureValue } - //#transform-sink + // #transform-sink def lineSink(filename: String): Sink[String, Future[IOResult]] = Flow[String].map(s => ByteString(s + "\n")).toMat(FileIO.toPath(Paths.get(filename)))(Keep.right) - //#transform-sink + // #transform-sink } diff --git a/akka-docs/src/test/scala/docs/stream/RateTransformationDocSpec.scala b/akka-docs/src/test/scala/docs/stream/RateTransformationDocSpec.scala index 029ab188c4e..9bedb1258d0 100644 --- a/akka-docs/src/test/scala/docs/stream/RateTransformationDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/RateTransformationDocSpec.scala @@ -18,14 +18,14 @@ import scala.concurrent.Await class RateTransformationDocSpec extends AkkaSpec { "conflate should summarize" in { - //#conflate-summarize + // #conflate-summarize val statsFlow = Flow[Double].conflateWithSeed(immutable.Seq(_))(_ :+ _).map { s => val μ = s.sum / s.size val se = s.map(x => pow(x - μ, 2)) val σ = sqrt(se.sum / se.size) (σ, μ, s.size) } - //#conflate-summarize + // #conflate-summarize val fut = Source @@ -38,7 +38,7 @@ class RateTransformationDocSpec extends AkkaSpec { } "conflate should sample" in { - //#conflate-sample + // #conflate-sample val p = 0.01 val sampleFlow = Flow[Double] 
.conflateWithSeed(immutable.Seq(_)) { @@ -46,7 +46,7 @@ class RateTransformationDocSpec extends AkkaSpec { case (acc, _) => acc } .mapConcat(identity) - //#conflate-sample + // #conflate-sample val fut = Source(1 to 1000).map(_.toDouble).via(sampleFlow).runWith(Sink.fold(Seq.empty[Double])(_ :+ _)) @@ -54,9 +54,9 @@ class RateTransformationDocSpec extends AkkaSpec { } "extrapolate should repeat last" in { - //#extrapolate-last + // #extrapolate-last val lastFlow = Flow[Double].extrapolate(Iterator.continually(_)) - //#extrapolate-last + // #extrapolate-last val (probe, fut) = TestSource[Double]().via(lastFlow).grouped(10).toMat(Sink.head)(Keep.both).run() @@ -67,10 +67,10 @@ class RateTransformationDocSpec extends AkkaSpec { } "extrapolate should send seed first" in { - //#extrapolate-seed + // #extrapolate-seed val initial = 2.0 val seedFlow = Flow[Double].extrapolate(Iterator.continually(_), Some(initial)) - //#extrapolate-seed + // #extrapolate-seed val fut = TestSource[Double]().via(seedFlow).grouped(10).runWith(Sink.head) @@ -80,9 +80,9 @@ class RateTransformationDocSpec extends AkkaSpec { } "extrapolate should track drift" in { - //#extrapolate-drift + // #extrapolate-drift val driftFlow = Flow[Double].map(_ -> 0).extrapolate[(Double, Int)] { case (i, _) => Iterator.from(1).map(i -> _) } - //#extrapolate-drift + // #extrapolate-drift val latch = TestLatch(2) val realDriftFlow = Flow[Double].map(d => { latch.countDown(); d -> 0; }).extrapolate[(Double, Int)] { case (d, _) => latch.countDown(); Iterator.from(1).map(d -> _) @@ -103,9 +103,9 @@ class RateTransformationDocSpec extends AkkaSpec { } "expand should track drift" in { - //#expand-drift + // #expand-drift val driftFlow = Flow[Double].expand(i => Iterator.from(0).map(i -> _)) - //#expand-drift + // #expand-drift val latch = TestLatch(2) val realDriftFlow = Flow[Double].expand(d => { latch.countDown(); Iterator.from(0).map(d -> _) }) diff --git 
a/akka-docs/src/test/scala/docs/stream/ReactiveStreamsDocSpec.scala b/akka-docs/src/test/scala/docs/stream/ReactiveStreamsDocSpec.scala index 4b7829bf01d..9d2085e4d56 100644 --- a/akka-docs/src/test/scala/docs/stream/ReactiveStreamsDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/ReactiveStreamsDocSpec.scala @@ -12,29 +12,29 @@ import akka.testkit.AkkaSpec class ReactiveStreamsDocSpec extends AkkaSpec { import TwitterStreamQuickstartDocSpec._ - //#imports + // #imports import org.reactivestreams.Publisher import org.reactivestreams.Subscriber import org.reactivestreams.Processor - //#imports + // #imports trait Fixture { - //#authors + // #authors val authors = Flow[Tweet].filter(_.hashtags.contains(akkaTag)).map(_.author) - //#authors + // #authors - //#tweets-publisher + // #tweets-publisher def tweets: Publisher[Tweet] - //#tweets-publisher + // #tweets-publisher - //#author-storage-subscriber + // #author-storage-subscriber def storage: Subscriber[Author] - //#author-storage-subscriber + // #author-storage-subscriber - //#author-alert-subscriber + // #author-alert-subscriber def alert: Subscriber[Author] - //#author-alert-subscriber + // #author-alert-subscriber } val impl = new Fixture { @@ -63,9 +63,9 @@ class ReactiveStreamsDocSpec extends AkkaSpec { import impl._ val storage = impl.storage - //#connect-all + // #connect-all Source.fromPublisher(tweets).via(authors).to(Sink.fromSubscriber(storage)).run() - //#connect-all + // #connect-all assertResult(storage) } @@ -74,12 +74,12 @@ class ReactiveStreamsDocSpec extends AkkaSpec { import impl._ val storage = impl.storage - //#flow-publisher-subscriber + // #flow-publisher-subscriber val processor: Processor[Tweet, Author] = authors.toProcessor.run() tweets.subscribe(processor) processor.subscribe(storage) - //#flow-publisher-subscriber + // #flow-publisher-subscriber assertResult(storage) } @@ -88,12 +88,12 @@ class ReactiveStreamsDocSpec extends AkkaSpec { import impl._ val storage = impl.storage - 
//#source-publisher + // #source-publisher val authorPublisher: Publisher[Author] = Source.fromPublisher(tweets).via(authors).runWith(Sink.asPublisher(fanout = false)) authorPublisher.subscribe(storage) - //#source-publisher + // #source-publisher assertResult(storage) } @@ -103,13 +103,13 @@ class ReactiveStreamsDocSpec extends AkkaSpec { val storage = impl.storage val alert = impl.alert - //#source-fanoutPublisher + // #source-fanoutPublisher val authorPublisher: Publisher[Author] = Source.fromPublisher(tweets).via(authors).runWith(Sink.asPublisher(fanout = true)) authorPublisher.subscribe(storage) authorPublisher.subscribe(alert) - //#source-fanoutPublisher + // #source-fanoutPublisher // this relies on fanoutPublisher buffer size > number of authors assertResult(storage) @@ -120,24 +120,24 @@ class ReactiveStreamsDocSpec extends AkkaSpec { import impl._ val storage = impl.storage - //#sink-subscriber + // #sink-subscriber val tweetSubscriber: Subscriber[Tweet] = authors.to(Sink.fromSubscriber(storage)).runWith(Source.asSubscriber[Tweet]) tweets.subscribe(tweetSubscriber) - //#sink-subscriber + // #sink-subscriber assertResult(storage) } "use a processor" in { - //#use-processor + // #use-processor // An example Processor factory def createProcessor: Processor[Int, Int] = Flow[Int].toProcessor.run() val flow: Flow[Int, Int, NotUsed] = Flow.fromProcessor(() => createProcessor) - //#use-processor + // #use-processor } diff --git a/akka-docs/src/test/scala/docs/stream/RestartDocSpec.scala b/akka-docs/src/test/scala/docs/stream/RestartDocSpec.scala index 8b3a94ad4b6..bdbb0db83dd 100644 --- a/akka-docs/src/test/scala/docs/stream/RestartDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/RestartDocSpec.scala @@ -33,7 +33,7 @@ class RestartDocSpec extends AkkaSpec with CompileOnlySpec { "demonstrate a restart with backoff source" in compileOnlySpec { - //#restart-with-backoff-source + // #restart-with-backoff-source val settings = RestartSettings( minBackoff = 
3.seconds, maxBackoff = 30.seconds, @@ -50,9 +50,9 @@ class RestartDocSpec extends AkkaSpec with CompileOnlySpec { .flatMap(Unmarshal(_).to[Source[ServerSentEvent, NotUsed]]) } } - //#restart-with-backoff-source + // #restart-with-backoff-source - //#with-kill-switch + // #with-kill-switch val killSwitch = restartSource .viaMat(KillSwitches.single)(Keep.right) .toMat(Sink.foreach(event => println(s"Got event: $event")))(Keep.left) @@ -61,7 +61,7 @@ class RestartDocSpec extends AkkaSpec with CompileOnlySpec { doSomethingElse() killSwitch.shutdown() - //#with-kill-switch + // #with-kill-switch } } diff --git a/akka-docs/src/test/scala/docs/stream/SinkRecipeDocSpec.scala b/akka-docs/src/test/scala/docs/stream/SinkRecipeDocSpec.scala index 5ffcc5084d9..d1ce871d64d 100644 --- a/akka-docs/src/test/scala/docs/stream/SinkRecipeDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/SinkRecipeDocSpec.scala @@ -13,11 +13,11 @@ class SinkRecipeDocSpec extends RecipeSpec { "Sink.foreachAsync" must { "processing each element asynchronously" in { def asyncProcessing(value: Int): Future[Unit] = Future { println(value) }(system.dispatcher) - //#forseachAsync-processing - //def asyncProcessing(value: Int): Future[Unit] = _ + // #forseachAsync-processing + // def asyncProcessing(value: Int): Future[Unit] = _ Source(1 to 100).runWith(Sink.foreachAsync(10)(asyncProcessing)) - //#forseachAsync-processing + // #forseachAsync-processing } } } diff --git a/akka-docs/src/test/scala/docs/stream/StreamBuffersRateSpec.scala b/akka-docs/src/test/scala/docs/stream/StreamBuffersRateSpec.scala index dc75b6dd5b8..8aa60d09720 100644 --- a/akka-docs/src/test/scala/docs/stream/StreamBuffersRateSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/StreamBuffersRateSpec.scala @@ -13,7 +13,7 @@ class StreamBuffersRateSpec extends AkkaSpec { "Demonstrate pipelining" in { def println(s: Any) = () - //#pipelining + // #pipelining Source(1 to 3) .map { i => println(s"A: $i"); i @@ -28,22 +28,25 @@ class 
StreamBuffersRateSpec extends AkkaSpec { } .async .runWith(Sink.ignore) - //#pipelining + // #pipelining } "Demonstrate buffer sizes" in { - //#section-buffer - val section = Flow[Int].map(_ * 2).async.addAttributes(Attributes.inputBuffer(initial = 1, max = 1)) // the buffer size of this map is 1 + // #section-buffer + val section = Flow[Int] + .map(_ * 2) + .async + .addAttributes(Attributes.inputBuffer(initial = 1, max = 1)) // the buffer size of this map is 1 val flow = section.via(Flow[Int].map(_ / 2)).async // the buffer size of this map is the default val runnableGraph = Source(1 to 10).via(flow).to(Sink.foreach(elem => println(elem))) val withOverriddenDefaults = runnableGraph.withAttributes(Attributes.inputBuffer(initial = 64, max = 64)) - //#section-buffer + // #section-buffer } "buffering abstraction leak" in { - //#buffering-abstraction-leak + // #buffering-abstraction-leak import scala.concurrent.duration._ case class Tick() @@ -57,42 +60,42 @@ class StreamBuffersRateSpec extends AkkaSpec { Source .tick(initialDelay = 1.second, interval = 1.second, "message!") - .conflateWithSeed(seed = (_) => 1)((count, _) => count + 1) ~> zipper.in1 + .conflateWithSeed(seed = _ => 1)((count, _) => count + 1) ~> zipper.in1 zipper.out ~> Sink.foreach(println) ClosedShape }) - //#buffering-abstraction-leak + // #buffering-abstraction-leak } "explicit buffers" in { trait Job def inboundJobsConnector(): Source[Job, NotUsed] = Source.empty - //#explicit-buffers-backpressure + // #explicit-buffers-backpressure // Getting a stream of jobs from an imaginary external system as a Source val jobs: Source[Job, NotUsed] = inboundJobsConnector() jobs.buffer(1000, OverflowStrategy.backpressure) - //#explicit-buffers-backpressure + // #explicit-buffers-backpressure - //#explicit-buffers-droptail + // #explicit-buffers-droptail jobs.buffer(1000, OverflowStrategy.dropTail) - //#explicit-buffers-droptail + // #explicit-buffers-droptail - //#explicit-buffers-dropnew + // 
#explicit-buffers-dropnew jobs.buffer(1000, OverflowStrategy.dropNew) - //#explicit-buffers-dropnew + // #explicit-buffers-dropnew - //#explicit-buffers-drophead + // #explicit-buffers-drophead jobs.buffer(1000, OverflowStrategy.dropHead) - //#explicit-buffers-drophead + // #explicit-buffers-drophead - //#explicit-buffers-dropbuffer + // #explicit-buffers-dropbuffer jobs.buffer(1000, OverflowStrategy.dropBuffer) - //#explicit-buffers-dropbuffer + // #explicit-buffers-dropbuffer - //#explicit-buffers-fail + // #explicit-buffers-fail jobs.buffer(1000, OverflowStrategy.fail) - //#explicit-buffers-fail + // #explicit-buffers-fail } diff --git a/akka-docs/src/test/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala b/akka-docs/src/test/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala index f230e65d70f..46689b3112d 100644 --- a/akka-docs/src/test/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/StreamPartialGraphDSLDocSpec.scala @@ -18,7 +18,7 @@ class StreamPartialGraphDSLDocSpec extends AkkaSpec { implicit val ec: ExecutionContext = system.dispatcher "build with open ports" in { - //#simple-partial-graph-dsl + // #simple-partial-graph-dsl val pickMaxOfThree = GraphDSL.create() { implicit b => import GraphDSL.Implicits._ @@ -46,11 +46,11 @@ class StreamPartialGraphDSLDocSpec extends AkkaSpec { val max: Future[Int] = g.run() Await.result(max, 300.millis) should equal(3) - //#simple-partial-graph-dsl + // #simple-partial-graph-dsl } "build source from partial graph" in { - //#source-from-partial-graph-dsl + // #source-from-partial-graph-dsl val pairs = Source.fromGraph(GraphDSL.create() { implicit b => import GraphDSL.Implicits._ @@ -67,12 +67,12 @@ class StreamPartialGraphDSLDocSpec extends AkkaSpec { }) val firstPair: Future[(Int, Int)] = pairs.runWith(Sink.head) - //#source-from-partial-graph-dsl + // #source-from-partial-graph-dsl Await.result(firstPair, 300.millis) should equal(1 -> 2) } "build flow from partial 
graph" in { - //#flow-from-partial-graph-dsl + // #flow-from-partial-graph-dsl val pairUpWithToString = Flow.fromGraph(GraphDSL.create() { implicit b => import GraphDSL.Implicits._ @@ -89,7 +89,7 @@ class StreamPartialGraphDSLDocSpec extends AkkaSpec { FlowShape(broadcast.in, zip.out) }) - //#flow-from-partial-graph-dsl + // #flow-from-partial-graph-dsl // format: OFF val (_, matSink: Future[(Int, String)]) = @@ -102,26 +102,26 @@ class StreamPartialGraphDSLDocSpec extends AkkaSpec { } "combine sources with simplified API" in { - //#source-combine + // #source-combine val sourceOne = Source(List(1)) val sourceTwo = Source(List(2)) val merged = Source.combine(sourceOne, sourceTwo)(Merge(_)) val mergedResult: Future[Int] = merged.runWith(Sink.fold(0)(_ + _)) - //#source-combine + // #source-combine Await.result(mergedResult, 300.millis) should equal(3) } "combine sinks with simplified API" in { val actorRef: ActorRef = testActor - //#sink-combine + // #sink-combine val sendRemotely = Sink.actorRef(actorRef, "Done", _ => "Failed") val localProcessing = Sink.foreach[Int](_ => /* do something useful */ ()) val sink = Sink.combine(sendRemotely, localProcessing)(Broadcast[Int](_)) Source(List(0, 1, 2)).runWith(sink) - //#sink-combine + // #sink-combine expectMsg(0) expectMsg(1) expectMsg(2) diff --git a/akka-docs/src/test/scala/docs/stream/StreamTestKitDocSpec.scala b/akka-docs/src/test/scala/docs/stream/StreamTestKitDocSpec.scala index 2b242a075fe..0d2516478fb 100644 --- a/akka-docs/src/test/scala/docs/stream/StreamTestKitDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/StreamTestKitDocSpec.scala @@ -17,18 +17,18 @@ import akka.pattern class StreamTestKitDocSpec extends AkkaSpec { "strict collection" in { - //#strict-collection + // #strict-collection val sinkUnderTest = Flow[Int].map(_ * 2).toMat(Sink.fold(0)(_ + _))(Keep.right) val future = Source(1 to 4).runWith(sinkUnderTest) val result = Await.result(future, 3.seconds) assert(result == 20) - 
//#strict-collection + // #strict-collection } "grouped part of infinite stream" in { - //#grouped-infinite + // #grouped-infinite import system.dispatcher import akka.pattern.pipe @@ -37,21 +37,21 @@ class StreamTestKitDocSpec extends AkkaSpec { val future = sourceUnderTest.take(10).runWith(Sink.seq) val result = Await.result(future, 3.seconds) assert(result == Seq.fill(10)(2)) - //#grouped-infinite + // #grouped-infinite } "folded stream" in { - //#folded-stream + // #folded-stream val flowUnderTest = Flow[Int].takeWhile(_ < 5) val future = Source(1 to 10).via(flowUnderTest).runWith(Sink.fold(Seq.empty[Int])(_ :+ _)) val result = Await.result(future, 3.seconds) assert(result == (1 to 4)) - //#folded-stream + // #folded-stream } "pipe to test probe" in { - //#pipeto-testprobe + // #pipeto-testprobe import system.dispatcher import akka.pattern.pipe @@ -60,11 +60,11 @@ class StreamTestKitDocSpec extends AkkaSpec { val probe = TestProbe() sourceUnderTest.runWith(Sink.seq).pipeTo(probe.ref) probe.expectMsg(3.seconds, Seq(Seq(1, 2), Seq(3, 4))) - //#pipeto-testprobe + // #pipeto-testprobe } "sink actor ref" in { - //#sink-actorref + // #sink-actorref case object Tick val sourceUnderTest = Source.tick(0.seconds, 200.millis, Tick) @@ -78,18 +78,17 @@ class StreamTestKitDocSpec extends AkkaSpec { probe.expectMsg(3.seconds, Tick) cancellable.cancel() probe.expectMsg(3.seconds, "completed") - //#sink-actorref + // #sink-actorref } "source actor ref" in { - //#source-actorref + // #source-actorref val sinkUnderTest = Flow[Int].map(_.toString).toMat(Sink.fold("")(_ + _))(Keep.right) val (ref, future) = Source .actorRef( - completionMatcher = { - case Done => - CompletionStrategy.draining + completionMatcher = { case Done => + CompletionStrategy.draining }, // Never fail the stream because of a message: failureMatcher = PartialFunction.empty, @@ -105,39 +104,39 @@ class StreamTestKitDocSpec extends AkkaSpec { val result = Await.result(future, 3.seconds) assert(result == "123") 
- //#source-actorref + // #source-actorref } "test sink probe" in { - //#test-sink-probe + // #test-sink-probe val sourceUnderTest = Source(1 to 4).filter(_ % 2 == 0).map(_ * 2) sourceUnderTest.runWith(TestSink[Int]()).request(2).expectNext(4, 8).expectComplete() - //#test-sink-probe + // #test-sink-probe } "test source probe" in { - //#test-source-probe + // #test-source-probe val sinkUnderTest = Sink.cancelled TestSource[Int]().toMat(sinkUnderTest)(Keep.left).run().expectCancellation() - //#test-source-probe + // #test-source-probe } "injecting failure" in { - //#injecting-failure + // #injecting-failure val sinkUnderTest = Sink.head[Int] val (probe, future) = TestSource[Int]().toMat(sinkUnderTest)(Keep.both).run() probe.sendError(new Exception("boom")) assert(future.failed.futureValue.getMessage == "boom") - //#injecting-failure + // #injecting-failure } "test source and a sink" in { import system.dispatcher - //#test-source-and-sink + // #test-source-and-sink val flowUnderTest = Flow[Int].mapAsyncUnordered(2) { sleep => pattern.after(10.millis * sleep, using = system.scheduler)(Future.successful(sleep)) } @@ -153,7 +152,7 @@ class StreamTestKitDocSpec extends AkkaSpec { pub.sendError(new Exception("Power surge in the linear subroutine C-47!")) val ex = sub.expectError() assert(ex.getMessage.contains("C-47")) - //#test-source-and-sink + // #test-source-and-sink } } diff --git a/akka-docs/src/test/scala/docs/stream/SubstreamDocSpec.scala b/akka-docs/src/test/scala/docs/stream/SubstreamDocSpec.scala index 4024767df70..dfd5e5155aa 100644 --- a/akka-docs/src/test/scala/docs/stream/SubstreamDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/SubstreamDocSpec.scala @@ -5,40 +5,40 @@ package docs.stream import akka.stream.scaladsl.{ Sink, Source } -import akka.stream.{ SubstreamCancelStrategy } +import akka.stream.SubstreamCancelStrategy import akka.testkit.AkkaSpec class SubstreamDocSpec extends AkkaSpec { "generate substreams by groupBy" in { - //#groupBy1 + // 
#groupBy1 val source = Source(1 to 10).groupBy(3, _ % 3) - //#groupBy1 + // #groupBy1 - //#groupBy2 + // #groupBy2 Source(1 to 10).groupBy(3, _ % 3).to(Sink.ignore).run() - //#groupBy2 + // #groupBy2 - //#groupBy3 + // #groupBy3 Source(1 to 10).groupBy(3, _ % 3).mergeSubstreams.runWith(Sink.ignore) - //#groupBy3 + // #groupBy3 - //#groupBy4 + // #groupBy4 Source(1 to 10).groupBy(3, _ % 3).mergeSubstreamsWithParallelism(2).runWith(Sink.ignore) - //concatSubstreams is equivalent to mergeSubstreamsWithParallelism(1) + // concatSubstreams is equivalent to mergeSubstreamsWithParallelism(1) Source(1 to 10).groupBy(3, _ % 3).concatSubstreams.runWith(Sink.ignore) - //#groupBy4 + // #groupBy4 } "generate substreams by splitWhen and splitAfter" in { - //#splitWhenAfter + // #splitWhenAfter Source(1 to 10).splitWhen(SubstreamCancelStrategy.drain)(_ == 3) Source(1 to 10).splitAfter(SubstreamCancelStrategy.drain)(_ == 3) - //#splitWhenAfter + // #splitWhenAfter - //#wordCount + // #wordCount val text = "This is the first line.\n" + "The second line.\n" + @@ -51,16 +51,16 @@ class SubstreamDocSpec extends AkkaSpec { .reduce(_ + _) .to(Sink.foreach(println)) .run() - //#wordCount + // #wordCount } "generate substreams by flatMapConcat and flatMapMerge" in { - //#flatMapConcat + // #flatMapConcat Source(1 to 2).flatMapConcat(i => Source(List.fill(3)(i))).runWith(Sink.ignore) - //#flatMapConcat + // #flatMapConcat - //#flatMapMerge + // #flatMapMerge Source(1 to 2).flatMapMerge(2, i => Source(List.fill(3)(i))).runWith(Sink.ignore) - //#flatMapMerge + // #flatMapMerge } } diff --git a/akka-docs/src/test/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala b/akka-docs/src/test/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala index 93aeccd0b74..5ace610f010 100644 --- a/akka-docs/src/test/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala @@ -21,7 +21,7 @@ import 
scala.concurrent.ExecutionContext object TwitterStreamQuickstartDocSpec { - //#model + // #model final case class Author(handle: String) final case class Hashtag(name: String) @@ -37,12 +37,12 @@ object TwitterStreamQuickstartDocSpec { } val akkaTag = Hashtag("#akka") - //#model + // #model abstract class TweetSourceDecl { - //#tweet-source + // #tweet-source val tweets: Source[Tweet, NotUsed] - //#tweet-source + // #tweet-source } val tweets: Source[Tweet, NotUsed] = Source( @@ -68,52 +68,52 @@ class TwitterStreamQuickstartDocSpec extends AkkaSpec { def println(s: Any): Unit = () trait Example1 { - //#first-sample - //#system-setup + // #first-sample + // #system-setup implicit val system: ActorSystem = ActorSystem("reactive-tweets") - //#system-setup - //#first-sample + // #system-setup + // #first-sample } "filter and map" in { - //#first-sample + // #first-sample - //#authors-filter-map + // #authors-filter-map val authors: Source[Author, NotUsed] = tweets.filter(_.hashtags.contains(akkaTag)).map(_.author) - //#first-sample - //#authors-filter-map + // #first-sample + // #authors-filter-map trait Example3 { - //#authors-collect + // #authors-collect val authors: Source[Author, NotUsed] = tweets.collect { case t if t.hashtags.contains(akkaTag) => t.author } - //#authors-collect + // #authors-collect } - //#first-sample + // #first-sample - //#authors-foreachsink-println + // #authors-foreachsink-println authors.runWith(Sink.foreach(println)) - //#authors-foreachsink-println - //#first-sample + // #authors-foreachsink-println + // #first-sample - //#authors-foreach-println + // #authors-foreach-println authors.runForeach(println) - //#authors-foreach-println + // #authors-foreach-println } "mapConcat hashtags" in { - //#hashtags-mapConcat + // #hashtags-mapConcat val hashtags: Source[Hashtag, NotUsed] = tweets.mapConcat(_.hashtags.toList) - //#hashtags-mapConcat + // #hashtags-mapConcat } trait HiddenDefinitions { - //#graph-dsl-broadcast + // 
#graph-dsl-broadcast val writeAuthors: Sink[Author, NotUsed] = ??? val writeHashtags: Sink[Hashtag, NotUsed] = ??? - //#graph-dsl-broadcast + // #graph-dsl-broadcast } "simple broadcast" in { @@ -142,28 +142,28 @@ class TwitterStreamQuickstartDocSpec extends AkkaSpec { 42 } - //#tweets-slow-consumption-dropHead + // #tweets-slow-consumption-dropHead tweets.buffer(10, OverflowStrategy.dropHead).map(slowComputation).runWith(Sink.ignore) - //#tweets-slow-consumption-dropHead + // #tweets-slow-consumption-dropHead } "backpressure by readline" in { trait X { import scala.concurrent.duration._ - //#backpressure-by-readline + // #backpressure-by-readline val completion: Future[Done] = Source(1 to 10).map(i => { println(s"map => $i"); i }).runForeach { i => readLine(s"Element = $i; continue reading? [press enter]\n") } Await.ready(completion, 1.minute) - //#backpressure-by-readline + // #backpressure-by-readline } } "count elements on finite stream" in { - //#tweets-fold-count + // #tweets-fold-count val count: Flow[Tweet, Int, NotUsed] = Flow[Tweet].map(_ => 1) val sumSink: Sink[Int, Future[Int]] = Sink.fold[Int, Int](0)(_ + _) @@ -174,19 +174,19 @@ class TwitterStreamQuickstartDocSpec extends AkkaSpec { val sum: Future[Int] = counterGraph.run() sum.foreach(c => println(s"Total tweets processed: $c")) - //#tweets-fold-count + // #tweets-fold-count new AnyRef { - //#tweets-fold-count-oneline + // #tweets-fold-count-oneline val sum: Future[Int] = tweets.map(t => 1).runWith(sumSink) - //#tweets-fold-count-oneline + // #tweets-fold-count-oneline } } "materialize multiple times" in { val tweetsInMinuteFromNow = tweets // not really in second, just acting as if - //#tweets-runnable-flow-materialized-twice + // #tweets-runnable-flow-materialized-twice val sumSink = Sink.fold[Int, Int](0)(_ + _) val counterRunnableGraph: RunnableGraph[Future[Int]] = tweetsInMinuteFromNow.filter(_.hashtags contains akkaTag).map(t => 1).toMat(sumSink)(Keep.right) @@ -196,7 +196,7 @@ class 
TwitterStreamQuickstartDocSpec extends AkkaSpec { // and once in the evening, reusing the flow val eveningTweetsCount: Future[Int] = counterRunnableGraph.run() - //#tweets-runnable-flow-materialized-twice + // #tweets-runnable-flow-materialized-twice val sum: Future[Int] = counterRunnableGraph.run() diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeAdhocSource.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeAdhocSource.scala index f7c4c013c45..c50c062a19d 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeAdhocSource.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeAdhocSource.scala @@ -16,17 +16,17 @@ import scala.concurrent.duration._ class RecipeAdhocSource extends RecipeSpec { - //#adhoc-source + // #adhoc-source def adhocSource[T](source: Source[T, _], timeout: FiniteDuration, maxRetries: Int): Source[T, _] = - Source.lazySource( - () => - source - .backpressureTimeout(timeout) - .recoverWithRetries(maxRetries, { - case t: TimeoutException => - Source.lazySource(() => source.backpressureTimeout(timeout)).mapMaterializedValue(_ => NotUsed) + Source.lazySource(() => + source + .backpressureTimeout(timeout) + .recoverWithRetries( + maxRetries, + { case t: TimeoutException => + Source.lazySource(() => source.backpressureTimeout(timeout)).mapMaterializedValue(_ => NotUsed) })) - //#adhoc-source + // #adhoc-source "Recipe for adhoc source" must { "not start the source if there is no demand" taggedAs TimingTest in { @@ -44,9 +44,12 @@ class RecipeAdhocSource extends RecipeSpec { "shut down the source when the next demand times out" taggedAs TimingTest in { val shutdown = Promise[Done]() - val sink = adhocSource(Source.repeat("a").watchTermination() { (_, term) => - shutdown.completeWith(term) - }, 200.milliseconds, 3).runWith(TestSink[String]()) + val sink = adhocSource( + Source.repeat("a").watchTermination() { (_, term) => + shutdown.completeWith(term) + }, + 200.milliseconds, + 3).runWith(TestSink[String]()) 
sink.requestNext("a") Thread.sleep(200) @@ -55,9 +58,12 @@ class RecipeAdhocSource extends RecipeSpec { "not shut down the source when there are still demands" taggedAs TimingTest in { val shutdown = Promise[Done]() - val sink = adhocSource(Source.repeat("a").watchTermination() { (_, term) => - shutdown.completeWith(term) - }, 200.milliseconds, 3).runWith(TestSink[String]()) + val sink = adhocSource( + Source.repeat("a").watchTermination() { (_, term) => + shutdown.completeWith(term) + }, + 200.milliseconds, + 3).runWith(TestSink[String]()) sink.requestNext("a") Thread.sleep(100) @@ -79,9 +85,12 @@ class RecipeAdhocSource extends RecipeSpec { val source = Source.empty.mapMaterializedValue(_ => startedCount.incrementAndGet()).concat(Source.repeat("a")) - val sink = adhocSource(source.watchTermination() { (_, term) => - shutdown.completeWith(term) - }, 200.milliseconds, 3).runWith(TestSink[String]()) + val sink = adhocSource( + source.watchTermination() { (_, term) => + shutdown.completeWith(term) + }, + 200.milliseconds, + 3).runWith(TestSink[String]()) sink.requestNext("a") startedCount.get() should be(1) @@ -95,9 +104,12 @@ class RecipeAdhocSource extends RecipeSpec { val source = Source.empty.mapMaterializedValue(_ => startedCount.incrementAndGet()).concat(Source.repeat("a")) - val sink = adhocSource(source.watchTermination() { (_, term) => - shutdown.completeWith(term) - }, 200.milliseconds, 3).runWith(TestSink[String]()) + val sink = adhocSource( + source.watchTermination() { (_, term) => + shutdown.completeWith(term) + }, + 200.milliseconds, + 3).runWith(TestSink[String]()) sink.requestNext("a") startedCount.get() should be(1) @@ -115,12 +127,12 @@ class RecipeAdhocSource extends RecipeSpec { Thread.sleep(500) sink.requestNext("a") - startedCount.get() should be(4) //startCount == 4, which means "re"-tried 3 times + startedCount.get() should be(4) // startCount == 4, which means "re"-tried 3 times Thread.sleep(500) sink.expectError() shouldBe 
a[TimeoutException] - sink.request(1) //send demand - sink.expectNoMessage(200.milliseconds) //but no more restart + sink.request(1) // send demand + sink.expectNoMessage(200.milliseconds) // but no more restart } } } diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeByteStrings.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeByteStrings.scala index 93f81a9dc23..3d0baa32ca7 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeByteStrings.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeByteStrings.scala @@ -20,7 +20,7 @@ class RecipeByteStrings extends RecipeSpec { val rawBytes = Source(List(ByteString(1, 2), ByteString(3), ByteString(4, 5, 6), ByteString(7, 8, 9))) val ChunkLimit = 2 - //#bytestring-chunker + // #bytestring-chunker import akka.stream.stage._ class Chunker(val chunkSize: Int) extends GraphStage[FlowShape[ByteString, ByteString]] { @@ -30,11 +30,13 @@ class RecipeByteStrings extends RecipeSpec { override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { private var buffer = ByteString.empty - setHandler(out, new OutHandler { - override def onPull(): Unit = { - emitChunk() - } - }) + setHandler( + out, + new OutHandler { + override def onPull(): Unit = { + emitChunk() + } + }) setHandler( in, new InHandler { @@ -72,7 +74,7 @@ class RecipeByteStrings extends RecipeSpec { } val chunksStream = rawBytes.via(new Chunker(ChunkLimit)) - //#bytestring-chunker + // #bytestring-chunker val chunksFuture = chunksStream.limit(10).runWith(Sink.seq) val chunks = Await.result(chunksFuture, 3.seconds) @@ -84,7 +86,7 @@ class RecipeByteStrings extends RecipeSpec { "have a working bytes limiter" in { val SizeLimit = 9 - //#bytes-limiter + // #bytes-limiter import akka.stream.stage._ class ByteLimiter(val maximumBytes: Long) extends GraphStage[FlowShape[ByteString, ByteString]] { val in = Inlet[ByteString]("ByteLimiter.in") @@ -94,24 +96,27 @@ class RecipeByteStrings 
extends RecipeSpec { override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { private var count = 0 - setHandlers(in, out, new InHandler with OutHandler { + setHandlers( + in, + out, + new InHandler with OutHandler { - override def onPull(): Unit = { - pull(in) - } + override def onPull(): Unit = { + pull(in) + } - override def onPush(): Unit = { - val chunk = grab(in) - count += chunk.size - if (count > maximumBytes) failStage(new IllegalStateException("Too much bytes")) - else push(out, chunk) - } - }) + override def onPush(): Unit = { + val chunk = grab(in) + count += chunk.size + if (count > maximumBytes) failStage(new IllegalStateException("Too much bytes")) + else push(out, chunk) + } + }) } } val limiter = Flow[ByteString].via(new ByteLimiter(SizeLimit)) - //#bytes-limiter + // #bytes-limiter val bytes1 = Source(List(ByteString(1, 2), ByteString(3), ByteString(4, 5, 6), ByteString(7, 8, 9))) val bytes2 = Source(List(ByteString(1, 2), ByteString(3), ByteString(4, 5, 6), ByteString(7, 8, 9, 10))) @@ -128,9 +133,9 @@ class RecipeByteStrings extends RecipeSpec { val data = Source(List(ByteString(1, 2), ByteString(3), ByteString(4, 5, 6), ByteString(7, 8, 9))) - //#compacting-bytestrings + // #compacting-bytestrings val compacted: Source[ByteString, NotUsed] = data.map(_.compact) - //#compacting-bytestrings + // #compacting-bytestrings Await.result(compacted.limit(10).runWith(Sink.seq), 3.seconds).forall(_.isCompact) should be(true) } diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDecompress.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDecompress.scala index 14ae85d1fd9..3870db9341c 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDecompress.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDecompress.scala @@ -14,16 +14,16 @@ import scala.concurrent.duration._ class RecipeDecompress extends RecipeSpec { "Recipe for decompressing a Gzip stream" must { 
"work" in { - //#decompress-gzip + // #decompress-gzip import akka.stream.scaladsl.Compression - //#decompress-gzip + // #decompress-gzip val compressed = Source.single(ByteString.fromString("Hello World")).via(Compression.gzip) - //#decompress-gzip + // #decompress-gzip val uncompressed = compressed.via(Compression.gunzip()).map(_.utf8String) - //#decompress-gzip + // #decompress-gzip Await.result(uncompressed.runWith(Sink.head), 3.seconds) should be("Hello World") } diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDigest.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDigest.scala index f3af9bb8cea..31f34125a19 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDigest.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDigest.scala @@ -13,7 +13,7 @@ class RecipeDigest extends RecipeSpec { "work" in { - //#calculating-digest + // #calculating-digest import java.security.MessageDigest import akka.NotUsed @@ -33,27 +33,31 @@ class RecipeDigest extends RecipeSpec { override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { private val digest = MessageDigest.getInstance(algorithm) - setHandler(out, new OutHandler { - override def onPull(): Unit = pull(in) - }) - - setHandler(in, new InHandler { - override def onPush(): Unit = { - val chunk = grab(in) - digest.update(chunk.toArray) - pull(in) - } - - override def onUpstreamFinish(): Unit = { - emit(out, ByteString(digest.digest())) - completeStage() - } - }) + setHandler( + out, + new OutHandler { + override def onPull(): Unit = pull(in) + }) + + setHandler( + in, + new InHandler { + override def onPush(): Unit = { + val chunk = grab(in) + digest.update(chunk.toArray) + pull(in) + } + + override def onUpstreamFinish(): Unit = { + emit(out, ByteString(digest.digest())) + completeStage() + } + }) } } val digest: Source[ByteString, NotUsed] = data.via(new DigestCalculator("SHA-256")) - //#calculating-digest + // 
#calculating-digest Await.result(digest.runWith(Sink.head), 3.seconds) should be( ByteString(0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea, 0x41, 0x41, 0x40, 0xde, 0x5d, 0xae, 0x22, 0x23, 0xb0, diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDroppyBroadcast.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDroppyBroadcast.scala index dab55ac0d10..c60710c9dba 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDroppyBroadcast.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDroppyBroadcast.scala @@ -23,7 +23,7 @@ class RecipeDroppyBroadcast extends RecipeSpec { val mySink2 = Sink.fromSubscriber(sub2) val mySink3 = Sink.fromSubscriber(sub3) - //#droppy-bcast + // #droppy-bcast val graph = RunnableGraph.fromGraph(GraphDSL.createGraph(mySink1, mySink2, mySink3)((_, _, _)) { implicit b => (sink1, sink2, sink3) => import GraphDSL.Implicits._ @@ -36,7 +36,7 @@ class RecipeDroppyBroadcast extends RecipeSpec { bcast.buffer(10, OverflowStrategy.dropHead) ~> sink3 ClosedShape }) - //#droppy-bcast + // #droppy-bcast graph.run() diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeFlattenSeq.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeFlattenSeq.scala index a3c3bd2ce77..2b1dfdf1a42 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeFlattenSeq.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeFlattenSeq.scala @@ -18,10 +18,10 @@ class RecipeFlattenSeq extends RecipeSpec { val someDataSource = Source(List(List("1"), List("2"), List("3", "4", "5"), List("6", "7"))) - //#flattening-seqs + // #flattening-seqs val myData: Source[List[Message], NotUsed] = someDataSource val flattened: Source[Message, NotUsed] = myData.mapConcat(identity) - //#flattening-seqs + // #flattening-seqs Await.result(flattened.limit(8).runWith(Sink.seq), 3.seconds) should be(List("1", "2", "3", "4", "5", "6", "7")) diff --git 
a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeGlobalRateLimit.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeGlobalRateLimit.scala index da5b9cff28a..d9181f2e034 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeGlobalRateLimit.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeGlobalRateLimit.scala @@ -18,7 +18,7 @@ class RecipeGlobalRateLimit extends RecipeSpec { "Global rate limiting recipe" must { - //#global-limiter-actor + // #global-limiter-actor object Limiter { case object WantToPass case object MayPass @@ -75,11 +75,11 @@ class RecipeGlobalRateLimit extends RecipeSpec { waitQueue.foreach(_ ! Status.Failure(new IllegalStateException("limiter stopped"))) } } - //#global-limiter-actor + // #global-limiter-actor "work" in { - //#global-limiter-flow + // #global-limiter-flow def limitGlobal[T](limiter: ActorRef, maxAllowedWait: FiniteDuration): Flow[T, T, NotUsed] = { import akka.pattern.ask import akka.util.Timeout @@ -87,11 +87,11 @@ class RecipeGlobalRateLimit extends RecipeSpec { import system.dispatcher implicit val triggerTimeout = Timeout(maxAllowedWait) val limiterTriggerFuture = limiter ? 
Limiter.WantToPass - limiterTriggerFuture.map((_) => element) + limiterTriggerFuture.map(_ => element) }) } - //#global-limiter-flow + // #global-limiter-flow // Use a large period and emulate the timer by hand instead val limiter = system.actorOf(Limiter.props(2, 100.days, 1), "limiter") diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeHold.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeHold.scala index c0010619223..d7de8e8ed85 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeHold.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeHold.scala @@ -11,7 +11,7 @@ import akka.stream.testkit._ import scala.concurrent.duration._ object HoldOps { - //#hold-version-1 + // #hold-version-1 import akka.stream._ import akka.stream.stage._ final class HoldWithInitial[T](initial: T) extends GraphStage[FlowShape[T, T]] { @@ -23,16 +23,19 @@ object HoldOps { override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { private var currentValue: T = initial - setHandlers(in, out, new InHandler with OutHandler { - override def onPush(): Unit = { - currentValue = grab(in) - pull(in) - } + setHandlers( + in, + out, + new InHandler with OutHandler { + override def onPush(): Unit = { + currentValue = grab(in) + pull(in) + } - override def onPull(): Unit = { - push(out, currentValue) - } - }) + override def onPull(): Unit = { + push(out, currentValue) + } + }) override def preStart(): Unit = { pull(in) @@ -40,9 +43,9 @@ object HoldOps { } } - //#hold-version-1 + // #hold-version-1 - //#hold-version-2 + // #hold-version-2 import akka.stream._ import akka.stream.stage._ final class HoldWithWait[T] extends GraphStage[FlowShape[T, T]] { @@ -78,7 +81,7 @@ object HoldOps { } } } - //#hold-version-2 + // #hold-version-2 } class RecipeHold extends RecipeSpec { diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeKeepAlive.scala 
b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeKeepAlive.scala index e316a373b55..323e8c2a24f 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeKeepAlive.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeKeepAlive.scala @@ -15,11 +15,11 @@ class RecipeKeepAlive extends RecipeSpec { "work" in { val keepaliveMessage = ByteString(11) - //#inject-keepalive + // #inject-keepalive import scala.concurrent.duration._ val injectKeepAlive: Flow[ByteString, ByteString, NotUsed] = Flow[ByteString].keepAlive(1.second, () => keepaliveMessage) - //#inject-keepalive + // #inject-keepalive // No need to test, this is a built-in stage with proper tests } diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeLoggingElements.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeLoggingElements.scala index 6e51cbf9951..951740d1816 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeLoggingElements.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeLoggingElements.scala @@ -20,11 +20,11 @@ class RecipeLoggingElements extends RecipeSpec { val mySource = Source(List("1", "2", "3")) - //#println-debug + // #println-debug val loggedSource = mySource.map { elem => println(elem); elem } - //#println-debug + // #println-debug loggedSource.runWith(Sink.ignore) printProbe.expectMsgAllOf("1", "2", "3") @@ -33,22 +33,22 @@ class RecipeLoggingElements extends RecipeSpec { val mySource = Source(List("1", "2", "3")) def analyse(s: String) = s "use log()" in { - //#log-custom + // #log-custom // customise log levels mySource .log("before-map") .withAttributes(Attributes .logLevels(onElement = Logging.WarningLevel, onFinish = Logging.InfoLevel, onFailure = Logging.DebugLevel)) .map(analyse) - //#log-custom + // #log-custom } "use log() with custom adapter" in { - //#log-custom + // #log-custom // or provide custom logging adapter implicit val adapter: LoggingAdapter = Logging(system, "customLogger") mySource.log("custom") - 
//#log-custom + // #log-custom val loggedSource = mySource.log("custom") EventFilter.debug(start = "[custom] Element: ").intercept { @@ -57,12 +57,12 @@ class RecipeLoggingElements extends RecipeSpec { } "use log() for error logging" in { - //#log-error + // #log-error Source(-5 to 5) - .map(1 / _) //throwing ArithmeticException: / by zero + .map(1 / _) // throwing ArithmeticException: / by zero .log("error logging") .runWith(Sink.ignore) - //#log-error + // #log-error } } diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeManualTrigger.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeManualTrigger.scala index 3e4c1d3c968..4c33c66fac3 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeManualTrigger.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeManualTrigger.scala @@ -22,7 +22,7 @@ class RecipeManualTrigger extends RecipeSpec { val triggerSource = Source.fromPublisher(pub) val sink = Sink.fromSubscriber(sub) - //#manually-triggered-stream + // #manually-triggered-stream val graph = RunnableGraph.fromGraph(GraphDSL.create() { implicit builder => import GraphDSL.Implicits._ val zip = builder.add(Zip[Message, Trigger]()) @@ -31,7 +31,7 @@ class RecipeManualTrigger extends RecipeSpec { zip.out ~> Flow[(Message, Trigger)].map { case (msg, trigger) => msg } ~> sink ClosedShape }) - //#manually-triggered-stream + // #manually-triggered-stream graph.run() @@ -61,7 +61,7 @@ class RecipeManualTrigger extends RecipeSpec { val triggerSource = Source.fromPublisher(pub) val sink = Sink.fromSubscriber(sub) - //#manually-triggered-stream-zipwith + // #manually-triggered-stream-zipwith val graph = RunnableGraph.fromGraph(GraphDSL.create() { implicit builder => import GraphDSL.Implicits._ val zip = builder.add(ZipWith((msg: Message, trigger: Trigger) => msg)) @@ -71,7 +71,7 @@ class RecipeManualTrigger extends RecipeSpec { zip.out ~> sink ClosedShape }) - //#manually-triggered-stream-zipwith + // 
#manually-triggered-stream-zipwith graph.run() diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeMissedTicks.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeMissedTicks.scala index 265c52223d0..98ee432361a 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeMissedTicks.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeMissedTicks.scala @@ -23,13 +23,13 @@ class RecipeMissedTicks extends RecipeSpec { val tickStream = Source.fromPublisher(pub) val sink = Sink.fromSubscriber(sub) - //#missed-ticks + // #missed-ticks val missedTicks: Flow[Tick, Int, NotUsed] = - Flow[Tick].conflateWithSeed(seed = (_) => 0)((missedTicks, tick) => missedTicks + 1) - //#missed-ticks + Flow[Tick].conflateWithSeed(seed = _ => 0)((missedTicks, tick) => missedTicks + 1) + // #missed-ticks val latch = TestLatch(3) val realMissedTicks: Flow[Tick, Int, NotUsed] = - Flow[Tick].conflateWithSeed(seed = (_) => 0)((missedTicks, tick) => { latch.countDown(); missedTicks + 1 }) + Flow[Tick].conflateWithSeed(seed = _ => 0)((missedTicks, tick) => { latch.countDown(); missedTicks + 1 }) tickStream.via(realMissedTicks).to(sink).run() diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeMultiGroupBy.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeMultiGroupBy.scala index d3eadee299a..57c18805e87 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeMultiGroupBy.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeMultiGroupBy.scala @@ -25,7 +25,7 @@ class RecipeMultiGroupBy extends RecipeSpec { else List(Topic("1"), Topic("2")) } - //#multi-groupby + // #multi-groupby val topicMapper: (Message) => immutable.Seq[Topic] = extractTopics val messageAndTopic: Source[(Message, Topic), NotUsed] = elems.mapConcat { (msg: Message) => @@ -35,14 +35,13 @@ class RecipeMultiGroupBy extends RecipeSpec { topicsForMessage.map(msg -> _) } - val multiGroups = messageAndTopic.groupBy(2, _._2).map { - case (msg, topic) => - // 
do what needs to be done - //#multi-groupby - (msg, topic) - //#multi-groupby + val multiGroups = messageAndTopic.groupBy(2, _._2).map { case (msg, topic) => + // do what needs to be done + // #multi-groupby + (msg, topic) + // #multi-groupby } - //#multi-groupby + // #multi-groupby val result = multiGroups .grouped(10) diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeParseLines.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeParseLines.scala index 8b4bf143467..34697f1f6be 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeParseLines.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeParseLines.scala @@ -24,12 +24,12 @@ class RecipeParseLines extends RecipeSpec { ByteString("\nHello Akka!\r\nHello Streams!"), ByteString("\r\n\r\n"))) - //#parse-lines + // #parse-lines import akka.stream.scaladsl.Framing val linesStream = rawData .via(Framing.delimiter(ByteString("\r\n"), maximumFrameLength = 100, allowTruncation = true)) .map(_.utf8String) - //#parse-lines + // #parse-lines Await.result(linesStream.limit(10).runWith(Sink.seq), 3.seconds) should be( List("Hello World\r!", "Hello Akka!", "Hello Streams!", "")) diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeReduceByKey.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeReduceByKey.scala index 266014888a6..45b21f736be 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeReduceByKey.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeReduceByKey.scala @@ -19,17 +19,17 @@ class RecipeReduceByKey extends RecipeSpec { def words = Source(List("hello", "world", "and", "hello", "universe", "akka") ++ List.fill(1000)("rocks!")) - //#word-count + // #word-count val counts: Source[(String, Int), NotUsed] = words - // split the words into separate streams first + // split the words into separate streams first .groupBy(MaximumDistinctWords, identity) - //transform each element to pair with number of words in it + // transform 
each element to pair with number of words in it .map(_ -> 1) // add counting logic to the streams .reduce((l, r) => (l._1, l._2 + r._2)) // get a stream of word counts .mergeSubstreams - //#word-count + // #word-count Await.result(counts.limit(10).runWith(Sink.seq), 3.seconds).toSet should be( Set(("hello", 2), ("world", 1), ("and", 1), ("universe", 1), ("akka", 1), ("rocks!", 1000))) @@ -39,7 +39,7 @@ class RecipeReduceByKey extends RecipeSpec { def words = Source(List("hello", "world", "and", "hello", "universe", "akka") ++ List.fill(1000)("rocks!")) - //#reduce-by-key-general + // #reduce-by-key-general def reduceByKey[In, K, Out](maximumGroupSize: Int, groupKey: (In) => K, map: (In) => Out)( reduce: (Out, Out) => Out): Flow[In, (K, Out), NotUsed] = { @@ -53,7 +53,7 @@ class RecipeReduceByKey extends RecipeSpec { val wordCounts = words.via( reduceByKey(MaximumDistinctWords, groupKey = (word: String) => word, map = (word: String) => 1)( (left: Int, right: Int) => left + right)) - //#reduce-by-key-general + // #reduce-by-key-general Await.result(wordCounts.limit(10).runWith(Sink.seq), 3.seconds).toSet should be( Set(("hello", 2), ("world", 1), ("and", 1), ("universe", 1), ("akka", 1), ("rocks!", 1000))) diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSeq.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSeq.scala index f691c467b4f..049c9d0bea1 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSeq.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSeq.scala @@ -13,16 +13,16 @@ class RecipeSeq extends RecipeSpec { "not be done unsafely" in { val mySource = Source(1 to 3).map(_.toString) - //#draining-to-seq-unsafe + // #draining-to-seq-unsafe // Dangerous: might produce a collection with 2 billion elements! 
val f: Future[Seq[String]] = mySource.runWith(Sink.seq) - //#draining-to-seq-unsafe + // #draining-to-seq-unsafe f.futureValue should ===(Seq("1", "2", "3")) } "be done safely" in { val mySource = Source(1 to 3).map(_.toString) - //#draining-to-seq-safe + // #draining-to-seq-safe val MAX_ALLOWED_SIZE = 100 // OK. Future will fail with a `StreamLimitReachedException` @@ -33,7 +33,7 @@ class RecipeSeq extends RecipeSpec { // OK. Collect up until max-th elements only, then cancel upstream val ignoreOverflow: Future[Seq[String]] = mySource.take(MAX_ALLOWED_SIZE).runWith(Sink.seq) - //#draining-to-seq-safe + // #draining-to-seq-safe limited.futureValue should ===(Seq("1", "2", "3")) ignoreOverflow.futureValue should ===(Seq("1", "2", "3")) } diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSimpleDrop.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSimpleDrop.scala index a354a84f7da..5d39e2d6e97 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSimpleDrop.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSimpleDrop.scala @@ -17,10 +17,10 @@ class RecipeSimpleDrop extends RecipeSpec { "work" in { - //#simple-drop + // #simple-drop val droppyStream: Flow[Message, Message, NotUsed] = Flow[Message].conflate((lastMessage, newMessage) => newMessage) - //#simple-drop + // #simple-drop val latch = TestLatch(2) val realDroppyStream = Flow[Message].conflate((lastMessage, newMessage) => { latch.countDown(); newMessage }) diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSourceFromFunction.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSourceFromFunction.scala index b05b12fc24e..fc7ccdb3c65 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSourceFromFunction.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSourceFromFunction.scala @@ -16,9 +16,9 @@ class RecipeSourceFromFunction extends RecipeSpec { "be a mapping of Source.repeat" in { def builderFunction(): String = 
UUID.randomUUID.toString - //#source-from-function + // #source-from-function val source = Source.repeat(NotUsed).map(_ => builderFunction()) - //#source-from-function + // #source-from-function val f = source.take(2).runWith(Sink.seq) f.futureValue.distinct.size should ===(2) diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSplitter.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSplitter.scala index 68994fbd446..0a0998474c4 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSplitter.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeSplitter.scala @@ -19,47 +19,47 @@ class RecipeSplitter extends AnyWordSpec with BeforeAndAfterAll with Matchers wi "Splitter" should { " simple split " in { - //#Simple-Split - //Sample Source + // #Simple-Split + // Sample Source val source: Source[String, NotUsed] = Source(List("1-2-3", "2-3", "3-4")) val ret = source .map(s => s.split("-").toList) .mapConcat(identity) - //Sub-streams logic + // Sub-streams logic .map(s => s.toInt) .runWith(Sink.seq) - //Verify results + // Verify results ret.futureValue should be(Vector(1, 2, 3, 2, 3, 3, 4)) - //#Simple-Split + // #Simple-Split } " aggregate split" in { - //#Aggregate-Split - //Sample Source + // #Aggregate-Split + // Sample Source val source: Source[String, NotUsed] = Source(List("1-2-3", "2-3", "3-4")) val result = source .map(s => s.split("-").toList) - //split all messages into sub-streams + // split all messages into sub-streams .splitWhen(a => true) - //now split each collection + // now split each collection .mapConcat(identity) - //Sub-streams logic + // Sub-streams logic .map(s => s.toInt) - //aggregate each sub-stream + // aggregate each sub-stream .reduce((a, b) => a + b) - //and merge back the result into the original stream + // and merge back the result into the original stream .mergeSubstreams .runWith(Sink.seq); - //Verify results + // Verify results result.futureValue should be(Vector(6, 5, 7)) - 
//#Aggregate-Split + // #Aggregate-Split } diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeWorkerPool.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeWorkerPool.scala index f38c0c53209..b22bd2a41d0 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeWorkerPool.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeWorkerPool.scala @@ -21,7 +21,7 @@ class RecipeWorkerPool extends RecipeSpec { val worker = Flow[String].map(_ + " done") - //#worker-pool + // #worker-pool def balancer[In, Out](worker: Flow[In, Out, Any], workerCount: Int): Flow[In, Out, NotUsed] = { import GraphDSL.Implicits._ @@ -40,7 +40,7 @@ class RecipeWorkerPool extends RecipeSpec { } val processedJobs: Source[Result, NotUsed] = myJobs.via(balancer(worker, 3)) - //#worker-pool + // #worker-pool Await.result(processedJobs.limit(10).runWith(Sink.seq), 3.seconds).toSet should be( Set("1 done", "2 done", "3 done", "4 done", "5 done")) diff --git a/akka-docs/src/test/scala/docs/stream/io/StreamFileDocSpec.scala b/akka-docs/src/test/scala/docs/stream/io/StreamFileDocSpec.scala index 42537faec96..b8d4c3deda1 100644 --- a/akka-docs/src/test/scala/docs/stream/io/StreamFileDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/io/StreamFileDocSpec.scala @@ -27,45 +27,45 @@ class StreamFileDocSpec extends AkkaSpec(UnboundedMailboxConfig) { override def afterTermination() = Files.delete(file) { - //#file-source + // #file-source import akka.stream.scaladsl._ - //#file-source + // #file-source Thread.sleep(0) // needs a statement here for valid syntax and to avoid "unused" warnings } { - //#file-source + // #file-source val file = Paths.get("example.csv") - //#file-source + // #file-source } { - //#file-sink + // #file-sink val file = Paths.get("greeting.txt") - //#file-sink + // #file-sink } "read data from a file" in { - //#file-source - def handle(b: ByteString): Unit //#file-source + // #file-source + def handle(b: ByteString): Unit // #file-source = () 
- //#file-source + // #file-source val foreach: Future[IOResult] = FileIO.fromPath(file).to(Sink.ignore).run() - //#file-source + // #file-source } "configure dispatcher in code" in { - //#custom-dispatcher-code + // #custom-dispatcher-code FileIO.fromPath(file).withAttributes(ActorAttributes.dispatcher("custom-blocking-io-dispatcher")) - //#custom-dispatcher-code + // #custom-dispatcher-code } "write data into a file" in { - //#file-sink + // #file-sink val text = Source.single("Hello Akka Stream!") val result: Future[IOResult] = text.map(t => ByteString(t)).runWith(FileIO.toPath(file)) - //#file-sink + // #file-sink } } diff --git a/akka-docs/src/test/scala/docs/stream/io/StreamTcpDocSpec.scala b/akka-docs/src/test/scala/docs/stream/io/StreamTcpDocSpec.scala index 5550b375db0..e0d02c2c9b8 100644 --- a/akka-docs/src/test/scala/docs/stream/io/StreamTcpDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/io/StreamTcpDocSpec.scala @@ -25,20 +25,19 @@ class StreamTcpDocSpec extends AkkaSpec { "simple server connection" in { { - //#echo-server-simple-bind + // #echo-server-simple-bind val binding: Future[ServerBinding] = Tcp(system).bind("127.0.0.1", 8888).to(Sink.ignore).run() binding.map { b => - b.unbind().onComplete { - case _ => // ... + b.unbind().onComplete { case _ => // ... 
} } - //#echo-server-simple-bind + // #echo-server-simple-bind } { val (host, port) = SocketUtil.temporaryServerHostnameAndPort() - //#echo-server-simple-handle + // #echo-server-simple-handle import akka.stream.scaladsl.Framing val connections: Source[IncomingConnection, Future[ServerBinding]] = @@ -54,7 +53,7 @@ class StreamTcpDocSpec extends AkkaSpec { connection.handleWith(echo) } - //#echo-server-simple-handle + // #echo-server-simple-handle } } @@ -66,7 +65,7 @@ class StreamTcpDocSpec extends AkkaSpec { import akka.stream.scaladsl.Framing val binding = - //#welcome-banner-chat-server + // #welcome-banner-chat-server connections .to(Sink.foreach { connection => // server logic, parses incoming commands @@ -79,11 +78,11 @@ class StreamTcpDocSpec extends AkkaSpec { val serverLogic = Flow[ByteString] .via(Framing.delimiter(ByteString("\n"), maximumFrameLength = 256, allowTruncation = true)) .map(_.utf8String) - //#welcome-banner-chat-server + // #welcome-banner-chat-server .map { command => serverProbe.ref ! 
command; command } - //#welcome-banner-chat-server + // #welcome-banner-chat-server .via(commandParser) // merge in the initial banner after parser .merge(welcome) @@ -93,7 +92,7 @@ class StreamTcpDocSpec extends AkkaSpec { connection.handleWith(serverLogic) }) .run() - //#welcome-banner-chat-server + // #welcome-banner-chat-server // make sure server is started before we connect binding.futureValue @@ -110,14 +109,14 @@ class StreamTcpDocSpec extends AkkaSpec { { // just for docs, never actually used - //#repl-client + // #repl-client val connection = Tcp(system).outgoingConnection("127.0.0.1", 8888) - //#repl-client + // #repl-client } { val connection = Tcp(system).outgoingConnection(localhost) - //#repl-client + // #repl-client val replParser = Flow[String].takeWhile(_ != "q").concat(Source.single("BYE")).map(elem => ByteString(s"$elem\n")) @@ -130,7 +129,7 @@ class StreamTcpDocSpec extends AkkaSpec { .via(replParser) val connected = connection.join(repl).run() - //#repl-client + // #repl-client // make sure we have a connection or fail already here connected.futureValue diff --git a/akka-docs/src/test/scala/docs/stream/operators/BroadcastDocExample.scala b/akka-docs/src/test/scala/docs/stream/operators/BroadcastDocExample.scala index 9a3d659ea81..9c27a06fb1b 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/BroadcastDocExample.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/BroadcastDocExample.scala @@ -15,7 +15,7 @@ object BroadcastDocExample { implicit val system: ActorSystem = ActorSystem("BroadcastDocExample") - //#broadcast + // #broadcast import akka.NotUsed import akka.stream.ClosedShape import akka.stream.scaladsl.GraphDSL @@ -43,9 +43,9 @@ object BroadcastDocExample { ClosedShape }) .run() - //#broadcast + // #broadcast - //#broadcast-async + // #broadcast-async RunnableGraph.fromGraph(GraphDSL.createGraph(countSink.async, minSink.async, maxSink.async)(Tuple3.apply) { implicit builder => (countS, minS, maxS) => import 
GraphDSL.Implicits._ @@ -56,5 +56,5 @@ object BroadcastDocExample { broadcast.out(2) ~> maxS ClosedShape }) - //#broadcast-async + // #broadcast-async } diff --git a/akka-docs/src/test/scala/docs/stream/operators/Map.scala b/akka-docs/src/test/scala/docs/stream/operators/Map.scala index 2ece424a5c5..b5a509d68cd 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/Map.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/Map.scala @@ -12,8 +12,8 @@ import akka.stream.scaladsl._ object Map { - //#map + // #map val source: Source[Int, NotUsed] = Source(1 to 10) val mapped: Source[String, NotUsed] = source.map(elem => elem.toString) - //#map + // #map } diff --git a/akka-docs/src/test/scala/docs/stream/operators/MergeSequenceDocExample.scala b/akka-docs/src/test/scala/docs/stream/operators/MergeSequenceDocExample.scala index 7ea005dbfb2..5fc65f98e14 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/MergeSequenceDocExample.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/MergeSequenceDocExample.scala @@ -24,10 +24,13 @@ object MergeSequenceDocExample { .fromGraph(GraphDSL.create() { implicit builder => import GraphDSL.Implicits._ // Partitions stream into messages that should or should not be processed - val partition = builder.add(Partition[(Message, Long)](2, { - case (message, _) if shouldProcess(message) => 0 - case _ => 1 - })) + val partition = builder.add( + Partition[(Message, Long)]( + 2, + { + case (message, _) if shouldProcess(message) => 0 + case _ => 1 + })) // Merges stream by the index produced by zipWithIndex val merge = builder.add(MergeSequence[(Message, Long)](2)(_._2)) diff --git a/akka-docs/src/test/scala/docs/stream/operators/PartitionDocExample.scala b/akka-docs/src/test/scala/docs/stream/operators/PartitionDocExample.scala index a2019267537..9d70ce2d6d6 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/PartitionDocExample.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/PartitionDocExample.scala 
@@ -10,7 +10,7 @@ object PartitionDocExample { implicit val system: ActorSystem = ??? - //#partition + // #partition import akka.NotUsed import akka.stream.Attributes import akka.stream.Attributes.LogLevels @@ -40,5 +40,5 @@ object PartitionDocExample { }) .run() - //#partition + // #partition } diff --git a/akka-docs/src/test/scala/docs/stream/operators/SourceOperators.scala b/akka-docs/src/test/scala/docs/stream/operators/SourceOperators.scala index 91a5fcb0277..3dec28f9ed6 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/SourceOperators.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/SourceOperators.scala @@ -12,7 +12,7 @@ object SourceOperators { implicit val system: ActorSystem = ??? def fromFuture(): Unit = { - //#sourceFromFuture + // #sourceFromFuture import akka.actor.ActorSystem import akka.stream.scaladsl._ @@ -23,12 +23,12 @@ object SourceOperators { val source: Source[Int, NotUsed] = Source.future(Future.successful(10)) val sink: Sink[Int, Future[Done]] = Sink.foreach((i: Int) => println(i)) - val done: Future[Done] = source.runWith(sink) //10 - //#sourceFromFuture + val done: Future[Done] = source.runWith(sink) // 10 + // #sourceFromFuture } def actorRef(): Unit = { - //#actorRef + // #actorRef import akka.Done import akka.actor.ActorRef import akka.stream.OverflowStrategy @@ -36,10 +36,9 @@ object SourceOperators { import akka.stream.scaladsl._ val source: Source[Any, ActorRef] = Source.actorRef( - completionMatcher = { - case Done => - // complete stream immediately if we send it Done - CompletionStrategy.immediately + completionMatcher = { case Done => + // complete stream immediately if we send it Done + CompletionStrategy.immediately }, // never fail the stream because of a message failureMatcher = PartialFunction.empty, @@ -52,11 +51,11 @@ object SourceOperators { // The stream completes successfully with the following message actorRef ! 
Done - //#actorRef + // #actorRef } def actorRefWithBackpressure(): Unit = { - //#actorRefWithBackpressure + // #actorRefWithBackpressure import akka.actor.Status.Success import akka.actor.ActorRef @@ -68,8 +67,8 @@ object SourceOperators { val source: Source[String, ActorRef] = Source.actorRefWithBackpressure[String]( ackMessage = "ack", // complete when we send akka.actor.status.Success - completionMatcher = { - case _: Success => CompletionStrategy.immediately + completionMatcher = { case _: Success => + CompletionStrategy.immediately }, // do not fail on any message failureMatcher = PartialFunction.empty) @@ -82,11 +81,11 @@ object SourceOperators { // The stream completes successfully with the following message actorRef ! Success(()) - //#actorRefWithBackpressure + // #actorRefWithBackpressure } def maybe(): Unit = { - //#maybe + // #maybe import akka.stream.scaladsl._ import scala.concurrent.Promise @@ -98,6 +97,6 @@ object SourceOperators { // a new Promise is returned when the stream is materialized val promise2 = source.run() promise2.success(Some(2)) // prints 2 - //#maybe + // #maybe } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/WithContextSpec.scala b/akka-docs/src/test/scala/docs/stream/operators/WithContextSpec.scala index e8b93324a90..c092321c38b 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/WithContextSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/WithContextSpec.scala @@ -28,7 +28,7 @@ class WithContextSpec extends AkkaSpec { .map(_._1) // keep the first tuple element as stream element val mapped: SourceWithContext[String, Int, NotUsed] = sourceWithContext - // regular operators apply to the element without seeing the context + // regular operators apply to the element without seeing the context .map(s => s.reverse) // running the source and asserting the outcome @@ -63,7 +63,7 @@ class WithContextSpec extends AkkaSpec { .map(_._1) // keep the first pair element as stream element val mapped = 
flowWithContext - // regular operators apply to the element without seeing the context + // regular operators apply to the element without seeing the context .map(_.reverse) // running the flow with some sample data and asserting the outcome diff --git a/akka-docs/src/test/scala/docs/stream/operators/converters/StreamConvertersToJava.scala b/akka-docs/src/test/scala/docs/stream/operators/converters/StreamConvertersToJava.scala index 602d89b7995..6a73f3aceaf 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/converters/StreamConvertersToJava.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/converters/StreamConvertersToJava.scala @@ -23,25 +23,25 @@ import scala.concurrent.Future class StreamConvertersToJava extends AkkaSpec with Futures { "demonstrate materialization to Java8 streams" in { - //#asJavaStream + // #asJavaStream val source: Source[Int, NotUsed] = Source(0 to 9).filter(_ % 2 == 0) val sink: Sink[Int, stream.Stream[Int]] = StreamConverters.asJavaStream[Int]() val jStream: java.util.stream.Stream[Int] = source.runWith(sink) - //#asJavaStream + // #asJavaStream jStream.count should be(5) } "demonstrate conversion from Java8 streams" in { - //#fromJavaStream + // #fromJavaStream def factory(): IntStream = IntStream.rangeClosed(0, 9) val source: Source[Int, NotUsed] = StreamConverters.fromJavaStream(() => factory()).map(_.intValue()) val sink: Sink[Int, Future[immutable.Seq[Int]]] = Sink.seq[Int] val futureInts: Future[immutable.Seq[Int]] = source.toMat(sink)(Keep.right).run() - //#fromJavaStream + // #fromJavaStream whenReady(futureInts) { ints => ints should be((0 to 9).toSeq) } diff --git a/akka-docs/src/test/scala/docs/stream/operators/converters/ToFromJavaIOStreams.scala b/akka-docs/src/test/scala/docs/stream/operators/converters/ToFromJavaIOStreams.scala index 266a93300ea..8ac57513a3d 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/converters/ToFromJavaIOStreams.scala +++ 
b/akka-docs/src/test/scala/docs/stream/operators/converters/ToFromJavaIOStreams.scala @@ -23,7 +23,7 @@ class ToFromJavaIOStreams extends AkkaSpec with Futures { "demonstrate conversion from java.io.streams" in { - //#tofromJavaIOStream + // #tofromJavaIOStream val bytes = "Some random input".getBytes val inputStream = new ByteArrayInputStream(bytes) val outputStream = new ByteArrayOutputStream() @@ -36,7 +36,7 @@ class ToFromJavaIOStreams extends AkkaSpec with Futures { val eventualResult = source.via(toUpperCase).runWith(sink) - //#tofromJavaIOStream + // #tofromJavaIOStream whenReady(eventualResult) { _ => outputStream.toByteArray.map(_.toChar).mkString should be("SOME RANDOM INPUT") } @@ -44,26 +44,26 @@ class ToFromJavaIOStreams extends AkkaSpec with Futures { } "demonstrate usage as java.io.InputStream" in { - //#asJavaInputStream + // #asJavaInputStream val toUpperCase: Flow[ByteString, ByteString, NotUsed] = Flow[ByteString].map(_.map(_.toChar.toUpper.toByte)) val source: Source[ByteString, NotUsed] = Source.single(ByteString("some random input")) val sink: Sink[ByteString, InputStream] = StreamConverters.asInputStream() val inputStream: InputStream = source.via(toUpperCase).runWith(sink) - //#asJavaInputStream + // #asJavaInputStream inputStream.read() should be('S') inputStream.close() } "demonstrate usage as java.io.OutputStream" in { - //#asJavaOutputStream + // #asJavaOutputStream val source: Source[ByteString, OutputStream] = StreamConverters.asOutputStream() val sink: Sink[ByteString, Future[ByteString]] = Sink.fold[ByteString, ByteString](ByteString.empty)(_ ++ _) val (outputStream, result): (OutputStream, Future[ByteString]) = source.toMat(sink)(Keep.both).run() - //#asJavaOutputStream + // #asJavaOutputStream val bytesArray = Array.fill[Byte](3)(Random.nextInt(1024).asInstanceOf[Byte]) outputStream.write(bytesArray) outputStream.close() diff --git a/akka-docs/src/test/scala/docs/stream/operators/flow/ContraMap.scala 
b/akka-docs/src/test/scala/docs/stream/operators/flow/ContraMap.scala index 18841079421..9017e97e785 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/flow/ContraMap.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/flow/ContraMap.scala @@ -12,8 +12,8 @@ import akka.stream.scaladsl._ object ContraMap { - //#contramap + // #contramap val flow: Flow[Int, Int, NotUsed] = Flow[Int] val newFlow: Flow[String, Int, NotUsed] = flow.contramap(_.toInt) - //#contramap + // #contramap } diff --git a/akka-docs/src/test/scala/docs/stream/operators/flow/FutureFlow.scala b/akka-docs/src/test/scala/docs/stream/operators/flow/FutureFlow.scala index 8054ee51491..9466f7ede32 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/flow/FutureFlow.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/flow/FutureFlow.scala @@ -24,10 +24,9 @@ class FutureFlow { } val source: Source[String, NotUsed] = - Source(1 to 10).prefixAndTail(1).flatMapConcat { - case (List(id: Int), tail) => - // base the Future flow creation on the first element - tail.via(Flow.futureFlow(processingFlow(id))) + Source(1 to 10).prefixAndTail(1).flatMapConcat { case (List(id: Int), tail) => + // base the Future flow creation on the first element + tail.via(Flow.futureFlow(processingFlow(id))) } // #base-on-first-element } diff --git a/akka-docs/src/test/scala/docs/stream/operators/flow/StatefulMap.scala b/akka-docs/src/test/scala/docs/stream/operators/flow/StatefulMap.scala index 46bddf6001a..b38a611f9ab 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/flow/StatefulMap.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/flow/StatefulMap.scala @@ -11,20 +11,20 @@ object StatefulMap { implicit val actorSystem: ActorSystem = ??? 
def indexed(): Unit = { - //#zipWithIndex + // #zipWithIndex Source(List("A", "B", "C", "D")) .statefulMap(() => 0L)((index, elem) => (index + 1, (elem, index)), _ => None) .runForeach(println) - //prints - //(A,0) - //(B,1) - //(C,2) - //(D,3) - //#zipWithIndex + // prints + // (A,0) + // (B,1) + // (C,2) + // (D,3) + // #zipWithIndex } def bufferUntilChanged(): Unit = { - //#bufferUntilChanged + // #bufferUntilChanged Source("A" :: "B" :: "B" :: "C" :: "C" :: "C" :: "D" :: Nil) .statefulMap(() => List.empty[String])( (buffer, element) => @@ -35,16 +35,16 @@ object StatefulMap { buffer => Some(buffer)) .filter(_.nonEmpty) .runForeach(println) - //prints - //List(A) - //List(B, B) - //List(C, C, C) - //List(D) - //#bufferUntilChanged + // prints + // List(A) + // List(B, B) + // List(C, C, C) + // List(D) + // #bufferUntilChanged } def distinctUntilChanged(): Unit = { - //#distinctUntilChanged + // #distinctUntilChanged Source("A" :: "B" :: "B" :: "C" :: "C" :: "C" :: "D" :: Nil) .statefulMap(() => Option.empty[String])( (lastElement, elem) => @@ -55,20 +55,20 @@ object StatefulMap { _ => None) .collect { case Some(elem) => elem } .runForeach(println) - //prints - //A - //B - //C - //D - //#distinctUntilChanged + // prints + // A + // B + // C + // D + // #distinctUntilChanged } def statefulMapConcatLike(): Unit = { - //#statefulMapConcatLike + // #statefulMapConcatLike Source(1 to 10) .statefulMap(() => List.empty[Int])( (state, elem) => { - //grouped 3 elements into a list + // grouped 3 elements into a list val newState = elem :: state if (newState.size == 3) (Nil, newState.reverse) @@ -78,17 +78,17 @@ object StatefulMap { state => Some(state.reverse)) .mapConcat(identity) .runForeach(println) - //prints - //1 - //2 - //3 - //4 - //5 - //6 - //7 - //8 - //9 - //10 - //#statefulMapConcatLike + // prints + // 1 + // 2 + // 3 + // 4 + // 5 + // 6 + // 7 + // 8 + // 9 + // 10 + // #statefulMapConcatLike } } diff --git 
a/akka-docs/src/test/scala/docs/stream/operators/sink/AsPublisher.scala b/akka-docs/src/test/scala/docs/stream/operators/sink/AsPublisher.scala index 6b881cd6376..c3ec421fb68 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sink/AsPublisher.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sink/AsPublisher.scala @@ -13,15 +13,15 @@ object AsPublisher { implicit val ec: ExecutionContextExecutor = system.dispatcher def asPublisherExample() = { def asPublisherExample() = { - //#asPublisher + // #asPublisher val source = Source(1 to 5) val publisher = source.runWith(Sink.asPublisher(false)) Source.fromPublisher(publisher).runWith(Sink.foreach(println)) // 1 2 3 4 5 Source .fromPublisher(publisher) - .runWith(Sink.foreach(println)) //No output, because the source was not able to subscribe to the publisher. - //#asPublisher + .runWith(Sink.foreach(println)) // No output, because the source was not able to subscribe to the publisher. + // #asPublisher } } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sink/Cancelled.scala b/akka-docs/src/test/scala/docs/stream/operators/sink/Cancelled.scala index 0b94b9bf061..0596144c61c 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sink/Cancelled.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sink/Cancelled.scala @@ -14,9 +14,9 @@ object Cancelled { implicit val system: ActorSystem = ??? 
implicit val ec: ExecutionContextExecutor = system.dispatcher def cancelledExample(): NotUsed = { - //#cancelled + // #cancelled val source = Source(1 to 5) source.runWith(Sink.cancelled) - //#cancelled + // #cancelled } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sink/Collection.scala b/akka-docs/src/test/scala/docs/stream/operators/sink/Collection.scala index 262ce22e1b2..fb3abbf048b 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sink/Collection.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sink/Collection.scala @@ -13,11 +13,11 @@ object Collection { implicit val system: ActorSystem = ??? implicit val ec: ExecutionContextExecutor = system.dispatcher def collectionExample(): Unit = { - //#collection + // #collection val source = Source(1 to 5) val result: Future[List[Int]] = source.runWith(Sink.collection[Int, List[Int]]) result.foreach(println) - //List(1, 2, 3, 4, 5) - //#collection + // List(1, 2, 3, 4, 5) + // #collection } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sink/Fold.scala b/akka-docs/src/test/scala/docs/stream/operators/sink/Fold.scala index b65060df435..5b13e82052b 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sink/Fold.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sink/Fold.scala @@ -13,11 +13,11 @@ object Fold { implicit val system: ActorSystem = ??? 
implicit val ec: ExecutionContextExecutor = system.dispatcher def foldExample: Future[Unit] = { - //#fold + // #fold val source = Source(1 to 100) val result: Future[Int] = source.runWith(Sink.fold(0)((acc, element) => acc + element)) result.map(println) - //5050 - //#fold + // 5050 + // #fold } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sink/HeadOption.scala b/akka-docs/src/test/scala/docs/stream/operators/sink/HeadOption.scala index 1bf8f16a0f4..9efc892922c 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sink/HeadOption.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sink/HeadOption.scala @@ -13,11 +13,11 @@ object HeadOption { implicit val system: ActorSystem = ??? implicit val ec: ExecutionContextExecutor = system.dispatcher def headOptionExample(): Unit = { - //#headoption + // #headoption val source = Source.empty val result: Future[Option[Int]] = source.runWith(Sink.headOption) result.foreach(println) - //None - //#headoption + // None + // #headoption } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sink/Ignore.scala b/akka-docs/src/test/scala/docs/stream/operators/sink/Ignore.scala index a1b4e1efad1..07d41cbf87f 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sink/Ignore.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sink/Ignore.scala @@ -17,12 +17,12 @@ object Ignore { implicit val system: ActorSystem = ??? 
def ignoreExample(): Unit = { - //#ignore + // #ignore val lines: Source[String, NotUsed] = readLinesFromFile() val databaseIds: Source[UUID, NotUsed] = lines.mapAsync(1)(line => saveLineToDatabase(line)) databaseIds.mapAsync(1)(uuid => writeIdToFile(uuid)).runWith(Sink.ignore) - //#ignore + // #ignore } private def readLinesFromFile(): Source[String, NotUsed] = diff --git a/akka-docs/src/test/scala/docs/stream/operators/source/From.scala b/akka-docs/src/test/scala/docs/stream/operators/source/From.scala index 3d07dd0ce6f..6a19b8d88ae 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/source/From.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/source/From.scala @@ -14,23 +14,23 @@ object From { implicit val system: ActorSystem = null def fromIteratorSample(): Unit = { - //#from-iterator + // #from-iterator Source.fromIterator(() => (1 to 3).iterator).runForeach(println) // could print // 1 // 2 // 3 - //#from-iterator + // #from-iterator } def fromJavaStreamSample(): Unit = { - //#from-javaStream + // #from-javaStream Source.fromJavaStream(() => IntStream.rangeClosed(1, 3)).runForeach(println) // could print // 1 // 2 // 3 - //#from-javaStream + // #from-javaStream } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/source/Restart.scala b/akka-docs/src/test/scala/docs/stream/operators/source/Restart.scala index 6ef308b88dd..95bce12b72e 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/source/Restart.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/source/Restart.scala @@ -25,7 +25,7 @@ object Restart extends App { case class CantConnectToDatabase(msg: String) extends RuntimeException(msg) with NoStackTrace def onRestartWithBackoffInnerFailure(): Unit = { - //#restart-failure-inner-failure + // #restart-failure-inner-failure // could throw if for example it used a database connection to get rows val flakySource: Source[() => Int, NotUsed] = Source(List(() => 1, () => 2, () => 3, () => throw 
CantConnectToDatabase("darn"))) @@ -34,27 +34,27 @@ object Restart extends App { RestartSettings(minBackoff = 1.second, maxBackoff = 10.seconds, randomFactor = 0.1))(() => flakySource) forever.runWith(Sink.foreach(nr => system.log.info("{}", nr()))) // logs - //[INFO] [12/10/2019 13:51:58.300] [default-akka.test.stream-dispatcher-7] [akka.actor.ActorSystemImpl(default)] 1 - //[INFO] [12/10/2019 13:51:58.301] [default-akka.test.stream-dispatcher-7] [akka.actor.ActorSystemImpl(default)] 2 - //[INFO] [12/10/2019 13:51:58.302] [default-akka.test.stream-dispatcher-7] [akka.actor.ActorSystemImpl(default)] 3 - //[WARN] [12/10/2019 13:51:58.310] [default-akka.test.stream-dispatcher-7] [RestartWithBackoffSource(akka://default)] Restarting graph due to failure. stack_trace: (docs.stream.operators.source.Restart$CantConnectToDatabase: darn) + // [INFO] [12/10/2019 13:51:58.300] [default-akka.test.stream-dispatcher-7] [akka.actor.ActorSystemImpl(default)] 1 + // [INFO] [12/10/2019 13:51:58.301] [default-akka.test.stream-dispatcher-7] [akka.actor.ActorSystemImpl(default)] 2 + // [INFO] [12/10/2019 13:51:58.302] [default-akka.test.stream-dispatcher-7] [akka.actor.ActorSystemImpl(default)] 3 + // [WARN] [12/10/2019 13:51:58.310] [default-akka.test.stream-dispatcher-7] [RestartWithBackoffSource(akka://default)] Restarting graph due to failure. stack_trace: (docs.stream.operators.source.Restart$CantConnectToDatabase: darn) // --> 1 second gap - //[INFO] [12/10/2019 13:51:59.379] [default-akka.test.stream-dispatcher-8] [akka.actor.ActorSystemImpl(default)] 1 - //[INFO] [12/10/2019 13:51:59.382] [default-akka.test.stream-dispatcher-8] [akka.actor.ActorSystemImpl(default)] 2 - //[INFO] [12/10/2019 13:51:59.383] [default-akka.test.stream-dispatcher-8] [akka.actor.ActorSystemImpl(default)] 3 - //[WARN] [12/10/2019 13:51:59.386] [default-akka.test.stream-dispatcher-8] [RestartWithBackoffSource(akka://default)] Restarting graph due to failure. 
stack_trace: (docs.stream.operators.source.Restart$CantConnectToDatabase: darn) - //--> 2 second gap - //[INFO] [12/10/2019 13:52:01.594] [default-akka.test.stream-dispatcher-8] [akka.actor.ActorSystemImpl(default)] 1 - //[INFO] [12/10/2019 13:52:01.595] [default-akka.test.stream-dispatcher-8] [akka.actor.ActorSystemImpl(default)] 2 - //[INFO] [12/10/2019 13:52:01.595] [default-akka.test.stream-dispatcher-8] [akka.actor.ActorSystemImpl(default)] 3 - //[WARN] [12/10/2019 13:52:01.596] [default-akka.test.stream-dispatcher-8] [RestartWithBackoffSource(akka://default)] Restarting graph due to failure. stack_trace: (docs.stream.operators.source.Restart$CantConnectToDatabase: darn) - //#restart-failure-inner-failure + // [INFO] [12/10/2019 13:51:59.379] [default-akka.test.stream-dispatcher-8] [akka.actor.ActorSystemImpl(default)] 1 + // [INFO] [12/10/2019 13:51:59.382] [default-akka.test.stream-dispatcher-8] [akka.actor.ActorSystemImpl(default)] 2 + // [INFO] [12/10/2019 13:51:59.383] [default-akka.test.stream-dispatcher-8] [akka.actor.ActorSystemImpl(default)] 3 + // [WARN] [12/10/2019 13:51:59.386] [default-akka.test.stream-dispatcher-8] [RestartWithBackoffSource(akka://default)] Restarting graph due to failure. stack_trace: (docs.stream.operators.source.Restart$CantConnectToDatabase: darn) + // --> 2 second gap + // [INFO] [12/10/2019 13:52:01.594] [default-akka.test.stream-dispatcher-8] [akka.actor.ActorSystemImpl(default)] 1 + // [INFO] [12/10/2019 13:52:01.595] [default-akka.test.stream-dispatcher-8] [akka.actor.ActorSystemImpl(default)] 2 + // [INFO] [12/10/2019 13:52:01.595] [default-akka.test.stream-dispatcher-8] [akka.actor.ActorSystemImpl(default)] 3 + // [WARN] [12/10/2019 13:52:01.596] [default-akka.test.stream-dispatcher-8] [RestartWithBackoffSource(akka://default)] Restarting graph due to failure. 
stack_trace: (docs.stream.operators.source.Restart$CantConnectToDatabase: darn) + // #restart-failure-inner-failure } def onRestartWithBackoffInnerComplete(): Unit = { - //#restart-failure-inner-complete + // #restart-failure-inner-complete val finiteSource = Source.tick(1.second, 1.second, "tick").take(3) val forever = RestartSource.onFailuresWithBackoff(RestartSettings(1.second, 10.seconds, 0.1))(() => finiteSource) forever.runWith(Sink.foreach(println)) @@ -62,11 +62,11 @@ object Restart extends App { // tick // tick // tick - //#restart-failure-inner-complete + // #restart-failure-inner-complete } def onRestartWitFailureKillSwitch(): Unit = { - //#restart-failure-inner-complete-kill-switch + // #restart-failure-inner-complete-kill-switch val flakySource: Source[() => Int, NotUsed] = Source(List(() => 1, () => 2, () => 3, () => throw CantConnectToDatabase("darn"))) val stopRestarting: UniqueKillSwitch = @@ -75,9 +75,9 @@ object Restart extends App { .viaMat(KillSwitches.single)(Keep.right) .toMat(Sink.foreach(nr => println(s"Nr ${nr()}")))(Keep.left) .run() - //... from some where else + // ... 
from some where else // stop the source from restarting stopRestarting.shutdown() - //#restart-failure-inner-complete-kill-switch + // #restart-failure-inner-complete-kill-switch } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/source/Unfold.scala b/akka-docs/src/test/scala/docs/stream/operators/source/Unfold.scala index 41fe1c5ff71..dd138e1c802 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/source/Unfold.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/source/Unfold.scala @@ -19,9 +19,8 @@ object Unfold { // #fibonacci def fibonacci: Source[BigInt, NotUsed] = - Source.unfold((BigInt(0), BigInt(1))) { - case (a, b) => - Some(((b, a + b), a)) + Source.unfold((BigInt(0), BigInt(1))) { case (a, b) => + Some(((b, a + b), a)) } // #fibonacci diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Collect.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Collect.scala index cd0f81a7b14..1a6524138d9 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Collect.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Collect.scala @@ -8,25 +8,25 @@ import akka.NotUsed import akka.stream.scaladsl.Flow object Collect { - //#collect-elements + // #collect-elements trait Message final case class Ping(id: Int) extends Message final case class Pong(id: Int) - //#collect-elements + // #collect-elements def collectExample(): Unit = { - //#collect + // #collect val flow: Flow[Message, Pong, NotUsed] = Flow[Message].collect { case Ping(id) if id != 0 => Pong(id) } - //#collect + // #collect } def collectType(): Unit = { - //#collectType + // #collectType val flow: Flow[Message, Pong, NotUsed] = Flow[Message].collectType[Ping].filter(_.id != 0).map(p => Pong(p.id)) - //#collectType + // #collectType } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/CompletionTimeout.scala 
b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/CompletionTimeout.scala index 3ed5fdaae45..f5f9f590ed3 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/CompletionTimeout.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/CompletionTimeout.scala @@ -15,9 +15,9 @@ object CompletionTimeout { implicit val system: ActorSystem = ??? implicit val ec: ExecutionContextExecutor = system.dispatcher def completionTimeoutExample: Future[Done] = { - //#completionTimeout + // #completionTimeout val source = Source(1 to 10000).map(number => number * number) source.completionTimeout(10.milliseconds).run() - //#completionTimeout + // #completionTimeout } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Conflate.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Conflate.scala index ea38a2b26e9..6c857e7fec9 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Conflate.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Conflate.scala @@ -13,7 +13,7 @@ import akka.stream.scaladsl.Source object Conflate { def conflateExample(): Unit = { - //#conflate + // #conflate import scala.concurrent.duration._ Source @@ -21,11 +21,11 @@ object Conflate { .throttle(10, per = 1.second) // faster upstream .conflate((acc, el) => acc + el) // acc: Int, el: Int .throttle(1, per = 1.second) // slow downstream - //#conflate + // #conflate } def conflateWithSeedExample(): Unit = { - //#conflateWithSeed + // #conflateWithSeed import scala.concurrent.duration._ case class Summed(i: Int) { @@ -37,7 +37,7 @@ object Conflate { .throttle(10, per = 1.second) // faster upstream .conflateWithSeed(el => Summed(el))((acc, el) => acc.sum(Summed(el))) // (Summed, Int) => Summed .throttle(1, per = 1.second) // slow downstream - //#conflateWithSeed + // #conflateWithSeed } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/ExtrapolateAndExpand.scala 
b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/ExtrapolateAndExpand.scala index ff6b466a123..0cbed235803 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/ExtrapolateAndExpand.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/ExtrapolateAndExpand.scala @@ -22,7 +22,6 @@ import scala.concurrent.duration._ import scala.util.Random /** - * */ object ExtrapolateAndExpandMain extends App { implicit val sys: ActorSystem = ActorSystem("25fps-stream") @@ -42,10 +41,12 @@ object ExtrapolateAndExpand { // #extrapolate // if upstream is too slow, produce copies of the last frame but grayed out. val rateControl: Flow[Frame, Frame, NotUsed] = - Flow[Frame].extrapolate((frame: Frame) => { - val grayedOut = frame.withFilter(Gray) - Iterator.continually(grayedOut) - }, Some(Frame.blackFrame)) + Flow[Frame].extrapolate( + (frame: Frame) => { + val grayedOut = frame.withFilter(Gray) + Iterator.continually(grayedOut) + }, + Some(Frame.blackFrame)) val videoSource: Source[Frame, NotUsed] = networkSource.via(decode).via(rateControl) @@ -64,7 +65,7 @@ object ExtrapolateAndExpand { Flow[Frame].expand((frame: Frame) => { val watermarked = frame.withFilter(Watermark) val grayedOut = frame.withFilter(Gray) - (Iterator.single(watermarked) ++ Iterator.continually(grayedOut)) + Iterator.single(watermarked) ++ Iterator.continually(grayedOut) }) val watermarkedVideoSource: Source[Frame, NotUsed] = diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Fold.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Fold.scala index 229ba638e4e..b8a342a1d56 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Fold.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Fold.scala @@ -11,17 +11,17 @@ import akka.stream.scaladsl.Source //#imports object Fold extends App { - //#histogram + // #histogram case class Histogram(low: Long = 0, high: Long = 0) { def add(i: Int): 
Histogram = if (i < 100) copy(low = low + 1) else copy(high = high + 1) } - //#histogram + // #histogram implicit val sys: ActorSystem = ActorSystem() - //#fold + // #fold Source(1 to 150).fold(Histogram())((acc, n) => acc.add(n)).runForeach(println) // Prints: Histogram(99,51) - //#fold + // #fold } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/FoldAsync.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/FoldAsync.scala index b138b2a61a8..a645ff76987 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/FoldAsync.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/FoldAsync.scala @@ -16,14 +16,15 @@ object FoldAsync extends App { implicit val system: ActorSystem = ActorSystem() implicit val ec: ExecutionContext = system.dispatcher - //#foldAsync + // #foldAsync case class Histogram(low: Long = 0, high: Long = 0) { def add(i: Int): Future[Histogram] = - if (i < 100) Future { copy(low = low + 1) } else Future { copy(high = high + 1) } + if (i < 100) Future { copy(low = low + 1) } + else Future { copy(high = high + 1) } } Source(1 to 150).foldAsync(Histogram())((acc, n) => acc.add(n)).runForeach(println) // Prints: Histogram(99,51) - //#foldAsync + // #foldAsync } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/GroupBy.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/GroupBy.scala index 58bda2fdfd8..403b574e97b 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/GroupBy.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/GroupBy.scala @@ -11,15 +11,15 @@ object GroupBy { def groupBySourceExample(): Unit = { implicit val system: ActorSystem = ??? 
- //#groupBy + // #groupBy Source(1 to 10) .groupBy(maxSubstreams = 2, _ % 2) // create two sub-streams with odd and even numbers .reduce(_ + _) // for each sub-stream, sum its elements .mergeSubstreams // merge back into a stream .runForeach(println) - //30 - //25 - //#groupBy + // 30 + // 25 + // #groupBy } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Grouped.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Grouped.scala index 2d85a7180b6..d96f51fe8ba 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Grouped.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Grouped.scala @@ -11,7 +11,7 @@ object Grouped { implicit val system: ActorSystem = ActorSystem() - //#grouped + // #grouped Source(1 to 7).grouped(3).runForeach(println) // Vector(1, 2, 3) // Vector(4, 5, 6) @@ -21,7 +21,7 @@ object Grouped { // 6 (= 1 + 2 + 3) // 15 (= 4 + 5 + 6) // 7 (= 7) - //#grouped + // #grouped } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/GroupedWeighted.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/GroupedWeighted.scala index e810cc1cf07..1aead2dcfa8 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/GroupedWeighted.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/GroupedWeighted.scala @@ -12,7 +12,7 @@ object GroupedWeighted { implicit val system: ActorSystem = ActorSystem() - //#groupedWeighted + // #groupedWeighted val collections = immutable.Iterable(Seq(1, 2), Seq(3, 4), Seq(5, 6)) Source[Seq[Int]](collections).groupedWeighted(4)(_.length).runForeach(println) // Vector(Seq(1, 2), Seq(3, 4)) @@ -21,7 +21,7 @@ object GroupedWeighted { Source[Seq[Int]](collections).groupedWeighted(3)(_.length).runForeach(println) // Vector(Seq(1, 2), Seq(3, 4)) // Vector(Seq(5, 6)) - //#groupedWeighted + // #groupedWeighted } } diff --git 
a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Intersperse.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Intersperse.scala index 6a4fe093663..e1ab7eb8a34 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Intersperse.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Intersperse.scala @@ -12,11 +12,11 @@ object Intersperse extends App { implicit val system: ActorSystem = ActorSystem() - //#intersperse + // #intersperse Source(1 to 4).map(_.toString).intersperse("[", ", ", "]").runWith(Sink.foreach(print)) // prints // [1, 2, 3, 4] - //#intersperse + // #intersperse system.terminate() } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Log.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Log.scala index a744bff4716..f6434f5b46b 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Log.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Log.scala @@ -13,13 +13,13 @@ import akka.stream.Attributes object Log { def logExample(): Unit = { Flow[String] - //#log + // #log .log(name = "myStream") .addAttributes( Attributes.logLevels( onElement = Attributes.LogLevels.Off, onFinish = Attributes.LogLevels.Info, onFailure = Attributes.LogLevels.Error)) - //#log + // #log } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/LogWithMarker.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/LogWithMarker.scala index 04493bffeb5..af848947c7a 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/LogWithMarker.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/LogWithMarker.scala @@ -14,13 +14,13 @@ import akka.stream.Attributes object LogWithMarker { def logWithMarkerExample(): Unit = { Flow[String] - //#logWithMarker + // #logWithMarker .logWithMarker(name = "myStream", e => LogMarker(name = "myMarker", properties = Map("element" -> e))) 
.addAttributes( Attributes.logLevels( onElement = Attributes.LogLevels.Off, onFinish = Attributes.LogLevels.Info, onFailure = Attributes.LogLevels.Error)) - //#logWithMarker + // #logWithMarker } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MapAsyncs.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MapAsyncs.scala index 8472e229e80..2a9d7f89a70 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MapAsyncs.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MapAsyncs.scala @@ -58,7 +58,8 @@ object CommonMapAsync { } val seqNr = countsPerEntity.getOrElse(entityId, 0) + 1 (countsPerEntity + (entityId -> seqNr), EntityEvent(entityId, seqNr)) - }, { _ => + }, + { _ => None }) } else throw new AssertionError("pro forma") @@ -180,10 +181,9 @@ object MapAsyncPartitioned extends App { } eventsForEntities.zipWithIndex - .map { - case (event, count) => - println(s"Received event $event at offset $count from message broker") - event + .map { case (event, count) => + println(s"Received event $event at offset $count from message broker") + event } .mapAsyncPartitioned(parallelism = 10, perPartition = 1)(partitioner) { (event, partition) => println(s"Processing event $event from partition $partition") diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MapConcat.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MapConcat.scala index 86c66f81aba..9664ceccacb 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MapConcat.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MapConcat.scala @@ -16,7 +16,7 @@ object MapConcat { implicit val system: ActorSystem = ActorSystem() implicit val ec: ExecutionContext = system.dispatcher - //#map-concat + // #map-concat def duplicate(i: Int): List[Int] = List(i, i) Source(1 to 3).mapConcat(i => duplicate(i)).runForeach(println) @@ -27,7 +27,7 @@ object MapConcat { // 2 // 3 // 3 
- //#map-concat + // #map-concat } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MapError.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MapError.scala index 7353bc9b330..deb588abfa8 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MapError.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MapError.scala @@ -16,12 +16,11 @@ object MapError extends App { implicit val system: ActorSystem = ActorSystem() implicit val ec: ExecutionContext = system.dispatcher - //#map-error + // #map-error Source(-1 to 1) .map(1 / _) - .mapError { - case _: ArithmeticException => - new UnsupportedOperationException("Divide by Zero Operation is not supported.") with NoStackTrace + .mapError { case _: ArithmeticException => + new UnsupportedOperationException("Divide by Zero Operation is not supported.") with NoStackTrace } .runWith(Sink.seq) .onComplete { @@ -30,6 +29,6 @@ object MapError extends App { } // prints "Divide by Zero Operation is not supported." - //#map-error + // #map-error } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MapWithResource.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MapWithResource.scala index 6bd4b4cb385..067368e833d 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MapWithResource.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MapWithResource.scala @@ -11,7 +11,7 @@ import java.net.URL object MapWithResource { implicit val actorSystem: ActorSystem = ??? - //#mapWithResource-blocking-api + // #mapWithResource-blocking-api trait DBDriver { def create(url: URL, userName: String, password: String): Connection } @@ -19,7 +19,7 @@ object MapWithResource { def close(): Unit } trait Database { - //blocking query + // blocking query def doQuery(connection: Connection, query: String): QueryResult = ??? 
} trait QueryResult { @@ -30,14 +30,14 @@ object MapWithResource { def toList(): List[DataBaseRecord] } trait DataBaseRecord - //#mapWithResource-blocking-api + // #mapWithResource-blocking-api val url: URL = ??? val userName = "Akka" val password = "Hakking" val dbDriver: DBDriver = ??? def mapWithResourceExample(): Unit = { - //#mapWithResource - //some database for JVM + // #mapWithResource + // some database for JVM val db: Database = ??? Source( List( @@ -51,6 +51,6 @@ object MapWithResource { }) .mapConcat(identity) .runForeach(println) - //#mapWithResource + // #mapWithResource } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MergeLatest.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MergeLatest.scala index 525bceab465..0c13c0c327b 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MergeLatest.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/MergeLatest.scala @@ -9,14 +9,14 @@ import akka.stream.scaladsl.Source object MergeLatest extends App { implicit val system: ActorSystem = ActorSystem() - //#mergeLatest + // #mergeLatest val prices = Source(List(100, 101, 99, 103)) val quantity = Source(List(1, 3, 4, 2)) prices .mergeLatest(quantity) - .map { - case price :: quantity :: Nil => price * quantity + .map { case price :: quantity :: Nil => + price * quantity } .runForeach(println) @@ -28,5 +28,5 @@ object MergeLatest extends App { // 396 // 412 // 206 - //#mergeLatest + // #mergeLatest } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Reduce.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Reduce.scala index e66f30b2114..a9caad2426d 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Reduce.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Reduce.scala @@ -13,11 +13,11 @@ object Reduce { implicit val system: ActorSystem = ??? 
implicit val ec: ExecutionContextExecutor = system.dispatcher def reduceExample: Future[Unit] = { - //#reduceExample + // #reduceExample val source = Source(1 to 100).reduce((acc, element) => acc + element) val result: Future[Int] = source.runWith(Sink.head) result.map(println) - //5050 - //#reduceExample + // 5050 + // #reduceExample } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Scan.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Scan.scala index 7eee27398b5..019c65a730f 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Scan.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Scan.scala @@ -11,7 +11,7 @@ object Scan { implicit val system: ActorSystem = ActorSystem() - //#scan + // #scan val source = Source(1 to 5) source.scan(0)((acc, x) => acc + x).runForeach(println) // 0 (= 0) @@ -20,7 +20,7 @@ object Scan { // 6 (= 0 + 1 + 2 + 3) // 10 (= 0 + 1 + 2 + 3 + 4) // 15 (= 0 + 1 + 2 + 3 + 4 + 5) - //#scan + // #scan } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/ScanAsync.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/ScanAsync.scala index a0cf841b012..b4d61b2f15a 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/ScanAsync.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/ScanAsync.scala @@ -17,7 +17,7 @@ object ScanAsync { implicit val system: ActorSystem = ActorSystem() implicit val ec: ExecutionContext = system.dispatcher - //#scan-async + // #scan-async def asyncFunction(acc: Int, next: Int): Future[Int] = Future { acc + next } @@ -30,7 +30,7 @@ object ScanAsync { // 6 (= 0 + 1 + 2 + 3) // 10 (= 0 + 1 + 2 + 3 + 4) // 15 (= 0 + 1 + 2 + 3 + 4 + 5) - //#scan-async + // #scan-async } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Sliding.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Sliding.scala index 714ae8c376e..e02f82fd71c 
100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Sliding.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Sliding.scala @@ -11,28 +11,28 @@ object Sliding { implicit val system: ActorSystem = ??? def slidingExample1(): Unit = { - //#sliding-1 + // #sliding-1 val source = Source(1 to 4) source.sliding(2).runForeach(println) // prints: // Vector(1, 2) // Vector(2, 3) // Vector(3, 4) - //#sliding-1 + // #sliding-1 } def slidingExample2(): Unit = { - //#sliding-2 + // #sliding-2 val source = Source(1 to 4) source.sliding(n = 3, step = 2).runForeach(println) // prints: // Vector(1, 2, 3) // Vector(3, 4) - shorter because stream ended before we got 3 elements - //#sliding-2 + // #sliding-2 } def slidingExample3(): Unit = { - //#moving-average + // #moving-average val numbers = Source(1 :: 3 :: 10 :: 2 :: 3 :: 4 :: 2 :: 10 :: 11 :: Nil) val movingAverage = numbers.sliding(5).map(window => window.sum.toFloat / window.size) movingAverage.runForeach(println) @@ -42,7 +42,7 @@ object Sliding { // 4.2 = average of 10, 2, 3, 4, 2 // 4.2 = average of 2, 3, 4, 2, 10 // 6.0 = average of 3, 4, 2, 10, 11 - //#moving-average + // #moving-average } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Split.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Split.scala index 84bd5a080d4..9a2cf981798 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Split.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Split.scala @@ -19,7 +19,7 @@ object Split { implicit val system: ActorSystem = ActorSystem() - //#splitWhen + // #splitWhen Source(1 to 100) .throttle(1, 100.millis) .map(elem => (elem, Instant.now())) @@ -28,14 +28,13 @@ object Split { // keep track of time bucket (one per second) var currentTimeBucket = LocalDateTime.ofInstant(Instant.ofEpochMilli(0), ZoneOffset.UTC) - { - case (elem, timestamp) => - val time = LocalDateTime.ofInstant(timestamp, 
ZoneOffset.UTC) - val bucket = time.withNano(0) - val newBucket = bucket != currentTimeBucket - if (newBucket) - currentTimeBucket = bucket - List((elem, newBucket)) + { case (elem, timestamp) => + val time = LocalDateTime.ofInstant(timestamp, ZoneOffset.UTC) + val bucket = time.withNano(0) + val newBucket = bucket != currentTimeBucket + if (newBucket) + currentTimeBucket = bucket + List((elem, newBucket)) } }) .splitWhen(_._2) // split when time bucket changes @@ -54,7 +53,7 @@ object Split { // 10 // 10 // 7 - //#splitWhen + // #splitWhen } def splitAfterExample(args: Array[String]): Unit = { @@ -62,7 +61,7 @@ object Split { implicit val system: ActorSystem = ActorSystem() - //#splitAfter + // #splitAfter Source(1 to 100) .throttle(1, 100.millis) .map(elem => (elem, Instant.now())) @@ -95,7 +94,7 @@ object Split { // 6 // note that the very last element is never included due to sliding, // but that would not be problem for an infinite stream - //#splitAfter + // #splitAfter } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Throttle.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Throttle.scala index 63745d67682..8a55ca49a90 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Throttle.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Throttle.scala @@ -13,7 +13,6 @@ import akka.stream.scaladsl.Source import scala.concurrent.duration._ /** - * */ object Throttle extends App { diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Watch.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Watch.scala index e1993f59dfa..5b4652fca3a 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Watch.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/Watch.scala @@ -14,13 +14,13 @@ object Watch { def someActor(): ActorRef = ??? 
def watchExample(): Unit = { - //#watch + // #watch val ref: ActorRef = someActor() val flow: Flow[String, String, NotUsed] = - Flow[String].watch(ref).recover { - case _: WatchedActorTerminatedException => s"$ref terminated" + Flow[String].watch(ref).recover { case _: WatchedActorTerminatedException => + s"$ref terminated" } - //#watch + // #watch } } diff --git a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/WatchTermination.scala b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/WatchTermination.scala index 98d686b8161..de0acc5ee53 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/WatchTermination.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/sourceorflow/WatchTermination.scala @@ -16,16 +16,15 @@ object WatchTermination { implicit val system: ActorSystem = ??? implicit val ec: ExecutionContext = ??? - //#watchTermination + // #watchTermination Source(1 to 5) - .watchTermination()( - (prevMatValue, future) => - // this function will be run when the stream terminates - // the Future provided as a second parameter indicates whether the stream completed successfully or failed - future.onComplete { - case Failure(exception) => println(exception.getMessage) - case Success(_) => println(s"The stream materialized $prevMatValue") - }) + .watchTermination()((prevMatValue, future) => + // this function will be run when the stream terminates + // the Future provided as a second parameter indicates whether the stream completed successfully or failed + future.onComplete { + case Failure(exception) => println(exception.getMessage) + case Success(_) => println(s"The stream materialized $prevMatValue") + }) .runForeach(println) /* Prints: @@ -50,6 +49,6 @@ object WatchTermination { 2 Boom */ - //#watchTermination + // #watchTermination } } diff --git a/akka-docs/src/test/scala/docs/testkit/ParentChildSpec.scala b/akka-docs/src/test/scala/docs/testkit/ParentChildSpec.scala index 37b0fb40cf1..17f6545e3cd 100644 --- 
a/akka-docs/src/test/scala/docs/testkit/ParentChildSpec.scala +++ b/akka-docs/src/test/scala/docs/testkit/ParentChildSpec.scala @@ -16,9 +16,7 @@ import akka.actor.ActorRefFactory import akka.testkit.TestKit import org.scalatest.BeforeAndAfterAll -/** - * Parent-Child examples - */ +/** Parent-Child examples */ //#test-example class Parent extends Actor { val child = context.actorOf(Props[Child](), "child") @@ -31,16 +29,16 @@ class Parent extends Actor { } class Child extends Actor { - def receive = { - case "ping" => context.parent ! "pong" + def receive = { case "ping" => + context.parent ! "pong" } } //#test-example //#test-dependentchild class DependentChild(parent: ActorRef) extends Actor { - def receive = { - case "ping" => parent ! "pong" + def receive = { case "ping" => + parent ! "pong" } } //#test-dependentchild @@ -66,12 +64,10 @@ class GenericDependentParent(childMaker: ActorRefFactory => ActorRef) extends Ac } //#test-dependentparent -/** - * Test specification - */ +/** Test specification */ class MockedChild extends Actor { - def receive = { - case "ping" => sender() ! "pong" + def receive = { case "ping" => + sender() ! 
"pong" } } @@ -105,23 +101,23 @@ class ParentChildSpec extends AnyWordSpec with Matchers with TestKitBase with Be "A GenericDependentParent" should { "be tested with a child probe" in { val probe = TestProbe() - //#child-maker-test + // #child-maker-test val maker = (_: ActorRefFactory) => probe.ref val parent = system.actorOf(Props(new GenericDependentParent(maker))) - //#child-maker-test + // #child-maker-test probe.send(parent, "pingit") probe.expectMsg("ping") } "demonstrate production version of child creator" in { - //#child-maker-prod + // #child-maker-prod val maker = (f: ActorRefFactory) => f.actorOf(Props(new Child)) val parent = system.actorOf(Props(new GenericDependentParent(maker))) - //#child-maker-prod + // #child-maker-prod } } - //#test-TestProbe-parent + // #test-TestProbe-parent "A TestProbe serving as parent" should { "test its child responses" in { val parent = TestProbe() @@ -130,9 +126,9 @@ class ParentChildSpec extends AnyWordSpec with Matchers with TestKitBase with Be parent.expectMsg("pong") } } - //#test-TestProbe-parent + // #test-TestProbe-parent - //#test-fabricated-parent + // #test-fabricated-parent "A fabricated parent" should { "test its child responses" in { val proxy = TestProbe() @@ -148,5 +144,5 @@ class ParentChildSpec extends AnyWordSpec with Matchers with TestKitBase with Be proxy.expectMsg("pong") } } - //#test-fabricated-parent + // #test-fabricated-parent } diff --git a/akka-docs/src/test/scala/docs/testkit/PlainWordSpec.scala b/akka-docs/src/test/scala/docs/testkit/PlainWordSpec.scala index 13773018680..8821dca0964 100644 --- a/akka-docs/src/test/scala/docs/testkit/PlainWordSpec.scala +++ b/akka-docs/src/test/scala/docs/testkit/PlainWordSpec.scala @@ -18,7 +18,7 @@ class MySpec() with AnyWordSpecLike with Matchers with BeforeAndAfterAll { - //#implicit-sender + // #implicit-sender override def afterAll(): Unit = { TestKit.shutdownActorSystem(system) diff --git 
a/akka-docs/src/test/scala/docs/testkit/TestKitUsageSpec.scala b/akka-docs/src/test/scala/docs/testkit/TestKitUsageSpec.scala index 14fcf48cd7a..951578fa0e4 100644 --- a/akka-docs/src/test/scala/docs/testkit/TestKitUsageSpec.scala +++ b/akka-docs/src/test/scala/docs/testkit/TestKitUsageSpec.scala @@ -23,9 +23,7 @@ import akka.testkit.{ DefaultTimeout, ImplicitSender, TestActors, TestKit } import scala.concurrent.duration._ import scala.collection.immutable -/** - * a Test to show some TestKit examples - */ +/** a Test to show some TestKit examples */ class TestKitUsageSpec extends TestKit(ActorSystem("TestKitUsageSpec", ConfigFactory.parseString(TestKitUsageSpec.config))) with DefaultTimeout @@ -79,8 +77,8 @@ class TestKitUsageSpec filterRef ! "text" filterRef ! 1 - receiveWhile(500 millis) { - case msg: String => messages = msg +: messages + receiveWhile(500 millis) { case msg: String => + messages = msg +: messages } } messages.length should be(3) @@ -90,13 +88,13 @@ class TestKitUsageSpec "A SequencingActor" should { "receive an interesting message at some point " in { within(500 millis) { - ignoreMsg { - case msg: String => msg != "something" + ignoreMsg { case msg: String => + msg != "something" } seqRef ! "something" expectMsg("something") - ignoreMsg { - case msg: String => msg == "1" + ignoreMsg { case msg: String => + msg == "1" } expectNoMessage() ignoreNoMsg() @@ -113,18 +111,14 @@ object TestKitUsageSpec { } """ - /** - * An Actor that forwards every message to a next Actor - */ + /** An Actor that forwards every message to a next Actor */ class ForwardingActor(next: ActorRef) extends Actor { - def receive = { - case msg => next ! msg + def receive = { case msg => + next ! msg } } - /** - * An Actor that only forwards certain messages to a next Actor - */ + /** An Actor that only forwards certain messages to a next Actor */ class FilteringActor(next: ActorRef) extends Actor { def receive = { case msg: String => next ! 
msg diff --git a/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala b/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala index ee4b0563442..b5d478c2e20 100644 --- a/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala +++ b/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala @@ -32,15 +32,15 @@ object TestKitDocSpec { class TestFsmActor extends Actor with FSM[Int, String] { startWith(1, "") - when(1) { - case Event("go", _) => goto(2).using("go") + when(1) { case Event("go", _) => + goto(2).using("go") } - when(2) { - case Event("back", _) => goto(1).using("back") + when(2) { case Event("back", _) => + goto(1).using("back") } } - //#my-double-echo + // #my-double-echo class MyDoubleEcho extends Actor { var dest1: ActorRef = _ var dest2: ActorRef = _ @@ -54,49 +54,46 @@ object TestKitDocSpec { } } - //#my-double-echo + // #my-double-echo - //#test-probe-forward-actors + // #test-probe-forward-actors class Source(target: ActorRef) extends Actor { - def receive = { - case "start" => target ! "work" + def receive = { case "start" => + target ! "work" } } class Destination extends Actor { - def receive = { - case x => // Do something.. + def receive = { case x => // Do something.. } } - //#test-probe-forward-actors + // #test-probe-forward-actors - //#timer + // #timer case class TriggerScheduling(foo: String) object SchedKey case class ScheduledMessage(foo: String) class TestTimerActor extends Actor with Timers { - override def receive = { - case TriggerScheduling(foo) => triggerScheduling(ScheduledMessage(foo)) + override def receive = { case TriggerScheduling(foo) => + triggerScheduling(ScheduledMessage(foo)) } def triggerScheduling(msg: ScheduledMessage) = timers.startSingleTimer(SchedKey, msg, 500.millis) } - //#timer + // #timer class LoggingActor extends Actor { - //#logging-receive + // #logging-receive import akka.event.LoggingReceive - def receive = LoggingReceive { - case msg => // Do something ... 
+ def receive = LoggingReceive { case msg => // Do something ... } - def otherState: Receive = LoggingReceive.withLabel("other") { - case msg => // Do something else ... + def otherState: Receive = LoggingReceive.withLabel("other") { case msg => // Do something else ... } - //#logging-receive + // #logging-receive } } @@ -104,12 +101,12 @@ class TestKitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { import TestKitDocSpec._ "demonstrate usage of TestActorRef" in { - //#test-actor-ref + // #test-actor-ref import akka.testkit.TestActorRef val actorRef = TestActorRef[MyActor] val actor = actorRef.underlyingActor - //#test-actor-ref + // #test-actor-ref } "demonstrate built-in expect methods" in { @@ -119,25 +116,25 @@ class TestKitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { testActor.tell("hello", ActorRef.noSender) testActor.tell("world", ActorRef.noSender) testActor.tell(42, ActorRef.noSender) - //#test-expect + // #test-expect val hello: String = expectMsg("hello") val any: String = expectMsgAnyOf("hello", "world") val all: immutable.Seq[String] = expectMsgAllOf("hello", "world") val i: Int = expectMsgType[Int] expectNoMessage(200.millis) - //#test-expect + // #test-expect testActor.tell("receveN-1", ActorRef.noSender) testActor.tell("receveN-2", ActorRef.noSender) - //#test-expect + // #test-expect val two: immutable.Seq[AnyRef] = receiveN(2) - //#test-expect + // #test-expect assert("hello" == hello) assert("hello" == any) assert(42 == i) } "demonstrate usage of TestFSMRef" in { - //#test-fsm-ref + // #test-fsm-ref import akka.testkit.TestFSMRef import scala.concurrent.duration._ @@ -159,12 +156,12 @@ class TestKitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { assert(fsm.isTimerActive("test") == true) fsm.cancelTimer("test") assert(fsm.isTimerActive("test") == false) - //#test-fsm-ref + // #test-fsm-ref } "demonstrate testing of behavior" in { - //#test-behavior + // #test-behavior import 
akka.testkit.TestActorRef import akka.pattern.ask @@ -172,35 +169,35 @@ class TestKitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { // hypothetical message stimulating a '42' answer val future: Future[Any] = actorRef ? Say42 future.futureValue should be(42) - //#test-behavior + // #test-behavior } "demonstrate unhandled message" in { - //#test-unhandled + // #test-unhandled import akka.testkit.TestActorRef system.eventStream.subscribe(testActor, classOf[UnhandledMessage]) val ref = TestActorRef[MyActor] ref.receive(Unknown) expectMsg(1 second, UnhandledMessage(Unknown, system.deadLetters, ref)) - //#test-unhandled + // #test-unhandled } "demonstrate expecting exceptions" in { - //#test-expecting-exceptions + // #test-expecting-exceptions import akka.testkit.TestActorRef val actorRef = TestActorRef(new Actor { - def receive = { - case "hello" => throw new IllegalArgumentException("boom") + def receive = { case "hello" => + throw new IllegalArgumentException("boom") } }) intercept[IllegalArgumentException] { actorRef.receive("hello") } - //#test-expecting-exceptions + // #test-expecting-exceptions } "demonstrate within" in { type Worker = MyActor - //#test-within + // #test-within import akka.actor.Props import scala.concurrent.duration._ @@ -211,19 +208,19 @@ class TestKitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { expectNoMessage() // will block for the rest of the 200ms Thread.sleep(300) // will NOT make this block fail } - //#test-within + // #test-within } "demonstrate dilated duration" in { - //#duration-dilation + // #duration-dilation import scala.concurrent.duration._ import akka.testkit._ 10.milliseconds.dilated - //#duration-dilation + // #duration-dilation } "demonstrate usage of probe" in { - //#test-probe + // #test-probe val probe1 = TestProbe() val probe2 = TestProbe() val actor = system.actorOf(Props[MyDoubleEcho]()) @@ -231,9 +228,9 @@ class TestKitDocSpec extends AkkaSpec with DefaultTimeout with 
ImplicitSender { actor ! "hello" probe1.expectMsg(500 millis, "hello") probe2.expectMsg(500 millis, "hello") - //#test-probe + // #test-probe - //#test-special-probe + // #test-special-probe final case class Update(id: Int, value: String) val probe = new TestProbe(system) { @@ -244,58 +241,58 @@ class TestKitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { sender() ! "ACK" } } - //#test-special-probe + // #test-special-probe } "demonstrate usage of test probe with custom name" in { - //#test-probe-with-custom-name + // #test-probe-with-custom-name val worker = TestProbe("worker") val aggregator = TestProbe("aggregator") worker.ref.path.name should startWith("worker") aggregator.ref.path.name should startWith("aggregator") - //#test-probe-with-custom-name + // #test-probe-with-custom-name } "demonstrate probe watch" in { import akka.testkit.TestProbe val target = system.actorOf(Props.empty) - //#test-probe-watch + // #test-probe-watch val probe = TestProbe() probe.watch(target) target ! PoisonPill probe.expectTerminated(target) - //#test-probe-watch + // #test-probe-watch } "demonstrate probe reply" in { import akka.testkit.TestProbe import scala.concurrent.duration._ import akka.pattern.ask - //#test-probe-reply + // #test-probe-reply val probe = TestProbe() val future = probe.ref ? "hello" probe.expectMsg(0 millis, "hello") // TestActor runs on CallingThreadDispatcher probe.reply("world") assert(future.isCompleted && future.value.contains(Success("world"))) - //#test-probe-reply + // #test-probe-reply } "demonstrate probe forward" in { import akka.testkit.TestProbe import akka.actor.Props - //#test-probe-forward + // #test-probe-forward val probe = TestProbe() val source = system.actorOf(Props(classOf[Source], probe.ref)) val dest = system.actorOf(Props[Destination]()) source ! 
"start" probe.expectMsg("work") probe.forward(dest) - //#test-probe-forward + // #test-probe-forward } "demonstrate using inheritance to test timers" in { - //#timer-test + // #timer-test import akka.testkit.TestProbe import akka.actor.Props @@ -307,18 +304,18 @@ class TestKitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { actor ! TriggerScheduling("abc") probe.expectMsg(ScheduledMessage("abc")) - //#timer-test + // #timer-test } "demonstrate calling thread dispatcher" in { - //#calling-thread-dispatcher + // #calling-thread-dispatcher import akka.testkit.CallingThreadDispatcher val ref = system.actorOf(Props[MyActor]().withDispatcher(CallingThreadDispatcher.Id)) - //#calling-thread-dispatcher + // #calling-thread-dispatcher } "demonstrate EventFilter" in { - //#event-filter + // #event-filter import akka.testkit.EventFilter import com.typesafe.config.ConfigFactory @@ -335,36 +332,36 @@ class TestKitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { } finally { shutdown(system) } - //#event-filter + // #event-filter } "demonstrate TestKitBase" in { - //#test-kit-base + // #test-kit-base import akka.testkit.TestKitBase class MyTest extends TestKitBase { implicit lazy val system: ActorSystem = ActorSystem() - //#put-your-test-code-here + // #put-your-test-code-here val probe = TestProbe() probe.send(testActor, "hello") try expectMsg("hello") catch { case NonFatal(e) => system.terminate(); throw e } - //#put-your-test-code-here + // #put-your-test-code-here shutdown(system) } - //#test-kit-base + // #test-kit-base } "demonstrate within() nesting" in { intercept[AssertionError] { - //#test-within-probe + // #test-within-probe val probe = TestProbe() within(1 second) { probe.expectMsg("hello") } - //#test-within-probe + // #test-within-probe } } diff --git a/akka-docs/src/test/scala/typed/tutorial_1/ActorHierarchyExperiments.scala b/akka-docs/src/test/scala/typed/tutorial_1/ActorHierarchyExperiments.scala index c9650ff6b09..9f9037a51c3 
100644 --- a/akka-docs/src/test/scala/typed/tutorial_1/ActorHierarchyExperiments.scala +++ b/akka-docs/src/test/scala/typed/tutorial_1/ActorHierarchyExperiments.scala @@ -57,10 +57,9 @@ class StartStopActor1(context: ActorContext[String]) extends AbstractBehavior[St case "stop" => Behaviors.stopped } - override def onSignal: PartialFunction[Signal, Behavior[String]] = { - case PostStop => - println("first stopped") - this + override def onSignal: PartialFunction[Signal, Behavior[String]] = { case PostStop => + println("first stopped") + this } } @@ -78,10 +77,9 @@ class StartStopActor2(context: ActorContext[String]) extends AbstractBehavior[St Behaviors.unhandled } - override def onSignal: PartialFunction[Signal, Behavior[String]] = { - case PostStop => - println("second stopped") - this + override def onSignal: PartialFunction[Signal, Behavior[String]] = { case PostStop => + println("second stopped") + this } } @@ -165,17 +163,17 @@ class ActorHierarchyExperiments extends ScalaTestWithActorTestKit with AnyWordSp def context = this "start and stop actors" in { - //#start-stop-main + // #start-stop-main val first = context.spawn(StartStopActor1(), "first") first ! "stop" - //#start-stop-main + // #start-stop-main } "supervise actors" in { - //#supervise-main + // #supervise-main val supervisingActor = context.spawn(SupervisingActor(), "supervising-actor") supervisingActor ! 
"failChild" - //#supervise-main + // #supervise-main Thread.sleep(200) // allow for the println/logging to complete } } diff --git a/akka-docs/src/test/scala/typed/tutorial_2/IotSupervisor.scala b/akka-docs/src/test/scala/typed/tutorial_2/IotSupervisor.scala index 421e491dde2..968a7c49799 100644 --- a/akka-docs/src/test/scala/typed/tutorial_2/IotSupervisor.scala +++ b/akka-docs/src/test/scala/typed/tutorial_2/IotSupervisor.scala @@ -32,10 +32,9 @@ class IotSupervisor(context: ActorContext[Nothing]) extends AbstractBehavior[Not Behaviors.unhandled } - override def onSignal: PartialFunction[Signal, Behavior[Nothing]] = { - case PostStop => - context.log.info("IoT Application stopped") - this + override def onSignal: PartialFunction[Signal, Behavior[Nothing]] = { case PostStop => + context.log.info("IoT Application stopped") + this } } //#iot-supervisor diff --git a/akka-docs/src/test/scala/typed/tutorial_3/Device.scala b/akka-docs/src/test/scala/typed/tutorial_3/Device.scala index f6298536d53..ae6d219ca3d 100644 --- a/akka-docs/src/test/scala/typed/tutorial_3/Device.scala +++ b/akka-docs/src/test/scala/typed/tutorial_3/Device.scala @@ -23,11 +23,11 @@ object Device { final case class ReadTemperature(requestId: Long, replyTo: ActorRef[RespondTemperature]) extends Command final case class RespondTemperature(requestId: Long, value: Option[Double]) - //#write-protocol + // #write-protocol final case class RecordTemperature(requestId: Long, value: Double, replyTo: ActorRef[TemperatureRecorded]) extends Command final case class TemperatureRecorded(requestId: Long) - //#write-protocol + // #write-protocol } class Device(context: ActorContext[Device.Command], groupId: String, deviceId: String) @@ -52,10 +52,9 @@ class Device(context: ActorContext[Device.Command], groupId: String, deviceId: S } } - override def onSignal: PartialFunction[Signal, Behavior[Command]] = { - case PostStop => - context.log.info2("Device actor {}-{} stopped", groupId, deviceId) - this + override def 
onSignal: PartialFunction[Signal, Behavior[Command]] = { case PostStop => + context.log.info2("Device actor {}-{} stopped", groupId, deviceId) + this } } diff --git a/akka-docs/src/test/scala/typed/tutorial_3/DeviceInProgress.scala b/akka-docs/src/test/scala/typed/tutorial_3/DeviceInProgress.scala index 22d3b724282..51a79bc628b 100644 --- a/akka-docs/src/test/scala/typed/tutorial_3/DeviceInProgress.scala +++ b/akka-docs/src/test/scala/typed/tutorial_3/DeviceInProgress.scala @@ -16,7 +16,7 @@ import akka.actor.typed.Signal object DeviceInProgress1 { - //#read-protocol-1 + // #read-protocol-1 import akka.actor.typed.ActorRef object Device { @@ -24,14 +24,14 @@ object DeviceInProgress1 { final case class ReadTemperature(replyTo: ActorRef[RespondTemperature]) extends Command final case class RespondTemperature(value: Option[Double]) } - //#read-protocol-1 + // #read-protocol-1 } object DeviceInProgress2 { import akka.actor.typed.ActorRef - //#device-with-read + // #device-with-read import akka.actor.typed.Behavior import akka.actor.typed.scaladsl.AbstractBehavior import akka.actor.typed.scaladsl.ActorContext @@ -42,11 +42,11 @@ object DeviceInProgress2 { def apply(groupId: String, deviceId: String): Behavior[Command] = Behaviors.setup(context => new Device(context, groupId, deviceId)) - //#read-protocol-2 + // #read-protocol-2 sealed trait Command final case class ReadTemperature(requestId: Long, replyTo: ActorRef[RespondTemperature]) extends Command final case class RespondTemperature(requestId: Long, value: Option[Double]) - //#read-protocol-2 + // #read-protocol-2 } class Device(context: ActorContext[Device.Command], groupId: String, deviceId: String) @@ -65,23 +65,22 @@ object DeviceInProgress2 { } } - override def onSignal: PartialFunction[Signal, Behavior[Command]] = { - case PostStop => - context.log.info2("Device actor {}-{} stopped", groupId, deviceId) - this + override def onSignal: PartialFunction[Signal, Behavior[Command]] = { case PostStop => + 
context.log.info2("Device actor {}-{} stopped", groupId, deviceId) + this } } - //#device-with-read + // #device-with-read } object DeviceInProgress3 { object Device { - //#write-protocol-1 + // #write-protocol-1 sealed trait Command final case class RecordTemperature(value: Double) extends Command - //#write-protocol-1 + // #write-protocol-1 } } diff --git a/akka-docs/src/test/scala/typed/tutorial_3/DeviceSpec.scala b/akka-docs/src/test/scala/typed/tutorial_3/DeviceSpec.scala index c52219b6ecc..2636d4a118f 100644 --- a/akka-docs/src/test/scala/typed/tutorial_3/DeviceSpec.scala +++ b/akka-docs/src/test/scala/typed/tutorial_3/DeviceSpec.scala @@ -22,9 +22,9 @@ class DeviceSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike { response.requestId should ===(42) response.value should ===(None) } - //#device-read-test + // #device-read-test - //#device-write-read-test + // #device-write-read-test "reply with latest temperature reading" in { val recordProbe = createTestProbe[TemperatureRecorded]() val readProbe = createTestProbe[RespondTemperature]() @@ -46,9 +46,9 @@ class DeviceSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike { response2.requestId should ===(4) response2.value should ===(Some(55.0)) } - //#device-write-read-test + // #device-write-read-test } - //#device-read-test + // #device-read-test } //#device-read-test diff --git a/akka-docs/src/test/scala/typed/tutorial_4/Device.scala b/akka-docs/src/test/scala/typed/tutorial_4/Device.scala index cbee7587629..2fc55006c20 100644 --- a/akka-docs/src/test/scala/typed/tutorial_4/Device.scala +++ b/akka-docs/src/test/scala/typed/tutorial_4/Device.scala @@ -27,9 +27,9 @@ object Device { extends Command final case class TemperatureRecorded(requestId: Long) - //#passivate-msg + // #passivate-msg case object Passivate extends Command - //#passivate-msg + // #passivate-msg } class Device(context: ActorContext[Device.Command], groupId: String, deviceId: String) @@ -57,10 +57,9 @@ class Device(context: 
ActorContext[Device.Command], groupId: String, deviceId: S } } - override def onSignal: PartialFunction[Signal, Behavior[Command]] = { - case PostStop => - context.log.info2("Device actor {}-{} stopped", groupId, deviceId) - this + override def onSignal: PartialFunction[Signal, Behavior[Command]] = { case PostStop => + context.log.info2("Device actor {}-{} stopped", groupId, deviceId) + this } } diff --git a/akka-docs/src/test/scala/typed/tutorial_4/DeviceGroup.scala b/akka-docs/src/test/scala/typed/tutorial_4/DeviceGroup.scala index 3a603ac4ce5..61c502ce0fd 100644 --- a/akka-docs/src/test/scala/typed/tutorial_4/DeviceGroup.scala +++ b/akka-docs/src/test/scala/typed/tutorial_4/DeviceGroup.scala @@ -47,9 +47,9 @@ class DeviceGroup(context: ActorContext[DeviceGroup.Command], groupId: String) case None => context.log.info("Creating device actor for {}", trackMsg.deviceId) val deviceActor = context.spawn(Device(groupId, deviceId), s"device-$deviceId") - //#device-group-register + // #device-group-register context.watchWith(deviceActor, DeviceTerminated(deviceActor, groupId, deviceId)) - //#device-group-register + // #device-group-register deviceIdToActor += deviceId -> deviceActor replyTo ! DeviceRegistered(deviceActor) } @@ -58,8 +58,8 @@ class DeviceGroup(context: ActorContext[DeviceGroup.Command], groupId: String) case RequestTrackDevice(gId, _, _) => context.log.warn2("Ignoring TrackDevice request for {}. 
This actor is responsible for {}.", gId, groupId) this - //#device-group-register - //#device-group-remove + // #device-group-register + // #device-group-remove case RequestDeviceList(requestId, gId, replyTo) => if (gId == groupId) { @@ -67,20 +67,19 @@ class DeviceGroup(context: ActorContext[DeviceGroup.Command], groupId: String) this } else Behaviors.unhandled - //#device-group-remove + // #device-group-remove case DeviceTerminated(_, _, deviceId) => context.log.info("Device actor for {} has been terminated", deviceId) deviceIdToActor -= deviceId this - //#device-group-register + // #device-group-register } - override def onSignal: PartialFunction[Signal, Behavior[Command]] = { - case PostStop => - context.log.info("DeviceGroup {} stopped", groupId) - this + override def onSignal: PartialFunction[Signal, Behavior[Command]] = { case PostStop => + context.log.info("DeviceGroup {} stopped", groupId) + this } } //#device-group-remove diff --git a/akka-docs/src/test/scala/typed/tutorial_4/DeviceGroupSpec.scala b/akka-docs/src/test/scala/typed/tutorial_4/DeviceGroupSpec.scala index 720673a52e8..083cf6e6e97 100644 --- a/akka-docs/src/test/scala/typed/tutorial_4/DeviceGroupSpec.scala +++ b/akka-docs/src/test/scala/typed/tutorial_4/DeviceGroupSpec.scala @@ -14,7 +14,7 @@ class DeviceGroupSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike { "DeviceGroup actor" must { - //#device-group-test-registration + // #device-group-test-registration "be able to register a device actor" in { val probe = createTestProbe[DeviceRegistered]() val groupActor = spawn(DeviceGroup("group")) @@ -44,9 +44,9 @@ class DeviceGroupSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike { groupActor ! 
RequestTrackDevice("wrongGroup", "device1", probe.ref) probe.expectNoMessage(500.milliseconds) } - //#device-group-test-registration + // #device-group-test-registration - //#device-group-test3 + // #device-group-test3 "return same actor for same deviceId" in { val probe = createTestProbe[DeviceRegistered]() val groupActor = spawn(DeviceGroup("group")) @@ -60,9 +60,9 @@ class DeviceGroupSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike { registered1.device should ===(registered2.device) } - //#device-group-test3 + // #device-group-test3 - //#device-group-list-terminate-test + // #device-group-list-terminate-test "be able to list active devices" in { val registeredProbe = createTestProbe[DeviceRegistered]() val groupActor = spawn(DeviceGroup("group")) @@ -103,7 +103,7 @@ class DeviceGroupSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike { deviceListProbe.expectMessage(ReplyDeviceList(requestId = 1, Set("device2"))) } } - //#device-group-list-terminate-test + // #device-group-list-terminate-test } diff --git a/akka-docs/src/test/scala/typed/tutorial_4/DeviceManager.scala b/akka-docs/src/test/scala/typed/tutorial_4/DeviceManager.scala index b059b5e8e84..f61449d5f38 100644 --- a/akka-docs/src/test/scala/typed/tutorial_4/DeviceManager.scala +++ b/akka-docs/src/test/scala/typed/tutorial_4/DeviceManager.scala @@ -17,28 +17,28 @@ object DeviceManager { def apply(): Behavior[Command] = Behaviors.setup(context => new DeviceManager(context)) - //#device-manager-msgs + // #device-manager-msgs sealed trait Command - //#device-registration-msgs + // #device-registration-msgs final case class RequestTrackDevice(groupId: String, deviceId: String, replyTo: ActorRef[DeviceRegistered]) extends DeviceManager.Command with DeviceGroup.Command final case class DeviceRegistered(device: ActorRef[Device.Command]) - //#device-registration-msgs + // #device-registration-msgs - //#device-list-msgs + // #device-list-msgs final case class RequestDeviceList(requestId: Long, 
groupId: String, replyTo: ActorRef[ReplyDeviceList]) extends DeviceManager.Command with DeviceGroup.Command final case class ReplyDeviceList(requestId: Long, ids: Set[String]) - //#device-list-msgs + // #device-list-msgs private final case class DeviceGroupTerminated(groupId: String) extends DeviceManager.Command - //#device-manager-msgs + // #device-manager-msgs } class DeviceManager(context: ActorContext[DeviceManager.Command]) @@ -79,10 +79,9 @@ class DeviceManager(context: ActorContext[DeviceManager.Command]) this } - override def onSignal: PartialFunction[Signal, Behavior[Command]] = { - case PostStop => - context.log.info("DeviceManager stopped") - this + override def onSignal: PartialFunction[Signal, Behavior[Command]] = { case PostStop => + context.log.info("DeviceManager stopped") + this } } diff --git a/akka-docs/src/test/scala/typed/tutorial_4/DeviceSpec.scala b/akka-docs/src/test/scala/typed/tutorial_4/DeviceSpec.scala index 3ecdd275b06..6f01ae7799f 100644 --- a/akka-docs/src/test/scala/typed/tutorial_4/DeviceSpec.scala +++ b/akka-docs/src/test/scala/typed/tutorial_4/DeviceSpec.scala @@ -12,7 +12,7 @@ class DeviceSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike { "Device actor" must { - //#device-read-test + // #device-read-test "reply with empty reading if no temperature is known" in { val probe = createTestProbe[RespondTemperature]() val deviceActor = spawn(Device("group", "device")) @@ -22,9 +22,9 @@ class DeviceSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike { response.requestId should ===(42) response.value should ===(None) } - //#device-read-test + // #device-read-test - //#device-write-read-test + // #device-write-read-test "reply with latest temperature reading" in { val recordProbe = createTestProbe[TemperatureRecorded]() val readProbe = createTestProbe[RespondTemperature]() @@ -46,7 +46,7 @@ class DeviceSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike { response2.requestId should ===(4) response2.value should 
===(Some(55.0)) } - //#device-write-read-test + // #device-write-read-test } diff --git a/akka-docs/src/test/scala/typed/tutorial_5/Device.scala b/akka-docs/src/test/scala/typed/tutorial_5/Device.scala index ef72a59c0d6..194025c6205 100644 --- a/akka-docs/src/test/scala/typed/tutorial_5/Device.scala +++ b/akka-docs/src/test/scala/typed/tutorial_5/Device.scala @@ -21,9 +21,9 @@ object Device { sealed trait Command final case class ReadTemperature(requestId: Long, replyTo: ActorRef[RespondTemperature]) extends Command - //#respond-declare + // #respond-declare final case class RespondTemperature(requestId: Long, deviceId: String, value: Option[Double]) - //#respond-declare + // #respond-declare final case class RecordTemperature(requestId: Long, value: Double, replyTo: ActorRef[TemperatureRecorded]) extends Command final case class TemperatureRecorded(requestId: Long) @@ -46,20 +46,19 @@ class Device(context: ActorContext[Device.Command], groupId: String, deviceId: S lastTemperatureReading = Some(value) replyTo ! TemperatureRecorded(id) this - //#respond-reply + // #respond-reply case ReadTemperature(id, replyTo) => replyTo ! 
RespondTemperature(id, deviceId, lastTemperatureReading) this - //#respond-reply + // #respond-reply case Passivate => Behaviors.stopped } } - override def onSignal: PartialFunction[Signal, Behavior[Command]] = { - case PostStop => - context.log.info2("Device actor {}-{} stopped", groupId, deviceId) - this + override def onSignal: PartialFunction[Signal, Behavior[Command]] = { case PostStop => + context.log.info2("Device actor {}-{} stopped", groupId, deviceId) + this } } diff --git a/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroup.scala b/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroup.scala index 41c0dc7cbcf..927381d82a4 100644 --- a/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroup.scala +++ b/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroup.scala @@ -43,7 +43,7 @@ class DeviceGroup(context: ActorContext[DeviceGroup.Command], groupId: String) override def onMessage(msg: Command): Behavior[Command] = msg match { - //#query-added + // #query-added case trackMsg @ RequestTrackDevice(`groupId`, deviceId, replyTo) => deviceIdToActor.get(deviceId) match { case Some(deviceActor) => @@ -51,9 +51,9 @@ class DeviceGroup(context: ActorContext[DeviceGroup.Command], groupId: String) case None => context.log.info("Creating device actor for {}", trackMsg.deviceId) val deviceActor = context.spawn(Device(groupId, deviceId), s"device-$deviceId") - //#device-group-register + // #device-group-register context.watchWith(deviceActor, DeviceTerminated(deviceActor, groupId, deviceId)) - //#device-group-register + // #device-group-register deviceIdToActor += deviceId -> deviceActor replyTo ! 
DeviceRegistered(deviceActor) } @@ -69,14 +69,14 @@ class DeviceGroup(context: ActorContext[DeviceGroup.Command], groupId: String) this } else Behaviors.unhandled - //#device-group-remove + // #device-group-remove case DeviceTerminated(_, _, deviceId) => context.log.info("Device actor for {} has been terminated", deviceId) deviceIdToActor -= deviceId this - //#query-added + // #query-added // ... other cases omitted case RequestAllTemperatures(requestId, gId, replyTo) => @@ -88,10 +88,9 @@ class DeviceGroup(context: ActorContext[DeviceGroup.Command], groupId: String) Behaviors.unhandled } - override def onSignal: PartialFunction[Signal, Behavior[Command]] = { - case PostStop => - context.log.info("DeviceGroup {} stopped", groupId) - this + override def onSignal: PartialFunction[Signal, Behavior[Command]] = { case PostStop => + context.log.info("DeviceGroup {} stopped", groupId) + this } } //#query-added diff --git a/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupQuery.scala b/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupQuery.scala index a12741b4312..8e683f797a2 100644 --- a/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupQuery.scala +++ b/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupQuery.scala @@ -59,22 +59,21 @@ class DeviceGroupQuery( private val respondTemperatureAdapter = context.messageAdapter(WrappedRespondTemperature.apply) - //#query-outline - //#query-state + // #query-outline + // #query-state private var repliesSoFar = Map.empty[String, TemperatureReading] private var stillWaiting = deviceIdToActor.keySet - //#query-state - //#query-outline + // #query-state + // #query-outline - deviceIdToActor.foreach { - case (deviceId, device) => - context.watchWith(device, DeviceTerminated(deviceId)) - device ! Device.ReadTemperature(0, respondTemperatureAdapter) + deviceIdToActor.foreach { case (deviceId, device) => + context.watchWith(device, DeviceTerminated(deviceId)) + device ! 
Device.ReadTemperature(0, respondTemperatureAdapter) } - //#query-outline - //#query-state + // #query-outline + // #query-state override def onMessage(msg: Command): Behavior[Command] = msg match { case WrappedRespondTemperature(response) => onRespondTemperature(response) @@ -108,9 +107,9 @@ class DeviceGroupQuery( stillWaiting = Set.empty respondWhenAllCollected() } - //#query-state + // #query-state - //#query-collect-reply + // #query-collect-reply private def respondWhenAllCollected(): Behavior[Command] = { if (stillWaiting.isEmpty) { requester ! RespondAllTemperatures(requestId, repliesSoFar) @@ -119,8 +118,8 @@ class DeviceGroupQuery( this } } - //#query-collect-reply - //#query-outline + // #query-collect-reply + // #query-outline } //#query-outline //#query-full diff --git a/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupQuerySpec.scala b/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupQuerySpec.scala index 1130927e8fe..8d93bdcd892 100644 --- a/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupQuerySpec.scala +++ b/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupQuerySpec.scala @@ -19,7 +19,7 @@ class DeviceGroupQuerySpec extends ScalaTestWithActorTestKit with AnyWordSpecLik "DeviceGroupQuery" must { - //#query-test-normal + // #query-test-normal "return temperature value for working devices" in { val requester = createTestProbe[RespondAllTemperatures]() @@ -42,9 +42,9 @@ class DeviceGroupQuerySpec extends ScalaTestWithActorTestKit with AnyWordSpecLik requestId = 1, temperatures = Map("device1" -> Temperature(1.0), "device2" -> Temperature(2.0)))) } - //#query-test-normal + // #query-test-normal - //#query-test-no-reading + // #query-test-no-reading "return TemperatureNotAvailable for devices with no readings" in { val requester = createTestProbe[RespondAllTemperatures]() @@ -67,9 +67,9 @@ class DeviceGroupQuerySpec extends ScalaTestWithActorTestKit with AnyWordSpecLik requestId = 1, temperatures = Map("device1" -> 
TemperatureNotAvailable, "device2" -> Temperature(2.0)))) } - //#query-test-no-reading + // #query-test-no-reading - //#query-test-stopped + // #query-test-stopped "return DeviceNotAvailable if device stops before answering" in { val requester = createTestProbe[RespondAllTemperatures]() @@ -93,9 +93,9 @@ class DeviceGroupQuerySpec extends ScalaTestWithActorTestKit with AnyWordSpecLik requestId = 1, temperatures = Map("device1" -> Temperature(2.0), "device2" -> DeviceNotAvailable))) } - //#query-test-stopped + // #query-test-stopped - //#query-test-stopped-later + // #query-test-stopped-later "return temperature reading even if device stops after answering" in { val requester = createTestProbe[RespondAllTemperatures]() @@ -120,9 +120,9 @@ class DeviceGroupQuerySpec extends ScalaTestWithActorTestKit with AnyWordSpecLik requestId = 1, temperatures = Map("device1" -> Temperature(1.0), "device2" -> Temperature(2.0)))) } - //#query-test-stopped-later + // #query-test-stopped-later - //#query-test-timeout + // #query-test-timeout "return DeviceTimedOut if device does not answer in time" in { val requester = createTestProbe[RespondAllTemperatures]() @@ -146,7 +146,7 @@ class DeviceGroupQuerySpec extends ScalaTestWithActorTestKit with AnyWordSpecLik requestId = 1, temperatures = Map("device1" -> Temperature(1.0), "device2" -> DeviceTimedOut))) } - //#query-test-timeout + // #query-test-timeout } diff --git a/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupSpec.scala b/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupSpec.scala index 7edd820157c..e1e209e4bb3 100644 --- a/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupSpec.scala +++ b/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupSpec.scala @@ -14,7 +14,7 @@ class DeviceGroupSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike { "DeviceGroup actor" must { - //#device-group-test-registration + // #device-group-test-registration "be able to register a device actor" in { val probe = 
createTestProbe[DeviceRegistered]() val groupActor = spawn(DeviceGroup("group")) @@ -44,9 +44,9 @@ class DeviceGroupSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike { groupActor ! RequestTrackDevice("wrongGroup", "device1", probe.ref) probe.expectNoMessage(500.milliseconds) } - //#device-group-test-registration + // #device-group-test-registration - //#device-group-test3 + // #device-group-test3 "return same actor for same deviceId" in { val probe = createTestProbe[DeviceRegistered]() val groupActor = spawn(DeviceGroup("group")) @@ -60,9 +60,9 @@ class DeviceGroupSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike { registered1.device should ===(registered2.device) } - //#device-group-test3 + // #device-group-test3 - //#device-group-list-terminate-test + // #device-group-list-terminate-test "be able to list active devices" in { val registeredProbe = createTestProbe[DeviceRegistered]() val groupActor = spawn(DeviceGroup("group")) @@ -103,9 +103,9 @@ class DeviceGroupSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike { deviceListProbe.expectMessage(ReplyDeviceList(requestId = 1, Set("device2"))) } } - //#device-group-list-terminate-test + // #device-group-list-terminate-test - //#group-query-integration-test + // #group-query-integration-test "be able to collect temperatures from all active devices" in { val registeredProbe = createTestProbe[DeviceRegistered]() val groupActor = spawn(DeviceGroup("group")) @@ -135,7 +135,7 @@ class DeviceGroupSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike { temperatures = Map("device1" -> Temperature(1.0), "device2" -> Temperature(2.0), "device3" -> TemperatureNotAvailable))) } - //#group-query-integration-test + // #group-query-integration-test } diff --git a/akka-docs/src/test/scala/typed/tutorial_5/DeviceManager.scala b/akka-docs/src/test/scala/typed/tutorial_5/DeviceManager.scala index 71eb8c13cd0..6f8156f747f 100644 --- a/akka-docs/src/test/scala/typed/tutorial_5/DeviceManager.scala +++ 
b/akka-docs/src/test/scala/typed/tutorial_5/DeviceManager.scala @@ -17,7 +17,7 @@ object DeviceManager { def apply(): Behavior[Command] = Behaviors.setup(context => new DeviceManager(context)) - //#device-manager-msgs + // #device-manager-msgs sealed trait Command @@ -34,9 +34,9 @@ object DeviceManager { final case class ReplyDeviceList(requestId: Long, ids: Set[String]) private final case class DeviceGroupTerminated(groupId: String) extends DeviceManager.Command - //#device-manager-msgs + // #device-manager-msgs - //#query-protocol + // #query-protocol final case class RequestAllTemperatures(requestId: Long, groupId: String, replyTo: ActorRef[RespondAllTemperatures]) extends DeviceGroupQuery.Command @@ -50,7 +50,7 @@ object DeviceManager { case object TemperatureNotAvailable extends TemperatureReading case object DeviceNotAvailable extends TemperatureReading case object DeviceTimedOut extends TemperatureReading - //#query-protocol + // #query-protocol } class DeviceManager(context: ActorContext[DeviceManager.Command]) @@ -100,10 +100,9 @@ class DeviceManager(context: ActorContext[DeviceManager.Command]) this } - override def onSignal: PartialFunction[Signal, Behavior[Command]] = { - case PostStop => - context.log.info("DeviceManager stopped") - this + override def onSignal: PartialFunction[Signal, Behavior[Command]] = { case PostStop => + context.log.info("DeviceManager stopped") + this } } diff --git a/akka-docs/src/test/scala/typed/tutorial_5/DeviceSpec.scala b/akka-docs/src/test/scala/typed/tutorial_5/DeviceSpec.scala index b2727e89d6b..6a18c516815 100644 --- a/akka-docs/src/test/scala/typed/tutorial_5/DeviceSpec.scala +++ b/akka-docs/src/test/scala/typed/tutorial_5/DeviceSpec.scala @@ -12,7 +12,7 @@ class DeviceSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike { "Device actor" must { - //#device-read-test + // #device-read-test "reply with empty reading if no temperature is known" in { val probe = createTestProbe[RespondTemperature]() val deviceActor 
= spawn(Device("group", "device")) @@ -22,9 +22,9 @@ class DeviceSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike { response.requestId should ===(42) response.value should ===(None) } - //#device-read-test + // #device-read-test - //#device-write-read-test + // #device-write-read-test "reply with latest temperature reading" in { val recordProbe = createTestProbe[TemperatureRecorded]() val readProbe = createTestProbe[RespondTemperature]() @@ -46,7 +46,7 @@ class DeviceSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike { response2.requestId should ===(4) response2.value should ===(Some(55.0)) } - //#device-write-read-test + // #device-write-read-test } diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Conductor.scala index b1e821589db..e4c4d124673 100644 --- a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Conductor.scala +++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -83,8 +83,8 @@ trait Conductor { this: TestConductorExt => _controller = system.systemActorOf(Props(classOf[Controller], participants, controllerPort), "controller") import Settings.BarrierTimeout import system.dispatcher - (controller ? GetSockAddr).mapTo[InetSocketAddress].flatMap { - case sockAddr: InetSocketAddress => startClient(name, sockAddr).map(_ => sockAddr) + (controller ? GetSockAddr).mapTo[InetSocketAddress].flatMap { case sockAddr: InetSocketAddress => + startClient(name, sockAddr).map(_ => sockAddr) } } @@ -232,14 +232,12 @@ trait Conductor { this: TestConductorExt => import system.dispatcher // the recover is needed to handle ClientDisconnectedException exception, // which is normal during shutdown - (controller ? Terminate(node, Left(abort))).mapTo(classTag[Done]).recover { - case _: ClientDisconnectedException => Done + (controller ? 
Terminate(node, Left(abort))).mapTo(classTag[Done]).recover { case _: ClientDisconnectedException => + Done } } - /** - * Obtain the list of remote host names currently registered. - */ + /** Obtain the list of remote host names currently registered. */ def getNodes: Future[Iterable[RoleName]] = { import Settings.QueryTimeout (controller ? GetNodes).mapTo(classTag[Iterable[RoleName]]) @@ -309,9 +307,7 @@ private[akka] class ConductorHandler(_createTimeout: Timeout, controller: ActorR } } -/** - * INTERNAL API. - */ +/** INTERNAL API. */ private[akka] object ServerFSM { sealed trait State case object Initial extends State @@ -350,10 +346,9 @@ private[akka] class ServerFSM(val controller: ActorRef, val channel: Channel) case Event(ClientDisconnected, None) => stop() } - onTermination { - case _ => - controller ! ClientDisconnected(roleName) - channel.close() + onTermination { case _ => + controller ! ClientDisconnected(roleName) + channel.close() } when(Initial, stateTimeout = 10 seconds) { @@ -398,9 +393,7 @@ private[akka] class ServerFSM(val controller: ActorRef, val channel: Channel) initialize() } -/** - * INTERNAL API. - */ +/** INTERNAL API. */ private[akka] object Controller { final case class ClientDisconnected(name: RoleName) extends DeadLetterSuppression class ClientDisconnectedException(msg: String) extends AkkaException(msg) with NoStackTrace @@ -495,7 +488,7 @@ private[akka] class Controller(private var initialParticipants: Int, controllerP case GetAddress(node) => if (nodes contains node) sender() ! ToClient(AddressReply(node, nodes(node).addr)) else addrInterest += node -> ((addrInterest.get(node).getOrElse(Set())) + sender()) - case _: Done => //FIXME what should happen? + case _: Done => // FIXME what should happen? } case op: CommandOp => op match { @@ -522,9 +515,7 @@ private[akka] class Controller(private var initialParticipants: Int, controllerP } } -/** - * INTERNAL API. - */ +/** INTERNAL API. 
*/ private[akka] object BarrierCoordinator { sealed trait State case object Idle extends State diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/DataTypes.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/DataTypes.scala index 6896af7c130..67b515a4ed5 100644 --- a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/DataTypes.scala +++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/DataTypes.scala @@ -27,9 +27,7 @@ private[akka] sealed trait NetworkOp // messages sent over the wire private[akka] sealed trait UnconfirmedClientOp extends ClientOp // unconfirmed messages going to the Player private[akka] sealed trait ConfirmedClientOp extends ClientOp -/** - * First message of connection sets names straight. - */ +/** First message of connection sets names straight. */ private[akka] final case class Hello(name: String, addr: Address) extends NetworkOp private[akka] final case class EnterBarrier(name: String, timeout: Option[FiniteDuration]) diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Extension.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Extension.scala index f9c68cb7e8c..f6e5c9849cd 100644 --- a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Extension.scala +++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Extension.scala @@ -28,9 +28,7 @@ object TestConductor extends ExtensionId[TestConductorExt] with ExtensionIdProvi override def createExtension(system: ExtendedActorSystem): TestConductorExt = new TestConductorExt(system) - /** - * Java API: retrieve the TestConductor extension for the given system. - */ + /** Java API: retrieve the TestConductor extension for the given system. 
*/ override def get(system: ActorSystem): TestConductorExt = super.get(system) override def get(system: ClassicActorSystemProvider): TestConductorExt = super.get(system) @@ -51,7 +49,6 @@ object TestConductor extends ExtensionId[TestConductorExt] with ExtensionIdProvi * To use ``blackhole``, ``passThrough``, and ``throttle`` you must activate the * failure injector and throttler transport adapters by specifying `testTransport(on = true)` * in your MultiNodeConfig. - * */ class TestConductorExt(val system: ExtendedActorSystem) extends Extension with Conductor with Player { @@ -78,14 +75,10 @@ class TestConductorExt(val system: ExtendedActorSystem) extends Extension with C val ClientSocketWorkerPoolSize = computeWPS(config.getConfig("netty.client-socket-worker-pool")) } - /** - * Remote transport used by the actor ref provider. - */ + /** Remote transport used by the actor ref provider. */ val transport = system.provider.asInstanceOf[RemoteActorRefProvider].transport - /** - * Transport address of this Netty-like remote transport. - */ + /** Transport address of this Netty-like remote transport. */ val address = transport.defaultAddress } diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Player.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Player.scala index c39315d25aa..789181a4b65 100644 --- a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Player.scala +++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Player.scala @@ -53,7 +53,8 @@ object Player { def receive = { case fsm: ActorRef => waiting = sender(); fsm ! 
SubscribeTransitionCallBack(self) - case Transition(_, f: ClientFSM.State, t: ClientFSM.State) if f == Connecting && t == AwaitDone => // step 1, not there yet // // SI-5900 workaround + case Transition(_, f: ClientFSM.State, t: ClientFSM.State) + if f == Connecting && t == AwaitDone => // step 1, not there yet // // SI-5900 workaround case Transition(_, f: ClientFSM.State, t: ClientFSM.State) if f == AwaitDone && t == Connected => // SI-5900 workaround waiting ! Done; context.stop(self) @@ -136,18 +137,14 @@ trait Player { this: TestConductorExt => } } - /** - * Query remote transport address of named node. - */ + /** Query remote transport address of named node. */ def getAddressFor(name: RoleName): Future[Address] = { import Settings.QueryTimeout (client ? ToServer(GetAddress(name))).mapTo(classTag[Address]) } } -/** - * INTERNAL API. - */ +/** INTERNAL API. */ private[akka] object ClientFSM { sealed trait State case object Connecting extends State @@ -281,8 +278,9 @@ private[akka] class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress) cmdFuture.foreach { case true => self ! ToServer(Done) case _ => - throw new RuntimeException("Throttle was requested from the TestConductor, but no transport " + - "adapters available that support throttling. Specify `testTransport(on = true)` in your MultiNodeConfig") + throw new RuntimeException( + "Throttle was requested from the TestConductor, but no transport " + + "adapters available that support throttling. Specify `testTransport(on = true)` in your MultiNodeConfig") } stay() case _: DisconnectMsg => @@ -297,7 +295,7 @@ private[akka] class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress) case TerminateMsg(Right(exitValue)) => System.exit(exitValue) stay() // needed because Java doesn’t have Nothing - case _: Done => stay() //FIXME what should happen? + case _: Done => stay() // FIXME what should happen? 
} } @@ -309,15 +307,14 @@ private[akka] class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress) stay() } - onTermination { - case StopEvent(_, _, Data(Some(channel), _)) => - try { - channel.close() - } catch { - case NonFatal(ex) => - // silence this one to not make tests look like they failed, it's not really critical - log.debug(s"Failed closing channel with ${ex.getClass.getName} ${ex.getMessage}") - } + onTermination { case StopEvent(_, _, Data(Some(channel), _)) => + try { + channel.close() + } catch { + case NonFatal(ex) => + // silence this one to not make tests look like they failed, it's not really critical + log.debug(s"Failed closing channel with ${ex.getClass.getName} ${ex.getMessage}") + } } initialize() diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/RemoteConnection.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/RemoteConnection.scala index 81754133ca9..172e69f22e3 100644 --- a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/RemoteConnection.scala +++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/RemoteConnection.scala @@ -30,9 +30,7 @@ import java.net.InetSocketAddress import java.util.concurrent.TimeUnit import scala.util.control.NonFatal -/** - * INTERNAL API. - */ +/** INTERNAL API. */ private[akka] class ProtobufEncoder extends MessageToMessageEncoder[Message] { override def encode(ctx: ChannelHandlerContext, msg: Message, out: java.util.List[AnyRef]): Unit = { @@ -44,9 +42,7 @@ private[akka] class ProtobufEncoder extends MessageToMessageEncoder[Message] { } } -/** - * INTERNAL API. - */ +/** INTERNAL API. */ private[akka] class ProtobufDecoder(prototype: Message) extends MessageToMessageDecoder[ByteBuf] { override def decode(ctx: ChannelHandlerContext, msg: ByteBuf, out: java.util.List[AnyRef]): Unit = { @@ -55,9 +51,7 @@ private[akka] class ProtobufDecoder(prototype: Message) extends MessageToMessage } } -/** - * INTERNAL API. 
- */ +/** INTERNAL API. */ @Sharable private[akka] class TestConductorPipelineFactory(handler: ChannelInboundHandler) extends ChannelInitializer[SocketChannel] { @@ -74,40 +68,26 @@ private[akka] class TestConductorPipelineFactory(handler: ChannelInboundHandler) } } -/** - * INTERNAL API. - */ +/** INTERNAL API. */ private[akka] sealed trait Role -/** - * INTERNAL API. - */ +/** INTERNAL API. */ private[akka] case object Client extends Role -/** - * INTERNAL API. - */ +/** INTERNAL API. */ private[akka] case object Server extends Role -/** - * INTERNAL API. - */ +/** INTERNAL API. */ private[akka] trait RemoteConnection { - /** - * The channel future associated with this connection. - */ + /** The channel future associated with this connection. */ def channelFuture: ChannelFuture - /** - * Shutdown the connection and release the resources. - */ + /** Shutdown the connection and release the resources. */ def shutdown(): Unit } -/** - * INTERNAL API. - */ +/** INTERNAL API. */ private[akka] object RemoteConnection { def apply( role: Role, diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testkit/MultiNodeSpec.scala index 12f97560993..b2d1a400201 100644 --- a/akka-multi-node-testkit/src/main/scala/akka/remote/testkit/MultiNodeSpec.scala +++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -31,9 +31,7 @@ import scala.concurrent.Awaitable import scala.language.implicitConversions import scala.util.control.NonFatal -/** - * Configure the role names and participants of the test, including configuration settings. - */ +/** Configure the role names and participants of the test, including configuration settings. 
*/ @ccompatUsedUntil213 abstract class MultiNodeConfig { @@ -44,14 +42,10 @@ abstract class MultiNodeConfig { private var _allDeploy = Vector[String]() private var _testTransport = false - /** - * Register a common base config for all test participants, if so desired. - */ + /** Register a common base config for all test participants, if so desired. */ def commonConfig(config: Config): Unit = _commonConf = Some(config) - /** - * Register a config override for a specific participant. - */ + /** Register a config override for a specific participant. */ def nodeConfig(roles: RoleName*)(configs: Config*): Unit = { val c = configs.reduceLeft(_.withFallback(_)) _nodeConf ++= roles.map { _ -> c } @@ -118,8 +112,8 @@ abstract class MultiNodeConfig { else ConfigFactory.empty val configs = _nodeConf - .get(myself) - .toList ::: _commonConf.toList ::: transportConfig :: MultiNodeSpec.nodeConfig :: MultiNodeSpec.baseConfig :: Nil + .get(myself) + .toList ::: _commonConf.toList ::: transportConfig :: MultiNodeSpec.nodeConfig :: MultiNodeSpec.baseConfig :: Nil configs.reduceLeft(_.withFallback(_)) } @@ -277,9 +271,11 @@ object MultiNodeSpec { // are exposed in kubernetes def configureNextPortIfFixed(config: Config): Config = { val arteryPortConfig = getNextPortString("akka.remote.artery.canonical.port", config) - ConfigFactory.parseString(s"""{ + ConfigFactory + .parseString(s"""{ $arteryPortConfig - }""").withFallback(config) + }""") + .withFallback(config) } private def getNextPortString(key: String, config: Config): String = { @@ -317,18 +313,19 @@ abstract class MultiNodeSpec( this(config.myself, actorSystemCreator(ConfigFactory.load(config.config)), config.roles, config.deployments) def this(config: MultiNodeConfig) = - this(config, { - val name = TestKitUtils.testNameFromCallStack(classOf[MultiNodeSpec], "".r) - config => - try { - ActorSystem(name, config) - } catch { - // Retry creating the system once as when using port = 0 two systems may try and use the same one. 
- // RTE is for aeron, CE for netty - case _: RemoteTransportException => ActorSystem(name, config) - case _: ChannelException => ActorSystem(name, config) - } - }) + this( + config, { + val name = TestKitUtils.testNameFromCallStack(classOf[MultiNodeSpec], "".r) + config => + try { + ActorSystem(name, config) + } catch { + // Retry creating the system once as when using port = 0 two systems may try and use the same one. + // RTE is for aeron, CE for netty + case _: RemoteTransportException => ActorSystem(name, config) + case _: ChannelException => ActorSystem(name, config) + } + }) val log: LoggingAdapter = Logging(system, this)(_.getClass.getName) @@ -350,10 +347,12 @@ abstract class MultiNodeSpec( if (selfIndex == 0) { testConductor.removeNode(myself) within(testConductor.Settings.BarrierTimeout.duration) { - awaitCond({ - // Await.result(testConductor.getNodes, remaining).filterNot(_ == myself).isEmpty - testConductor.getNodes.await.forall(_ == myself) - }, message = s"Nodes not shutdown: ${testConductor.getNodes.await}") + awaitCond( + { + // Await.result(testConductor.getNodes, remaining).filterNot(_ == myself).isEmpty + testConductor.getNodes.await.forall(_ == myself) + }, + message = s"Nodes not shutdown: ${testConductor.getNodes.await}") } } shutdown(system, duration = shutdownTimeout) @@ -372,19 +371,13 @@ abstract class MultiNodeSpec( * Test Class Interface */ - /** - * Override this method to do something when the whole test is starting up. - */ + /** Override this method to do something when the whole test is starting up. */ protected def atStartup(): Unit = () - /** - * Override this method to do something when the whole test is terminating. - */ + /** Override this method to do something when the whole test is terminating. 
*/ protected def afterTermination(): Unit = () - /** - * All registered roles - */ + /** All registered roles */ def roles: immutable.Seq[RoleName] = _roles /** @@ -419,9 +412,7 @@ abstract class MultiNodeSpec( } } - /** - * Verify that the running node matches one of the given nodes - */ + /** Verify that the running node matches one of the given nodes */ def isNode(nodes: RoleName*): Boolean = nodes contains myself /** @@ -493,12 +484,12 @@ abstract class MultiNodeSpec( protected def injectDeployments(sys: ActorSystem, role: RoleName): Unit = { val deployer = sys.asInstanceOf[ExtendedActorSystem].provider.deployer deployments(role).foreach { str => - val deployString = replacements.foldLeft(str) { - case (base, r @ Replacement(tag, _)) => - base.indexOf(tag) match { - case -1 => base - case _ => - val replaceWith = try r.addr + val deployString = replacements.foldLeft(str) { case (base, r @ Replacement(tag, _)) => + base.indexOf(tag) match { + case -1 => base + case _ => + val replaceWith = + try r.addr catch { case NonFatal(e) => // might happen if all test cases are ignored (excluded) and @@ -508,8 +499,8 @@ abstract class MultiNodeSpec( log.warning(unresolved + " due to: " + e.getMessage) unresolved } - base.replace(tag, replaceWith) - } + base.replace(tag, replaceWith) + } } import akka.util.ccompat.JavaConverters._ ConfigFactory.parseString(deployString).root.asScala.foreach { @@ -565,13 +556,9 @@ abstract class MultiNodeSpec( */ trait MultiNodeSpecCallbacks { - /** - * Call this before the start of the test run. NOT before every test case. - */ + /** Call this before the start of the test run. NOT before every test case. */ def multiNodeSpecBeforeAll(): Unit - /** - * Call this after the all test cases have run. NOT after every test case. - */ + /** Call this after the all test cases have run. NOT after every test case. 
*/ def multiNodeSpecAfterAll(): Unit } diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testkit/PerfFlamesSupport.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testkit/PerfFlamesSupport.scala index 6c4c2007bc2..58b481249a8 100644 --- a/akka-multi-node-testkit/src/main/scala/akka/remote/testkit/PerfFlamesSupport.scala +++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testkit/PerfFlamesSupport.scala @@ -11,9 +11,7 @@ import scala.concurrent.duration._ import akka.remote.testconductor.RoleName -/** - * INTERNAL API: Support trait allowing trivially recording perf metrics from [[MultiNodeSpec]]s - */ +/** INTERNAL API: Support trait allowing trivially recording perf metrics from [[MultiNodeSpec]]s */ private[akka] trait PerfFlamesSupport { self: MultiNodeSpec => /** diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/DurableStateChange.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/DurableStateChange.scala index 06902cdbee7..ea750203c37 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/DurableStateChange.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/DurableStateChange.scala @@ -18,14 +18,10 @@ import akka.annotation.DoNotInherit @DoNotInherit sealed trait DurableStateChange[A] { - /** - * The persistence id of the origin entity. - */ + /** The persistence id of the origin entity. */ def persistenceId: String - /** - * The offset that can be used in next `changes` or `currentChanges` query. - */ + /** The offset that can be used in next `changes` or `currentChanges` query. */ def offset: Offset } @@ -36,7 +32,6 @@ object UpdatedDurableState { } /** - * * @param persistenceId The persistence id of the origin entity. * @param revision The revision number from the origin entity. * @param value The object value. @@ -60,7 +55,6 @@ object DeletedDurableState { } /** - * * @param persistenceId The persistence id of the origin entity. 
* @param revision The revision number from the origin entity. * @param offset The offset that can be used in next `changes` or `currentChanges` query. diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/EventEnvelope.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/EventEnvelope.scala index 5cb19249d88..1376789ecbe 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/EventEnvelope.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/EventEnvelope.scala @@ -59,9 +59,7 @@ final class EventEnvelope( def this(offset: Offset, persistenceId: String, sequenceNr: Long, event: Any, timestamp: Long) = this(offset, persistenceId, sequenceNr, event, timestamp, None) - /** - * Java API - */ + /** Java API */ def getEventMetaData(): Optional[Any] = { import scala.compat.java8.OptionConverters._ eventMetadata.asJava diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/Offset.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/Offset.scala index 08eac8166ae..74d321e47cd 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/Offset.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/Offset.scala @@ -60,9 +60,7 @@ object TimestampOffset { def apply(timestamp: Instant, readTimestamp: Instant, seen: Map[String, Long]): TimestampOffset = new TimestampOffset(timestamp, readTimestamp, seen) - /** - * Try to convert the Offset to a TimestampOffset. Epoch timestamp is used for `NoOffset`. - */ + /** Try to convert the Offset to a TimestampOffset. Epoch timestamp is used for `NoOffset`. */ def toTimestampOffset(offset: Offset): TimestampOffset = { offset match { case t: TimestampOffset => t @@ -114,13 +112,9 @@ final class TimestampOffset private (val timestamp: Instant, val readTimestamp: } } -/** - * Used when retrieving all events. - */ +/** Used when retrieving all events. 
*/ case object NoOffset extends Offset { - /** - * Java API: - */ + /** Java API: */ def getInstance: Offset = this } diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/PersistenceQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/PersistenceQuery.scala index 234144a2694..5d4559b0b03 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/PersistenceQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/PersistenceQuery.scala @@ -14,9 +14,7 @@ import akka.persistence.{ PersistencePlugin, PluginProvider } import akka.persistence.query.scaladsl.ReadJournal import akka.util.unused -/** - * Persistence extension for queries. - */ +/** Persistence extension for queries. */ object PersistenceQuery extends ExtensionId[PersistenceQuery] with ExtensionIdProvider { override def get(system: ActorSystem): PersistenceQuery = super.get(system) diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/ReadJournalProvider.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/ReadJournalProvider.scala index c81aebb8a58..e8988ecdd94 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/ReadJournalProvider.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/ReadJournalProvider.scala @@ -15,7 +15,6 @@ package akka.persistence.query * and even though those types can easily be converted to each other it is most convenient * for the end user to get access to the Java or Scala `Source` directly. * One of the implementations can delegate to the other. 
- * */ trait ReadJournalProvider { diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/internal/QuerySerializer.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/internal/QuerySerializer.scala index 0ed52bc7cbb..68d4e97d2f3 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/internal/QuerySerializer.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/internal/QuerySerializer.scala @@ -228,10 +228,9 @@ import akka.util.ccompat.JavaConverters._ val seqNr = offset.seen.head._2 str.append(timestampOffsetSeparator).append(pid).append(timestampOffsetSeparator).append(seqNr) } else if (offset.seen.nonEmpty) { - offset.seen.toList.sortBy(_._1).foreach { - case (pid, seqNr) => - checkSeparator(pid) - str.append(timestampOffsetSeparator).append(pid).append(timestampOffsetSeparator).append(seqNr) + offset.seen.toList.sortBy(_._1).foreach { case (pid, seqNr) => + checkSeparator(pid) + str.append(timestampOffsetSeparator).append(pid).append(timestampOffsetSeparator).append(seqNr) } } str.toString diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/CurrentEventsByPersistenceIdQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/CurrentEventsByPersistenceIdQuery.scala index 7cb3e74392b..1694d4cd188 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/CurrentEventsByPersistenceIdQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/CurrentEventsByPersistenceIdQuery.scala @@ -8,9 +8,7 @@ import akka.NotUsed import akka.persistence.query.EventEnvelope import akka.stream.javadsl.Source -/** - * A plugin may optionally support this query by implementing this interface. - */ +/** A plugin may optionally support this query by implementing this interface. 
*/ trait CurrentEventsByPersistenceIdQuery extends ReadJournal { /** diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/CurrentEventsByTagQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/CurrentEventsByTagQuery.scala index a14e360475e..691fd3b25db 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/CurrentEventsByTagQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/CurrentEventsByTagQuery.scala @@ -8,9 +8,7 @@ import akka.NotUsed import akka.persistence.query.{ EventEnvelope, Offset } import akka.stream.javadsl.Source -/** - * A plugin may optionally support this query by implementing this interface. - */ +/** A plugin may optionally support this query by implementing this interface. */ trait CurrentEventsByTagQuery extends ReadJournal { /** diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/CurrentPersistenceIdsQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/CurrentPersistenceIdsQuery.scala index 3999e5e0390..ab621633fbd 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/CurrentPersistenceIdsQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/CurrentPersistenceIdsQuery.scala @@ -7,9 +7,7 @@ package akka.persistence.query.javadsl import akka.NotUsed import akka.stream.javadsl.Source -/** - * A plugin may optionally support this query by implementing this interface. - */ +/** A plugin may optionally support this query by implementing this interface. 
*/ trait CurrentPersistenceIdsQuery extends ReadJournal { /** diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/DurableStateStorePagedPersistenceIdsQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/DurableStateStorePagedPersistenceIdsQuery.scala index fc40246925a..8a8385c2907 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/DurableStateStorePagedPersistenceIdsQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/DurableStateStorePagedPersistenceIdsQuery.scala @@ -10,9 +10,7 @@ import akka.NotUsed import akka.persistence.state.javadsl.DurableStateStore import akka.stream.javadsl.Source -/** - * A DurableStateStore may optionally support this query by implementing this trait. - */ +/** A DurableStateStore may optionally support this query by implementing this trait. */ trait DurableStateStorePagedPersistenceIdsQuery[A] extends DurableStateStore[A] { /** diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/EventsByPersistenceIdQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/EventsByPersistenceIdQuery.scala index 828d1936455..4d9901927f4 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/EventsByPersistenceIdQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/EventsByPersistenceIdQuery.scala @@ -8,9 +8,7 @@ import akka.NotUsed import akka.persistence.query.EventEnvelope import akka.stream.javadsl.Source -/** - * A plugin may optionally support this query by implementing this interface. - */ +/** A plugin may optionally support this query by implementing this interface. 
*/ trait EventsByPersistenceIdQuery extends ReadJournal { /** diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/EventsByTagQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/EventsByTagQuery.scala index 0d2bbd9202b..3f9ca0c884d 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/EventsByTagQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/EventsByTagQuery.scala @@ -8,9 +8,7 @@ import akka.NotUsed import akka.persistence.query.{ EventEnvelope, Offset } import akka.stream.javadsl.Source -/** - * A plugin may optionally support this query by implementing this interface. - */ +/** A plugin may optionally support this query by implementing this interface. */ trait EventsByTagQuery extends ReadJournal { /** diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/PagedPersistenceIdsQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/PagedPersistenceIdsQuery.scala index bd200dfa063..ec17dd022c5 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/PagedPersistenceIdsQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/PagedPersistenceIdsQuery.scala @@ -9,9 +9,7 @@ import java.util.Optional import akka.NotUsed import akka.stream.javadsl.Source -/** - * A ReadJournal may optionally support this query by implementing this trait. - */ +/** A ReadJournal may optionally support this query by implementing this trait. 
*/ trait PagedPersistenceIdsQuery extends ReadJournal { /** diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/PersistenceIdsQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/PersistenceIdsQuery.scala index b87070fafd6..c80bece5c1b 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/PersistenceIdsQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/PersistenceIdsQuery.scala @@ -7,9 +7,7 @@ package akka.persistence.query.javadsl import akka.NotUsed import akka.stream.javadsl.Source -/** - * A plugin may optionally support this query by implementing this interface. - */ +/** A plugin may optionally support this query by implementing this interface. */ trait PersistenceIdsQuery extends ReadJournal { /** diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/AllPersistenceIdsStage.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/AllPersistenceIdsStage.scala index 4f924747184..e76ed02d7a5 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/AllPersistenceIdsStage.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/AllPersistenceIdsStage.scala @@ -17,9 +17,7 @@ import akka.stream.stage.GraphStageLogic import akka.stream.stage.OutHandler import akka.stream.stage.TimerGraphStageLogicWithLogging -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi final private[akka] class AllPersistenceIdsStage(liveQuery: Boolean, writeJournalPluginId: String, mat: Materializer) extends GraphStage[SourceShape[String]] { diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/Buffer.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/Buffer.scala index 011d1016558..65d610c19a0 100644 --- 
a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/Buffer.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/Buffer.scala @@ -11,9 +11,7 @@ import akka.stream.Outlet import akka.stream.stage.GraphStageLogic import akka.util.ccompat.JavaConverters._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[leveldb] abstract trait Buffer[T] { self: GraphStageLogic => diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByPersistenceIdStage.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByPersistenceIdStage.scala index c3126a56f8e..97b8f4c88cf 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByPersistenceIdStage.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByPersistenceIdStage.scala @@ -27,17 +27,13 @@ import akka.stream.stage.GraphStageLogic import akka.stream.stage.OutHandler import akka.stream.stage.TimerGraphStageLogicWithLogging -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object EventsByPersistenceIdStage { case object Continue } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi final private[akka] class EventsByPersistenceIdStage( persistenceId: String, diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByTagStage.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByTagStage.scala index fa9d9bc1836..e3e6ce7dd34 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByTagStage.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByTagStage.scala @@ -27,17 +27,13 @@ import akka.stream.stage.GraphStageLogic import akka.stream.stage.OutHandler import akka.stream.stage.TimerGraphStageLogicWithLogging -/** 
- * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object EventsByTagStage { case object Continue } -/** - * INTERNAL API - */ +/** INTERNAL API */ final private[leveldb] class EventsByTagStage( tag: String, fromOffset: Long, diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/javadsl/LeveldbReadJournal.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/javadsl/LeveldbReadJournal.scala index c6f6ba34f5d..1a30f4b997c 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/javadsl/LeveldbReadJournal.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/javadsl/LeveldbReadJournal.scala @@ -23,7 +23,6 @@ import akka.stream.javadsl.Source * Configuration settings can be defined in the configuration section with the * absolute path corresponding to the identifier, which is `"akka.persistence.query.journal.leveldb"` * for the default [[LeveldbReadJournal#Identifier]]. See `reference.conf`. 
- * */ @deprecated("Use another journal implementation", "2.6.15") class LeveldbReadJournal(scaladslReadJournal: akka.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal) diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/scaladsl/LeveldbReadJournal.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/scaladsl/LeveldbReadJournal.scala index 2185bfd943b..cd84db7023c 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/scaladsl/LeveldbReadJournal.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/scaladsl/LeveldbReadJournal.scala @@ -236,7 +236,7 @@ class LeveldbReadJournal(system: ExtendedActorSystem, config: Config) mat)) .named("eventsByTag-" + URLEncoder.encode(tag, ByteString.UTF_8)) - case NoOffset => eventsByTag(tag, Sequence(0L)) //recursive + case NoOffset => eventsByTag(tag, Sequence(0L)) // recursive case _ => throw new IllegalArgumentException( "LevelDB does not support " + Logging.simpleName(offset.getClass) + " offsets") diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/CurrentEventsByPersistenceIdQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/CurrentEventsByPersistenceIdQuery.scala index 48006b47eaf..0a91726f229 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/CurrentEventsByPersistenceIdQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/CurrentEventsByPersistenceIdQuery.scala @@ -8,9 +8,7 @@ import akka.NotUsed import akka.persistence.query.EventEnvelope import akka.stream.scaladsl.Source -/** - * A plugin may optionally support this query by implementing this trait. - */ +/** A plugin may optionally support this query by implementing this trait. 
*/ trait CurrentEventsByPersistenceIdQuery extends ReadJournal { /** diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/CurrentEventsByTagQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/CurrentEventsByTagQuery.scala index f279ec3e58f..125b3d4ac05 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/CurrentEventsByTagQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/CurrentEventsByTagQuery.scala @@ -8,9 +8,7 @@ import akka.NotUsed import akka.persistence.query.{ EventEnvelope, Offset } import akka.stream.scaladsl.Source -/** - * A plugin may optionally support this query by implementing this trait. - */ +/** A plugin may optionally support this query by implementing this trait. */ trait CurrentEventsByTagQuery extends ReadJournal { /** diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/CurrentPersistenceIdsQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/CurrentPersistenceIdsQuery.scala index 485a1a8eabf..5302d940ab6 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/CurrentPersistenceIdsQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/CurrentPersistenceIdsQuery.scala @@ -7,9 +7,7 @@ package akka.persistence.query.scaladsl import akka.NotUsed import akka.stream.scaladsl.Source -/** - * A plugin may optionally support this query by implementing this trait. - */ +/** A plugin may optionally support this query by implementing this trait. 
*/ trait CurrentPersistenceIdsQuery extends ReadJournal { /** diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/DurableStateStorePagedPersistenceIdsQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/DurableStateStorePagedPersistenceIdsQuery.scala index ec754988e01..3d79b690a35 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/DurableStateStorePagedPersistenceIdsQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/DurableStateStorePagedPersistenceIdsQuery.scala @@ -8,9 +8,7 @@ import akka.NotUsed import akka.persistence.state.scaladsl.DurableStateStore import akka.stream.scaladsl.Source -/** - * A DurableStateStore may optionally support this query by implementing this trait. - */ +/** A DurableStateStore may optionally support this query by implementing this trait. */ trait DurableStateStorePagedPersistenceIdsQuery[A] extends DurableStateStore[A] { /** diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/EventsByPersistenceIdQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/EventsByPersistenceIdQuery.scala index 63a045b2e31..af32af1134c 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/EventsByPersistenceIdQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/EventsByPersistenceIdQuery.scala @@ -8,9 +8,7 @@ import akka.NotUsed import akka.persistence.query.EventEnvelope import akka.stream.scaladsl.Source -/** - * A plugin may optionally support this query by implementing this trait. - */ +/** A plugin may optionally support this query by implementing this trait. 
*/ trait EventsByPersistenceIdQuery extends ReadJournal { /** diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/EventsByTagQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/EventsByTagQuery.scala index bd1c7bfc846..75d982b98f6 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/EventsByTagQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/EventsByTagQuery.scala @@ -8,9 +8,7 @@ import akka.NotUsed import akka.persistence.query.{ EventEnvelope, Offset } import akka.stream.scaladsl.Source -/** - * A plugin may optionally support this query by implementing this trait. - */ +/** A plugin may optionally support this query by implementing this trait. */ trait EventsByTagQuery extends ReadJournal { /** diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/PagedPersistenceIdsQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/PagedPersistenceIdsQuery.scala index 1ceb3f811c9..96cf9a22318 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/PagedPersistenceIdsQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/PagedPersistenceIdsQuery.scala @@ -7,9 +7,7 @@ package akka.persistence.query.scaladsl import akka.NotUsed import akka.stream.scaladsl.Source -/** - * A plugin ReadJournal may optionally support this query by implementing this trait. - */ +/** A plugin ReadJournal may optionally support this query by implementing this trait. 
*/ trait PagedPersistenceIdsQuery extends ReadJournal { /** diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/PersistenceIdsQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/PersistenceIdsQuery.scala index 4c16f1b37d3..9313c891e4f 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/PersistenceIdsQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/PersistenceIdsQuery.scala @@ -7,9 +7,7 @@ package akka.persistence.query.scaladsl import akka.NotUsed import akka.stream.scaladsl.Source -/** - * A plugin may optionally support this query by implementing this trait. - */ +/** A plugin may optionally support this query by implementing this trait. */ trait PersistenceIdsQuery extends ReadJournal { /** diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/EventEnvelope.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/EventEnvelope.scala index 1a10a109857..269f6949f78 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/EventEnvelope.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/EventEnvelope.scala @@ -191,9 +191,7 @@ final class EventEnvelope[Event]( } } - /** - * Java API - */ + /** Java API */ def getEvent(): Event = eventOption match { case Some(evt) => evt @@ -207,25 +205,19 @@ final class EventEnvelope[Event]( } } - /** - * Java API - */ + /** Java API */ def getOptionalEvent(): Optional[Event] = { import scala.compat.java8.OptionConverters._ eventOption.asJava } - /** - * Java API - */ + /** Java API */ def getEventMetaData(): Optional[AnyRef] = { import scala.compat.java8.OptionConverters._ eventMetadata.map(_.asInstanceOf[AnyRef]).asJava } - /** - * Java API: - */ + /** Java API: */ def getTags(): JSet[String] = tags.asJava /** @@ -304,6 +296,6 @@ final class EventEnvelope[Event]( case None => "" } 
s"EventEnvelope($offset,$persistenceId,$sequenceNr,$eventStr,$timestamp,$metaStr,$entityType,$slice,$filtered,$source,${tags - .mkString("[", ", ", "]")})" + .mkString("[", ", ", "]")})" } } diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/internal/EventsBySliceFirehose.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/internal/EventsBySliceFirehose.scala index c4fd653848b..cbd9c6d2f4a 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/internal/EventsBySliceFirehose.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/internal/EventsBySliceFirehose.scala @@ -220,7 +220,8 @@ import akka.util.unused val fastestConsumer = trackingValues.maxBy(_.offsetTimestamp) val behind = elementsBehind(fastestConsumer.history, slowestConsumer.history) if (behind > 0) { - val diffSlowestFastestsMillis = fastestConsumer.offsetTimestamp.toEpochMilli - slowestConsumer.offsetTimestamp.toEpochMilli + val diffSlowestFastestsMillis = + fastestConsumer.offsetTimestamp.toEpochMilli - slowestConsumer.offsetTimestamp.toEpochMilli val fastestLagMillis = now.toEpochMilli - fastestConsumer.offsetTimestamp.toEpochMilli val diffFastest = fastestConsumer.offsetTimestamp.toEpochMilli - tracking.offsetTimestamp.toEpochMilli @@ -288,9 +289,9 @@ import akka.util.unused if (confirmedSlowConsumers.nonEmpty) { if (log.isInfoEnabled) { val behindMillis = fastestConsumer.offsetTimestamp.toEpochMilli - confirmedSlowConsumers - .maxBy(_.offsetTimestamp) - .offsetTimestamp - .toEpochMilli + .maxBy(_.offsetTimestamp) + .offsetTimestamp + .toEpochMilli log.info( s"Firehose entityType [$entityType] sliceRange [$sliceRangeStr], [${confirmedSlowConsumers.size}] " + s"slow consumers are aborted [${confirmedSlowConsumers.map(_.consumerId).mkString(", ")}], " + @@ -345,7 +346,8 @@ import akka.util.unused else "same as slowest" val consumerBehind = elementsBehind(fastestConsumer.history, tracking.history) - val 
logMessage = s"Firehose entityType [$entityType] sliceRange [$sliceRangeStr] consumer [${tracking.consumerId}], " + + val logMessage = + s"Firehose entityType [$entityType] sliceRange [$sliceRangeStr] consumer [${tracking.consumerId}], " + s"behind [$consumerBehind] events from fastest, " + s"$diffFastestStr, $diffSlowestStr, firehoseOnly [${tracking.firehoseOnly}]" @@ -394,9 +396,7 @@ import akka.util.unused JDuration.between(from, to).compareTo(duration) > 0 } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class EventsBySliceFirehose(system: ActorSystem) extends Extension { import EventsBySliceFirehose._ private val log = Logging(system, classOf[EventsBySliceFirehose]) @@ -568,9 +568,7 @@ import akka.util.unused } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object CatchupOrFirehose { private sealed trait Mode private case object CatchUpOnly extends Mode @@ -580,9 +578,7 @@ import akka.util.unused private case class DeduplicationCacheEntry(pid: String, seqNr: Long, source: String) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class CatchupOrFirehose( consumerId: String, firehose: EventsBySliceFirehose.Firehose, @@ -620,12 +616,14 @@ import akka.util.unused override protected def logSource: Class[_] = classOf[CatchupOrFirehose] - setHandler(out, new OutHandler { - override def onPull(): Unit = { - tryPushOutput() - tryPullAllIfNeeded() - } - }) + setHandler( + out, + new OutHandler { + override def onPull(): Unit = { + tryPushOutput() + tryPullAllIfNeeded() + } + }) setHandler(firehoseInlet, firehoseHandler) setHandler(catchupInlet, catchupHandler) diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/javadsl/CurrentEventsByPersistenceIdStartingFromSnapshotQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/javadsl/CurrentEventsByPersistenceIdStartingFromSnapshotQuery.scala index 2ca97a05eea..31371a8ee4e 100644 --- 
a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/javadsl/CurrentEventsByPersistenceIdStartingFromSnapshotQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/javadsl/CurrentEventsByPersistenceIdStartingFromSnapshotQuery.scala @@ -10,9 +10,7 @@ import akka.persistence.query.javadsl.ReadJournal import akka.persistence.query.typed.EventEnvelope import akka.stream.javadsl.Source -/** - * A plugin may optionally support this query by implementing this trait. - */ +/** A plugin may optionally support this query by implementing this trait. */ @ApiMayChange trait CurrentEventsByPersistenceIdStartingFromSnapshotQuery extends ReadJournal { diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/javadsl/CurrentEventsByPersistenceIdTypedQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/javadsl/CurrentEventsByPersistenceIdTypedQuery.scala index 428b8dda415..5634d50e21e 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/javadsl/CurrentEventsByPersistenceIdTypedQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/javadsl/CurrentEventsByPersistenceIdTypedQuery.scala @@ -9,9 +9,7 @@ import akka.persistence.query.javadsl.ReadJournal import akka.persistence.query.typed.EventEnvelope import akka.stream.javadsl.Source -/** - * A plugin may optionally support this query by implementing this trait. - */ +/** A plugin may optionally support this query by implementing this trait. 
*/ trait CurrentEventsByPersistenceIdTypedQuery extends ReadJournal { /** diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/javadsl/CurrentEventsBySliceQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/javadsl/CurrentEventsBySliceQuery.scala index 4b410ba53ce..2572933588c 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/javadsl/CurrentEventsBySliceQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/javadsl/CurrentEventsBySliceQuery.scala @@ -11,9 +11,7 @@ import akka.persistence.query.javadsl.ReadJournal import akka.persistence.query.typed.EventEnvelope import akka.stream.javadsl.Source -/** - * A plugin may optionally support this query by implementing this trait. - */ +/** A plugin may optionally support this query by implementing this trait. */ trait CurrentEventsBySliceQuery extends ReadJournal { /** diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/javadsl/EventTimestampQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/javadsl/EventTimestampQuery.scala index 2fbc4647b83..d0924e569fc 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/javadsl/EventTimestampQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/javadsl/EventTimestampQuery.scala @@ -10,9 +10,7 @@ import java.util.concurrent.CompletionStage import akka.persistence.query.javadsl.ReadJournal -/** - * [[EventsBySliceQuery]] that is using a timestamp based offset should also implement this query. - */ +/** [[EventsBySliceQuery]] that is using a timestamp based offset should also implement this query. 
*/ trait EventTimestampQuery extends ReadJournal { def timestampOf(persistenceId: String, sequenceNr: Long): CompletionStage[Optional[Instant]] diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/javadsl/EventsByPersistenceIdStartingFromSnapshotQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/javadsl/EventsByPersistenceIdStartingFromSnapshotQuery.scala index c1070de325f..ab28405f41e 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/javadsl/EventsByPersistenceIdStartingFromSnapshotQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/javadsl/EventsByPersistenceIdStartingFromSnapshotQuery.scala @@ -10,9 +10,7 @@ import akka.persistence.query.javadsl.ReadJournal import akka.persistence.query.typed.EventEnvelope import akka.stream.javadsl.Source -/** - * A plugin may optionally support this query by implementing this trait. - */ +/** A plugin may optionally support this query by implementing this trait. */ @ApiMayChange trait EventsByPersistenceIdStartingFromSnapshotQuery extends ReadJournal { diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/javadsl/EventsByPersistenceIdTypedQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/javadsl/EventsByPersistenceIdTypedQuery.scala index b14715fffe8..2c780b35bcf 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/javadsl/EventsByPersistenceIdTypedQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/javadsl/EventsByPersistenceIdTypedQuery.scala @@ -9,9 +9,7 @@ import akka.persistence.query.javadsl.ReadJournal import akka.persistence.query.typed.EventEnvelope import akka.stream.javadsl.Source -/** - * A plugin may optionally support this query by implementing this trait. - */ +/** A plugin may optionally support this query by implementing this trait. 
*/ trait EventsByPersistenceIdTypedQuery extends ReadJournal { /** diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/javadsl/LoadEventQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/javadsl/LoadEventQuery.scala index f2a78d0a14d..8c11033778c 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/javadsl/LoadEventQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/javadsl/LoadEventQuery.scala @@ -9,9 +9,7 @@ import java.util.concurrent.CompletionStage import akka.persistence.query.javadsl.ReadJournal import akka.persistence.query.typed.EventEnvelope -/** - * [[EventsBySliceQuery]] that is using a timestamp based offset should also implement this query. - */ +/** [[EventsBySliceQuery]] that is using a timestamp based offset should also implement this query. */ trait LoadEventQuery extends ReadJournal { /** diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/scaladsl/CurrentEventsByPersistenceIdStartingFromSnapshotQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/scaladsl/CurrentEventsByPersistenceIdStartingFromSnapshotQuery.scala index 3e3bb3cdb3a..543b1716e60 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/scaladsl/CurrentEventsByPersistenceIdStartingFromSnapshotQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/scaladsl/CurrentEventsByPersistenceIdStartingFromSnapshotQuery.scala @@ -10,9 +10,7 @@ import akka.persistence.query.scaladsl.ReadJournal import akka.persistence.query.typed.EventEnvelope import akka.stream.scaladsl.Source -/** - * A plugin may optionally support this query by implementing this trait. - */ +/** A plugin may optionally support this query by implementing this trait. 
*/ @ApiMayChange trait CurrentEventsByPersistenceIdStartingFromSnapshotQuery extends ReadJournal { diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/scaladsl/CurrentEventsByPersistenceIdTypedQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/scaladsl/CurrentEventsByPersistenceIdTypedQuery.scala index f8a90202945..a694b0e7db3 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/scaladsl/CurrentEventsByPersistenceIdTypedQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/scaladsl/CurrentEventsByPersistenceIdTypedQuery.scala @@ -9,9 +9,7 @@ import akka.persistence.query.scaladsl.ReadJournal import akka.persistence.query.typed.EventEnvelope import akka.stream.scaladsl.Source -/** - * A plugin may optionally support this query by implementing this trait. - */ +/** A plugin may optionally support this query by implementing this trait. */ trait CurrentEventsByPersistenceIdTypedQuery extends ReadJournal { /** diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/scaladsl/CurrentEventsBySliceQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/scaladsl/CurrentEventsBySliceQuery.scala index e87e060e9a9..1ad2b1ed167 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/scaladsl/CurrentEventsBySliceQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/scaladsl/CurrentEventsBySliceQuery.scala @@ -12,9 +12,7 @@ import akka.persistence.query.scaladsl.ReadJournal import akka.persistence.query.typed.EventEnvelope import akka.stream.scaladsl.Source -/** - * A plugin may optionally support this query by implementing this trait. - */ +/** A plugin may optionally support this query by implementing this trait. 
*/ trait CurrentEventsBySliceQuery extends ReadJournal { /** diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/scaladsl/CurrentEventsBySliceStartingFromSnapshotsQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/scaladsl/CurrentEventsBySliceStartingFromSnapshotsQuery.scala index 20ab01bf4ea..c519537a6cb 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/scaladsl/CurrentEventsBySliceStartingFromSnapshotsQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/scaladsl/CurrentEventsBySliceStartingFromSnapshotsQuery.scala @@ -12,9 +12,7 @@ import akka.persistence.query.scaladsl.ReadJournal import akka.persistence.query.typed.EventEnvelope import akka.stream.scaladsl.Source -/** - * A plugin may optionally support this query by implementing this trait. - */ +/** A plugin may optionally support this query by implementing this trait. */ trait CurrentEventsBySliceStartingFromSnapshotsQuery extends ReadJournal { /** diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/scaladsl/EventTimestampQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/scaladsl/EventTimestampQuery.scala index ab644dcc6dd..8321f5fd7c2 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/scaladsl/EventTimestampQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/scaladsl/EventTimestampQuery.scala @@ -10,9 +10,7 @@ import scala.concurrent.Future import akka.persistence.query.scaladsl.ReadJournal -/** - * [[EventsBySliceQuery]] that is using a timestamp based offset should also implement this query. - */ +/** [[EventsBySliceQuery]] that is using a timestamp based offset should also implement this query. 
*/ trait EventTimestampQuery extends ReadJournal { def timestampOf(persistenceId: String, sequenceNr: Long): Future[Option[Instant]] diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/scaladsl/EventsByPersistenceIdStartingFromSnapshotQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/scaladsl/EventsByPersistenceIdStartingFromSnapshotQuery.scala index f609f45b3d8..7a6097da7cb 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/scaladsl/EventsByPersistenceIdStartingFromSnapshotQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/scaladsl/EventsByPersistenceIdStartingFromSnapshotQuery.scala @@ -10,9 +10,7 @@ import akka.persistence.query.scaladsl.ReadJournal import akka.persistence.query.typed.EventEnvelope import akka.stream.scaladsl.Source -/** - * A plugin may optionally support this query by implementing this trait. - */ +/** A plugin may optionally support this query by implementing this trait. */ @ApiMayChange trait EventsByPersistenceIdStartingFromSnapshotQuery extends ReadJournal { diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/scaladsl/EventsByPersistenceIdTypedQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/scaladsl/EventsByPersistenceIdTypedQuery.scala index 591b4860f79..57773888546 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/scaladsl/EventsByPersistenceIdTypedQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/scaladsl/EventsByPersistenceIdTypedQuery.scala @@ -9,9 +9,7 @@ import akka.persistence.query.scaladsl.ReadJournal import akka.persistence.query.typed.EventEnvelope import akka.stream.scaladsl.Source -/** - * A plugin may optionally support this query by implementing this trait. - */ +/** A plugin may optionally support this query by implementing this trait. 
*/ trait EventsByPersistenceIdTypedQuery extends ReadJournal { /** diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/scaladsl/LoadEventQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/scaladsl/LoadEventQuery.scala index 21ac15b69e9..1bc79c2988f 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/typed/scaladsl/LoadEventQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/typed/scaladsl/LoadEventQuery.scala @@ -9,9 +9,7 @@ import scala.concurrent.Future import akka.persistence.query.scaladsl.ReadJournal import akka.persistence.query.typed.EventEnvelope -/** - * [[EventsBySliceQuery]] that is using a timestamp based offset should also implement this query. - */ +/** [[EventsBySliceQuery]] that is using a timestamp based offset should also implement this query. */ trait LoadEventQuery extends ReadJournal { /** diff --git a/akka-persistence-query/src/test/scala/akka/persistence/query/OffsetSpec.scala b/akka-persistence-query/src/test/scala/akka/persistence/query/OffsetSpec.scala index c4ec4364430..a079c539afd 100644 --- a/akka-persistence-query/src/test/scala/akka/persistence/query/OffsetSpec.scala +++ b/akka-persistence-query/src/test/scala/akka/persistence/query/OffsetSpec.scala @@ -16,9 +16,12 @@ class OffsetSpec extends AnyWordSpecLike with Matchers { "TimeBasedUUID offset" must { "be ordered correctly" in { - val uuid1 = TimeBasedUUID(UUID.fromString("49225740-2019-11ea-a752-ffae2393b6e4")) //2019-12-16T15:32:36.148Z[UTC] - val uuid2 = TimeBasedUUID(UUID.fromString("91be23d0-2019-11ea-a752-ffae2393b6e4")) //2019-12-16T15:34:37.965Z[UTC] - val uuid3 = TimeBasedUUID(UUID.fromString("91f95810-2019-11ea-a752-ffae2393b6e4")) //2019-12-16T15:34:38.353Z[UTC] + val uuid1 = + TimeBasedUUID(UUID.fromString("49225740-2019-11ea-a752-ffae2393b6e4")) // 2019-12-16T15:32:36.148Z[UTC] + val uuid2 = + TimeBasedUUID(UUID.fromString("91be23d0-2019-11ea-a752-ffae2393b6e4")) 
// 2019-12-16T15:34:37.965Z[UTC] + val uuid3 = + TimeBasedUUID(UUID.fromString("91f95810-2019-11ea-a752-ffae2393b6e4")) // 2019-12-16T15:34:38.353Z[UTC] uuid1.value.timestamp() should be < uuid2.value.timestamp() uuid2.value.timestamp() should be < uuid3.value.timestamp() List(uuid2, uuid1, uuid3).sorted shouldEqual List(uuid1, uuid2, uuid3) diff --git a/akka-persistence-query/src/test/scala/akka/persistence/query/TestClock.scala b/akka-persistence-query/src/test/scala/akka/persistence/query/TestClock.scala index ac61f8e2375..2b24760d2fb 100644 --- a/akka-persistence-query/src/test/scala/akka/persistence/query/TestClock.scala +++ b/akka-persistence-query/src/test/scala/akka/persistence/query/TestClock.scala @@ -14,9 +14,7 @@ import scala.concurrent.duration.FiniteDuration import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class TestClock extends Clock { @volatile private var _instant = roundToMillis(Instant.now()) diff --git a/akka-persistence-query/src/test/scala/akka/persistence/query/internal/QuerySerializerSpec.scala b/akka-persistence-query/src/test/scala/akka/persistence/query/internal/QuerySerializerSpec.scala index 838cb97a22b..2545b8bda08 100644 --- a/akka-persistence-query/src/test/scala/akka/persistence/query/internal/QuerySerializerSpec.scala +++ b/akka-persistence-query/src/test/scala/akka/persistence/query/internal/QuerySerializerSpec.scala @@ -104,7 +104,7 @@ class QuerySerializerSpec extends AkkaSpec { } "serialize EventEnvelope with TimeBasedUUID Offset" in { - //2019-12-16T15:32:36.148Z[UTC] + // 2019-12-16T15:32:36.148Z[UTC] val uuidString = "49225740-2019-11ea-a752-ffae2393b6e4" val timeUuidOffset = TimeBasedUUID(UUID.fromString(uuidString)) verifySerialization( @@ -132,7 +132,7 @@ class QuerySerializerSpec extends AkkaSpec { } "serialize TimeBasedUUID Offset" in { - //2019-12-16T15:32:36.148Z[UTC] + // 2019-12-16T15:32:36.148Z[UTC] val uuidString = "49225740-2019-11ea-a752-ffae2393b6e4" 
val timeUuidOffset = TimeBasedUUID(UUID.fromString(uuidString)) verifySerialization(timeUuidOffset) diff --git a/akka-persistence-query/src/test/scala/akka/persistence/query/journal/leveldb/TestActor.scala b/akka-persistence-query/src/test/scala/akka/persistence/query/journal/leveldb/TestActor.scala index 2396c07de5e..06bf412aa38 100644 --- a/akka-persistence-query/src/test/scala/akka/persistence/query/journal/leveldb/TestActor.scala +++ b/akka-persistence-query/src/test/scala/akka/persistence/query/journal/leveldb/TestActor.scala @@ -18,8 +18,7 @@ class TestActor(override val persistenceId: String) extends PersistentActor { import TestActor.DeleteCmd - val receiveRecover: Receive = { - case _: String => + val receiveRecover: Receive = { case _: String => } val receiveCommand: Receive = { diff --git a/akka-persistence-query/src/test/scala/akka/persistence/query/typed/internal/EventsBySliceFirehoseSpec.scala b/akka-persistence-query/src/test/scala/akka/persistence/query/typed/internal/EventsBySliceFirehoseSpec.scala index cc50377de8b..1f569d29fec 100644 --- a/akka-persistence-query/src/test/scala/akka/persistence/query/typed/internal/EventsBySliceFirehoseSpec.scala +++ b/akka-persistence-query/src/test/scala/akka/persistence/query/typed/internal/EventsBySliceFirehoseSpec.scala @@ -146,12 +146,11 @@ class EventsBySliceFirehoseSpec val firehoseRunning = new AtomicBoolean private val firehosePublisherPromise = Promise[TestPublisher.Probe[EventEnvelope[Any]]]() private val firehoseSource: Source[EventEnvelope[Any], NotUsed] = - TestSource[EventEnvelope[Any]]().watchTermination()(Keep.both).mapMaterializedValue { - case (probe, termination) => - firehoseRunning.set(true) - termination.onComplete(_ => firehoseRunning.set(false))(ExecutionContexts.parasitic) - firehosePublisherPromise.success(probe) - NotUsed + TestSource[EventEnvelope[Any]]().watchTermination()(Keep.both).mapMaterializedValue { case (probe, termination) => + firehoseRunning.set(true) + 
termination.onComplete(_ => firehoseRunning.set(false))(ExecutionContexts.parasitic) + firehosePublisherPromise.success(probe) + NotUsed } val eventsBySliceFirehose = new EventsBySliceFirehose(system.classicSystem) { diff --git a/akka-persistence-shared/src/test/scala/akka/persistence/journal/leveldb/PersistencePluginProxySpec.scala b/akka-persistence-shared/src/test/scala/akka/persistence/journal/leveldb/PersistencePluginProxySpec.scala index d061ca8baba..3728b78c08e 100644 --- a/akka-persistence-shared/src/test/scala/akka/persistence/journal/leveldb/PersistencePluginProxySpec.scala +++ b/akka-persistence-shared/src/test/scala/akka/persistence/journal/leveldb/PersistencePluginProxySpec.scala @@ -13,7 +13,8 @@ import akka.testkit.{ AkkaSpec, TestProbe } object PersistencePluginProxySpec { lazy val config = - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" akka { actor { provider = remote @@ -40,7 +41,8 @@ object PersistencePluginProxySpec { log-dead-letters-during-shutdown = off test.single-expect-default = 10s } - """).withFallback(SharedLeveldbJournal.configToEnableJavaSerializationForTest) + """) + .withFallback(SharedLeveldbJournal.configToEnableJavaSerializationForTest) lazy val startTargetConfig = ConfigFactory.parseString(""" @@ -56,13 +58,13 @@ object PersistencePluginProxySpec { |akka.extensions = ["akka.persistence.Persistence"] |akka.persistence.journal.auto-start-journals = [""] |akka.persistence.journal.proxy.target-journal-address = "${system - .asInstanceOf[ExtendedActorSystem] - .provider - .getDefaultAddress}" + .asInstanceOf[ExtendedActorSystem] + .provider + .getDefaultAddress}" |akka.persistence.snapshot-store.proxy.target-snapshot-store-address = "${system - .asInstanceOf[ExtendedActorSystem] - .provider - .getDefaultAddress}" + .asInstanceOf[ExtendedActorSystem] + .provider + .getDefaultAddress}" """.stripMargin) class ExamplePersistentActor(probe: ActorRef, name: String) extends NamedPersistentActor(name) { @@ -71,19 +73,18 
@@ object PersistencePluginProxySpec { case payload => probe ! payload } - override def receiveCommand = { - case payload => - persist(payload) { _ => - probe ! payload - } + override def receiveCommand = { case payload => + persist(payload) { _ => + probe ! payload + } } } class ExampleApp(probe: ActorRef) extends Actor { val p = context.actorOf(Props(classOf[ExamplePersistentActor], probe, context.system.name)) - def receive = { - case m => p.forward(m) + def receive = { case m => + p.forward(m) } } diff --git a/akka-persistence-shared/src/test/scala/akka/persistence/journal/leveldb/SharedLeveldbJournalSpec.scala b/akka-persistence-shared/src/test/scala/akka/persistence/journal/leveldb/SharedLeveldbJournalSpec.scala index 60a63767a62..f80fc2f4866 100644 --- a/akka-persistence-shared/src/test/scala/akka/persistence/journal/leveldb/SharedLeveldbJournalSpec.scala +++ b/akka-persistence-shared/src/test/scala/akka/persistence/journal/leveldb/SharedLeveldbJournalSpec.scala @@ -13,7 +13,8 @@ import akka.persistence._ import akka.testkit.{ AkkaSpec, TestProbe } object SharedLeveldbJournalSpec { - val config = ConfigFactory.parseString(""" + val config = ConfigFactory + .parseString(""" akka { actor { provider = remote @@ -39,18 +40,18 @@ object SharedLeveldbJournalSpec { log-dead-letters-during-shutdown = off test.single-expect-default = 10s } - """).withFallback(SharedLeveldbJournal.configToEnableJavaSerializationForTest) + """) + .withFallback(SharedLeveldbJournal.configToEnableJavaSerializationForTest) class ExamplePersistentActor(probe: ActorRef, name: String) extends NamedPersistentActor(name) { override def receiveRecover = { case RecoveryCompleted => // ignore case payload => probe ! payload } - override def receiveCommand = { - case payload => - persist(payload) { _ => - probe ! payload - } + override def receiveCommand = { case payload => + persist(payload) { _ => + probe ! 
payload + } } } @@ -90,7 +91,8 @@ class SharedLeveldbJournalSpec extends AkkaSpec(SharedLeveldbJournalSpec.config) @nowarn val sharedLeveldbStoreCls = classOf[SharedLeveldbStore] system.actorOf(Props(sharedLeveldbStoreCls, storeConfig), "store") - val storePath = RootActorPath(system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress) / "user" / "store" + val storePath = + RootActorPath(system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress) / "user" / "store" val appA = systemA.actorOf(Props(classOf[ExampleApp], probeA.ref, storePath)) val appB = systemB.actorOf(Props(classOf[ExampleApp], probeB.ref, storePath)) diff --git a/akka-persistence-shared/src/test/scala/akka/persistence/serialization/SerializerSpec.scala b/akka-persistence-shared/src/test/scala/akka/persistence/serialization/SerializerSpec.scala index adcd7507013..9eebfc6b1cf 100644 --- a/akka-persistence-shared/src/test/scala/akka/persistence/serialization/SerializerSpec.scala +++ b/akka-persistence-shared/src/test/scala/akka/persistence/serialization/SerializerSpec.scala @@ -297,8 +297,8 @@ class MessageSerializerPersistenceSpec extends AkkaSpec(customSerializers) { object MessageSerializerRemotingSpec { class LocalActor(port: Int) extends Actor { - def receive = { - case m => context.actorSelection(s"akka://remote@127.0.0.1:${port}/user/remote").tell(m, Actor.noSender) + def receive = { case m => + context.actorSelection(s"akka://remote@127.0.0.1:${port}/user/remote").tell(m, Actor.noSender) } } diff --git a/akka-persistence-tck/src/main/scala/akka/persistence/CapabilityFlags.scala b/akka-persistence-tck/src/main/scala/akka/persistence/CapabilityFlags.scala index 4a3ff0fb1f7..f5e23fb8903 100644 --- a/akka-persistence-tck/src/main/scala/akka/persistence/CapabilityFlags.scala +++ b/akka-persistence-tck/src/main/scala/akka/persistence/CapabilityFlags.scala @@ -7,7 +7,7 @@ package akka.persistence import scala.language.implicitConversions sealed abstract class CapabilityFlag { - 
private val capturedStack = (new Throwable().getStackTrace) + private val capturedStack = new Throwable().getStackTrace .filter(_.getMethodName.startsWith("supports")) .find { el => val clazz = Class.forName(el.getClassName) diff --git a/akka-persistence-tck/src/main/scala/akka/persistence/journal/JournalPerfSpec.scala b/akka-persistence-tck/src/main/scala/akka/persistence/journal/JournalPerfSpec.scala index 330fd4d30e9..17cd74f466a 100644 --- a/akka-persistence-tck/src/main/scala/akka/persistence/journal/JournalPerfSpec.scala +++ b/akka-persistence-tck/src/main/scala/akka/persistence/journal/JournalPerfSpec.scala @@ -61,11 +61,10 @@ object JournalPerfSpec { counter = 0 } - override def receiveRecover: Receive = { - case Cmd(_, payload) => - counter += 1 - require(payload == counter, s"Expected to receive [$counter] yet got: [${payload}]") - if (counter == replyAfter) replyTo ! payload + override def receiveRecover: Receive = { case Cmd(_, payload) => + counter += 1 + require(payload == counter, s"Expected to receive [$counter] yet got: [${payload}]") + if (counter == replyAfter) replyTo ! 
payload } } @@ -73,9 +72,7 @@ object JournalPerfSpec { case object ResetCounter case class Cmd(mode: String, payload: Int) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] class CmdSerializer extends SerializerWithStringManifest { override def identifier: Int = 293562 diff --git a/akka-persistence-tck/src/main/scala/akka/persistence/journal/JournalSpec.scala b/akka-persistence-tck/src/main/scala/akka/persistence/journal/JournalSpec.scala index 11378305245..bfb4f312528 100644 --- a/akka-persistence-tck/src/main/scala/akka/persistence/journal/JournalSpec.scala +++ b/akka-persistence-tck/src/main/scala/akka/persistence/journal/JournalSpec.scala @@ -307,8 +307,8 @@ abstract class JournalSpec(config: Config) case ReplayedMessage(PersistentImpl(payload, 6L, Pid, _, _, Actor.noSender, WriterUuid, _, _)) => payload should be(event) } - receiverProbe.expectMsgPF() { - case RecoverySuccess(highestSequenceNr) => highestSequenceNr should be >= 6L + receiverProbe.expectMsgPF() { case RecoverySuccess(highestSequenceNr) => + highestSequenceNr should be >= 6L } } } @@ -335,8 +335,8 @@ abstract class JournalSpec(config: Config) val WriterUuid = writerUuid probe.expectMsgPF() { case WriteMessageSuccess( - PersistentImpl(payload, 6L, Pid, _, _, Actor.noSender, WriterUuid, _, Some(`meta`)), - _) => + PersistentImpl(payload, 6L, Pid, _, _, Actor.noSender, WriterUuid, _, Some(`meta`)), + _) => payload should be(event) } diff --git a/akka-persistence-tck/src/main/scala/akka/persistence/snapshot/SnapshotStoreSpec.scala b/akka-persistence-tck/src/main/scala/akka/persistence/snapshot/SnapshotStoreSpec.scala index c75a3d81b77..ac98082371d 100644 --- a/akka-persistence-tck/src/main/scala/akka/persistence/snapshot/SnapshotStoreSpec.scala +++ b/akka-persistence-tck/src/main/scala/akka/persistence/snapshot/SnapshotStoreSpec.scala @@ -215,8 +215,8 @@ abstract class SnapshotStoreSpec(config: Config) snapshotStore.tell(LoadSnapshot(pid, SnapshotSelectionCriteria.Latest, 
Long.MaxValue), senderProbe.ref) senderProbe.expectMsgPF() { case LoadSnapshotResult( - Some(SelectedSnapshot(meta @ SnapshotMetadata(Pid, 100, _), payload)), - Long.MaxValue) => + Some(SelectedSnapshot(meta @ SnapshotMetadata(Pid, 100, _), payload)), + Long.MaxValue) => payload should be(snap) meta.metadata should ===(Some(fictionalMeta)) } diff --git a/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalJavaSpec.scala b/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalJavaSpec.scala index 9671315c843..04d1e7de8ee 100644 --- a/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalJavaSpec.scala +++ b/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalJavaSpec.scala @@ -8,11 +8,10 @@ import akka.persistence.{ PersistenceSpec, PluginCleanup } import akka.persistence.journal.JournalSpec class LeveldbJournalJavaSpec - extends JournalSpec( - config = PersistenceSpec.config( - "leveldb", - "LeveldbJournalJavaSpec", - extraConfig = Some(""" + extends JournalSpec(config = PersistenceSpec.config( + "leveldb", + "LeveldbJournalJavaSpec", + extraConfig = Some(""" akka.persistence.journal.leveldb.native = off akka.actor.allow-java-serialization = off akka.actor.warn-about-java-serializer-usage = on diff --git a/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalNativeSpec.scala b/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalNativeSpec.scala index 73cf3ad82d0..868a2ce35fd 100644 --- a/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalNativeSpec.scala +++ b/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalNativeSpec.scala @@ -8,11 +8,10 @@ import akka.persistence.{ PersistenceSpec, PluginCleanup } import akka.persistence.journal.JournalSpec class LeveldbJournalNativeSpec - extends JournalSpec( - config = 
PersistenceSpec.config( - "leveldb", - "LeveldbJournalNativeSpec", - extraConfig = Some(""" + extends JournalSpec(config = PersistenceSpec.config( + "leveldb", + "LeveldbJournalNativeSpec", + extraConfig = Some(""" akka.persistence.journal.leveldb.native = on akka.actor.allow-java-serialization = off akka.actor.warn-about-java-serializer-usage = on diff --git a/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalNoAtomicPersistMultipleEventsSpec.scala b/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalNoAtomicPersistMultipleEventsSpec.scala index 23d8eda9f6d..67e6e747cf3 100644 --- a/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalNoAtomicPersistMultipleEventsSpec.scala +++ b/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalNoAtomicPersistMultipleEventsSpec.scala @@ -8,20 +8,17 @@ import akka.persistence.{ PersistenceSpec, PluginCleanup } import akka.persistence.journal.JournalSpec class LeveldbJournalNoAtomicPersistMultipleEventsSpec - extends JournalSpec( - config = PersistenceSpec.config( - "leveldb", - "LeveldbJournalNoAtomicPersistMultipleEventsSpec", - extraConfig = Some(""" + extends JournalSpec(config = PersistenceSpec.config( + "leveldb", + "LeveldbJournalNoAtomicPersistMultipleEventsSpec", + extraConfig = Some(""" akka.persistence.journal.leveldb.native = off akka.actor.allow-java-serialization = off akka.actor.warn-about-java-serializer-usage = on """))) with PluginCleanup { - /** - * Setting to false to test the single message atomic write behavior of JournalSpec - */ + /** Setting to false to test the single message atomic write behavior of JournalSpec */ override def supportsAtomicPersistAllOfSeveralEvents = false override def supportsRejectingNonSerializableObjects = true diff --git a/akka-persistence-tck/src/test/scala/akka/persistence/snapshot/local/LocalSnapshotStoreSpec.scala 
b/akka-persistence-tck/src/test/scala/akka/persistence/snapshot/local/LocalSnapshotStoreSpec.scala index 5d0d2b68b87..c3508be004e 100644 --- a/akka-persistence-tck/src/test/scala/akka/persistence/snapshot/local/LocalSnapshotStoreSpec.scala +++ b/akka-persistence-tck/src/test/scala/akka/persistence/snapshot/local/LocalSnapshotStoreSpec.scala @@ -11,9 +11,7 @@ import akka.persistence.PluginCleanup import akka.persistence.snapshot.SnapshotStoreSpec class LocalSnapshotStoreSpec - extends SnapshotStoreSpec( - config = - ConfigFactory.parseString(""" + extends SnapshotStoreSpec(config = ConfigFactory.parseString(""" akka.test.timefactor = 3 akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" akka.persistence.snapshot-store.local.dir = "target/snapshots" diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/EventStorage.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/EventStorage.scala index bc8f5d2da5e..ef5d09877cd 100644 --- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/EventStorage.scala +++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/EventStorage.scala @@ -18,9 +18,7 @@ import akka.persistence.testkit.internal.TestKitStorage import akka.stream.scaladsl.Source import akka.util.ccompat.JavaConverters._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[testkit] trait EventStorage extends TestKitStorage[JournalOperation, PersistentRepr] { import EventStorage._ @@ -33,7 +31,7 @@ private[testkit] trait EventStorage extends TestKitStorage[JournalOperation, Per // and therefore must be done at the same time with the update, not before updateOrSetNew(key, v => v ++ mapAny(key, elems).toVector) - override def reprToSeqNum(repr: (PersistentRepr)): Long = repr.sequenceNr + override def reprToSeqNum(repr: PersistentRepr): Long = repr.sequenceNr def add(elems: immutable.Seq[PersistentRepr]): Unit = elems.groupBy(_.persistenceId).foreach { gr 
=> @@ -42,15 +40,14 @@ private[testkit] trait EventStorage extends TestKitStorage[JournalOperation, Per override protected val DefaultPolicy = JournalPolicies.PassAll - /** - * @throws Exception from StorageFailure in the current writing policy - */ + /** @throws Exception from StorageFailure in the current writing policy */ def tryAdd(elems: immutable.Seq[PersistentRepr]): Try[Unit] = { val grouped = elems.groupBy(_.persistenceId) - val processed = grouped.map { - case (pid, els) => - currentPolicy.tryProcess(pid, WriteEvents(els.map(_.payload match { + val processed = grouped.map { case (pid, els) => + currentPolicy.tryProcess( + pid, + WriteEvents(els.map(_.payload match { case Tagged(payload, _) => payload case nonTagged => nonTagged }))) @@ -155,9 +152,7 @@ private[testkit] trait EventStorage extends TestKitStorage[JournalOperation, Per object EventStorage { object JournalPolicies extends DefaultPolicies[JournalOperation] - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] implicit val persistentReprOrdering: Ordering[PersistentRepr] = Ordering.fromLessThan[PersistentRepr] { (a, b) => if (a eq b) false @@ -176,39 +171,29 @@ object EventStorage { @InternalApi sealed trait JournalOperation -/** - * Read from journal operation with events that were read. - */ +/** Read from journal operation with events that were read. */ final case class ReadEvents(batch: immutable.Seq[Any]) extends JournalOperation { def getBatch(): JList[Any] = batch.asJava } -/** - * Write in journal operation with events to be written. - */ +/** Write in journal operation with events to be written. */ final case class WriteEvents(batch: immutable.Seq[Any]) extends JournalOperation { def getBatch(): JList[Any] = batch.asJava } -/** - * Read persistent actor's sequence number operation. - */ +/** Read persistent actor's sequence number operation. */ case object ReadSeqNum extends JournalOperation { - /** - * Java API: the singleton instance. 
- */ + /** Java API: the singleton instance. */ def getInstance() = this } -/** - * Delete events in the journal up to `toSeqNumber` operation. - */ +/** Delete events in the journal up to `toSeqNumber` operation. */ final case class DeleteEvents(toSeqNumber: Long) extends JournalOperation { def getToSeqNumber() = toSeqNumber diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/ProcessingPolicy.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/ProcessingPolicy.scala index 24d22a9991e..f79c785fb61 100644 --- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/ProcessingPolicy.scala +++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/ProcessingPolicy.scala @@ -30,15 +30,11 @@ trait ProcessingPolicy[U] { } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi object ProcessingPolicy { - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[testkit] trait DefaultPolicies[U] { @@ -102,37 +98,36 @@ object ProcessingPolicy { returnNonTrigger: => ProcessingResult, cond: (String, U) => Boolean, onLimitExceed: => Unit) - extends ReturnAfterNextNCond(returnOnTrigger, returnNonTrigger, new Function2[String, U, Boolean] { - - var counter = 0 - - override def apply(persistenceId: String, v1: U): Boolean = { - val intRes = cond(persistenceId, v1) - if (intRes && counter < numberToCount) { - counter += 1 - if (counter == numberToCount) onLimitExceed - intRes - } else { - false + extends ReturnAfterNextNCond( + returnOnTrigger, + returnNonTrigger, + new Function2[String, U, Boolean] { + + var counter = 0 + + override def apply(persistenceId: String, v1: U): Boolean = { + val intRes = cond(persistenceId, v1) + if (intRes && counter < numberToCount) { + counter += 1 + if (counter == numberToCount) onLimitExceed + intRes + } else { + false + } } - } - }) + }) } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi sealed trait ProcessingResult sealed abstract class 
ProcessingSuccess extends ProcessingResult -/** - * Emulates successful processing of some operation. - */ +/** Emulates successful processing of some operation. */ case object ProcessingSuccess extends ProcessingSuccess { def getInstance(): ProcessingSuccess = this @@ -180,9 +175,7 @@ object Reject { } -/** - * Emulates exception thrown by the storage on the attempt to perform some operation. - */ +/** Emulates exception thrown by the storage on the attempt to perform some operation. */ final case class StorageFailure(error: Throwable = ExpectedFailure) extends ProcessingFailure { def getError(): Throwable = error diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/SnapshotStorage.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/SnapshotStorage.scala index 3510b5a341d..97a261cef29 100644 --- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/SnapshotStorage.scala +++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/SnapshotStorage.scala @@ -12,9 +12,7 @@ import akka.persistence.{ SelectedSnapshot, SnapshotMetadata, SnapshotSelectionC import akka.persistence.testkit.ProcessingPolicy.DefaultPolicies import akka.persistence.testkit.internal.TestKitStorage -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[testkit] trait SnapshotStorage extends TestKitStorage[SnapshotOperation, (SnapshotMetadata, Any)] @@ -28,7 +26,9 @@ private[testkit] trait SnapshotStorage override protected val DefaultPolicy = SnapshotPolicies.PassAll def tryAdd(meta: SnapshotMetadata, payload: Any): Unit = { - currentPolicy.tryProcess(meta.persistenceId, WriteSnapshot(SnapshotMeta(meta.sequenceNr, meta.timestamp), payload)) match { + currentPolicy.tryProcess( + meta.persistenceId, + WriteSnapshot(SnapshotMeta(meta.sequenceNr, meta.timestamp), payload)) match { case ProcessingSuccess => add(meta.persistenceId, (meta, payload)) Success(()) @@ -56,7 +56,9 @@ private[testkit] trait SnapshotStorage } 
def tryDelete(meta: SnapshotMetadata): Unit = { - currentPolicy.tryProcess(meta.persistenceId, DeleteSnapshotByMeta(SnapshotMeta(meta.sequenceNr, meta.timestamp))) match { + currentPolicy.tryProcess( + meta.persistenceId, + DeleteSnapshotByMeta(SnapshotMeta(meta.sequenceNr, meta.timestamp))) match { case ProcessingSuccess => delete(meta.persistenceId, _._1.sequenceNr == meta.sequenceNr) case f: ProcessingFailure => throw f.error @@ -71,9 +73,7 @@ object SnapshotStorage { } -/** - * Snapshot metainformation. - */ +/** Snapshot metainformation. */ final case class SnapshotMeta(sequenceNr: Long, timestamp: Long = 0L) { def getSequenceNr() = sequenceNr @@ -99,7 +99,6 @@ case object SnapshotMeta { sealed trait SnapshotOperation /** - * * Storage read operation for recovery of the persistent actor. * * @param criteria criteria with which snapshot is searched @@ -128,24 +127,18 @@ final case class WriteSnapshot(metadata: SnapshotMeta, snapshot: Any) extends Sn } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi sealed abstract class DeleteSnapshot extends SnapshotOperation -/** - * Delete snapshots from storage by criteria. - */ +/** Delete snapshots from storage by criteria. */ final case class DeleteSnapshotsByCriteria(criteria: SnapshotSelectionCriteria) extends DeleteSnapshot { def getCriteria() = criteria } -/** - * Delete particular snapshot from storage by its metadata. - */ +/** Delete particular snapshot from storage by its metadata. 
*/ final case class DeleteSnapshotByMeta(metadata: SnapshotMeta) extends DeleteSnapshot { def getMetadata() = metadata diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/CurrentTime.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/CurrentTime.scala index d952d018b4a..85280096406 100644 --- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/CurrentTime.scala +++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/CurrentTime.scala @@ -8,15 +8,11 @@ import java.util.concurrent.atomic.AtomicLong import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object CurrentTime { private val previous = new AtomicLong(System.currentTimeMillis()) - /** - * `System.currentTimeMillis` but always increasing. - */ + /** `System.currentTimeMillis` but always increasing. */ def now(): Long = { val current = System.currentTimeMillis() val prev = previous.get() diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/EventSourcedBehaviorTestKitImpl.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/EventSourcedBehaviorTestKitImpl.scala index 91261425533..01e1587d8a4 100644 --- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/EventSourcedBehaviorTestKitImpl.scala +++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/EventSourcedBehaviorTestKitImpl.scala @@ -31,9 +31,7 @@ import akka.persistence.typed.internal.EventSourcedBehaviorImpl import akka.persistence.typed.internal.EventSourcedBehaviorImpl.GetStateReply import akka.stream.scaladsl.Sink -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object EventSourcedBehaviorTestKitImpl { final case class CommandResultImpl[Command, Event, State, Reply]( command: Command, @@ -77,9 +75,7 @@ import akka.stream.scaladsl.Sink final case class 
RestartResultImpl[State](state: State) extends RestartResult[State] } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class EventSourcedBehaviorTestKitImpl[Command, Event, State]( actorTestKit: ActorTestKit, behavior: Behavior[Command], @@ -90,7 +86,7 @@ import akka.stream.scaladsl.Sink private def system: ActorSystem[_] = actorTestKit.system if (system.settings.config.getBoolean("akka.persistence.testkit.events.serialize") || - system.settings.config.getBoolean("akka.persistence.testkit.snapshots.serialize")) { + system.settings.config.getBoolean("akka.persistence.testkit.snapshots.serialize")) { system.log.warn( "Persistence TestKit serialization enabled when using EventSourcedBehaviorTestKit, this is not intended. " + "make sure you create the system used in the test with the config from EventSourcedBehaviorTestKit.config " + @@ -148,14 +144,15 @@ import akka.stream.scaladsl.Sink actor ! command - val reply = try { - replyProbe.receiveMessage() - } catch { - case NonFatal(_) => - throw new AssertionError(s"Missing expected reply for command [$command].") - } finally { - replyProbe.stop() - } + val reply = + try { + replyProbe.receiveMessage() + } catch { + case NonFatal(_) => + throw new AssertionError(s"Missing expected reply for command [$command].") + } finally { + replyProbe.stop() + } val newState = getState() val newEvents = getEvents(seqNrBefore + 1) @@ -255,7 +252,7 @@ import akka.stream.scaladsl.Sink stateOption.foreach { state => snapshotTestKit match { case Some(kit) => kit.persistForRecovery(persistenceId.id, (SnapshotMeta(0), state)) - case _ => throw new IllegalArgumentException("Cannot initialize from state when snapshots are not used.") + case _ => throw new IllegalArgumentException("Cannot initialize from state when snapshots are not used.") } } persistenceTestKit.persistForRecovery(persistenceId.id, collection.immutable.Seq.empty ++ events) diff --git 
a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/InMemStorageExtension.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/InMemStorageExtension.scala index 10ee214caba..beb00ac6284 100644 --- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/InMemStorageExtension.scala +++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/InMemStorageExtension.scala @@ -15,9 +15,7 @@ import akka.persistence.testkit.PersistenceTestKitPlugin import akka.persistence.testkit.ProcessingPolicy import akka.persistence.testkit.scaladsl.PersistenceTestKit -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[testkit] object InMemStorageExtension extends ExtensionId[InMemStorageExtension] with ExtensionIdProvider { @@ -30,9 +28,7 @@ private[testkit] object InMemStorageExtension extends ExtensionId[InMemStorageEx } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi final class InMemStorageExtension(system: ExtendedActorSystem) extends Extension { @@ -46,13 +42,15 @@ final class InMemStorageExtension(system: ExtendedActorSystem) extends Extension def resetPolicy(): Unit = defaultStorage().resetPolicy() def storageFor(key: String): EventStorage = - stores.computeIfAbsent(key, _ => { - // we don't really care about the key here, we just want separate instances - if (PersistenceTestKit.Settings(system).serialize) { - new SerializedEventStorageImpl(system) - } else { - new SimpleEventStorageImpl - } - }) + stores.computeIfAbsent( + key, + _ => { + // we don't really care about the key here, we just want separate instances + if (PersistenceTestKit.Settings(system).serialize) { + new SerializedEventStorageImpl(system) + } else { + new SimpleEventStorageImpl + } + }) } diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/PersistenceInitImpl.scala 
b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/PersistenceInitImpl.scala index 3bd8034c49c..6677f5f7f58 100644 --- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/PersistenceInitImpl.scala +++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/PersistenceInitImpl.scala @@ -12,9 +12,7 @@ import akka.annotation.InternalApi import akka.persistence.PersistentActor import akka.persistence.RecoveryCompleted -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object PersistenceInitImpl { def props(journalPluginId: String, snapshotPluginId: String, persistenceId: String): Props = { @@ -47,10 +45,9 @@ import akka.persistence.RecoveryCompleted case _ => } - def receiveCommand: Receive = { - case msg => - // recovery has completed - sender() ! msg - context.stop(self) + def receiveCommand: Receive = { case msg => + // recovery has completed + sender() ! msg + context.stop(self) } } diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/SerializedEventStorageImpl.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/SerializedEventStorageImpl.scala index 22a3fc19eeb..55bc7e7a489 100644 --- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/SerializedEventStorageImpl.scala +++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/SerializedEventStorageImpl.scala @@ -36,9 +36,7 @@ private[testkit] class SerializedEventStorageImpl(system: ActorSystem) extends E private lazy val serialization = SerializationExtension(system) - /** - * @return (serializer id, serialized bytes) - */ + /** @return (serializer id, serialized bytes) */ override def toInternal(pr: PersistentRepr): Serialized = Serialization.withTransportInformation(system.asInstanceOf[ExtendedActorSystem]) { () => val (payload, tags) = pr.payload match { @@ -60,9 +58,7 @@ private[testkit] class 
SerializedEventStorageImpl(system: ActorSystem) extends E metadata = pr.metadata) } - /** - * @param internal (serializer id, serialized bytes) - */ + /** @param internal (serializer id, serialized bytes) */ override def toRepr(internal: Serialized): PersistentRepr = { val event = serialization.deserialize(internal.payload, internal.payloadSerId, internal.payloadSerManifest).get val eventForRepr = diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/SerializedSnapshotStorageImpl.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/SerializedSnapshotStorageImpl.scala index 6659225eee5..e3de7409801 100644 --- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/SerializedSnapshotStorageImpl.scala +++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/SerializedSnapshotStorageImpl.scala @@ -10,9 +10,7 @@ import akka.persistence.SnapshotMetadata import akka.persistence.testkit.SnapshotStorage import akka.serialization.{ Serialization, SerializationExtension, Serializers } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[testkit] class SerializedSnapshotStorageImpl(system: ActorSystem) extends SnapshotStorage { diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/SimpleEventStorageImpl.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/SimpleEventStorageImpl.scala index 90feb33e332..6301a300473 100644 --- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/SimpleEventStorageImpl.scala +++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/SimpleEventStorageImpl.scala @@ -8,9 +8,7 @@ import akka.annotation.InternalApi import akka.persistence._ import akka.persistence.testkit.EventStorage -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[testkit] class SimpleEventStorageImpl extends EventStorage { diff --git 
a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/SimpleSnapshotStorageImpl.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/SimpleSnapshotStorageImpl.scala index e4ed8f763dc..91eb5cecc9b 100644 --- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/SimpleSnapshotStorageImpl.scala +++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/SimpleSnapshotStorageImpl.scala @@ -8,9 +8,7 @@ import akka.annotation.InternalApi import akka.persistence.SnapshotMetadata import akka.persistence.testkit.SnapshotStorage -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[testkit] class SimpleSnapshotStorageImpl extends SnapshotStorage { diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/SnapshotStorageEmulatorExtension.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/SnapshotStorageEmulatorExtension.scala index d4a4b29814a..12b2ca60235 100644 --- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/SnapshotStorageEmulatorExtension.scala +++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/SnapshotStorageEmulatorExtension.scala @@ -10,9 +10,7 @@ import akka.annotation.InternalApi import akka.persistence.testkit.SnapshotStorage import akka.persistence.testkit.scaladsl.SnapshotTestKit -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[testkit] object SnapshotStorageEmulatorExtension extends ExtensionId[SnapshotStorage] with ExtensionIdProvider { diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/TestKitStorage.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/TestKitStorage.scala index ce1b546ff06..3365d7123ed 100644 --- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/TestKitStorage.scala +++ 
b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/TestKitStorage.scala @@ -11,9 +11,7 @@ import scala.collection.immutable import akka.annotation.InternalApi import akka.persistence.testkit.ProcessingPolicy -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi sealed trait InternalReprSupport[R] { @@ -25,9 +23,7 @@ sealed trait InternalReprSupport[R] { } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi sealed trait InMemStorage[K, R] extends InternalReprSupport[R] { @@ -41,11 +37,10 @@ sealed trait InMemStorage[K, R] extends InternalReprSupport[R] { def reprToSeqNum(repr: R): Long def findMany(key: K, fromInclusive: Int, maxNum: Int): Option[Vector[R]] = - read(key).flatMap( - value => - if (value.size > fromInclusive) - Some(value.drop(fromInclusive).take(maxNum)) - else None) + read(key).flatMap(value => + if (value.size > fromInclusive) + Some(value.drop(fromInclusive).take(maxNum)) + else None) def removeFirstInExpectNextQueue(key: K): Unit = lock.synchronized { expectNextQueue.get(key).foreach { item => @@ -62,8 +57,8 @@ sealed trait InMemStorage[K, R] extends InternalReprSupport[R] { def findOneByIndex(key: K, index: Int): Option[R] = lock.synchronized { eventsMap .get(key) - .flatMap { - case (_, value) => if (value.size > index) Some(value(index)) else None + .flatMap { case (_, value) => + if (value.size > index) Some(value(index)) else None } .map(toRepr) } @@ -82,21 +77,15 @@ sealed trait InMemStorage[K, R] extends InternalReprSupport[R] { } } - /** - * Adds elements ordered by seqnum, sets new seqnum as max(old, max(newElemsSeqNums))) - */ + /** Adds elements ordered by seqnum, sets new seqnum as max(old, max(newElemsSeqNums))) */ def add(key: K, elems: immutable.Seq[R]): Unit = updateOrSetNew(key, v => v ++ elems) - /** - * Deletes elements preserving highest sequence number. - */ + /** Deletes elements preserving highest sequence number. 
*/ def delete(key: K, needsToBeDeleted: R => Boolean): Vector[R] = updateOrSetNew(key, v => v.filterNot(needsToBeDeleted)) - /** - * Sets new elements returned by updater ordered by seqnum. Sets new seqnum as max(old, max(newElemsFromUpdaterSeqNums)) - */ + /** Sets new elements returned by updater ordered by seqnum. Sets new seqnum as max(old, max(newElemsFromUpdaterSeqNums)) */ def updateOrSetNew(key: K, updater: Vector[R] => Vector[R]): Vector[R] = lock.synchronized { val (oldSn, oldElems) = eventsMap.getOrElse(key, (0L, Vector.empty)) val newValue = { @@ -120,18 +109,14 @@ sealed trait InMemStorage[K, R] extends InternalReprSupport[R] { eventsMap = Map.empty } - /** - * Removes key and the whole value including seqnum. - */ + /** Removes key and the whole value including seqnum. */ def removeKey(key: K): Vector[R] = lock.synchronized { val ret = eventsMap.get(key) eventsMap = eventsMap - key ret.map(_._2).getOrElse(Vector.empty).map(toRepr) } - /** - * Reads elems within the range of seqnums. - */ + /** Reads elems within the range of seqnums. 
*/ def read(key: K, fromInclusive: Long, toInclusive: Long, maxNumber: Long): immutable.Seq[R] = lock.synchronized { read(key) .getOrElse(Vector.empty) @@ -149,9 +134,11 @@ sealed trait InMemStorage[K, R] extends InternalReprSupport[R] { } def deleteToSeqNumber(key: K, toSeqNumberInclusive: Long): Unit = - updateOrSetNew(key, value => { - value.dropWhile(reprToSeqNum(_) <= toSeqNumberInclusive) - }) + updateOrSetNew( + key, + value => { + value.dropWhile(reprToSeqNum(_) <= toSeqNumberInclusive) + }) def clearAllPreservingSeqNumbers(): Unit = lock.synchronized { eventsMap.keys.foreach(removePreservingSeqNumber) @@ -164,9 +151,7 @@ sealed trait InMemStorage[K, R] extends InternalReprSupport[R] { } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi sealed trait PolicyOps[U] { @@ -185,8 +170,6 @@ sealed trait PolicyOps[U] { } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[testkit] trait TestKitStorage[P, R] extends InMemStorage[String, R] with PolicyOps[P] diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/Unpersistent.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/Unpersistent.scala index 9d4c53aabdf..096af60db05 100644 --- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/Unpersistent.scala +++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/internal/Unpersistent.scala @@ -23,9 +23,7 @@ import akka.persistence.typed.state.internal.Running.WithRevisionAccessible import akka.util.ConstantFun.{ scalaAnyToUnit => doNothing } import akka.util.ccompat.JavaConverters._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object Unpersistent { @@ -122,7 +120,7 @@ private[akka] object Unpersistent { val snapshotFromRetention = retention match { case DisabledRetentionCriteria => false case s: SnapshotCountRetentionCriteriaImpl => s.snapshotWhen(sequenceNr) - case unexpected => throw new 
IllegalStateException(s"Unexpected retention criteria: $unexpected") + case unexpected => throw new IllegalStateException(s"Unexpected retention criteria: $unexpected") } snapshotFromRetention || snapshotWhen(state, evt, sequenceNr) @@ -150,11 +148,10 @@ private[akka] object Unpersistent { (event, sequenceNr, tags) } - eventsWithSeqNrsAndTags.foreach { - case (event, seqNr, tags) => - // technically doesn't persist them atomically, but in tests that shouldn't matter - onEvent(event, seqNr, tags) - shouldSnapshot = shouldSnapshot || snapshotRequested(event) + eventsWithSeqNrsAndTags.foreach { case (event, seqNr, tags) => + // technically doesn't persist them atomically, but in tests that shouldn't matter + onEvent(event, seqNr, tags) + shouldSnapshot = shouldSnapshot || snapshotRequested(event) } sideEffect(sideEffects) @@ -297,9 +294,7 @@ private[akka] object Unpersistent { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class PersistenceProbeImpl[T] { type Element = (T, Long, Set[String]) diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/javadsl/EventSourcedBehaviorTestKit.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/javadsl/EventSourcedBehaviorTestKit.scala index b6a42bf7ed9..091a94098e7 100644 --- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/javadsl/EventSourcedBehaviorTestKit.scala +++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/javadsl/EventSourcedBehaviorTestKit.scala @@ -89,9 +89,7 @@ object EventSourcedBehaviorTestKit { } } - /** - * Factory method to create a new EventSourcedBehaviorTestKit. - */ + /** Factory method to create a new EventSourcedBehaviorTestKit. 
*/ def create[Command, Event, State]( system: ActorSystem[_], behavior: Behavior[Command]): EventSourcedBehaviorTestKit[Command, Event, State] = @@ -115,15 +113,11 @@ object EventSourcedBehaviorTestKit { new EventSourcedBehaviorTestKit(scaladsl.EventSourcedBehaviorTestKit(system, behavior, scaladslSettings)) } - /** - * The result of running a command. - */ + /** The result of running a command. */ @DoNotInherit class CommandResult[Command, Event, State]( delegate: scaladsl.EventSourcedBehaviorTestKit.CommandResult[Command, Event, State]) { - /** - * The command that was run. - */ + /** The command that was run. */ def command: Command = delegate.command @@ -135,15 +129,11 @@ object EventSourcedBehaviorTestKit { def events: JList[Event] = delegate.events.asJava - /** - * `true` if no events were emitted by the command. - */ + /** `true` if no events were emitted by the command. */ def hasNoEvents: Boolean = delegate.hasNoEvents - /** - * The first event. It will throw `AssertionError` if there is no event. - */ + /** The first event. It will throw `AssertionError` if there is no event. */ def event: Event = delegate.event @@ -154,15 +144,11 @@ object EventSourcedBehaviorTestKit { def eventOfType[E <: Event](eventClass: Class[E]): E = delegate.eventOfType(ClassTag[E](eventClass)) - /** - * The state after applying the events. - */ + /** The state after applying the events. */ def state: State = delegate.state - /** - * The state as a given expected type. It will throw `AssertionError` if the state is of a different type. - */ + /** The state as a given expected type. It will throw `AssertionError` if the state is of a different type. 
*/ def stateOfType[S <: State](stateClass: Class[S]): S = delegate.stateOfType(ClassTag[S](stateClass)) } @@ -175,9 +161,7 @@ object EventSourcedBehaviorTestKit { delegate: scaladsl.EventSourcedBehaviorTestKit.CommandResultWithReply[Command, Event, State, Reply]) extends CommandResult[Command, Event, State](delegate) { - /** - * The reply. It will throw `AssertionError` if there was no reply. - */ + /** The reply. It will throw `AssertionError` if there was no reply. */ def reply: Reply = delegate.reply @@ -188,21 +172,15 @@ object EventSourcedBehaviorTestKit { def replyOfType[R <: Reply](replyClass: Class[R]): R = delegate.replyOfType(ClassTag[R](replyClass)) - /** - * `true` if there is no reply. - */ + /** `true` if there is no reply. */ def hasNoReply: Boolean = delegate.hasNoReply } - /** - * The result of restarting the behavior. - */ + /** The result of restarting the behavior. */ final class RestartResult[State](delegate: scaladsl.EventSourcedBehaviorTestKit.RestartResult[State]) { - /** - * The state after recovery. - */ + /** The state after recovery. */ def state: State = delegate.state } @@ -235,9 +213,7 @@ final class EventSourcedBehaviorTestKit[Command, Event, State]( def runCommand[R](creator: JFunction[ActorRef[R], Command]): CommandResultWithReply[Command, Event, State, R] = new CommandResultWithReply(delegate.runCommand(replyTo => creator.apply(replyTo))) - /** - * Retrieve the current state of the Behavior. - */ + /** Retrieve the current state of the Behavior. */ def getState(): State = delegate.getState() @@ -248,15 +224,11 @@ final class EventSourcedBehaviorTestKit[Command, Event, State]( def restart(): RestartResult[State] = new RestartResult(delegate.restart()) - /** - * Clears the in-memory journal and snapshot storage and restarts the behavior. - */ + /** Clears the in-memory journal and snapshot storage and restarts the behavior. */ def clear(): Unit = delegate.clear() - /** - * Initializes behavior from provided state and/or events. 
- */ + /** Initializes behavior from provided state and/or events. */ @varargs def initialize(state: State, events: Event*): Unit = delegate.initialize(state, events: _*) @varargs diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/javadsl/PersistenceTestKit.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/javadsl/PersistenceTestKit.scala index 530f7b5c2c1..6b82c95b23c 100644 --- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/javadsl/PersistenceTestKit.scala +++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/javadsl/PersistenceTestKit.scala @@ -15,305 +15,191 @@ import akka.persistence.testkit.scaladsl.{ PersistenceTestKit => ScalaTestKit } import akka.util.JavaDurationConverters._ import akka.util.ccompat.JavaConverters._ -/** - * Class for testing persisted events in persistent actors. - */ +/** Class for testing persisted events in persistent actors. */ @ApiMayChange class PersistenceTestKit(scalaTestkit: ScalaTestKit) { def this(system: ActorSystem) = this(new ScalaTestKit(system)) - /** - * Check that nothing has been saved in the storage. - */ + /** Check that nothing has been saved in the storage. */ def expectNothingPersisted(persistenceId: String): Unit = scalaTestkit.expectNothingPersisted(persistenceId) - /** - * Check for `max` time that nothing has been saved in the storage. - */ + /** Check for `max` time that nothing has been saved in the storage. */ def expectNothingPersisted(persistenceId: String, max: Duration): Unit = scalaTestkit.expectNothingPersisted(persistenceId, max.asScala) - /** - * Check that `event` has been saved in the storage. - */ + /** Check that `event` has been saved in the storage. */ def expectNextPersisted[A](persistenceId: String, event: A): A = scalaTestkit.expectNextPersisted(persistenceId, event) - /** - * Check for `max` time that `event` has been saved in the storage. 
- */ + /** Check for `max` time that `event` has been saved in the storage. */ def expectNextPersisted[A](persistenceId: String, event: A, max: Duration): A = scalaTestkit.expectNextPersisted(persistenceId, event, max.asScala) - /** - * Check that next persisted in storage for particular persistence id event has expected type. - */ + /** Check that next persisted in storage for particular persistence id event has expected type. */ def expectNextPersistedClass[A](persistenceId: String, cla: Class[A]): A = scalaTestkit.expectNextPersistedClass(persistenceId, cla) - /** - * Check for `max` time that next persisted in storage for particular persistence id event has expected type. - */ + /** Check for `max` time that next persisted in storage for particular persistence id event has expected type. */ def expectNextPersistedClass[A](persistenceId: String, cla: Class[A], max: Duration): A = scalaTestkit.expectNextPersistedClass(persistenceId, cla, max.asScala) - /** - * Fail next `n` write operations with the `cause` exception for particular persistence id. - */ + /** Fail next `n` write operations with the `cause` exception for particular persistence id. */ def failNextNPersisted(persistenceId: String, n: Int, cause: Throwable): Unit = scalaTestkit.failNextNPersisted(persistenceId, n, cause) - /** - * Fail next `n` write operations for particular persistence id. - */ + /** Fail next `n` write operations for particular persistence id. */ def failNextNPersisted(persistenceId: String, n: Int): Unit = failNextNPersisted(persistenceId, n, ExpectedFailure) - /** - * Fail next `n` write operations with the `cause` exception for any persistence id. - */ + /** Fail next `n` write operations with the `cause` exception for any persistence id. */ def failNextNPersisted(n: Int, cause: Throwable): Unit = scalaTestkit.failNextNPersisted(n, cause) - /** - * Fail next `n` write operations with default exception for any persistence id. 
- */ + /** Fail next `n` write operations with default exception for any persistence id. */ def failNextNPersisted(n: Int): Unit = failNextNPersisted(n, ExpectedFailure) - /** - * Fail next write operation with `cause` exception for particular persistence id. - */ + /** Fail next write operation with `cause` exception for particular persistence id. */ def failNextPersisted(persistenceId: String, cause: Throwable): Unit = failNextNPersisted(persistenceId, 1, cause) - /** - * Fail next write operation with default exception for particular persistence id. - */ + /** Fail next write operation with default exception for particular persistence id. */ def failNextPersisted(persistenceId: String): Unit = failNextNPersisted(persistenceId, 1) - /** - * Fail next write operation event with `cause` exception for any persistence id. - */ + /** Fail next write operation event with `cause` exception for any persistence id. */ def failNextPersisted(cause: Throwable): Unit = failNextNPersisted(1, cause) - /** - * Fail next write operation with default exception for any persistence id. - */ + /** Fail next write operation with default exception for any persistence id. */ def failNextPersisted(): Unit = failNextNPersisted(1) - /** - * Fail next read from storage (recovery) attempt with `cause` exception for any persistence id. - */ + /** Fail next read from storage (recovery) attempt with `cause` exception for any persistence id. */ def failNextRead(cause: Throwable): Unit = failNextNReads(1, cause) - /** - * Fail next read from storage (recovery) attempt with default exception for any persistence id. - */ + /** Fail next read from storage (recovery) attempt with default exception for any persistence id. */ def failNextRead(): Unit = failNextNReads(1) - /** - * Fail next read from storage (recovery) attempt with `cause` exception for particular persistence id. - */ + /** Fail next read from storage (recovery) attempt with `cause` exception for particular persistence id. 
*/ def failNextRead(persistenceId: String, cause: Throwable): Unit = failNextNReads(persistenceId, 1, cause) - /** - * Fail next read from storage (recovery) attempt with default exception for any persistence id. - */ + /** Fail next read from storage (recovery) attempt with default exception for any persistence id. */ def failNextRead(persistenceId: String): Unit = failNextNReads(persistenceId, 1) - /** - * Fail next n read from storage (recovery) attempts with `cause` exception for any persistence id. - */ + /** Fail next n read from storage (recovery) attempts with `cause` exception for any persistence id. */ def failNextNReads(n: Int, cause: Throwable): Unit = scalaTestkit.failNextNReads(n, cause) - /** - * Fail next n read from storage (recovery) attempts with default exception for any persistence id. - */ + /** Fail next n read from storage (recovery) attempts with default exception for any persistence id. */ def failNextNReads(n: Int): Unit = failNextNReads(n, ExpectedFailure) - /** - * Fail next n read from storage (recovery) attempts with `cause` exception for particular persistence id. - */ + /** Fail next n read from storage (recovery) attempts with `cause` exception for particular persistence id. */ def failNextNReads(persistenceId: String, n: Int, cause: Throwable): Unit = scalaTestkit.failNextNReads(persistenceId, n, cause) - /** - * Fail next n read from storage (recovery) attempts with default exception for particular persistence id. - */ + /** Fail next n read from storage (recovery) attempts with default exception for particular persistence id. */ def failNextNReads(persistenceId: String, n: Int): Unit = failNextNReads(persistenceId, n, ExpectedFailure) - /** - * Fail next delete from storage attempt with `cause` exception for any persistence id. - */ + /** Fail next delete from storage attempt with `cause` exception for any persistence id. 
*/ def failNextDelete(cause: Throwable): Unit = failNextNDeletes(1, cause) - /** - * Fail next delete from storage attempt with default exception for any persistence id. - */ + /** Fail next delete from storage attempt with default exception for any persistence id. */ def failNextDelete(): Unit = failNextNDeletes(1) - /** - * Fail next delete from storage attempt with `cause` exception for particular persistence id. - */ + /** Fail next delete from storage attempt with `cause` exception for particular persistence id. */ def failNextDelete(persistenceId: String, cause: Throwable): Unit = failNextNDeletes(persistenceId, 1, cause) - /** - * Fail next delete from storage attempt with default exception for particular persistence id. - */ + /** Fail next delete from storage attempt with default exception for particular persistence id. */ def failNextDelete(persistenceId: String): Unit = failNextNDeletes(persistenceId, 1) - /** - * Fail next n delete from storage attempts with `cause` exception for any persistence id. - */ + /** Fail next n delete from storage attempts with `cause` exception for any persistence id. */ def failNextNDeletes(n: Int, cause: Throwable): Unit = scalaTestkit.failNextNDeletes(n, cause) - /** - * Fail next n delete from storage attempts with default exception for any persistence id. - */ + /** Fail next n delete from storage attempts with default exception for any persistence id. */ def failNextNDeletes(n: Int): Unit = failNextNDeletes(n, ExpectedFailure) - /** - * Fail next n delete from storage attempts with `cause` exception for particular persistence id. - */ + /** Fail next n delete from storage attempts with `cause` exception for particular persistence id. */ def failNextNDeletes(persistenceId: String, n: Int, cause: Throwable): Unit = scalaTestkit.failNextNDeletes(persistenceId, n, cause) - /** - * Fail next n delete from storage attempts with default exception for particular persistence id. 
- */ + /** Fail next n delete from storage attempts with default exception for particular persistence id. */ def failNextNDeletes(persistenceId: String, n: Int): Unit = failNextNDeletes(persistenceId, n, ExpectedFailure) - /** - * Receive next n events from the storage. - */ + /** Receive next n events from the storage. */ def receivePersisted[A](persistenceId: String, n: Int, cla: Class[A]): JList[A] = scalaTestkit.receivePersisted(persistenceId, n, cla).asJava - /** - * Receive for `max` time next n events from the storage. - */ + /** Receive for `max` time next n events from the storage. */ def receivePersisted[A](persistenceId: String, n: Int, cla: Class[A], max: Duration): JList[A] = scalaTestkit.receivePersisted(persistenceId, n, cla, max.asScala).asJava - /** - * Reject next n save in storage operations for particular persistence id with `cause` exception. - */ + /** Reject next n save in storage operations for particular persistence id with `cause` exception. */ def rejectNextNPersisted(persistenceId: String, n: Int, cause: Throwable): Unit = scalaTestkit.rejectNextNPersisted(persistenceId, n, cause) - /** - * Reject next n save in storage operations for particular persistence id with default exception. - */ + /** Reject next n save in storage operations for particular persistence id with default exception. */ def rejectNextNPersisted(persistenceId: String, n: Int): Unit = rejectNextNPersisted(persistenceId, n, ExpectedRejection) - /** - * Reject next n save in storage operations for any persistence id with default exception. - */ + /** Reject next n save in storage operations for any persistence id with default exception. */ def rejectNextNPersisted(n: Int): Unit = rejectNextNPersisted(n, ExpectedRejection) - /** - * Reject next n save in storage operations for any persistence id with `cause` exception. - */ + /** Reject next n save in storage operations for any persistence id with `cause` exception. 
*/ def rejectNextNPersisted(n: Int, cause: Throwable): Unit = scalaTestkit.rejectNextNPersisted(n, cause) - /** - * Reject next save in storage operation for particular persistence id with default exception. - */ + /** Reject next save in storage operation for particular persistence id with default exception. */ def rejectNextPersisted(persistenceId: String): Unit = rejectNextNPersisted(persistenceId, 1) - /** - * Reject next save in storage operation for particular persistence id with `cause` exception. - */ + /** Reject next save in storage operation for particular persistence id with `cause` exception. */ def rejectNextPersisted(persistenceId: String, cause: Throwable): Unit = rejectNextNPersisted(persistenceId, 1, cause) - /** - * Reject next save in storage operation for any persistence id with `cause` exception. - */ + /** Reject next save in storage operation for any persistence id with `cause` exception. */ def rejectNextPersisted(cause: Throwable): Unit = rejectNextNPersisted(1, cause) - /** - * Reject next save in storage operation for any persistence id with default exception. - */ + /** Reject next save in storage operation for any persistence id with default exception. */ def rejectNextPersisted(): Unit = rejectNextNPersisted(1) - /** - * Reject next read from storage operation for any persistence id with default exception. - */ + /** Reject next read from storage operation for any persistence id with default exception. */ def rejectNextRead(): Unit = rejectNextNReads(1) - /** - * Reject next read from storage operation for any persistence id with `cause` exception. - */ + /** Reject next read from storage operation for any persistence id with `cause` exception. */ def rejectNextRead(cause: Throwable): Unit = rejectNextNReads(1, cause) - /** - * Reject next n read from storage operations for any persistence id with default exception. - */ + /** Reject next n read from storage operations for any persistence id with default exception. 
*/ def rejectNextNReads(n: Int): Unit = rejectNextNReads(n, ExpectedRejection) - /** - * Reject next n read from storage operations for any persistence id with `cause` exception. - */ + /** Reject next n read from storage operations for any persistence id with `cause` exception. */ def rejectNextNReads(n: Int, cause: Throwable): Unit = scalaTestkit.rejectNextNReads(n, cause) - /** - * Reject next read from storage operation for particular persistence id with default exception. - */ + /** Reject next read from storage operation for particular persistence id with default exception. */ def rejectNextRead(persistenceId: String): Unit = rejectNextNReads(persistenceId, 1) - /** - * Reject next read from storage operation for particular persistence id with `cause` exception. - */ + /** Reject next read from storage operation for particular persistence id with `cause` exception. */ def rejectNextRead(persistenceId: String, cause: Throwable): Unit = rejectNextNReads(persistenceId, 1, cause) - /** - * Reject next n read from storage operations for particular persistence id with default exception. - */ + /** Reject next n read from storage operations for particular persistence id with default exception. */ def rejectNextNReads(persistenceId: String, n: Int): Unit = rejectNextNReads(persistenceId, n, ExpectedRejection) - /** - * Reject next n read from storage operations for particular persistence id with `cause` exception. - */ + /** Reject next n read from storage operations for particular persistence id with `cause` exception. */ def rejectNextNReads(persistenceId: String, n: Int, cause: Throwable): Unit = scalaTestkit.rejectNextNReads(persistenceId, n, cause) - /** - * Reject next delete from storage operation for any persistence id with default exception. - */ + /** Reject next delete from storage operation for any persistence id with default exception. 
*/ def rejectNextDelete(): Unit = rejectNextNDeletes(1) - /** - * Reject next delete from storage operation for any persistence id with `cause` exception. - */ + /** Reject next delete from storage operation for any persistence id with `cause` exception. */ def rejectNextDelete(cause: Throwable): Unit = rejectNextNDeletes(1, cause) - /** - * Reject next n delete from storage operations for any persistence id with default exception. - */ + /** Reject next n delete from storage operations for any persistence id with default exception. */ def rejectNextNDeletes(n: Int): Unit = rejectNextNDeletes(n, ExpectedRejection) - /** - * Reject next n delete from storage operations for any persistence id with `cause` exception. - */ + /** Reject next n delete from storage operations for any persistence id with `cause` exception. */ def rejectNextNDeletes(n: Int, cause: Throwable): Unit = scalaTestkit.rejectNextNDeletes(n, cause) - /** - * Reject next delete from storage operations for particular persistence id with default exception. - */ + /** Reject next delete from storage operations for particular persistence id with default exception. */ def rejectNextDelete(persistenceId: String): Unit = rejectNextNDeletes(persistenceId, 1) - /** - * Reject next delete from storage operations for particular persistence id with `cause` exception. - */ + /** Reject next delete from storage operations for particular persistence id with `cause` exception. */ def rejectNextDelete(persistenceId: String, cause: Throwable): Unit = rejectNextNDeletes(persistenceId, 1, cause) - /** - * Reject next n delete from storage operations for particular persistence id with default exception. - */ + /** Reject next n delete from storage operations for particular persistence id with default exception. 
*/ def rejectNextNDeletes(persistenceId: String, n: Int): Unit = rejectNextNDeletes(persistenceId, n, ExpectedRejection) - /** - * Reject next n delete from storage operations for particular persistence id with `cause` exception. - */ + /** Reject next n delete from storage operations for particular persistence id with `cause` exception. */ def rejectNextNDeletes(persistenceId: String, n: Int, cause: Throwable): Unit = scalaTestkit.rejectNextNDeletes(persistenceId, n, cause) @@ -345,15 +231,11 @@ class PersistenceTestKit(scalaTestkit: ScalaTestKit) { */ def rejectNextNOps(n: Int, cause: Throwable): Unit = scalaTestkit.rejectNextNOps(n, cause) - /** - * Persist `events` into storage in order. - */ + /** Persist `events` into storage in order. */ def persistForRecovery(persistenceId: String, events: JList[Any]): Unit = scalaTestkit.persistForRecovery(persistenceId, events.asScala.toVector) - /** - * Retrieve all events saved in storage by persistence id. - */ + /** Retrieve all events saved in storage by persistence id. */ def persistedInStorage(persistenceId: String): JList[Any] = scalaTestkit.persistedInStorage(persistenceId).asJava /** @@ -428,9 +310,7 @@ class PersistenceTestKit(scalaTestkit: ScalaTestKit) { this } - /** - * Returns default policy if it was changed by [[PersistenceTestKit.withPolicy()]]. - */ + /** Returns default policy if it was changed by [[PersistenceTestKit.withPolicy()]]. 
*/ def resetPolicy(): Unit = scalaTestkit.resetPolicy() } diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/javadsl/SnapshotTestKit.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/javadsl/SnapshotTestKit.scala index e90d39e3b67..3aee550d02b 100644 --- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/javadsl/SnapshotTestKit.scala +++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/javadsl/SnapshotTestKit.scala @@ -16,204 +16,130 @@ import akka.persistence.testkit.scaladsl.{ SnapshotTestKit => ScalaTestKit } import akka.util.JavaDurationConverters._ import akka.util.ccompat.JavaConverters._ -/** - * Class for testing persisted snapshots in persistent actors. - */ +/** Class for testing persisted snapshots in persistent actors. */ @ApiMayChange class SnapshotTestKit(scalaTestkit: ScalaTestKit) { def this(system: ActorSystem) = this(new ScalaTestKit(system)) - /** - * Check that nothing has been saved in the storage. - */ + /** Check that nothing has been saved in the storage. */ def expectNothingPersisted(persistenceId: String): Unit = scalaTestkit.expectNothingPersisted(persistenceId) - /** - * Check for `max` time that nothing has been saved in the storage. - */ + /** Check for `max` time that nothing has been saved in the storage. */ def expectNothingPersisted(persistenceId: String, max: Duration): Unit = scalaTestkit.expectNothingPersisted(persistenceId, max.asScala) - /** - * Check that `snapshot` has been saved in the storage. - */ + /** Check that `snapshot` has been saved in the storage. */ def expectNextPersisted[A](persistenceId: String, snapshot: A): A = scalaTestkit.expectNextPersisted(persistenceId, snapshot) - /** - * Check for `max` time that `snapshot` has been saved in the storage. - */ + /** Check for `max` time that `snapshot` has been saved in the storage. 
*/ def expectNextPersisted[A](persistenceId: String, snapshot: A, max: Duration): A = scalaTestkit.expectNextPersisted(persistenceId, snapshot, max.asScala) - /** - * Check that next persisted in storage for particular persistence id snapshot has expected type. - */ + /** Check that next persisted in storage for particular persistence id snapshot has expected type. */ def expectNextPersistedClass[A](persistenceId: String, cla: Class[A]): A = scalaTestkit.expectNextPersistedClass[A](persistenceId, cla) - /** - * Check for `max` time that next persisted in storage for particular persistence id snapshot has expected type. - */ + /** Check for `max` time that next persisted in storage for particular persistence id snapshot has expected type. */ def expectNextPersistedClass[A](persistenceId: String, cla: Class[A], max: Duration): A = scalaTestkit.expectNextPersistedClass[A](persistenceId, cla, max.asScala) - /** - * Fail next `n` write operations with the `cause` exception for particular persistence id. - */ + /** Fail next `n` write operations with the `cause` exception for particular persistence id. */ def failNextNPersisted(persistenceId: String, n: Int, cause: Throwable): Unit = scalaTestkit.failNextNPersisted(persistenceId, n, cause) - /** - * Fail next `n` write operations for particular persistence id. - */ + /** Fail next `n` write operations for particular persistence id. */ def failNextNPersisted(persistenceId: String, n: Int): Unit = failNextNPersisted(persistenceId, n, ExpectedFailure) - /** - * Fail next `n` write operations with the `cause` exception for any persistence id. - */ + /** Fail next `n` write operations with the `cause` exception for any persistence id. */ def failNextNPersisted(n: Int, cause: Throwable): Unit = scalaTestkit.failNextNPersisted(n, cause) - /** - * Fail next `n` write operations with default exception for any persistence id. - */ + /** Fail next `n` write operations with default exception for any persistence id. 
*/ def failNextNPersisted(n: Int): Unit = failNextNPersisted(n, ExpectedFailure) - /** - * Fail next write operations with `cause` exception for particular persistence id. - */ + /** Fail next write operations with `cause` exception for particular persistence id. */ def failNextPersisted(persistenceId: String, cause: Throwable): Unit = failNextNPersisted(persistenceId, 1, cause) - /** - * Fail next write operations with default exception for particular persistence id. - */ + /** Fail next write operations with default exception for particular persistence id. */ def failNextPersisted(persistenceId: String): Unit = failNextNPersisted(persistenceId, 1) - /** - * Fail next write operations with `cause` exception for any persistence id. - */ + /** Fail next write operations with `cause` exception for any persistence id. */ def failNextPersisted(cause: Throwable): Unit = failNextNPersisted(1, cause) - /** - * Fail next write operations with default exception for any persistence id. - */ + /** Fail next write operations with default exception for any persistence id. */ def failNextPersisted(): Unit = failNextNPersisted(1) - /** - * Fail next read from storage (recovery) attempt with `cause` exception for any persistence id. - */ + /** Fail next read from storage (recovery) attempt with `cause` exception for any persistence id. */ def failNextRead(cause: Throwable): Unit = failNextNReads(1, cause) - /** - * Fail next read from storage (recovery) attempt with default exception for any persistence id. - */ + /** Fail next read from storage (recovery) attempt with default exception for any persistence id. */ def failNextRead(): Unit = failNextNReads(1) - /** - * Fail next read from storage (recovery) attempt with `cause` exception for particular persistence id. - */ + /** Fail next read from storage (recovery) attempt with `cause` exception for particular persistence id. 
*/ def failNextRead(persistenceId: String, cause: Throwable): Unit = failNextNReads(persistenceId, 1, cause) - /** - * Fail next read from storage (recovery) attempt with default exception for any persistence id. - */ + /** Fail next read from storage (recovery) attempt with default exception for any persistence id. */ def failNextRead(persistenceId: String): Unit = failNextNReads(persistenceId, 1) - /** - * Fail next n read from storage (recovery) attempts with `cause` exception for any persistence id. - */ + /** Fail next n read from storage (recovery) attempts with `cause` exception for any persistence id. */ def failNextNReads(n: Int, cause: Throwable): Unit = scalaTestkit.failNextNReads(n, cause) - /** - * Fail next n read from storage (recovery) attempts with default exception for any persistence id. - */ + /** Fail next n read from storage (recovery) attempts with default exception for any persistence id. */ def failNextNReads(n: Int): Unit = failNextNReads(n, ExpectedFailure) - /** - * Fail next n read from storage (recovery) attempts with `cause` exception for particular persistence id. - */ + /** Fail next n read from storage (recovery) attempts with `cause` exception for particular persistence id. */ def failNextNReads(persistenceId: String, n: Int, cause: Throwable): Unit = scalaTestkit.failNextNReads(persistenceId, n, cause) - /** - * Fail next n read from storage (recovery) attempts with default exception for particular persistence id. - */ + /** Fail next n read from storage (recovery) attempts with default exception for particular persistence id. */ def failNextNReads(persistenceId: String, n: Int): Unit = failNextNReads(persistenceId, n, ExpectedFailure) - /** - * Fail next delete from storage attempt with `cause` exception for any persistence id. - */ + /** Fail next delete from storage attempt with `cause` exception for any persistence id. 
*/ def failNextDelete(cause: Throwable): Unit = failNextNDeletes(1, cause) - /** - * Fail next delete from storage attempt with default exception for any persistence id. - */ + /** Fail next delete from storage attempt with default exception for any persistence id. */ def failNextDelete(): Unit = failNextNDeletes(1) - /** - * Fail next delete from storage attempt with `cause` exception for particular persistence id. - */ + /** Fail next delete from storage attempt with `cause` exception for particular persistence id. */ def failNextDelete(persistenceId: String, cause: Throwable): Unit = failNextNDeletes(persistenceId, 1, cause) - /** - * Fail next delete from storage attempt with default exception for particular persistence id. - */ + /** Fail next delete from storage attempt with default exception for particular persistence id. */ def failNextDelete(persistenceId: String): Unit = failNextNDeletes(persistenceId, 1) - /** - * Fail next n delete from storage attempts with `cause` exception for any persistence id. - */ + /** Fail next n delete from storage attempts with `cause` exception for any persistence id. */ def failNextNDeletes(n: Int, cause: Throwable): Unit = scalaTestkit.failNextNDeletes(n, cause) - /** - * Fail next n delete from storage attempts with default exception for any persistence id. - */ + /** Fail next n delete from storage attempts with default exception for any persistence id. */ def failNextNDeletes(n: Int): Unit = failNextNDeletes(n, ExpectedFailure) - /** - * Fail next n delete from storage attempts with `cause` exception for particular persistence id. - */ + /** Fail next n delete from storage attempts with `cause` exception for particular persistence id. */ def failNextNDeletes(persistenceId: String, n: Int, cause: Throwable): Unit = scalaTestkit.failNextNDeletes(persistenceId, n, cause) - /** - * Fail next n delete from storage attempts with default exception for particular persistence id. 
- */ + /** Fail next n delete from storage attempts with default exception for particular persistence id. */ def failNextNDeletes(persistenceId: String, n: Int): Unit = failNextNDeletes(persistenceId, n, ExpectedFailure) - /** - * Receive next `n` snapshots that have been persisted in the storage. - */ + /** Receive next `n` snapshots that have been persisted in the storage. */ def receivePersisted[A](persistenceId: String, n: Int, cla: Class[A]): JList[A] = scalaTestkit.receivePersisted[A](persistenceId, n, cla).asJava - /** - * Receive for `max` time next `n` snapshots that have been persisted in the storage. - */ + /** Receive for `max` time next `n` snapshots that have been persisted in the storage. */ def receivePersisted[A](persistenceId: String, n: Int, cla: Class[A], max: Duration): JList[A] = scalaTestkit.receivePersisted[A](persistenceId, n, cla, max.asScala).asJava - /** - * Persist `snapshots` with metadata into storage in order. - */ + /** Persist `snapshots` with metadata into storage in order. */ def persistForRecovery(persistenceId: String, snapshots: JList[Pair[SnapshotMeta, Any]]): Unit = scalaTestkit.persistForRecovery(persistenceId, snapshots.asScala.toVector.map(_.toScala)) - /** - * Retrieve all snapshots and their metadata saved in storage by persistence id. - */ + /** Retrieve all snapshots and their metadata saved in storage by persistence id. */ def persistedInStorage(persistenceId: String): JList[Pair[SnapshotMeta, Any]] = scalaTestkit.persistedInStorage(persistenceId).map(p => Pair(p._1, p._2)).asJava - /** - * Clear all data from storage. - */ + /** Clear all data from storage. */ def clearAll(): Unit = scalaTestkit.clearAll() - /** - * Clear all data from storage for particular persistence id. - */ + /** Clear all data from storage for particular persistence id. 
*/ def clearByPersistenceId(persistenceId: String): Unit = scalaTestkit.clearByPersistenceId(persistenceId) /** @@ -255,9 +181,7 @@ class SnapshotTestKit(scalaTestkit: ScalaTestKit) { this } - /** - * Returns default policy if it was changed by [[SnapshotTestKit.withPolicy()]]. - */ + /** Returns default policy if it was changed by [[SnapshotTestKit.withPolicy()]]. */ def resetPolicy(): Unit = scalaTestkit.resetPolicy() } diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/javadsl/UnpersistentBehavior.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/javadsl/UnpersistentBehavior.scala index d790d77ed71..124f1a3098f 100644 --- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/javadsl/UnpersistentBehavior.scala +++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/javadsl/UnpersistentBehavior.scala @@ -15,7 +15,8 @@ import akka.persistence.testkit.internal.{ PersistenceProbeImpl, Unpersistent } object UnpersistentBehavior { - /** Given an EventSourcedBehavior, produce a non-persistent Behavior which synchronously publishes events and snapshots + /** + * Given an EventSourcedBehavior, produce a non-persistent Behavior which synchronously publishes events and snapshots * for inspection. State is updated as in the EventSourcedBehavior, and side effects are performed synchronously. The * resulting Behavior is, contingent on the command handling, event handling, and side effects being compatible with the * BehaviorTestKit, testable with the BehaviorTestKit. 
@@ -103,9 +104,7 @@ final class UnpersistentBehavior[Command, Event, State] private ( final case class PersistenceEffect[T](persistedObject: T, sequenceNr: Long, tags: Set[String]) -/** - * Not for user extension - */ +/** Not for user extension */ @DoNotInherit trait PersistenceProbe[T] { @@ -115,7 +114,8 @@ trait PersistenceProbe[T] { /** Get and remove the oldest persistence effect from the probe */ def extract(): PersistenceEffect[T] - /** Get and remove the oldest persistence effect from the probe, failing if the + /** + * Get and remove the oldest persistence effect from the probe, failing if the * persisted object is not of the requested type */ def expectPersistedClass[S <: T](clazz: Class[S]): PersistenceEffect[S] @@ -123,18 +123,21 @@ trait PersistenceProbe[T] { /** Are there any persistence effects */ def hasEffects: Boolean - /** Assert that the given object was persisted in the oldest persistence effect and + /** + * Assert that the given object was persisted in the oldest persistence effect and * remove that persistence effect */ def expectPersisted(obj: T): PersistenceProbe[T] - /** Assert that the given object was persisted with the given tag in the oldest persistence + /** + * Assert that the given object was persisted with the given tag in the oldest persistence * effect and remove that persistence effect. If the persistence effect has multiple tags, * only one of them has to match in order for the assertion to succeed. */ def expectPersisted(obj: T, tag: String): PersistenceProbe[T] - /** Assert that the given object was persisted with the given tag in the oldest persistence + /** + * Assert that the given object was persisted with the given tag in the oldest persistence * effect and remove that persistence effect. If the persistence effect has tags which are * not given, the assertion fails. 
*/ diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/query/internal/EventsByPersistenceIdStage.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/query/internal/EventsByPersistenceIdStage.scala index 855534852dd..54a02f78299 100644 --- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/query/internal/EventsByPersistenceIdStage.scala +++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/query/internal/EventsByPersistenceIdStage.scala @@ -19,9 +19,7 @@ import akka.stream.stage.GraphStageLogic import akka.stream.stage.GraphStageLogicWithLogging import akka.stream.stage.OutHandler -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi final private[akka] class EventsByPersistenceIdStage[Event]( persistenceId: String, diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/query/scaladsl/PersistenceTestKitReadJournal.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/query/scaladsl/PersistenceTestKitReadJournal.scala index bf87a3dd258..ba22419cd94 100644 --- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/query/scaladsl/PersistenceTestKitReadJournal.scala +++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/query/scaladsl/PersistenceTestKitReadJournal.scala @@ -43,18 +43,14 @@ import akka.util.unused object PersistenceTestKitReadJournal { val Identifier = "akka.persistence.testkit.query" - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def tagsFor(payload: Any): Set[String] = payload match { case Tagged(_, tags) => tags case _ => Set.empty } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def timestampOffsetFor(pr: PersistentRepr) = { // Note: we don't really have microsecond granularity here, but the testkit uses an increasing unique timestamp @@ -106,15 +102,14 @@ final class PersistenceTestKitReadJournal(system: ExtendedActorSystem, @unused c 
toSequenceNr, storage, persistence.sliceForPersistenceId)) - .map( - env => - EventEnvelope( - Sequence(env.sequenceNr), - env.persistenceId, - env.sequenceNr, - env.event, - env.timestamp, - env.eventMetadata)) + .map(env => + EventEnvelope( + Sequence(env.sequenceNr), + env.persistenceId, + env.sequenceNr, + env.event, + env.timestamp, + env.eventMetadata)) override def eventsByPersistenceIdTyped[Event]( persistenceId: String, @@ -192,11 +187,13 @@ final class PersistenceTestKitReadJournal(system: ExtendedActorSystem, @unused c case _ => throw new UnsupportedOperationException("Offsets not supported for persistence test kit currentEventsByTag yet") } - val prs = storage.tryRead(entityType, repr => { - val pid = repr.persistenceId - val slice = persistence.sliceForPersistenceId(pid) - PersistenceId.extractEntityType(pid) == entityType && slice >= minSlice && slice <= maxSlice - }) + val prs = storage.tryRead( + entityType, + repr => { + val pid = repr.persistenceId + val slice = persistence.sliceForPersistenceId(pid) + PersistenceId.extractEntityType(pid) == entityType && slice >= minSlice && slice <= maxSlice + }) Source(prs).map { pr => val slice = persistence.sliceForPersistenceId(pr.persistenceId) new typed.EventEnvelope[Event]( diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/scaladsl/EventSourcedBehaviorTestKit.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/scaladsl/EventSourcedBehaviorTestKit.scala index 0cdebea755b..a96ea373bc3 100644 --- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/scaladsl/EventSourcedBehaviorTestKit.scala +++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/scaladsl/EventSourcedBehaviorTestKit.scala @@ -36,10 +36,13 @@ object EventSourcedBehaviorTestKit { * constructor parameter to `ScalaTestWithActorTestKit`. The configuration enables the in-memory * journal and snapshot storage. 
*/ - val config: Config = ConfigFactory.parseString(""" + val config: Config = ConfigFactory + .parseString(""" akka.persistence.testkit.events.serialize = off akka.persistence.testkit.snapshots.serialize = off - """).withFallback(PersistenceTestKitPlugin.config).withFallback(PersistenceTestKitSnapshotPlugin.config) + """) + .withFallback(PersistenceTestKitPlugin.config) + .withFallback(PersistenceTestKitSnapshotPlugin.config) object SerializationSettings { val enabled: SerializationSettings = new SerializationSettings( @@ -93,9 +96,7 @@ object EventSourcedBehaviorTestKit { } } - /** - * Factory method to create a new EventSourcedBehaviorTestKit. - */ + /** Factory method to create a new EventSourcedBehaviorTestKit. */ def apply[Command, Event, State]( system: ActorSystem[_], behavior: Behavior[Command]): EventSourcedBehaviorTestKit[Command, Event, State] = @@ -113,14 +114,10 @@ object EventSourcedBehaviorTestKit { serializationSettings: SerializationSettings): EventSourcedBehaviorTestKit[Command, Event, State] = new EventSourcedBehaviorTestKitImpl(ActorTestKit(system), behavior, serializationSettings) - /** - * The result of running a command. - */ + /** The result of running a command. */ @DoNotInherit trait CommandResult[Command, Event, State] { - /** - * The command that was run. - */ + /** The command that was run. */ def command: Command /** @@ -130,14 +127,10 @@ object EventSourcedBehaviorTestKit { */ def events: immutable.Seq[Event] - /** - * `true` if no events were emitted by the command. - */ + /** `true` if no events were emitted by the command. */ def hasNoEvents: Boolean - /** - * The first event. It will throw `AssertionError` if there is no event. - */ + /** The first event. It will throw `AssertionError` if there is no event. */ def event: Event /** @@ -146,14 +139,10 @@ object EventSourcedBehaviorTestKit { */ def eventOfType[E <: Event: ClassTag]: E - /** - * The state after applying the events. - */ + /** The state after applying the events. 
*/ def state: State - /** - * The state as a given expected type. It will throw `AssertionError` if the state is of a different type. - */ + /** The state as a given expected type. It will throw `AssertionError` if the state is of a different type. */ def stateOfType[S <: State: ClassTag]: S } @@ -164,9 +153,7 @@ object EventSourcedBehaviorTestKit { @DoNotInherit trait CommandResultWithReply[Command, Event, State, Reply] extends CommandResult[Command, Event, State] { - /** - * The reply. It will throw `AssertionError` if there was no reply. - */ + /** The reply. It will throw `AssertionError` if there was no reply. */ def reply: Reply /** @@ -175,21 +162,15 @@ object EventSourcedBehaviorTestKit { */ def replyOfType[R <: Reply: ClassTag]: R - /** - * `true` if there is no reply. - */ + /** `true` if there is no reply. */ def hasNoReply: Boolean } - /** - * The result of restarting the behavior. - */ + /** The result of restarting the behavior. */ @DoNotInherit trait RestartResult[State] { - /** - * The state after recovery. - */ + /** The state after recovery. */ def state: State } @@ -212,9 +193,7 @@ object EventSourcedBehaviorTestKit { */ def runCommand[R](creator: ActorRef[R] => Command): CommandResultWithReply[Command, Event, State, R] - /** - * Retrieve the current state of the Behavior. - */ + /** Retrieve the current state of the Behavior. */ def getState(): State /** @@ -223,9 +202,7 @@ object EventSourcedBehaviorTestKit { */ def restart(): RestartResult[State] - /** - * Clears the in-memory journal and snapshot storage and restarts the behavior. - */ + /** Clears the in-memory journal and snapshot storage and restarts the behavior. */ def clear(): Unit /** @@ -242,9 +219,7 @@ object EventSourcedBehaviorTestKit { */ def snapshotTestKit: Option[SnapshotTestKit] - /** - * Initializes behavior from provided state and/or events. - */ + /** Initializes behavior from provided state and/or events. 
*/ def initialize(state: State, events: Event*): Unit def initialize(events: Event*): Unit } diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/scaladsl/PersistenceTestKit.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/scaladsl/PersistenceTestKit.scala index 917452ae7f3..e9628a00c5b 100644 --- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/scaladsl/PersistenceTestKit.scala +++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/scaladsl/PersistenceTestKit.scala @@ -29,144 +29,88 @@ import akka.testkit.TestProbe private[testkit] trait CommonTestKitOps[S, P] extends ClearOps with PolicyOpsTestKit[P] { this: HasStorage[P, S] => - /** - * Check that nothing has been saved in the storage. - */ + /** Check that nothing has been saved in the storage. */ def expectNothingPersisted(persistenceId: String): Unit - /** - * Check for `max` time that nothing has been saved in the storage. - */ + /** Check for `max` time that nothing has been saved in the storage. */ def expectNothingPersisted(persistenceId: String, max: FiniteDuration): Unit - /** - * Check that `event` has been saved in the storage. - */ + /** Check that `event` has been saved in the storage. */ def expectNextPersisted[A](persistenceId: String, event: A): A - /** - * Check for `max` time that `event` has been saved in the storage. - */ + /** Check for `max` time that `event` has been saved in the storage. */ def expectNextPersisted[A](persistenceId: String, event: A, max: FiniteDuration): A - /** - * Fail next `n` write operations with the `cause` exception for particular persistence id. - */ + /** Fail next `n` write operations with the `cause` exception for particular persistence id. */ def failNextNPersisted(persistenceId: String, n: Int, cause: Throwable): Unit - /** - * Fail next `n` write operations for particular persistence id. - */ + /** Fail next `n` write operations for particular persistence id. 
*/ def failNextNPersisted(persistenceId: String, n: Int): Unit = failNextNPersisted(persistenceId, n, ExpectedFailure) - /** - * Fail next `n` write operations with the `cause` exception for any persistence id. - */ + /** Fail next `n` write operations with the `cause` exception for any persistence id. */ def failNextNPersisted(n: Int, cause: Throwable): Unit - /** - * Fail next `n` write operations with default exception for any persistence id. - */ + /** Fail next `n` write operations with default exception for any persistence id. */ def failNextNPersisted(n: Int): Unit = failNextNPersisted(n, ExpectedFailure) - /** - * Fail next write operation with `cause` exception for particular persistence id. - */ + /** Fail next write operation with `cause` exception for particular persistence id. */ def failNextPersisted(persistenceId: String, cause: Throwable): Unit = failNextNPersisted(persistenceId, 1, cause) - /** - * Fail next write operation with default exception for particular persistence id. - */ + /** Fail next write operation with default exception for particular persistence id. */ def failNextPersisted(persistenceId: String): Unit = failNextNPersisted(persistenceId, 1) - /** - * Fail next write operation with `cause` exception for any persistence id. - */ + /** Fail next write operation with `cause` exception for any persistence id. */ def failNextPersisted(cause: Throwable): Unit = failNextNPersisted(1, cause) - /** - * Fail next write operation with default exception for any persistence id. - */ + /** Fail next write operation with default exception for any persistence id. */ def failNextPersisted(): Unit = failNextNPersisted(1) - /** - * Fail next read from storage (recovery) attempt with `cause` exception for any persistence id. - */ + /** Fail next read from storage (recovery) attempt with `cause` exception for any persistence id. 
*/ def failNextRead(cause: Throwable): Unit = failNextNReads(1, cause) - /** - * Fail next read from storage (recovery) attempt with default exception for any persistence id. - */ + /** Fail next read from storage (recovery) attempt with default exception for any persistence id. */ def failNextRead(): Unit = failNextNReads(1) - /** - * Fail next read from storage (recovery) attempt with `cause` exception for particular persistence id. - */ + /** Fail next read from storage (recovery) attempt with `cause` exception for particular persistence id. */ def failNextRead(persistenceId: String, cause: Throwable): Unit = failNextNReads(persistenceId, 1, cause) - /** - * Fail next read from storage (recovery) attempt with default exception for any persistence id. - */ + /** Fail next read from storage (recovery) attempt with default exception for any persistence id. */ def failNextRead(persistenceId: String): Unit = failNextNReads(persistenceId, 1) - /** - * Fail next n read from storage (recovery) attempts with `cause` exception for any persistence id. - */ + /** Fail next n read from storage (recovery) attempts with `cause` exception for any persistence id. */ def failNextNReads(n: Int, cause: Throwable): Unit - /** - * Fail next n read from storage (recovery) attempts with default exception for any persistence id. - */ + /** Fail next n read from storage (recovery) attempts with default exception for any persistence id. */ def failNextNReads(n: Int): Unit = failNextNReads(n, ExpectedFailure) - /** - * Fail next n read from storage (recovery) attempts with `cause` exception for particular persistence id. - */ + /** Fail next n read from storage (recovery) attempts with `cause` exception for particular persistence id. */ def failNextNReads(persistenceId: String, n: Int, cause: Throwable): Unit - /** - * Fail next n read from storage (recovery) attempts with default exception for particular persistence id. 
- */ + /** Fail next n read from storage (recovery) attempts with default exception for particular persistence id. */ def failNextNReads(persistenceId: String, n: Int): Unit = failNextNReads(persistenceId, n, ExpectedFailure) - /** - * Fail next delete from storage attempt with `cause` exception for any persistence id. - */ + /** Fail next delete from storage attempt with `cause` exception for any persistence id. */ def failNextDelete(cause: Throwable): Unit = failNextNDeletes(1, cause) - /** - * Fail next delete from storage attempt with default exception for any persistence id. - */ + /** Fail next delete from storage attempt with default exception for any persistence id. */ def failNextDelete(): Unit = failNextNDeletes(1) - /** - * Fail next delete from storage attempt with `cause` exception for particular persistence id. - */ + /** Fail next delete from storage attempt with `cause` exception for particular persistence id. */ def failNextDelete(persistenceId: String, cause: Throwable): Unit = failNextNDeletes(persistenceId, 1, cause) - /** - * Fail next delete from storage attempt with default exception for particular persistence id. - */ + /** Fail next delete from storage attempt with default exception for particular persistence id. */ def failNextDelete(persistenceId: String): Unit = failNextNDeletes(persistenceId, 1) - /** - * Fail next n delete from storage attempts with `cause` exception for any persistence id. - */ + /** Fail next n delete from storage attempts with `cause` exception for any persistence id. */ def failNextNDeletes(n: Int, cause: Throwable): Unit - /** - * Fail next n delete from storage attempts with default exception for any persistence id. - */ + /** Fail next n delete from storage attempts with default exception for any persistence id. */ def failNextNDeletes(n: Int): Unit = failNextNDeletes(n, ExpectedFailure) - /** - * Fail next n delete from storage attempts with `cause` exception for particular persistence id. 
- */ + /** Fail next n delete from storage attempts with `cause` exception for particular persistence id. */ def failNextNDeletes(persistenceId: String, n: Int, cause: Throwable): Unit - /** - * Fail next n delete from storage attempts with default exception for particular persistence id. - */ + /** Fail next n delete from storage attempts with default exception for particular persistence id. */ def failNextNDeletes(persistenceId: String, n: Int): Unit = failNextNDeletes(persistenceId, n, ExpectedFailure) } @@ -177,135 +121,83 @@ private[testkit] trait PersistenceTestKitOps[S, P] with CommonTestKitOps[S, P] { this: HasStorage[P, S] => - /** - * Reject next n save in storage operations for particular persistence id with `cause` exception. - */ + /** Reject next n save in storage operations for particular persistence id with `cause` exception. */ def rejectNextNPersisted(persistenceId: String, n: Int, cause: Throwable): Unit - /** - * Reject next n save in storage operations for particular persistence id with default exception. - */ + /** Reject next n save in storage operations for particular persistence id with default exception. */ def rejectNextNPersisted(persistenceId: String, n: Int): Unit = rejectNextNPersisted(persistenceId, n, ExpectedRejection) - /** - * Reject next n save in storage operations for any persistence id with default exception. - */ + /** Reject next n save in storage operations for any persistence id with default exception. */ def rejectNextNPersisted(n: Int): Unit = rejectNextNPersisted(n, ExpectedRejection) - /** - * Reject next n save in storage operations for any persistence id with `cause` exception. - */ + /** Reject next n save in storage operations for any persistence id with `cause` exception. */ def rejectNextNPersisted(n: Int, cause: Throwable): Unit - /** - * Reject next save in storage operation for particular persistence id with default exception. 
- */ + /** Reject next save in storage operation for particular persistence id with default exception. */ def rejectNextPersisted(persistenceId: String): Unit = rejectNextNPersisted(persistenceId, 1) - /** - * Reject next save in storage operation for particular persistence id with `cause` exception. - */ + /** Reject next save in storage operation for particular persistence id with `cause` exception. */ def rejectNextPersisted(persistenceId: String, cause: Throwable): Unit = rejectNextNPersisted(persistenceId, 1, cause) - /** - * Reject next save in storage operation for any persistence id with `cause` exception. - */ + /** Reject next save in storage operation for any persistence id with `cause` exception. */ def rejectNextPersisted(cause: Throwable): Unit = rejectNextNPersisted(1, cause) - /** - * Reject next save in storage operation for any persistence id with default exception. - */ + /** Reject next save in storage operation for any persistence id with default exception. */ def rejectNextPersisted(): Unit = rejectNextNPersisted(1) - /** - * Reject next read from storage operation for any persistence id with default exception. - */ + /** Reject next read from storage operation for any persistence id with default exception. */ def rejectNextRead(): Unit = rejectNextNReads(1) - /** - * Reject next read from storage operation for any persistence id with `cause` exception. - */ + /** Reject next read from storage operation for any persistence id with `cause` exception. */ def rejectNextRead(cause: Throwable): Unit = rejectNextNReads(1, cause) - /** - * Reject next n read from storage operations for any persistence id with default exception. - */ + /** Reject next n read from storage operations for any persistence id with default exception. */ def rejectNextNReads(n: Int): Unit = rejectNextNReads(n, ExpectedRejection) - /** - * Reject next n read from storage operations for any persistence id with `cause` exception. 
- */ + /** Reject next n read from storage operations for any persistence id with `cause` exception. */ def rejectNextNReads(n: Int, cause: Throwable): Unit - /** - * Reject next read from storage operation for particular persistence id with default exception. - */ + /** Reject next read from storage operation for particular persistence id with default exception. */ def rejectNextRead(persistenceId: String): Unit = rejectNextNReads(persistenceId, 1) - /** - * Reject next read from storage operation for particular persistence id with `cause` exception. - */ + /** Reject next read from storage operation for particular persistence id with `cause` exception. */ def rejectNextRead(persistenceId: String, cause: Throwable): Unit = rejectNextNReads(persistenceId, 1, cause) - /** - * Reject next n read from storage operations for particular persistence id with default exception. - */ + /** Reject next n read from storage operations for particular persistence id with default exception. */ def rejectNextNReads(persistenceId: String, n: Int): Unit = rejectNextNReads(persistenceId, n, ExpectedRejection) - /** - * Reject next n read from storage operations for particular persistence id with `cause` exception. - */ + /** Reject next n read from storage operations for particular persistence id with `cause` exception. */ def rejectNextNReads(persistenceId: String, n: Int, cause: Throwable): Unit - /** - * Reject next delete from storage operation for any persistence id with default exception. - */ + /** Reject next delete from storage operation for any persistence id with default exception. */ def rejectNextDelete(): Unit = rejectNextNDeletes(1) - /** - * Reject next delete from storage operation for any persistence id with `cause` exception. - */ + /** Reject next delete from storage operation for any persistence id with `cause` exception. 
*/ def rejectNextDelete(cause: Throwable): Unit = rejectNextNDeletes(1, cause) - /** - * Reject next n delete from storage operations for any persistence id with default exception. - */ + /** Reject next n delete from storage operations for any persistence id with default exception. */ def rejectNextNDeletes(n: Int): Unit = rejectNextNDeletes(n, ExpectedRejection) - /** - * Reject next n delete from storage operations for any persistence id with `cause` exception. - */ + /** Reject next n delete from storage operations for any persistence id with `cause` exception. */ def rejectNextNDeletes(n: Int, cause: Throwable): Unit - /** - * Reject next delete from storage operations for particular persistence id with default exception. - */ + /** Reject next delete from storage operations for particular persistence id with default exception. */ def rejectNextDelete(persistenceId: String): Unit = rejectNextNDeletes(persistenceId, 1) - /** - * Reject next delete from storage operations for particular persistence id with `cause` exception. - */ + /** Reject next delete from storage operations for particular persistence id with `cause` exception. */ def rejectNextDelete(persistenceId: String, cause: Throwable): Unit = rejectNextNDeletes(persistenceId, 1, cause) - /** - * Reject next n delete from storage operations for particular persistence id with default exception. - */ + /** Reject next n delete from storage operations for particular persistence id with default exception. */ def rejectNextNDeletes(persistenceId: String, n: Int): Unit = rejectNextNDeletes(persistenceId, n, ExpectedRejection) - /** - * Reject next n delete from storage operations for particular persistence id with `cause` exception. - */ + /** Reject next n delete from storage operations for particular persistence id with `cause` exception. */ def rejectNextNDeletes(persistenceId: String, n: Int, cause: Throwable): Unit - /** - * Persist `snapshots` into storage in order. 
- */ + /** Persist `snapshots` into storage in order. */ def persistForRecovery(persistenceId: String, events: immutable.Seq[Any]): Unit - /** - * Retrieve all snapshots saved in storage by persistence id. - */ + /** Retrieve all snapshots saved in storage by persistence id. */ def persistedInStorage(persistenceId: String): immutable.Seq[Any] } @@ -368,25 +260,18 @@ class SnapshotTestKit(system: ActorSystem) override def failNextNDeletes(persistenceId: String, n: Int, cause: Throwable): Unit = failNextNOpsCond((pid, op) => pid == persistenceId && op.isInstanceOf[DeleteSnapshot], n, cause) - /** - * Persist `elems` pairs of (snapshot metadata, snapshot payload) into storage. - */ + /** Persist `elems` pairs of (snapshot metadata, snapshot payload) into storage. */ def persistForRecovery(persistenceId: String, elems: immutable.Seq[(SnapshotMeta, Any)]): Unit = - elems.foreach { - case (m, p) => - storage.add(persistenceId, (SnapshotMetadata(persistenceId, m.sequenceNr, m.timestamp), p)) - addToIndex(persistenceId, 1) + elems.foreach { case (m, p) => + storage.add(persistenceId, (SnapshotMetadata(persistenceId, m.sequenceNr, m.timestamp), p)) + addToIndex(persistenceId, 1) } - /** - * Persist a pair of (snapshot metadata, snapshot payload) into storage. - */ + /** Persist a pair of (snapshot metadata, snapshot payload) into storage. */ def persistForRecovery(persistenceId: String, elem: (SnapshotMeta, Any)): Unit = persistForRecovery(persistenceId, immutable.Seq(elem)) - /** - * Retrieve snapshots and their metadata from storage by persistence id. - */ + /** Retrieve snapshots and their metadata from storage by persistence id. 
*/ def persistedInStorage(persistenceId: String): immutable.Seq[(SnapshotMeta, Any)] = storage .read(persistenceId) diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/scaladsl/TestOps.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/scaladsl/TestOps.scala index db822839da9..d2b1b6ba346 100644 --- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/scaladsl/TestOps.scala +++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/scaladsl/TestOps.scala @@ -106,9 +106,7 @@ private[testkit] trait PolicyOpsTestKit[P] { this } - /** - * Returns default policy if it was changed by [[PolicyOpsTestKit.this.withPolicy()]]. - */ + /** Returns default policy if it was changed by [[PolicyOpsTestKit.this.withPolicy()]]. */ def resetPolicy(): Unit = storage.resetPolicy() } @@ -128,106 +126,91 @@ private[testkit] trait ExpectOps[U] { private[testkit] def reprToAny(repr: U): Any - /** - * Check that next persisted in storage for particular persistence id event/snapshot was `event`. - */ + /** Check that next persisted in storage for particular persistence id event/snapshot was `event`. */ def expectNextPersisted[A](persistenceId: String, event: A): A = expectNextPersisted(persistenceId, event, maxTimeout) def getItem(persistenceId: String, nextInd: Int): Option[Any] = storage.findOneByIndex(persistenceId, nextInd).map(reprToAny) - /** - * Check for `max` time that next persisted in storage for particular persistence id event/snapshot was `event`. - */ + /** Check for `max` time that next persisted in storage for particular persistence id event/snapshot was `event`. 
*/ def expectNextPersisted[A](persistenceId: String, event: A, max: FiniteDuration): A = { val nextInd = nextIndex(persistenceId) val expected = Some(event) - val res = awaitAssert({ - val actual = getItem(persistenceId, nextInd) - assert(actual == expected, s"Failed to persist $event, got $actual instead") - actual - }, max = max.dilated, interval = pollInterval) + val res = awaitAssert( + { + val actual = getItem(persistenceId, nextInd) + assert(actual == expected, s"Failed to persist $event, got $actual instead") + actual + }, + max = max.dilated, + interval = pollInterval) setIndex(persistenceId, nextInd + 1) res.get.asInstanceOf[A] } - /** - * Check that next persisted in storage for particular persistence id event/snapshot has expected type. - */ + /** Check that next persisted in storage for particular persistence id event/snapshot has expected type. */ def expectNextPersistedType[A](persistenceId: String)(implicit t: ClassTag[A]): A = expectNextPersistedType(persistenceId, maxTimeout) - /** - * Check for `max` time that next persisted in storage for particular persistence id event/snapshot has expected type. - */ + /** Check for `max` time that next persisted in storage for particular persistence id event/snapshot has expected type. */ def expectNextPersistedType[A](persistenceId: String, max: FiniteDuration)(implicit t: ClassTag[A]): A = expectNextPersistedClass(persistenceId, t.runtimeClass.asInstanceOf[Class[A]], max) - /** - * Check that next persisted in storage for particular persistence id event/snapshot has expected type. - */ + /** Check that next persisted in storage for particular persistence id event/snapshot has expected type. */ def expectNextPersistedClass[A](persistenceId: String, cla: Class[A]): A = expectNextPersistedClass(persistenceId, cla, maxTimeout) - /** - * Check for `max` time that next persisted in storage for particular persistence id event/snapshot has expected type. 
- */ + /** Check for `max` time that next persisted in storage for particular persistence id event/snapshot has expected type. */ def expectNextPersistedClass[A](persistenceId: String, cla: Class[A], max: FiniteDuration): A = { val nextInd = nextIndex(persistenceId) val c = util.BoxedType(cla) - val res = awaitAssert({ - val actual = storage.findOneByIndex(persistenceId, nextInd).map(reprToAny) - assert(actual.isDefined, s"Expected: $cla but got no event") - val a = actual.get - assert(c.isInstance(a), s"Expected: $cla but got unexpected ${a.getClass}") - a.asInstanceOf[A] - }, max.dilated, interval = pollInterval) + val res = awaitAssert( + { + val actual = storage.findOneByIndex(persistenceId, nextInd).map(reprToAny) + assert(actual.isDefined, s"Expected: $cla but got no event") + val a = actual.get + assert(c.isInstance(a), s"Expected: $cla but got unexpected ${a.getClass}") + a.asInstanceOf[A] + }, + max.dilated, + interval = pollInterval) setIndex(persistenceId, nextInd + 1) res } - /** - * Check that nothing was persisted in storage for particular persistence id. - */ + /** Check that nothing was persisted in storage for particular persistence id. */ def expectNothingPersisted(persistenceId: String): Unit = expectNothingPersisted(persistenceId, maxTimeout) - /** - * Check for `max` time that nothing was persisted in storage for particular persistence id. - */ + /** Check for `max` time that nothing was persisted in storage for particular persistence id. 
*/ def expectNothingPersisted(persistenceId: String, max: FiniteDuration): Unit = { val nextInd = nextIndex(persistenceId) - assertForDuration({ - val actual = storage.findOneByIndex(persistenceId, nextInd).map(reprToAny) - val res = actual.isEmpty - assert(res, s"Found persisted event $actual, but expected None instead") - }, max = max.dilated, interval = pollInterval) + assertForDuration( + { + val actual = storage.findOneByIndex(persistenceId, nextInd).map(reprToAny) + val res = actual.isEmpty + assert(res, s"Found persisted event $actual, but expected None instead") + }, + max = max.dilated, + interval = pollInterval) } - /** - * Receive for `max` time next `n` events/snapshots that have been persisted in the storage. - */ - def receivePersisted[A](persistenceId: String, n: Int, max: FiniteDuration)( - implicit t: ClassTag[A]): immutable.Seq[A] = + /** Receive for `max` time next `n` events/snapshots that have been persisted in the storage. */ + def receivePersisted[A](persistenceId: String, n: Int, max: FiniteDuration)(implicit + t: ClassTag[A]): immutable.Seq[A] = receivePersisted(persistenceId, n, t.runtimeClass.asInstanceOf[Class[A]], max) - /** - * Receive next `n` events/snapshots that have been persisted in the storage. - */ + /** Receive next `n` events/snapshots that have been persisted in the storage. */ def receivePersisted[A](persistenceId: String, n: Int)(implicit t: ClassTag[A]): immutable.Seq[A] = receivePersisted(persistenceId, n, t.runtimeClass.asInstanceOf[Class[A]], maxTimeout) - /** - * Receive next `n` events/snapshots that have been persisted in the storage. - */ + /** Receive next `n` events/snapshots that have been persisted in the storage. */ def receivePersisted[A](persistenceId: String, n: Int, cla: Class[A]): immutable.Seq[A] = receivePersisted(persistenceId, n, cla, maxTimeout) - /** - * Receive for `max` time next `n` events/snapshots that have been persisted in the storage. 
- */ + /** Receive for `max` time next `n` events/snapshots that have been persisted in the storage. */ def receivePersisted[A](persistenceId: String, n: Int, cla: Class[A], max: FiniteDuration): immutable.Seq[A] = { val nextInd = nextIndex(persistenceId) val bt = BoxedType(cla) @@ -316,7 +299,7 @@ private[testkit] trait HasStorage[P, R] { protected def storage: TestKitStorage[P, R] - //todo needs to be thread safe (atomic read-increment-write) for parallel tests. Do we need parallel tests support? + // todo needs to be thread safe (atomic read-increment-write) for parallel tests. Do we need parallel tests support? @volatile private var nextIndexByPersistenceId: immutable.Map[String, Int] = Map.empty diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/scaladsl/UnpersistentBehavior.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/scaladsl/UnpersistentBehavior.scala index edc24c74a8d..9bac514744b 100644 --- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/scaladsl/UnpersistentBehavior.scala +++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/scaladsl/UnpersistentBehavior.scala @@ -20,7 +20,8 @@ sealed trait UnpersistentBehavior[Command, State] { object UnpersistentBehavior { - /** Given an EventSourcedBehavior, produce a non-persistent Behavior which synchronously publishes events and snapshots + /** + * Given an EventSourcedBehavior, produce a non-persistent Behavior which synchronously publishes events and snapshots * for inspection. State is updated as in the EventSourcedBehavior, and side effects are performed synchronously. The * resulting Behavior is, contingent on the command handling, event handling, and side effects being compatible with the * BehaviorTestKit, testable with the BehaviorTestKit. 
@@ -84,9 +85,7 @@ object UnpersistentBehavior { final case class PersistenceEffect[T](persistedObject: T, sequenceNr: Long, tags: Set[String]) -/** - * Not for user extension - */ +/** Not for user extension */ @DoNotInherit trait PersistenceProbe[T] { @@ -96,7 +95,8 @@ trait PersistenceProbe[T] { /** Get and remove the oldest persistence effect from the probe */ def extract(): PersistenceEffect[T] - /** Get and remove the oldest persistence effect from the probe, failing if the + /** + * Get and remove the oldest persistence effect from the probe, failing if the * persisted object is not of the requested type */ def expectPersistedType[S <: T: ClassTag](): PersistenceEffect[S] @@ -104,19 +104,22 @@ trait PersistenceProbe[T] { /** Are there any persistence effects? */ def hasEffects: Boolean - /** Assert that the given object was persisted in the oldest persistence effect and + /** + * Assert that the given object was persisted in the oldest persistence effect and * remove that persistence effect */ def expectPersisted(obj: T): PersistenceProbe[T] - /** Assert that the given object was persisted with the given tag in the oldest + /** + * Assert that the given object was persisted with the given tag in the oldest * persistence effect and remove that persistence effect. If the persistence * effect has multiple tags, only one of them has to match in order for the * assertion to succeed. */ def expectPersisted(obj: T, tag: String): PersistenceProbe[T] - /** Assert that the given object was persisted with the given tags in the oldest + /** + * Assert that the given object was persisted with the given tags in the oldest * persistence effect and remove that persistence effect. If the persistence * effect has tags which are not given, the assertion fails. 
*/ diff --git a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/state/scaladsl/PersistenceTestKitDurableStateStore.scala b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/state/scaladsl/PersistenceTestKitDurableStateStore.scala index 6b0a2173239..15c0eb75c8e 100644 --- a/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/state/scaladsl/PersistenceTestKitDurableStateStore.scala +++ b/akka-persistence-testkit/src/main/scala/akka/persistence/testkit/state/scaladsl/PersistenceTestKitDurableStateStore.scala @@ -79,7 +79,7 @@ class PersistenceTestKitDurableStateStore[A](val system: ExtendedActorSystem) val updatedRecord = Record[A](globalOffset, persistenceId, revision, None, record.tag) store = store + (persistenceId -> updatedRecord) publisher ! updatedRecord - case None => //ignore + case None => // ignore } Future.successful(Done) @@ -118,12 +118,14 @@ class PersistenceTestKitDurableStateStore[A](val system: ExtendedActorSystem) override def currentChanges(tag: String, offset: Offset): Source[DurableStateChange[A], akka.NotUsed] = this.synchronized { val currentGlobalOffset = lastGlobalOffset.get() - changes(tag, offset).takeWhile(_.offset match { - case Sequence(fromOffset) => - fromOffset < currentGlobalOffset - case offset => - throw new UnsupportedOperationException(s"$offset not supported in PersistenceTestKitDurableStateStore.") - }, inclusive = true) + changes(tag, offset).takeWhile( + _.offset match { + case Sequence(fromOffset) => + fromOffset < currentGlobalOffset + case offset => + throw new UnsupportedOperationException(s"$offset not supported in PersistenceTestKitDurableStateStore.") + }, + inclusive = true) } override def currentChangesBySlices( @@ -133,12 +135,14 @@ class PersistenceTestKitDurableStateStore[A](val system: ExtendedActorSystem) offset: Offset): Source[DurableStateChange[A], NotUsed] = this.synchronized { val currentGlobalOffset = lastGlobalOffset.get() - changesBySlices(entityType, 
minSlice, maxSlice, offset).takeWhile(_.offset match { - case Sequence(fromOffset) => - fromOffset < currentGlobalOffset - case offset => - throw new UnsupportedOperationException(s"$offset not supported in PersistenceTestKitDurableStateStore.") - }, inclusive = true) + changesBySlices(entityType, minSlice, maxSlice, offset).takeWhile( + _.offset match { + case Sequence(fromOffset) => + fromOffset < currentGlobalOffset + case offset => + throw new UnsupportedOperationException(s"$offset not supported in PersistenceTestKitDurableStateStore.") + }, + inclusive = true) } override def changesBySlices( @@ -155,7 +159,8 @@ class PersistenceTestKitDurableStateStore[A](val system: ExtendedActorSystem) } def bySliceFromOffset(rec: Record[A]) = { val slice = persistence.sliceForPersistenceId(rec.persistenceId) - PersistenceId.extractEntityType(rec.persistenceId) == entityType && slice >= minSlice && slice <= maxSlice && rec.globalOffset > fromOffset + PersistenceId.extractEntityType( + rec.persistenceId) == entityType && slice >= minSlice && slice <= maxSlice && rec.globalOffset > fromOffset } def bySliceFromOffsetNotDeleted(rec: Record[A]) = bySliceFromOffset(rec) && storeContains(rec.persistenceId) diff --git a/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/javadsl/CommonSnapshotTests.scala b/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/javadsl/CommonSnapshotTests.scala index 40d98607c79..fc3df839b39 100644 --- a/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/javadsl/CommonSnapshotTests.scala +++ b/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/javadsl/CommonSnapshotTests.scala @@ -148,7 +148,7 @@ trait CommonSnapshotTests extends JavaDslUtils { val a = system.actorOf(Props(classOf[A], pid, Some(testActor))) - //consecutive calls should stack + // consecutive calls should stack failNextPersisted() failNextPersisted() diff --git 
a/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/javadsl/CommonTestKitTests.scala b/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/javadsl/CommonTestKitTests.scala index 767b22db22e..7aa03d74859 100644 --- a/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/javadsl/CommonTestKitTests.scala +++ b/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/javadsl/CommonTestKitTests.scala @@ -104,10 +104,10 @@ trait CommonTestKitTests extends JavaDslUtils { override def tryProcess(persistenceId: String, processingUnit: JournalOperation): ProcessingResult = { processingUnit match { case WriteEvents(msgs) => - val ex = msgs.exists({ + val ex = msgs.exists { case B(666) => true case _ => false - }) + } if (ex) { ProcessingSuccess } else { @@ -145,7 +145,7 @@ trait CommonTestKitTests extends JavaDslUtils { val a = system.actorOf(Props(classOf[A], pid, None)) - //consecutive calls should stack + // consecutive calls should stack rejectNextPersisted() rejectNextPersisted() @@ -193,7 +193,7 @@ trait CommonTestKitTests extends JavaDslUtils { val a = system.actorOf(Props(classOf[A], pid, None)) - //consecutive calls should stack + // consecutive calls should stack failNextPersisted() failNextPersisted() diff --git a/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/query/EventsByPersistenceIdSpec.scala b/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/query/EventsByPersistenceIdSpec.scala index 35129a5d55d..448c076185a 100644 --- a/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/query/EventsByPersistenceIdSpec.scala +++ b/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/query/EventsByPersistenceIdSpec.scala @@ -21,8 +21,7 @@ import akka.persistence.typed.scaladsl.{ Effect, EventSourcedBehavior } import akka.stream.testkit.scaladsl.TestSink object EventsByPersistenceIdSpec { - val config = PersistenceTestKitPlugin.config.withFallback( - 
ConfigFactory.parseString(""" + val config = PersistenceTestKitPlugin.config.withFallback(ConfigFactory.parseString(""" akka.loglevel = DEBUG akka.loggers = ["akka.testkit.SilenceAllTestEventListener"] akka.persistence.testkit.events.serialize = off diff --git a/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/query/EventsByPersistenceIdTypedSpec.scala b/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/query/EventsByPersistenceIdTypedSpec.scala index b00d7c0e0ab..82659968362 100644 --- a/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/query/EventsByPersistenceIdTypedSpec.scala +++ b/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/query/EventsByPersistenceIdTypedSpec.scala @@ -24,8 +24,7 @@ import akka.stream.scaladsl.Sink import akka.stream.testkit.scaladsl.TestSink object EventsByPersistenceIdTypedSpec { - val config = PersistenceTestKitPlugin.config.withFallback( - ConfigFactory.parseString(""" + val config = PersistenceTestKitPlugin.config.withFallback(ConfigFactory.parseString(""" akka.loglevel = DEBUG akka.loggers = ["akka.testkit.SilenceAllTestEventListener"] akka.persistence.testkit.events.serialize = off @@ -96,7 +95,7 @@ class EventsByPersistenceIdTypedSpec val currentResult = queries.currentEventsByPersistenceIdTyped[String]("d", 0L, Long.MaxValue).runWith(Sink.seq).futureValue - currentResult should have size (4) + currentResult should have size 4 currentResult.last should ===(envelope) } } diff --git a/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/CommonSnapshotTests.scala b/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/CommonSnapshotTests.scala index c0332ea4113..5a926d335ee 100644 --- a/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/CommonSnapshotTests.scala +++ b/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/CommonSnapshotTests.scala @@ -155,7 +155,7 @@ trait 
CommonSnapshotTests extends ScalaDslUtils { val a = system.actorOf(Props(classOf[A], pid, Some(testActor))) - //consecutive calls should stack + // consecutive calls should stack failNextPersisted() failNextPersisted() diff --git a/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/CommonTestKitTests.scala b/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/CommonTestKitTests.scala index e40f05aac92..a193b28f0c3 100644 --- a/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/CommonTestKitTests.scala +++ b/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/CommonTestKitTests.scala @@ -122,10 +122,10 @@ trait CommonTestKitTests extends ScalaDslUtils { override def tryProcess(persistenceId: String, processingUnit: JournalOperation): ProcessingResult = { processingUnit match { case WriteEvents(msgs) => - val ex = msgs.exists({ + val ex = msgs.exists { case B(666) => true case _ => false - }) + } if (ex) { ProcessingSuccess } else { @@ -161,7 +161,7 @@ trait CommonTestKitTests extends ScalaDslUtils { val a = system.actorOf(Props(classOf[A], pid, None)) - //consecutive calls should stack + // consecutive calls should stack rejectNextPersisted() rejectNextPersisted() @@ -209,7 +209,7 @@ trait CommonTestKitTests extends ScalaDslUtils { val a = system.actorOf(Props(classOf[A], pid, None)) - //consecutive calls should stack + // consecutive calls should stack failNextPersisted() failNextPersisted() diff --git a/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/EventSourcedBehaviorNoSnapshotTestKitSpec.scala b/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/EventSourcedBehaviorNoSnapshotTestKitSpec.scala index 05ea4111edd..f5b912bb512 100644 --- a/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/EventSourcedBehaviorNoSnapshotTestKitSpec.scala +++ 
b/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/EventSourcedBehaviorNoSnapshotTestKitSpec.scala @@ -14,7 +14,9 @@ import akka.persistence.testkit.scaladsl.EventSourcedBehaviorTestKitSpec.TestCou import akka.persistence.typed.PersistenceId class EventSourcedBehaviorNoSnapshotTestKitSpec - extends ScalaTestWithActorTestKit(ConfigFactory.parseString(""" + extends ScalaTestWithActorTestKit( + ConfigFactory + .parseString(""" akka.persistence.testkit.events.serialize = off akka.persistence.testkit.snapshots.serialize = off """).withFallback(PersistenceTestKitPlugin.config)) diff --git a/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/MultipleJournalsSpec.scala b/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/MultipleJournalsSpec.scala index 3e4c7228255..42b839a320b 100644 --- a/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/MultipleJournalsSpec.scala +++ b/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/MultipleJournalsSpec.scala @@ -46,7 +46,8 @@ object MultipleJournalsSpec { } - def config = ConfigFactory.parseString(s""" + def config = ConfigFactory + .parseString(s""" journal1 { # journal and query expected to be next to each other under config path journal.class = "${classOf[PersistenceTestKitPlugin].getName}" @@ -56,7 +57,9 @@ object MultipleJournalsSpec { journal.class = "${classOf[PersistenceTestKitPlugin].getName}" query = $${akka.persistence.testkit.query} } - """).withFallback(ConfigFactory.load()).resolve() + """) + .withFallback(ConfigFactory.load()) + .resolve() } diff --git a/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/ScalaDslUtils.scala b/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/ScalaDslUtils.scala index 3513539b3ca..bd376bc4ada 100644 --- a/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/ScalaDslUtils.scala +++ 
b/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/ScalaDslUtils.scala @@ -23,13 +23,17 @@ import akka.persistence.typed.scaladsl.{ Effect, EventSourcedBehavior } trait ScalaDslUtils extends CommonUtils { def eventSourcedBehavior(pid: String, replyOnRecovery: Option[ActorRef[Any]] = None) = - EventSourcedBehavior[TestCommand, Evt, EmptyState](PersistenceId.ofUniqueId(pid), EmptyState(), (_, cmd) => { - cmd match { - case Cmd(data) => Effect.persist(Evt(data)) - case Passivate => Effect.stop().thenRun(_ => replyOnRecovery.foreach(_ ! Stopped)) - } - }, (_, _) => EmptyState()).snapshotWhen((_, _, _) => true).receiveSignal { - case (_, RecoveryCompleted) => replyOnRecovery.foreach(_ ! Recovered) + EventSourcedBehavior[TestCommand, Evt, EmptyState]( + PersistenceId.ofUniqueId(pid), + EmptyState(), + (_, cmd) => { + cmd match { + case Cmd(data) => Effect.persist(Evt(data)) + case Passivate => Effect.stop().thenRun(_ => replyOnRecovery.foreach(_ ! Stopped)) + } + }, + (_, _) => EmptyState()).snapshotWhen((_, _, _) => true).receiveSignal { case (_, RecoveryCompleted) => + replyOnRecovery.foreach(_ ! 
Recovered) } def eventSourcedBehaviorWithState(pid: String, replyOnRecovery: Option[ActorRef[Any]] = None) = diff --git a/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/UnpersistentDurableStateSpec.scala b/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/UnpersistentDurableStateSpec.scala index 1fc76eee0f4..1945df2a3db 100644 --- a/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/UnpersistentDurableStateSpec.scala +++ b/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/UnpersistentDurableStateSpec.scala @@ -70,10 +70,9 @@ object UnpersistentDurableStateSpec { persistenceId = PersistenceId.ofUniqueId(id), emptyState = State(0, Map.empty, Int.MaxValue), commandHandler = applyCommand(_, _, context)) - .receiveSignal { - case (state, RecoveryCompleted) => - context.log.debug2("Recovered state for id [{}] is [{}]", id, state) - recoveryDone ! Done + .receiveSignal { case (state, RecoveryCompleted) => + context.log.debug2("Recovered state for id [{}] is [{}]", id, state) + recoveryDone ! Done } .withTag("count") } @@ -87,7 +86,7 @@ object UnpersistentDurableStateSpec { .thenRun { nextState => // should be the same as newState, but... state.notifyAfter.keysIterator .filter { at => - (at <= nextState.nextNotifyAt) && !(nextState.notifyAfter.isDefinedAt(at)) + (at <= nextState.nextNotifyAt) && !nextState.notifyAfter.isDefinedAt(at) } .foreach { at => state.notifyAfter(at) ! 
Done diff --git a/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/UnpersistentEventSourcedSpec.scala b/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/UnpersistentEventSourcedSpec.scala index b8b76b60c1e..af16781654f 100644 --- a/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/UnpersistentEventSourcedSpec.scala +++ b/akka-persistence-testkit/src/test/scala/akka/persistence/testkit/scaladsl/UnpersistentEventSourcedSpec.scala @@ -48,10 +48,9 @@ object UnpersistentEventSourcedSpec { emptyState = initialState, commandHandler = applyCommand(_, _, context), eventHandler = applyEvent(_, _)) - .receiveSignal { - case (state, RecoveryCompleted) => - context.log.debug2("Recovered state for id [{}] is [{}]", id, state) - recoveryDone ! Done + .receiveSignal { case (state, RecoveryCompleted) => + context.log.debug2("Recovered state for id [{}] is [{}]", id, state) + recoveryDone ! Done } .snapshotWhen { case (_, SnapshotMade, _) => true @@ -71,7 +70,7 @@ object UnpersistentEventSourcedSpec { .thenRun { newState => state.notifyAfter.keysIterator .filter { at => - (at <= newState.nextNotifyAt) && !(newState.notifyAfter.isDefinedAt(at)) + (at <= newState.nextNotifyAt) && !newState.notifyAfter.isDefinedAt(at) } .foreach { at => state.notifyAfter(at) ! 
Done @@ -317,14 +316,15 @@ class UnpersistentEventSourcedSpec extends AnyWordSpec with Matchers { case x => x } - UnpersistentBehavior.fromEventSourced[Command, Event, State](behavior, Some(initialState -> randomStartingOffset)) { - (testkit, eventProbe, snapshotProbe) => - val replyTo = TestInbox[Long]() + UnpersistentBehavior.fromEventSourced[Command, Event, State]( + behavior, + Some(initialState -> randomStartingOffset)) { (testkit, eventProbe, snapshotProbe) => + val replyTo = TestInbox[Long]() - testkit.run(GetSequenceNumber(replyTo.ref)) - eventProbe.drain() shouldBe empty - snapshotProbe.drain() shouldBe empty - replyTo.expectMessage(randomStartingOffset) + testkit.run(GetSequenceNumber(replyTo.ref)) + eventProbe.drain() shouldBe empty + snapshotProbe.drain() shouldBe empty + replyTo.expectMessage(randomStartingOffset) } } } diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/EventSourcedBehaviorLoggingSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/EventSourcedBehaviorLoggingSpec.scala index d42e0967352..5ca5fc1739d 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/EventSourcedBehaviorLoggingSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/EventSourcedBehaviorLoggingSpec.scala @@ -91,9 +91,10 @@ abstract class EventSourcedBehaviorLoggingSpec(config: Config) s"log internal messages in '$loggerId' logger without logging user data (PersistAll)" in { val doneProbe = createTestProbe[Done]() LoggingTestKit - .debug("Handled command [akka.persistence.typed.EventSourcedBehaviorLoggingSpec$ChattyEventSourcingBehavior$Hellos], " + - "resulting effect: [PersistAll(akka.persistence.typed.EventSourcedBehaviorLoggingSpec$ChattyEventSourcingBehavior$Event," + - "akka.persistence.typed.EventSourcedBehaviorLoggingSpec$ChattyEventSourcingBehavior$Event)], side effects: [1]") + .debug( + "Handled command 
[akka.persistence.typed.EventSourcedBehaviorLoggingSpec$ChattyEventSourcingBehavior$Hellos], " + + "resulting effect: [PersistAll(akka.persistence.typed.EventSourcedBehaviorLoggingSpec$ChattyEventSourcingBehavior$Event," + + "akka.persistence.typed.EventSourcedBehaviorLoggingSpec$ChattyEventSourcingBehavior$Event)], side effects: [1]") .withLoggerName(loggerName) .expect { chattyActor ! Hellos("Mary", "Joe", doneProbe.ref) diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/MultiJournalReplicationSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/MultiJournalReplicationSpec.scala index 39beebea950..7a2d2bf5310 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/MultiJournalReplicationSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/MultiJournalReplicationSpec.scala @@ -53,7 +53,8 @@ object MultiJournalReplicationSpec { } } - def separateJournalsConfig: Config = ConfigFactory.parseString(s""" + def separateJournalsConfig: Config = ConfigFactory + .parseString(s""" journal1 { journal.class = "${classOf[PersistenceTestKitPlugin].getName}" query = $${akka.persistence.testkit.query} @@ -62,7 +63,9 @@ object MultiJournalReplicationSpec { journal.class = "${classOf[PersistenceTestKitPlugin].getName}" query = $${akka.persistence.testkit.query} } - """).withFallback(ConfigFactory.load()).resolve() + """) + .withFallback(ConfigFactory.load()) + .resolve() } diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/ReplicatedEventPublishingSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/ReplicatedEventPublishingSpec.scala index 182d85d7063..aebcbbcd8b7 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/ReplicatedEventPublishingSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/ReplicatedEventPublishingSpec.scala @@ -34,27 +34,26 @@ object 
ReplicatedEventPublishingSpec { ReplicatedEventSourcing.commonJournalConfig( ReplicationId(EntityType, entityId, replicaId), allReplicas, - PersistenceTestKitReadJournal.Identifier)( - replicationContext => - EventSourcedBehavior[Command, String, Set[String]]( - replicationContext.persistenceId, - Set.empty, - (state, command) => - command match { - case Add(string, replyTo) => - ctx.log.debug("Persisting [{}]", string) - Effect.persist(string).thenRun { _ => - ctx.log.debug("Ack:ing [{}]", string) - replyTo ! Done - } - case Get(replyTo) => - replyTo ! state - Effect.none - case Stop => - Effect.stop() - case unexpected => throw new RuntimeException(s"Unexpected: $unexpected") - }, - (state, string) => state + string)) + PersistenceTestKitReadJournal.Identifier)(replicationContext => + EventSourcedBehavior[Command, String, Set[String]]( + replicationContext.persistenceId, + Set.empty, + (state, command) => + command match { + case Add(string, replyTo) => + ctx.log.debug("Persisting [{}]", string) + Effect.persist(string).thenRun { _ => + ctx.log.debug("Ack:ing [{}]", string) + replyTo ! Done + } + case Get(replyTo) => + replyTo ! 
state + Effect.none + case Stop => + Effect.stop() + case unexpected => throw new RuntimeException(s"Unexpected: $unexpected") + }, + (state, string) => state + string)) } def externalReplication(entityId: String, replicaId: ReplicaId, allReplicas: Set[ReplicaId]): Behavior[Command] = diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/ReplicatedEventSourcingTaggingSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/ReplicatedEventSourcingTaggingSpec.scala index 476d1029326..8d2d0df5c60 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/ReplicatedEventSourcingTaggingSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/ReplicatedEventSourcingTaggingSpec.scala @@ -46,29 +46,27 @@ object ReplicatedEventSourcingTaggingSpec { ReplicatedEventSourcing.commonJournalConfig( ReplicationId("TaggingSpec", entityId, replica), allReplicas, - queryPluginId)( - replicationContext => - EventSourcedBehavior[Command, String, State]( - replicationContext.persistenceId, - State(Set.empty), - (state, command) => - command match { - case Add(string, ack) => - if (state.strings.contains(string)) Effect.none.thenRun(_ => ack ! Done) - else Effect.persist(string).thenRun(_ => ack ! Done) - case GetStrings(replyTo) => - replyTo ! state.strings - Effect.none - }, - (state, event) => state.copy(strings = state.strings + event)) + queryPluginId)(replicationContext => + EventSourcedBehavior[Command, String, State]( + replicationContext.persistenceId, + State(Set.empty), + (state, command) => + command match { + case Add(string, ack) => + if (state.strings.contains(string)) Effect.none.thenRun(_ => ack ! Done) + else Effect.persist(string).thenRun(_ => ack ! Done) + case GetStrings(replyTo) => + replyTo ! 
state.strings + Effect.none + }, + (state, event) => state.copy(strings = state.strings + event)) // use withTagger to define tagging logic - .withTagger( - event => - // don't apply tags if event was replicated here, it already will appear in queries by tag - // as the origin replica would have tagged it already - if (replicationContext.origin != replicationContext.replicaId) Set.empty - else if (event.length > 10) Set("long-strings", "strings") - else Set("strings"))) + .withTagger(event => + // don't apply tags if event was replicated here, it already will appear in queries by tag + // as the origin replica would have tagged it already + if (replicationContext.origin != replicationContext.replicaId) Set.empty + else if (event.length > 10) Set("long-strings", "strings") + else Set("strings"))) // #tagging } } @@ -111,7 +109,7 @@ class ReplicatedEventSourcingTaggingSpec stringTaggedEvents.map(_.event).toSet should equal(allEvents) val longStrings = query.currentEventsByTag("long-strings", NoOffset).runWith(Sink.seq).futureValue - longStrings should have size (1) + longStrings should have size 1 } } diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/ReplicationIllegalAccessSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/ReplicationIllegalAccessSpec.scala index 30cd5742c57..0f8dcd03fe9 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/ReplicationIllegalAccessSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/ReplicationIllegalAccessSpec.scala @@ -32,36 +32,37 @@ object ReplicationIllegalAccessSpec { ReplicatedEventSourcing.commonJournalConfig( ReplicationId("IllegalAccessSpec", entityId, replica), AllReplicas, - PersistenceTestKitReadJournal.Identifier)( - replicationContext => - EventSourcedBehavior[Command, String, State]( - replicationContext.persistenceId, - State(Nil), - (_, command) => - command match { - case 
AccessInCommandHandler(replyTo) => - val exception = try { + PersistenceTestKitReadJournal.Identifier)(replicationContext => + EventSourcedBehavior[Command, String, State]( + replicationContext.persistenceId, + State(Nil), + (_, command) => + command match { + case AccessInCommandHandler(replyTo) => + val exception = + try { replicationContext.origin None } catch { case t: Throwable => Some(t) } - replyTo ! Thrown(exception) - Effect.none - case AccessInPersistCallback(replyTo) => - Effect.persist("cat").thenRun { _ => - val exception = try { + replyTo ! Thrown(exception) + Effect.none + case AccessInPersistCallback(replyTo) => + Effect.persist("cat").thenRun { _ => + val exception = + try { replicationContext.concurrent None } catch { case t: Throwable => Some(t) } - replyTo ! Thrown(exception) - } - }, - (state, event) => state.copy(all = event :: state.all))) + replyTo ! Thrown(exception) + } + }, + (state, event) => state.copy(all = event :: state.all))) } } diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/internal/EventWriterFillGapsSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/internal/EventWriterFillGapsSpec.scala index f2e9644c47f..31d4800be88 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/internal/EventWriterFillGapsSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/internal/EventWriterFillGapsSpec.scala @@ -20,9 +20,12 @@ import akka.persistence.JournalProtocol object EventWriterFillGapsSpec { def config = - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" akka.persistence.journal.inmem.delay-writes=10ms - """).withFallback(ConfigFactory.load()).resolve() + """) + .withFallback(ConfigFactory.load()) + .resolve() } class EventWriterFillGapsSpec @@ -385,7 +388,7 @@ class EventWriterFillGapsSpec } def journalAckWrite(pid: String = pid1, expectedSequenceNumbers: Vector[Long] = Vector.empty): Int = { val write = 
fakeJournal.expectMessageType[JournalProtocol.WriteMessages] - write.messages should have size (1) + write.messages should have size 1 val atomicWrite = write.messages.head.asInstanceOf[AtomicWrite] val seqNrs = @@ -402,7 +405,7 @@ class EventWriterFillGapsSpec def journalFailWrite(reason: String, pid: String = pid1): Int = { val write = fakeJournal.expectMessageType[JournalProtocol.WriteMessages] - write.messages should have size (1) + write.messages should have size 1 val atomicWrite = write.messages.head.asInstanceOf[AtomicWrite] atomicWrite.payload.foreach { repr => repr.persistenceId should ===(pid) diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/internal/EventWriterSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/internal/EventWriterSpec.scala index 1f4743c92a2..b068df835ff 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/internal/EventWriterSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/internal/EventWriterSpec.scala @@ -19,9 +19,12 @@ import akka.actor.testkit.typed.TestException object EventWriterSpec { def config = - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" akka.persistence.journal.inmem.delay-writes=10ms - """).withFallback(ConfigFactory.load()).resolve() + """) + .withFallback(ConfigFactory.load()) + .resolve() } class EventWriterSpec extends ScalaTestWithActorTestKit(EventWriterSpec.config) with AnyWordSpecLike with LogCapturing { @@ -170,7 +173,7 @@ class EventWriterSpec extends ScalaTestWithActorTestKit(EventWriterSpec.config) } def journalAckWrite(pid: String = pid1): Int = { val write = fakeJournal.expectMessageType[JournalProtocol.WriteMessages] - write.messages should have size (1) + write.messages should have size 1 val atomicWrite = write.messages.head.asInstanceOf[AtomicWrite] atomicWrite.payload.foreach { repr => repr.persistenceId should ===(pid) @@ -182,7 +185,7 @@ class EventWriterSpec 
extends ScalaTestWithActorTestKit(EventWriterSpec.config) def journalFailWrite(reason: String, pid: String = pid1): Int = { val write = fakeJournal.expectMessageType[JournalProtocol.WriteMessages] - write.messages should have size (1) + write.messages should have size 1 val atomicWrite = write.messages.head.asInstanceOf[AtomicWrite] atomicWrite.payload.foreach { repr => repr.persistenceId should ===(pid) diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorFailureSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorFailureSpec.scala index 511b9797bea..3661efe7199 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorFailureSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorFailureSpec.scala @@ -70,14 +70,17 @@ class ChaosJournal extends InmemJournal { object EventSourcedBehaviorFailureSpec { - val conf: Config = ConfigFactory.parseString(s""" + val conf: Config = ConfigFactory + .parseString(s""" akka.loglevel = INFO akka.persistence.journal.plugin = "failure-journal" failure-journal = $${akka.persistence.journal.inmem} failure-journal { class = "akka.persistence.typed.scaladsl.ChaosJournal" } - """).withFallback(ConfigFactory.defaultReference()).resolve() + """) + .withFallback(ConfigFactory.defaultReference()) + .resolve() } class EventSourcedBehaviorFailureSpec @@ -124,10 +127,13 @@ class EventSourcedBehaviorFailureSpec LoggingTestKit.error[JournalFailureException].expect { val probe = TestProbe[String]() val excProbe = TestProbe[Throwable]() - spawn(failingPersistentActor(PersistenceId.ofUniqueId("fail-recovery"), probe.ref, { - case (_, RecoveryFailed(t)) => - excProbe.ref ! t - })) + spawn( + failingPersistentActor( + PersistenceId.ofUniqueId("fail-recovery"), + probe.ref, + { case (_, RecoveryFailed(t)) => + excProbe.ref ! 
t + })) excProbe.expectMessageType[TestException].message shouldEqual "Nope" probe.expectMessage("stopped") @@ -136,10 +142,13 @@ class EventSourcedBehaviorFailureSpec "handle exceptions from RecoveryFailed signal handler" in { val probe = TestProbe[String]() - val pa = spawn(failingPersistentActor(PersistenceId.ofUniqueId("fail-recovery-twice"), probe.ref, { - case (_, RecoveryFailed(_)) => - throw TestException("recovery call back failure") - })) + val pa = spawn( + failingPersistentActor( + PersistenceId.ofUniqueId("fail-recovery-twice"), + probe.ref, + { case (_, RecoveryFailed(_)) => + throw TestException("recovery call back failure") + })) pa ! "one" probe.expectMessage("starting") probe.expectMessage("persisting") @@ -161,10 +170,13 @@ class EventSourcedBehaviorFailureSpec LoggingTestKit.error[JournalFailureException].expect { // start again and then the event handler will throw - spawn(failingPersistentActor(pid, probe.ref, { - case (_, RecoveryFailed(t)) => - excProbe.ref ! t - })) + spawn( + failingPersistentActor( + pid, + probe.ref, + { case (_, RecoveryFailed(t)) => + excProbe.ref ! 
t + })) excProbe.expectMessageType[TestException].message shouldEqual "wrong event" probe.expectMessage("stopped") @@ -178,10 +190,10 @@ class EventSourcedBehaviorFailureSpec Behaviors .supervise(failingPersistentActor( PersistenceId.ofUniqueId("recovery-ok"), - probe.ref, { - case (_, RecoveryCompleted) => - probe.ref.tell("starting") - throw TestException("recovery call back failure") + probe.ref, + { case (_, RecoveryCompleted) => + probe.ref.tell("starting") + throw TestException("recovery call back failure") })) // since recovery fails restart supervision is not supposed to be used .onFailure(SupervisorStrategy.restart)) @@ -293,9 +305,12 @@ class EventSourcedBehaviorFailureSpec case object SomeSignal extends Signal LoggingTestKit.error[TestException].expect { val probe = TestProbe[String]() - val behav = failingPersistentActor(PersistenceId.ofUniqueId("wrong-signal-handler"), probe.ref, { - case (_, SomeSignal) => throw TestException("from signal") - }) + val behav = failingPersistentActor( + PersistenceId.ofUniqueId("wrong-signal-handler"), + probe.ref, + { case (_, SomeSignal) => + throw TestException("from signal") + }) val c = spawn(behav) probe.expectMessage("starting") c.toClassic ! 
SomeSignal diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorInterceptorSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorInterceptorSpec.scala index 2c45160290e..93d88668ed1 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorInterceptorSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorInterceptorSpec.scala @@ -81,8 +81,8 @@ class EventSourcedBehaviorInterceptorSpec "be possible to combine with transformMessages" in { val probe = createTestProbe[String]() val pid = nextPid() - val ref = spawn(testBehavior(pid, probe.ref).transformMessages[String] { - case s => s.toUpperCase() + val ref = spawn(testBehavior(pid, probe.ref).transformMessages[String] { case s => + s.toUpperCase() }) ref ! "a" diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorRecoveryTimeoutSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorRecoveryTimeoutSpec.scala index 9390357bf47..73cdfece2e9 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorRecoveryTimeoutSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorRecoveryTimeoutSpec.scala @@ -42,9 +42,8 @@ object EventSourcedBehaviorRecoveryTimeoutSpec { persistenceId, emptyState = "", commandHandler = (_, command) => Effect.persist(command).thenRun(_ => probe ! command), - eventHandler = (state, evt) => state + evt).receiveSignal { - case (_, RecoveryFailed(cause)) => - probe ! cause + eventHandler = (state, evt) => state + evt).receiveSignal { case (_, RecoveryFailed(cause)) => + probe ! 
cause } } diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorRetentionOnlyOneSnapshotSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorRetentionOnlyOneSnapshotSpec.scala index 413cd271274..44e839a6d6f 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorRetentionOnlyOneSnapshotSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorRetentionOnlyOneSnapshotSpec.scala @@ -21,11 +21,13 @@ import akka.persistence.typed.EventSourcedSignal import akka.persistence.typed.PersistenceId object EventSourcedBehaviorRetentionOnlyOneSnapshotSpec { - private val config = ConfigFactory.parseString(s""" + private val config = ConfigFactory + .parseString(s""" ${PersistenceTestKitSnapshotPlugin.PluginId} { only-one-snapshot = on } - """).withFallback(PersistenceTestKitPlugin.config.withFallback(PersistenceTestKitSnapshotPlugin.config)) + """) + .withFallback(PersistenceTestKitPlugin.config.withFallback(PersistenceTestKitSnapshotPlugin.config)) } class EventSourcedBehaviorRetentionOnlyOneSnapshotSpec @@ -85,14 +87,13 @@ class EventSourcedBehaviorRetentionOnlyOneSnapshotSpec val replyProbe = TestProbe[State]() val persistentActor = spawn( - Behaviors.setup[Command]( - ctx => - counter( - ctx, - pid, - snapshotSignalProbe = Some(snapshotSignalProbe.ref), - deleteSnapshotSignalProbe = Some(deleteSnapshotSignalProbe.ref)) - .withRetention(RetentionCriteria.snapshotEvery(numberOfEvents = 3, keepNSnapshots = 2)))) + Behaviors.setup[Command](ctx => + counter( + ctx, + pid, + snapshotSignalProbe = Some(snapshotSignalProbe.ref), + deleteSnapshotSignalProbe = Some(deleteSnapshotSignalProbe.ref)) + .withRetention(RetentionCriteria.snapshotEvery(numberOfEvents = 3, keepNSnapshots = 2)))) (1 to 10).foreach(_ => persistentActor ! Increment) persistentActor ! 
GetValue(replyProbe.ref) diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorRetentionSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorRetentionSpec.scala index 37eab1f931e..d22283d75c0 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorRetentionSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorRetentionSpec.scala @@ -264,14 +264,13 @@ class EventSourcedBehaviorRetentionSpec val replyProbe = TestProbe[State]() val persistentActor = spawn( - Behaviors.setup[Command]( - ctx => - counter( - ctx, - pid, - snapshotSignalProbe = Some(snapshotSignalProbe.ref), - deleteSnapshotSignalProbe = Some(deleteSnapshotSignalProbe.ref)) - .withRetention(RetentionCriteria.snapshotEvery(numberOfEvents = 3, keepNSnapshots = 2)))) + Behaviors.setup[Command](ctx => + counter( + ctx, + pid, + snapshotSignalProbe = Some(snapshotSignalProbe.ref), + deleteSnapshotSignalProbe = Some(deleteSnapshotSignalProbe.ref)) + .withRetention(RetentionCriteria.snapshotEvery(numberOfEvents = 3, keepNSnapshots = 2)))) (1 to 10).foreach(_ => persistentActor ! Increment) persistentActor ! 
GetValue(replyProbe.ref) @@ -500,14 +499,13 @@ class EventSourcedBehaviorRetentionSpec val replyProbe = TestProbe[State]() val persistentActor = spawn( - Behaviors.setup[Command]( - ctx => - counter( - ctx, - pid, - snapshotSignalProbe = Some(snapshotSignalProbe.ref), - deleteSnapshotSignalProbe = Some(deleteSnapshotSignalProbe.ref)) - .withRetention(RetentionCriteria.snapshotEvery(numberOfEvents = 1, keepNSnapshots = 3)))) + Behaviors.setup[Command](ctx => + counter( + ctx, + pid, + snapshotSignalProbe = Some(snapshotSignalProbe.ref), + deleteSnapshotSignalProbe = Some(deleteSnapshotSignalProbe.ref)) + .withRetention(RetentionCriteria.snapshotEvery(numberOfEvents = 1, keepNSnapshots = 3)))) (1 to 4).foreach(_ => persistentActor ! Increment) persistentActor ! GetValue(replyProbe.ref) diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorSpec.scala index efe4f95c46f..61c48a593ef 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorSpec.scala @@ -129,12 +129,12 @@ object EventSourcedBehaviorSpec { def counter(persistenceId: PersistenceId)(implicit system: ActorSystem[_]): Behavior[Command] = Behaviors.setup(ctx => counter(ctx, persistenceId)) - def counter(persistenceId: PersistenceId, logging: ActorRef[String])( - implicit system: ActorSystem[_]): Behavior[Command] = + def counter(persistenceId: PersistenceId, logging: ActorRef[String])(implicit + system: ActorSystem[_]): Behavior[Command] = Behaviors.setup(ctx => counter(ctx, persistenceId, logging)) - def counter(ctx: ActorContext[Command], persistenceId: PersistenceId)( - implicit system: ActorSystem[_]): EventSourcedBehavior[Command, Event, State] = + def counter(ctx: 
ActorContext[Command], persistenceId: PersistenceId)(implicit + system: ActorSystem[_]): EventSourcedBehavior[Command, Event, State] = counter( ctx, persistenceId, @@ -142,8 +142,8 @@ object EventSourcedBehaviorSpec { probe = TestProbe[(State, Event)]().ref, snapshotProbe = TestProbe[Try[SnapshotMetadata]]().ref) - def counter(ctx: ActorContext[Command], persistenceId: PersistenceId, logging: ActorRef[String])( - implicit system: ActorSystem[_]): EventSourcedBehavior[Command, Event, State] = + def counter(ctx: ActorContext[Command], persistenceId: PersistenceId, logging: ActorRef[String])(implicit + system: ActorSystem[_]): EventSourcedBehavior[Command, Event, State] = counter( ctx, persistenceId, @@ -155,8 +155,8 @@ object EventSourcedBehaviorSpec { ctx: ActorContext[Command], persistenceId: PersistenceId, probe: ActorRef[(State, Event)], - snapshotProbe: ActorRef[Try[SnapshotMetadata]])( - implicit system: ActorSystem[_]): EventSourcedBehavior[Command, Event, State] = + snapshotProbe: ActorRef[Try[SnapshotMetadata]])(implicit + system: ActorSystem[_]): EventSourcedBehavior[Command, Event, State] = counter(ctx, persistenceId, TestProbe[String]().ref, probe, snapshotProbe) def counterWithProbe(ctx: ActorContext[Command], persistenceId: PersistenceId, probe: ActorRef[(State, Event)])( @@ -166,8 +166,8 @@ object EventSourcedBehaviorSpec { def counterWithSnapshotProbe( ctx: ActorContext[Command], persistenceId: PersistenceId, - probe: ActorRef[Try[SnapshotMetadata]])( - implicit system: ActorSystem[_]): EventSourcedBehavior[Command, Event, State] = + probe: ActorRef[Try[SnapshotMetadata]])(implicit + system: ActorSystem[_]): EventSourcedBehavior[Command, Event, State] = counter(ctx, persistenceId, TestProbe[String]().ref, TestProbe[(State, Event)]().ref, snapshotProbe = probe) def counter( @@ -796,7 +796,7 @@ class EventSourcedBehaviorSpec firstThree.size shouldBe 3 val others = queries.currentPersistenceIds(Some(firstThree.last), 
Long.MaxValue).runWith(Sink.seq).futureValue - firstThree ++ others should contain theSameElementsInOrderAs (all) + firstThree ++ others should contain theSameElementsInOrderAs all } def watcher(toWatch: ActorRef[_]): TestProbe[String] = { @@ -807,10 +807,9 @@ class EventSourcedBehaviorSpec .receive[Any] { (_, _) => Behaviors.same } - .receiveSignal { - case (_, _: Terminated) => - probe.ref ! "Terminated" - Behaviors.stopped + .receiveSignal { case (_, _: Terminated) => + probe.ref ! "Terminated" + Behaviors.stopped } } spawn(w) diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorStashSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorStashSpec.scala index 61822692f27..7e51e86a66c 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorStashSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorStashSpec.scala @@ -32,7 +32,8 @@ import akka.persistence.typed.PersistenceId import akka.persistence.typed.RecoveryCompleted object EventSourcedBehaviorStashSpec { - def conf: Config = ConfigFactory.parseString(s""" + def conf: Config = ConfigFactory + .parseString(s""" #akka.loglevel = DEBUG #akka.persistence.typed.log-stashing = on akka.persistence.journal.plugin = "akka.persistence.journal.inmem" @@ -43,7 +44,9 @@ object EventSourcedBehaviorStashSpec { failure-journal { class = "akka.persistence.typed.scaladsl.ChaosJournal" } - """).withFallback(ConfigFactory.defaultReference()).resolve() + """) + .withFallback(ConfigFactory.defaultReference()) + .resolve() sealed trait Command[ReplyMessage] // Unstash and change to active mode @@ -560,7 +563,8 @@ class EventSourcedBehaviorStashSpec Effect.stash() } } - }, { + }, + { case (_, "start-stashing") => true case (_, "unstash") => false case (_, _) => throw new IllegalArgumentException() diff --git 
a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorWatchSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorWatchSpec.scala index e99aa42d8b3..49ed8a919cb 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorWatchSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorWatchSpec.scala @@ -91,10 +91,14 @@ class EventSourcedBehaviorWatchSpec context.watch(child) - EventSourcedBehavior[Command, String, String](nextPid, emptyState = "", commandHandler = (_, cmd) => { - child ! cmd - Effect.none - }, eventHandler = (state, evt) => state + evt) + EventSourcedBehavior[Command, String, String]( + nextPid, + emptyState = "", + commandHandler = (_, cmd) => { + child ! cmd + Effect.none + }, + eventHandler = (state, evt) => state + evt) }) LoggingTestKit.error[TestException].expect { @@ -106,10 +110,9 @@ class EventSourcedBehaviorWatchSpec } "behave as expected if a user's signal handler is side effecting" in { - val signalHandler: PartialFunction[(String, Signal), Unit] = { - case (_, RecoveryCompleted) => - java.time.Instant.now.getNano - Behaviors.same + val signalHandler: PartialFunction[(String, Signal), Unit] = { case (_, RecoveryCompleted) => + java.time.Instant.now.getNano + Behaviors.same } Behaviors.setup[Command] { context => @@ -129,10 +132,14 @@ class EventSourcedBehaviorWatchSpec context.watch(child) - EventSourcedBehavior[Command, String, String](nextPid, emptyState = "", commandHandler = (_, cmd) => { - child ! cmd - Effect.none - }, eventHandler = (state, evt) => state + evt).receiveSignal(signalHandler) + EventSourcedBehavior[Command, String, String]( + nextPid, + emptyState = "", + commandHandler = (_, cmd) => { + child ! 
cmd + Effect.none + }, + eventHandler = (state, evt) => state + evt).receiveSignal(signalHandler) }) LoggingTestKit.error[TestException].expect { @@ -158,13 +165,16 @@ class EventSourcedBehaviorWatchSpec probe.ref ! child context.watch(child) - EventSourcedBehavior[Stop.type, String, String](nextPid, emptyState = "", commandHandler = (_, cmd) => { - child ! cmd - Effect.none - }, eventHandler = (state, evt) => state + evt).receiveSignal { - case (_, t: Terminated) => - probe.ref ! HasTerminated(t.ref) - Behaviors.stopped + EventSourcedBehavior[Stop.type, String, String]( + nextPid, + emptyState = "", + commandHandler = (_, cmd) => { + child ! cmd + Effect.none + }, + eventHandler = (state, evt) => state + evt).receiveSignal { case (_, t: Terminated) => + probe.ref ! HasTerminated(t.ref) + Behaviors.stopped } }) @@ -186,13 +196,16 @@ class EventSourcedBehaviorWatchSpec probe.ref ! child context.watch(child) - EventSourcedBehavior[Fail.type, String, String](nextPid, emptyState = "", commandHandler = (_, cmd) => { - child ! cmd - Effect.none - }, eventHandler = (state, evt) => state + evt).receiveSignal { - case (_, t: ChildFailed) => - probe.ref ! ChildHasFailed(t) - Behaviors.same + EventSourcedBehavior[Fail.type, String, String]( + nextPid, + emptyState = "", + commandHandler = (_, cmd) => { + child ! cmd + Effect.none + }, + eventHandler = (state, evt) => state + evt).receiveSignal { case (_, t: ChildFailed) => + probe.ref ! 
ChildHasFailed(t) + Behaviors.same } }) diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedEventAdapterSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedEventAdapterSpec.scala index 7a99827bd5a..a99be4419a6 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedEventAdapterSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedEventAdapterSpec.scala @@ -77,8 +77,11 @@ object EventSourcedEventAdapterSpec { } class EventSourcedEventAdapterSpec - extends ScalaTestWithActorTestKit(ConfigFactory.parseString(""" - akka.persistence.testkit.events.serialize = true""").withFallback(PersistenceTestKitPlugin.config)) + extends ScalaTestWithActorTestKit( + ConfigFactory + .parseString(""" + akka.persistence.testkit.events.serialize = true""") + .withFallback(PersistenceTestKitPlugin.config)) with AnyWordSpecLike with LogCapturing { import EventSourcedBehaviorSpec._ @@ -91,11 +94,15 @@ class EventSourcedEventAdapterSpec PersistenceQuery(system).readJournalFor[PersistenceTestKitReadJournal](PersistenceTestKitReadJournal.Identifier) private def behavior(pid: PersistenceId, probe: ActorRef[String]): EventSourcedBehavior[String, String, String] = - EventSourcedBehavior(pid, "", commandHandler = { (_, command) => - Effect.persist(command).thenRun(newState => probe ! newState) - }, eventHandler = { (state, evt) => - state + evt - }) + EventSourcedBehavior( + pid, + "", + commandHandler = { (_, command) => + Effect.persist(command).thenRun(newState => probe ! 
newState) + }, + eventHandler = { (state, evt) => + state + evt + }) "Event adapter" must { diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedSequenceNumberSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedSequenceNumberSpec.scala index 119e837744b..76b7b8a5c5c 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedSequenceNumberSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedSequenceNumberSpec.scala @@ -33,45 +33,44 @@ class EventSourcedSequenceNumberSpec with LogCapturing { private def behavior(pid: PersistenceId, probe: ActorRef[String]): Behavior[String] = - Behaviors.setup( - ctx => - EventSourcedBehavior[String, String, String](pid, "", { - (state, command) => - state match { - case "stashing" => - command match { - case "unstash" => - probe ! s"${EventSourcedBehavior.lastSequenceNumber(ctx)} unstash" - Effect.persist("normal").thenUnstashAll() - case _ => - Effect.stash() - } - case _ => - command match { - case "cmd" => - probe ! s"${EventSourcedBehavior.lastSequenceNumber(ctx)} onCommand" - Effect - .persist("evt") - .thenRun(_ => probe ! s"${EventSourcedBehavior.lastSequenceNumber(ctx)} thenRun") - case "cmd3" => - probe ! s"${EventSourcedBehavior.lastSequenceNumber(ctx)} onCommand" - Effect - .persist("evt1", "evt2", "evt3") - .thenRun(_ => probe ! s"${EventSourcedBehavior.lastSequenceNumber(ctx)} thenRun") - case "stash" => - probe ! s"${EventSourcedBehavior.lastSequenceNumber(ctx)} stash" - Effect.persist("stashing") - case "snapshot" => - Effect.persist("snapshot") - } - } - }, { (_, evt) => + Behaviors.setup(ctx => + EventSourcedBehavior[String, String, String]( + pid, + "", + { (state, command) => + state match { + case "stashing" => + command match { + case "unstash" => + probe ! 
s"${EventSourcedBehavior.lastSequenceNumber(ctx)} unstash" + Effect.persist("normal").thenUnstashAll() + case _ => + Effect.stash() + } + case _ => + command match { + case "cmd" => + probe ! s"${EventSourcedBehavior.lastSequenceNumber(ctx)} onCommand" + Effect.persist("evt").thenRun(_ => probe ! s"${EventSourcedBehavior.lastSequenceNumber(ctx)} thenRun") + case "cmd3" => + probe ! s"${EventSourcedBehavior.lastSequenceNumber(ctx)} onCommand" + Effect + .persist("evt1", "evt2", "evt3") + .thenRun(_ => probe ! s"${EventSourcedBehavior.lastSequenceNumber(ctx)} thenRun") + case "stash" => + probe ! s"${EventSourcedBehavior.lastSequenceNumber(ctx)} stash" + Effect.persist("stashing") + case "snapshot" => + Effect.persist("snapshot") + } + } + }, + { (_, evt) => probe ! s"${EventSourcedBehavior.lastSequenceNumber(ctx)} eventHandler $evt" evt - }).snapshotWhen((_, event, _) => event == "snapshot").receiveSignal { - case (_, RecoveryCompleted) => - probe ! s"${EventSourcedBehavior.lastSequenceNumber(ctx)} onRecoveryComplete" - }) + }).snapshotWhen((_, event, _) => event == "snapshot").receiveSignal { case (_, RecoveryCompleted) => + probe ! 
s"${EventSourcedBehavior.lastSequenceNumber(ctx)} onRecoveryComplete" + }) "The sequence number" must { diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedStashOverflowSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedStashOverflowSpec.scala index 6cadc4ca159..1c58cde1bbb 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedStashOverflowSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedStashOverflowSpec.scala @@ -28,12 +28,14 @@ object EventSourcedStashOverflowSpec { def apply(persistenceId: PersistenceId): Behavior[Command] = EventSourcedBehavior[Command, String, List[String]]( persistenceId, - Nil, { (_, command) => + Nil, + { (_, command) => command match { case DoNothing(replyTo) => Effect.persist(List.empty[String]).thenRun(_ => replyTo ! Done) } - }, { (state, event) => + }, + { (state, event) => // original reproducer slept 2 seconds here but a pure application of an event seems unlikely to take that long // so instead we delay recovery using a special journal event :: state diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/LoggerSourceSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/LoggerSourceSpec.scala index 24b85adc7eb..31b1657765d 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/LoggerSourceSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/LoggerSourceSpec.scala @@ -30,13 +30,17 @@ class LoggerSourceSpec def behavior: Behavior[String] = Behaviors.setup { ctx => ctx.log.info("setting-up-behavior") - EventSourcedBehavior[String, String, String](nextPid(), emptyState = "", commandHandler = (_, _) => { - ctx.log.info("command-received") - Effect.persist("evt") - }, eventHandler = (state, _) => { - 
ctx.log.info("event-received") - state - }).receiveSignal { + EventSourcedBehavior[String, String, String]( + nextPid(), + emptyState = "", + commandHandler = (_, _) => { + ctx.log.info("command-received") + Effect.persist("evt") + }, + eventHandler = (state, _) => { + ctx.log.info("event-received") + state + }).receiveSignal { case (_, RecoveryCompleted) => ctx.log.info("recovery-completed") case (_, SnapshotCompleted(_)) => case (_, SnapshotFailed(_, _)) => @@ -93,10 +97,14 @@ class LoggerSourceSpec val behavior: Behavior[String] = Behaviors.setup[String] { ctx => ctx.setLoggerName("my-custom-name") - EventSourcedBehavior[String, String, String](nextPid(), emptyState = "", commandHandler = (_, _) => { - ctx.log.info("command-received") - Effect.persist("evt") - }, eventHandler = (state, _) => state) + EventSourcedBehavior[String, String, String]( + nextPid(), + emptyState = "", + commandHandler = (_, _) => { + ctx.log.info("command-received") + Effect.persist("evt") + }, + eventHandler = (state, _) => state) } val actor = spawn(behavior) diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/NullEmptyStateSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/NullEmptyStateSpec.scala index 798d46a4b0e..f3e3c4df6de 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/NullEmptyStateSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/NullEmptyStateSpec.scala @@ -42,9 +42,8 @@ class NullEmptyStateSpec eventHandler = (state, event) => { probe.tell("eventHandler:" + state + ":" + event) if (state == null) event else state + event - }).receiveSignal { - case (state, RecoveryCompleted) => - probe.tell("onRecoveryCompleted:" + state) + }).receiveSignal { case (state, RecoveryCompleted) => + probe.tell("onRecoveryCompleted:" + state) } "A typed persistent actor with null empty state" must { diff --git 
a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/OptionalSnapshotStoreSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/OptionalSnapshotStoreSpec.scala index d4b0a937290..260276ba10d 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/OptionalSnapshotStoreSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/OptionalSnapshotStoreSpec.scala @@ -33,8 +33,8 @@ object OptionalSnapshotStoreSpec { commandHandler = CommandHandler.command { _ => Effect.persist(Event()).thenRun(probe.ref ! _) }, - eventHandler = { - case (_, _) => State() + eventHandler = { case (_, _) => + State() }).snapshotWhen { case _ => true } def persistentBehaviorWithSnapshotPlugin(probe: TestProbe[State]) = @@ -42,14 +42,17 @@ object OptionalSnapshotStoreSpec { } -class OptionalSnapshotStoreSpec extends ScalaTestWithActorTestKit(s""" +class OptionalSnapshotStoreSpec + extends ScalaTestWithActorTestKit(s""" akka.persistence.publish-plugin-commands = on akka.persistence.journal.plugin = "akka.persistence.journal.inmem" akka.persistence.journal.inmem.test-serialization = on # snapshot store plugin is NOT defined, things should still work akka.persistence.snapshot-store.local.dir = "target/snapshots-${classOf[OptionalSnapshotStoreSpec].getName}/" - """) with AnyWordSpecLike with LogCapturing { + """) + with AnyWordSpecLike + with LogCapturing { import OptionalSnapshotStoreSpec._ diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/PerformanceSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/PerformanceSpec.scala index 1baad1c1429..57dfc85bf5a 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/PerformanceSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/PerformanceSpec.scala @@ -74,7 +74,7 @@ object 
PerformanceSpec { def behavior(name: String, probe: TestProbe[Reply])(other: (Command, Parameters) => Effect[String, String]) = { Behaviors - .supervise({ + .supervise { val parameters = Parameters() EventSourcedBehavior[Command, String, String]( persistenceId = PersistenceId.ofUniqueId(name), @@ -86,13 +86,12 @@ object PerformanceSpec { Effect.none.thenRun(_ => parameters.failAt = sequence) case command => other(command, parameters) }, - eventHandler = { - case (state, _) => state - }).receiveSignal { - case (_, RecoveryCompleted) => - if (parameters.every(1000)) print("r") + eventHandler = { case (state, _) => + state + }).receiveSignal { case (_, RecoveryCompleted) => + if (parameters.every(1000)) print("r") } - }) + } .onFailure(SupervisorStrategy.restart.withLoggingEnabled(false)) } diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/PrimitiveStateSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/PrimitiveStateSpec.scala index b7fbbb74c6c..735016c0cab 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/PrimitiveStateSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/PrimitiveStateSpec.scala @@ -39,9 +39,8 @@ class PrimitiveStateSpec eventHandler = (state, event) => { probe.tell("eventHandler:" + state + ":" + event) state + event - }).receiveSignal { - case (n, RecoveryCompleted) => - probe.tell("onRecoveryCompleted:" + n) + }).receiveSignal { case (n, RecoveryCompleted) => + probe.tell("onRecoveryCompleted:" + n) } "A typed persistent actor with primitive state" must { diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/SlowInMemorySnapshotStore.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/SlowInMemorySnapshotStore.scala index 16d707f89b6..0e9210e3686 100644 --- 
a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/SlowInMemorySnapshotStore.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/scaladsl/SlowInMemorySnapshotStore.scala @@ -17,8 +17,8 @@ class SlowInMemorySnapshotStore extends SnapshotStore { private var state = Map.empty[String, (Any, ClassicSnapshotMetadata)] def loadAsync(persistenceId: String, criteria: ClassicSnapshotSelectionCriteria): Future[Option[SelectedSnapshot]] = { - Future.successful(state.get(persistenceId).map { - case (snap, meta) => SelectedSnapshot(meta, snap) + Future.successful(state.get(persistenceId).map { case (snap, meta) => + SelectedSnapshot(meta, snap) }) } @@ -38,15 +38,15 @@ class SlowInMemorySnapshotStore extends SnapshotStore { } override def deleteAsync(metadata: ClassicSnapshotMetadata): Future[Unit] = { - state = state.filterNot { - case (pid, (_, meta)) => pid == metadata.persistenceId && meta.sequenceNr == metadata.sequenceNr + state = state.filterNot { case (pid, (_, meta)) => + pid == metadata.persistenceId && meta.sequenceNr == metadata.sequenceNr } Future.successful(()) } override def deleteAsync(persistenceId: String, criteria: ClassicSnapshotSelectionCriteria): Future[Unit] = { - state = state.filterNot { - case (pid, (_, meta)) => pid == persistenceId && criteria.matches(meta) + state = state.filterNot { case (pid, (_, meta)) => + pid == persistenceId && criteria.matches(meta) } Future.successful(()) } diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/state/scaladsl/DurableStateBehaviorInterceptorSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/state/scaladsl/DurableStateBehaviorInterceptorSpec.scala index 881c5f531ae..6b80af3a697 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/state/scaladsl/DurableStateBehaviorInterceptorSpec.scala +++ 
b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/state/scaladsl/DurableStateBehaviorInterceptorSpec.scala @@ -76,8 +76,8 @@ class DurableStateBehaviorInterceptorSpec "be possible to combine with transformMessages" in { val probe = createTestProbe[String]() val pid = nextPid() - val ref = spawn(testBehavior(pid, probe.ref).transformMessages[String] { - case s => s.toUpperCase() + val ref = spawn(testBehavior(pid, probe.ref).transformMessages[String] { case s => + s.toUpperCase() }) ref ! "a" diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/state/scaladsl/DurableStateRevisionSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/state/scaladsl/DurableStateRevisionSpec.scala index 5118a8a61ff..d7f9c8aeac9 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/state/scaladsl/DurableStateRevisionSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/state/scaladsl/DurableStateRevisionSpec.scala @@ -57,9 +57,8 @@ class DurableStateRevisionSpec case "snapshot" => Effect.persist("snapshot") } - }).receiveSignal { - case (_, RecoveryCompleted) => - probe ! s"${DurableStateBehavior.lastSequenceNumber(ctx)} onRecoveryComplete" + }).receiveSignal { case (_, RecoveryCompleted) => + probe ! 
s"${DurableStateBehavior.lastSequenceNumber(ctx)} onRecoveryComplete" } } diff --git a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/state/scaladsl/DurableStateStashOverflowSpec.scala b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/state/scaladsl/DurableStateStashOverflowSpec.scala index 731775d49e3..b3a2a6dc85c 100644 --- a/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/state/scaladsl/DurableStateStashOverflowSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/akka/persistence/typed/state/scaladsl/DurableStateStashOverflowSpec.scala @@ -76,7 +76,7 @@ class DurableStateStashOverflowSpec val droppedMessageProbe = testKit.createDroppedMessageProbe() val stashCapacity = testKit.config.getInt("akka.persistence.typed.stash-capacity") - for (_ <- 0 until (stashCapacity)) { + for (_ <- 0 until stashCapacity) { ref.tell(Stasher.Hey(probe.ref)) } diff --git a/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedAuctionExampleSpec.scala b/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedAuctionExampleSpec.scala index ca3131ea385..0eb50b73e49 100644 --- a/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedAuctionExampleSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedAuctionExampleSpec.scala @@ -32,12 +32,12 @@ import akka.serialization.jackson.CborSerializable object ReplicatedAuctionExampleSpec { - //#setup + // #setup object AuctionEntity { - //#setup + // #setup - //#commands + // #commands type MoneyAmount = Int case class Bid(bidder: String, offer: MoneyAmount, timestamp: Instant, originReplica: ReplicaId) @@ -48,17 +48,17 @@ object ReplicatedAuctionExampleSpec { final case class GetHighestBid(replyTo: ActorRef[Bid]) extends Command final case class IsClosed(replyTo: ActorRef[Boolean]) extends Command private case object Close extends Command // Internal, 
should not be sent from the outside - //#commands + // #commands - //#events + // #events sealed trait Event extends CborSerializable final case class BidRegistered(bid: Bid) extends Event final case class AuctionFinished(atReplica: ReplicaId) extends Event final case class WinnerDecided(atReplica: ReplicaId, winningBid: Bid, highestCounterOffer: MoneyAmount) extends Event - //#events + // #events - //#phase + // #phase /** * The auction passes through several workflow phases. * First, in `Running` `OfferBid` commands are accepted. @@ -78,15 +78,14 @@ object ReplicatedAuctionExampleSpec { * When the responsible DC has seen all `AuctionFinished` events from other DCs * all other events have also been propagated and it can persist `WinnerDecided` and * the auction is finally `Closed`. - * */ sealed trait AuctionPhase case object Running extends AuctionPhase final case class Closing(finishedAtReplica: Set[ReplicaId]) extends AuctionPhase case object Closed extends AuctionPhase - //#phase + // #phase - //#state + // #state case class AuctionState(phase: AuctionPhase, highestBid: Bid, highestCounterOffer: MoneyAmount) extends CborSerializable { @@ -113,7 +112,9 @@ object ReplicatedAuctionExampleSpec { def withNewHighestBid(bid: Bid): AuctionState = { require(phase != Closed) require(isHigherBid(bid, highestBid)) - copy(highestBid = bid, highestCounterOffer = highestBid.offer // keep last highest bid around + copy( + highestBid = bid, + highestCounterOffer = highestBid.offer // keep last highest bid around ) } @@ -132,9 +133,9 @@ object ReplicatedAuctionExampleSpec { (first.offer == second.offer && first.timestamp.equals(second.timestamp) && first.originReplica.id .compareTo(second.originReplica.id) < 0) } - //#state + // #state - //#setup + // #setup def apply( replica: ReplicaId, name: String, @@ -169,8 +170,8 @@ object ReplicatedAuctionExampleSpec { replicationContext.persistenceId, AuctionState(phase = Running, highestBid = initialBid, highestCounterOffer = 
initialBid.offer), commandHandler, - eventHandler).receiveSignal { - case (state, RecoveryCompleted) => recoveryCompleted(state) + eventHandler).receiveSignal { case (state, RecoveryCompleted) => + recoveryCompleted(state) } private def recoveryCompleted(state: AuctionState): Unit = { @@ -180,9 +181,9 @@ object ReplicatedAuctionExampleSpec { val millisUntilClosing = closingAt.toEpochMilli - replicationContext.currentTimeMillis() timers.startSingleTimer(Finish, millisUntilClosing.millis) } - //#setup + // #setup - //#command-handler + // #command-handler def commandHandler(state: AuctionState, command: Command): Effect[Event, AuctionState] = { state.phase match { case Closing(_) | Closed => @@ -230,9 +231,9 @@ object ReplicatedAuctionExampleSpec { } } } - //#command-handler + // #command-handler - //#event-handler + // #event-handler def eventHandler(state: AuctionState, event: Event): AuctionState = { val newState = state.applyEvent(event) @@ -244,9 +245,9 @@ object ReplicatedAuctionExampleSpec { } - //#event-handler + // #event-handler - //#event-triggers + // #event-triggers private def eventTriggers(event: Event, newState: AuctionState): Unit = { event match { case finished: AuctionFinished => @@ -284,11 +285,11 @@ object ReplicatedAuctionExampleSpec { false }) } - //#event-triggers + // #event-triggers - //#setup + // #setup } - //#setup + // #setup } class ReplicatedAuctionExampleSpec diff --git a/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedBlogExampleSpec.scala b/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedBlogExampleSpec.scala index 9a5d9ef748b..5190f1c0ef2 100644 --- a/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedBlogExampleSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedBlogExampleSpec.scala @@ -65,7 +65,7 @@ object ReplicatedBlogExampleSpec { } } - //#command-handler + // #command-handler 
private def commandHandler( ctx: ActorContext[Command], replicationContext: ReplicationContext, @@ -100,9 +100,9 @@ object ReplicatedBlogExampleSpec { Effect.none } } - //#command-handler + // #command-handler - //#event-handler + // #event-handler private def eventHandler( ctx: ActorContext[Command], replicationContext: ReplicationContext, @@ -127,7 +127,7 @@ object ReplicatedBlogExampleSpec { state.copy(published = true) } } - //#event-handler + // #event-handler } } diff --git a/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedEventSourcingCompileOnlySpec.scala b/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedEventSourcingCompileOnlySpec.scala index 5a1e913971d..09d07a70dd2 100644 --- a/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedEventSourcingCompileOnlySpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedEventSourcingCompileOnlySpec.scala @@ -15,11 +15,11 @@ import akka.persistence.typed.scaladsl.ReplicatedEventSourcing @nowarn("msg=never used") object ReplicatedEventSourcingCompileOnlySpec { - //#replicas + // #replicas val DCA = ReplicaId("DC-A") val DCB = ReplicaId("DC-B") val AllReplicas = Set(DCA, DCB) - //#replicas + // #replicas val queryPluginId = "" @@ -28,7 +28,7 @@ object ReplicatedEventSourcingCompileOnlySpec { trait Event object Shared { - //#factory-shared + // #factory-shared def apply( system: ActorSystem[_], entityId: String, @@ -40,11 +40,11 @@ object ReplicatedEventSourcingCompileOnlySpec { EventSourcedBehavior[Command, State, Event](???, ???, ???, ???) 
} } - //#factory-shared + // #factory-shared } object PerReplica { - //#factory + // #factory def apply( system: ActorSystem[_], entityId: String, @@ -56,7 +56,7 @@ object ReplicatedEventSourcingCompileOnlySpec { } } - //#factory + // #factory } } diff --git a/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedMovieWatchListExampleSpec.scala b/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedMovieWatchListExampleSpec.scala index 49380b925d3..8360ba3a8ba 100644 --- a/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedMovieWatchListExampleSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedMovieWatchListExampleSpec.scala @@ -19,7 +19,7 @@ import akka.persistence.typed.scaladsl.EventSourcedBehavior import akka.persistence.typed.scaladsl.ReplicatedEventSourcing object ReplicatedMovieWatchListExampleSpec { - //#movie-entity + // #movie-entity object MovieWatchList { sealed trait Command final case class AddMovie(movieId: String) extends Command @@ -57,7 +57,7 @@ object ReplicatedMovieWatchListExampleSpec { } } - //#movie-entity + // #movie-entity } diff --git a/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedShoppingCartExampleSpec.scala b/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedShoppingCartExampleSpec.scala index a77be46bb4a..0a78cf3a3c4 100644 --- a/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedShoppingCartExampleSpec.scala +++ b/akka-persistence-typed-tests/src/test/scala/docs/akka/persistence/typed/ReplicatedShoppingCartExampleSpec.scala @@ -24,7 +24,7 @@ import akka.serialization.jackson.CborSerializable object ReplicatedShoppingCartExampleSpec { - //#shopping-cart + // #shopping-cart object ShoppingCart { type ProductId = String @@ -79,7 +79,7 @@ object ReplicatedShoppingCartExampleSpec { } } } - //#shopping-cart + 
// #shopping-cart } class ReplicatedShoppingCartExampleSpec diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/EventAdapter.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/EventAdapter.scala index 5188825a367..436cb5130c9 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/EventAdapter.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/EventAdapter.scala @@ -107,17 +107,13 @@ object EventSeq { override def size: Int = events.size } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object NoOpEventAdapter { private val i = new NoOpEventAdapter[Nothing] def instance[E]: NoOpEventAdapter[E] = i.asInstanceOf[NoOpEventAdapter[E]] } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class NoOpEventAdapter[E] extends EventAdapter[E, Any] { override def toJournal(e: E): Any = e override def fromJournal(p: Any, manifest: String): EventSeq[E] = EventSeq.single(p.asInstanceOf[E]) diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/EventRejectedException.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/EventRejectedException.scala index b04351c40e5..24403c2abf9 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/EventRejectedException.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/EventRejectedException.scala @@ -4,8 +4,6 @@ package akka.persistence.typed -/** - * Thrown if a journal rejects an event e.g. due to a serialization error. - */ +/** Thrown if a journal rejects an event e.g. due to a serialization error. 
*/ final class EventRejectedException(persistenceId: PersistenceId, sequenceNr: Long, cause: Throwable) extends RuntimeException(s"Rejected event, persistenceId [${persistenceId.id}], sequenceNr [$sequenceNr]", cause) diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/EventSourcedSignal.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/EventSourcedSignal.scala index d1d93735fa7..84782cb87af 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/EventSourcedSignal.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/EventSourcedSignal.scala @@ -23,30 +23,22 @@ case object RecoveryCompleted extends RecoveryCompleted { final case class RecoveryFailed(failure: Throwable) extends EventSourcedSignal { - /** - * Java API - */ + /** Java API */ def getFailure(): Throwable = failure } final case class SnapshotCompleted(metadata: SnapshotMetadata) extends EventSourcedSignal { - /** - * Java API - */ + /** Java API */ def getSnapshotMetadata(): SnapshotMetadata = metadata } final case class SnapshotFailed(metadata: SnapshotMetadata, failure: Throwable) extends EventSourcedSignal { - /** - * Java API - */ + /** Java API */ def getFailure(): Throwable = failure - /** - * Java API - */ + /** Java API */ def getSnapshotMetadata(): SnapshotMetadata = metadata } @@ -61,9 +53,7 @@ object SnapshotMetadata { def apply(persistenceId: String, sequenceNr: Long, timestamp: Long): SnapshotMetadata = new SnapshotMetadata(persistenceId, sequenceNr, timestamp) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def fromClassic(metadata: akka.persistence.SnapshotMetadata): SnapshotMetadata = new SnapshotMetadata(metadata.persistenceId, metadata.sequenceNr, metadata.timestamp) } @@ -83,64 +73,46 @@ final class SnapshotMetadata(val persistenceId: String, val sequenceNr: Long, va final case class DeleteSnapshotsCompleted(target: DeletionTarget) extends EventSourcedSignal { - /** - * Java API 
- */ + /** Java API */ def getTarget(): DeletionTarget = target } final case class DeleteSnapshotsFailed(target: DeletionTarget, failure: Throwable) extends EventSourcedSignal { - /** - * Java API - */ + /** Java API */ def getFailure(): Throwable = failure - /** - * Java API - */ + /** Java API */ def getTarget(): DeletionTarget = target } final case class DeleteEventsCompleted(toSequenceNr: Long) extends EventSourcedSignal { - /** - * Java API - */ + /** Java API */ def getToSequenceNr(): Long = toSequenceNr } final case class DeleteEventsFailed(toSequenceNr: Long, failure: Throwable) extends EventSourcedSignal { - /** - * Java API - */ + /** Java API */ def getFailure(): Throwable = failure - /** - * Java API - */ + /** Java API */ def getToSequenceNr(): Long = toSequenceNr } -/** - * Not for user extension - */ +/** Not for user extension */ @DoNotInherit sealed trait DeletionTarget object DeletionTarget { final case class Individual(metadata: SnapshotMetadata) extends DeletionTarget { - /** - * Java API - */ + /** Java API */ def getSnapshotMetadata(): SnapshotMetadata = metadata } final case class Criteria(selection: SnapshotSelectionCriteria) extends DeletionTarget { - /** - * Java API - */ + /** Java API */ def getSnapshotSelection(): SnapshotSelectionCriteria = selection } } diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/PersistenceId.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/PersistenceId.scala index 195ff091146..0b9af9aa517 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/PersistenceId.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/PersistenceId.scala @@ -119,9 +119,7 @@ object PersistenceId { def of(entityTypeHint: String, entityId: String, separator: String): PersistenceId = apply(entityTypeHint, entityId, separator) - /** - * Constructs a [[PersistenceId]] with `id` as the full unique identifier. 
- */ + /** Constructs a [[PersistenceId]] with `id` as the full unique identifier. */ def ofUniqueId(id: String): PersistenceId = new PersistenceId(id) diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/ReplicaId.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/ReplicaId.scala index 13bb3eeb60c..018ab8e19a3 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/ReplicaId.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/ReplicaId.scala @@ -4,7 +4,5 @@ package akka.persistence.typed -/** - * Identifies a replica in Replicated Event Sourcing, could be a datacenter name or a logical identifier. - */ +/** Identifies a replica in Replicated Event Sourcing, could be a datacenter name or a logical identifier. */ final case class ReplicaId(id: String) diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/SnapshotAdapter.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/SnapshotAdapter.scala index a6c0e6c2aa3..e68961e608d 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/SnapshotAdapter.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/SnapshotAdapter.scala @@ -13,9 +13,7 @@ package akka.persistence.typed */ trait SnapshotAdapter[State] { - /** - * Transform the state to a different type before sending to the journal. - */ + /** Transform the state to a different type before sending to the journal. 
*/ def toJournal(state: State): Any /** diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/SnapshotSelectionCriteria.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/SnapshotSelectionCriteria.scala index dc5f78fc296..a77f516e0b7 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/SnapshotSelectionCriteria.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/SnapshotSelectionCriteria.scala @@ -10,9 +10,7 @@ import akka.util.HashCode object SnapshotSelectionCriteria { - /** - * The latest saved snapshot. - */ + /** The latest saved snapshot. */ val latest: SnapshotSelectionCriteria = new SnapshotSelectionCriteria( maxSequenceNr = Long.MaxValue, @@ -20,32 +18,24 @@ object SnapshotSelectionCriteria { minSequenceNr = 0L, minTimestamp = 0L) - /** - * No saved snapshot matches. - */ + /** No saved snapshot matches. */ val none: SnapshotSelectionCriteria = new SnapshotSelectionCriteria(maxSequenceNr = 0L, maxTimestamp = 0L, minSequenceNr = 0L, minTimestamp = 0L) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def fromClassic(c: ClassicSnapshotSelectionCriteria): SnapshotSelectionCriteria = new SnapshotSelectionCriteria(c.maxSequenceNr, c.maxTimestamp, c.minSequenceNr, c.minTimestamp) } -/** - * Selection criteria for loading and deleting snapshots. - */ +/** Selection criteria for loading and deleting snapshots. 
*/ final class SnapshotSelectionCriteria @InternalApi private[akka] ( val maxSequenceNr: Long, val maxTimestamp: Long, val minSequenceNr: Long, val minTimestamp: Long) { - /** - * upper bound for a selected snapshot's sequence number - */ + /** upper bound for a selected snapshot's sequence number */ def withMaxSequenceNr(newMaxSequenceNr: Long): SnapshotSelectionCriteria = copy(maxSequenceNr = newMaxSequenceNr) @@ -56,9 +46,7 @@ final class SnapshotSelectionCriteria @InternalApi private[akka] ( def withMaxTimestamp(newMaxTimestamp: Long): SnapshotSelectionCriteria = copy(maxTimestamp = newMaxTimestamp) - /** - * lower bound for a selected snapshot's sequence number - */ + /** lower bound for a selected snapshot's sequence number */ def withMinSequenceNr(newMinSequenceNr: Long): SnapshotSelectionCriteria = copy(minSequenceNr = newMinSequenceNr) @@ -79,9 +67,7 @@ final class SnapshotSelectionCriteria @InternalApi private[akka] ( override def toString: String = s"SnapshotSelectionCriteria($maxSequenceNr,$maxTimestamp,$minSequenceNr,$minTimestamp)" - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def toClassic: akka.persistence.SnapshotSelectionCriteria = akka.persistence.SnapshotSelectionCriteria(maxSequenceNr, maxTimestamp, minSequenceNr, minTimestamp) diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/crdt/Counter.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/crdt/Counter.scala index ab0060e707e..6c9d89c2359 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/crdt/Counter.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/crdt/Counter.scala @@ -9,14 +9,10 @@ object Counter { final case class Updated(delta: BigInt) { - /** - * JAVA API - */ + /** JAVA API */ def this(delta: java.math.BigInteger) = this(delta: BigInt) - /** - * JAVA API - */ + /** JAVA API */ def this(delta: Int) = this(delta: BigInt) } } diff --git 
a/akka-persistence-typed/src/main/scala/akka/persistence/typed/crdt/ORSet.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/crdt/ORSet.scala index 40a63067189..e25a2c61bd8 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/crdt/ORSet.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/crdt/ORSet.scala @@ -17,28 +17,20 @@ object ORSet { def empty[A](originReplica: ReplicaId): ORSet[A] = new ORSet(originReplica.id, Map.empty, VersionVector.empty) def apply[A](originReplica: ReplicaId): ORSet[A] = empty(originReplica) - /** - * Java API - */ + /** Java API */ def create[A](originReplica: ReplicaId): ORSet[A] = empty(originReplica) - /** - * Extract the [[ORSet#elements]]. - */ + /** Extract the [[ORSet#elements]]. */ def unapply[A](s: ORSet[A]): Option[Set[A]] = Some(s.elements) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] type Dot = VersionVector sealed trait DeltaOp { def merge(that: DeltaOp): DeltaOp } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] sealed abstract class AtomicDeltaOp[A] extends DeltaOp { def underlying: ORSet[A] } @@ -86,9 +78,7 @@ object ORSet { } } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] final case class DeltaGroup[A](ops: immutable.IndexedSeq[DeltaOp]) extends DeltaOp { override def merge(that: DeltaOp): DeltaOp = that match { case thatAdd: AddDeltaOp[A @unchecked] => @@ -154,64 +144,63 @@ object ORSet { mergeCommonKeys(commonKeys.iterator, lhs, rhs) private def mergeCommonKeys[A](commonKeys: Iterator[A], lhs: ORSet[A], rhs: ORSet[A]): Map[A, ORSet.Dot] = { - commonKeys.foldLeft(Map.empty[A, ORSet.Dot]) { - case (acc, k) => - val lhsDots = lhs.elementsMap(k) - val rhsDots = rhs.elementsMap(k) - (lhsDots, rhsDots) match { - case (OneVersionVector(n1, v1), OneVersionVector(n2, v2)) => - if (n1 == n2 && v1 == v2) - // one single common dot - acc.updated(k, lhsDots) - else { - // no common, 
lhsUniqueDots == lhsDots, rhsUniqueDots == rhsDots - val lhsKeep = ORSet.subtractDots(lhsDots, rhs.vvector) - val rhsKeep = ORSet.subtractDots(rhsDots, lhs.vvector) - val merged = lhsKeep.merge(rhsKeep) - // Perfectly possible that an item in both sets should be dropped - if (merged.isEmpty) acc - else acc.updated(k, merged) - } - case (ManyVersionVector(lhsVs), ManyVersionVector(rhsVs)) => - val commonDots = lhsVs.filter { - case (thisDotNode, v) => rhsVs.get(thisDotNode).exists(_ == v) - } - val commonDotsKeys = commonDots.keys - val lhsUniqueDots = lhsVs -- commonDotsKeys - val rhsUniqueDots = rhsVs -- commonDotsKeys - val lhsKeep = ORSet.subtractDots(VersionVector(lhsUniqueDots), rhs.vvector) - val rhsKeep = ORSet.subtractDots(VersionVector(rhsUniqueDots), lhs.vvector) - val merged = lhsKeep.merge(rhsKeep).merge(VersionVector(commonDots)) - // Perfectly possible that an item in both sets should be dropped - if (merged.isEmpty) acc - else acc.updated(k, merged) - case (ManyVersionVector(lhsVs), OneVersionVector(n2, v2)) => - val commonDots = lhsVs.filter { - case (n1, v1) => v1 == v2 && n1 == n2 - } - val commonDotsKeys = commonDots.keys - val lhsUniqueDots = lhsVs -- commonDotsKeys - val rhsUnique = if (commonDotsKeys.isEmpty) rhsDots else VersionVector.empty - val lhsKeep = ORSet.subtractDots(VersionVector(lhsUniqueDots), rhs.vvector) - val rhsKeep = ORSet.subtractDots(rhsUnique, lhs.vvector) - val merged = lhsKeep.merge(rhsKeep).merge(VersionVector(commonDots)) - // Perfectly possible that an item in both sets should be dropped - if (merged.isEmpty) acc - else acc.updated(k, merged) - case (OneVersionVector(n1, v1), ManyVersionVector(rhsVs)) => - val commonDots = rhsVs.filter { - case (n2, v2) => v1 == v2 && n1 == n2 - } - val commonDotsKeys = commonDots.keys - val lhsUnique = if (commonDotsKeys.isEmpty) lhsDots else VersionVector.empty - val rhsUniqueDots = rhsVs -- commonDotsKeys - val lhsKeep = ORSet.subtractDots(lhsUnique, rhs.vvector) - val rhsKeep = 
ORSet.subtractDots(VersionVector(rhsUniqueDots), lhs.vvector) - val merged = lhsKeep.merge(rhsKeep).merge(VersionVector(commonDots)) + commonKeys.foldLeft(Map.empty[A, ORSet.Dot]) { case (acc, k) => + val lhsDots = lhs.elementsMap(k) + val rhsDots = rhs.elementsMap(k) + (lhsDots, rhsDots) match { + case (OneVersionVector(n1, v1), OneVersionVector(n2, v2)) => + if (n1 == n2 && v1 == v2) + // one single common dot + acc.updated(k, lhsDots) + else { + // no common, lhsUniqueDots == lhsDots, rhsUniqueDots == rhsDots + val lhsKeep = ORSet.subtractDots(lhsDots, rhs.vvector) + val rhsKeep = ORSet.subtractDots(rhsDots, lhs.vvector) + val merged = lhsKeep.merge(rhsKeep) // Perfectly possible that an item in both sets should be dropped if (merged.isEmpty) acc else acc.updated(k, merged) - } + } + case (ManyVersionVector(lhsVs), ManyVersionVector(rhsVs)) => + val commonDots = lhsVs.filter { case (thisDotNode, v) => + rhsVs.get(thisDotNode).exists(_ == v) + } + val commonDotsKeys = commonDots.keys + val lhsUniqueDots = lhsVs -- commonDotsKeys + val rhsUniqueDots = rhsVs -- commonDotsKeys + val lhsKeep = ORSet.subtractDots(VersionVector(lhsUniqueDots), rhs.vvector) + val rhsKeep = ORSet.subtractDots(VersionVector(rhsUniqueDots), lhs.vvector) + val merged = lhsKeep.merge(rhsKeep).merge(VersionVector(commonDots)) + // Perfectly possible that an item in both sets should be dropped + if (merged.isEmpty) acc + else acc.updated(k, merged) + case (ManyVersionVector(lhsVs), OneVersionVector(n2, v2)) => + val commonDots = lhsVs.filter { case (n1, v1) => + v1 == v2 && n1 == n2 + } + val commonDotsKeys = commonDots.keys + val lhsUniqueDots = lhsVs -- commonDotsKeys + val rhsUnique = if (commonDotsKeys.isEmpty) rhsDots else VersionVector.empty + val lhsKeep = ORSet.subtractDots(VersionVector(lhsUniqueDots), rhs.vvector) + val rhsKeep = ORSet.subtractDots(rhsUnique, lhs.vvector) + val merged = lhsKeep.merge(rhsKeep).merge(VersionVector(commonDots)) + // Perfectly possible that an item in 
both sets should be dropped + if (merged.isEmpty) acc + else acc.updated(k, merged) + case (OneVersionVector(n1, v1), ManyVersionVector(rhsVs)) => + val commonDots = rhsVs.filter { case (n2, v2) => + v1 == v2 && n1 == n2 + } + val commonDotsKeys = commonDots.keys + val lhsUnique = if (commonDotsKeys.isEmpty) lhsDots else VersionVector.empty + val rhsUniqueDots = rhsVs -- commonDotsKeys + val lhsKeep = ORSet.subtractDots(lhsUnique, rhs.vvector) + val rhsKeep = ORSet.subtractDots(VersionVector(rhsUniqueDots), lhs.vvector) + val merged = lhsKeep.merge(rhsKeep).merge(VersionVector(commonDots)) + // Perfectly possible that an item in both sets should be dropped + if (merged.isEmpty) acc + else acc.updated(k, merged) + } } } @@ -231,16 +220,15 @@ object ORSet { elementsMap: Map[A, ORSet.Dot], vvector: VersionVector, accumulator: Map[A, ORSet.Dot]): Map[A, ORSet.Dot] = { - keys.foldLeft(accumulator) { - case (acc, k) => - val dots = elementsMap(k) - if (vvector > dots || vvector == dots) - acc - else { - // Optimise the set of stored dots to include only those unseen - val newDots = subtractDots(dots, vvector) - acc.updated(k, newDots) - } + keys.foldLeft(accumulator) { case (acc, k) => + val dots = elementsMap(k) + if (vvector > dots || vvector == dots) + acc + else { + // Optimise the set of stored dots to include only those unseen + val newDots = subtractDots(dots, vvector) + acc.updated(k, newDots) + } } } } @@ -283,14 +271,10 @@ final class ORSet[A] private[akka] ( type T = ORSet[A] type D = ORSet.DeltaOp - /** - * Scala API - */ + /** Scala API */ def elements: Set[A] = elementsMap.keySet - /** - * Java API - */ + /** Java API */ def getElements(): java.util.Set[A] = { import akka.util.ccompat.JavaConverters._ elements.asJava @@ -302,14 +286,10 @@ final class ORSet[A] private[akka] ( def size: Int = elementsMap.size - /** - * Adds an element to the set - */ + /** Adds an element to the set */ def +(element: A): ORSet.DeltaOp = add(element) - /** - * Adds an element 
to the set - */ + /** Adds an element to the set */ def add(element: A): ORSet.DeltaOp = { val newVvector = vvector + originReplica val newDot = VersionVector(originReplica, newVvector.versionAt(originReplica)) @@ -335,24 +315,19 @@ final class ORSet[A] private[akka] ( else { val (first, rest) = elems.splitAt(1) val firstOp = add(first.head) - val (mergedOps, _) = rest.foldLeft((firstOp, applyOperation(firstOp))) { - case ((op, state), elem) => - val nextOp = state.add(elem) - val mergedOp = op.merge(nextOp) - (mergedOp, state.applyOperation(nextOp)) + val (mergedOps, _) = rest.foldLeft((firstOp, applyOperation(firstOp))) { case ((op, state), elem) => + val nextOp = state.add(elem) + val mergedOp = op.merge(nextOp) + (mergedOp, state.applyOperation(nextOp)) } mergedOps } } - /** - * Removes an element from the set. - */ + /** Removes an element from the set. */ def -(element: A): ORSet.DeltaOp = remove(element) - /** - * Removes an element from the set. - */ + /** Removes an element from the set. 
*/ def remove(element: A): ORSet.DeltaOp = { val deltaDot = VersionVector(originReplica, vvector.versionAt(originReplica)) ORSet.RemoveDeltaOp(new ORSet(originReplica, Map(element -> deltaDot), vvector)) @@ -377,11 +352,10 @@ final class ORSet[A] private[akka] ( else { val (first, rest) = elems.splitAt(1) val firstOp = remove(first.head) - val (mergedOps, _) = rest.foldLeft((firstOp, applyOperation(firstOp))) { - case ((op, state), elem) => - val nextOp = state.remove(elem) - val mergedOp = op.merge(nextOp) - (mergedOp, state.applyOperation(nextOp)) + val (mergedOps, _) = rest.foldLeft((firstOp, applyOperation(firstOp))) { case ((op, state), elem) => + val nextOp = state.remove(elem) + val mergedOp = op.merge(nextOp) + (mergedOp, state.applyOperation(nextOp)) } mergedOps } @@ -421,7 +395,8 @@ final class ORSet[A] private[akka] ( val entries00 = ORSet.mergeCommonKeys(commonKeys, this, that) val entries0 = if (addDeltaOp) - entries00 ++ this.elementsMap.filter { case (elem, _) => !that.elementsMap.contains(elem) } else { + entries00 ++ this.elementsMap.filter { case (elem, _) => !that.elementsMap.contains(elem) } + else { val thisUniqueKeys = this.elementsMap.keysIterator.filterNot(that.elementsMap.contains) ORSet.mergeDisjointKeys(thisUniqueKeys, this.elementsMap, that.vvector, entries00) } @@ -456,12 +431,11 @@ final class ORSet[A] private[akka] ( def deleteDotsNodes = deleteDots.map { case (dotNode, _) => dotNode } val newElementsMap = { val thisDotOption = this.elementsMap.get(elem) - val deleteDotsAreGreater = deleteDots.forall { - case (dotNode, dotV) => - thisDotOption match { - case Some(thisDot) => thisDot.versionAt(dotNode) <= dotV - case None => false - } + val deleteDotsAreGreater = deleteDots.forall { case (dotNode, dotV) => + thisDotOption match { + case Some(thisDot) => thisDot.versionAt(dotNode) <= dotV + case None => false + } } if (deleteDotsAreGreater) { thisDotOption match { diff --git 
a/akka-persistence-typed/src/main/scala/akka/persistence/typed/delivery/EventSourcedProducerQueue.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/delivery/EventSourcedProducerQueue.scala index acc27133675..3bf25417d8a 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/delivery/EventSourcedProducerQueue.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/delivery/EventSourcedProducerQueue.scala @@ -93,39 +93,27 @@ object EventSourcedProducerQueue { def withDeleteEvents(newDeleteEvents: Boolean): Settings = copy(deleteEvents = newDeleteEvents) - /** - * Scala API - */ + /** Scala API */ def withRestartMaxBackoff(newRestartMaxBackoff: FiniteDuration): Settings = copy(restartMaxBackoff = newRestartMaxBackoff) - /** - * Java API - */ + /** Java API */ def withRestartMaxBackoff(newRestartMaxBackoff: JavaDuration): Settings = copy(restartMaxBackoff = newRestartMaxBackoff.asScala) - /** - * Java API - */ + /** Java API */ def getRestartMaxBackoff(): JavaDuration = restartMaxBackoff.asJava - /** - * Scala API - */ + /** Scala API */ def withCleanupUnusedAfter(newCleanupUnusedAfter: FiniteDuration): Settings = copy(cleanupUnusedAfter = newCleanupUnusedAfter) - /** - * Java API - */ + /** Java API */ def withCleanupUnusedAfter(newCleanupUnusedAfter: JavaDuration): Settings = copy(cleanupUnusedAfter = newCleanupUnusedAfter.asScala) - /** - * Java API - */ + /** Java API */ def getCleanupUnusedAfter(): JavaDuration = cleanupUnusedAfter.asJava @@ -135,9 +123,7 @@ object EventSourcedProducerQueue { def withSnapshotPluginId(id: String): Settings = copy(snapshotPluginId = id) - /** - * Private copy method for internal use only. - */ + /** Private copy method for internal use only. 
*/ private def copy( restartMaxBackoff: FiniteDuration = restartMaxBackoff, snapshotEvery: Int = snapshotEvery, @@ -198,23 +184,17 @@ object EventSourcedProducerQueue { } } - /** - * Java API - */ + /** Java API */ def create[A](persistenceId: PersistenceId): Behavior[DurableProducerQueue.Command[A]] = apply(persistenceId) - /** - * Java API - */ + /** Java API */ def create[A](persistenceId: PersistenceId, settings: Settings): Behavior[DurableProducerQueue.Command[A]] = apply(persistenceId, settings) } -/** - * INTERNAL API - */ +/** INTERNAL API */ private class EventSourcedProducerQueue[A]( context: ActorContext[DurableProducerQueue.Command[A]], cleanupUnusedAfter: FiniteDuration) { diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/BehaviorSetup.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/BehaviorSetup.scala index 0abb97d6229..6383d4e587a 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/BehaviorSetup.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/BehaviorSetup.scala @@ -25,9 +25,7 @@ import akka.persistence.typed.scaladsl.EventSourcedBehavior import akka.persistence.typed.scaladsl.RetentionCriteria import akka.util.OptionVal -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object BehaviorSetup { sealed trait SnapshotAfterPersist case object NoSnapshot extends SnapshotAfterPersist @@ -35,9 +33,7 @@ import akka.util.OptionVal case object SnapshotWithoutRetention extends SnapshotAfterPersist } -/** - * INTERNAL API: Carry state for the Persistent behavior implementation behaviors. - */ +/** INTERNAL API: Carry state for the Persistent behavior implementation behaviors. 
*/ @InternalApi private[akka] final class BehaviorSetup[C, E, S]( val context: ActorContext[InternalProtocol], @@ -77,9 +73,9 @@ private[akka] final class BehaviorSetup[C, E, S]( } if (isSnapshotOptional && (retention match { - case SnapshotCountRetentionCriteriaImpl(_, _, true) => true - case _ => false - })) { + case SnapshotCountRetentionCriteriaImpl(_, _, true) => true + case _ => false + })) { throw new IllegalArgumentException( "Retention criteria with delete events can't be used together with snapshot-is-optional=false. " + "That can result in wrong recovered state if snapshot load fails.") @@ -283,9 +279,7 @@ private[akka] final class BehaviorSetup[C, E, S]( } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object PersistenceMdc { // format: OFF diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EventSourcedBehaviorImpl.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EventSourcedBehaviorImpl.scala index 5a5f010e560..0a4e1b1fbb7 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EventSourcedBehaviorImpl.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EventSourcedBehaviorImpl.scala @@ -65,9 +65,7 @@ private[akka] object EventSourcedBehaviorImpl { } final case class WriterIdentity(instanceId: Int, writerUuid: String) - /** - * Used by EventSourcedBehaviorTestKit to retrieve the `persistenceId`. - */ + /** Used by EventSourcedBehaviorTestKit to retrieve the `persistenceId`. 
*/ final case class GetPersistenceId(replyTo: ActorRef[PersistenceId]) extends Signal /** @@ -76,14 +74,10 @@ private[akka] object EventSourcedBehaviorImpl { */ final case class GetState[State](replyTo: ActorRef[GetStateReply[State]]) extends InternalProtocol - /** - * Used to send a state being `null` as an Actor message - */ + /** Used to send a state being `null` as an Actor message */ final case class GetStateReply[State](currentState: State) - /** - * Used to start the replication stream at the correct sequence number - */ + /** Used to start the replication stream at the correct sequence number */ final case class GetSeenSequenceNr(replica: ReplicaId, replyTo: ActorRef[Long]) extends InternalProtocol } @@ -215,7 +209,7 @@ private[akka] final case class EventSourcedBehaviorImpl[Command, Event, State]( case res: SnapshotProtocol.Response => InternalProtocol.SnapshotterResponse(res) case RecoveryPermitter.RecoveryPermitGranted => InternalProtocol.RecoveryPermitGranted case internal: InternalProtocol => internal // such as RecoveryTickEvent - case cmd => InternalProtocol.IncomingCommand(cmd.asInstanceOf[Command]) + case cmd => InternalProtocol.IncomingCommand(cmd.asInstanceOf[Command]) } target(ctx, innerMsg) } @@ -296,8 +290,8 @@ private[akka] final case class EventSourcedBehaviorImpl[Command, Event, State]( override private[akka] def withReplication( context: ReplicationContextImpl): EventSourcedBehavior[Command, Event, State] = { - copy( - replication = Some(ReplicationSetup(context.replicationId.replicaId, context.replicasAndQueryPlugins, context))) + copy(replication = + Some(ReplicationSetup(context.replicationId.replicaId, context.replicasAndQueryPlugins, context))) } override def withStashCapacity(size: Int): EventSourcedBehavior[Command, Event, State] = @@ -375,9 +369,7 @@ private[akka] case object ReplicatedEventAck final class ReplicatedPublishedEventMetaData(val replicaId: ReplicaId, private[akka] val version: VersionVector) -/** - * INTERNAL API - */ 
+/** INTERNAL API */ @InternalStableApi private[akka] final case class PublishedEventImpl( persistenceId: PersistenceId, diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EventSourcedSettings.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EventSourcedSettings.scala index a619922ac65..c858631365c 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EventSourcedSettings.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EventSourcedSettings.scala @@ -14,9 +14,7 @@ import akka.actor.typed.ActorSystem import akka.annotation.InternalApi import akka.persistence.Persistence -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object EventSourcedSettings { def apply(system: ActorSystem[_], journalPluginId: String, snapshotPluginId: String): EventSourcedSettings = @@ -80,9 +78,7 @@ import akka.persistence.Persistence } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class EventSourcedSettings( stashCapacity: Int, @@ -100,15 +96,11 @@ private[akka] final case class EventSourcedSettings( } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] sealed trait StashOverflowStrategy -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object StashOverflowStrategy { case object Drop extends StashOverflowStrategy diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EventWriter.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EventWriter.scala index 06266dd2332..9de1e695ea8 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EventWriter.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EventWriter.scala @@ -33,9 +33,7 @@ import akka.persistence.journal.Tagged import akka.util.JavaDurationConverters.JavaDurationOps import akka.util.Timeout 
-/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalStableApi private[akka] object EventWriterExtension extends ExtensionId[EventWriterExtension] { def createExtension(system: ActorSystem[_]): EventWriterExtension = new EventWriterExtension(system) @@ -43,9 +41,7 @@ private[akka] object EventWriterExtension extends ExtensionId[EventWriterExtensi def get(system: ActorSystem[_]): EventWriterExtension = apply(system) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalStableApi private[akka] class EventWriterExtension(system: ActorSystem[_]) extends Extension { @@ -54,7 +50,8 @@ private[akka] class EventWriterExtension(system: ActorSystem[_]) extends Extensi def writerForJournal(journalId: Option[String]): ActorRef[EventWriter.Command] = writersPerJournalId.computeIfAbsent( - journalId.getOrElse(""), { _ => + journalId.getOrElse(""), + { _ => system.systemActorOf( EventWriter(journalId.getOrElse(""), settings), s"EventWriter-${URLEncoder.encode(journalId.getOrElse("default"), "UTF-8")}") @@ -62,9 +59,7 @@ private[akka] class EventWriterExtension(system: ActorSystem[_]) extends Extensi } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalStableApi private[akka] object EventWriter { @@ -179,8 +174,8 @@ private[akka] object EventWriter { perPidWriteState = perPidWriteState - pid } else { // batch waiting for pid - val newReplyTo = newStateForPid.waitingForWrite.map { - case (repr, replyTo) => (repr.sequenceNr, (repr, replyTo)) + val newReplyTo = newStateForPid.waitingForWrite.map { case (repr, replyTo) => + (repr.sequenceNr, (repr, replyTo)) }.toMap val updatedState = newStateForPid.copy( @@ -229,15 +224,15 @@ private[akka] object EventWriter { val accumulationFactor = 1.1 if (perPidWriteState.size >= latestSequenceNumberCacheCapacity * accumulationFactor) { val idleEntries = - perPidWriteState.iterator.filter { - case (_, stateForPid) => stateForPid.idle && stateForPid.waitingForSeqNrLookup.isEmpty + perPidWriteState.iterator.filter { case (_, 
stateForPid) => + stateForPid.idle && stateForPid.waitingForSeqNrLookup.isEmpty }.toVector if (idleEntries.size >= latestSequenceNumberCacheCapacity * accumulationFactor) { val pidsToRemove = idleEntries - .sortBy { - case (_, stateForPid) => stateForPid.usedTimestamp + .sortBy { case (_, stateForPid) => + stateForPid.usedTimestamp } .take(idleEntries.size - latestSequenceNumberCacheCapacity) .map { case (pid, _) => pid } @@ -421,7 +416,8 @@ private[akka] object EventWriter { if (state.idle) { sendToJournal(state.currentTransactionId + 1, fillRepr :+ repr) val newWaitingForReply = - (fillRepr.map(r => r.sequenceNr -> (r -> ignoreRef)) :+ (repr.sequenceNr -> (repr -> replyTo))).toMap + (fillRepr.map(r => + r.sequenceNr -> (r -> ignoreRef)) :+ (repr.sequenceNr -> (repr -> replyTo))).toMap state.copy( waitingForReply = newWaitingForReply, currentTransactionId = state.currentTransactionId + 1) @@ -500,9 +496,8 @@ private[akka] object EventWriter { val (alreadyInJournal, needsWrite) = sortedSeqs.partition(seqNr => seqNr <= maxSeqNr) if (alreadyInJournal.isEmpty) { // error was not about duplicates - state.waitingForReply.values.foreach { - case (_, replyTo) => - replyTo ! StatusReply.error("Journal write failed") + state.waitingForReply.values.foreach { case (_, replyTo) => + replyTo ! StatusReply.error("Journal write failed") } context.log.warnN( "Failed writing event batch persistence id [{}], sequence nr [{}-{}]: {}", @@ -559,9 +554,8 @@ private[akka] object EventWriter { pid, errorDescInLog) case Some(state) => - state.waitingForReply.values.foreach { - case (_, replyTo) => - replyTo ! StatusReply.error("Journal write failed") + state.waitingForReply.values.foreach { case (_, replyTo) => + replyTo ! 
StatusReply.error("Journal write failed") } val sortedSeqs = state.waitingForReply.keys.toSeq.sorted context.log.warnN( diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ExternalInteractions.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ExternalInteractions.scala index 5c6606fdb1e..3b4c928c3b1 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ExternalInteractions.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ExternalInteractions.scala @@ -65,9 +65,9 @@ private[akka] trait JournalInteractions[C, E, S] { onWriteInitiated(ctx, cmd, repr) val write = AtomicWrite(metadata match { - case OptionVal.Some(meta) => repr.withMetadata(meta) - case _ => repr - }) :: Nil + case OptionVal.Some(meta) => repr.withMetadata(meta) + case _ => repr + }) :: Nil setup.journal .tell(JournalProtocol.WriteMessages(write, setup.selfClassic, setup.writerIdentity.instanceId), setup.selfClassic) @@ -89,20 +89,19 @@ private[akka] trait JournalInteractions[C, E, S] { if (events.nonEmpty) { var newState = state - val writes = events.map { - case EventToPersist(event, eventAdapterManifest, metadata) => - newState = newState.nextSequenceNr() - val repr = PersistentRepr( - event, - persistenceId = setup.persistenceId.id, - sequenceNr = newState.seqNr, - manifest = eventAdapterManifest, - writerUuid = setup.writerIdentity.writerUuid, - sender = ActorRef.noSender) - metadata match { - case Some(metadata) => repr.withMetadata(metadata) - case None => repr - } + val writes = events.map { case EventToPersist(event, eventAdapterManifest, metadata) => + newState = newState.nextSequenceNr() + val repr = PersistentRepr( + event, + persistenceId = setup.persistenceId.id, + sequenceNr = newState.seqNr, + manifest = eventAdapterManifest, + writerUuid = setup.writerIdentity.writerUuid, + sender = ActorRef.noSender) + metadata match { + case Some(metadata) => 
repr.withMetadata(metadata) + case None => repr + } } onWritesInitiated(ctx, cmd, writes) diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/FastForwardingFilter.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/FastForwardingFilter.scala index 3a813347b92..08deaebe7f4 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/FastForwardingFilter.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/FastForwardingFilter.scala @@ -15,17 +15,13 @@ import akka.stream.stage.GraphStageWithMaterializedValue import akka.stream.stage.InHandler import akka.stream.stage.OutHandler -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] trait ReplicationStreamControl { def fastForward(sequenceNumber: Long): Unit } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class FastForwardingFilter extends GraphStageWithMaterializedValue[FlowShape[EventEnvelope, EventEnvelope], ReplicationStreamControl] { diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/NoOpSnapshotAdapter.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/NoOpSnapshotAdapter.scala index f0a05183ae3..a209deec565 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/NoOpSnapshotAdapter.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/NoOpSnapshotAdapter.scala @@ -7,18 +7,14 @@ package akka.persistence.typed.internal import akka.annotation.InternalApi import akka.persistence.typed.SnapshotAdapter -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class NoOpSnapshotAdapter extends SnapshotAdapter[Any] { override def toJournal(state: Any): Any = state override def fromJournal(from: Any): Any = from } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object NoOpSnapshotAdapter { val i = new 
NoOpSnapshotAdapter diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/RecoveryImpl.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/RecoveryImpl.scala index 596e58f933e..e7cba1c8c6d 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/RecoveryImpl.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/RecoveryImpl.scala @@ -7,35 +7,25 @@ package akka.persistence.typed.internal import akka.annotation.InternalApi import akka.persistence.typed.{ javadsl, scaladsl, SnapshotSelectionCriteria } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] case object DefaultRecovery extends javadsl.Recovery with scaladsl.Recovery { override def asScala: scaladsl.Recovery = this override def asJava: javadsl.Recovery = this - /** - * INTERNAL API - */ + /** INTERNAL API */ override private[akka] def toClassic = akka.persistence.Recovery() } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] case object DisabledRecovery extends javadsl.Recovery with scaladsl.Recovery { override def asScala: scaladsl.Recovery = this override def asJava: javadsl.Recovery = this - /** - * INTERNAL API - */ + /** INTERNAL API */ override private[akka] def toClassic = akka.persistence.Recovery.none } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] case class RecoveryWithSnapshotSelectionCriteria( snapshotSelectionCriteria: SnapshotSelectionCriteria) extends javadsl.Recovery @@ -43,8 +33,6 @@ import akka.persistence.typed.{ javadsl, scaladsl, SnapshotSelectionCriteria } override def asScala: scaladsl.Recovery = this override def asJava: javadsl.Recovery = this - /** - * INTERNAL API - */ + /** INTERNAL API */ override private[akka] def toClassic = akka.persistence.Recovery(snapshotSelectionCriteria.toClassic) } diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ReplayingEvents.scala 
b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ReplayingEvents.scala index c98bf170af3..c25c518644f 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ReplayingEvents.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ReplayingEvents.scala @@ -38,7 +38,8 @@ import akka.util.OptionVal import akka.util.PrettyDuration._ import akka.util.unused -/*** +/** + * * * INTERNAL API * * Third (of four) behavior of an EventSourcedBehavior. @@ -110,8 +111,8 @@ private[akka] final class ReplayingEvents[C, E, S]( case cmd: IncomingCommand[C @unchecked] => onInternalCommand(cmd) case get: GetState[S @unchecked] => stashInternal(get) case get: GetSeenSequenceNr => stashInternal(get) - case RecoveryPermitGranted => Behaviors.unhandled // should not happen, we already have the permit - case ContinueUnstash => Behaviors.unhandled + case RecoveryPermitGranted => Behaviors.unhandled // should not happen, we already have the permit + case ContinueUnstash => Behaviors.unhandled } } diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ReplicationSetup.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ReplicationSetup.scala index 42c450957f1..d9a24105c1e 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ReplicationSetup.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ReplicationSetup.scala @@ -12,17 +12,13 @@ import akka.util.OptionVal import akka.util.WallClock import akka.util.ccompat.JavaConverters._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ReplicationContextImpl { val NoPlugin = "no-plugin" } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class ReplicationContextImpl( val replicationId: ReplicationId, @@ -58,7 +54,7 @@ private[akka] final class ReplicationContextImpl( checkAccess("origin") _origin match 
{ case OptionVal.Some(origin) => origin - case _ => throw new IllegalStateException("origin can only be accessed from the event handler") + case _ => throw new IllegalStateException("origin can only be accessed from the event handler") } } @@ -84,9 +80,7 @@ private[akka] final class ReplicationContextImpl( override def getAllReplicas: java.util.Set[ReplicaId] = allReplicas.asJava } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class ReplicationSetup( replicaId: ReplicaId, @@ -95,9 +89,7 @@ private[akka] final case class ReplicationSetup( val allReplicas: Set[ReplicaId] = allReplicasAndQueryPlugins.keySet - /** - * Must only be called on the same thread that will execute the user code - */ + /** Must only be called on the same thread that will execute the user code */ def setContext(recoveryRunning: Boolean, originReplica: ReplicaId, concurrent: Boolean): Unit = { replicationContext._currentThread = OptionVal.Some(Thread.currentThread()) replicationContext._recoveryRunning = recoveryRunning diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/RetentionCriteriaImpl.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/RetentionCriteriaImpl.scala index c2ac9639b49..eed3a8e6dd5 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/RetentionCriteriaImpl.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/RetentionCriteriaImpl.scala @@ -26,9 +26,7 @@ import akka.persistence.typed.scaladsl def snapshotWhen(currentSequenceNr: Long): Boolean = currentSequenceNr % snapshotEveryNEvents == 0 - /** - * Should only be used when `BehaviorSetup.isOnlyOneSnapshot` is true. - */ + /** Should only be used when `BehaviorSetup.isOnlyOneSnapshot` is true. 
*/ def deleteUpperSequenceNr(lastSequenceNr: Long): Long = { // Delete old events, retain the latest math.max(0, lastSequenceNr - (keepNSnapshots.toLong * snapshotEveryNEvents)) @@ -42,9 +40,7 @@ import akka.persistence.typed.scaladsl override def asJava: javadsl.RetentionCriteria = this } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] case object DisabledRetentionCriteria extends javadsl.RetentionCriteria with scaladsl.RetentionCriteria { diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/Running.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/Running.scala index f1f54bb6eaf..e7263d754b3 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/Running.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/Running.scala @@ -175,35 +175,37 @@ private[akka] object Running { meta.version) ReplicatedEventEnvelope(re, replyTo) } - .recoverWithRetries(1, { - // not a failure, the replica is stopping, complete the stream - case _: WatchedActorTerminatedException => - Source.empty - })) + .recoverWithRetries( + 1, + { + // not a failure, the replica is stopping, complete the stream + case _: WatchedActorTerminatedException => + Source.empty + })) source.runWith(Sink.ignore)(SystemMaterializer(system).materializer) // TODO support from journal to fast forward https://github.com/akka/akka/issues/29311 - state.copy( - replicationControl = - state.replicationControl.updated(replicaId, new ReplicationStreamControl { - override def fastForward(sequenceNumber: Long): Unit = { - // (logging is safe here since invoked on message receive - OptionVal(controlRef.get) match { - case OptionVal.Some(control) => - if (setup.internalLogger.isDebugEnabled) - setup.internalLogger.debug("Fast forward replica [{}] to [{}]", replicaId, sequenceNumber) - control.fastForward(sequenceNumber) - case _ => - // stream not started yet, ok, fast forward is an 
optimization - if (setup.internalLogger.isDebugEnabled) - setup.internalLogger.debug( - "Ignoring fast forward replica [{}] to [{}], stream not started yet", - replicaId, - sequenceNumber) - } + state.copy(replicationControl = state.replicationControl.updated( + replicaId, + new ReplicationStreamControl { + override def fastForward(sequenceNumber: Long): Unit = { + // (logging is safe here since invoked on message receive + OptionVal(controlRef.get) match { + case OptionVal.Some(control) => + if (setup.internalLogger.isDebugEnabled) + setup.internalLogger.debug("Fast forward replica [{}] to [{}]", replicaId, sequenceNumber) + control.fastForward(sequenceNumber) + case _ => + // stream not started yet, ok, fast forward is an optimization + if (setup.internalLogger.isDebugEnabled) + setup.internalLogger.debug( + "Ignoring fast forward replica [{}] to [{}], stream not started yet", + replicaId, + sequenceNumber) } - })) + } + })) } else { state } @@ -267,7 +269,8 @@ private[akka] object Running { def onCommand(state: RunningState[S], cmd: C): Behavior[InternalProtocol] = { val effect = setup.commandHandler(state.state, cmd) - val (next, doUnstash) = applyEffects(cmd, state, effect.asInstanceOf[EffectImpl[E, S]]) // TODO can we avoid the cast? + val (next, doUnstash) = + applyEffects(cmd, state, effect.asInstanceOf[EffectImpl[E, S]]) // TODO can we avoid the cast? 
if (doUnstash) tryUnstashOne(next) else next } @@ -531,8 +534,14 @@ private[akka] object Running { val metadataTemplate: Option[ReplicatedEventMetadata] = setup.replication match { case Some(replication) => - replication.setContext(recoveryRunning = false, replication.replicaId, concurrent = false) // local events are never concurrent - Some(ReplicatedEventMetadata(replication.replicaId, 0L, state.version, concurrent = false)) // we replace it with actual seqnr later + replication.setContext( + recoveryRunning = false, + replication.replicaId, + concurrent = false + ) // local events are never concurrent + Some( + ReplicatedEventMetadata(replication.replicaId, 0L, state.version, concurrent = false) + ) // we replace it with actual seqnr later case None => None } diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/StashManagement.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/StashManagement.scala index c7dff05304e..f36a3dee361 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/StashManagement.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/StashManagement.scala @@ -27,17 +27,13 @@ private[akka] trait StashManagement[C, E, S] { protected def isInternalStashEmpty: Boolean = stashState.internalStashBuffer.isEmpty - /** - * Stash a command to the internal stash buffer, which is used while waiting for persist to be completed. - */ + /** Stash a command to the internal stash buffer, which is used while waiting for persist to be completed. */ protected def stashInternal(msg: InternalProtocol): Behavior[InternalProtocol] = { stash(msg, stashState.internalStashBuffer) Behaviors.same } - /** - * Stash a command to the user stash buffer, which is used when `Stash` effect is used. - */ + /** Stash a command to the user stash buffer, which is used when `Stash` effect is used. 
*/ protected def stashUser(msg: InternalProtocol): Unit = stash(msg, stashState.userStashBuffer) @@ -61,9 +57,7 @@ private[akka] trait StashManagement[C, E, S] { } } - /** - * `tryUnstashOne` is called at the end of processing each command, published event, or when persist is completed - */ + /** `tryUnstashOne` is called at the end of processing each command, published event, or when persist is completed */ protected def tryUnstashOne(behavior: Behavior[InternalProtocol]): Behavior[InternalProtocol] = { val buffer = if (stashState.isUnstashAllInProgress) stashState.userStashBuffer @@ -79,9 +73,7 @@ private[akka] trait StashManagement[C, E, S] { } - /** - * @return false if `tryUnstashOne` will unstash a message - */ + /** @return false if `tryUnstashOne` will unstash a message */ protected def isStashEmpty: Boolean = if (stashState.isUnstashAllInProgress) stashState.userStashBuffer.isEmpty else stashState.internalStashBuffer.isEmpty diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/VersionVector.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/VersionVector.scala index 11de33dbaad..1e446b07864 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/VersionVector.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/VersionVector.scala @@ -40,9 +40,7 @@ private[akka] object VersionVector { case object Same extends Ordering case object Concurrent extends Ordering - /** - * Marker to ensure that we do a full order comparison instead of bailing out early. - */ + /** Marker to ensure that we do a full order comparison instead of bailing out early. */ private case object FullOrder extends Ordering /** INTERNAL API */ @@ -51,9 +49,7 @@ private[akka] object VersionVector { final val EndMarker = Long.MinValue } - /** - * Marker to signal that we have reached the end of a version vector. 
- */ + /** Marker to signal that we have reached the end of a version vector. */ private val cmpEndMarker = (null, Timestamp.EndMarker) } @@ -79,50 +75,34 @@ private[akka] sealed abstract class VersionVector { import VersionVector._ - /** - * Increment the version for the key passed as argument. Returns a new VersionVector. - */ + /** Increment the version for the key passed as argument. Returns a new VersionVector. */ def +(key: String): VersionVector = increment(key) - /** - * Increment the version for the key passed as argument. Returns a new VersionVector. - */ + /** Increment the version for the key passed as argument. Returns a new VersionVector. */ def increment(key: String): VersionVector def updated(key: String, version: Long): VersionVector def isEmpty: Boolean - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def size: Int def versionAt(key: String): Long - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def contains(key: String): Boolean - /** - * Returns true if this and that are concurrent else false. - */ + /** Returns true if this and that are concurrent else false. */ def <>(that: VersionVector): Boolean = compareOnlyTo(that, Concurrent) eq Concurrent - /** - * Returns true if this is before that else false. - */ + /** Returns true if this is before that else false. */ def <(that: VersionVector): Boolean = compareOnlyTo(that, Before) eq Before - /** - * Returns true if this is after that else false. - */ + /** Returns true if this is after that else false. */ def >(that: VersionVector): Boolean = compareOnlyTo(that, After) eq After - /** - * Returns true if this VersionVector has the same history as the 'that' VersionVector else false. - */ + /** Returns true if this VersionVector has the same history as the 'that' VersionVector else false. 
*/ def ==(that: VersionVector): Boolean = compareOnlyTo(that, Same) eq Same /** @@ -185,9 +165,7 @@ private[akka] sealed abstract class VersionVector { else compare(this.versionsIterator, that.versionsIterator, if (order eq Concurrent) FullOrder else order) } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def versionsIterator: Iterator[(String, Long)] /** @@ -208,9 +186,7 @@ private[akka] sealed abstract class VersionVector { } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class OneVersionVector private[akka] (key: String, version: Long) extends VersionVector { import VersionVector.Timestamp @@ -264,9 +240,7 @@ private[akka] sealed abstract class VersionVector { // TODO we could add more specialized/optimized implementations for 2 and 3 entries, because // that will be the typical number of data centers -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class ManyVersionVector(versions: TreeMap[String, Long]) extends VersionVector { import VersionVector.Timestamp diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/CommandHandler.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/CommandHandler.scala index 6ac4fcc4e7d..404fe9c0d99 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/CommandHandler.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/CommandHandler.scala @@ -5,7 +5,7 @@ package akka.persistence.typed.javadsl import java.util.Objects -import java.util.function.{ BiFunction, Predicate, Supplier, Function => JFunction } +import java.util.function.{ BiFunction, Function => JFunction, Predicate, Supplier } import scala.compat.java8.FunctionConverters._ @@ -179,9 +179,7 @@ object CommandHandlerBuilderByState { statePredicate: Predicate[State]): CommandHandlerBuilderByState[Command, Event, State, State] = new 
CommandHandlerBuilderByState(classOf[Any].asInstanceOf[Class[State]], statePredicate) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private final case class CommandHandlerCase[Command, Event, State]( commandPredicate: Command => Boolean, statePredicate: State => Boolean, @@ -198,12 +196,12 @@ final class CommandHandlerBuilderByState[Command, Event, S <: State, State] @Int private def addCase(predicate: Command => Boolean, handler: BiFunction[S, Command, Effect[Event, State]]): Unit = { cases = CommandHandlerCase[Command, Event, State]( - commandPredicate = predicate, - statePredicate = state => - if (state == null) statePredicate.test(state.asInstanceOf[S]) - else - statePredicate.test(state.asInstanceOf[S]) && stateClass.isAssignableFrom(state.getClass), - handler.asInstanceOf[BiFunction[State, Command, Effect[Event, State]]]) :: cases + commandPredicate = predicate, + statePredicate = state => + if (state == null) statePredicate.test(state.asInstanceOf[S]) + else + statePredicate.test(state.asInstanceOf[S]) && stateClass.isAssignableFrom(state.getClass), + handler.asInstanceOf[BiFunction[State, Command, Effect[Event, State]]]) :: cases } /** @@ -233,9 +231,11 @@ final class CommandHandlerBuilderByState[Command, Event, S <: State, State] @Int def onCommand( predicate: Predicate[Command], handler: JFunction[Command, Effect[Event, State]]): CommandHandlerBuilderByState[Command, Event, S, State] = { - addCase(cmd => predicate.test(cmd), new BiFunction[S, Command, Effect[Event, State]] { - override def apply(state: S, cmd: Command): Effect[Event, State] = handler(cmd) - }) + addCase( + cmd => predicate.test(cmd), + new BiFunction[S, Command, Effect[Event, State]] { + override def apply(state: S, cmd: Command): Effect[Event, State] = handler(cmd) + }) this } @@ -268,9 +268,11 @@ final class CommandHandlerBuilderByState[Command, Event, S <: State, State] @Int def onCommand[C <: Command]( commandClass: Class[C], handler: JFunction[C, Effect[Event, 
State]]): CommandHandlerBuilderByState[Command, Event, S, State] = { - onCommand[C](commandClass, new BiFunction[S, C, Effect[Event, State]] { - override def apply(state: S, cmd: C): Effect[Event, State] = handler(cmd) - }) + onCommand[C]( + commandClass, + new BiFunction[S, C, Effect[Event, State]] { + override def apply(state: S, cmd: C): Effect[Event, State] = handler(cmd) + }) } /** @@ -285,9 +287,11 @@ final class CommandHandlerBuilderByState[Command, Event, S <: State, State] @Int def onCommand[C <: Command]( commandClass: Class[C], handler: Supplier[Effect[Event, State]]): CommandHandlerBuilderByState[Command, Event, S, State] = { - onCommand[C](commandClass, new BiFunction[S, C, Effect[Event, State]] { - override def apply(state: S, cmd: C): Effect[Event, State] = handler.get() - }) + onCommand[C]( + commandClass, + new BiFunction[S, C, Effect[Event, State]] { + override def apply(state: S, cmd: C): Effect[Event, State] = handler.get() + }) } /** @@ -328,9 +332,11 @@ final class CommandHandlerBuilderByState[Command, Event, S <: State, State] @Int * @return A CommandHandler from the appended states. */ def onAnyCommand(handler: JFunction[Command, Effect[Event, State]]): CommandHandler[Command, Event, State] = { - addCase(_ => true, new BiFunction[S, Command, Effect[Event, State]] { - override def apply(state: S, cmd: Command): Effect[Event, State] = handler(cmd) - }) + addCase( + _ => true, + new BiFunction[S, Command, Effect[Event, State]] { + override def apply(state: S, cmd: Command): Effect[Event, State] = handler(cmd) + }) build() } @@ -352,9 +358,11 @@ final class CommandHandlerBuilderByState[Command, Event, S <: State, State] @Int * @return A CommandHandler from the appended states. 
*/ def onAnyCommand(handler: Supplier[Effect[Event, State]]): CommandHandler[Command, Event, State] = { - addCase(_ => true, new BiFunction[S, Command, Effect[Event, State]] { - override def apply(state: S, cmd: Command): Effect[Event, State] = handler.get() - }) + addCase( + _ => true, + new BiFunction[S, Command, Effect[Event, State]] { + override def apply(state: S, cmd: Command): Effect[Event, State] = handler.get() + }) build() } diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/CommandHandlerWithReply.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/CommandHandlerWithReply.scala index 5bf3d6cb3d9..131dc03cc4b 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/CommandHandlerWithReply.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/CommandHandlerWithReply.scala @@ -5,7 +5,7 @@ package akka.persistence.typed.javadsl import java.util.Objects -import java.util.function.{ BiFunction, Predicate, Supplier, Function => JFunction } +import java.util.function.{ BiFunction, Function => JFunction, Predicate, Supplier } import scala.compat.java8.FunctionConverters._ @@ -189,9 +189,7 @@ object CommandHandlerWithReplyBuilderByState { statePredicate: Predicate[State]): CommandHandlerWithReplyBuilderByState[Command, Event, State, State] = new CommandHandlerWithReplyBuilderByState(classOf[Any].asInstanceOf[Class[State]], statePredicate) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private final case class CommandHandlerCase[Command, Event, State]( commandPredicate: Command => Boolean, statePredicate: State => Boolean, @@ -210,12 +208,12 @@ final class CommandHandlerWithReplyBuilderByState[Command, Event, S <: State, St predicate: Command => Boolean, handler: BiFunction[S, Command, ReplyEffect[Event, State]]): Unit = { cases = CommandHandlerCase[Command, Event, State]( - commandPredicate = predicate, - statePredicate = state => - if (state 
== null) statePredicate.test(state.asInstanceOf[S]) - else - statePredicate.test(state.asInstanceOf[S]) && stateClass.isAssignableFrom(state.getClass), - handler.asInstanceOf[BiFunction[State, Command, ReplyEffect[Event, State]]]) :: cases + commandPredicate = predicate, + statePredicate = state => + if (state == null) statePredicate.test(state.asInstanceOf[S]) + else + statePredicate.test(state.asInstanceOf[S]) && stateClass.isAssignableFrom(state.getClass), + handler.asInstanceOf[BiFunction[State, Command, ReplyEffect[Event, State]]]) :: cases } /** @@ -243,9 +241,11 @@ final class CommandHandlerWithReplyBuilderByState[Command, Event, S <: State, St */ def onCommand(predicate: Predicate[Command], handler: JFunction[Command, ReplyEffect[Event, State]]) : CommandHandlerWithReplyBuilderByState[Command, Event, S, State] = { - addCase(cmd => predicate.test(cmd), new BiFunction[S, Command, ReplyEffect[Event, State]] { - override def apply(state: S, cmd: Command): ReplyEffect[Event, State] = handler(cmd) - }) + addCase( + cmd => predicate.test(cmd), + new BiFunction[S, Command, ReplyEffect[Event, State]] { + override def apply(state: S, cmd: Command): ReplyEffect[Event, State] = handler(cmd) + }) this } @@ -276,9 +276,11 @@ final class CommandHandlerWithReplyBuilderByState[Command, Event, S <: State, St */ def onCommand[C <: Command](commandClass: Class[C], handler: JFunction[C, ReplyEffect[Event, State]]) : CommandHandlerWithReplyBuilderByState[Command, Event, S, State] = { - onCommand[C](commandClass, new BiFunction[S, C, ReplyEffect[Event, State]] { - override def apply(state: S, cmd: C): ReplyEffect[Event, State] = handler(cmd) - }) + onCommand[C]( + commandClass, + new BiFunction[S, C, ReplyEffect[Event, State]] { + override def apply(state: S, cmd: C): ReplyEffect[Event, State] = handler(cmd) + }) } /** @@ -293,9 +295,11 @@ final class CommandHandlerWithReplyBuilderByState[Command, Event, S <: State, St def onCommand[C <: Command]( commandClass: Class[C], handler: 
Supplier[ReplyEffect[Event, State]]): CommandHandlerWithReplyBuilderByState[Command, Event, S, State] = { - onCommand[C](commandClass, new BiFunction[S, C, ReplyEffect[Event, State]] { - override def apply(state: S, cmd: C): ReplyEffect[Event, State] = handler.get() - }) + onCommand[C]( + commandClass, + new BiFunction[S, C, ReplyEffect[Event, State]] { + override def apply(state: S, cmd: C): ReplyEffect[Event, State] = handler.get() + }) } /** @@ -338,9 +342,11 @@ final class CommandHandlerWithReplyBuilderByState[Command, Event, S <: State, St */ def onAnyCommand( handler: JFunction[Command, ReplyEffect[Event, State]]): CommandHandlerWithReply[Command, Event, State] = { - addCase(_ => true, new BiFunction[S, Command, ReplyEffect[Event, State]] { - override def apply(state: S, cmd: Command): ReplyEffect[Event, State] = handler(cmd) - }) + addCase( + _ => true, + new BiFunction[S, Command, ReplyEffect[Event, State]] { + override def apply(state: S, cmd: Command): ReplyEffect[Event, State] = handler(cmd) + }) build() } @@ -362,9 +368,11 @@ final class CommandHandlerWithReplyBuilderByState[Command, Event, S <: State, St * @return A CommandHandlerWithReply from the appended states. 
*/ def onAnyCommand(handler: Supplier[ReplyEffect[Event, State]]): CommandHandlerWithReply[Command, Event, State] = { - addCase(_ => true, new BiFunction[S, Command, ReplyEffect[Event, State]] { - override def apply(state: S, cmd: Command): ReplyEffect[Event, State] = handler.get() - }) + addCase( + _ => true, + new BiFunction[S, Command, ReplyEffect[Event, State]] { + override def apply(state: S, cmd: Command): ReplyEffect[Event, State] = handler.get() + }) build() } diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/Effect.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/Effect.scala index bad4aae01f8..1d803b0f698 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/Effect.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/Effect.scala @@ -12,9 +12,7 @@ import akka.persistence.typed.internal._ import akka.persistence.typed.internal.SideEffect import akka.util.ccompat.JavaConverters._ -/** - * INTERNAL API: see `class EffectFactories` - */ +/** INTERNAL API: see `class EffectFactories` */ @InternalApi private[akka] object EffectFactories extends EffectFactories[Nothing, Nothing] /** @@ -25,9 +23,7 @@ import akka.util.ccompat.JavaConverters._ */ @DoNotInherit sealed class EffectFactories[Event, State] { - /** - * Persist a single event - */ + /** Persist a single event */ final def persist(event: Event): EffectBuilder[Event, State] = Persist(event) /** @@ -37,19 +33,13 @@ import akka.util.ccompat.JavaConverters._ */ final def persist(events: java.util.List[Event]): EffectBuilder[Event, State] = PersistAll(events.asScala.toVector) - /** - * Do not persist anything - */ + /** Do not persist anything */ def none(): EffectBuilder[Event, State] = PersistNothing.asInstanceOf[EffectBuilder[Event, State]] - /** - * Stop this persistent actor - */ + /** Stop this persistent actor */ def stop(): EffectBuilder[Event, State] = none().thenStop() - /** - * This 
command is not handled, but it is not an error that it isn't. - */ + /** This command is not handled, but it is not an error that it isn't. */ def unhandled(): EffectBuilder[Event, State] = Unhandled.asInstanceOf[EffectBuilder[Event, State]] /** @@ -91,9 +81,11 @@ import akka.util.ccompat.JavaConverters._ * finding mistakes. */ def reply[ReplyMessage](replyTo: ActorRef[ReplyMessage], replyWithMessage: ReplyMessage): ReplyEffect[Event, State] = - none().thenReply[ReplyMessage](replyTo, new function.Function[State, ReplyMessage] { - override def apply(param: State): ReplyMessage = replyWithMessage - }) + none().thenReply[ReplyMessage]( + replyTo, + new function.Function[State, ReplyMessage] { + override def apply(param: State): ReplyMessage = replyWithMessage + }) /** * When [[EventSourcedBehaviorWithEnforcedReplies]] is used there will be compilation errors if the returned effect @@ -134,14 +126,11 @@ import akka.util.ccompat.JavaConverters._ * but if a known subtype of `State` is expected that can be specified instead (preferably by * explicitly typing the lambda parameter like so: `thenRun((SubState state) -> { ... })`). * If the state is not of the expected type an [[java.lang.ClassCastException]] is thrown. - * */ final def thenRun[NewState <: State](callback: function.Procedure[NewState]): EffectBuilder[Event, State] = CompositeEffect(this, SideEffect[State](s => callback.apply(s.asInstanceOf[NewState]))) - /** - * Run the given callback. Callbacks are run sequentially. - */ + /** Run the given callback. Callbacks are run sequentially. 
*/ final def thenRun(callback: function.Effect): EffectBuilder[Event, State] = CompositeEffect(this, SideEffect[State]((_: State) => callback.apply())) diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/EventHandler.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/EventHandler.scala index 198329f8936..f4daf4533a1 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/EventHandler.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/EventHandler.scala @@ -5,7 +5,7 @@ package akka.persistence.typed.javadsl import java.util.Objects -import java.util.function.{ BiFunction, Predicate, Supplier, Function => JFunction } +import java.util.function.{ BiFunction, Function => JFunction, Predicate, Supplier } import scala.compat.java8.FunctionConverters._ @@ -178,9 +178,7 @@ object EventHandlerBuilderByState { def builder[State, Event](statePredicate: Predicate[State]): EventHandlerBuilderByState[State, State, Event] = new EventHandlerBuilderByState(classOf[Any].asInstanceOf[Class[State]], statePredicate) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private final case class EventHandlerCase[State, Event]( statePredicate: State => Boolean, eventPredicate: Event => Boolean, @@ -197,11 +195,11 @@ final class EventHandlerBuilderByState[S <: State, State, Event]( private def addCase(eventPredicate: Event => Boolean, handler: BiFunction[State, Event, State]): Unit = { cases = EventHandlerCase[State, Event]( - statePredicate = state => - if (state == null) statePredicate.test(state.asInstanceOf[S]) - else statePredicate.test(state.asInstanceOf[S]) && stateClass.isAssignableFrom(state.getClass), - eventPredicate = eventPredicate, - handler) :: cases + statePredicate = state => + if (state == null) statePredicate.test(state.asInstanceOf[S]) + else statePredicate.test(state.asInstanceOf[S]) && stateClass.isAssignableFrom(state.getClass), + eventPredicate = 
eventPredicate, + handler) :: cases } /** @@ -231,9 +229,11 @@ final class EventHandlerBuilderByState[S <: State, State, Event]( def onEvent[E <: Event]( eventClass: Class[E], handler: JFunction[E, State]): EventHandlerBuilderByState[S, State, Event] = { - onEvent[E](eventClass, new BiFunction[S, E, State] { - override def apply(state: S, event: E): State = handler(event) - }) + onEvent[E]( + eventClass, + new BiFunction[S, E, State] { + override def apply(state: S, event: E): State = handler(event) + }) } /** diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/EventSourcedBehavior.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/EventSourcedBehavior.scala index 838407edaab..ebf7d11cad1 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/EventSourcedBehavior.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/EventSourcedBehavior.scala @@ -23,9 +23,7 @@ abstract class EventSourcedBehavior[Command, Event, State] private[akka] ( onPersistFailure: Optional[BackoffSupervisorStrategy]) extends DeferredBehavior[Command] { - /** - * @param persistenceId stable unique identifier for the event sourced behavior - */ + /** @param persistenceId stable unique identifier for the event sourced behavior */ def this(persistenceId: PersistenceId) = { this(persistenceId, Optional.empty[BackoffSupervisorStrategy]) } @@ -91,33 +89,23 @@ abstract class EventSourcedBehavior[Command, Event, State] private[akka] ( */ protected def signalHandler(): SignalHandler[State] = SignalHandler.empty[State] - /** - * @return A new, mutable signal handler builder - */ + /** @return A new, mutable signal handler builder */ protected final def newSignalHandlerBuilder(): SignalHandlerBuilder[State] = SignalHandlerBuilder.builder[State] - /** - * @return A new, mutable, command handler builder - */ + /** @return A new, mutable, command handler builder */ protected def 
newCommandHandlerBuilder(): CommandHandlerBuilder[Command, Event, State] = { CommandHandlerBuilder.builder[Command, Event, State]() } - /** - * @return A new, mutable, event handler builder - */ + /** @return A new, mutable, event handler builder */ protected final def newEventHandlerBuilder(): EventHandlerBuilder[State, Event] = EventHandlerBuilder.builder[State, Event]() - /** - * Override and define the journal plugin id that this actor should use instead of the default. - */ + /** Override and define the journal plugin id that this actor should use instead of the default. */ def journalPluginId: String = "" - /** - * Override and define the snapshot store plugin id that this actor should use instead of the default. - */ + /** Override and define the snapshot store plugin id that this actor should use instead of the default. */ def snapshotPluginId: String = "" /** @@ -189,15 +177,11 @@ abstract class EventSourcedBehavior[Command, Event, State] private[akka] ( */ def snapshotAdapter(): SnapshotAdapter[State] = NoOpSnapshotAdapter.instance[State] - /** - * INTERNAL API: DeferredBehavior init, not for user extension - */ + /** INTERNAL API: DeferredBehavior init, not for user extension */ @InternalApi override def apply(context: typed.TypedActorContext[Command]): Behavior[Command] = createEventSourcedBehavior() - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] final def createEventSourcedBehavior() : scaladsl.EventSourcedBehavior[Command, Event, State] = { val snapshotWhen: (State, Event, Long) => Boolean = (state, event, seqNr) => shouldSnapshot(state, event, seqNr) @@ -245,9 +229,7 @@ abstract class EventSourcedBehavior[Command, Event, State] private[akka] ( } - /** - * The last sequence number that was persisted, can only be called from inside the handlers of an `EventSourcedBehavior` - */ + /** The last sequence number that was persisted, can only be called from inside the handlers of an `EventSourcedBehavior` */ final def 
lastSequenceNumber(ctx: ActorContext[_]): Long = { scaladsl.EventSourcedBehavior.lastSequenceNumber(ctx.asScala) } @@ -290,9 +272,7 @@ abstract class EventSourcedBehaviorWithEnforcedReplies[Command, Event, State]( */ override protected def commandHandler(): CommandHandlerWithReply[Command, Event, State] - /** - * @return A new, mutable, command handler builder - */ + /** @return A new, mutable, command handler builder */ protected def newCommandHandlerWithReplyBuilder(): CommandHandlerWithReplyBuilder[Command, Event, State] = { CommandHandlerWithReplyBuilder.builder[Command, Event, State]() } diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/PersistentFSMMigration.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/PersistentFSMMigration.scala index 4ed820de78f..f79b3962039 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/PersistentFSMMigration.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/PersistentFSMMigration.scala @@ -13,9 +13,7 @@ import akka.japi.function.Function3 import akka.persistence.typed.SnapshotAdapter import akka.util.JavaDurationConverters._ -/** - * Helper functions for migration from PersistentFSM to Persistence Typed - */ +/** Helper functions for migration from PersistentFSM to Persistence Typed */ object PersistentFSMMigration { /** diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/Recovery.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/Recovery.scala index 803099a898d..08cbf23ceb4 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/Recovery.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/Recovery.scala @@ -8,31 +8,21 @@ import akka.annotation.InternalApi import akka.persistence.typed.SnapshotSelectionCriteria import akka.persistence.typed.internal.{ DefaultRecovery, DisabledRecovery, 
RecoveryWithSnapshotSelectionCriteria } -/** - * Strategy for recovery of snapshots and events. - */ +/** Strategy for recovery of snapshots and events. */ abstract class Recovery { def asScala: akka.persistence.typed.scaladsl.Recovery - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def toClassic: akka.persistence.Recovery } -/** - * Strategy for recovery of snapshots and events. - */ +/** Strategy for recovery of snapshots and events. */ object Recovery { - /** - * Snapshots and events are recovered - */ + /** Snapshots and events are recovered */ val default: Recovery = DefaultRecovery - /** - * Neither snapshots nor events are recovered - */ + /** Neither snapshots nor events are recovered */ val disabled: Recovery = DisabledRecovery /** diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/ReplicatedEventSourcedBehavior.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/ReplicatedEventSourcedBehavior.scala index 43bf5ad6029..7da7c3048e2 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/ReplicatedEventSourcedBehavior.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/ReplicatedEventSourcedBehavior.scala @@ -12,9 +12,7 @@ import akka.actor.typed.TypedActorContext import akka.annotation.InternalApi import akka.persistence.typed.internal.ReplicationContextImpl -/** - * Base class for replicated event sourced behaviors. - */ +/** Base class for replicated event sourced behaviors. 
*/ abstract class ReplicatedEventSourcedBehavior[Command, Event, State]( replicationContext: ReplicationContext, onPersistFailure: Optional[BackoffSupervisorStrategy]) @@ -30,12 +28,10 @@ abstract class ReplicatedEventSourcedBehavior[Command, Event, State]( protected def getReplicationContext(): ReplicationContext = replicationContext - /** - * INTERNAL API: DeferredBehavior init, not for user extension - */ + /** INTERNAL API: DeferredBehavior init, not for user extension */ @InternalApi override def apply(context: TypedActorContext[Command]): Behavior[Command] = { createEventSourcedBehavior() - // context not user extendable so there should never be any other impls + // context not user extendable so there should never be any other impls .withReplication(replicationContext.asInstanceOf[ReplicationContextImpl]) .withEventPublishing(withEventPublishing) } diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/ReplicatedEventSourcing.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/ReplicatedEventSourcing.scala index a57f5e6b305..2008b979dd6 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/ReplicatedEventSourcing.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/ReplicatedEventSourcing.scala @@ -25,24 +25,16 @@ trait ReplicationContext { def replicationId: ReplicationId - /** - * @return The replica id of this replicated event sourced actor - */ + /** @return The replica id of this replicated event sourced actor */ def replicaId: ReplicaId - /** - * @return The ids of all replicas of this replicated event sourced actor - */ + /** @return The ids of all replicas of this replicated event sourced actor */ def getAllReplicas: JSet[ReplicaId] - /** - * @return The unique id of this replica, including the replica id - */ + /** @return The unique id of this replica, including the replica id */ def persistenceId: PersistenceId - /** - * @return The unique 
id of this replica, not including the replica id - */ + /** @return The unique id of this replica, not including the replica id */ def entityId: String /** @@ -63,9 +55,7 @@ trait ReplicationContext { */ def concurrent: Boolean - /** - * @return a timestamp that will always be increasing (is monotonic) - */ + /** @return a timestamp that will always be increasing (is monotonic) */ def currentTimeMillis(): Long } diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/RetentionCriteria.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/RetentionCriteria.scala index 933fe87bc0d..78f8e9b1489 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/RetentionCriteria.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/RetentionCriteria.scala @@ -8,21 +8,15 @@ import akka.annotation.DoNotInherit import akka.persistence.typed.internal.DisabledRetentionCriteria import akka.persistence.typed.internal.SnapshotCountRetentionCriteriaImpl -/** - * Criteria for retention/deletion of snapshots and events. - */ +/** Criteria for retention/deletion of snapshots and events. */ abstract class RetentionCriteria { def asScala: akka.persistence.typed.scaladsl.RetentionCriteria } -/** - * Criteria for retention/deletion of snapshots and events. - */ +/** Criteria for retention/deletion of snapshots and events. */ object RetentionCriteria { - /** - * Snapshots are not saved and deleted automatically, events are not deleted. - */ + /** Snapshots are not saved and deleted automatically, events are not deleted. 
*/ val disabled: RetentionCriteria = DisabledRetentionCriteria /** diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/SignalHandler.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/SignalHandler.scala index 7fa09de8b9a..96a831f6e74 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/SignalHandler.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/SignalHandler.scala @@ -18,15 +18,11 @@ object SignalHandler { final class SignalHandler[State](_handler: PartialFunction[(State, Signal), Unit]) { - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def isEmpty: Boolean = _handler eq PartialFunction.empty - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def handler: PartialFunction[(State, Signal), Unit] = _handler } @@ -44,9 +40,7 @@ final class SignalHandlerBuilder[State] { private var handler: PartialFunction[(State, Signal), Unit] = PartialFunction.empty - /** - * If the behavior receives a signal of type `T`, `callback` is invoked with the signal instance as input. - */ + /** If the behavior receives a signal of type `T`, `callback` is invoked with the signal instance as input. */ def onSignal[T <: Signal](signalType: Class[T], callback: BiConsumer[State, T]): SignalHandlerBuilder[State] = { val newPF: PartialFunction[(State, Signal), Unit] = { case (state, t) if signalType.isInstance(t) => @@ -56,13 +50,10 @@ final class SignalHandlerBuilder[State] { this } - /** - * If the behavior receives exactly the signal `signal`, `callback` is invoked. - */ + /** If the behavior receives exactly the signal `signal`, `callback` is invoked. 
*/ def onSignal[T <: Signal](signal: T, callback: Consumer[State]): SignalHandlerBuilder[State] = { - val newPF: PartialFunction[(State, Signal), Unit] = { - case (state, `signal`) => - callback.accept(state) + val newPF: PartialFunction[(State, Signal), Unit] = { case (state, `signal`) => + callback.accept(state) } handler = newPF.orElse(handler) this diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/scaladsl/Effect.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/scaladsl/Effect.scala index 4033e3eb0c0..ab1f1a58ef2 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/scaladsl/Effect.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/scaladsl/Effect.scala @@ -11,9 +11,7 @@ import akka.annotation.DoNotInherit import akka.persistence.typed.internal._ import akka.persistence.typed.internal.SideEffect -/** - * Factory methods for creating [[Effect]] directives - how an event sourced actor reacts on a command. - */ +/** Factory methods for creating [[Effect]] directives - how an event sourced actor reacts on a command. */ object Effect { /** @@ -135,9 +133,7 @@ trait EffectBuilder[+Event, State] extends Effect[Event, State] { /* All events that will be persisted in this effect */ def events: im.Seq[Event] - /** - * Run the given callback. Callbacks are run sequentially. - */ + /** Run the given callback. Callbacks are run sequentially. 
*/ def thenRun(callback: State => Unit): EffectBuilder[Event, State] /** The side effect is to stop the actor */ diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/scaladsl/EventSourcedBehavior.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/scaladsl/EventSourcedBehavior.scala index d750a7a9175..22e198f77ee 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/scaladsl/EventSourcedBehavior.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/scaladsl/EventSourcedBehavior.scala @@ -99,9 +99,7 @@ object EventSourcedBehavior { } - /** - * The last sequence number that was persisted, can only be called from inside the handlers of an `EventSourcedBehavior` - */ + /** The last sequence number that was persisted, can only be called from inside the handlers of an `EventSourcedBehavior` */ def lastSequenceNumber(context: ActorContext[_]): Long = { @tailrec def extractConcreteBehavior(beh: Behavior[_]): Behavior[_] = @@ -137,19 +135,13 @@ object EventSourcedBehavior { */ def receiveSignal(signalHandler: PartialFunction[(State, Signal), Unit]): EventSourcedBehavior[Command, Event, State] - /** - * @return The currently defined signal handler or an empty handler if no custom handler previously defined - */ + /** @return The currently defined signal handler or an empty handler if no custom handler previously defined */ def signalHandler: PartialFunction[(State, Signal), Unit] - /** - * Change the journal plugin id that this actor should use. - */ + /** Change the journal plugin id that this actor should use. */ def withJournalPluginId(id: String): EventSourcedBehavior[Command, Event, State] - /** - * Change the snapshot store plugin id that this actor should use. - */ + /** Change the snapshot store plugin id that this actor should use. 
*/ def withSnapshotPluginId(id: String): EventSourcedBehavior[Command, Event, State] /** @@ -185,9 +177,7 @@ object EventSourcedBehavior { */ def withRetention(criteria: RetentionCriteria): EventSourcedBehavior[Command, Event, State] - /** - * The `tagger` function should give event tags, which will be used in persistence query - */ + /** The `tagger` function should give event tags, which will be used in persistence query */ def withTagger(tagger: Event => Set[String]): EventSourcedBehavior[Command, Event, State] /** @@ -228,15 +218,11 @@ object EventSourcedBehavior { */ def withRecovery(recovery: Recovery): EventSourcedBehavior[Command, Event, State] - /** - * Publish events to the system event stream as [[akka.persistence.typed.PublishedEvent]] after they have been persisted - */ + /** Publish events to the system event stream as [[akka.persistence.typed.PublishedEvent]] after they have been persisted */ @ApiMayChange def withEventPublishing(enabled: Boolean): EventSourcedBehavior[Command, Event, State] - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def withReplication(context: ReplicationContextImpl): EventSourcedBehavior[Command, Event, State] diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/scaladsl/PersistentFSMMigration.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/scaladsl/PersistentFSMMigration.scala index 75d7f7eed08..0aaacfcd668 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/scaladsl/PersistentFSMMigration.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/scaladsl/PersistentFSMMigration.scala @@ -9,9 +9,7 @@ import scala.concurrent.duration.FiniteDuration import akka.persistence.fsm.PersistentFSM.PersistentFSMSnapshot import akka.persistence.typed.SnapshotAdapter -/** - * Helper functions for migration from PersistentFSM to Persistence Typed - */ +/** Helper functions for migration from PersistentFSM to Persistence Typed */ 
object PersistentFSMMigration { /** diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/scaladsl/Recovery.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/scaladsl/Recovery.scala index f10b83ba8dc..4761226bfaf 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/scaladsl/Recovery.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/scaladsl/Recovery.scala @@ -8,32 +8,22 @@ import akka.annotation.InternalApi import akka.persistence.typed.SnapshotSelectionCriteria import akka.persistence.typed.internal.{ DefaultRecovery, DisabledRecovery, RecoveryWithSnapshotSelectionCriteria } -/** - * Strategy for recovery of snapshots and events. - */ +/** Strategy for recovery of snapshots and events. */ trait Recovery { def asJava: akka.persistence.typed.javadsl.Recovery - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def toClassic: akka.persistence.Recovery } -/** - * Strategy for recovery of snapshots and events. - */ +/** Strategy for recovery of snapshots and events. 
*/ object Recovery { - /** - * Snapshots and events are recovered - */ + /** Snapshots and events are recovered */ val default: Recovery = DefaultRecovery - /** - * Neither snapshots nor events are recovered - */ + /** Neither snapshots nor events are recovered */ val disabled: Recovery = DisabledRecovery /** diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/scaladsl/ReplicatedEventSourcing.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/scaladsl/ReplicatedEventSourcing.scala index 60915b49a2f..71f323f02cd 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/scaladsl/ReplicatedEventSourcing.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/scaladsl/ReplicatedEventSourcing.scala @@ -21,24 +21,16 @@ trait ReplicationContext { def replicationId: ReplicationId - /** - * @return The unique id of this replica, including the replica id - */ + /** @return The unique id of this replica, including the replica id */ def persistenceId: PersistenceId - /** - * @return The replica id of this replicated event sourced actor - */ + /** @return The replica id of this replicated event sourced actor */ def replicaId: ReplicaId = replicationId.replicaId - /** - * @return The ids of all replicas of this replicated event sourced actor - */ + /** @return The ids of all replicas of this replicated event sourced actor */ def allReplicas: Set[ReplicaId] - /** - * @return The entity id of this replicated event sourced actor (not including the replica id) - */ + /** @return The entity id of this replicated event sourced actor (not including the replica id) */ def entityId: String = replicationId.entityId /** @@ -59,9 +51,7 @@ trait ReplicationContext { */ def recoveryRunning: Boolean - /** - * @return a timestamp that will always be increasing (is monotonic) - */ + /** @return a timestamp that will always be increasing (is monotonic) */ def currentTimeMillis(): Long } diff --git 
a/akka-persistence-typed/src/main/scala/akka/persistence/typed/scaladsl/RetentionCriteria.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/scaladsl/RetentionCriteria.scala index 4edd8ded9aa..fa2a802b829 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/scaladsl/RetentionCriteria.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/scaladsl/RetentionCriteria.scala @@ -8,21 +8,15 @@ import akka.annotation.DoNotInherit import akka.persistence.typed.internal.DisabledRetentionCriteria import akka.persistence.typed.internal.SnapshotCountRetentionCriteriaImpl -/** - * Criteria for retention/deletion of snapshots and events. - */ +/** Criteria for retention/deletion of snapshots and events. */ trait RetentionCriteria { def asJava: akka.persistence.typed.javadsl.RetentionCriteria } -/** - * Criteria for retention/deletion of snapshots and events. - */ +/** Criteria for retention/deletion of snapshots and events. */ object RetentionCriteria { - /** - * Snapshots are not saved and deleted automatically, events are not deleted. - */ + /** Snapshots are not saved and deleted automatically, events are not deleted. 
*/ val disabled: RetentionCriteria = DisabledRetentionCriteria /** diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/serialization/ReplicatedEventSourcingSerializer.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/serialization/ReplicatedEventSourcingSerializer.scala index 6b76b97f31d..122dab43e84 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/serialization/ReplicatedEventSourcingSerializer.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/serialization/ReplicatedEventSourcingSerializer.scala @@ -29,9 +29,7 @@ import akka.remote.serialization.WrappedPayloadSupport import akka.serialization.{ BaseSerializer, SerializerWithStringManifest } import akka.util.ccompat.JavaConverters._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ReplicatedEventSourcingSerializer { object Comparator extends Comparator[Payload] { override def compare(a: Payload, b: Payload): Int = { @@ -58,9 +56,7 @@ import akka.util.ccompat.JavaConverters._ } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class ReplicatedEventSourcingSerializer(val system: ExtendedActorSystem) extends SerializerWithStringManifest with BaseSerializer { @@ -352,10 +348,10 @@ import akka.util.ccompat.JavaConverters._ def orsetFromProto(orset: ReplicatedEventSourcing.ORSet): ORSet[Any] = { val elements: Iterator[Any] = - (orset.getStringElementsList.iterator.asScala ++ + orset.getStringElementsList.iterator.asScala ++ orset.getIntElementsList.iterator.asScala ++ orset.getLongElementsList.iterator.asScala ++ - orset.getOtherElementsList.iterator.asScala.map(wrappedSupport.deserializePayload)) + orset.getOtherElementsList.iterator.asScala.map(wrappedSupport.deserializePayload) val dots = orset.getDotsList.asScala.map(versionVectorFromProto).iterator val elementsMap = elements.zip(dots).toMap @@ -365,9 +361,8 @@ import akka.util.ccompat.JavaConverters._ def 
versionVectorToProto(versionVector: VersionVector): ReplicatedEventSourcing.VersionVector = { val b = ReplicatedEventSourcing.VersionVector.newBuilder() - versionVector.versionsIterator.foreach { - case (key, value) => - b.addEntries(ReplicatedEventSourcing.VersionVector.Entry.newBuilder().setKey(key).setVersion(value)) + versionVector.versionsIterator.foreach { case (key, value) => + b.addEntries(ReplicatedEventSourcing.VersionVector.Entry.newBuilder().setKey(key).setVersion(value)) } b.build() } @@ -383,7 +378,7 @@ import akka.util.ccompat.JavaConverters._ VersionVector(entries.get(0).getKey, entries.get(0).getVersion) else { val versions = TreeMap.empty[String, Long] ++ versionVector.getEntriesList.asScala.map(entry => - entry.getKey -> entry.getVersion) + entry.getKey -> entry.getVersion) VersionVector(versions) } } diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/DurableStateSignal.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/DurableStateSignal.scala index 54d1927cf8a..40359927ca8 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/DurableStateSignal.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/DurableStateSignal.scala @@ -22,8 +22,6 @@ case object RecoveryCompleted extends RecoveryCompleted { final case class RecoveryFailed(failure: Throwable) extends DurableStateSignal { - /** - * Java API - */ + /** Java API */ def getFailure(): Throwable = failure } diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/internal/BehaviorSetup.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/internal/BehaviorSetup.scala index ee557239249..c13b1459959 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/internal/BehaviorSetup.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/internal/BehaviorSetup.scala @@ -23,9 +23,7 @@ import 
akka.persistence.typed.state.internal.InternalProtocol.RecoveryTimeout import akka.persistence.typed.state.scaladsl.DurableStateBehavior import akka.util.OptionVal -/** - * INTERNAL API: Carry state for the `DurableStateBehavior` implementation behaviors. - */ +/** INTERNAL API: Carry state for the `DurableStateBehavior` implementation behaviors. */ @InternalApi private[akka] final class BehaviorSetup[C, S]( val context: ActorContext[InternalProtocol], @@ -106,9 +104,7 @@ private[akka] final class BehaviorSetup[C, S]( } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object PersistenceMdc { // format: OFF diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/internal/DurableStateBehaviorImpl.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/internal/DurableStateBehaviorImpl.scala index 28f54589162..887927ef64b 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/internal/DurableStateBehaviorImpl.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/internal/DurableStateBehaviorImpl.scala @@ -28,9 +28,7 @@ import akka.util.unused @InternalApi private[akka] object DurableStateBehaviorImpl { - /** - * Used by DurableStateBehaviorTestKit to retrieve the `persistenceId`. - */ + /** Used by DurableStateBehaviorTestKit to retrieve the `persistenceId`. 
*/ final case class GetPersistenceId(replyTo: ActorRef[PersistenceId]) extends Signal /** @@ -120,7 +118,7 @@ private[akka] final case class DurableStateBehaviorImpl[Command, State]( val innerMsg = msg match { case RecoveryPermitter.RecoveryPermitGranted => InternalProtocol.RecoveryPermitGranted case internal: InternalProtocol => internal // such as RecoveryTimeout - case cmd => InternalProtocol.IncomingCommand(cmd.asInstanceOf[Command]) + case cmd => InternalProtocol.IncomingCommand(cmd.asInstanceOf[Command]) } target(ctx, innerMsg) } diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/internal/DurableStateSettings.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/internal/DurableStateSettings.scala index 7ed90738fa4..a6b116f6c3e 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/internal/DurableStateSettings.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/internal/DurableStateSettings.scala @@ -15,9 +15,7 @@ import akka.actor.typed.ActorSystem import akka.annotation.InternalApi import akka.persistence.Persistence -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object DurableStateSettings { def apply( @@ -73,9 +71,7 @@ import akka.persistence.Persistence } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class DurableStateSettings( stashCapacity: Int, @@ -90,15 +86,11 @@ private[akka] final case class DurableStateSettings( "DurableStateBehavior plugin id must not be null; use empty string for 'default' state store") } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] sealed trait StashOverflowStrategy -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object StashOverflowStrategy { case object Drop extends StashOverflowStrategy diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/internal/NoOpSnapshotAdapter.scala 
b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/internal/NoOpSnapshotAdapter.scala index 8d34bf6d8ac..a2f96c2cdc0 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/internal/NoOpSnapshotAdapter.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/internal/NoOpSnapshotAdapter.scala @@ -7,18 +7,14 @@ package akka.persistence.typed.state.internal import akka.annotation.InternalApi import akka.persistence.typed.SnapshotAdapter -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class NoOpSnapshotAdapter extends SnapshotAdapter[Any] { override def toJournal(state: Any): Any = state override def fromJournal(from: Any): Any = from } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object NoOpSnapshotAdapter { val i = new NoOpSnapshotAdapter diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/internal/Running.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/internal/Running.scala index ee8934a5240..adaa3448ebb 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/internal/Running.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/internal/Running.scala @@ -97,7 +97,8 @@ private[akka] object Running { def onCommand(state: RunningState[S], cmd: C): Behavior[InternalProtocol] = { val effect = setup.commandHandler(state.state, cmd) - val (next, doUnstash) = applyEffects(cmd, state, effect.asInstanceOf[EffectImpl[S]]) // TODO can we avoid the cast? + val (next, doUnstash) = + applyEffects(cmd, state, effect.asInstanceOf[EffectImpl[S]]) // TODO can we avoid the cast? 
if (doUnstash) tryUnstashOne(next) else { recursiveUnstashOne = 0 diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/internal/StashManagement.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/internal/StashManagement.scala index 21251df650a..599ffbf44a6 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/internal/StashManagement.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/internal/StashManagement.scala @@ -27,17 +27,13 @@ private[akka] trait StashManagement[C, S] { protected def isInternalStashEmpty: Boolean = stashState.internalStashBuffer.isEmpty - /** - * Stash a command to the internal stash buffer, which is used while waiting for persist to be completed. - */ + /** Stash a command to the internal stash buffer, which is used while waiting for persist to be completed. */ protected def stashInternal(msg: InternalProtocol): Behavior[InternalProtocol] = { stash(msg, stashState.internalStashBuffer) Behaviors.same } - /** - * Stash a command to the user stash buffer, which is used when `Stash` effect is used. - */ + /** Stash a command to the user stash buffer, which is used when `Stash` effect is used. 
*/ protected def stashUser(msg: InternalProtocol): Unit = stash(msg, stashState.userStashBuffer) @@ -61,9 +57,7 @@ private[akka] trait StashManagement[C, S] { } } - /** - * `tryUnstashOne` is called at the end of processing each command, or when persist is completed - */ + /** `tryUnstashOne` is called at the end of processing each command, or when persist is completed */ protected def tryUnstashOne(behavior: Behavior[InternalProtocol]): Behavior[InternalProtocol] = { val buffer = if (stashState.isUnstashAllInProgress) stashState.userStashBuffer @@ -79,9 +73,7 @@ private[akka] trait StashManagement[C, S] { } - /** - * @return false if `tryUnstashOne` will unstash a message - */ + /** @return false if `tryUnstashOne` will unstash a message */ protected def isStashEmpty: Boolean = if (stashState.isUnstashAllInProgress) stashState.userStashBuffer.isEmpty else stashState.internalStashBuffer.isEmpty diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/javadsl/CommandHandler.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/javadsl/CommandHandler.scala index ca827021eb2..b468c7e1c7b 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/javadsl/CommandHandler.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/javadsl/CommandHandler.scala @@ -180,9 +180,7 @@ object CommandHandlerBuilderByState { def builder[Command, State](statePredicate: Predicate[State]): CommandHandlerBuilderByState[Command, State, State] = new CommandHandlerBuilderByState(classOf[Any].asInstanceOf[Class[State]], statePredicate) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private final case class CommandHandlerCase[Command, State]( commandPredicate: Command => Boolean, statePredicate: State => Boolean, @@ -199,12 +197,12 @@ final class CommandHandlerBuilderByState[Command, S <: State, State] @InternalAp private def addCase(predicate: Command => Boolean, handler: BiFunction[S, 
Command, Effect[State]]): Unit = { cases = CommandHandlerCase[Command, State]( - commandPredicate = predicate, - statePredicate = state => - if (state == null) statePredicate.test(state.asInstanceOf[S]) - else - statePredicate.test(state.asInstanceOf[S]) && stateClass.isAssignableFrom(state.getClass), - handler.asInstanceOf[BiFunction[State, Command, Effect[State]]]) :: cases + commandPredicate = predicate, + statePredicate = state => + if (state == null) statePredicate.test(state.asInstanceOf[S]) + else + statePredicate.test(state.asInstanceOf[S]) && stateClass.isAssignableFrom(state.getClass), + handler.asInstanceOf[BiFunction[State, Command, Effect[State]]]) :: cases } /** @@ -234,9 +232,11 @@ final class CommandHandlerBuilderByState[Command, S <: State, State] @InternalAp def onCommand( predicate: Predicate[Command], handler: JFunction[Command, Effect[State]]): CommandHandlerBuilderByState[Command, S, State] = { - addCase(cmd => predicate.test(cmd), new BiFunction[S, Command, Effect[State]] { - override def apply(state: S, cmd: Command): Effect[State] = handler(cmd) - }) + addCase( + cmd => predicate.test(cmd), + new BiFunction[S, Command, Effect[State]] { + override def apply(state: S, cmd: Command): Effect[State] = handler(cmd) + }) this } @@ -269,9 +269,11 @@ final class CommandHandlerBuilderByState[Command, S <: State, State] @InternalAp def onCommand[C <: Command]( commandClass: Class[C], handler: JFunction[C, Effect[State]]): CommandHandlerBuilderByState[Command, S, State] = { - onCommand[C](commandClass, new BiFunction[S, C, Effect[State]] { - override def apply(state: S, cmd: C): Effect[State] = handler(cmd) - }) + onCommand[C]( + commandClass, + new BiFunction[S, C, Effect[State]] { + override def apply(state: S, cmd: C): Effect[State] = handler(cmd) + }) } /** @@ -286,9 +288,11 @@ final class CommandHandlerBuilderByState[Command, S <: State, State] @InternalAp def onCommand[C <: Command]( commandClass: Class[C], handler: Supplier[Effect[State]]): 
CommandHandlerBuilderByState[Command, S, State] = { - onCommand[C](commandClass, new BiFunction[S, C, Effect[State]] { - override def apply(state: S, cmd: C): Effect[State] = handler.get() - }) + onCommand[C]( + commandClass, + new BiFunction[S, C, Effect[State]] { + override def apply(state: S, cmd: C): Effect[State] = handler.get() + }) } /** @@ -329,9 +333,11 @@ final class CommandHandlerBuilderByState[Command, S <: State, State] @InternalAp * @return A CommandHandler from the appended states. */ def onAnyCommand(handler: JFunction[Command, Effect[State]]): CommandHandler[Command, State] = { - addCase(_ => true, new BiFunction[S, Command, Effect[State]] { - override def apply(state: S, cmd: Command): Effect[State] = handler(cmd) - }) + addCase( + _ => true, + new BiFunction[S, Command, Effect[State]] { + override def apply(state: S, cmd: Command): Effect[State] = handler(cmd) + }) build() } @@ -353,9 +359,11 @@ final class CommandHandlerBuilderByState[Command, S <: State, State] @InternalAp * @return A CommandHandler from the appended states. 
*/ def onAnyCommand(handler: Supplier[Effect[State]]): CommandHandler[Command, State] = { - addCase(_ => true, new BiFunction[S, Command, Effect[State]] { - override def apply(state: S, cmd: Command): Effect[State] = handler.get() - }) + addCase( + _ => true, + new BiFunction[S, Command, Effect[State]] { + override def apply(state: S, cmd: Command): Effect[State] = handler.get() + }) build() } diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/javadsl/CommandHandlerWithReply.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/javadsl/CommandHandlerWithReply.scala index bc82e5ff2a2..f0deebb3ba8 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/javadsl/CommandHandlerWithReply.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/javadsl/CommandHandlerWithReply.scala @@ -190,9 +190,7 @@ object CommandHandlerWithReplyBuilderByState { statePredicate: Predicate[State]): CommandHandlerWithReplyBuilderByState[Command, State, State] = new CommandHandlerWithReplyBuilderByState(classOf[Any].asInstanceOf[Class[State]], statePredicate) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private final case class CommandHandlerCase[Command, State]( commandPredicate: Command => Boolean, statePredicate: State => Boolean, @@ -209,12 +207,12 @@ final class CommandHandlerWithReplyBuilderByState[Command, S <: State, State] @I private def addCase(predicate: Command => Boolean, handler: BiFunction[S, Command, ReplyEffect[State]]): Unit = { cases = CommandHandlerCase[Command, State]( - commandPredicate = predicate, - statePredicate = state => - if (state == null) statePredicate.test(state.asInstanceOf[S]) - else - statePredicate.test(state.asInstanceOf[S]) && stateClass.isAssignableFrom(state.getClass), - handler.asInstanceOf[BiFunction[State, Command, ReplyEffect[State]]]) :: cases + commandPredicate = predicate, + statePredicate = state => + if (state == null) 
statePredicate.test(state.asInstanceOf[S]) + else + statePredicate.test(state.asInstanceOf[S]) && stateClass.isAssignableFrom(state.getClass), + handler.asInstanceOf[BiFunction[State, Command, ReplyEffect[State]]]) :: cases } /** @@ -244,9 +242,11 @@ final class CommandHandlerWithReplyBuilderByState[Command, S <: State, State] @I def onCommand( predicate: Predicate[Command], handler: JFunction[Command, ReplyEffect[State]]): CommandHandlerWithReplyBuilderByState[Command, S, State] = { - addCase(cmd => predicate.test(cmd), new BiFunction[S, Command, ReplyEffect[State]] { - override def apply(state: S, cmd: Command): ReplyEffect[State] = handler(cmd) - }) + addCase( + cmd => predicate.test(cmd), + new BiFunction[S, Command, ReplyEffect[State]] { + override def apply(state: S, cmd: Command): ReplyEffect[State] = handler(cmd) + }) this } @@ -279,9 +279,11 @@ final class CommandHandlerWithReplyBuilderByState[Command, S <: State, State] @I def onCommand[C <: Command]( commandClass: Class[C], handler: JFunction[C, ReplyEffect[State]]): CommandHandlerWithReplyBuilderByState[Command, S, State] = { - onCommand[C](commandClass, new BiFunction[S, C, ReplyEffect[State]] { - override def apply(state: S, cmd: C): ReplyEffect[State] = handler(cmd) - }) + onCommand[C]( + commandClass, + new BiFunction[S, C, ReplyEffect[State]] { + override def apply(state: S, cmd: C): ReplyEffect[State] = handler(cmd) + }) } /** @@ -296,9 +298,11 @@ final class CommandHandlerWithReplyBuilderByState[Command, S <: State, State] @I def onCommand[C <: Command]( commandClass: Class[C], handler: Supplier[ReplyEffect[State]]): CommandHandlerWithReplyBuilderByState[Command, S, State] = { - onCommand[C](commandClass, new BiFunction[S, C, ReplyEffect[State]] { - override def apply(state: S, cmd: C): ReplyEffect[State] = handler.get() - }) + onCommand[C]( + commandClass, + new BiFunction[S, C, ReplyEffect[State]] { + override def apply(state: S, cmd: C): ReplyEffect[State] = handler.get() + }) } /** @@ -339,9 
+343,11 @@ final class CommandHandlerWithReplyBuilderByState[Command, S <: State, State] @I * @return A CommandHandlerWithReply from the appended states. */ def onAnyCommand(handler: JFunction[Command, ReplyEffect[State]]): CommandHandlerWithReply[Command, State] = { - addCase(_ => true, new BiFunction[S, Command, ReplyEffect[State]] { - override def apply(state: S, cmd: Command): ReplyEffect[State] = handler(cmd) - }) + addCase( + _ => true, + new BiFunction[S, Command, ReplyEffect[State]] { + override def apply(state: S, cmd: Command): ReplyEffect[State] = handler(cmd) + }) build() } @@ -363,9 +369,11 @@ final class CommandHandlerWithReplyBuilderByState[Command, S <: State, State] @I * @return A CommandHandlerWithReply from the appended states. */ def onAnyCommand(handler: Supplier[ReplyEffect[State]]): CommandHandlerWithReply[Command, State] = { - addCase(_ => true, new BiFunction[S, Command, ReplyEffect[State]] { - override def apply(state: S, cmd: Command): ReplyEffect[State] = handler.get() - }) + addCase( + _ => true, + new BiFunction[S, Command, ReplyEffect[State]] { + override def apply(state: S, cmd: Command): ReplyEffect[State] = handler.get() + }) build() } diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/javadsl/DurableStateBehavior.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/javadsl/DurableStateBehavior.scala index 8878eb817c7..9843f66b7d2 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/javadsl/DurableStateBehavior.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/javadsl/DurableStateBehavior.scala @@ -30,9 +30,7 @@ abstract class DurableStateBehavior[Command, State] private[akka] ( onPersistFailure: Optional[BackoffSupervisorStrategy]) extends DeferredBehavior[Command] { - /** - * @param persistenceId stable unique identifier for the `DurableStateBehavior` - */ + /** @param persistenceId stable unique identifier for the 
`DurableStateBehavior` */ def this(persistenceId: PersistenceId) = { this(persistenceId, Optional.empty[BackoffSupervisorStrategy]) } @@ -85,27 +83,19 @@ abstract class DurableStateBehavior[Command, State] private[akka] ( */ protected def signalHandler(): SignalHandler[State] = SignalHandler.empty[State] - /** - * @return A new, mutable signal handler builder - */ + /** @return A new, mutable signal handler builder */ protected final def newSignalHandlerBuilder(): SignalHandlerBuilder[State] = SignalHandlerBuilder.builder[State] - /** - * @return A new, mutable, command handler builder - */ + /** @return A new, mutable, command handler builder */ protected def newCommandHandlerBuilder(): CommandHandlerBuilder[Command, State] = { CommandHandlerBuilder.builder[Command, State]() } - /** - * Override and define the `DurableStateStore` plugin id that this actor should use instead of the default. - */ + /** Override and define the `DurableStateStore` plugin id that this actor should use instead of the default. */ def durableStateStorePluginId: String = "" - /** - * The tag that can be used in persistence query. - */ + /** The tag that can be used in persistence query. 
*/ def tag: String = "" /** @@ -114,15 +104,11 @@ abstract class DurableStateBehavior[Command, State] private[akka] ( */ def snapshotAdapter(): SnapshotAdapter[State] = NoOpSnapshotAdapter.instance[State] - /** - * INTERNAL API: DeferredBehavior init, not for user extension - */ + /** INTERNAL API: DeferredBehavior init, not for user extension */ @InternalApi override def apply(context: typed.TypedActorContext[Command]): Behavior[Command] = createDurableStateBehavior() - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] final def createDurableStateBehavior(): scaladsl.DurableStateBehavior[Command, State] = { val behavior = new internal.DurableStateBehaviorImpl[Command, State]( @@ -149,9 +135,7 @@ abstract class DurableStateBehavior[Command, State] private[akka] ( } } - /** - * The last sequence number that was persisted, can only be called from inside the handlers of a `DurableStateBehavior` - */ + /** The last sequence number that was persisted, can only be called from inside the handlers of a `DurableStateBehavior` */ final def lastSequenceNumber(ctx: ActorContext[_]): Long = { scaladsl.DurableStateBehavior.lastSequenceNumber(ctx.asScala) } @@ -194,9 +178,7 @@ abstract class DurableStateBehaviorWithEnforcedReplies[Command, State]( */ override protected def commandHandler(): CommandHandlerWithReply[Command, State] - /** - * @return A new, mutable, command handler builder - */ + /** @return A new, mutable, command handler builder */ protected def newCommandHandlerWithReplyBuilder(): CommandHandlerWithReplyBuilder[Command, State] = { CommandHandlerWithReplyBuilder.builder[Command, State]() } diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/javadsl/Effect.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/javadsl/Effect.scala index a8590b89689..75a664afd47 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/javadsl/Effect.scala +++ 
b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/javadsl/Effect.scala @@ -12,9 +12,7 @@ import akka.japi.function import akka.persistence.typed.state.internal._ import akka.persistence.typed.state.internal.SideEffect -/** - * INTERNAL API: see `class EffectFactories` - */ +/** INTERNAL API: see `class EffectFactories` */ @InternalApi private[akka] object EffectFactories extends EffectFactories[Nothing] /** @@ -101,9 +99,11 @@ import akka.persistence.typed.state.internal.SideEffect * finding mistakes. */ def reply[ReplyMessage](replyTo: ActorRef[ReplyMessage], replyWithMessage: ReplyMessage): ReplyEffect[State] = - none().thenReply[ReplyMessage](replyTo, new function.Function[State, ReplyMessage] { - override def apply(param: State): ReplyMessage = replyWithMessage - }) + none().thenReply[ReplyMessage]( + replyTo, + new function.Function[State, ReplyMessage] { + override def apply(param: State): ReplyMessage = replyWithMessage + }) /** * When [[DurableStateBehaviorWithEnforcedReplies]] is used there will be compilation errors if the returned effect @@ -144,14 +144,11 @@ import akka.persistence.typed.state.internal.SideEffect * but if a known subtype of `State` is expected that can be specified instead (preferably by * explicitly typing the lambda parameter like so: `thenRun((SubState state) -> { ... })`). * If the state is not of the expected type an [[java.lang.ClassCastException]] is thrown. - * */ final def thenRun[NewState <: State](callback: function.Procedure[NewState]): EffectBuilder[State] = CompositeEffect(this, SideEffect[State](s => callback.apply(s.asInstanceOf[NewState]))) - /** - * Run the given callback. Callbacks are run sequentially. - */ + /** Run the given callback. Callbacks are run sequentially. 
*/ final def thenRun(callback: function.Effect): EffectBuilder[State] = CompositeEffect(this, SideEffect[State]((_: State) => callback.apply())) diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/javadsl/SignalHandler.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/javadsl/SignalHandler.scala index aebc7914576..504fc42a71b 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/javadsl/SignalHandler.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/javadsl/SignalHandler.scala @@ -18,15 +18,11 @@ object SignalHandler { final class SignalHandler[State](_handler: PartialFunction[(State, Signal), Unit]) { - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def isEmpty: Boolean = _handler eq PartialFunction.empty - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def handler: PartialFunction[(State, Signal), Unit] = _handler } @@ -44,9 +40,7 @@ final class SignalHandlerBuilder[State] { private var handler: PartialFunction[(State, Signal), Unit] = PartialFunction.empty - /** - * If the behavior receives a signal of type `T`, `callback` is invoked with the signal instance as input. - */ + /** If the behavior receives a signal of type `T`, `callback` is invoked with the signal instance as input. */ def onSignal[T <: Signal](signalType: Class[T], callback: BiConsumer[State, T]): SignalHandlerBuilder[State] = { val newPF: PartialFunction[(State, Signal), Unit] = { case (state, t) if signalType.isInstance(t) => @@ -56,13 +50,10 @@ final class SignalHandlerBuilder[State] { this } - /** - * If the behavior receives exactly the signal `signal`, `callback` is invoked. - */ + /** If the behavior receives exactly the signal `signal`, `callback` is invoked. 
*/ def onSignal[T <: Signal](signal: T, callback: Consumer[State]): SignalHandlerBuilder[State] = { - val newPF: PartialFunction[(State, Signal), Unit] = { - case (state, `signal`) => - callback.accept(state) + val newPF: PartialFunction[(State, Signal), Unit] = { case (state, `signal`) => + callback.accept(state) } handler = newPF.orElse(handler) this diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/scaladsl/DurableStateBehavior.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/scaladsl/DurableStateBehavior.scala index b6925499f40..1456d89d49d 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/scaladsl/DurableStateBehavior.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/scaladsl/DurableStateBehavior.scala @@ -19,9 +19,7 @@ import akka.persistence.typed.PersistenceId import akka.persistence.typed.SnapshotAdapter import akka.persistence.typed.state.internal._ -/** - * API May Change - */ +/** API May Change */ @ApiMayChange object DurableStateBehavior { @@ -87,9 +85,7 @@ object DurableStateBehavior { } - /** - * The last sequence number that was persisted, can only be called from inside the handlers of a `DurableStateBehavior` - */ + /** The last sequence number that was persisted, can only be called from inside the handlers of a `DurableStateBehavior` */ def lastSequenceNumber(context: ActorContext[_]): Long = { @tailrec def extractConcreteBehavior(beh: Behavior[_]): Behavior[_] = @@ -126,19 +122,13 @@ object DurableStateBehavior { */ def receiveSignal(signalHandler: PartialFunction[(State, Signal), Unit]): DurableStateBehavior[Command, State] - /** - * @return The currently defined signal handler or an empty handler if no custom handler previously defined - */ + /** @return The currently defined signal handler or an empty handler if no custom handler previously defined */ def signalHandler: PartialFunction[(State, Signal), Unit] - /** - * Change the 
`DurableStateStore` plugin id that this actor should use. - */ + /** Change the `DurableStateStore` plugin id that this actor should use. */ def withDurableStateStorePluginId(id: String): DurableStateBehavior[Command, State] - /** - * The tag that can used in persistence query - */ + /** The tag that can used in persistence query */ def withTag(tag: String): DurableStateBehavior[Command, State] /** diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/scaladsl/Effect.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/scaladsl/Effect.scala index ed3e70feb40..6bffd2d9770 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/scaladsl/Effect.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/state/scaladsl/Effect.scala @@ -127,9 +127,7 @@ trait EffectBuilder[+State] extends Effect[State] { /* The state that will be persisted in this effect */ def state: Option[State] - /** - * Run the given callback. Callbacks are run sequentially. - */ + /** Run the given callback. Callbacks are run sequentially. */ def thenRun(callback: State => Unit): EffectBuilder[State] /** The side effect is to stop the actor */ diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/ManyRecoveriesSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/ManyRecoveriesSpec.scala index b83a94a0c3f..cb4311bac72 100644 --- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/ManyRecoveriesSpec.scala +++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/ManyRecoveriesSpec.scala @@ -31,18 +31,17 @@ object ManyRecoveriesSpec { EventSourcedBehavior[Cmd, Evt, String]( persistenceId = PersistenceId.ofUniqueId(name), emptyState = "", - commandHandler = CommandHandler.command { - case Cmd(s) => Effect.persist(Evt(s)).thenRun(_ => probe.ref ! 
s"$name-$s") + commandHandler = CommandHandler.command { case Cmd(s) => + Effect.persist(Evt(s)).thenRun(_ => probe.ref ! s"$name-$s") }, - eventHandler = { - case (state, _) => latch.foreach(Await.ready(_, 10.seconds)); state + eventHandler = { case (state, _) => + latch.foreach(Await.ready(_, 10.seconds)); state }) def forwardBehavior(sender: TestProbe[String]): Behaviors.Receive[Int] = - Behaviors.receiveMessagePartial[Int] { - case value => - sender.ref ! value.toString - Behaviors.same + Behaviors.receiveMessagePartial[Int] { case value => + sender.ref ! value.toString + Behaviors.same } def forN(n: Int)(mapper: Int => String): Set[String] = diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/delivery/EventSourcedProducerQueueSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/delivery/EventSourcedProducerQueueSpec.scala index 63e388a5be1..9fd79be0b61 100644 --- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/delivery/EventSourcedProducerQueueSpec.scala +++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/delivery/EventSourcedProducerQueueSpec.scala @@ -33,8 +33,8 @@ object EventSourcedProducerQueueSpec { akka.persistence.journal.inmem.test-serialization = on akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" akka.persistence.snapshot-store.local.dir = "target/EventSourcedDurableProducerQueueSpec-${UUID - .randomUUID() - .toString}" + .randomUUID() + .toString}" """) } diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/delivery/ReliableDeliveryWithEventSourcedProducerQueueSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/delivery/ReliableDeliveryWithEventSourcedProducerQueueSpec.scala index 9fd04d9ed3b..2d21147a32e 100644 --- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/delivery/ReliableDeliveryWithEventSourcedProducerQueueSpec.scala +++ 
b/akka-persistence-typed/src/test/scala/akka/persistence/typed/delivery/ReliableDeliveryWithEventSourcedProducerQueueSpec.scala @@ -22,8 +22,8 @@ object ReliableDeliveryWithEventSourcedProducerQueueSpec { akka.persistence.journal.inmem.test-serialization = on akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" akka.persistence.snapshot-store.local.dir = "target/ProducerControllerWithEventSourcedProducerQueueSpec-${UUID - .randomUUID() - .toString}" + .randomUUID() + .toString}" akka.reliable-delivery.consumer-controller.flow-control-window = 20 """) } @@ -174,6 +174,7 @@ class ReliableDeliveryWithEventSourcedProducerQueueSpec(config: Config) // same tests but with chunked messages class ReliableDeliveryWithEventSourcedProducerQueueChunkedSpec extends ReliableDeliveryWithEventSourcedProducerQueueSpec( - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" akka.reliable-delivery.producer-controller.chunk-large-messages = 1b """).withFallback(ReliableDeliveryWithEventSourcedProducerQueueSpec.conf)) diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/delivery/WorkPullingWithEventSourcedProducerQueueSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/delivery/WorkPullingWithEventSourcedProducerQueueSpec.scala index 4e6a6d3c621..55f595b70b5 100644 --- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/delivery/WorkPullingWithEventSourcedProducerQueueSpec.scala +++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/delivery/WorkPullingWithEventSourcedProducerQueueSpec.scala @@ -26,8 +26,8 @@ object WorkPullingWithEventSourcedProducerQueueSpec { akka.persistence.journal.plugin = "akka.persistence.journal.inmem" akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" akka.persistence.snapshot-store.local.dir = "target/WorkPullingWithEventSourcedProducerQueueSpec-${UUID - .randomUUID() - .toString}" + .randomUUID() + .toString}" 
akka.reliable-delivery.consumer-controller.flow-control-window = 20 """) } diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/internal/RecoveryPermitterSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/internal/RecoveryPermitterSpec.scala index d055e389d82..08fdc2d6ba5 100644 --- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/internal/RecoveryPermitterSpec.scala +++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/internal/RecoveryPermitterSpec.scala @@ -57,10 +57,9 @@ object RecoveryPermitterSpec { }, eventHandler = { (state, event) => eventProbe.ref ! event; state - }).receiveSignal { - case (_, RecoveryCompleted) => - eventProbe.ref ! Recovered - if (throwOnRecovery) throw new TE + }).receiveSignal { case (_, RecoveryCompleted) => + eventProbe.ref ! Recovered + if (throwOnRecovery) throw new TE } def forwardingBehavior(target: TestProbe[Any]): Behavior[Any] = diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/internal/RetentionCriteriaSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/internal/RetentionCriteriaSpec.scala index 9b1f6fb4110..14e1323a62f 100644 --- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/internal/RetentionCriteriaSpec.scala +++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/internal/RetentionCriteriaSpec.scala @@ -30,11 +30,10 @@ class RetentionCriteriaSpec extends TestSuite with Matchers with AnyWordSpecLike val criteria = RetentionCriteria.snapshotEvery(3, 2).asInstanceOf[SnapshotCountRetentionCriteriaImpl] val expected = List(1 -> 0, 3 -> 0, 4 -> 0, 6 -> 0, 7 -> 1, 9 -> 3, 10 -> 4, 12 -> 6, 13 -> 7, 15 -> 9, 18 -> 12, 20 -> 14) - expected.foreach { - case (seqNr, upper) => - withClue(s"seqNr=$seqNr:") { - criteria.deleteUpperSequenceNr(seqNr) should ===(upper) - } + expected.foreach { case (seqNr, upper) => + withClue(s"seqNr=$seqNr:") { + criteria.deleteUpperSequenceNr(seqNr) should 
===(upper) + } } } diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/internal/StashStateSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/internal/StashStateSpec.scala index 686d18e3147..2e5dcfe0d3f 100644 --- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/internal/StashStateSpec.scala +++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/internal/StashStateSpec.scala @@ -50,10 +50,9 @@ class StashStateSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with Behaviors.same[InternalProtocol] case _: IncomingCommand[_] => Behaviors.stopped } - .receiveSignal { - case (_, _) => - stashState.clearStashBuffers() - Behaviors.stopped[InternalProtocol] + .receiveSignal { case (_, _) => + stashState.clearStashBuffers() + Behaviors.stopped[InternalProtocol] } } } diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PersistentActorCompileOnlyTest.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PersistentActorCompileOnlyTest.scala index 6e27d56433b..749bbc0e444 100644 --- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PersistentActorCompileOnlyTest.scala +++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PersistentActorCompileOnlyTest.scala @@ -37,14 +37,13 @@ object PersistentActorCompileOnlyTest { EventSourcedBehavior[MyCommand, MyEvent, ExampleState]( persistenceId = PersistenceId.ofUniqueId("sample-id-1"), emptyState = ExampleState(Nil), - commandHandler = CommandHandler.command { - case Cmd(data, sender) => - Effect.persist(Evt(data)).thenRun { _ => - sender ! Ack - } + commandHandler = CommandHandler.command { case Cmd(data, sender) => + Effect.persist(Evt(data)).thenRun { _ => + sender ! 
Ack + } }, - eventHandler = { - case (state, Evt(data)) => state.copy(data :: state.events) + eventHandler = { case (state, Evt(data)) => + state.copy(data :: state.events) }) } @@ -76,34 +75,32 @@ object PersistentActorCompileOnlyTest { } val behavior: Behavior[Command] = - Behaviors.setup( - ctx => - EventSourcedBehavior[Command, Event, EventsInFlight]( - persistenceId = PersistenceId.ofUniqueId("recovery-complete-id"), - emptyState = EventsInFlight(0, Map.empty), - commandHandler = (state, cmd) => - cmd match { - case DoSideEffect(data) => - Effect.persist(IntentRecorded(state.nextCorrelationId, data)).thenRun { _ => - performSideEffect(ctx.self, state.nextCorrelationId, data) - } - case AcknowledgeSideEffect(correlationId) => - Effect.persist(SideEffectAcknowledged(correlationId)) - }, - eventHandler = (state, evt) => - evt match { - case IntentRecorded(correlationId, data) => - EventsInFlight( - nextCorrelationId = correlationId + 1, - dataByCorrelationId = state.dataByCorrelationId + (correlationId -> data)) - case SideEffectAcknowledged(correlationId) => - state.copy(dataByCorrelationId = state.dataByCorrelationId - correlationId) - }).receiveSignal { - case (state, RecoveryCompleted) => - state.dataByCorrelationId.foreach { - case (correlationId, data) => performSideEffect(ctx.self, correlationId, data) - } - }) + Behaviors.setup(ctx => + EventSourcedBehavior[Command, Event, EventsInFlight]( + persistenceId = PersistenceId.ofUniqueId("recovery-complete-id"), + emptyState = EventsInFlight(0, Map.empty), + commandHandler = (state, cmd) => + cmd match { + case DoSideEffect(data) => + Effect.persist(IntentRecorded(state.nextCorrelationId, data)).thenRun { _ => + performSideEffect(ctx.self, state.nextCorrelationId, data) + } + case AcknowledgeSideEffect(correlationId) => + Effect.persist(SideEffectAcknowledged(correlationId)) + }, + eventHandler = (state, evt) => + evt match { + case IntentRecorded(correlationId, data) => + EventsInFlight( + nextCorrelationId = 
correlationId + 1, + dataByCorrelationId = state.dataByCorrelationId + (correlationId -> data)) + case SideEffectAcknowledged(correlationId) => + state.copy(dataByCorrelationId = state.dataByCorrelationId - correlationId) + }).receiveSignal { case (state, RecoveryCompleted) => + state.dataByCorrelationId.foreach { case (correlationId, data) => + performSideEffect(ctx.self, correlationId, data) + } + }) } @@ -140,8 +137,8 @@ object PersistentActorCompileOnlyTest { } } }, - eventHandler = { - case (_, MoodChanged(to)) => to + eventHandler = { case (_, MoodChanged(to)) => + to }) Behaviors.withTimers((timers: TimerScheduler[Command]) => { @@ -194,27 +191,26 @@ object PersistentActorCompileOnlyTest { def worker(task: Task): Behavior[Nothing] = ??? - val behavior: Behavior[Command] = Behaviors.setup( - ctx => - EventSourcedBehavior[Command, Event, State]( - persistenceId = PersistenceId.ofUniqueId("asdf"), - emptyState = State(Nil), - commandHandler = (_, cmd) => - cmd match { - case RegisterTask(task) => - Effect.persist(TaskRegistered(task)).thenRun { _ => - val child = ctx.spawn[Nothing](worker(task), task) - // This assumes *any* termination of the child may trigger a `TaskDone`: - ctx.watchWith(child, TaskDone(task)) - } - case TaskDone(task) => Effect.persist(TaskRemoved(task)) - }, - eventHandler = (state, evt) => - evt match { - case TaskRegistered(task) => State(task :: state.tasksInFlight) - case TaskRemoved(task) => - State(state.tasksInFlight.filter(_ != task)) - })) + val behavior: Behavior[Command] = Behaviors.setup(ctx => + EventSourcedBehavior[Command, Event, State]( + persistenceId = PersistenceId.ofUniqueId("asdf"), + emptyState = State(Nil), + commandHandler = (_, cmd) => + cmd match { + case RegisterTask(task) => + Effect.persist(TaskRegistered(task)).thenRun { _ => + val child = ctx.spawn[Nothing](worker(task), task) + // This assumes *any* termination of the child may trigger a `TaskDone`: + ctx.watchWith(child, TaskDone(task)) + } + case 
TaskDone(task) => Effect.persist(TaskRemoved(task)) + }, + eventHandler = (state, evt) => + evt match { + case TaskRegistered(task) => State(task :: state.tasksInFlight) + case TaskRemoved(task) => + State(state.tasksInFlight.filter(_ != task)) + })) } @@ -238,14 +234,14 @@ object PersistentActorCompileOnlyTest { if (currentState == newMood) Effect.none else Effect.persist(MoodChanged(newMood)) - //#commonChainedEffects + // #commonChainedEffects // Example factoring out a chained effect to use in several places with `thenRun` val commonChainedEffects: Mood => Unit = _ => println("Command processed") // Then in a command handler: Effect .persist(Remembered("Yep")) // persist event .thenRun(commonChainedEffects) // add on common chained effect - //#commonChainedEffects + // #commonChainedEffects val commandHandler: CommandHandler[Command, Event, Mood] = { (state, cmd) => cmd match { @@ -288,13 +284,12 @@ object PersistentActorCompileOnlyTest { class State - private val commandHandler: CommandHandler[Command, Event, State] = CommandHandler.command { - case Enough => - Effect.persist(Done).thenRun((_: State) => println("yay")).thenStop() + private val commandHandler: CommandHandler[Command, Event, State] = CommandHandler.command { case Enough => + Effect.persist(Done).thenRun((_: State) => println("yay")).thenStop() } - private val eventHandler: (State, Event) => State = { - case (state, Done) => state + private val eventHandler: (State, Event) => State = { case (state, Done) => + state } EventSourcedBehavior[Command, Event, State]( diff --git a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BasicPersistentBehaviorCompileOnly.scala b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BasicPersistentBehaviorCompileOnly.scala index 3a888412ea3..e03bafec01b 100644 --- a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BasicPersistentBehaviorCompileOnly.scala +++ 
b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BasicPersistentBehaviorCompileOnly.scala @@ -34,7 +34,7 @@ object BasicPersistentBehaviorCompileOnly { import akka.persistence.typed.scaladsl.RetentionCriteria object FirstExample { - //#command + // #command sealed trait Command final case class Add(data: String) extends Command case object Clear extends Command @@ -42,13 +42,13 @@ object BasicPersistentBehaviorCompileOnly { sealed trait Event final case class Added(data: String) extends Event case object Cleared extends Event - //#command + // #command - //#state + // #state final case class State(history: List[String] = Nil) - //#state + // #state - //#command-handler + // #command-handler import akka.persistence.typed.scaladsl.Effect val commandHandler: (State, Command) => Effect[Event, State] = { (state, command) => @@ -57,9 +57,9 @@ object BasicPersistentBehaviorCompileOnly { case Clear => Effect.persist(Cleared) } } - //#command-handler + // #command-handler - //#effects + // #effects def onCommand(subscriber: ActorRef[State], state: State, command: Command): Effect[Event, State] = { command match { case Add(data) => @@ -68,29 +68,29 @@ object BasicPersistentBehaviorCompileOnly { Effect.persist(Cleared).thenRun((newState: State) => subscriber ! 
newState).thenStop() } } - //#effects + // #effects - //#event-handler + // #event-handler val eventHandler: (State, Event) => State = { (state, event) => event match { case Added(data) => state.copy((data :: state.history).take(5)) case Cleared => State(Nil) } } - //#event-handler + // #event-handler - //#behavior + // #behavior def apply(id: String): Behavior[Command] = EventSourcedBehavior[Command, Event, State]( persistenceId = PersistenceId.ofUniqueId(id), emptyState = State(Nil), commandHandler = commandHandler, eventHandler = eventHandler) - //#behavior + // #behavior } - //#structure + // #structure object MyPersistentBehavior { sealed trait Command sealed trait Event @@ -103,53 +103,52 @@ object BasicPersistentBehaviorCompileOnly { commandHandler = (state, cmd) => throw new NotImplementedError("TODO: process the command & return an Effect"), eventHandler = (state, evt) => throw new NotImplementedError("TODO: process the event return the next state")) } - //#structure + // #structure import MyPersistentBehavior._ object RecoveryBehavior { def apply(persistenceId: PersistenceId): Behavior[Command] = - //#recovery + // #recovery EventSourcedBehavior[Command, Event, State]( persistenceId = persistenceId, emptyState = State(), commandHandler = (state, cmd) => throw new NotImplementedError("TODO: process the command & return an Effect"), eventHandler = (state, evt) => throw new NotImplementedError("TODO: process the event return the next state")) - .receiveSignal { - case (state, RecoveryCompleted) => - throw new NotImplementedError("TODO: add some end-of-recovery side-effect here") + .receiveSignal { case (state, RecoveryCompleted) => + throw new NotImplementedError("TODO: add some end-of-recovery side-effect here") } - //#recovery + // #recovery } object RecoveryDisabledBehavior { def apply(): Behavior[Command] = - //#recovery-disabled + // #recovery-disabled EventSourcedBehavior[Command, Event, State]( persistenceId = PersistenceId.ofUniqueId("abc"), 
emptyState = State(), commandHandler = (state, cmd) => throw new NotImplementedError("TODO: process the command & return an Effect"), eventHandler = (state, evt) => throw new NotImplementedError("TODO: process the event return the next state")) .withRecovery(Recovery.disabled) - //#recovery-disabled + // #recovery-disabled } object TaggingBehavior { def apply(): Behavior[Command] = - //#tagging + // #tagging EventSourcedBehavior[Command, Event, State]( persistenceId = PersistenceId.ofUniqueId("abc"), emptyState = State(), commandHandler = (state, cmd) => throw new NotImplementedError("TODO: process the command & return an Effect"), eventHandler = (state, evt) => throw new NotImplementedError("TODO: process the event return the next state")) .withTagger(_ => Set("tag1", "tag2")) - //#tagging + // #tagging } object TaggingBehavior2 { sealed trait OrderCompleted extends Event - //#tagging-query + // #tagging-query val NumberOfEntityGroups = 10 def tagEvent(entityId: String, event: Event): Set[String] = { @@ -168,29 +167,30 @@ object BasicPersistentBehaviorCompileOnly { eventHandler = (state, evt) => throw new NotImplementedError("TODO: process the event return the next state")) .withTagger(event => tagEvent(entityId, event)) } - //#tagging-query + // #tagging-query } object WrapBehavior { def apply(): Behavior[Command] = - //#wrapPersistentBehavior + // #wrapPersistentBehavior Behaviors.setup[Command] { context => EventSourcedBehavior[Command, Event, State]( persistenceId = PersistenceId.ofUniqueId("abc"), emptyState = State(), - commandHandler = (state, cmd) => throw new NotImplementedError("TODO: process the command & return an Effect"), + commandHandler = + (state, cmd) => throw new NotImplementedError("TODO: process the command & return an Effect"), eventHandler = (state, evt) => throw new NotImplementedError("TODO: process the event return the next state")) .snapshotWhen((state, _, _) => { context.log.info2("Snapshot actor {} => state: {}", 
context.self.path.name, state) true }) } - //#wrapPersistentBehavior + // #wrapPersistentBehavior } object Supervision { def apply(): Behavior[Command] = - //#supervision + // #supervision EventSourcedBehavior[Command, Event, State]( persistenceId = PersistenceId.ofUniqueId("abc"), emptyState = State(), @@ -198,7 +198,7 @@ object BasicPersistentBehaviorCompileOnly { eventHandler = (state, evt) => throw new NotImplementedError("TODO: process the event return the next state")) .onPersistFailure( SupervisorStrategy.restartWithBackoff(minBackoff = 10.seconds, maxBackoff = 60.seconds, randomFactor = 0.1)) - //#supervision + // #supervision } object BehaviorWithContext { @@ -215,8 +215,8 @@ object BasicPersistentBehaviorCompileOnly { context.log.info("Got command {}", cmd) Effect.none }, - eventHandler = { - case (state, _) => state + eventHandler = { case (state, _) => + state }) } // #actor-context @@ -224,7 +224,7 @@ object BasicPersistentBehaviorCompileOnly { final case class BookingCompleted(orderNr: String) extends Event - //#snapshottingEveryN + // #snapshottingEveryN EventSourcedBehavior[Command, Event, State]( persistenceId = PersistenceId.ofUniqueId("abc"), @@ -232,9 +232,9 @@ object BasicPersistentBehaviorCompileOnly { commandHandler = (state, cmd) => throw new NotImplementedError("TODO: process the command & return an Effect"), eventHandler = (state, evt) => throw new NotImplementedError("TODO: process the event return the next state")) .withRetention(RetentionCriteria.snapshotEvery(numberOfEvents = 1000, keepNSnapshots = 2)) - //#snapshottingEveryN + // #snapshottingEveryN - //#snapshottingPredicate + // #snapshottingPredicate EventSourcedBehavior[Command, Event, State]( persistenceId = PersistenceId.ofUniqueId("abc"), emptyState = State(), @@ -244,9 +244,9 @@ object BasicPersistentBehaviorCompileOnly { case (state, BookingCompleted(_), sequenceNumber) => true case (state, event, sequenceNumber) => false } - //#snapshottingPredicate + // 
#snapshottingPredicate - //#snapshotSelection + // #snapshotSelection import akka.persistence.typed.SnapshotSelectionCriteria EventSourcedBehavior[Command, Event, State]( @@ -255,9 +255,9 @@ object BasicPersistentBehaviorCompileOnly { commandHandler = (state, cmd) => throw new NotImplementedError("TODO: process the command & return an Effect"), eventHandler = (state, evt) => throw new NotImplementedError("TODO: process the event return the next state")) .withRecovery(Recovery.withSnapshotSelectionCriteria(SnapshotSelectionCriteria.none)) - //#snapshotSelection + // #snapshotSelection - //#retentionCriteria + // #retentionCriteria import akka.persistence.typed.scaladsl.Effect @@ -265,15 +265,16 @@ object BasicPersistentBehaviorCompileOnly { persistenceId = PersistenceId.ofUniqueId("abc"), emptyState = State(), commandHandler = (state, cmd) => throw new NotImplementedError("TODO: process the command & return an Effect"), - eventHandler = (state, evt) => state) // do something based on a particular state + eventHandler = (state, evt) => state + ) // do something based on a particular state .snapshotWhen { case (state, BookingCompleted(_), sequenceNumber) => true case (state, event, sequenceNumber) => false } .withRetention(RetentionCriteria.snapshotEvery(numberOfEvents = 100, keepNSnapshots = 2)) - //#retentionCriteria + // #retentionCriteria - //#snapshotAndEventDeletes + // #snapshotAndEventDeletes EventSourcedBehavior[Command, Event, State]( persistenceId = PersistenceId.ofUniqueId("abc"), @@ -286,9 +287,9 @@ object BasicPersistentBehaviorCompileOnly { case (state, _: DeleteSnapshotsFailed) => // react to failure case (state, _: DeleteEventsFailed) => // react to failure } - //#snapshotAndEventDeletes + // #snapshotAndEventDeletes - //#retentionCriteriaWithSignals + // #retentionCriteriaWithSignals EventSourcedBehavior[Command, Event, State]( persistenceId = PersistenceId.ofUniqueId("abc"), @@ -300,27 +301,27 @@ object BasicPersistentBehaviorCompileOnly { case 
(state, _: SnapshotFailed) => // react to failure case (state, _: DeleteSnapshotsFailed) => // react to failure } - //#retentionCriteriaWithSignals + // #retentionCriteriaWithSignals - //#event-wrapper + // #event-wrapper case class Wrapper[T](event: T) class WrapperEventAdapter[T] extends EventAdapter[T, Wrapper[T]] { override def toJournal(e: T): Wrapper[T] = Wrapper(e) override def fromJournal(p: Wrapper[T], manifest: String): EventSeq[T] = EventSeq.single(p.event) override def manifest(event: T): String = "" } - //#event-wrapper + // #event-wrapper - //#install-event-adapter + // #install-event-adapter EventSourcedBehavior[Command, Event, State]( persistenceId = PersistenceId.ofUniqueId("abc"), emptyState = State(), commandHandler = (state, cmd) => throw new NotImplementedError("TODO: process the command & return an Effect"), eventHandler = (state, evt) => throw new NotImplementedError("TODO: process the event return the next state")) .eventAdapter(new WrapperEventAdapter[Event]) - //#install-event-adapter - //#custom-stash-buffer + // #install-event-adapter + // #custom-stash-buffer .withStashCapacity(100) - //#custom-stash-buffer + // #custom-stash-buffer } diff --git a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BlogPostEntity.scala b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BlogPostEntity.scala index 1c45e6bf9d4..eb6c2596513 100644 --- a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BlogPostEntity.scala +++ b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BlogPostEntity.scala @@ -17,17 +17,17 @@ import akka.persistence.typed.scaladsl.EventSourcedBehavior object BlogPostEntity { // commands, events, state defined here - //#behavior + // #behavior - //#event + // #event sealed trait Event final case class PostAdded(postId: String, content: PostContent) extends Event final case class BodyChanged(postId: String, newBody: String) extends Event final case class Published(postId: 
String) extends Event - //#event + // #event - //#state + // #state sealed trait State case object BlankState extends State @@ -42,30 +42,30 @@ object BlogPostEntity { final case class PublishedState(content: PostContent) extends State { def postId: String = content.postId } - //#state + // #state - //#commands + // #commands sealed trait Command - //#reply-command + // #reply-command final case class AddPost(content: PostContent, replyTo: ActorRef[StatusReply[AddPostDone]]) extends Command final case class AddPostDone(postId: String) - //#reply-command + // #reply-command final case class GetPost(replyTo: ActorRef[PostContent]) extends Command final case class ChangeBody(newBody: String, replyTo: ActorRef[Done]) extends Command final case class Publish(replyTo: ActorRef[Done]) extends Command final case class PostContent(postId: String, title: String, body: String) - //#commands + // #commands - //#behavior + // #behavior def apply(entityId: String, persistenceId: PersistenceId): Behavior[Command] = { Behaviors.setup { context => context.log.info("Starting BlogPostEntity {}", entityId) EventSourcedBehavior[Command, Event, State](persistenceId, emptyState = BlankState, commandHandler, eventHandler) } } - //#behavior + // #behavior - //#command-handler + // #command-handler private val commandHandler: (State, Command) => Effect[Event, State] = { (state, command) => state match { @@ -95,13 +95,13 @@ object BlogPostEntity { } private def addPost(cmd: AddPost): Effect[Event, State] = { - //#reply + // #reply val evt = PostAdded(cmd.content.postId, cmd.content) Effect.persist(evt).thenRun { _ => // After persist is done additional side effects can be performed cmd.replyTo ! StatusReply.Success(AddPostDone(cmd.content.postId)) } - //#reply + // #reply } private def changeBody(state: DraftState, cmd: ChangeBody): Effect[Event, State] = { @@ -127,9 +127,9 @@ object BlogPostEntity { replyTo ! 
state.content Effect.none } - //#command-handler + // #command-handler - //#event-handler + // #event-handler private val eventHandler: (State, Event) => State = { (state, event) => state match { @@ -157,9 +157,9 @@ object BlogPostEntity { throw new IllegalStateException(s"unexpected event [$event] in state [$state]") } } - //#event-handler + // #event-handler - //#behavior + // #behavior // commandHandler and eventHandler defined here } diff --git a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BlogPostEntityDurableState.scala b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BlogPostEntityDurableState.scala index b16e61dedc9..dedf10edc6d 100644 --- a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BlogPostEntityDurableState.scala +++ b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BlogPostEntityDurableState.scala @@ -17,9 +17,9 @@ import akka.persistence.typed.state.scaladsl.DurableStateBehavior object BlogPostEntityDurableState { // commands, state defined here - //#behavior + // #behavior - //#state + // #state sealed trait State case object BlankState extends State @@ -34,30 +34,30 @@ object BlogPostEntityDurableState { final case class PublishedState(content: PostContent) extends State { def postId: String = content.postId } - //#state + // #state - //#commands + // #commands sealed trait Command - //#reply-command + // #reply-command final case class AddPost(content: PostContent, replyTo: ActorRef[StatusReply[AddPostDone]]) extends Command final case class AddPostDone(postId: String) - //#reply-command + // #reply-command final case class GetPost(replyTo: ActorRef[PostContent]) extends Command final case class ChangeBody(newBody: String, replyTo: ActorRef[Done]) extends Command final case class Publish(replyTo: ActorRef[Done]) extends Command final case class PostContent(postId: String, title: String, body: String) - //#commands + // #commands - //#behavior + // #behavior def 
apply(entityId: String, persistenceId: PersistenceId): Behavior[Command] = { Behaviors.setup { context => context.log.info("Starting BlogPostEntityDurableState {}", entityId) DurableStateBehavior[Command, State](persistenceId, emptyState = BlankState, commandHandler) } } - //#behavior + // #behavior - //#command-handler + // #command-handler private val commandHandler: (State, Command) => Effect[State] = { (state, command) => state match { @@ -87,12 +87,12 @@ object BlogPostEntityDurableState { } private def addPost(cmd: AddPost): Effect[State] = { - //#reply + // #reply Effect.persist(DraftState(cmd.content)).thenRun { _ => // After persist is done additional side effects can be performed cmd.replyTo ! StatusReply.Success(AddPostDone(cmd.content.postId)) } - //#reply + // #reply } private def changeBody(state: DraftState, cmd: ChangeBody): Effect[State] = { @@ -117,8 +117,8 @@ object BlogPostEntityDurableState { replyTo ! state.content Effect.none } - //#command-handler - //#behavior + // #command-handler + // #behavior // commandHandler defined here } diff --git a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/DurableStatePersistentBehaviorCompileOnly.scala b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/DurableStatePersistentBehaviorCompileOnly.scala index 93826ec2639..812073e2b50 100644 --- a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/DurableStatePersistentBehaviorCompileOnly.scala +++ b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/DurableStatePersistentBehaviorCompileOnly.scala @@ -25,19 +25,19 @@ import akka.serialization.jackson.CborSerializable @nowarn object DurableStatePersistentBehaviorCompileOnly { object FirstExample { - //#command + // #command sealed trait Command[ReplyMessage] extends CborSerializable final case object Increment extends Command[Nothing] final case class IncrementBy(value: Int) extends Command[Nothing] final case class GetValue(replyTo: 
ActorRef[State]) extends Command[State] final case object Delete extends Command[Nothing] - //#command + // #command - //#state + // #state final case class State(value: Int) extends CborSerializable - //#state + // #state - //#command-handler + // #command-handler import akka.persistence.typed.state.scaladsl.Effect val commandHandler: (State, Command[_]) => Effect[State] = (state, command) => @@ -47,19 +47,19 @@ object DurableStatePersistentBehaviorCompileOnly { case GetValue(replyTo) => Effect.reply(replyTo)(state) case Delete => Effect.delete[State]() } - //#command-handler + // #command-handler - //#behavior + // #behavior def counter(id: String): DurableStateBehavior[Command[_], State] = { DurableStateBehavior.apply[Command[_], State]( persistenceId = PersistenceId.ofUniqueId(id), emptyState = State(0), commandHandler = commandHandler) } - //#behavior + // #behavior } - //#structure + // #structure object MyPersistentCounter { sealed trait Command[ReplyMessage] extends CborSerializable @@ -73,13 +73,13 @@ object DurableStatePersistentBehaviorCompileOnly { (state, command) => throw new NotImplementedError("TODO: process the command & return an Effect")) } } - //#structure + // #structure import MyPersistentCounter._ object MyPersistentCounterWithReplies { - //#effects + // #effects sealed trait Command[ReplyMessage] extends CborSerializable final case class IncrementWithConfirmation(replyTo: ActorRef[Done]) extends Command[Done] final case class GetValue(replyTo: ActorRef[State]) extends Command[State] @@ -100,7 +100,7 @@ object DurableStatePersistentBehaviorCompileOnly { Effect.reply(replyTo)(state) }) } - //#effects + // #effects } object BehaviorWithContext { @@ -123,13 +123,13 @@ object DurableStatePersistentBehaviorCompileOnly { object TaggingBehavior { def apply(): Behavior[Command[_]] = - //#tagging + // #tagging DurableStateBehavior[Command[_], State]( persistenceId = PersistenceId.ofUniqueId("abc"), emptyState = State(0), commandHandler = (state, cmd) 
=> throw new NotImplementedError("TODO: process the command & return an Effect")) .withTag("tag1") - //#tagging + // #tagging } object WrapBehavior { @@ -137,7 +137,7 @@ object DurableStatePersistentBehaviorCompileOnly { import akka.persistence.typed.state.scaladsl.DurableStateBehavior.CommandHandler def apply(): Behavior[Command[_]] = - //#wrapPersistentBehavior + // #wrapPersistentBehavior Behaviors.setup[Command[_]] { context => DurableStateBehavior[Command[_], State]( persistenceId = PersistenceId.ofUniqueId("abc"), @@ -147,6 +147,6 @@ object DurableStatePersistentBehaviorCompileOnly { Effect.none }) } - //#wrapPersistentBehavior + // #wrapPersistentBehavior } } diff --git a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/StashingExample.scala b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/StashingExample.scala index 7b11ec1a999..e1b16057322 100644 --- a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/StashingExample.scala +++ b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/StashingExample.scala @@ -14,7 +14,7 @@ import akka.persistence.typed.scaladsl.EventSourcedBehavior object StashingExample { - //#stashing + // #stashing object TaskManager { sealed trait Command @@ -81,5 +81,5 @@ object StashingExample { } } } - //#stashing + // #stashing } diff --git a/akka-persistence/src/main/scala-2/akka/persistence/TraitOrder.scala b/akka-persistence/src/main/scala-2/akka/persistence/TraitOrder.scala index a677fdf9183..b047313fbdf 100644 --- a/akka-persistence/src/main/scala-2/akka/persistence/TraitOrder.scala +++ b/akka-persistence/src/main/scala-2/akka/persistence/TraitOrder.scala @@ -6,9 +6,7 @@ package akka.persistence import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[persistence] object TraitOrder { val canBeChecked = true diff --git a/akka-persistence/src/main/scala/akka/persistence/AtLeastOnceDelivery.scala 
b/akka-persistence/src/main/scala/akka/persistence/AtLeastOnceDelivery.scala index 21c6cce0502..e127f55b037 100644 --- a/akka-persistence/src/main/scala/akka/persistence/AtLeastOnceDelivery.scala +++ b/akka-persistence/src/main/scala/akka/persistence/AtLeastOnceDelivery.scala @@ -29,9 +29,7 @@ object AtLeastOnceDelivery { unconfirmedDeliveries: immutable.Seq[UnconfirmedDelivery]) extends Message { - /** - * Java API - */ + /** Java API */ def getUnconfirmedDeliveries: java.util.List[UnconfirmedDelivery] = { import akka.util.ccompat.JavaConverters._ unconfirmedDeliveries.asJava @@ -39,15 +37,11 @@ object AtLeastOnceDelivery { } - /** - * @see [[AtLeastOnceDeliveryLike#warnAfterNumberOfUnconfirmedAttempts]] - */ + /** @see [[AtLeastOnceDeliveryLike#warnAfterNumberOfUnconfirmedAttempts]] */ @SerialVersionUID(1L) case class UnconfirmedWarning(unconfirmedDeliveries: immutable.Seq[UnconfirmedDelivery]) { - /** - * Java API - */ + /** Java API */ def getUnconfirmedDeliveries: java.util.List[UnconfirmedDelivery] = { import akka.util.ccompat.JavaConverters._ unconfirmedDeliveries.asJava @@ -60,20 +54,14 @@ object AtLeastOnceDelivery { */ case class UnconfirmedDelivery(deliveryId: Long, destination: ActorPath, message: Any) { - /** - * Java API - */ + /** Java API */ def getMessage(): AnyRef = message.asInstanceOf[AnyRef] } - /** - * @see [[AtLeastOnceDeliveryLike#maxUnconfirmedMessages]] - */ + /** @see [[AtLeastOnceDeliveryLike#maxUnconfirmedMessages]] */ class MaxUnconfirmedMessagesExceededException(message: String) extends RuntimeException(message) - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] object Internal { case class Delivery(destination: ActorPath, message: Any, timestamp: Long, attempt: Int) case object RedeliveryTick extends NotInfluenceReceiveTimeout with DeadLetterSuppression @@ -167,9 +155,7 @@ trait AtLeastOnceDelivery extends PersistentActor with AtLeastOnceDeliveryLike { } -/** - * @see [[AtLeastOnceDelivery]] - */ +/** @see 
[[AtLeastOnceDelivery]] */ trait AtLeastOnceDeliveryLike extends Eventsourced { import AtLeastOnceDelivery._ import AtLeastOnceDelivery.Internal._ @@ -256,9 +242,7 @@ trait AtLeastOnceDeliveryLike extends Eventsourced { deliverySequenceNr } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] final def internalDeliver(destination: ActorPath)(deliveryIdToMessage: Long => Any): Unit = { if (unconfirmed.size >= maxUnconfirmedMessages) @@ -277,9 +261,7 @@ trait AtLeastOnceDeliveryLike extends Eventsourced { send(deliveryId, d, now) } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] final def internalDeliver(destination: ActorSelection)(deliveryIdToMessage: Long => Any): Unit = { val isWildcardSelection = destination.pathString.contains("*") @@ -306,9 +288,7 @@ trait AtLeastOnceDeliveryLike extends Eventsourced { } else false } - /** - * Number of messages that have not been confirmed yet. - */ + /** Number of messages that have not been confirmed yet. 
*/ def numberOfUnconfirmed: Int = unconfirmed.size private def redeliverOverdue(): Unit = { @@ -319,12 +299,11 @@ trait AtLeastOnceDeliveryLike extends Eventsourced { unconfirmed.iterator .filter { case (_, delivery) => delivery.timestamp <= deadline } .take(redeliveryBurstLimit) - .foreach { - case (deliveryId, delivery) => - send(deliveryId, delivery, now) + .foreach { case (deliveryId, delivery) => + send(deliveryId, delivery, now) - if (delivery.attempt == warnAfterNumberOfUnconfirmedAttempts) - warnings :+= UnconfirmedDelivery(deliveryId, delivery.destination, delivery.message) + if (delivery.attempt == warnAfterNumberOfUnconfirmedAttempts) + warnings :+= UnconfirmedDelivery(deliveryId, delivery.destination, delivery.message) } if (warnings.nonEmpty) @@ -367,17 +346,13 @@ trait AtLeastOnceDeliveryLike extends Eventsourced { d.deliveryId -> Delivery(d.destination, d.message, now, 0))) } - /** - * INTERNAL API - */ + /** INTERNAL API */ override protected[akka] def aroundPreRestart(reason: Throwable, message: Option[Any]): Unit = { cancelRedeliveryTask() super.aroundPreRestart(reason, message) } - /** - * INTERNAL API - */ + /** INTERNAL API */ override protected[akka] def aroundPostStop(): Unit = { cancelRedeliveryTask() super.aroundPostStop() @@ -391,9 +366,7 @@ trait AtLeastOnceDeliveryLike extends Eventsourced { super.onReplaySuccess() } - /** - * INTERNAL API - */ + /** INTERNAL API */ override protected[akka] def aroundReceive(receive: Receive, message: Any): Unit = message match { case RedeliveryTick => diff --git a/akka-persistence/src/main/scala/akka/persistence/Eventsourced.scala b/akka-persistence/src/main/scala/akka/persistence/Eventsourced.scala index 132e982a0f6..9513f40b938 100644 --- a/akka-persistence/src/main/scala/akka/persistence/Eventsourced.scala +++ b/akka-persistence/src/main/scala/akka/persistence/Eventsourced.scala @@ -113,9 +113,7 @@ private[persistence] trait Eventsourced case _ => true } - /** - * Returns `persistenceId`. 
- */ + /** Returns `persistenceId`. */ override def snapshotterId: String = persistenceId /** @@ -124,9 +122,7 @@ private[persistence] trait Eventsourced */ def lastSequenceNr: Long = _lastSequenceNr - /** - * Returns `lastSequenceNr`. - */ + /** Returns `lastSequenceNr`. */ def snapshotSequenceNr: Long = lastSequenceNr /** @@ -381,9 +377,7 @@ private[persistence] trait Eventsourced */ def receiveCommand: Receive - /** - * Internal API - */ + /** Internal API */ @InternalApi final private[akka] def internalPersist[A](event: A)(handler: A => Unit): Unit = { if (recoveryRunning) @@ -401,9 +395,7 @@ private[persistence] trait Eventsourced sender = sender()))) } - /** - * Internal API - */ + /** Internal API */ @InternalApi final private[akka] def internalPersistAll[A](events: immutable.Seq[A])(handler: A => Unit): Unit = { if (recoveryRunning) @@ -431,9 +423,7 @@ private[persistence] trait Eventsourced eventBatch ::= atomicWrite } - /** - * Internal API - */ + /** Internal API */ @InternalApi final private[akka] def internalPersistAsync[A](event: A)(handler: A => Unit): Unit = { if (recoveryRunning) @@ -449,9 +439,7 @@ private[persistence] trait Eventsourced sender = sender())) } - /** - * Internal API - */ + /** Internal API */ @InternalApi final private[akka] def internalPersistAllAsync[A](events: immutable.Seq[A])(handler: A => Unit): Unit = { if (recoveryRunning) @@ -472,9 +460,7 @@ private[persistence] trait Eventsourced } } - /** - * Internal API - */ + /** Internal API */ @InternalApi final private[akka] def internalDeferAsync[A](event: A)(handler: A => Unit): Unit = { if (recoveryRunning) @@ -488,9 +474,7 @@ private[persistence] trait Eventsourced } } - /** - * Internal API - */ + /** Internal API */ @InternalApi final private[akka] def internalDefer[A](event: A)(handler: A => Unit): Unit = { if (recoveryRunning) @@ -551,17 +535,13 @@ private[persistence] trait Eventsourced if (sequenceNr > 0) deleteMessages(sequenceNr) } - /** - * Returns `true` if this 
persistent actor is currently recovering. - */ + /** Returns `true` if this persistent actor is currently recovering. */ def recoveryRunning: Boolean = { // currentState is null if this is called from constructor if (currentState == null) true else currentState.recoveryRunning } - /** - * Returns `true` if this persistent actor has successfully finished recovery. - */ + /** Returns `true` if this persistent actor has successfully finished recovery. */ def recoveryFinished: Boolean = !recoveryRunning override def stash(): Unit = { @@ -625,14 +605,15 @@ private[persistence] trait Eventsourced } private val recoveryBehavior: Receive = { - val _receiveRecover = try receiveRecover - catch { - case NonFatal(e) => - try onRecoveryFailure(e, Some(e)) - finally context.stop(self) - returnRecoveryPermit() - Actor.emptyBehavior - } + val _receiveRecover = + try receiveRecover + catch { + case NonFatal(e) => + try onRecoveryFailure(e, Some(e)) + finally context.stop(self) + returnRecoveryPermit() + Actor.emptyBehavior + } { case PersistentRepr(FilteredPayload, _) => // ignore @@ -653,23 +634,22 @@ private[persistence] trait Eventsourced override def stateReceive(receive: Receive, message: Any): Unit = { def loadSnapshotResult(snapshot: Option[SelectedSnapshot], toSnr: Long): Unit = { timeoutCancellable.cancel() - snapshot.foreach { - case SelectedSnapshot(metadata, snapshot) => - val offer = SnapshotOffer(metadata, snapshot) - if (recoveryBehavior.isDefinedAt(offer)) { - try { - setLastSequenceNr(metadata.sequenceNr) - // Since we are recovering we can ignore the receive behavior from the stack - Eventsourced.super.aroundReceive(recoveryBehavior, offer) - } catch { - case NonFatal(t) => - try onRecoveryFailure(t, None) - finally context.stop(self) - returnRecoveryPermit() - } - } else { - unhandled(offer) + snapshot.foreach { case SelectedSnapshot(metadata, snapshot) => + val offer = SnapshotOffer(metadata, snapshot) + if (recoveryBehavior.isDefinedAt(offer)) { + try { + 
setLastSequenceNr(metadata.sequenceNr) + // Since we are recovering we can ignore the receive behavior from the stack + Eventsourced.super.aroundReceive(recoveryBehavior, offer) + } catch { + case NonFatal(t) => + try onRecoveryFailure(t, None) + finally context.stop(self) + returnRecoveryPermit() } + } else { + unhandled(offer) + } } changeState(recovering(recoveryBehavior, timeout)) journal ! ReplayMessages(lastSequenceNr + 1L, toSnr, replayMax, persistenceId, self) @@ -685,33 +665,36 @@ private[persistence] trait Eventsourced } } - try message match { - case LoadSnapshotResult(snapshot, toSnr) => - loadSnapshotResult(snapshot, toSnr) + try + message match { + case LoadSnapshotResult(snapshot, toSnr) => + loadSnapshotResult(snapshot, toSnr) + + case LoadSnapshotFailed(cause) => + if (isSnapshotOptional) { + log.info( + "Snapshot load error for persistenceId [{}]. Replaying all events since snapshot-is-optional=true", + persistenceId) + loadSnapshotResult(snapshot = None, recovery.toSequenceNr) + } else { + timeoutCancellable.cancel() + try onRecoveryFailure(cause, event = None) + finally context.stop(self) + returnRecoveryPermit() + } - case LoadSnapshotFailed(cause) => - if (isSnapshotOptional) { - log.info( - "Snapshot load error for persistenceId [{}]. 
Replaying all events since snapshot-is-optional=true", - persistenceId) - loadSnapshotResult(snapshot = None, recovery.toSequenceNr) - } else { - timeoutCancellable.cancel() - try onRecoveryFailure(cause, event = None) + case RecoveryTick(true) => + try + onRecoveryFailure( + new RecoveryTimedOut(s"Recovery timed out, didn't get snapshot within $timeout"), + event = None) finally context.stop(self) returnRecoveryPermit() - } - - case RecoveryTick(true) => - try onRecoveryFailure( - new RecoveryTimedOut(s"Recovery timed out, didn't get snapshot within $timeout"), - event = None) - finally context.stop(self) - returnRecoveryPermit() - case other => - stashInternally(other) - } catch { + case other => + stashInternally(other) + } + catch { case NonFatal(e) => returnRecoveryPermit() throw e @@ -749,50 +732,53 @@ private[persistence] trait Eventsourced override def recoveryRunning: Boolean = _recoveryRunning override def stateReceive(receive: Receive, message: Any) = - try message match { - case ReplayedMessage(p) => - try { - eventSeenInInterval = true - updateLastSequenceNr(p) - Eventsourced.super.aroundReceive(recoveryBehavior, p) - } catch { - case NonFatal(t) => - timeoutCancellable.cancel() - try onRecoveryFailure(t, Some(p.payload)) - finally context.stop(self) - returnRecoveryPermit() - } - case RecoverySuccess(highestJournalSeqNr) => - timeoutCancellable.cancel() - onReplaySuccess() // callback for subclass implementation - val highestSeqNr = Math.max(highestJournalSeqNr, lastSequenceNr) - sequenceNr = highestSeqNr - setLastSequenceNr(highestSeqNr) - _recoveryRunning = false - try Eventsourced.super.aroundReceive(recoveryBehavior, RecoveryCompleted) - finally transitToProcessingState() // in finally in case exception and resume strategy - // if exception from RecoveryCompleted the permit is returned in below catch - returnRecoveryPermit() - case ReplayMessagesFailure(cause) => - timeoutCancellable.cancel() - try onRecoveryFailure(cause, event = None) - finally 
context.stop(self) - returnRecoveryPermit() - case RecoveryTick(false) if !eventSeenInInterval => - timeoutCancellable.cancel() - try onRecoveryFailure( - new RecoveryTimedOut( - s"Recovery timed out, didn't get event within $timeout, highest sequence number seen $lastSequenceNr"), - event = None) - finally context.stop(self) - returnRecoveryPermit() - case RecoveryTick(false) => - eventSeenInInterval = false - case RecoveryTick(true) => - // snapshot tick, ignore - case other => - stashInternally(other) - } catch { + try + message match { + case ReplayedMessage(p) => + try { + eventSeenInInterval = true + updateLastSequenceNr(p) + Eventsourced.super.aroundReceive(recoveryBehavior, p) + } catch { + case NonFatal(t) => + timeoutCancellable.cancel() + try onRecoveryFailure(t, Some(p.payload)) + finally context.stop(self) + returnRecoveryPermit() + } + case RecoverySuccess(highestJournalSeqNr) => + timeoutCancellable.cancel() + onReplaySuccess() // callback for subclass implementation + val highestSeqNr = Math.max(highestJournalSeqNr, lastSequenceNr) + sequenceNr = highestSeqNr + setLastSequenceNr(highestSeqNr) + _recoveryRunning = false + try Eventsourced.super.aroundReceive(recoveryBehavior, RecoveryCompleted) + finally transitToProcessingState() // in finally in case exception and resume strategy + // if exception from RecoveryCompleted the permit is returned in below catch + returnRecoveryPermit() + case ReplayMessagesFailure(cause) => + timeoutCancellable.cancel() + try onRecoveryFailure(cause, event = None) + finally context.stop(self) + returnRecoveryPermit() + case RecoveryTick(false) if !eventSeenInInterval => + timeoutCancellable.cancel() + try + onRecoveryFailure( + new RecoveryTimedOut( + s"Recovery timed out, didn't get event within $timeout, highest sequence number seen $lastSequenceNr"), + event = None) + finally context.stop(self) + returnRecoveryPermit() + case RecoveryTick(false) => + eventSeenInInterval = false + case RecoveryTick(true) => + // 
snapshot tick, ignore + case other => + stashInternally(other) + } + catch { case NonFatal(e) => returnRecoveryPermit() throw e @@ -841,9 +827,7 @@ private[persistence] trait Eventsourced onPersistFailure(cause, p.payload, p.sequenceNr) } - /** - * Common receive handler for processingCommands and persistingEvents - */ + /** Common receive handler for processingCommands and persistingEvents */ private abstract class ProcessingState extends State { override def recoveryRunning: Boolean = false diff --git a/akka-persistence/src/main/scala/akka/persistence/JournalProtocol.scala b/akka-persistence/src/main/scala/akka/persistence/JournalProtocol.scala index 3226cb92910..577abe53dcf 100644 --- a/akka-persistence/src/main/scala/akka/persistence/JournalProtocol.scala +++ b/akka-persistence/src/main/scala/akka/persistence/JournalProtocol.scala @@ -155,12 +155,8 @@ private[persistence] object JournalProtocol { } -/** - * Reply message to a successful [[JournalProtocol.DeleteMessagesTo]] request. - */ +/** Reply message to a successful [[JournalProtocol.DeleteMessagesTo]] request. */ final case class DeleteMessagesSuccess(toSequenceNr: Long) extends JournalProtocol.Response -/** - * Reply message to a failed [[JournalProtocol.DeleteMessagesTo]] request. - */ +/** Reply message to a failed [[JournalProtocol.DeleteMessagesTo]] request. */ final case class DeleteMessagesFailure(cause: Throwable, toSequenceNr: Long) extends JournalProtocol.Response diff --git a/akka-persistence/src/main/scala/akka/persistence/Persistence.scala b/akka-persistence/src/main/scala/akka/persistence/Persistence.scala index c529d42cf34..319c7c19f22 100644 --- a/akka-persistence/src/main/scala/akka/persistence/Persistence.scala +++ b/akka-persistence/src/main/scala/akka/persistence/Persistence.scala @@ -24,9 +24,7 @@ import akka.util.Collections.EmptyImmutableSeq import akka.util.Helpers.ConfigOps import akka.util.Reflect -/** - * Persistence configuration. - */ +/** Persistence configuration. 
*/ final class PersistenceSettings(config: Config) { object atLeastOnceDelivery { @@ -60,15 +58,11 @@ final class PersistenceSettings(config: Config) { } -/** - * Identification of [[PersistentActor]]. - */ +/** Identification of [[PersistentActor]]. */ //#persistence-identity trait PersistenceIdentity { - /** - * Id of the persistent entity for which messages should be replayed. - */ + /** Id of the persistent entity for which messages should be replayed. */ def persistenceId: String /** @@ -91,7 +85,7 @@ trait PersistenceIdentity { //#persistence-identity trait PersistenceRecovery { - //#persistence-recovery + // #persistence-recovery /** * Called when the persistent actor is started for the first time. * The returned [[Recovery]] object defines how the Actor will recover its persistent state before @@ -101,7 +95,7 @@ trait PersistenceRecovery { */ def recovery: Recovery = Recovery() - //#persistence-recovery + // #persistence-recovery } trait PersistenceStash extends Stash with StashFactory { @@ -139,9 +133,7 @@ trait RuntimePluginConfig { def snapshotPluginConfig: Config } -/** - * Persistence extension provider. - */ +/** Persistence extension provider. */ object Persistence extends ExtensionId[Persistence] with ExtensionIdProvider { /** Java API. */ @@ -193,9 +185,7 @@ object Persistence extends ExtensionId[Persistence] with ExtensionIdProvider { } } -/** - * Persistence extension. - */ +/** Persistence extension. 
*/ class Persistence(val system: ExtendedActorSystem) extends Extension { import Persistence._ @@ -271,15 +261,11 @@ class Persistence(val system: ExtendedActorSystem) extends Extension { } }) - /** - * @throws IllegalArgumentException if `configPath` doesn't exist - */ + /** @throws IllegalArgumentException if `configPath` doesn't exist */ private def verifyJournalPluginConfigExists(pluginConfig: Config, configPath: String): Unit = verifyPluginConfigExists(pluginConfig.withFallback(system.settings.config), configPath, "Journal") - /** - * @throws IllegalArgumentException if `configPath` doesn't exist - */ + /** @throws IllegalArgumentException if `configPath` doesn't exist */ private def verifySnapshotPluginConfigExists(pluginConfig: Config, configPath: String): Unit = verifyPluginConfigExists(pluginConfig.withFallback(system.settings.config), configPath, "Snapshot store") @@ -400,18 +386,19 @@ class Persistence(val system: ExtendedActorSystem) extends Extension { log.debug(s"Create plugin: $pluginActorName $pluginClassName") val pluginClass = system.dynamicAccess.getClassFor[Any](pluginClassName).get val pluginDispatcherId = pluginConfig.getString("plugin-dispatcher") - val pluginActorArgs: List[AnyRef] = try { - Reflect.findConstructor(pluginClass, List(pluginConfig, configPath)) // will throw if not found - List(pluginConfig, configPath) - } catch { - case NonFatal(_) => - try { - Reflect.findConstructor(pluginClass, List(pluginConfig)) // will throw if not found - List(pluginConfig) - } catch { - case NonFatal(_) => Nil - } // otherwise use empty constructor - } + val pluginActorArgs: List[AnyRef] = + try { + Reflect.findConstructor(pluginClass, List(pluginConfig, configPath)) // will throw if not found + List(pluginConfig, configPath) + } catch { + case NonFatal(_) => + try { + Reflect.findConstructor(pluginClass, List(pluginConfig)) // will throw if not found + List(pluginConfig) + } catch { + case NonFatal(_) => Nil + } // otherwise use empty constructor + 
} val pluginActorProps = Props(Deploy(dispatcher = pluginDispatcherId), pluginClass, pluginActorArgs) system.systemActorOf(pluginActorProps, pluginActorName) } @@ -473,7 +460,7 @@ class Persistence(val system: ExtendedActorSystem) extends Extension { numberOfRanges * rangeSize == numberOfSlices, s"numberOfRanges [$numberOfRanges] must be a whole number divisor of numberOfSlices [$numberOfSlices].") (0 until numberOfRanges).map { i => - (i * rangeSize until i * rangeSize + rangeSize) + i * rangeSize until i * rangeSize + rangeSize }.toVector } diff --git a/akka-persistence/src/main/scala/akka/persistence/PersistencePlugin.scala b/akka-persistence/src/main/scala/akka/persistence/PersistencePlugin.scala index e5eecca091d..c26d15a8dd6 100644 --- a/akka-persistence/src/main/scala/akka/persistence/PersistencePlugin.scala +++ b/akka-persistence/src/main/scala/akka/persistence/PersistencePlugin.scala @@ -17,9 +17,7 @@ import akka.annotation.InternalApi import akka.event.Logging import akka.persistence.PersistencePlugin.PluginHolder -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object PersistencePlugin { final private[persistence] case class PluginHolder[ScalaDsl, JavaDsl]( @@ -28,21 +26,17 @@ private[akka] object PersistencePlugin { extends Extension } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] trait PluginProvider[T, ScalaDsl, JavaDsl] { def scalaDsl(t: T): ScalaDsl def javaDsl(t: T): JavaDsl } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi -private[akka] abstract class PersistencePlugin[ScalaDsl, JavaDsl, T: ClassTag](system: ExtendedActorSystem)( - implicit ev: PluginProvider[T, ScalaDsl, JavaDsl]) { +private[akka] abstract class PersistencePlugin[ScalaDsl, JavaDsl, T: ClassTag](system: ExtendedActorSystem)(implicit + ev: PluginProvider[T, ScalaDsl, JavaDsl]) { private val plugins = new AtomicReference[Map[String, ExtensionId[PluginHolder[ScalaDsl, JavaDsl]]]](Map.empty) private val log = 
Logging(system, classOf[PersistencePlugin[_, _, _]]) @@ -82,19 +76,17 @@ private[akka] abstract class PersistencePlugin[ScalaDsl, JavaDsl, T: ClassTag](s instantiate( (classOf[ExtendedActorSystem], system) :: (classOf[Config], pluginConfig) :: (classOf[String], configPath) :: Nil) - .recoverWith { - case _: NoSuchMethodException => - instantiate((classOf[ExtendedActorSystem], system) :: (classOf[Config], pluginConfig) :: Nil) + .recoverWith { case _: NoSuchMethodException => + instantiate((classOf[ExtendedActorSystem], system) :: (classOf[Config], pluginConfig) :: Nil) } .recoverWith { case _: NoSuchMethodException => instantiate((classOf[ExtendedActorSystem], system) :: Nil) } .recoverWith { case _: NoSuchMethodException => instantiate(Nil) } - .recoverWith { - case ex: Exception => - Failure.apply( - new IllegalArgumentException( - "Unable to create read journal plugin instance for path " + - s"[$configPath], class [$pluginClassName]!", - ex)) + .recoverWith { case ex: Exception => + Failure.apply( + new IllegalArgumentException( + "Unable to create read journal plugin instance for path " + + s"[$configPath], class [$pluginClassName]!", + ex)) } .get } diff --git a/akka-persistence/src/main/scala/akka/persistence/Persistent.scala b/akka-persistence/src/main/scala/akka/persistence/Persistent.scala index 77ce358eccd..4d45b5a189e 100644 --- a/akka-persistence/src/main/scala/akka/persistence/Persistent.scala +++ b/akka-persistence/src/main/scala/akka/persistence/Persistent.scala @@ -42,10 +42,10 @@ final case class AtomicWrite(payload: immutable.Seq[PersistentRepr]) extends Per // only check that all persistenceIds are equal when there's more than one in the Seq if (payload match { - case l: List[PersistentRepr] => l.tail.nonEmpty // avoids calling .size - case v: Vector[PersistentRepr] => v.size > 1 - case _ => true // some other collection type, let's just check - }) payload.foreach { pr => + case l: List[PersistentRepr] => l.tail.nonEmpty // avoids calling .size + 
case v: Vector[PersistentRepr] => v.size > 1 + case _ => true // some other collection type, let's just check + }) payload.foreach { pr => if (pr.persistenceId != payload.head.persistenceId) throw new IllegalArgumentException( "AtomicWrite must contain messages for the same persistenceId, " + @@ -70,9 +70,7 @@ final case class AtomicWrite(payload: immutable.Seq[PersistentRepr]) extends Per */ @DoNotInherit trait PersistentRepr extends Message { - /** - * This persistent message's payload (the event). - */ + /** This persistent message's payload (the event). */ def payload: Any /** @@ -82,14 +80,10 @@ final case class AtomicWrite(payload: immutable.Seq[PersistentRepr]) extends Per */ def manifest: String - /** - * Persistent id that journals a persistent message - */ + /** Persistent id that journals a persistent message */ def persistenceId: String - /** - * This persistent message's sequence number. - */ + /** This persistent message's sequence number. */ def sequenceNr: Long /** @@ -113,14 +107,10 @@ final case class AtomicWrite(payload: immutable.Seq[PersistentRepr]) extends Per */ def writerUuid: String - /** - * Creates a new persistent message with the specified `payload` (event). - */ + /** Creates a new persistent message with the specified `payload` (event). */ def withPayload(payload: Any): PersistentRepr - /** - * Creates a new persistent message with the specified event adapter `manifest`. - */ + /** Creates a new persistent message with the specified event adapter `manifest`. */ def withManifest(manifest: String): PersistentRepr /** @@ -132,14 +122,10 @@ final case class AtomicWrite(payload: immutable.Seq[PersistentRepr]) extends Per */ def deleted: Boolean // FIXME deprecate, issue #27278 - /** - * Not used, can be `null` - */ + /** Not used, can be `null` */ def sender: ActorRef // FIXME deprecate, issue #27278 - /** - * Creates a new copy of this [[PersistentRepr]]. - */ + /** Creates a new copy of this [[PersistentRepr]]. 
*/ def update( sequenceNr: Long = sequenceNr, persistenceId: String = persistenceId, @@ -156,9 +142,7 @@ object PersistentRepr { /** Plugin API: value of an undefined / identity event adapter. */ val UndefinedId = 0 - /** - * Plugin API. - */ + /** Plugin API. */ def apply( payload: Any, sequenceNr: Long = 0L, @@ -169,21 +153,15 @@ object PersistentRepr { writerUuid: String = PersistentRepr.Undefined): PersistentRepr = PersistentImpl(payload, sequenceNr, persistenceId, manifest, deleted, sender, writerUuid, 0L, None) - /** - * Java API, Plugin API. - */ + /** Java API, Plugin API. */ def create = apply _ - /** - * extractor of payload and sequenceNr. - */ + /** extractor of payload and sequenceNr. */ def unapply(persistent: PersistentRepr): Option[(Any, Long)] = Some((persistent.payload, persistent.sequenceNr)) } -/** - * INTERNAL API. - */ +/** INTERNAL API. */ private[persistence] final case class PersistentImpl( override val payload: Any, override val sequenceNr: Long, diff --git a/akka-persistence/src/main/scala/akka/persistence/PersistentActor.scala b/akka-persistence/src/main/scala/akka/persistence/PersistentActor.scala index 84ca9f8e8fd..69eaf5c0110 100644 --- a/akka-persistence/src/main/scala/akka/persistence/PersistentActor.scala +++ b/akka-persistence/src/main/scala/akka/persistence/PersistentActor.scala @@ -18,15 +18,11 @@ import akka.japi.Util abstract class RecoveryCompleted -/** - * Sent to a [[PersistentActor]] when the journal replay has been finished. - */ +/** Sent to a [[PersistentActor]] when the journal replay has been finished. */ @SerialVersionUID(1L) case object RecoveryCompleted extends RecoveryCompleted { - /** - * Java API: get the singleton instance - */ + /** Java API: get the singleton instance */ def getInstance = this } @@ -111,14 +107,10 @@ final class RecoveryTimedOut(message: String) extends RuntimeException(message) */ sealed trait StashOverflowStrategy -/** - * Discard the message to [[akka.actor.DeadLetter]]. 
- */ +/** Discard the message to [[akka.actor.DeadLetter]]. */ case object DiscardToDeadLetterStrategy extends StashOverflowStrategy { - /** - * Java API: get the singleton instance - */ + /** Java API: get the singleton instance */ def getInstance = this } @@ -130,9 +122,7 @@ case object DiscardToDeadLetterStrategy extends StashOverflowStrategy { */ case object ThrowOverflowExceptionStrategy extends StashOverflowStrategy { - /** - * Java API: get the singleton instance - */ + /** Java API: get the singleton instance */ def getInstance = this } @@ -159,9 +149,7 @@ final class DiscardConfigurator extends StashOverflowStrategyConfigurator { override def create(config: Config) = DiscardToDeadLetterStrategy } -/** - * Scala API: A persistent Actor - can be used to implement command or Event Sourcing. - */ +/** Scala API: A persistent Actor - can be used to implement command or Event Sourcing. */ trait PersistentActor extends Eventsourced with PersistenceIdentity { def receive = receiveCommand @@ -289,9 +277,7 @@ trait PersistentActor extends Eventsourced with PersistenceIdentity { } } -/** - * Java API: an persistent actor - can be used to implement command or Event Sourcing. - */ +/** Java API: a persistent actor - can be used to implement command or Event Sourcing. */ abstract class AbstractPersistentActor extends AbstractActor with AbstractPersistentActorLike { /** @@ -323,9 +309,7 @@ abstract class AbstractPersistentActor extends AbstractActor with AbstractPersis // AbstractPersistentActorLike. They were included here also for binary compatibility reasons. } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] trait AbstractPersistentActorLike extends Eventsourced { /** @@ -472,7 +456,5 @@ abstract class AbstractPersistentActor extends AbstractActor with AbstractPersis } -/** - * Java API: Combination of [[AbstractPersistentActor]] and [[akka.actor.AbstractActorWithTimers]]. 
- */ +/** Java API: Combination of [[AbstractPersistentActor]] and [[akka.actor.AbstractActorWithTimers]]. */ abstract class AbstractPersistentActorWithTimers extends AbstractActor with Timers with AbstractPersistentActorLike diff --git a/akka-persistence/src/main/scala/akka/persistence/RecoveryPermitter.scala b/akka-persistence/src/main/scala/akka/persistence/RecoveryPermitter.scala index 677310afd84..a94cb2d963c 100644 --- a/akka-persistence/src/main/scala/akka/persistence/RecoveryPermitter.scala +++ b/akka-persistence/src/main/scala/akka/persistence/RecoveryPermitter.scala @@ -12,9 +12,7 @@ import akka.actor.Terminated import akka.annotation.{ InternalApi, InternalStableApi } import akka.util.MessageBuffer -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object RecoveryPermitter { def props(maxPermits: Int): Props = Props(new RecoveryPermitter(maxPermits)) diff --git a/akka-persistence/src/main/scala/akka/persistence/SnapshotProtocol.scala b/akka-persistence/src/main/scala/akka/persistence/SnapshotProtocol.scala index 239cc9ac510..0e87402b95d 100644 --- a/akka-persistence/src/main/scala/akka/persistence/SnapshotProtocol.scala +++ b/akka-persistence/src/main/scala/akka/persistence/SnapshotProtocol.scala @@ -168,15 +168,11 @@ final case class SnapshotSelectionCriteria( minSequenceNr: Long = 0L, minTimestamp: Long = 0L) { - /** - * INTERNAL API. - */ + /** INTERNAL API. */ private[persistence] def limit(toSequenceNr: Long): SnapshotSelectionCriteria = if (toSequenceNr < maxSequenceNr) copy(maxSequenceNr = toSequenceNr) else this - /** - * INTERNAL API. - */ + /** INTERNAL API. */ private[persistence] def matches(metadata: SnapshotMetadata): Boolean = metadata.sequenceNr <= maxSequenceNr && metadata.timestamp <= maxTimestamp && metadata.sequenceNr >= minSequenceNr && metadata.timestamp >= minTimestamp @@ -184,36 +180,24 @@ final case class SnapshotSelectionCriteria( object SnapshotSelectionCriteria { - /** - * The latest saved snapshot. 
- */ + /** The latest saved snapshot. */ val Latest = SnapshotSelectionCriteria() - /** - * No saved snapshot matches. - */ + /** No saved snapshot matches. */ val None = SnapshotSelectionCriteria(0L, 0L) - /** - * Java API. - */ + /** Java API. */ def create(maxSequenceNr: Long, maxTimestamp: Long) = SnapshotSelectionCriteria(maxSequenceNr, maxTimestamp) - /** - * Java API. - */ + /** Java API. */ def create(maxSequenceNr: Long, maxTimestamp: Long, minSequenceNr: Long, minTimestamp: Long) = SnapshotSelectionCriteria(maxSequenceNr, maxTimestamp, minSequenceNr, minTimestamp) - /** - * Java API. - */ + /** Java API. */ def latest() = Latest - /** - * Java API. - */ + /** Java API. */ def none() = None } @@ -227,9 +211,7 @@ final case class SelectedSnapshot(metadata: SnapshotMetadata, snapshot: Any) object SelectedSnapshot { - /** - * Java API, Plugin API. - */ + /** Java API, Plugin API. */ def create(metadata: SnapshotMetadata, snapshot: Any): SelectedSnapshot = SelectedSnapshot(metadata, snapshot) } diff --git a/akka-persistence/src/main/scala/akka/persistence/Snapshotter.scala b/akka-persistence/src/main/scala/akka/persistence/Snapshotter.scala index 8378afdca07..675ba8f20ca 100644 --- a/akka-persistence/src/main/scala/akka/persistence/Snapshotter.scala +++ b/akka-persistence/src/main/scala/akka/persistence/Snapshotter.scala @@ -7,22 +7,16 @@ package akka.persistence import akka.actor._ import akka.persistence.SnapshotProtocol._ -/** - * Snapshot API on top of the internal snapshot protocol. - */ +/** Snapshot API on top of the internal snapshot protocol. */ trait Snapshotter extends Actor { /** Snapshot store plugin actor. */ private[persistence] def snapshotStore: ActorRef - /** - * Snapshotter id. - */ + /** Snapshotter id. */ def snapshotterId: String - /** - * Sequence number to use when taking a snapshot. - */ + /** Sequence number to use when taking a snapshot. 
*/ def snapshotSequenceNr: Long /** diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/AsyncRecovery.scala b/akka-persistence/src/main/scala/akka/persistence/journal/AsyncRecovery.scala index dd3a596bd2d..dea88b2a78f 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/AsyncRecovery.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/AsyncRecovery.scala @@ -8,11 +8,9 @@ import scala.concurrent.Future import akka.persistence.PersistentRepr -/** - * Asynchronous message replay and sequence number recovery interface. - */ +/** Asynchronous message replay and sequence number recovery interface. */ trait AsyncRecovery { - //#journal-plugin-api + // #journal-plugin-api /** * Plugin API: asynchronously replays persistent messages. Implementations replay * a message by calling `replayCallback`. The returned future must be completed @@ -68,5 +66,5 @@ trait AsyncRecovery { * snapshot or `0L` if no snapshot is used. */ def asyncReadHighestSequenceNr(persistenceId: String, fromSequenceNr: Long): Future[Long] - //#journal-plugin-api + // #journal-plugin-api } diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala b/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala index 41ff60c4a99..24d524b2a24 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala @@ -17,9 +17,7 @@ import akka.pattern.pipe import akka.persistence._ import akka.util.Helpers.toRootLowerCase -/** - * Abstract journal, optimized for asynchronous, non-blocking writes. - */ +/** Abstract journal, optimized for asynchronous, non-blocking writes. 
*/ trait AsyncWriteJournal extends Actor with WriteJournalBase with AsyncRecovery { import AsyncWriteJournal._ import JournalProtocol._ @@ -176,8 +174,8 @@ trait AsyncWriteJournal extends Actor with WriteJournalBase with AsyncRecovery { .map { highSeqNr => RecoverySuccess(highSeqNr) } - .recover { - case e => ReplayMessagesFailure(e) + .recover { case e => + ReplayMessagesFailure(e) } .pipeTo(replyTo) .foreach { _ => @@ -190,8 +188,8 @@ trait AsyncWriteJournal extends Actor with WriteJournalBase with AsyncRecovery { .map { _ => DeleteMessagesSuccess(toSequenceNr) } - .recover { - case e => DeleteMessagesFailure(e, toSequenceNr) + .recover { case e => + DeleteMessagesFailure(e, toSequenceNr) } .pipeTo(persistentActor) .onComplete { _ => @@ -200,7 +198,7 @@ trait AsyncWriteJournal extends Actor with WriteJournalBase with AsyncRecovery { } } - //#journal-plugin-api + // #journal-plugin-api /** * Plugin API: asynchronously writes a batch (`Seq`) of persistent messages to the * journal. @@ -285,16 +283,13 @@ trait AsyncWriteJournal extends Actor with WriteJournalBase with AsyncRecovery { * * Allows plugin implementers to use `f pipeTo self` and * handle additional messages for implementing advanced features - * */ def receivePluginInternal: Actor.Receive = Actor.emptyBehavior - //#journal-plugin-api + // #journal-plugin-api } -/** - * INTERNAL API. - */ +/** INTERNAL API. 
*/ private[persistence] object AsyncWriteJournal { val successUnit: Success[Unit] = Success(()) @@ -307,8 +302,8 @@ private[persistence] object AsyncWriteJournal { private val delayed = Map.empty[Long, Desequenced] private var delivered = 0L - def receive = { - case d: Desequenced => resequence(d) + def receive = { case d: Desequenced => + resequence(d) } @scala.annotation.tailrec diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteProxy.scala b/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteProxy.scala index 5afeab7711c..bccdc322180 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteProxy.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteProxy.scala @@ -94,17 +94,13 @@ private[persistence] trait AsyncWriteProxy extends AsyncWriteJournal with Stash } -/** - * INTERNAL API. - */ +/** INTERNAL API. */ private[persistence] object AsyncWriteProxy { final case class SetStore(ref: ActorRef) case object InitTimeout } -/** - * INTERNAL API. - */ +/** INTERNAL API. */ private[persistence] object AsyncWriteTarget { @SerialVersionUID(1L) final case class WriteMessages(messages: immutable.Seq[AtomicWrite]) @@ -123,9 +119,7 @@ private[persistence] object AsyncWriteTarget { } -/** - * Thrown if replay inactivity exceeds a specified timeout. - */ +/** Thrown if replay inactivity exceeds a specified timeout. 
*/ @SerialVersionUID(1L) class AsyncReplayTimeoutException(msg: String) extends AkkaException(msg) diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapter.scala b/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapter.scala index b4d9f37d275..1817204d752 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapter.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapter.scala @@ -31,7 +31,7 @@ trait EventAdapter extends WriteEventAdapter with ReadEventAdapter * */ trait WriteEventAdapter { - //#event-adapter-api + // #event-adapter-api /** * Return the manifest (type hint) that will be provided in the `fromJournal` method. * Use `""` if manifest is not needed. @@ -52,7 +52,7 @@ trait WriteEventAdapter { * @return the adapted event object, possibly the same object if no adaptation was performed */ def toJournal(event: Any): Any - //#event-adapter-api + // #event-adapter-api } /** @@ -66,7 +66,7 @@ trait WriteEventAdapter { * */ trait ReadEventAdapter { - //#event-adapter-api + // #event-adapter-api /** * Convert a event from its journal model to the applications domain model. 
* @@ -81,7 +81,7 @@ trait ReadEventAdapter { * @return sequence containing the adapted events (possibly zero) which will be delivered to the PersistentActor */ def fromJournal(event: Any, manifest: String): EventSeq - //#event-adapter-api + // #event-adapter-api } sealed abstract class EventSeq { diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapters.scala b/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapters.scala index b78c56ae54b..c408b85c9ea 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapters.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapters.scala @@ -18,9 +18,7 @@ import akka.actor.ExtendedActorSystem import akka.event.{ Logging, LoggingAdapter } import akka.util.ccompat._ -/** - * `EventAdapters` serves as a per-journal collection of bound event adapters. - */ +/** `EventAdapters` serves as a per-journal collection of bound event adapters. */ @ccompatUsedUntil213 class EventAdapters( map: ConcurrentHashMap[Class[_], EventAdapter], @@ -85,7 +83,7 @@ private[akka] object EventAdapters { } require( adapterNames(boundAdapter.toString), s"$fqn was bound to undefined event-adapter: $boundAdapter (bindings: ${boundToAdapters - .mkString("[", ", ", "]")}, known adapters: ${adapters.keys.mkString})") + .mkString("[", ", ", "]")}, known adapters: ${adapters.keys.mkString})") // A Map of handler from alias to implementation (i.e. 
class implementing akka.serialization.Serializer) // For example this defines a handler named 'country': `"country" -> com.example.comain.CountryTagsAdapter` @@ -106,8 +104,8 @@ private[akka] object EventAdapters { sort(bs) } - val backing = bindings.foldLeft(new ConcurrentHashMap[Class[_], EventAdapter]) { - case (map, (c, s)) => map.put(c, s); map + val backing = bindings.foldLeft(new ConcurrentHashMap[Class[_], EventAdapter]) { case (map, (c, s)) => + map.put(c, s); map } new EventAdapters(backing, bindings, Logging(system, classOf[EventAdapters])) @@ -133,7 +131,9 @@ private[akka] object EventAdapters { override def toJournal(event: Any): Any = throw onlyReadSideException override def fromJournal(event: Any, manifest: String): EventSeq = - EventSeq(adapters.flatMap(_.fromJournal(event, manifest).events): _*) // TODO could we could make EventSeq flatMappable + EventSeq( + adapters.flatMap(_.fromJournal(event, manifest).events): _* + ) // TODO could we could make EventSeq flatMappable override def toString = s"CombinedReadEventAdapter(${adapters.map(_.getClass.getCanonicalName).mkString(",")})" @@ -154,13 +154,12 @@ private[akka] object EventAdapters { */ private def sort[T](in: Iterable[(Class[_], T)]): immutable.Seq[(Class[_], T)] = in.foldLeft(new ArrayBuffer[(Class[_], T)](in.size)) { (buf, ca) => - buf.indexWhere(_._1.isAssignableFrom(ca._1)) match { - case -1 => buf.append(ca) - case x => buf.insert(x, ca) - } - buf + buf.indexWhere(_._1.isAssignableFrom(ca._1)) match { + case -1 => buf.append(ca) + case x => buf.insert(x, ca) } - .to(immutable.Seq) + buf + }.to(immutable.Seq) private final def configToMap(config: Config, path: String): Map[String, String] = { import akka.util.ccompat.JavaConverters._ diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/ReplayFilter.scala b/akka-persistence/src/main/scala/akka/persistence/journal/ReplayFilter.scala index 43e5d760f84..8b7f6ec1d75 100644 --- 
a/akka-persistence/src/main/scala/akka/persistence/journal/ReplayFilter.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/ReplayFilter.scala @@ -44,9 +44,7 @@ private[akka] object ReplayFilter { case object Disabled extends Mode } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] class ReplayFilter( persistentActor: ActorRef, mode: ReplayFilter.Mode, @@ -80,7 +78,8 @@ private[akka] class ReplayFilter( if (r.persistent.writerUuid == writerUuid) { // from same writer if (r.persistent.sequenceNr < seqNo) { - val errMsg = s"Invalid replayed event [sequenceNr=${r.persistent.sequenceNr}, writerUUID=${r.persistent.writerUuid}] as " + + val errMsg = + s"Invalid replayed event [sequenceNr=${r.persistent.sequenceNr}, writerUUID=${r.persistent.writerUuid}] as " + s"the sequenceNr should be equal to or greater than already-processed event [sequenceNr=${seqNo}, writerUUID=${writerUuid}] from the same writer, for the same persistenceId [${r.persistent.persistenceId}]. " + "Perhaps, events were journaled out of sequence, or duplicate persistenceId for different entities?" logIssue(errMsg) @@ -98,7 +97,8 @@ private[akka] class ReplayFilter( } else if (oldWriters.contains(r.persistent.writerUuid)) { // from old writer - val errMsg = s"Invalid replayed event [sequenceNr=${r.persistent.sequenceNr}, writerUUID=${r.persistent.writerUuid}]. " + + val errMsg = + s"Invalid replayed event [sequenceNr=${r.persistent.sequenceNr}, writerUUID=${r.persistent.writerUuid}]. " + s"There was already a newer writer whose last replayed event was [sequenceNr=${seqNo}, writerUUID=${writerUuid}] for the same persistenceId [${r.persistent.persistenceId}]." + "Perhaps, the old writer kept journaling messages after the new writer created, or duplicate persistenceId for different entities?" 
logIssue(errMsg) @@ -124,7 +124,8 @@ private[akka] class ReplayFilter( while (iter.hasNext()) { val msg = iter.next() if (msg.persistent.sequenceNr >= seqNo) { - val errMsg = s"Invalid replayed event [sequenceNr=${r.persistent.sequenceNr}, writerUUID=${r.persistent.writerUuid}] from a new writer. " + + val errMsg = + s"Invalid replayed event [sequenceNr=${r.persistent.sequenceNr}, writerUUID=${r.persistent.writerUuid}] from a new writer. " + s"An older writer already sent an event [sequenceNr=${msg.persistent.sequenceNr}, writerUUID=${msg.persistent.writerUuid}] whose sequence number was equal or greater for the same persistenceId [${r.persistent.persistenceId}]. " + "Perhaps, the new writer journaled the event out of sequence, or duplicate persistenceId for different entities?" logIssue(errMsg) diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/Tagged.scala b/akka-persistence/src/main/scala/akka/persistence/journal/Tagged.scala index 610c2db8bed..1e98dd6127c 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/Tagged.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/Tagged.scala @@ -18,9 +18,7 @@ import akka.util.ccompat.JavaConverters._ */ case class Tagged(payload: Any, tags: Set[String]) { - /** - * Java API - */ + /** Java API */ def this(payload: Any, tags: java.util.Set[String]) = { this(payload, tags.asScala.toSet) } diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/inmem/InmemJournal.scala b/akka-persistence/src/main/scala/akka/persistence/journal/inmem/InmemJournal.scala index 3aee420b89d..9440a965420 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/inmem/InmemJournal.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/inmem/InmemJournal.scala @@ -108,8 +108,8 @@ object InmemJournal { recoveryCallback: PersistentRepr => Unit): Future[Unit] = { val highest = highestSequenceNr(persistenceId) if (highest != 0L && max != 0L) - read(persistenceId, 
fromSequenceNr, math.min(toSequenceNr, highest), max).foreach { - case (pr, _) => recoveryCallback(pr) + read(persistenceId, fromSequenceNr, math.min(toSequenceNr, highest), max).foreach { case (pr, _) => + recoveryCallback(pr) } Future.successful(()) } @@ -130,8 +130,8 @@ object InmemJournal { log.debug("ReplayWithMeta {} {} {} {}", fromSequenceNr, toSequenceNr, max, persistenceId) val highest = highestSequenceNr(persistenceId) if (highest != 0L && max != 0L) { - read(persistenceId, fromSequenceNr, math.min(toSequenceNr, highest), max).foreach { - case (pr, meta) => replyTo ! MessageWithMeta(pr, meta) + read(persistenceId, fromSequenceNr, math.min(toSequenceNr, highest), max).foreach { case (pr, meta) => + replyTo ! MessageWithMeta(pr, meta) } } replyTo ! RecoverySuccess(highest) @@ -149,9 +149,7 @@ object InmemJournal { } } -/** - * INTERNAL API. - */ +/** INTERNAL API. */ @InternalApi private[persistence] trait InmemMessages { // persistenceId -> persistent message var messages = Map.empty[String, Vector[(PersistentRepr, OptionVal[Any])]] @@ -165,9 +163,9 @@ object InmemJournal { } messages = messages + (messages.get(p.persistenceId) match { - case Some(ms) => p.persistenceId -> (ms :+ pr) - case None => p.persistenceId -> Vector(pr) - }) + case Some(ms) => p.persistenceId -> (ms :+ pr) + case None => p.persistenceId -> Vector(pr) + }) highestSequenceNumbers = highestSequenceNumbers.updated(p.persistenceId, math.max(highestSequenceNr(p.persistenceId), p.sequenceNr)) } diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/japi/AsyncRecovery.scala b/akka-persistence/src/main/scala/akka/persistence/journal/japi/AsyncRecovery.scala index a6c1c065646..95eace52778 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/japi/AsyncRecovery.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/japi/AsyncRecovery.scala @@ -12,17 +12,20 @@ import akka.actor.Actor import akka.persistence.PersistentRepr import 
akka.persistence.journal.{ AsyncRecovery => SAsyncReplay } -/** - * Java API: asynchronous message replay and sequence number recovery interface. - */ +/** Java API: asynchronous message replay and sequence number recovery interface. */ abstract class AsyncRecovery extends SAsyncReplay with AsyncRecoveryPlugin { this: Actor => import context.dispatcher final def asyncReplayMessages(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)( replayCallback: (PersistentRepr) => Unit) = - doAsyncReplayMessages(persistenceId, fromSequenceNr, toSequenceNr, max, new Consumer[PersistentRepr] { - def accept(p: PersistentRepr) = replayCallback(p) - }).map(_ => ()) + doAsyncReplayMessages( + persistenceId, + fromSequenceNr, + toSequenceNr, + max, + new Consumer[PersistentRepr] { + def accept(p: PersistentRepr) = replayCallback(p) + }).map(_ => ()) final def asyncReadHighestSequenceNr(persistenceId: String, fromSequenceNr: Long): Future[Long] = doAsyncReadHighestSequenceNr(persistenceId, fromSequenceNr: Long).map(_.longValue) diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/japi/AsyncWriteJournal.scala b/akka-persistence/src/main/scala/akka/persistence/journal/japi/AsyncWriteJournal.scala index b5b5b105b84..88fe10fa51c 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/japi/AsyncWriteJournal.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/japi/AsyncWriteJournal.scala @@ -14,9 +14,7 @@ import akka.persistence.journal.{ AsyncWriteJournal => SAsyncWriteJournal } import akka.util.ccompat._ import akka.util.ccompat.JavaConverters._ -/** - * Java API: abstract journal, optimized for asynchronous, non-blocking writes. - */ +/** Java API: abstract journal, optimized for asynchronous, non-blocking writes. 
*/ @ccompatUsedUntil213 abstract class AsyncWriteJournal extends AsyncRecovery with SAsyncWriteJournal with AsyncWritePlugin { import SAsyncWriteJournal.successUnit diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbCompaction.scala b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbCompaction.scala index 69591728340..a26148f4bff 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbCompaction.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbCompaction.scala @@ -22,9 +22,8 @@ private[persistence] trait LeveldbCompaction extends Actor with ActorLogging wit import Key._ import LeveldbCompaction._ - def receiveCompactionInternal: Receive = { - case TryCompactLeveldb(persistenceId, toSeqNr) => - tryCompactOnDelete(persistenceId, toSeqNr) + def receiveCompactionInternal: Receive = { case TryCompactLeveldb(persistenceId, toSeqNr) => + tryCompactOnDelete(persistenceId, toSeqNr) } private def tryCompactOnDelete(persistenceId: String, toSeqNr: Long): Unit = { @@ -57,7 +56,6 @@ private[persistence] trait LeveldbCompaction extends Actor with ActorLogging wit * considered to be those which include sequence numbers up to 'toSeqNr' AND whose size is equal to N (the compaction * interval). This rule implies that if 'toSeqNr' spans an incomplete portion of a rightmost segment, then * that segment will be omitted from the pending compaction, and will be included into the next one. 
- * */ private[persistence] trait CompactionSegmentManagement { diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbJournal.scala b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbJournal.scala index 994fd4a3cbd..dbeadc311ca 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbJournal.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbJournal.scala @@ -44,19 +44,18 @@ private[persistence] class LeveldbJournal(cfg: Config) extends AsyncWriteJournal if (highSeqNr == 0L || fromSequenceNr > toSeqNr) Future.successful(highSeqNr) else { - asyncReplayTaggedMessages(tag, fromSequenceNr, toSeqNr, max) { - case ReplayedTaggedMessage(p, tag, offset) => - adaptFromJournal(p).foreach { adaptedPersistentRepr => - replyTo.tell(ReplayedTaggedMessage(adaptedPersistentRepr, tag, offset), Actor.noSender) - } + asyncReplayTaggedMessages(tag, fromSequenceNr, toSeqNr, max) { case ReplayedTaggedMessage(p, tag, offset) => + adaptFromJournal(p).foreach { adaptedPersistentRepr => + replyTo.tell(ReplayedTaggedMessage(adaptedPersistentRepr, tag, offset), Actor.noSender) + } }.map(_ => highSeqNr) } } .map { highSeqNr => RecoverySuccess(highSeqNr) } - .recover { - case e => ReplayMessagesFailure(e) + .recover { case e => + ReplayMessagesFailure(e) } .pipeTo(replyTo) @@ -74,9 +73,7 @@ private[persistence] class LeveldbJournal(cfg: Config) extends AsyncWriteJournal } } -/** - * INTERNAL API. - */ +/** INTERNAL API. 
*/ private[persistence] object LeveldbJournal { sealed trait SubscriptionCommand @@ -133,24 +130,21 @@ private[persistence] class SharedLeveldbJournal extends AsyncWriteProxy { val timeout: Timeout = context.system.settings.config.getMillisDuration("akka.persistence.journal.leveldb-shared.timeout") - override def receivePluginInternal: Receive = { - case cmd: LeveldbJournal.SubscriptionCommand => - // forward subscriptions, they are used by query-side - store match { - case Some(s) => s.forward(cmd) - case None => - log.error( - "Failed {} request. " + - "Store not initialized. Use `SharedLeveldbJournal.setStore(sharedStore, system)`", - cmd) - } + override def receivePluginInternal: Receive = { case cmd: LeveldbJournal.SubscriptionCommand => + // forward subscriptions, they are used by query-side + store match { + case Some(s) => s.forward(cmd) + case None => + log.error( + "Failed {} request. " + + "Store not initialized. Use `SharedLeveldbJournal.setStore(sharedStore, system)`", + cmd) + } } } -/** - * For testing only. - */ +/** For testing only. */ object SharedLeveldbJournal { /** diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbKey.scala b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbKey.scala index a3bd7a74c8c..dfd41641d35 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbKey.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbKey.scala @@ -6,9 +6,7 @@ package akka.persistence.journal.leveldb import java.nio.ByteBuffer -/** - * LevelDB key. - */ +/** LevelDB key. 
*/ private[leveldb] final case class Key(persistenceId: Int, sequenceNr: Long, mappingId: Int) private[leveldb] object Key { diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbRecovery.scala b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbRecovery.scala index c7b0970fb6e..97b1be3c5d8 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbRecovery.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbRecovery.scala @@ -64,7 +64,7 @@ private[persistence] trait LeveldbRecovery extends AsyncRecovery { this: Leveldb val nextEntry = iter.peekNext() val nextKey = keyFromBytes(nextEntry.getKey) if (key.persistenceId == nextKey.persistenceId && key.sequenceNr == nextKey.sequenceNr && isDeletionKey( - nextKey)) { + nextKey)) { iter.next() true } else false diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbStore.scala b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbStore.scala index 13f58c19198..2bc4b23ac74 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbStore.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbStore.scala @@ -30,9 +30,7 @@ private[persistence] object LeveldbStore { } } -/** - * INTERNAL API. - */ +/** INTERNAL API. 
*/ private[persistence] trait LeveldbStore extends Actor with WriteJournalBase @@ -79,26 +77,25 @@ private[persistence] trait LeveldbStore val result = Future.fromTry(Try { withBatch(batch => - messages.map { - a => - Try { - a.payload.foreach { p => - val (p2, tags) = p.payload match { - case Tagged(payload, tags) => - (p.withPayload(payload), tags) - case _ => (p, Set.empty[String]) - } - if (tags.nonEmpty && hasTagSubscribers) - allTags = allTags.union(tags) - - require( - !p2.persistenceId.startsWith(tagPersistenceIdPrefix), - s"persistenceId [${p.persistenceId}] must not start with $tagPersistenceIdPrefix") - addToMessageBatch(p2, tags, batch) + messages.map { a => + Try { + a.payload.foreach { p => + val (p2, tags) = p.payload match { + case Tagged(payload, tags) => + (p.withPayload(payload), tags) + case _ => (p, Set.empty[String]) } - if (hasPersistenceIdSubscribers) - persistenceIds += a.persistenceId + if (tags.nonEmpty && hasTagSubscribers) + allTags = allTags.union(tags) + + require( + !p2.persistenceId.startsWith(tagPersistenceIdPrefix), + s"persistenceId [${p.persistenceId}] must not start with $tagPersistenceIdPrefix") + addToMessageBatch(p2, tags, batch) } + if (hasPersistenceIdSubscribers) + persistenceIds += a.persistenceId + } }) }) @@ -113,29 +110,31 @@ private[persistence] trait LeveldbStore } def asyncDeleteMessagesTo(persistenceId: String, toSequenceNr: Long): Future[Unit] = - try Future.successful { - withBatch { batch => - val nid = numericId(persistenceId) - - // seek to first existing message - val fromSequenceNr = withIterator { iter => - val startKey = Key(nid, 1L, 0) - iter.seek(keyToBytes(startKey)) - if (iter.hasNext) keyFromBytes(iter.peekNext().getKey).sequenceNr else Long.MaxValue - } - - if (fromSequenceNr != Long.MaxValue) { - val toSeqNr = math.min(toSequenceNr, readHighestSequenceNr(nid)) - var sequenceNr = fromSequenceNr - while (sequenceNr <= toSeqNr) { - batch.delete(keyToBytes(Key(nid, sequenceNr, 0))) - sequenceNr += 1 + 
try + Future.successful { + withBatch { batch => + val nid = numericId(persistenceId) + + // seek to first existing message + val fromSequenceNr = withIterator { iter => + val startKey = Key(nid, 1L, 0) + iter.seek(keyToBytes(startKey)) + if (iter.hasNext) keyFromBytes(iter.peekNext().getKey).sequenceNr else Long.MaxValue } - self ! LeveldbCompaction.TryCompactLeveldb(persistenceId, toSeqNr) + if (fromSequenceNr != Long.MaxValue) { + val toSeqNr = math.min(toSequenceNr, readHighestSequenceNr(nid)) + var sequenceNr = fromSequenceNr + while (sequenceNr <= toSeqNr) { + batch.delete(keyToBytes(Key(nid, sequenceNr, 0))) + sequenceNr += 1 + } + + self ! LeveldbCompaction.TryCompactLeveldb(persistenceId, toSeqNr) + } } } - } catch { + catch { case NonFatal(e) => Future.failed(e) } diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/SharedLeveldbStore.scala b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/SharedLeveldbStore.scala index 25570688937..203f593daf2 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/SharedLeveldbStore.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/SharedLeveldbStore.scala @@ -79,8 +79,8 @@ class SharedLeveldbStore(cfg: Config) extends LeveldbStore { .map { highSeqNr => ReplaySuccess(highSeqNr) } - .recover { - case e => ReplayFailure(e) + .recover { case e => + ReplayFailure(e) } .pipeTo(replyTo) } diff --git a/akka-persistence/src/main/scala/akka/persistence/serialization/MessageSerializer.scala b/akka-persistence/src/main/scala/akka/persistence/serialization/MessageSerializer.scala index 8d2cd74b9d4..9a5bd1045a3 100644 --- a/akka-persistence/src/main/scala/akka/persistence/serialization/MessageSerializer.scala +++ b/akka-persistence/src/main/scala/akka/persistence/serialization/MessageSerializer.scala @@ -22,14 +22,10 @@ import akka.util.ccompat._ import scala.concurrent.duration.Duration import scala.concurrent.duration.FiniteDuration -/** - * 
Marker trait for all protobuf-serializable messages in `akka.persistence`. - */ +/** Marker trait for all protobuf-serializable messages in `akka.persistence`. */ trait Message extends Serializable -/** - * Protobuf serializer for [[akka.persistence.PersistentRepr]], [[akka.persistence.AtLeastOnceDelivery]] and [[akka.persistence.fsm.PersistentFSM.StateChangeEvent]] messages. - */ +/** Protobuf serializer for [[akka.persistence.PersistentRepr]], [[akka.persistence.AtLeastOnceDelivery]] and [[akka.persistence.fsm.PersistentFSM.StateChangeEvent]] messages. */ @ccompatUsedUntil213 @nowarn("msg=deprecated") class MessageSerializer(val system: ExtendedActorSystem) extends BaseSerializer { @@ -56,7 +52,7 @@ class MessageSerializer(val system: ExtendedActorSystem) extends BaseSerializer case a: AtLeastOnceDeliverySnapshot => atLeastOnceDeliverySnapshotBuilder(a).build.toByteArray case s: StateChangeEvent => stateChangeBuilder(s).build.toByteArray case p: PersistentFSMSnapshot[Any @unchecked] => persistentFSMSnapshotBuilder(p).build.toByteArray - case _ => throw new IllegalArgumentException(s"Can't serialize object of type ${o.getClass}") + case _ => throw new IllegalArgumentException(s"Can't serialize object of type ${o.getClass}") } /** @@ -74,7 +70,7 @@ class MessageSerializer(val system: ExtendedActorSystem) extends BaseSerializer atLeastOnceDeliverySnapshot(mf.AtLeastOnceDeliverySnapshot.parseFrom(bytes)) case PersistentStateChangeEventClass => stateChange(mf.PersistentStateChangeEvent.parseFrom(bytes)) case PersistentFSMSnapshotClass => persistentFSMSnapshot(mf.PersistentFSMSnapshot.parseFrom(bytes)) - case _ => throw new NotSerializableException(s"Can't deserialize object of type ${c}") + case _ => throw new NotSerializableException(s"Can't deserialize object of type ${c}") } } diff --git a/akka-persistence/src/main/scala/akka/persistence/serialization/PayloadSerializer.scala b/akka-persistence/src/main/scala/akka/persistence/serialization/PayloadSerializer.scala 
index 16477dea6b8..a2dce09d2b3 100644 --- a/akka-persistence/src/main/scala/akka/persistence/serialization/PayloadSerializer.scala +++ b/akka-persistence/src/main/scala/akka/persistence/serialization/PayloadSerializer.scala @@ -14,9 +14,7 @@ import akka.persistence.serialization.{ MessageFormats => mf } import akka.protobufv3.internal.ByteString import akka.protobufv3.internal.UnsafeByteOperations -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi final class PayloadSerializer(val system: ExtendedActorSystem) extends SerializerWithStringManifest diff --git a/akka-persistence/src/main/scala/akka/persistence/serialization/SnapshotSerializer.scala b/akka-persistence/src/main/scala/akka/persistence/serialization/SnapshotSerializer.scala index b6e9a274efd..7adafbca990 100644 --- a/akka-persistence/src/main/scala/akka/persistence/serialization/SnapshotSerializer.scala +++ b/akka-persistence/src/main/scala/akka/persistence/serialization/SnapshotSerializer.scala @@ -19,9 +19,7 @@ import akka.util.ByteString.UTF_8 @SerialVersionUID(1L) final case class Snapshot(data: Any) -/** - * [[Snapshot]] serializer. - */ +/** [[Snapshot]] serializer. 
*/ class SnapshotSerializer(val system: ExtendedActorSystem) extends BaseSerializer { override val includeManifest: Boolean = false @@ -58,7 +56,7 @@ class SnapshotSerializer(val system: ExtendedActorSystem) extends BaseSerializer val in = new ByteArrayInputStream(bytes) val serializerId = readInt(in) - if ((serializerId & 0xEDAC) == 0xEDAC) // Java Serialization magic value + if ((serializerId & 0xedac) == 0xedac) // Java Serialization magic value throw new NotSerializableException(s"Replaying snapshot from akka 2.3.x version is not supported any more") val remaining = in.available diff --git a/akka-persistence/src/main/scala/akka/persistence/serialization/package.scala b/akka-persistence/src/main/scala/akka/persistence/serialization/package.scala index 6fe169b9764..6aa24230a82 100644 --- a/akka-persistence/src/main/scala/akka/persistence/serialization/package.scala +++ b/akka-persistence/src/main/scala/akka/persistence/serialization/package.scala @@ -8,9 +8,7 @@ import java.io.{ ByteArrayOutputStream, InputStream } package object serialization { - /** - * Converts an input stream to a byte array. - */ + /** Converts an input stream to a byte array. */ def streamToBytes(inputStream: InputStream): Array[Byte] = { val len = 16384 val buf = new Array[Byte](len) diff --git a/akka-persistence/src/main/scala/akka/persistence/snapshot/SnapshotStore.scala b/akka-persistence/src/main/scala/akka/persistence/snapshot/SnapshotStore.scala index 74e58c8beb2..8839590bd85 100644 --- a/akka-persistence/src/main/scala/akka/persistence/snapshot/SnapshotStore.scala +++ b/akka-persistence/src/main/scala/akka/persistence/snapshot/SnapshotStore.scala @@ -13,9 +13,7 @@ import akka.pattern.CircuitBreaker import akka.pattern.pipe import akka.persistence._ -/** - * Abstract snapshot store. - */ +/** Abstract snapshot store. 
*/ trait SnapshotStore extends Actor with ActorLogging { import SnapshotProtocol._ @@ -46,8 +44,8 @@ trait SnapshotStore extends Actor with ActorLogging { .map { sso => LoadSnapshotResult(sso, toSequenceNr) } - .recover { - case e => LoadSnapshotFailed(e) + .recover { case e => + LoadSnapshotFailed(e) } .pipeTo(senderPersistentActor()) } @@ -59,8 +57,8 @@ trait SnapshotStore extends Actor with ActorLogging { .map { _ => SaveSnapshotSuccess(md) } - .recover { - case e => SaveSnapshotFailure(metadata, e) + .recover { case e => + SaveSnapshotFailure(metadata, e) } .to(self, senderPersistentActor()) @@ -76,15 +74,15 @@ trait SnapshotStore extends Actor with ActorLogging { case d @ DeleteSnapshot(metadata) => breaker .withCircuitBreaker(deleteAsync(metadata)) - .map { - case _ => DeleteSnapshotSuccess(metadata) + .map { case _ => + DeleteSnapshotSuccess(metadata) } - .recover { - case e => DeleteSnapshotFailure(metadata, e) + .recover { case e => + DeleteSnapshotFailure(metadata, e) } .pipeTo(self)(senderPersistentActor()) - .onComplete { - case _ => if (publish) eventStream.publish(d) + .onComplete { case _ => + if (publish) eventStream.publish(d) } case evt: DeleteSnapshotSuccess => @@ -97,15 +95,15 @@ trait SnapshotStore extends Actor with ActorLogging { case d @ DeleteSnapshots(persistenceId, criteria) => breaker .withCircuitBreaker(deleteAsync(persistenceId, criteria)) - .map { - case _ => DeleteSnapshotsSuccess(criteria) + .map { case _ => + DeleteSnapshotsSuccess(criteria) } - .recover { - case e => DeleteSnapshotsFailure(criteria, e) + .recover { case e => + DeleteSnapshotsFailure(criteria, e) } .pipeTo(self)(senderPersistentActor()) - .onComplete { - case _ => if (publish) eventStream.publish(d) + .onComplete { case _ => + if (publish) eventStream.publish(d) } case evt: DeleteSnapshotsFailure => @@ -123,7 +121,7 @@ trait SnapshotStore extends Actor with ActorLogging { private def tryReceivePluginInternal(evt: Any): Unit = if 
(receivePluginInternal.isDefinedAt(evt)) receivePluginInternal(evt) - //#snapshot-store-plugin-api + // #snapshot-store-plugin-api /** * Plugin API: asynchronously loads a snapshot. @@ -176,5 +174,5 @@ trait SnapshotStore extends Actor with ActorLogging { * handle additional messages for implementing advanced features */ def receivePluginInternal: Actor.Receive = Actor.emptyBehavior - //#snapshot-store-plugin-api + // #snapshot-store-plugin-api } diff --git a/akka-persistence/src/main/scala/akka/persistence/snapshot/japi/SnapshotStore.scala b/akka-persistence/src/main/scala/akka/persistence/snapshot/japi/SnapshotStore.scala index 8769c3e9ae7..3a988a0c13a 100644 --- a/akka-persistence/src/main/scala/akka/persistence/snapshot/japi/SnapshotStore.scala +++ b/akka-persistence/src/main/scala/akka/persistence/snapshot/japi/SnapshotStore.scala @@ -10,9 +10,7 @@ import akka.japi.Util._ import akka.persistence._ import akka.persistence.snapshot.{ SnapshotStore => SSnapshotStore } -/** - * Java API: abstract snapshot store. - */ +/** Java API: abstract snapshot store. 
*/ abstract class SnapshotStore extends SSnapshotStore with SnapshotStorePlugin { import context.dispatcher diff --git a/akka-persistence/src/main/scala/akka/persistence/snapshot/local/LocalSnapshotStore.scala b/akka-persistence/src/main/scala/akka/persistence/snapshot/local/LocalSnapshotStore.scala index 4adfd3d7a67..886a7a1400c 100644 --- a/akka-persistence/src/main/scala/akka/persistence/snapshot/local/LocalSnapshotStore.scala +++ b/akka-persistence/src/main/scala/akka/persistence/snapshot/local/LocalSnapshotStore.scala @@ -161,8 +161,8 @@ private[persistence] class LocalSnapshotStore(config: Config) extends SnapshotSt files .map(_.getName) .flatMap { filename => - extractMetadata(filename).map { - case (pid, snr, tms) => SnapshotMetadata(URLDecoder.decode(pid, UTF_8), snr, tms) + extractMetadata(filename).map { case (pid, snr, tms) => + SnapshotMetadata(URLDecoder.decode(pid, UTF_8), snr, tms) } } .filter(md => criteria.matches(md) && !saving.contains(md)) diff --git a/akka-persistence/src/main/scala/akka/persistence/state/DurableStateStoreRegistry.scala b/akka-persistence/src/main/scala/akka/persistence/state/DurableStateStoreRegistry.scala index 260594bdf1e..009d1a91c69 100644 --- a/akka-persistence/src/main/scala/akka/persistence/state/DurableStateStoreRegistry.scala +++ b/akka-persistence/src/main/scala/akka/persistence/state/DurableStateStoreRegistry.scala @@ -21,9 +21,7 @@ import akka.persistence.PluginProvider import akka.persistence.state.scaladsl.DurableStateStore import akka.util.unused -/** - * Persistence extension for queries. - */ +/** Persistence extension for queries. 
*/ object DurableStateStoreRegistry extends ExtensionId[DurableStateStoreRegistry] with ExtensionIdProvider { override def get(system: ActorSystem): DurableStateStoreRegistry = super.get(system) diff --git a/akka-persistence/src/main/scala/akka/persistence/state/javadsl/DurableStateUpdateStore.scala b/akka-persistence/src/main/scala/akka/persistence/state/javadsl/DurableStateUpdateStore.scala index 8c0c029efe1..f7f03c07ce9 100644 --- a/akka-persistence/src/main/scala/akka/persistence/state/javadsl/DurableStateUpdateStore.scala +++ b/akka-persistence/src/main/scala/akka/persistence/state/javadsl/DurableStateUpdateStore.scala @@ -15,9 +15,7 @@ import akka.Done */ trait DurableStateUpdateStore[A] extends DurableStateStore[A] { - /** - * @param seqNr sequence number for optimistic locking. starts at 1. - */ + /** @param seqNr sequence number for optimistic locking. starts at 1. */ def upsertObject(persistenceId: String, revision: Long, value: A, tag: String): CompletionStage[Done] @deprecated(message = "Use the deleteObject overload with revision instead.", since = "2.6.20") diff --git a/akka-persistence/src/main/scala/akka/persistence/state/scaladsl/DurableStateUpdateStore.scala b/akka-persistence/src/main/scala/akka/persistence/state/scaladsl/DurableStateUpdateStore.scala index 8cb4c4f1553..de5d53ce888 100644 --- a/akka-persistence/src/main/scala/akka/persistence/state/scaladsl/DurableStateUpdateStore.scala +++ b/akka-persistence/src/main/scala/akka/persistence/state/scaladsl/DurableStateUpdateStore.scala @@ -16,9 +16,7 @@ import akka.Done //#plugin-api trait DurableStateUpdateStore[A] extends DurableStateStore[A] { - /** - * @param revision sequence number for optimistic locking. starts at 1. - */ + /** @param revision sequence number for optimistic locking. starts at 1. 
*/ def upsertObject(persistenceId: String, revision: Long, value: A, tag: String): Future[Done] @deprecated(message = "Use the deleteObject overload with revision instead.", since = "2.6.20") diff --git a/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliveryFailureSpec.scala b/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliveryFailureSpec.scala index a98224ceb50..b8c087cf38f 100644 --- a/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliveryFailureSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliveryFailureSpec.scala @@ -15,8 +15,7 @@ import akka.actor._ import akka.testkit._ object AtLeastOnceDeliveryFailureSpec { - val config = ConfigFactory.parseString( - """ + val config = ConfigFactory.parseString(""" akka.persistence.sender.chaos.live-processing-failure-rate = 0.3 akka.persistence.sender.chaos.replay-processing-failure-rate = 0.1 akka.persistence.destination.chaos.confirm-failure-rate = 0.3 @@ -97,13 +96,12 @@ object AtLeastOnceDeliveryFailureSpec { case Confirm(deliveryId, i) => persist(MsgConfirmed(deliveryId, i))(updateState) } - def receiveRecover: Receive = { - case evt: Evt => - updateState(evt) - if (shouldFail(replayProcessingFailureRate)) - throw new TestException(debugMessage(s"replay failed at event $evt")) - else - log.debug(debugMessage(s"replayed event $evt")) + def receiveRecover: Receive = { case evt: Evt => + updateState(evt) + if (shouldFail(replayProcessingFailureRate)) + throw new TestException(debugMessage(s"replay failed at event $evt")) + else + log.debug(debugMessage(s"replayed event $evt")) } def updateState(evt: Evt): Unit = evt match { @@ -131,18 +129,17 @@ object AtLeastOnceDeliveryFailureSpec { val config = context.system.settings.config.getConfig("akka.persistence.destination.chaos") val confirmFailureRate = config.getDouble("confirm-failure-rate") - def receive = { - case m @ Msg(deliveryId, i) => - if (shouldFail(confirmFailureRate)) { - 
log.debug(debugMessage("confirm message failed", m)) - } else if (contains(i)) { - log.debug(debugMessage("ignored duplicate", m)) - sender() ! Confirm(deliveryId, i) - } else { - add(i) - sender() ! Confirm(deliveryId, i) - log.debug(debugMessage("received and confirmed message", m)) - } + def receive = { case m @ Msg(deliveryId, i) => + if (shouldFail(confirmFailureRate)) { + log.debug(debugMessage("confirm message failed", m)) + } else if (contains(i)) { + log.debug(debugMessage("ignored duplicate", m)) + sender() ! Confirm(deliveryId, i) + } else { + add(i) + sender() ! Confirm(deliveryId, i) + log.debug(debugMessage("received and confirmed message", m)) + } } private def debugMessage(msg: String, m: Msg): String = @@ -183,7 +180,10 @@ class AtLeastOnceDeliveryFailureSpec expectDone() // by sender expectDone() // by destination - system.actorOf(Props(classOf[ChaosApp], testActor), "chaosApp2") // recovery of new instance should have same outcome + system.actorOf( + Props(classOf[ChaosApp], testActor), + "chaosApp2" + ) // recovery of new instance should have same outcome expectDone() // by sender // destination doesn't receive messages again because all have been confirmed already } diff --git a/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliverySpec.scala b/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliverySpec.scala index 222a4e34349..d51b19d8370 100644 --- a/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliverySpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliverySpec.scala @@ -90,7 +90,8 @@ object AtLeastOnceDeliverySpec { persistAsync(AcceptedReq(payload, destination)) { evt => updateState(evt) sender() ! ReqAck - } else + } + else persist(AcceptedReq(payload, destination)) { evt => updateState(evt) sender() ! 
ReqAck @@ -103,7 +104,8 @@ object AtLeastOnceDeliverySpec { if (async) persistAsync(ReqDone(id)) { evt => updateState(evt) - } else + } + else persist(ReqDone(id)) { evt => updateState(evt) } @@ -143,15 +145,14 @@ object AtLeastOnceDeliverySpec { var allReceived = Set.empty[Long] - def receive = { - case a @ Action(id, _) => - // discard duplicates (naive impl) - if (!allReceived.contains(id)) { - log.debug("Destination got {}, all count {}", a, allReceived.size + 1) - testActor ! a - allReceived += id - } - sender() ! ActionAck(id) + def receive = { case a @ Action(id, _) => + // discard duplicates (naive impl) + if (!allReceived.contains(id)) { + log.debug("Destination got {}, all count {}", a, allReceived.size + 1) + testActor ! a + allReceived += id + } + sender() ! ActionAck(id) } } @@ -160,26 +161,24 @@ object AtLeastOnceDeliverySpec { class Unreliable(dropMod: Int, target: ActorRef) extends Actor with ActorLogging { var count = 0 - def receive = { - case msg => - count += 1 - if (count % dropMod != 0) { - log.debug("Pass msg {} count {}", msg, count) - target.forward(msg) - } else { - log.debug("Drop msg {} count {}", msg, count) - } + def receive = { case msg => + count += 1 + if (count % dropMod != 0) { + log.debug("Pass msg {} count {}", msg, count) + target.forward(msg) + } else { + log.debug("Drop msg {} count {}", msg, count) + } } } class DeliverToStarSelection(name: String) extends PersistentActor with AtLeastOnceDelivery { override def persistenceId = name - override def receiveCommand = { - case any => - // this is not supported currently, so expecting exception - try deliver(context.actorSelection("*"))(id => s"$any$id") - catch { case ex: Exception => sender() ! Failure(ex) } + override def receiveCommand = { case any => + // this is not supported currently, so expecting exception + try deliver(context.actorSelection("*"))(id => s"$any$id") + catch { case ex: Exception => sender() ! 
Failure(ex) } } override def receiveRecover = Actor.emptyBehavior @@ -195,7 +194,7 @@ class AtLeastOnceDeliverySpec "AtLeastOnceDelivery" must { List(true, false).foreach { deliverUsingActorSelection => - s"deliver messages in order when nothing is lost (using actorSelection: $deliverUsingActorSelection)" taggedAs (TimingTest) in { + s"deliver messages in order when nothing is lost (using actorSelection: $deliverUsingActorSelection)" taggedAs TimingTest in { val probe = TestProbe() val probeA = TestProbe() val destinations = Map("A" -> system.actorOf(destinationProps(probeA.ref)).path) @@ -206,7 +205,7 @@ class AtLeastOnceDeliverySpec probeA.expectNoMessage(1.second) } - s"re-deliver lost messages (using actorSelection: $deliverUsingActorSelection)" taggedAs (TimingTest) in { + s"re-deliver lost messages (using actorSelection: $deliverUsingActorSelection)" taggedAs TimingTest in { val probe = TestProbe() val probeA = TestProbe() val dst = system.actorOf(destinationProps(probeA.ref)) @@ -247,7 +246,7 @@ class AtLeastOnceDeliverySpec expectMsgType[Failure[_]].toString should include("not supported") } - "re-deliver lost messages after restart" taggedAs (TimingTest) in { + "re-deliver lost messages after restart" taggedAs TimingTest in { val probe = TestProbe() val probeA = TestProbe() val dst = system.actorOf(destinationProps(probeA.ref)) @@ -281,7 +280,7 @@ class AtLeastOnceDeliverySpec probeA.expectNoMessage(1.second) } - "re-send replayed deliveries with an 'initially in-order' strategy, before delivering fresh messages" taggedAs (TimingTest) in { + "re-send replayed deliveries with an 'initially in-order' strategy, before delivering fresh messages" taggedAs TimingTest in { val probe = TestProbe() val probeA = TestProbe() val dst = system.actorOf(destinationProps(probeA.ref)) @@ -313,12 +312,13 @@ class AtLeastOnceDeliverySpec // a-4 was re-delivered but lost probeA.expectMsgAllOf( Action(5, "a-5"), // re-delivered - Action(4, "a-4")) // re-delivered, 3rd time + 
Action(4, "a-4") + ) // re-delivered, 3rd time probeA.expectNoMessage(1.second) } - "restore state from snapshot" taggedAs (TimingTest) in { + "restore state from snapshot" taggedAs TimingTest in { val probe = TestProbe() val probeA = TestProbe() val dst = system.actorOf(destinationProps(probeA.ref)) @@ -356,7 +356,7 @@ class AtLeastOnceDeliverySpec probeA.expectNoMessage(1.second) } - "warn about unconfirmed messages" taggedAs (TimingTest) in { + "warn about unconfirmed messages" taggedAs TimingTest in { val probe = TestProbe() val probeA = TestProbe() val probeB = TestProbe() @@ -369,8 +369,8 @@ class AtLeastOnceDeliverySpec probe.expectMsg(ReqAck) probe.expectMsg(ReqAck) val unconfirmed = probe - .receiveWhile(5.seconds) { - case UnconfirmedWarning(unconfirmed) => unconfirmed + .receiveWhile(5.seconds) { case UnconfirmedWarning(unconfirmed) => + unconfirmed } .flatten unconfirmed.map(_.destination).toSet should ===(Set(probeA.ref.path, probeB.ref.path)) @@ -378,7 +378,7 @@ class AtLeastOnceDeliverySpec system.stop(snd) } - "re-deliver many lost messages" taggedAs (TimingTest) in { + "re-deliver many lost messages" taggedAs TimingTest in { val probe = TestProbe() val probeA = TestProbe() val probeB = TestProbe() @@ -410,7 +410,7 @@ class AtLeastOnceDeliverySpec (1 to N).map(n => "c-" + n).toSet) } - "limit the number of messages redelivered at once" taggedAs (TimingTest) in { + "limit the number of messages redelivered at once" taggedAs TimingTest in { val probe = TestProbe() val probeA = TestProbe() val dst = system.actorOf(destinationProps(probeA.ref)) diff --git a/akka-persistence/src/test/scala/akka/persistence/EventAdapterSpec.scala b/akka-persistence/src/test/scala/akka/persistence/EventAdapterSpec.scala index 0b8ffed893c..ad40667da13 100644 --- a/akka-persistence/src/test/scala/akka/persistence/EventAdapterSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/EventAdapterSpec.scala @@ -15,7 +15,8 @@ import akka.testkit.ImplicitSender object 
EventAdapterSpec { - final val JournalModelClassName = classOf[EventAdapterSpec].getCanonicalName + "$" + classOf[JournalModel].getSimpleName + final val JournalModelClassName = + classOf[EventAdapterSpec].getCanonicalName + "$" + classOf[JournalModel].getSimpleName trait JournalModel { def payload: Any def tags: immutable.Set[String] diff --git a/akka-persistence/src/test/scala/akka/persistence/EventSourcedActorDeleteFailureSpec.scala b/akka-persistence/src/test/scala/akka/persistence/EventSourcedActorDeleteFailureSpec.scala index 853a11c65ba..dcf188f9da7 100644 --- a/akka-persistence/src/test/scala/akka/persistence/EventSourcedActorDeleteFailureSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/EventSourcedActorDeleteFailureSpec.scala @@ -27,8 +27,8 @@ object EventSourcedActorDeleteFailureSpec { class DoesNotHandleDeleteFailureActor(name: String) extends PersistentActor { override def persistenceId = name - override def receiveCommand: Receive = { - case DeleteTo(n) => deleteMessages(n) + override def receiveCommand: Receive = { case DeleteTo(n) => + deleteMessages(n) } override def receiveRecover: Receive = Actor.emptyBehavior } @@ -49,8 +49,7 @@ class EventSourcedActorDeleteFailureSpec PersistenceSpec.config( "inmem", "SnapshotFailureRobustnessSpec", - extraConfig = Some( - """ + extraConfig = Some(""" akka.persistence.journal.inmem.class = "akka.persistence.EventSourcedActorDeleteFailureSpec$DeleteFailingInmemJournal" """))) with ImplicitSender { diff --git a/akka-persistence/src/test/scala/akka/persistence/EventSourcedActorFailureSpec.scala b/akka-persistence/src/test/scala/akka/persistence/EventSourcedActorFailureSpec.scala index 98f045f4f65..b7272540c28 100644 --- a/akka-persistence/src/test/scala/akka/persistence/EventSourcedActorFailureSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/EventSourcedActorFailureSpec.scala @@ -56,14 +56,13 @@ object EventSourcedActorFailureSpec { } def checkSerializable(messages: 
immutable.Seq[AtomicWrite]): immutable.Seq[Try[Unit]] = - messages.collect { - case a: AtomicWrite => - a.payload.collectFirst { - case PersistentRepr(Evt(s: String), _: Long) if s.contains("not serializable") => s - } match { - case Some(s) => Failure(new SimulatedSerializationException(s)) - case None => AsyncWriteJournal.successUnit - } + messages.collect { case a: AtomicWrite => + a.payload.collectFirst { + case PersistentRepr(Evt(s: String), _: Long) if s.contains("not serializable") => s + } match { + case Some(s) => Failure(new SimulatedSerializationException(s)) + case None => AsyncWriteJournal.successUnit + } } def isCorrupt(events: Seq[PersistentRepr]): Boolean = @@ -75,8 +74,8 @@ object EventSourcedActorFailureSpec { } class OnRecoveryFailurePersistentActor(name: String, probe: ActorRef) extends ExamplePersistentActor(name) { - val receiveCommand: Receive = commonBehavior.orElse { - case Cmd(txt) => persist(Evt(txt))(updateState) + val receiveCommand: Receive = commonBehavior.orElse { case Cmd(txt) => + persist(Evt(txt))(updateState) } override protected def onRecoveryFailure(cause: Throwable, event: Option[Any]): Unit = @@ -84,10 +83,9 @@ object EventSourcedActorFailureSpec { } class Supervisor(testActor: ActorRef) extends Actor { - override def supervisorStrategy = OneForOneStrategy(loggingEnabled = false) { - case e => - testActor ! e - SupervisorStrategy.Restart + override def supervisorStrategy = OneForOneStrategy(loggingEnabled = false) { case e => + testActor ! e + SupervisorStrategy.Restart } def receive = { @@ -97,18 +95,17 @@ object EventSourcedActorFailureSpec { } class ResumingSupervisor(testActor: ActorRef) extends Supervisor(testActor) { - override def supervisorStrategy = OneForOneStrategy(loggingEnabled = false) { - case e => - testActor ! e - SupervisorStrategy.Resume + override def supervisorStrategy = OneForOneStrategy(loggingEnabled = false) { case e => + testActor ! 
e + SupervisorStrategy.Resume } } class FailingRecovery(name: String) extends ExamplePersistentActor(name) { - override val receiveCommand: Receive = commonBehavior.orElse { - case Cmd(data) => persist(Evt(s"${data}"))(updateState) + override val receiveCommand: Receive = commonBehavior.orElse { case Cmd(data) => + persist(Evt(s"${data}"))(updateState) } val failingRecover: Receive = { @@ -121,22 +118,20 @@ object EventSourcedActorFailureSpec { } class ThrowingActor1(name: String) extends ExamplePersistentActor(name) { - override val receiveCommand: Receive = commonBehavior.orElse { - case Cmd(data) => - persist(Evt(s"${data}"))(updateState) - if (data == "err") - throw new SimulatedException("Simulated exception 1") + override val receiveCommand: Receive = commonBehavior.orElse { case Cmd(data) => + persist(Evt(s"${data}"))(updateState) + if (data == "err") + throw new SimulatedException("Simulated exception 1") } } class ThrowingActor2(name: String) extends ExamplePersistentActor(name) { - override val receiveCommand: Receive = commonBehavior.orElse { - case Cmd(data) => - persist(Evt(s"${data}")) { evt => - if (data == "err") - throw new SimulatedException("Simulated exception 1") - updateState(evt) - } + override val receiveCommand: Receive = commonBehavior.orElse { case Cmd(data) => + persist(Evt(s"${data}")) { evt => + if (data == "err") + throw new SimulatedException("Simulated exception 1") + updateState(evt) + } } } @@ -147,8 +142,7 @@ class EventSourcedActorFailureSpec PersistenceSpec.config( "inmem", "SnapshotFailureRobustnessSpec", - extraConfig = Some( - """ + extraConfig = Some(""" akka.persistence.journal.inmem.class = "akka.persistence.EventSourcedActorFailureSpec$FailingInmemJournal" """))) with ImplicitSender { diff --git a/akka-persistence/src/test/scala/akka/persistence/LoadPluginSpec.scala b/akka-persistence/src/test/scala/akka/persistence/LoadPluginSpec.scala index 7f2ddaf0b44..f4da4a9fcca 100644 --- 
a/akka-persistence/src/test/scala/akka/persistence/LoadPluginSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/LoadPluginSpec.scala @@ -16,8 +16,8 @@ object LoadPluginSpec { case object GetConfig class JournalWithConfig(val config: Config) extends InmemJournal { - override def receivePluginInternal: Actor.Receive = { - case GetConfig => sender() ! config + override def receivePluginInternal: Actor.Receive = { case GetConfig => + sender() ! config } } diff --git a/akka-persistence/src/test/scala/akka/persistence/ManyRecoveriesSpec.scala b/akka-persistence/src/test/scala/akka/persistence/ManyRecoveriesSpec.scala index c0eebe13367..8ed691844c7 100644 --- a/akka-persistence/src/test/scala/akka/persistence/ManyRecoveriesSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/ManyRecoveriesSpec.scala @@ -24,9 +24,8 @@ object ManyRecoveriesSpec { override def persistenceId = name - override def receiveRecover: Receive = { - case Evt(_) => - latch.foreach(Await.ready(_, 10.seconds)) + override def receiveRecover: Receive = { case Evt(_) => + latch.foreach(Await.ready(_, 10.seconds)) } override def receiveCommand: Receive = { case Cmd(s) => diff --git a/akka-persistence/src/test/scala/akka/persistence/OptionalSnapshotStoreSpec.scala b/akka-persistence/src/test/scala/akka/persistence/OptionalSnapshotStoreSpec.scala index 6262591cf8e..4d2d3070f53 100644 --- a/akka-persistence/src/test/scala/akka/persistence/OptionalSnapshotStoreSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/OptionalSnapshotStoreSpec.scala @@ -32,7 +32,8 @@ object OptionalSnapshotStoreSpec { } } -class OptionalSnapshotStoreSpec extends PersistenceSpec(ConfigFactory.parseString(s""" +class OptionalSnapshotStoreSpec + extends PersistenceSpec(ConfigFactory.parseString(s""" akka.persistence.publish-plugin-commands = on akka.persistence.journal.plugin = "akka.persistence.journal.inmem" @@ -40,7 +41,8 @@ class OptionalSnapshotStoreSpec extends 
PersistenceSpec(ConfigFactory.parseStrin # snapshot store plugin is NOT defined, things should still work akka.persistence.snapshot-store.local.dir = "target/snapshots-${classOf[OptionalSnapshotStoreSpec].getName}/" - """)) with ImplicitSender { + """)) + with ImplicitSender { import OptionalSnapshotStoreSpec._ system.eventStream.publish(TestEvent.Mute(EventFilter[akka.pattern.AskTimeoutException]())) diff --git a/akka-persistence/src/test/scala/akka/persistence/PerformanceSpec.scala b/akka-persistence/src/test/scala/akka/persistence/PerformanceSpec.scala index e7062fbc33d..09c2abc52da 100644 --- a/akka-persistence/src/test/scala/akka/persistence/PerformanceSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/PerformanceSpec.scala @@ -34,15 +34,15 @@ object PerformanceSpec { def stopMeasure(): Double = { stopTime = System.nanoTime - (NanoToSecond * numberOfMessages / (stopTime - startTime)) + NanoToSecond * numberOfMessages / (stopTime - startTime) } } abstract class PerformanceTestPersistentActor(name: String) extends NamedPersistentActor(name) { var failAt: Long = -1 - override val receiveRecover: Receive = { - case _ => if (lastSequenceNr % 1000 == 0) print("r") + override val receiveRecover: Receive = { case _ => + if (lastSequenceNr % 1000 == 0) print("r") } val controlBehavior: Receive = { @@ -54,29 +54,25 @@ object PerformanceSpec { class CommandsourcedTestPersistentActor(name: String) extends PerformanceTestPersistentActor(name) { - override val receiveCommand: Receive = controlBehavior.orElse { - case cmd => - persistAsync(cmd) { _ => - if (lastSequenceNr % 1000 == 0) print(".") - if (lastSequenceNr == failAt) throw new TestException("boom") - } + override val receiveCommand: Receive = controlBehavior.orElse { case cmd => + persistAsync(cmd) { _ => + if (lastSequenceNr % 1000 == 0) print(".") + if (lastSequenceNr == failAt) throw new TestException("boom") + } } } class EventsourcedTestPersistentActor(name: String) extends 
PerformanceTestPersistentActor(name) { - override val receiveCommand: Receive = controlBehavior.orElse { - case cmd => - persist(cmd) { _ => - if (lastSequenceNr % 1000 == 0) print(".") - if (lastSequenceNr == failAt) throw new TestException("boom") - } + override val receiveCommand: Receive = controlBehavior.orElse { case cmd => + persist(cmd) { _ => + if (lastSequenceNr % 1000 == 0) print(".") + if (lastSequenceNr == failAt) throw new TestException("boom") + } } } - /** - * `persist` every 10th message, otherwise `persistAsync` - */ + /** `persist` every 10th message, otherwise `persistAsync` */ class MixedTestPersistentActor(name: String) extends PerformanceTestPersistentActor(name) { var counter = 0 @@ -85,18 +81,17 @@ object PerformanceSpec { if (lastSequenceNr == failAt) throw new TestException("boom") } - val receiveCommand: Receive = controlBehavior.orElse { - case cmd => - counter += 1 - if (counter % 10 == 0) persist(cmd)(handler) - else persistAsync(cmd)(handler) + val receiveCommand: Receive = controlBehavior.orElse { case cmd => + counter += 1 + if (counter % 10 == 0) persist(cmd)(handler) + else persistAsync(cmd)(handler) } } class StashingEventsourcedTestPersistentActor(name: String) extends PerformanceTestPersistentActor(name) { - val printProgress: PartialFunction[Any, Any] = { - case m => if (lastSequenceNr % 1000 == 0) print("."); m + val printProgress: PartialFunction[Any, Any] = { case m => + if (lastSequenceNr % 1000 == 0) print("."); m } val receiveCommand: Receive = printProgress.andThen(controlBehavior.orElse { diff --git a/akka-persistence/src/test/scala/akka/persistence/PersistenceSpec.scala b/akka-persistence/src/test/scala/akka/persistence/PersistenceSpec.scala index a4dcfad93b5..4746a77fa17 100644 --- a/akka-persistence/src/test/scala/akka/persistence/PersistenceSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/PersistenceSpec.scala @@ -29,25 +29,17 @@ abstract class PersistenceSpec(config: Config) lazy val extension = 
Persistence(system) val counter = new AtomicInteger(0) - /** - * Unique name per test. - */ + /** Unique name per test. */ def name = _name - /** - * Prefix for generating a unique name per test. - */ + /** Prefix for generating a unique name per test. */ def namePrefix: String = system.name - /** - * Creates a persistent actor with current name as constructor argument. - */ + /** Creates a persistent actor with current name as constructor argument. */ def namedPersistentActor[T <: NamedPersistentActor: ClassTag] = system.actorOf(Props(implicitly[ClassTag[T]].runtimeClass, name)) - /** - * Creates a persistent actor with current name as constructor argument, plus a custom [[Config]] - */ + /** Creates a persistent actor with current name as constructor argument, plus a custom [[Config]] */ def namedPersistentActorWithProvidedConfig[T <: NamedPersistentActor: ClassTag](providedConfig: Config) = system.actorOf(Props(implicitly[ClassTag[T]].runtimeClass, name, providedConfig)) @@ -109,7 +101,7 @@ trait PersistenceMatchers { final class IndependentlyOrdered(prefixes: immutable.Seq[String]) extends Matcher[immutable.Seq[Any]] { override def apply(_left: immutable.Seq[Any]) = { val left = _left.map(_.toString) - val mapped = left.groupBy(l => prefixes.indexWhere(p => l.startsWith(p))) - (-1) // ignore other messages + val mapped = left.groupBy(l => prefixes.indexWhere(p => l.startsWith(p))) - -1 // ignore other messages val results = for { (pos, seq) <- mapped nrs = seq.map(_.replaceFirst(prefixes(pos), "").toInt) diff --git a/akka-persistence/src/test/scala/akka/persistence/PersistentActorBoundedStashingSpec.scala b/akka-persistence/src/test/scala/akka/persistence/PersistentActorBoundedStashingSpec.scala index 9b3100034a4..5e6811746b7 100644 --- a/akka-persistence/src/test/scala/akka/persistence/PersistentActorBoundedStashingSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/PersistentActorBoundedStashingSpec.scala @@ -27,18 +27,18 @@ object 
PersistentActorBoundedStashingSpec { class StashOverflowStrategyFromConfigPersistentActor(name: String) extends NamedPersistentActor(name) { var events: List[Any] = Nil - val updateState: Receive = { - case Evt(data) => events = data :: events + val updateState: Receive = { case Evt(data) => + events = data :: events } - val commonBehavior: Receive = { - case GetState => sender() ! events.reverse + val commonBehavior: Receive = { case GetState => + sender() ! events.reverse } def receiveRecover = updateState - override def receiveCommand: Receive = commonBehavior.orElse { - case Cmd(x: Any) => persist(Evt(x))(updateState) + override def receiveCommand: Receive = commonBehavior.orElse { case Cmd(x: Any) => + persist(Evt(x))(updateState) } } @@ -100,14 +100,14 @@ class ThrowExceptionStrategyPersistentActorBoundedStashingSpec persistentActor ! GetState expectMsg(Nil) - //barrier for stash + // barrier for stash persistentActor ! Cmd("a") - //internal stash overflow + // internal stash overflow (1 to (capacity + 1)).foreach(persistentActor ! Cmd(_)) - //after PA stopped, all stashed messages forward to deadletters - //the message triggering the overflow is lost, so we get one less message than we sent + // after PA stopped, all stashed messages forward to deadletters + // the message triggering the overflow is lost, so we get one less message than we sent (1 to capacity).foreach(i => expectMsg(DeadLetter(Cmd(i), testActor, persistentActor))) // send another message to the now dead actor and make sure that it goes to dead letters @@ -125,21 +125,21 @@ class DiscardStrategyPersistentActorBoundedStashingSpec awaitAssert(SteppingInmemJournal.getRef("persistence-bounded-stash"), 3.seconds) val journal = SteppingInmemJournal.getRef("persistence-bounded-stash") - //initial read highest + // initial read highest SteppingInmemJournal.step(journal) // make sure it's fully started first persistentActor ! 
GetState expectMsg(Nil) - //barrier for stash + // barrier for stash persistentActor ! Cmd("a") - //internal stash overflow after 10 + // internal stash overflow after 10 (1 to (2 * capacity)).foreach(persistentActor ! Cmd(_)) - //so, 11 to 20 discard to deadletter + // so, 11 to 20 discard to deadletter ((1 + capacity) to (2 * capacity)).foreach(i => expectMsg(DeadLetter(Cmd(i), testActor, persistentActor))) - //allow "a" and 1 to 10 write complete + // allow "a" and 1 to 10 write complete (1 to (1 + capacity)).foreach(_ => SteppingInmemJournal.step(journal)) persistentActor ! GetState @@ -157,21 +157,21 @@ class ReplyToStrategyPersistentActorBoundedStashingSpec awaitAssert(SteppingInmemJournal.getRef("persistence-bounded-stash"), 3.seconds) val journal = SteppingInmemJournal.getRef("persistence-bounded-stash") - //initial read highest + // initial read highest SteppingInmemJournal.step(journal) // make sure it's fully started first persistentActor ! GetState expectMsg(Nil) - //barrier for stash + // barrier for stash persistentActor ! Cmd("a") - //internal stash overflow after 10 + // internal stash overflow after 10 (1 to (2 * capacity)).foreach(persistentActor ! Cmd(_)) - //so, 11 to 20 reply to with "Reject" String + // so, 11 to 20 reply to with "Reject" String ((1 + capacity) to (2 * capacity)).foreach(_ => expectMsg("RejectToStash")) - //allow "a" and 1 to 10 write complete + // allow "a" and 1 to 10 write complete (1 to (1 + capacity)).foreach(_ => SteppingInmemJournal.step(journal)) persistentActor ! 
GetState diff --git a/akka-persistence/src/test/scala/akka/persistence/PersistentActorJournalProtocolSpec.scala b/akka-persistence/src/test/scala/akka/persistence/PersistentActorJournalProtocolSpec.scala index 1e1c8a2bbca..94565f4b4b9 100644 --- a/akka-persistence/src/test/scala/akka/persistence/PersistentActorJournalProtocolSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/PersistentActorJournalProtocolSpec.scala @@ -45,11 +45,11 @@ akka.persistence.snapshot-store.plugin = "akka.persistence.no-snapshot-store" override def postRestart(reason: Throwable): Unit = monitor ! PostRestart(persistenceId) override def postStop(): Unit = monitor ! PostStop(persistenceId) - def receiveRecover = { - case x => monitor ! x + def receiveRecover = { case x => + monitor ! x } - def receiveCommand = behavior.orElse { - case m: Multi => m.cmd.foreach(behavior) + def receiveCommand = behavior.orElse { case m: Multi => + m.cmd.foreach(behavior) } val behavior: Receive = { @@ -93,8 +93,8 @@ class JournalProbe(implicit private val system: ExtendedActorSystem) extends Ext class JournalPuppet extends Actor { val ref = JournalPuppet(context.system).ref - def receive = { - case x => ref.forward(x) + def receive = { case x => + ref.forward(x) } } diff --git a/akka-persistence/src/test/scala/akka/persistence/PersistentActorRecoveryTimeoutSpec.scala b/akka-persistence/src/test/scala/akka/persistence/PersistentActorRecoveryTimeoutSpec.scala index 534ff4e487f..b1abb2fafa5 100644 --- a/akka-persistence/src/test/scala/akka/persistence/PersistentActorRecoveryTimeoutSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/PersistentActorRecoveryTimeoutSpec.scala @@ -27,11 +27,10 @@ object PersistentActorRecoveryTimeoutSpec { class TestActor(probe: ActorRef) extends NamedPersistentActor("recovery-timeout-actor") { override def receiveRecover: Receive = Actor.emptyBehavior - override def receiveCommand: Receive = { - case x => - persist(x) { _ => - sender() ! 
x - } + override def receiveCommand: Receive = { case x => + persist(x) { _ => + sender() ! x + } } override protected def onRecoveryFailure(cause: Throwable, event: Option[Any]): Unit = { @@ -52,11 +51,10 @@ object PersistentActorRecoveryTimeoutSpec { case _ => // we don't care } - override def receiveCommand: Receive = { - case x => - persist(x) { _ => - sender() ! x - } + override def receiveCommand: Receive = { case x => + persist(x) { _ => + sender() ! x + } } override protected def onRecoveryFailure(cause: Throwable, event: Option[Any]): Unit = { diff --git a/akka-persistence/src/test/scala/akka/persistence/PersistentActorSpec.scala b/akka-persistence/src/test/scala/akka/persistence/PersistentActorSpec.scala index 5ecaf3a3b27..9ea6c72b5ab 100644 --- a/akka-persistence/src/test/scala/akka/persistence/PersistentActorSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/PersistentActorSpec.scala @@ -99,10 +99,9 @@ object PersistentActorSpec { with InmemRuntimePluginConfig class Behavior2PersistentActor(name: String) extends ExamplePersistentActor(name) { - val receiveCommand: Receive = commonBehavior.orElse { - case Cmd(data) => - persistAll(List(Evt(s"${data}-1"), Evt(s"${data}-2")))(updateState) - persistAll(List(Evt(s"${data}-3"), Evt(s"${data}-4")))(updateState) + val receiveCommand: Receive = commonBehavior.orElse { case Cmd(data) => + persistAll(List(Evt(s"${data}-1"), Evt(s"${data}-2")))(updateState) + persistAll(List(Evt(s"${data}-3"), Evt(s"${data}-4")))(updateState) } } class Behavior2PersistentActorWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) @@ -110,10 +109,9 @@ object PersistentActorSpec { with InmemRuntimePluginConfig class Behavior3PersistentActor(name: String) extends ExamplePersistentActor(name) { - val receiveCommand: Receive = commonBehavior.orElse { - case Cmd(data) => - persistAll(List(Evt(s"${data}-11"), Evt(s"${data}-12")))(updateState) - updateState(Evt(s"${data}-10")) + val receiveCommand: Receive = 
commonBehavior.orElse { case Cmd(data) => + persistAll(List(Evt(s"${data}-11"), Evt(s"${data}-12")))(updateState) + updateState(Evt(s"${data}-10")) } } class Behavior3PersistentActorWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) @@ -128,9 +126,8 @@ object PersistentActorSpec { persist(Evt(s"$data-${lastSequenceNr + 1}"))(updateState) } - override def receiveRecover: Receive = super.receiveRecover.orElse { - case FilteredPayload => - throw new IllegalStateException("Unexpected FilteredPayload") + override def receiveRecover: Receive = super.receiveRecover.orElse { case FilteredPayload => + throw new IllegalStateException("Unexpected FilteredPayload") } } @@ -139,21 +136,19 @@ object PersistentActorSpec { with InmemRuntimePluginConfig class ChangeBehaviorInLastEventHandlerPersistentActor(name: String) extends ExamplePersistentActor(name) { - val newBehavior: Receive = { - case Cmd(data) => - persist(Evt(s"${data}-21"))(updateState) - persist(Evt(s"${data}-22")) { event => - updateState(event) - context.unbecome() - } + val newBehavior: Receive = { case Cmd(data) => + persist(Evt(s"${data}-21"))(updateState) + persist(Evt(s"${data}-22")) { event => + updateState(event) + context.unbecome() + } } - val receiveCommand: Receive = commonBehavior.orElse { - case Cmd(data) => - persist(Evt(s"${data}-0")) { event => - updateState(event) - context.become(newBehavior) - } + val receiveCommand: Receive = commonBehavior.orElse { case Cmd(data) => + persist(Evt(s"${data}-0")) { event => + updateState(event) + context.become(newBehavior) + } } } class ChangeBehaviorInLastEventHandlerPersistentActorWithInmemRuntimePluginConfig( @@ -163,21 +158,19 @@ object PersistentActorSpec { with InmemRuntimePluginConfig class ChangeBehaviorInFirstEventHandlerPersistentActor(name: String) extends ExamplePersistentActor(name) { - val newBehavior: Receive = { - case Cmd(data) => - persist(Evt(s"${data}-21")) { event => - updateState(event) - context.unbecome() - } - 
persist(Evt(s"${data}-22"))(updateState) + val newBehavior: Receive = { case Cmd(data) => + persist(Evt(s"${data}-21")) { event => + updateState(event) + context.unbecome() + } + persist(Evt(s"${data}-22"))(updateState) } - val receiveCommand: Receive = commonBehavior.orElse { - case Cmd(data) => - persist(Evt(s"${data}-0")) { event => - updateState(event) - context.become(newBehavior) - } + val receiveCommand: Receive = commonBehavior.orElse { case Cmd(data) => + persist(Evt(s"${data}-0")) { event => + updateState(event) + context.become(newBehavior) + } } } class ChangeBehaviorInFirstEventHandlerPersistentActorWithInmemRuntimePluginConfig( @@ -187,17 +180,15 @@ object PersistentActorSpec { with InmemRuntimePluginConfig class ChangeBehaviorInCommandHandlerFirstPersistentActor(name: String) extends ExamplePersistentActor(name) { - val newBehavior: Receive = { - case Cmd(data) => - context.unbecome() - persistAll(List(Evt(s"${data}-31"), Evt(s"${data}-32")))(updateState) - updateState(Evt(s"${data}-30")) + val newBehavior: Receive = { case Cmd(data) => + context.unbecome() + persistAll(List(Evt(s"${data}-31"), Evt(s"${data}-32")))(updateState) + updateState(Evt(s"${data}-30")) } - val receiveCommand: Receive = commonBehavior.orElse { - case Cmd(data) => - context.become(newBehavior) - persist(Evt(s"${data}-0"))(updateState) + val receiveCommand: Receive = commonBehavior.orElse { case Cmd(data) => + context.become(newBehavior) + persist(Evt(s"${data}-0"))(updateState) } } class ChangeBehaviorInCommandHandlerFirstPersistentActorWithInmemRuntimePluginConfig( @@ -207,17 +198,15 @@ object PersistentActorSpec { with InmemRuntimePluginConfig class ChangeBehaviorInCommandHandlerLastPersistentActor(name: String) extends ExamplePersistentActor(name) { - val newBehavior: Receive = { - case Cmd(data) => - persistAll(List(Evt(s"${data}-31"), Evt(s"${data}-32")))(updateState) - updateState(Evt(s"${data}-30")) - context.unbecome() + val newBehavior: Receive = { case Cmd(data) => + 
persistAll(List(Evt(s"${data}-31"), Evt(s"${data}-32")))(updateState) + updateState(Evt(s"${data}-30")) + context.unbecome() } - val receiveCommand: Receive = commonBehavior.orElse { - case Cmd(data) => - persist(Evt(s"${data}-0"))(updateState) - context.become(newBehavior) + val receiveCommand: Receive = commonBehavior.orElse { case Cmd(data) => + persist(Evt(s"${data}-0"))(updateState) + context.become(newBehavior) } } class ChangeBehaviorInCommandHandlerLastPersistentActorWithInmemRuntimePluginConfig( @@ -227,10 +216,9 @@ object PersistentActorSpec { with InmemRuntimePluginConfig class SnapshottingPersistentActor(name: String, probe: ActorRef) extends ExamplePersistentActor(name) { - override def receiveRecover = super.receiveRecover.orElse { - case SnapshotOffer(_, events: List[_]) => - probe ! "offered" - this.events = events + override def receiveRecover = super.receiveRecover.orElse { case SnapshotOffer(_, events: List[_]) => + probe ! "offered" + this.events = events } private def handleCmd(cmd: Cmd): Unit = { @@ -252,19 +240,18 @@ object PersistentActorSpec { class SnapshottingBecomingPersistentActor(name: String, probe: ActorRef) extends SnapshottingPersistentActor(name, probe) { - val becomingRecover: Receive = { - case msg: SnapshotOffer => - context.become(becomingCommand) - // sending ourself a normal message here also tests - // that we stash them until recovery is complete - self ! "It's changing me" - super.receiveRecover(msg) + val becomingRecover: Receive = { case msg: SnapshotOffer => + context.become(becomingCommand) + // sending ourself a normal message here also tests + // that we stash them until recovery is complete + self ! "It's changing me" + super.receiveRecover(msg) } override def receiveRecover = becomingRecover.orElse(super.receiveRecover) - val becomingCommand: Receive = receiveCommand.orElse { - case "It's changing me" => probe ! 
"I am becoming" + val becomingCommand: Receive = receiveCommand.orElse { case "It's changing me" => + probe ! "I am becoming" } } class SnapshottingBecomingPersistentActorWithInmemRuntimePluginConfig( @@ -275,8 +262,8 @@ object PersistentActorSpec { with InmemRuntimePluginConfig class ReplyInEventHandlerPersistentActor(name: String) extends ExamplePersistentActor(name) { - val receiveCommand: Receive = { - case Cmd("a") => persist(Evt("a"))(evt => sender() ! evt.data) + val receiveCommand: Receive = { case Cmd("a") => + persist(Evt("a"))(evt => sender() ! evt.data) } } class ReplyInEventHandlerPersistentActorWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) @@ -286,12 +273,11 @@ object PersistentActorSpec { class AsyncPersistPersistentActor(name: String) extends ExamplePersistentActor(name) { var counter = 0 - val receiveCommand: Receive = commonBehavior.orElse { - case Cmd(data) => - sender() ! data - persistAsync(Evt(s"$data-${incCounter()}")) { evt => - sender() ! evt.data - } + val receiveCommand: Receive = commonBehavior.orElse { case Cmd(data) => + sender() ! data + persistAsync(Evt(s"$data-${incCounter()}")) { evt => + sender() ! evt.data + } } private def incCounter(): Int = { @@ -313,15 +299,14 @@ object PersistentActorSpec { class AsyncPersistThreeTimesPersistentActor(name: String) extends ExamplePersistentActor(name) { var counter = 0 - val receiveCommand: Receive = commonBehavior.orElse { - case Cmd(data) => - sender() ! data + val receiveCommand: Receive = commonBehavior.orElse { case Cmd(data) => + sender() ! data - (1 to 3).foreach { _ => - persistAsync(Evt(s"$data-${incCounter()}")) { evt => - sender() ! ("a" + evt.data.toString.drop(1)) // c-1 => a-1, as in "ack" - } + (1 to 3).foreach { _ => + persistAsync(Evt(s"$data-${incCounter()}")) { evt => + sender() ! 
("a" + evt.data.toString.drop(1)) // c-1 => a-1, as in "ack" } + } } private def incCounter(): Int = { @@ -338,19 +323,18 @@ object PersistentActorSpec { // atomic because used from inside the *async* callbacks val sendMsgCounter = new AtomicInteger() - val receiveCommand: Receive = commonBehavior.orElse { - case Cmd(data) => - sender() ! data - val event = Evt(data) + val receiveCommand: Receive = commonBehavior.orElse { case Cmd(data) => + sender() ! data + val event = Evt(data) - persistAsync(event) { evt => - // be way slower, in order to be overtaken by the other callback - Thread.sleep(300) - sender() ! s"${evt.data}-a-${sendMsgCounter.incrementAndGet()}" - } - persistAsync(event) { evt => - sender() ! s"${evt.data}-b-${sendMsgCounter.incrementAndGet()}" - } + persistAsync(event) { evt => + // be way slower, in order to be overtaken by the other callback + Thread.sleep(300) + sender() ! s"${evt.data}-a-${sendMsgCounter.incrementAndGet()}" + } + persistAsync(event) { evt => + sender() ! s"${evt.data}-b-${sendMsgCounter.incrementAndGet()}" + } } } class AsyncPersistSameEventTwicePersistentActorWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) @@ -382,22 +366,21 @@ object PersistentActorSpec { var counter = 0 - val receiveCommand: Receive = commonBehavior.orElse { - case Cmd(data) => - sender() ! data + val receiveCommand: Receive = commonBehavior.orElse { case Cmd(data) => + sender() ! data - persist(Evt(s"$data-e1")) { evt => - sender() ! s"${evt.data}-${incCounter()}" - } + persist(Evt(s"$data-e1")) { evt => + sender() ! s"${evt.data}-${incCounter()}" + } - // this should be happily executed - persistAsync(Evt(s"$data-ea2")) { evt => - sender() ! s"${evt.data}-${incCounter()}" - } + // this should be happily executed + persistAsync(Evt(s"$data-ea2")) { evt => + sender() ! s"${evt.data}-${incCounter()}" + } - persist(Evt(s"$data-e3")) { evt => - sender() ! s"${evt.data}-${incCounter()}" - } + persist(Evt(s"$data-e3")) { evt => + sender() ! 
s"${evt.data}-${incCounter()}" + } } private def incCounter(): Int = { @@ -415,17 +398,16 @@ object PersistentActorSpec { var sendMsgCounter = 0 - val receiveCommand: Receive = commonBehavior.orElse { - case Cmd(data) => - sender() ! data + val receiveCommand: Receive = commonBehavior.orElse { case Cmd(data) => + sender() ! data - persist(Evt(s"$data-e1")) { evt => - sender() ! s"${evt.data}-${incCounter()}" - } + persist(Evt(s"$data-e1")) { evt => + sender() ! s"${evt.data}-${incCounter()}" + } - persistAsync(Evt(s"$data-ea2")) { evt => - sender() ! s"${evt.data}-${incCounter()}" - } + persistAsync(Evt(s"$data-ea2")) { evt => + sender() ! s"${evt.data}-${incCounter()}" + } } def incCounter() = { @@ -440,14 +422,13 @@ object PersistentActorSpec { with InmemRuntimePluginConfig class AsyncPersistHandlerCorrelationCheck(name: String) extends ExamplePersistentActor(name) { - val receiveCommand: Receive = commonBehavior.orElse { - case Cmd(data) => - persistAsync(Evt(data)) { evt => - if (data != evt.data) - sender() ! s"Expected [$data] bot got [${evt.data}]" - if (evt.data == "done") - sender() ! "done" - } + val receiveCommand: Receive = commonBehavior.orElse { case Cmd(data) => + persistAsync(Evt(data)) { evt => + if (data != evt.data) + sender() ! s"Expected [$data] bot got [${evt.data}]" + if (evt.data == "done") + sender() ! "done" + } } } class AsyncPersistHandlerCorrelationCheckWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) @@ -455,8 +436,8 @@ object PersistentActorSpec { with InmemRuntimePluginConfig class PrimitiveEventPersistentActor(name: String) extends ExamplePersistentActor(name) { - val receiveCommand: Receive = { - case Cmd("a") => persist(5)(evt => sender() ! evt) + val receiveCommand: Receive = { case Cmd("a") => + persist(5)(evt => sender() ! 
evt) } } class PrimitiveEventPersistentActorWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) @@ -479,8 +460,8 @@ object PersistentActorSpec { override def receiveRecover = sendingRecover.orElse(super.receiveRecover) - override def receiveCommand: Receive = super.receiveCommand.orElse { - case s: String => probe ! s + override def receiveCommand: Receive = super.receiveCommand.orElse { case s: String => + probe ! s } } @@ -503,12 +484,11 @@ object PersistentActorSpec { def doDefer[A](event: A)(handler: A => Unit): Unit = deferAsync(event)(handler) } abstract class DeferringWithPersistActor(name: String) extends ExamplePersistentActor(name) with DeferActor { - val receiveCommand: Receive = { - case Cmd(data) => - doDefer("d-1") { sender() ! _ } - persist(s"$data-2") { sender() ! _ } - doDefer("d-3") { sender() ! _ } - doDefer("d-4") { sender() ! _ } + val receiveCommand: Receive = { case Cmd(data) => + doDefer("d-1") { sender() ! _ } + persist(s"$data-2") { sender() ! _ } + doDefer("d-3") { sender() ! _ } + doDefer("d-4") { sender() ! _ } } } class DeferringAsyncWithPersistActor(name: String) extends DeferringWithPersistActor(name) with DeferAsync @@ -521,12 +501,11 @@ object PersistentActorSpec { with InmemRuntimePluginConfig abstract class DeferringWithAsyncPersistActor(name: String) extends ExamplePersistentActor(name) with DeferActor { - val receiveCommand: Receive = { - case Cmd(data) => - doDefer(s"d-$data-1") { sender() ! _ } - persistAsync(s"pa-$data-2") { sender() ! _ } - doDefer(s"d-$data-3") { sender() ! _ } - doDefer(s"d-$data-4") { sender() ! _ } + val receiveCommand: Receive = { case Cmd(data) => + doDefer(s"d-$data-1") { sender() ! _ } + persistAsync(s"pa-$data-2") { sender() ! _ } + doDefer(s"d-$data-3") { sender() ! _ } + doDefer(s"d-$data-4") { sender() ! 
_ } } } class DeferringAsyncWithAsyncPersistActor(name: String) extends DeferringWithAsyncPersistActor(name) with DeferAsync @@ -541,14 +520,13 @@ object PersistentActorSpec { abstract class DeferringMixedCallsPPADDPADPersistActor(name: String) extends ExamplePersistentActor(name) with DeferActor { - val receiveCommand: Receive = { - case Cmd(data) => - persist(s"p-$data-1") { sender() ! _ } - persistAsync(s"pa-$data-2") { sender() ! _ } - doDefer(s"d-$data-3") { sender() ! _ } - doDefer(s"d-$data-4") { sender() ! _ } - persistAsync(s"pa-$data-5") { sender() ! _ } - doDefer(s"d-$data-6") { sender() ! _ } + val receiveCommand: Receive = { case Cmd(data) => + persist(s"p-$data-1") { sender() ! _ } + persistAsync(s"pa-$data-2") { sender() ! _ } + doDefer(s"d-$data-3") { sender() ! _ } + doDefer(s"d-$data-4") { sender() ! _ } + persistAsync(s"pa-$data-5") { sender() ! _ } + doDefer(s"d-$data-6") { sender() ! _ } } } class DeferringAsyncMixedCallsPPADDPADPersistActor(name: String) @@ -571,11 +549,10 @@ object PersistentActorSpec { abstract class DeferringWithNoPersistCallsPersistActor(name: String) extends ExamplePersistentActor(name) with DeferActor { - val receiveCommand: Receive = { - case Cmd(_) => - doDefer("d-1") { sender() ! _ } - doDefer("d-2") { sender() ! _ } - doDefer("d-3") { sender() ! _ } + val receiveCommand: Receive = { case Cmd(_) => + doDefer("d-1") { sender() ! _ } + doDefer("d-2") { sender() ! _ } + doDefer("d-3") { sender() ! _ } } } class DeferringAsyncWithNoPersistCallsPersistActor(name: String) @@ -596,14 +573,12 @@ object PersistentActorSpec { with InmemRuntimePluginConfig abstract class DeferringActor(name: String) extends ExamplePersistentActor(name) with DeferActor { - val receiveCommand: Receive = { - case Cmd(data) => - sender() ! data - persist(()) { _ => - } // skip calling defer immediately because of empty pending invocations - doDefer(Evt(s"$data-defer")) { evt => - sender() ! 
evt.data - } + val receiveCommand: Receive = { case Cmd(data) => + sender() ! data + persist(()) { _ => } // skip calling defer immediately because of empty pending invocations + doDefer(Evt(s"$data-defer")) { evt => + sender() ! evt.data + } } } class DeferringAsyncActor(name: String) extends DeferringActor(name) with DeferAsync @@ -635,10 +610,9 @@ object PersistentActorSpec { class RecoverMessageCausedRestart(name: String) extends NamedPersistentActor(name) { var master: ActorRef = _ - val receiveCommand: Receive = { - case "Boom" => - master = sender() - throw new TestException("boom") + val receiveCommand: Receive = { case "Boom" => + master = sender() + throw new TestException("boom") } override def preRestart(reason: Throwable, message: Option[Any]): Unit = { @@ -648,8 +622,8 @@ object PersistentActorSpec { context.stop(self) } - override def receiveRecover = { - case _ => () + override def receiveRecover = { case _ => + () } } @@ -658,21 +632,20 @@ object PersistentActorSpec { with InmemRuntimePluginConfig class MultipleAndNestedPersists(name: String, probe: ActorRef) extends ExamplePersistentActor(name) { - val receiveCommand: Receive = { - case s: String => - probe ! s - persist(s + "-outer-1") { outer => - probe ! outer - persist(s + "-inner-1") { inner => - probe ! inner - } + val receiveCommand: Receive = { case s: String => + probe ! s + persist(s + "-outer-1") { outer => + probe ! outer + persist(s + "-inner-1") { inner => + probe ! inner } - persist(s + "-outer-2") { outer => - probe ! outer - persist(s + "-inner-2") { inner => - probe ! inner - } + } + persist(s + "-outer-2") { outer => + probe ! outer + persist(s + "-inner-2") { inner => + probe ! 
inner } + } } } class MultipleAndNestedPersistsWithInmemRuntimePluginConfig(name: String, probe: ActorRef, val providedConfig: Config) @@ -680,21 +653,20 @@ object PersistentActorSpec { with InmemRuntimePluginConfig class MultipleAndNestedPersistAsyncs(name: String, probe: ActorRef) extends ExamplePersistentActor(name) { - val receiveCommand: Receive = { - case s: String => - probe ! s - persistAsync(s + "-outer-1") { outer => - probe ! outer - persistAsync(s + "-inner-1") { inner => - probe ! inner - } + val receiveCommand: Receive = { case s: String => + probe ! s + persistAsync(s + "-outer-1") { outer => + probe ! outer + persistAsync(s + "-inner-1") { inner => + probe ! inner } - persistAsync(s + "-outer-2") { outer => - probe ! outer - persistAsync(s + "-inner-2") { inner => - probe ! inner - } + } + persistAsync(s + "-outer-2") { outer => + probe ! outer + persistAsync(s + "-inner-2") { inner => + probe ! inner } + } } } class MultipleAndNestedPersistAsyncsWithInmemRuntimePluginConfig( @@ -719,10 +691,9 @@ object PersistentActorSpec { } } - val receiveCommand: Receive = { - case s: String => - probe ! s - persistAsync(s + "-" + 1)(weMustGoDeeper) + val receiveCommand: Receive = { case s: String => + probe ! s + persistAsync(s + "-" + 1)(weMustGoDeeper) } } class DeeplyNestedPersistAsyncsWithInmemRuntimePluginConfig( @@ -734,21 +705,20 @@ object PersistentActorSpec { with InmemRuntimePluginConfig class NestedPersistNormalAndAsyncs(name: String, probe: ActorRef) extends ExamplePersistentActor(name) { - val receiveCommand: Receive = { - case s: String => - probe ! s - persist(s + "-outer-1") { outer => - probe ! outer - persistAsync(s + "-inner-async-1") { inner => - probe ! inner - } + val receiveCommand: Receive = { case s: String => + probe ! s + persist(s + "-outer-1") { outer => + probe ! outer + persistAsync(s + "-inner-async-1") { inner => + probe ! inner } - persist(s + "-outer-2") { outer => - probe ! 
outer - persistAsync(s + "-inner-async-2") { inner => - probe ! inner - } + } + persist(s + "-outer-2") { outer => + probe ! outer + persistAsync(s + "-inner-async-2") { inner => + probe ! inner } + } } } class NestedPersistNormalAndAsyncsWithInmemRuntimePluginConfig( @@ -759,21 +729,20 @@ object PersistentActorSpec { with InmemRuntimePluginConfig class NestedPersistAsyncsAndNormal(name: String, probe: ActorRef) extends ExamplePersistentActor(name) { - val receiveCommand: Receive = { - case s: String => - probe ! s - persistAsync(s + "-outer-async-1") { outer => - probe ! outer - persist(s + "-inner-1") { inner => - probe ! inner - } + val receiveCommand: Receive = { case s: String => + probe ! s + persistAsync(s + "-outer-async-1") { outer => + probe ! outer + persist(s + "-inner-1") { inner => + probe ! inner } - persistAsync(s + "-outer-async-2") { outer => - probe ! outer - persist(s + "-inner-2") { inner => - probe ! inner - } + } + persistAsync(s + "-outer-async-2") { outer => + probe ! outer + persist(s + "-inner-2") { inner => + probe ! inner } + } } } class NestedPersistAsyncsAndNormalWithInmemRuntimePluginConfig( @@ -784,18 +753,17 @@ object PersistentActorSpec { with InmemRuntimePluginConfig class NestedPersistInAsyncEnforcesStashing(name: String, probe: ActorRef) extends ExamplePersistentActor(name) { - val receiveCommand: Receive = { - case s: String => - probe ! s - persistAsync(s + "-outer-async") { outer => - probe ! outer - persist(s + "-inner") { inner => - probe ! inner - Thread.sleep(1000) // really long wait here... - // the next incoming command must be handled by the following function - context.become({ case _ => sender() ! "done" }) - } + val receiveCommand: Receive = { case s: String => + probe ! s + persistAsync(s + "-outer-async") { outer => + probe ! outer + persist(s + "-inner") { inner => + probe ! inner + Thread.sleep(1000) // really long wait here... 
+ // the next incoming command must be handled by the following function + context.become { case _ => sender() ! "done" } } + } } } class NestedPersistInAsyncEnforcesStashingWithInmemRuntimePluginConfig( @@ -820,10 +788,9 @@ object PersistentActorSpec { } } - val receiveCommand: Receive = { - case s: String => - probe ! s - persist(s + "-" + 1)(weMustGoDeeper) + val receiveCommand: Receive = { case s: String => + probe ! s + persist(s + "-" + 1)(weMustGoDeeper) } } class DeeplyNestedPersistsWithInmemRuntimePluginConfig( @@ -840,15 +807,14 @@ object PersistentActorSpec { with StackableTestPersistentActor.MixinActor { override def persistenceId: String = "StackableTestPersistentActor" - def receiveCommand = { - case "restart" => - throw new Exception("triggering restart") with NoStackTrace { - override def toString = "Boom!" - } + def receiveCommand = { case "restart" => + throw new Exception("triggering restart") with NoStackTrace { + override def toString = "Boom!" + } } - def receiveRecover = { - case _ => () + def receiveRecover = { case _ => + () } override def preStart(): Unit = { @@ -953,8 +919,8 @@ object PersistentActorSpec { override def onRecoveryFailure(cause: scala.Throwable, event: Option[Any]): Unit = () - def receiveCommand = commonBehavior.orElse { - case Cmd(d) => persist(Evt(d))(updateState) + def receiveCommand = commonBehavior.orElse { case Cmd(d) => + persist(Evt(d))(updateState) } } class PersistInRecoveryWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) @@ -1271,7 +1237,8 @@ abstract class PersistentActorSpec(config: Config) extends PersistenceSpec(confi persistentActor ! 
i } - val all: immutable.Seq[String] = this.receiveN(40).asInstanceOf[immutable.Seq[String]] // each command = 1 reply + 3 event-replies + val all: immutable.Seq[String] = + this.receiveN(40).asInstanceOf[immutable.Seq[String]] // each command = 1 reply + 3 event-replies val replies = all.filter(r => r.count(_ == '-') == 1) replies should equal(commands.map(_.data)) @@ -1292,9 +1259,8 @@ abstract class PersistentActorSpec(config: Config) extends PersistenceSpec(confi } val probes = Vector.fill(10)(TestProbe()) - probes.zip(commands).foreach { - case (p, c) => - persistentActor.tell(c, p.ref) + probes.zip(commands).foreach { case (p, c) => + persistentActor.tell(c, p.ref) } val ackClass = classOf[String] @@ -1353,13 +1319,17 @@ abstract class PersistentActorSpec(config: Config) extends PersistenceSpec(confi expectMsg("a-e1-1") // persist, must be before next command var expectInAnyOrder1 = Set("b", "a-ea2-2") - expectInAnyOrder1 -= expectMsgAnyOf(expectInAnyOrder1.toList: _*) // ea2 is persistAsync, b (command) can processed before it + expectInAnyOrder1 -= expectMsgAnyOf( + expectInAnyOrder1.toList: _* + ) // ea2 is persistAsync, b (command) can processed before it expectMsgAnyOf(expectInAnyOrder1.toList: _*) expectMsg("b-e1-3") // persist, must be before next command var expectInAnyOrder2 = Set("c", "b-ea2-4") - expectInAnyOrder2 -= expectMsgAnyOf(expectInAnyOrder2.toList: _*) // ea2 is persistAsync, b (command) can processed before it + expectInAnyOrder2 -= expectMsgAnyOf( + expectInAnyOrder2.toList: _* + ) // ea2 is persistAsync, b (command) can processed before it expectMsgAnyOf(expectInAnyOrder2.toList: _*) expectMsg("c-e1-5") @@ -1697,8 +1667,7 @@ class InmemPersistentActorWithRuntimePluginConfigSpec val providedActorConfig: Config = { ConfigFactory - .parseString( - """ + .parseString(""" | custom.persistence.snapshot-store.local.dir = target/snapshots-InmemPersistentActorWithRuntimePluginConfigSpec/ """.stripMargin) .withValue( diff --git 
a/akka-persistence/src/test/scala/akka/persistence/PersistentActorStashingSpec.scala b/akka-persistence/src/test/scala/akka/persistence/PersistentActorStashingSpec.scala index 0087a3996c4..1e391d91588 100644 --- a/akka-persistence/src/test/scala/akka/persistence/PersistentActorStashingSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/PersistentActorStashingSpec.scala @@ -48,17 +48,16 @@ object PersistentActorStashingSpec { case Cmd("b") => persist(Evt("b"))(evt => sender() ! evt.data) } - def unstashBehavior: Receive = { - case Cmd("c") => unstashAll(); sender() ! "c" + def unstashBehavior: Receive = { case Cmd("c") => + unstashAll(); sender() ! "c" } } class UserStashWithinHandlerPersistentActor(name: String) extends UserStashPersistentActor(name: String) { - override def unstashBehavior: Receive = { - case Cmd("c") => - persist(Evt("c")) { evt => - sender() ! evt.data; unstashAll() - } + override def unstashBehavior: Receive = { case Cmd("c") => + persist(Evt("c")) { evt => + sender() ! 
evt.data; unstashAll() + } } } @@ -73,61 +72,56 @@ object PersistentActorStashingSpec { case Cmd("b-2") => persist(Evt("b-2"))(updateState) } - val processC: Receive = unstashBehavior.orElse { - case _ => stash() + val processC: Receive = unstashBehavior.orElse { case _ => + stash() } - def unstashBehavior: Receive = { - case Cmd("c") => - persist(Evt("c")) { evt => - updateState(evt); context.unbecome() - } - unstashAll() + def unstashBehavior: Receive = { case Cmd("c") => + persist(Evt("c")) { evt => + updateState(evt); context.unbecome() + } + unstashAll() } } class UserStashWithinHandlerManyPersistentActor(name: String) extends UserStashManyPersistentActor(name) { - override def unstashBehavior: Receive = { - case Cmd("c") => - persist(Evt("c")) { evt => - updateState(evt); context.unbecome(); unstashAll() - } + override def unstashBehavior: Receive = { case Cmd("c") => + persist(Evt("c")) { evt => + updateState(evt); context.unbecome(); unstashAll() + } } } class UserStashFailurePersistentActor(name: String) extends StashExamplePersistentActor(name) { - val receiveCommand: Receive = commonBehavior.orElse { - case Cmd(data) => - if (data == "b-2") throw new TestException("boom") - persist(Evt(data)) { evt => - updateState(evt) - if (data == "a") context.become(otherCommandHandler) - } + val receiveCommand: Receive = commonBehavior.orElse { case Cmd(data) => + if (data == "b-2") throw new TestException("boom") + persist(Evt(data)) { evt => + updateState(evt) + if (data == "a") context.become(otherCommandHandler) + } } - val otherCommandHandler: Receive = unstashBehavior.orElse { - case _ => stash() + val otherCommandHandler: Receive = unstashBehavior.orElse { case _ => + stash() } - def unstashBehavior: Receive = { - case Cmd("c") => - persist(Evt("c")) { evt => - updateState(evt) - context.unbecome() - } - unstashAll() + def unstashBehavior: Receive = { case Cmd("c") => + persist(Evt("c")) { evt => + updateState(evt) + context.unbecome() + } + unstashAll() } } 
class UserStashWithinHandlerFailureCallbackPersistentActor(name: String) extends UserStashFailurePersistentActor(name) { - override def unstashBehavior: Receive = { - case Cmd("c") => - persist(Evt("c")) { evt => - updateState(evt) - context.unbecome() - unstashAll() - } + override def unstashBehavior: Receive = { case Cmd("c") => + persist(Evt("c")) { evt => + updateState(evt) + context.unbecome() + unstashAll() + } } } @@ -141,17 +135,16 @@ object PersistentActorStashingSpec { case Cmd("b") => persistAsync(Evt("b"))(updateState) } - override def unstashBehavior: Receive = { - case Cmd("c") => persistAsync(Evt("c"))(updateState); unstashAll() + override def unstashBehavior: Receive = { case Cmd("c") => + persistAsync(Evt("c"))(updateState); unstashAll() } } class AsyncStashingWithinHandlerPersistentActor(name: String) extends AsyncStashingPersistentActor(name) { - override def unstashBehavior: Receive = { - case Cmd("c") => - persistAsync(Evt("c")) { evt => - updateState(evt); unstashAll() - } + override def unstashBehavior: Receive = { case Cmd("c") => + persistAsync(Evt("c")) { evt => + updateState(evt); unstashAll() + } } } @@ -159,20 +152,18 @@ object PersistentActorStashingSpec { val child = context.actorOf(Props(classOf[StashWithinHandlerPersistentActor], name)) - override val supervisorStrategy = OneForOneStrategy(loggingEnabled = false) { - case ex: Exception => - target ! ex - Resume + override val supervisorStrategy = OneForOneStrategy(loggingEnabled = false) { case ex: Exception => + target ! ex + Resume } - def receive = { - case c: Cmd => child ! c + def receive = { case c: Cmd => + child ! 
c } } class StashWithinHandlerPersistentActor(name: String) extends NamedPersistentActor(name) { - val receiveRecover: Receive = { - case _ => // ignore + val receiveRecover: Receive = { case _ => // ignore } def stashWithinHandler(@unused evt: Evt) = { @@ -183,8 +174,7 @@ object PersistentActorStashingSpec { case Cmd("a") => persist(Evt("a"))(stashWithinHandler) case Cmd("b") => persistAsync(Evt("b"))(stashWithinHandler) case Cmd("c") => - persist(Evt("x")) { _ => - } + persist(Evt("x")) { _ => } deferAsync(Evt("c"))(stashWithinHandler) } diff --git a/akka-persistence/src/test/scala/akka/persistence/RecoveryPermitterSpec.scala b/akka-persistence/src/test/scala/akka/persistence/RecoveryPermitterSpec.scala index ab7e11a4cb7..9f5c5a3b483 100644 --- a/akka-persistence/src/test/scala/akka/persistence/RecoveryPermitterSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/RecoveryPermitterSpec.scala @@ -30,15 +30,13 @@ object RecoveryPermitterSpec { probe ! "postStop" } - override def receiveRecover: Receive = { - case RecoveryCompleted => - probe ! RecoveryCompleted - if (throwFromRecoveryCompleted) - throw new TestExc + override def receiveRecover: Receive = { case RecoveryCompleted => + probe ! 
RecoveryCompleted + if (throwFromRecoveryCompleted) + throw new TestExc } - override def receiveCommand: Receive = { - case "stop" => - context.stop(self) + override def receiveCommand: Receive = { case "stop" => + context.stop(self) } } diff --git a/akka-persistence/src/test/scala/akka/persistence/SnapshotDecodeFailureSpec.scala b/akka-persistence/src/test/scala/akka/persistence/SnapshotDecodeFailureSpec.scala index 8966c94f466..060d02e367d 100644 --- a/akka-persistence/src/test/scala/akka/persistence/SnapshotDecodeFailureSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/SnapshotDecodeFailureSpec.scala @@ -16,8 +16,7 @@ object SnapshotDecodeFailureSpec { case Cmd(payload) => persist(payload)(_ => saveSnapshot(payload)) case SaveSnapshotSuccess(md) => probe ! md.sequenceNr } - def receiveRecover = { - case _ => + def receiveRecover = { case _ => } } @@ -25,8 +24,7 @@ object SnapshotDecodeFailureSpec { extends NamedPersistentActor(name) with ActorLogging { - def receiveCommand = { - case _ => + def receiveCommand = { case _ => } def receiveRecover = { case SnapshotOffer(_, _) => throw new Exception("kanbudong") diff --git a/akka-persistence/src/test/scala/akka/persistence/SnapshotDirectoryFailureSpec.scala b/akka-persistence/src/test/scala/akka/persistence/SnapshotDirectoryFailureSpec.scala index e4a233cb9f9..4c5e891c2c0 100644 --- a/akka-persistence/src/test/scala/akka/persistence/SnapshotDirectoryFailureSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/SnapshotDirectoryFailureSpec.scala @@ -16,8 +16,8 @@ object SnapshotDirectoryFailureSpec { override def persistenceId: String = name - override def receiveRecover: Receive = { - case SnapshotOffer(md, s) => probe ! ((md, s)) + override def receiveRecover: Receive = { case SnapshotOffer(md, s) => + probe ! 
((md, s)) } override def receiveCommand = { diff --git a/akka-persistence/src/test/scala/akka/persistence/SnapshotFailureRobustnessSpec.scala b/akka-persistence/src/test/scala/akka/persistence/SnapshotFailureRobustnessSpec.scala index 4169646034a..49d419c168c 100644 --- a/akka-persistence/src/test/scala/akka/persistence/SnapshotFailureRobustnessSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/SnapshotFailureRobustnessSpec.scala @@ -128,10 +128,9 @@ class SnapshotFailureRobustnessSpec try { system.actorOf(Props(classOf[LoadSnapshotTestPersistentActor], name, testActor)) expectMsgType[Logging.Error].message.toString should startWith("Error loading snapshot") - expectMsgPF() { - case (SnapshotMetadata(`persistenceId`, 1, timestamp), state) => - state should ===("blahonga") - timestamp should be > (0L) + expectMsgPF() { case (SnapshotMetadata(`persistenceId`, 1, timestamp), state) => + state should ===("blahonga") + timestamp should be > 0L } expectMsg("kablama-2") expectMsg(RecoveryCompleted) @@ -182,10 +181,9 @@ class SnapshotFailureRobustnessSpec p ! Cmd("hello") expectMsg(1) p ! DeleteSnapshot(1) - expectMsgPF() { - case DeleteSnapshotFailure(SnapshotMetadata(`persistenceId`, 1, _), cause) => - // ok, expected failure - cause.getMessage should include("Failed to delete") + expectMsgPF() { case DeleteSnapshotFailure(SnapshotMetadata(`persistenceId`, 1, _), cause) => + // ok, expected failure + cause.getMessage should include("Failed to delete") } } "receive failure message when bulk deleting snapshot fails" in { @@ -198,10 +196,9 @@ class SnapshotFailureRobustnessSpec expectMsg(2) val criteria = SnapshotSelectionCriteria(maxSequenceNr = 10) p ! 
DeleteSnapshots(criteria) - expectMsgPF() { - case DeleteSnapshotsFailure(_, cause) => - // ok, expected failure - cause.getMessage should include("Failed to delete") + expectMsgPF() { case DeleteSnapshotsFailure(_, cause) => + // ok, expected failure + cause.getMessage should include("Failed to delete") } } } diff --git a/akka-persistence/src/test/scala/akka/persistence/SnapshotRecoveryLocalStoreSpec.scala b/akka-persistence/src/test/scala/akka/persistence/SnapshotRecoveryLocalStoreSpec.scala index 4eebaf3bf09..94847cf21e3 100644 --- a/akka-persistence/src/test/scala/akka/persistence/SnapshotRecoveryLocalStoreSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/SnapshotRecoveryLocalStoreSpec.scala @@ -20,8 +20,7 @@ object SnapshotRecoveryLocalStoreSpec { case SaveSnapshotSuccess(md) => probe ! md.sequenceNr case GetState => probe ! state } - def receiveRecover = { - case _ => + def receiveRecover = { case _ => } } @@ -31,11 +30,10 @@ object SnapshotRecoveryLocalStoreSpec { override def recovery = Recovery(toSequenceNr = 0) - def receiveCommand = { - case _ => + def receiveCommand = { case _ => } - def receiveRecover = { - case other => probe ! other + def receiveRecover = { case other => + probe ! other } } } diff --git a/akka-persistence/src/test/scala/akka/persistence/SnapshotSerializationSpec.scala b/akka-persistence/src/test/scala/akka/persistence/SnapshotSerializationSpec.scala index 59099bcc0c6..19dfd30196c 100644 --- a/akka-persistence/src/test/scala/akka/persistence/SnapshotSerializationSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/SnapshotSerializationSpec.scala @@ -94,10 +94,9 @@ class SnapshotSerializationSpec sPersistentActor ! 
"blahonga" expectMsg(0) system.actorOf(Props(classOf[TestPersistentActor], name, testActor)) - expectMsgPF() { - case (SnapshotMetadata(`persistenceId`, 0, timestamp), state) => - state should ===(new MySnapshot("blahonga")) - timestamp should be > (0L) + expectMsgPF() { case (SnapshotMetadata(`persistenceId`, 0, timestamp), state) => + state should ===(new MySnapshot("blahonga")) + timestamp should be > 0L } } } diff --git a/akka-persistence/src/test/scala/akka/persistence/SnapshotSpec.scala b/akka-persistence/src/test/scala/akka/persistence/SnapshotSpec.scala index 5269a19d30b..1b2cb0bb2da 100644 --- a/akka-persistence/src/test/scala/akka/persistence/SnapshotSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/SnapshotSpec.scala @@ -108,10 +108,9 @@ class SnapshotSpec extends PersistenceSpec(PersistenceSpec.config("inmem", "Snap system.actorOf(Props(classOf[LoadSnapshotTestPersistentActor], name, Recovery(), testActor)) val persistenceId = name - expectMsgPF() { - case SnapshotOffer(SnapshotMetadata(`persistenceId`, 4, timestamp), state) => - state should ===(List("a-1", "b-2", "c-3", "d-4").reverse) - timestamp should be > (0L) + expectMsgPF() { case SnapshotOffer(SnapshotMetadata(`persistenceId`, 4, timestamp), state) => + state should ===(List("a-1", "b-2", "c-3", "d-4").reverse) + timestamp should be > 0L } expectMsg("e-5") expectMsg("f-6") @@ -132,10 +131,9 @@ class SnapshotSpec extends PersistenceSpec(PersistenceSpec.config("inmem", "Snap system.actorOf(Props(classOf[LoadSnapshotTestPersistentActor], name, Recovery(toSequenceNr = 3), testActor)) val persistenceId = name - expectMsgPF() { - case SnapshotOffer(SnapshotMetadata(`persistenceId`, 2, timestamp), state) => - state should ===(List("a-1", "b-2").reverse) - timestamp should be > (0L) + expectMsgPF() { case SnapshotOffer(SnapshotMetadata(`persistenceId`, 2, timestamp), state) => + state should ===(List("a-1", "b-2").reverse) + timestamp should be > 0L } expectMsg("c-3") 
expectMsg(RecoveryCompleted) @@ -147,10 +145,9 @@ class SnapshotSpec extends PersistenceSpec(PersistenceSpec.config("inmem", "Snap persistentActor ! "done" - expectMsgPF() { - case SnapshotOffer(SnapshotMetadata(`persistenceId`, 4, timestamp), state) => - state should ===(List("a-1", "b-2", "c-3", "d-4").reverse) - timestamp should be > (0L) + expectMsgPF() { case SnapshotOffer(SnapshotMetadata(`persistenceId`, 4, timestamp), state) => + state should ===(List("a-1", "b-2", "c-3", "d-4").reverse) + timestamp should be > 0L } expectMsg(RecoveryCompleted) expectMsg("done") @@ -160,10 +157,9 @@ class SnapshotSpec extends PersistenceSpec(PersistenceSpec.config("inmem", "Snap system.actorOf(Props(classOf[LoadSnapshotTestPersistentActor], name, recovery, testActor)) val persistenceId = name - expectMsgPF() { - case SnapshotOffer(SnapshotMetadata(`persistenceId`, 2, timestamp), state) => - state should ===(List("a-1", "b-2").reverse) - timestamp should be > (0L) + expectMsgPF() { case SnapshotOffer(SnapshotMetadata(`persistenceId`, 2, timestamp), state) => + state should ===(List("a-1", "b-2").reverse) + timestamp should be > 0L } expectMsg("c-3") expectMsg("d-4") @@ -176,10 +172,9 @@ class SnapshotSpec extends PersistenceSpec(PersistenceSpec.config("inmem", "Snap system.actorOf(Props(classOf[LoadSnapshotTestPersistentActor], name, recovery, testActor)) val persistenceId = name - expectMsgPF() { - case SnapshotOffer(SnapshotMetadata(`persistenceId`, 2, timestamp), state) => - state should ===(List("a-1", "b-2").reverse) - timestamp should be > (0L) + expectMsgPF() { case SnapshotOffer(SnapshotMetadata(`persistenceId`, 2, timestamp), state) => + state should ===(List("a-1", "b-2").reverse) + timestamp should be > 0L } expectMsg("c-3") expectMsg(RecoveryCompleted) @@ -206,10 +201,9 @@ class SnapshotSpec extends PersistenceSpec(PersistenceSpec.config("inmem", "Snap persistentActor1 ! 
"done" - val metadata = expectMsgPF() { - case SnapshotOffer(md @ SnapshotMetadata(`persistenceId`, 4, _), state) => - state should ===(List("a-1", "b-2", "c-3", "d-4").reverse) - md + val metadata = expectMsgPF() { case SnapshotOffer(md @ SnapshotMetadata(`persistenceId`, 4, _), state) => + state should ===(List("a-1", "b-2", "c-3", "d-4").reverse) + md } expectMsg(RecoveryCompleted) expectMsg("done") @@ -243,9 +237,8 @@ class SnapshotSpec extends PersistenceSpec(PersistenceSpec.config("inmem", "Snap // recover persistentActor and the delete first three (= all) snapshots val criteria = SnapshotSelectionCriteria(maxSequenceNr = 4) persistentActor1 ! DeleteN(criteria) - expectMsgPF() { - case SnapshotOffer(SnapshotMetadata(`persistenceId`, 4, _), state) => - state should ===(List("a-1", "b-2", "c-3", "d-4").reverse) + expectMsgPF() { case SnapshotOffer(SnapshotMetadata(`persistenceId`, 4, _), state) => + state should ===(List("a-1", "b-2", "c-3", "d-4").reverse) } expectMsg(RecoveryCompleted) deleteProbe.expectMsgType[DeleteSnapshots] diff --git a/akka-persistence/src/test/scala/akka/persistence/TimerPersistentActorSpec.scala b/akka-persistence/src/test/scala/akka/persistence/TimerPersistentActorSpec.scala index 01da13da587..ad517c32ade 100644 --- a/akka-persistence/src/test/scala/akka/persistence/TimerPersistentActorSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/TimerPersistentActorSpec.scala @@ -27,8 +27,7 @@ object TimerPersistentActorSpec { override def persistenceId = name - override def receiveRecover: Receive = { - case _ => + override def receiveRecover: Receive = { case _ => } override def receiveCommand: Receive = { @@ -45,11 +44,10 @@ object TimerPersistentActorSpec { // this should fail in constructor class WrongOrder extends PersistentActor with Timers { override def persistenceId = "notused" - override def receiveRecover: Receive = { - case _ => + override def receiveRecover: Receive = { case _ => } - override def receiveCommand: 
Receive = { - case _ => () + override def receiveCommand: Receive = { case _ => + () } } @@ -70,9 +68,11 @@ object TimerPersistentActorSpec { BoxedUnit.UNIT case msg => timers.startSingleTimer("key", Scheduled(msg, sender()), Duration.Zero) - persist(msg, new Procedure[Any] { - override def apply(evt: Any): Unit = () - }) + persist( + msg, + new Procedure[Any] { + override def apply(evt: Any): Unit = () + }) BoxedUnit.UNIT }) } diff --git a/akka-persistence/src/test/scala/akka/persistence/journal/InmemEventAdaptersSpec.scala b/akka-persistence/src/test/scala/akka/persistence/journal/InmemEventAdaptersSpec.scala index c1891f4467d..f66664a3cef 100644 --- a/akka-persistence/src/test/scala/akka/persistence/journal/InmemEventAdaptersSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/journal/InmemEventAdaptersSpec.scala @@ -14,7 +14,8 @@ import akka.testkit.AkkaSpec @nowarn("msg=Unused import") class InmemEventAdaptersSpec extends AkkaSpec { - val config = ConfigFactory.parseString(s""" + val config = ConfigFactory + .parseString(s""" |akka.persistence.journal { | plugin = "akka.persistence.journal.inmem" | @@ -44,7 +45,8 @@ class InmemEventAdaptersSpec extends AkkaSpec { | } | } |} - """.stripMargin).withFallback(ConfigFactory.load()) + """.stripMargin) + .withFallback(ConfigFactory.load()) val extendedActorSystem = system.asInstanceOf[ExtendedActorSystem] val inmemConfig = config.getConfig("akka.persistence.journal.inmem") diff --git a/akka-persistence/src/test/scala/akka/persistence/journal/SteppingInmemJournal.scala b/akka-persistence/src/test/scala/akka/persistence/journal/SteppingInmemJournal.scala index 8cb4e85ba8c..7032e6824a9 100644 --- a/akka-persistence/src/test/scala/akka/persistence/journal/SteppingInmemJournal.scala +++ b/akka-persistence/src/test/scala/akka/persistence/journal/SteppingInmemJournal.scala @@ -24,9 +24,7 @@ object SteppingInmemJournal { case object Token case object TokenConsumed - /** - * Allow the journal to do one operation, 
will block until that completes - */ + /** Allow the journal to do one operation, will block until that completes */ def step(journal: ActorRef)(implicit system: ActorSystem): Unit = { implicit val timeout: Timeout = 3.seconds.dilated Await.result(journal ? SteppingInmemJournal.Token, timeout.duration) diff --git a/akka-persistence/src/test/scala/akka/persistence/journal/chaos/ChaosJournal.scala b/akka-persistence/src/test/scala/akka/persistence/journal/chaos/ChaosJournal.scala index 40f1d6d2939..e79df70d709 100644 --- a/akka-persistence/src/test/scala/akka/persistence/journal/chaos/ChaosJournal.scala +++ b/akka-persistence/src/test/scala/akka/persistence/journal/chaos/ChaosJournal.scala @@ -41,23 +41,27 @@ class ChaosJournal extends AsyncWriteJournal { def random = ThreadLocalRandom.current override def asyncWriteMessages(messages: immutable.Seq[AtomicWrite]): Future[immutable.Seq[Try[Unit]]] = - try Future.successful { - if (shouldFail(writeFailureRate)) throw new WriteFailedException(messages.flatMap(_.payload)) - else - for (a <- messages) yield { - a.payload.foreach(add) - AsyncWriteJournal.successUnit - } - } catch { + try + Future.successful { + if (shouldFail(writeFailureRate)) throw new WriteFailedException(messages.flatMap(_.payload)) + else + for (a <- messages) yield { + a.payload.foreach(add) + AsyncWriteJournal.successUnit + } + } + catch { case NonFatal(e) => Future.failed(e) } override def asyncDeleteMessagesTo(persistenceId: String, toSequenceNr: Long): Future[Unit] = { - try Future.successful { - (1L to toSequenceNr).foreach { snr => - del(persistenceId, snr) + try + Future.successful { + (1L to toSequenceNr).foreach { snr => + del(persistenceId, snr) + } } - } catch { + catch { case NonFatal(e) => Future.failed(e) } } diff --git a/akka-persistence/src/test/scala/akka/persistence/journal/inmem/InmemJournalSpec.scala b/akka-persistence/src/test/scala/akka/persistence/journal/inmem/InmemJournalSpec.scala index dcf2eb72f52..e18ae3fc2ac 100644 --- 
a/akka-persistence/src/test/scala/akka/persistence/journal/inmem/InmemJournalSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/journal/inmem/InmemJournalSpec.scala @@ -22,8 +22,7 @@ object InmemJournalSpec { override def persistenceId: String = name - override def receiveRecover: Receive = { - case Evt(_) => + override def receiveRecover: Receive = { case Evt(_) => } override def receiveCommand: Receive = { case Cmd(s) => persist(Evt(s))(_ => ()) diff --git a/akka-persistence/src/test/scala/akka/persistence/journal/leveldb/JournalCompactionSpec.scala b/akka-persistence/src/test/scala/akka/persistence/journal/leveldb/JournalCompactionSpec.scala index 1c254b7a21b..28caa63a4bf 100644 --- a/akka-persistence/src/test/scala/akka/persistence/journal/leveldb/JournalCompactionSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/journal/leveldb/JournalCompactionSpec.scala @@ -185,8 +185,8 @@ object JournalCompactionSpec { import EventLogger._ - override def receiveRecover: Receive = { - case Event(seqNr, _) => log.info("Recovered event {}", seqNr) + override def receiveRecover: Receive = { case Event(seqNr, _) => + log.info("Recovered event {}", seqNr) } override def receiveCommand: Receive = { diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/AttemptSysMsgRedeliverySpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/AttemptSysMsgRedeliverySpec.scala index ee51a3c963e..2d7f17671a0 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/AttemptSysMsgRedeliverySpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/AttemptSysMsgRedeliverySpec.scala @@ -41,8 +41,8 @@ class AttemptSysMsgRedeliveryMultiJvmNode3 extends AttemptSysMsgRedeliverySpec object AttemptSysMsgRedeliverySpec { class Echo extends Actor { - def receive = { - case m => sender() ! m + def receive = { case m => + sender() ! 
m } } } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/LookupRemoteActorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/LookupRemoteActorSpec.scala index bd18d151758..c292cc53bb4 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/LookupRemoteActorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/LookupRemoteActorSpec.scala @@ -28,8 +28,8 @@ class LookupRemoteActorMultiJvmNode2 extends LookupRemoteActorSpec object LookupRemoteActorSpec { class SomeActor extends Actor { - def receive = { - case "identify" => sender() ! self + def receive = { case "identify" => + sender() ! self } } } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/NewRemoteActorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/NewRemoteActorSpec.scala index 21b006aec3f..674791982c4 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/NewRemoteActorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/NewRemoteActorSpec.scala @@ -19,15 +19,19 @@ import akka.util.unused object NewRemoteActorMultiJvmSpec extends MultiNodeConfig { commonConfig( - debugConfig(on = false) - .withFallback(ConfigFactory.parseString(""" + debugConfig(on = false).withFallback( + ConfigFactory + .parseString(""" akka.remote.use-unsafe-remote-features-outside-cluster = on - """).withFallback(RemotingMultiNodeSpec.commonConfig))) + """) + .withFallback(RemotingMultiNodeSpec.commonConfig))) val leader = role("leader") val follower = role("follower") - deployOn(leader, """ + deployOn( + leader, + """ /service-hello.remote = "@follower@" /service-hello-null.remote = "@follower@" /service-hello3.remote = "@follower@" @@ -41,14 +45,14 @@ class NewRemoteActorMultiJvmNode2 extends NewRemoteActorSpec object NewRemoteActorSpec { class SomeActor extends Actor { - def receive = { - case "identify" => sender() ! self + def receive = { case "identify" => + sender() ! 
self } } class SomeActorWithParam(@unused ignored: String) extends Actor { - def receive = { - case "identify" => sender() ! self + def receive = { case "identify" => + sender() ! self } } } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/PiercingShouldKeepQuarantineSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/PiercingShouldKeepQuarantineSpec.scala index cd82be28eed..34d09cca884 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/PiercingShouldKeepQuarantineSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/PiercingShouldKeepQuarantineSpec.scala @@ -23,8 +23,8 @@ class PiercingShouldKeepQuarantineSpecMultiJvmNode2 extends PiercingShouldKeepQu object PiercingShouldKeepQuarantineSpec { class Subject extends Actor { - def receive = { - case "getuid" => sender() ! context.system.asInstanceOf[ExtendedActorSystem].uid + def receive = { case "getuid" => + sender() ! context.system.asInstanceOf[ExtendedActorSystem].uid } } } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteDeliverySpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteDeliverySpec.scala index 67f2ca04ab2..5988162a245 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteDeliverySpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteDeliverySpec.scala @@ -32,8 +32,8 @@ object RemoteDeliverySpec { final case class Letter(n: Int, route: List[ActorRef]) extends CborSerializable class Postman extends Actor { - def receive = { - case Letter(n, route) => route.head ! Letter(n, route.tail) + def receive = { case Letter(n, route) => + route.head ! 
Letter(n, route.tail) } } } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteFeaturesSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteFeaturesSpec.scala index fdd67e25006..6ac023a1fba 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteFeaturesSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteFeaturesSpec.scala @@ -40,9 +40,11 @@ class RemotingFeaturesConfig(val useUnsafe: Boolean) extends MultiNodeConfig { val workerInstances = 3 val iterationCount = 10 - protected val baseConfig = ConfigFactory.parseString(s""" + protected val baseConfig = ConfigFactory + .parseString(s""" akka.remote.use-unsafe-remote-features-outside-cluster = $useUnsafe - """).withFallback(RemotingMultiNodeSpec.commonConfig) + """) + .withFallback(RemotingMultiNodeSpec.commonConfig) commonConfig(debugConfig(on = false).withFallback(baseConfig)) @@ -300,7 +302,7 @@ abstract class RemotingFeaturesSpec(val multiNodeConfig: RemotingFeaturesConfig) "A remote round robin pool" must { s"${if (useUnsafe) "be instantiated on remote node and communicate through its RemoteActorRef" - else "not be instantiated on remote node and communicate through its LocalActorRef "} " in { + else "not be instantiated on remote node and communicate through its LocalActorRef "} " in { runOn(first, second, third) { enterBarrier("start", "broadcast-end", "end") @@ -315,8 +317,8 @@ abstract class RemotingFeaturesSpec(val multiNodeConfig: RemotingFeaturesConfig) actor ! 
"hit" } - val replies = receiveWhile(5.seconds, messages = workerInstances * iterationCount) { - case ref: ActorRef => ref.path.address + val replies = receiveWhile(5.seconds, messages = workerInstances * iterationCount) { case ref: ActorRef => + ref.path.address }.foldLeft(Map(node(first).address -> 0, node(second).address -> 0, node(third).address -> 0)) { case (replyMap, address) => if (useUnsafe) address.hasLocalScope shouldBe false diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeDeathWatchSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeDeathWatchSpec.scala index 822d365c701..ca5757710ce 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeDeathWatchSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeDeathWatchSpec.scala @@ -27,8 +27,7 @@ object RemoteNodeDeathWatchConfig extends MultiNodeConfig { commonConfig( debugConfig(on = false) - .withFallback(ConfigFactory.parseString( - """ + .withFallback(ConfigFactory.parseString(""" akka.loglevel = INFO ## Use a tighter setting than the default, otherwise it takes 20s for DeathWatch to trigger akka.remote.watch-failure-detector.acceptable-heartbeat-pause = 3 s diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeRestartDeathWatchSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeRestartDeathWatchSpec.scala index c3d7919edb5..23ae7899384 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeRestartDeathWatchSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeRestartDeathWatchSpec.scala @@ -26,9 +26,7 @@ object RemoteNodeRestartDeathWatchConfig extends MultiNodeConfig { val first = role("first") val second = role("second") - commonConfig( - debugConfig(on = false) - .withFallback(ConfigFactory.parseString(""" + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" akka.loglevel = INFO 
akka.remote.use-unsafe-remote-features-outside-cluster = on """))) @@ -103,9 +101,11 @@ abstract class RemoteNodeRestartDeathWatchSpec extends RemotingMultiNodeSpec(Rem val freshSystem = ActorSystem( system.name, - ConfigFactory.parseString(s""" + ConfigFactory + .parseString(s""" akka.remote.artery.canonical.port = ${address.port.get} - """).withFallback(system.settings.config)) + """) + .withFallback(system.settings.config)) freshSystem.actorOf(Props[Subject](), "subject") Await.ready(freshSystem.whenTerminated, 30.seconds) diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteQuarantinePiercingSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteQuarantinePiercingSpec.scala index 62edfab46f4..74a66498916 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteQuarantinePiercingSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteQuarantinePiercingSpec.scala @@ -85,8 +85,8 @@ abstract class RemoteQuarantinePiercingSpec extends RemotingMultiNodeSpec(Remote awaitAssert { system.actorSelection(RootActorPath(secondAddress) / "user" / "subject") ! 
"identify" val (uidSecond, subjectSecond) = expectMsgType[(Long, ActorRef)](1.second) - uidSecond should not be (uidFirst) - subjectSecond should not be (subjectFirst) + uidSecond should not be uidFirst + subjectSecond should not be subjectFirst } } @@ -107,9 +107,11 @@ abstract class RemoteQuarantinePiercingSpec extends RemotingMultiNodeSpec(Remote val freshSystem = ActorSystem( system.name, - ConfigFactory.parseString(s""" + ConfigFactory + .parseString(s""" akka.remote.artery.canonical.port = ${address.port.get} - """).withFallback(system.settings.config)) + """) + .withFallback(system.settings.config)) freshSystem.actorOf(Props[Subject](), "subject") Await.ready(freshSystem.whenTerminated, 30.seconds) diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteReDeploymentSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteReDeploymentSpec.scala index 8813db7ef6d..2ad292c6625 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteReDeploymentSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteReDeploymentSpec.scala @@ -87,10 +87,9 @@ object RemoteReDeploymentMultiJvmSpec { } class Echo(target: ActorRef) extends Actor with ActorLogging { - def receive = { - case msg => - log.info(s"received $msg from ${sender()}") - target ! msg + def receive = { case msg => + log.info(s"received $msg from ${sender()}") + target ! 
msg } } def echoProps(target: ActorRef) = Props(new Echo(target)) @@ -137,7 +136,8 @@ abstract class RemoteReDeploymentMultiJvmSpec extends RemotingMultiNodeSpec(Remo // The quarantine of node 2, where the Parent lives, should cause the Hello child to be stopped: expectMsg("PostStop") expectNoMessage() - } else expectNoMessage(sleepAfterKill) + } + else expectNoMessage(sleepAfterKill) awaitAssert(node(second), 10.seconds, 100.millis) } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemotingMultiNodeSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemotingMultiNodeSpec.scala index 7114302fae3..fcba6441602 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemotingMultiNodeSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemotingMultiNodeSpec.scala @@ -14,9 +14,11 @@ import akka.testkit.{ DefaultTimeout, ImplicitSender } object RemotingMultiNodeSpec { def commonConfig = - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" akka.actor.warn-about-java-serializer-usage = off - """).withFallback(ArterySpecSupport.tlsConfig) // TLS only used if transport=tls-tcp + """) + .withFallback(ArterySpecSupport.tlsConfig) // TLS only used if transport=tls-tcp } @@ -25,6 +27,4 @@ abstract class RemotingMultiNodeSpec(config: MultiNodeConfig) with Suite with STMultiNodeSpec with ImplicitSender - with DefaultTimeout { self: MultiNodeSpec => - -} + with DefaultTimeout { self: MultiNodeSpec => } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/FanInThrougputSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/FanInThrougputSpec.scala index 9853da6f481..874a17dea64 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/FanInThrougputSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/FanInThrougputSpec.scala @@ -31,8 +31,7 @@ object FanInThroughputSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) - .withFallback( - 
ConfigFactory.parseString(""" + .withFallback(ConfigFactory.parseString(""" # for serious measurements you should increase the totalMessagesFactor (20) akka.test.FanInThroughputSpec.totalMessagesFactor = 10.0 akka.test.FanInThroughputSpec.real-message = off diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/FanOutThrougputSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/FanOutThrougputSpec.scala index a6ba48f9d6a..3dd39d5feef 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/FanOutThrougputSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/FanOutThrougputSpec.scala @@ -32,8 +32,7 @@ object FanOutThroughputSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) - .withFallback( - ConfigFactory.parseString(""" + .withFallback(ConfigFactory.parseString(""" # for serious measurements you should increase the totalMessagesFactor (20) akka.test.FanOutThroughputSpec.totalMessagesFactor = 10.0 akka.test.FanOutThroughputSpec.real-message = off @@ -155,13 +154,12 @@ abstract class FanOutThroughputSpec extends RemotingMultiNodeSpec(FanOutThroughp snd ! 
Run (snd, terminationProbe, plotProbe) } - senders.foreach { - case (snd, terminationProbe, plotProbe) => - terminationProbe.expectTerminated(snd, barrierTimeout) - if (snd == senders.head._1) { - val plotResult = plotProbe.expectMsgType[PlotResult] - plot = plot.addAll(plotResult) - } + senders.foreach { case (snd, terminationProbe, plotProbe) => + terminationProbe.expectTerminated(snd, barrierTimeout) + if (snd == senders.head._1) { + val plotResult = plotProbe.expectMsgType[PlotResult] + plot = plot.addAll(plotResult) + } } enterBarrier(testName + "-done") } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/HandshakeRestartReceiverSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/HandshakeRestartReceiverSpec.scala index 83bee329e3a..12c8c76bad0 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/HandshakeRestartReceiverSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/HandshakeRestartReceiverSpec.scala @@ -23,9 +23,7 @@ object HandshakeRestartReceiverSpec extends MultiNodeConfig { val first = role("first") val second = role("second") - commonConfig( - debugConfig(on = false) - .withFallback(ConfigFactory.parseString(""" + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" akka { loglevel = INFO actor.provider = remote @@ -120,9 +118,11 @@ abstract class HandshakeRestartReceiverSpec val freshSystem = ActorSystem( system.name, - ConfigFactory.parseString(s""" + ConfigFactory + .parseString(s""" akka.remote.artery.canonical.port = ${address.port.get} - """).withFallback(system.settings.config)) + """) + .withFallback(system.settings.config)) freshSystem.actorOf(Props[Subject](), "subject2") Await.result(freshSystem.whenTerminated, 45.seconds) diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/LatencySpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/LatencySpec.scala index d0e3b40fc7d..50ac432c5aa 100644 --- 
a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/LatencySpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/LatencySpec.scala @@ -29,7 +29,9 @@ object LatencySpec extends MultiNodeConfig { val barrierTimeout = 5.minutes - commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" + commonConfig( + debugConfig(on = false) + .withFallback(ConfigFactory.parseString(s""" # for serious measurements you should increase the totalMessagesFactor (30) and repeatCount (3) akka.test.LatencySpec.totalMessagesFactor = 1.0 akka.test.LatencySpec.repeatCount = 1 @@ -61,7 +63,8 @@ object LatencySpec extends MultiNodeConfig { } } } - """)).withFallback(RemotingMultiNodeSpec.commonConfig)) + """)) + .withFallback(RemotingMultiNodeSpec.commonConfig)) case object Reset extends CborSerializable diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/MaxThroughputSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/MaxThroughputSpec.scala index e6fdce6c686..25cf8bb23b2 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/MaxThroughputSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/MaxThroughputSpec.scala @@ -113,8 +113,8 @@ object MaxThroughputSpec extends MultiNodeConfig { class Receiver(reporter: RateReporter, payloadSize: Int, numSenders: Int) extends Actor { private var c = 0L private var endMessagesMissing = numSenders - private var correspondingSender - : ActorRef = null // the Actor which send the Start message will also receive the report + private var correspondingSender: ActorRef = + null // the Actor which send the Start message will also receive the report def receive = { case msg: Array[Byte] => @@ -176,13 +176,12 @@ object MaxThroughputSpec extends MultiNodeConfig { val compressionEnabled = RARP(context.system).provider.remoteSettings.Artery.Advanced.Compression.Enabled - def receive = { - case Run => - if (compressionEnabled) { - 
target.tell(Warmup(payload), self) - context.setReceiveTimeout(1.second) - context.become(waitingForCompression) - } else runWarmup() + def receive = { case Run => + if (compressionEnabled) { + target.tell(Warmup(payload), self) + context.setReceiveTimeout(1.second) + context.become(waitingForCompression) + } else runWarmup() } def waitingForCompression: Receive = { @@ -226,39 +225,40 @@ object MaxThroughputSpec extends MultiNodeConfig { case _: Warmup => } - def active: Receive = { - case _ @FlowControl(id, t0) => - val targetCount = pendingFlowControl(id) - if (targetCount - 1 == 0) { - pendingFlowControl -= id - val now = System.nanoTime() - val duration = NANOSECONDS.toMillis(now - t0) - maxRoundTripMillis = math.max(maxRoundTripMillis, duration) + def active: Receive = { case _ @FlowControl(id, t0) => + val targetCount = pendingFlowControl(id) + if (targetCount - 1 == 0) { + pendingFlowControl -= id + val now = System.nanoTime() + val duration = NANOSECONDS.toMillis(now - t0) + maxRoundTripMillis = math.max(maxRoundTripMillis, duration) - sendBatch(warmup = false) - sendFlowControl(now) - } else { - // waiting for FlowControl from more targets - pendingFlowControl = pendingFlowControl.updated(id, targetCount - 1) - } + sendBatch(warmup = false) + sendFlowControl(now) + } else { + // waiting for FlowControl from more targets + pendingFlowControl = pendingFlowControl.updated(id, targetCount - 1) + } } val waitingForEndResult: Receive = { case EndResult(totalReceived) => val took = NANOSECONDS.toMillis(System.nanoTime - startTime) - val throughput = (totalReceived * 1000.0 / took) - - reporter.reportResults(s"=== ${reporter.testName} ${self.path.name}: " + - f"throughput ${throughput * testSettings.senderReceiverPairs}%,.0f msg/s, " + - f"${throughput * payloadSize * testSettings.senderReceiverPairs}%,.0f bytes/s (payload), " + - f"${throughput * totalSize(context.system) * testSettings.senderReceiverPairs}%,.0f bytes/s (total" + - (if 
(RARP(context.system).provider.remoteSettings.Artery.Advanced.Compression.Enabled) ",compression" else "") + "), " + - (if (testSettings.senderReceiverPairs == 1) s"dropped ${totalMessages - totalReceived}, " else "") + - s"max round-trip $maxRoundTripMillis ms, " + - s"burst size $burstSize, " + - s"payload size $payloadSize, " + - s"total size ${totalSize(context.system)}, " + - s"$took ms to deliver $totalReceived messages.") + val throughput = totalReceived * 1000.0 / took + + reporter.reportResults( + s"=== ${reporter.testName} ${self.path.name}: " + + f"throughput ${throughput * testSettings.senderReceiverPairs}%,.0f msg/s, " + + f"${throughput * payloadSize * testSettings.senderReceiverPairs}%,.0f bytes/s (payload), " + + f"${throughput * totalSize(context.system) * testSettings.senderReceiverPairs}%,.0f bytes/s (total" + + (if (RARP(context.system).provider.remoteSettings.Artery.Advanced.Compression.Enabled) ",compression" + else "") + "), " + + (if (testSettings.senderReceiverPairs == 1) s"dropped ${totalMessages - totalReceived}, " else "") + + s"max round-trip $maxRoundTripMillis ms, " + + s"burst size $burstSize, " + + s"payload size $payloadSize, " + + s"total size ${totalSize(context.system)}, " + + s"$took ms to deliver $totalReceived messages.") plotRef ! PlotResult().add(testName, throughput * payloadSize * testSettings.senderReceiverPairs / 1024 / 1024) context.stop(self) @@ -469,13 +469,12 @@ abstract class MaxThroughputSpec extends RemotingMultiNodeSpec(MaxThroughputSpec snd ! 
Run (snd, terminationProbe, plotProbe) } - senders.foreach { - case (snd, terminationProbe, plotProbe) => - terminationProbe.expectTerminated(snd, barrierTimeout) - if (snd == senders.head._1) { - val plotResult = plotProbe.expectMsgType[PlotResult] - plot = plot.addAll(plotResult) - } + senders.foreach { case (snd, terminationProbe, plotProbe) => + terminationProbe.expectTerminated(snd, barrierTimeout) + if (snd == senders.head._1) { + val plotResult = plotProbe.expectMsgType[PlotResult] + plot = plot.addAll(plotResult) + } } enterBarrier(testName + "-done") } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/RemoteRestartedQuarantinedSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/RemoteRestartedQuarantinedSpec.scala index 71af284bddc..870c446b996 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/RemoteRestartedQuarantinedSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/RemoteRestartedQuarantinedSpec.scala @@ -98,8 +98,7 @@ abstract class RemoteRestartedQuarantinedSpec extends RemotingMultiNodeSpec(Remo enterBarrier("before-quarantined") enterBarrier("quarantined") - expectMsgPF(10 seconds) { - case ThisActorSystemQuarantinedEvent(_, _) => + expectMsgPF(10 seconds) { case ThisActorSystemQuarantinedEvent(_, _) => } // check that we quarantine back @@ -115,16 +114,18 @@ abstract class RemoteRestartedQuarantinedSpec extends RemotingMultiNodeSpec(Remo val freshSystem = ActorSystem( system.name, - ConfigFactory.parseString(s""" + ConfigFactory + .parseString(s""" akka.remote.artery.canonical.port = ${address.port.get} - """).withFallback(system.settings.config)) + """) + .withFallback(system.settings.config)) val probe = TestProbe()(freshSystem) freshSystem .actorSelection(RootActorPath(firstAddress) / "user" / "subject") .tell(Identify("subject"), probe.ref) - probe.expectMsgType[ActorIdentity](5.seconds).ref should not be (None) + probe.expectMsgType[ActorIdentity](5.seconds).ref 
should not be None // Now the other system will be able to pass, too freshSystem.actorOf(Props[Subject](), "subject") diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/SurviveInboundStreamRestartWithCompressionInFlightSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/SurviveInboundStreamRestartWithCompressionInFlightSpec.scala index 9dd2ee7c351..42b48c75b29 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/SurviveInboundStreamRestartWithCompressionInFlightSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/SurviveInboundStreamRestartWithCompressionInFlightSpec.scala @@ -43,11 +43,10 @@ object SurviveInboundStreamRestartWithCompressionInFlightSpec extends MultiNodeC * @param ref target ActorRef to forward messages to */ case class TellAndEcho(ref: ActorRef) extends Actor { - override def receive = { - case msg => - ref ! msg - val reply = s"${self.path.name}-$msg" - sender() ! reply + override def receive = { case msg => + ref ! msg + val reply = s"${self.path.name}-$msg" + sender() ! reply } } @@ -126,10 +125,13 @@ abstract class SurviveInboundStreamRestartWithCompressionInFlightSpec Thread.sleep(2000) // we poke the remote system, awaiting its inbound stream recovery, then it should reply - awaitAssert({ - sendToB ! "alive-again" - expectMsg(300.millis, s"${sendToB.path.name}-alive-again") - }, max = 5.seconds, interval = 500.millis) + awaitAssert( + { + sendToB ! "alive-again" + expectMsg(300.millis, s"${sendToB.path.name}-alive-again") + }, + max = 5.seconds, + interval = 500.millis) // we continue sending messages using the "old table". 
// if a new table was being built, it would cause the b to be compressed as 1 causing a wrong reply to come back diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/TestRateReporter.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/TestRateReporter.scala index 0ed2ed54f93..1ac8fe79636 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/TestRateReporter.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/TestRateReporter.scala @@ -7,22 +7,24 @@ package akka.remote.artery import java.util.concurrent.TimeUnit.SECONDS class TestRateReporter(name: String) - extends RateReporter(SECONDS.toNanos(1), new RateReporter.Reporter { - override def onReport( - messagesPerSec: Double, - bytesPerSec: Double, - totalMessages: Long, - totalBytes: Long): Unit = { - if (totalBytes > 0) { - println( - name + - f": ${messagesPerSec}%,.0f msgs/sec, ${bytesPerSec}%,.0f bytes/sec, " + - f"totals ${totalMessages}%,d messages ${totalBytes / (1024 * 1024)}%,d MB") - } else { - println( - name + - f": ${messagesPerSec}%,.0f msgs/sec " + - f"total ${totalMessages}%,d messages") + extends RateReporter( + SECONDS.toNanos(1), + new RateReporter.Reporter { + override def onReport( + messagesPerSec: Double, + bytesPerSec: Double, + totalMessages: Long, + totalBytes: Long): Unit = { + if (totalBytes > 0) { + println( + name + + f": ${messagesPerSec}%,.0f msgs/sec, ${bytesPerSec}%,.0f bytes/sec, " + + f"totals ${totalMessages}%,d messages ${totalBytes / (1024 * 1024)}%,d MB") + } else { + println( + name + + f": ${messagesPerSec}%,.0f msgs/sec " + + f"total ${totalMessages}%,d messages") + } } - } - }) {} + }) {} diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/UdpPortActor.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/UdpPortActor.scala index 3a78192526f..eaeebc68128 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/UdpPortActor.scala +++ 
b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/UdpPortActor.scala @@ -13,16 +13,14 @@ object UdpPortActor { case object GetUdpPort extends CborSerializable } -/** - * Used for exchanging free udp port between multi-jvm nodes - */ +/** Used for exchanging free udp port between multi-jvm nodes */ class UdpPortActor extends Actor { import UdpPortActor._ val port = SocketUtil.temporaryServerAddress(RARP(context.system).provider.getDefaultAddress.host.get, udp = true).getPort - def receive = { - case GetUdpPort => sender() ! port + def receive = { case GetUdpPort => + sender() ! port } } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRandomSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRandomSpec.scala index 735a00e5856..f2f80620405 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRandomSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRandomSpec.scala @@ -50,8 +50,8 @@ class RemoteRandomMultiJvmNode4 extends RemoteRandomSpec object RemoteRandomSpec { class SomeActor extends Actor { - def receive = { - case "hit" => sender() ! self + def receive = { case "hit" => + sender() ! self } } } @@ -81,9 +81,9 @@ class RemoteRandomSpec extends RemotingMultiNodeSpec(RemoteRandomConfig) with De actor ! 
"hit" } - val replies: Map[Address, Int] = (receiveWhile(5.seconds, messages = connectionCount * iterationCount) { + val replies: Map[Address, Int] = receiveWhile(5.seconds, messages = connectionCount * iterationCount) { case ref: ActorRef => ref.path.address - }).foldLeft(Map(node(first).address -> 0, node(second).address -> 0, node(third).address -> 0)) { + }.foldLeft(Map(node(first).address -> 0, node(second).address -> 0, node(third).address -> 0)) { case (replyMap, address) => replyMap + (address -> (replyMap(address) + 1)) } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRoundRobinSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRoundRobinSpec.scala index 579b2d0ef73..24c85adacbe 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRoundRobinSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRoundRobinSpec.scala @@ -61,8 +61,8 @@ class RemoteRoundRobinMultiJvmNode4 extends RemoteRoundRobinSpec object RemoteRoundRobinSpec { class SomeActor extends Actor { - def receive = { - case "hit" => sender() ! self + def receive = { case "hit" => + sender() ! self } } @@ -97,11 +97,11 @@ class RemoteRoundRobinSpec extends RemotingMultiNodeSpec(RemoteRoundRobinConfig) actor ! "hit" } - val replies: Map[Address, Int] = (receiveWhile(5 seconds, messages = connectionCount * iterationCount) { + val replies: Map[Address, Int] = receiveWhile(5 seconds, messages = connectionCount * iterationCount) { case ref: ActorRef => info(s"reply from $ref") ref.path.address - }).foldLeft(Map(node(first).address -> 0, node(second).address -> 0, node(third).address -> 0)) { + }.foldLeft(Map(node(first).address -> 0, node(second).address -> 0, node(third).address -> 0)) { case (replyMap, address) => replyMap + (address -> (replyMap(address) + 1)) } @@ -185,9 +185,9 @@ class RemoteRoundRobinSpec extends RemotingMultiNodeSpec(RemoteRoundRobinConfig) actor ! 
"hit" } - val replies: Map[Address, Int] = (receiveWhile(5 seconds, messages = connectionCount * iterationCount) { + val replies: Map[Address, Int] = receiveWhile(5 seconds, messages = connectionCount * iterationCount) { case ref: ActorRef => ref.path.address - }).foldLeft(Map(node(first).address -> 0, node(second).address -> 0, node(third).address -> 0)) { + }.foldLeft(Map(node(first).address -> 0, node(second).address -> 0, node(third).address -> 0)) { case (replyMap, address) => replyMap + (address -> (replyMap(address) + 1)) } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteScatterGatherSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteScatterGatherSpec.scala index caa34c42de4..93e2580a41f 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteScatterGatherSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteScatterGatherSpec.scala @@ -51,8 +51,8 @@ class RemoteScatterGatherMultiJvmNode4 extends RemoteScatterGatherSpec object RemoteScatterGatherSpec { class SomeActor extends Actor { - def receive = { - case "hit" => sender() ! self + def receive = { case "hit" => + sender() ! self } } } @@ -89,9 +89,9 @@ class RemoteScatterGatherSpec extends RemotingMultiNodeSpec(RemoteRoundRobinConf actor ! 
"hit" } - val replies: Map[Address, Int] = (receiveWhile(5.seconds, messages = connectionCount * iterationCount) { + val replies: Map[Address, Int] = receiveWhile(5.seconds, messages = connectionCount * iterationCount) { case ref: ActorRef => ref.path.address - }).foldLeft(Map(node(first).address -> 0, node(second).address -> 0, node(third).address -> 0)) { + }.foldLeft(Map(node(first).address -> 0, node(second).address -> 0, node(third).address -> 0)) { case (replyMap, address) => replyMap + (address -> (replyMap(address) + 1)) } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/sample/MultiNodeSample.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/sample/MultiNodeSample.scala index 87a91394742..9f997b7b139 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/sample/MultiNodeSample.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/sample/MultiNodeSample.scala @@ -26,8 +26,8 @@ class MultiNodeSampleSpecMultiJvmNode2 extends MultiNodeSample object MultiNodeSample { class Ponger extends Actor { - def receive = { - case "ping" => sender() ! "pong" + def receive = { case "ping" => + sender() ! "pong" } } } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala index 1f2705a58ba..0c27cbae51c 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala @@ -43,11 +43,13 @@ class TestConductorSpec extends RemotingMultiNodeSpec(TestConductorMultiJvmSpec) "enter a barrier" taggedAs LongRunningTest in { runOn(leader) { - system.actorOf(Props(new Actor { - def receive = { - case x => testActor ! x; sender() ! x - } - }).withDeploy(Deploy.local), "echo") + system.actorOf( + Props(new Actor { + def receive = { case x => + testActor ! x; sender() ! 
x + } + }).withDeploy(Deploy.local), + "echo") } enterBarrier("name") diff --git a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala index 0b0ba959f87..8bcf085916a 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala @@ -138,7 +138,9 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { if thr == ClientLost(Data(Set(nodeA), "bar6", a.ref :: Nil, thr.data.deadline), B) => case x => fail( - "Expected " + Failed(barrier, ClientLost(Data(Set(nodeA), "bar6", a.ref :: Nil, null), B)) + " but got " + x) + "Expected " + Failed( + barrier, + ClientLost(Data(Set(nodeA), "bar6", a.ref :: Nil, null), B)) + " but got " + x) } } @@ -161,7 +163,9 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { if thr == ClientLost(Data(Set(nodeA, nodeC), "bar7", a.ref :: Nil, thr.data.deadline), B) => case x => fail( - "Expected " + Failed(barrier, ClientLost(Data(Set(nodeA, nodeC), "bar7", a.ref :: Nil, null), B)) + " but got " + x) + "Expected " + Failed( + barrier, + ClientLost(Data(Set(nodeA, nodeC), "bar7", a.ref :: Nil, null), B)) + " but got " + x) } } @@ -198,12 +202,15 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { msg match { case Failed(_, thr: BarrierEmpty) if thr == BarrierEmpty( - Data(Set(), "", Nil, thr.data.deadline), - "cannot remove RoleName(a): no client to remove") => + Data(Set(), "", Nil, thr.data.deadline), + "cannot remove RoleName(a): no client to remove") => case x => - fail("Expected " + Failed( - barrier, - BarrierEmpty(Data(Set(), "", Nil, null), "cannot remove RoleName(a): no client to remove")) + " but got " + x) + fail( + "Expected " + Failed( + barrier, + BarrierEmpty( + Data(Set(), "", Nil, null), + "cannot remove RoleName(a): no client to remove")) + 
" but got " + x) } barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) a.send(barrier, EnterBarrier("bar9", None)) @@ -225,7 +232,9 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { if thr == BarrierTimeout(Data(Set(nodeA, nodeB), "bar10", a.ref :: Nil, thr.data.deadline)) => case x => fail( - "Expected " + Failed(barrier, BarrierTimeout(Data(Set(nodeA, nodeB), "bar10", a.ref :: Nil, null))) + " but got " + x) + "Expected " + Failed( + barrier, + BarrierTimeout(Data(Set(nodeA, nodeB), "bar10", a.ref :: Nil, null))) + " but got " + x) } } } @@ -548,11 +557,11 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { val controller = context.actorOf(Props(classOf[Controller], participants, new InetSocketAddress(InetAddress.getLocalHost, 0))) controller ! GetSockAddr - override def supervisorStrategy = OneForOneStrategy() { - case x => testActor ! Failed(controller, x); SupervisorStrategy.Restart + override def supervisorStrategy = OneForOneStrategy() { case x => + testActor ! Failed(controller, x); SupervisorStrategy.Restart } - def receive = { - case _: InetSocketAddress => testActor ! controller + def receive = { case _: InetSocketAddress => + testActor ! controller } }).withDeploy(Deploy.local)) val actor = expectMsgType[ActorRef] @@ -567,11 +576,11 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { private def getBarrier(): ActorRef = { system.actorOf(Props(new Actor { val barrier = context.actorOf(Props[BarrierCoordinator]()) - override def supervisorStrategy = OneForOneStrategy() { - case x => testActor ! Failed(barrier, x); SupervisorStrategy.Restart + override def supervisorStrategy = OneForOneStrategy() { case x => + testActor ! Failed(barrier, x); SupervisorStrategy.Restart } - def receive = { - case _ => sender() ! barrier + def receive = { case _ => + sender() ! barrier } }).withDeploy(Deploy.local)) ! 
"" expectMsgType[ActorRef] diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala index b640b302d91..af5bd02043d 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala @@ -81,9 +81,7 @@ object LogRoleReplace extends ClipboardOwner { } } - /** - * Empty implementation of the ClipboardOwner interface - */ + /** Empty implementation of the ClipboardOwner interface */ def lostOwnership(clipboard: Clipboard, contents: Transferable): Unit = () } diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/STMultiNodeSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/STMultiNodeSpec.scala index 5d743de26a3..bdb45a2ed75 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/STMultiNodeSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/STMultiNodeSpec.scala @@ -11,9 +11,7 @@ import org.scalatest.BeforeAndAfterAll import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AnyWordSpecLike -/** - * Hooks up MultiNodeSpec with ScalaTest - */ +/** Hooks up MultiNodeSpec with ScalaTest */ trait STMultiNodeSpec extends MultiNodeSpecCallbacks with AnyWordSpecLike with Matchers with BeforeAndAfterAll { self: MultiNodeSpec => diff --git a/akka-remote/src/main/scala/akka/remote/BoundAddressesExtension.scala b/akka-remote/src/main/scala/akka/remote/BoundAddressesExtension.scala index c6dfe4a20ef..a6a52dc5939 100644 --- a/akka-remote/src/main/scala/akka/remote/BoundAddressesExtension.scala +++ b/akka-remote/src/main/scala/akka/remote/BoundAddressesExtension.scala @@ -13,9 +13,7 @@ import akka.actor.ExtensionId import akka.actor.ExtensionIdProvider import akka.remote.artery.ArteryTransport -/** - * Extension provides access to bound addresses. - */ +/** Extension provides access to bound addresses. 
*/ object BoundAddressesExtension extends ExtensionId[BoundAddressesExtension] with ExtensionIdProvider { override def get(system: ActorSystem): BoundAddressesExtension = super.get(system) override def get(system: ClassicActorSystemProvider): BoundAddressesExtension = super.get(system) @@ -28,9 +26,7 @@ object BoundAddressesExtension extends ExtensionId[BoundAddressesExtension] with class BoundAddressesExtension(val system: ExtendedActorSystem) extends Extension { - /** - * Returns a mapping from a protocol to a set of bound addresses. - */ + /** Returns a mapping from a protocol to a set of bound addresses. */ def boundAddresses: Map[String, Set[Address]] = system.provider.asInstanceOf[RemoteActorRefProvider].transport match { case artery: ArteryTransport => Map(ArteryTransport.ProtocolName -> Set(artery.bindAddress.address)) case other => throw new IllegalStateException(s"Unexpected transport type: ${other.getClass}") diff --git a/akka-remote/src/main/scala/akka/remote/ByteStringUtils.scala b/akka-remote/src/main/scala/akka/remote/ByteStringUtils.scala index 5a8cd3bc4cf..51e69a74ae3 100644 --- a/akka-remote/src/main/scala/akka/remote/ByteStringUtils.scala +++ b/akka-remote/src/main/scala/akka/remote/ByteStringUtils.scala @@ -11,9 +11,7 @@ import akka.util.ByteString import akka.util.ByteString.ByteString1 import akka.util.ByteString.ByteString1C -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ByteStringUtils { def toProtoByteStringUnsafe(bytes: ByteString): ProtoByteString = { diff --git a/akka-remote/src/main/scala/akka/remote/DeadlineFailureDetector.scala b/akka-remote/src/main/scala/akka/remote/DeadlineFailureDetector.scala index fdd7d98af1e..f7e08a073ef 100644 --- a/akka-remote/src/main/scala/akka/remote/DeadlineFailureDetector.scala +++ b/akka-remote/src/main/scala/akka/remote/DeadlineFailureDetector.scala @@ -29,8 +29,7 @@ import akka.util.Helpers.ConfigOps * purposes. It is only used for measuring intervals (duration). 
*/ class DeadlineFailureDetector(val acceptableHeartbeatPause: FiniteDuration, val heartbeatInterval: FiniteDuration)( - implicit - clock: Clock) + implicit clock: Clock) extends FailureDetector { /** @@ -46,7 +45,7 @@ class DeadlineFailureDetector(val acceptableHeartbeatPause: FiniteDuration, val require(heartbeatInterval > Duration.Zero, "failure-detector.heartbeat-interval must be > 0 s") private val deadlineMillis = acceptableHeartbeatPause.toMillis + heartbeatInterval.toMillis - @volatile private var heartbeatTimestamp = 0L //not used until active (first heartbeat) + @volatile private var heartbeatTimestamp = 0L // not used until active (first heartbeat) @volatile private var active = false override def isAvailable: Boolean = isAvailable(clock()) diff --git a/akka-remote/src/main/scala/akka/remote/DefaultFailureDetectorRegistry.scala b/akka-remote/src/main/scala/akka/remote/DefaultFailureDetectorRegistry.scala index ac33e1e677c..8f9e1fdd8c0 100644 --- a/akka-remote/src/main/scala/akka/remote/DefaultFailureDetectorRegistry.scala +++ b/akka-remote/src/main/scala/akka/remote/DefaultFailureDetectorRegistry.scala @@ -15,7 +15,6 @@ import scala.collection.immutable.Map * * @param detectorFactory * By-name parameter that returns the failure detector instance to be used by a newly registered resource - * */ class DefaultFailureDetectorRegistry[A](detectorFactory: () => FailureDetector) extends FailureDetectorRegistry[A] { diff --git a/akka-remote/src/main/scala/akka/remote/FailureDetector.scala b/akka-remote/src/main/scala/akka/remote/FailureDetector.scala index dc7d4853132..137bbda195a 100644 --- a/akka-remote/src/main/scala/akka/remote/FailureDetector.scala +++ b/akka-remote/src/main/scala/akka/remote/FailureDetector.scala @@ -12,9 +12,7 @@ import java.util.concurrent.TimeUnit.NANOSECONDS */ trait FailureDetector { - /** - * Returns true if the resource is considered to be up and healthy and returns false otherwise. 
- */ + /** Returns true if the resource is considered to be up and healthy and returns false otherwise. */ def isAvailable: Boolean /** diff --git a/akka-remote/src/main/scala/akka/remote/FailureDetectorRegistry.scala b/akka-remote/src/main/scala/akka/remote/FailureDetectorRegistry.scala index 89c251c4cfe..ddee076f699 100644 --- a/akka-remote/src/main/scala/akka/remote/FailureDetectorRegistry.scala +++ b/akka-remote/src/main/scala/akka/remote/FailureDetectorRegistry.scala @@ -37,14 +37,10 @@ trait FailureDetectorRegistry[A] { */ def heartbeat(resource: A): Unit - /** - * Removes the heartbeat management for a resource. - */ + /** Removes the heartbeat management for a resource. */ def remove(resource: A): Unit - /** - * Removes all resources and any associated failure detector state. - */ + /** Removes all resources and any associated failure detector state. */ def reset(): Unit } @@ -72,10 +68,9 @@ private[akka] object FailureDetectorLoader { .createInstanceFor[FailureDetector]( fqcn, List(classOf[Config] -> config, classOf[EventStream] -> system.eventStream)) - .recover({ - case e => - throw new ConfigurationException(s"Could not create custom failure detector [$fqcn] due to: ${e.toString}", e) - }) + .recover { case e => + throw new ConfigurationException(s"Could not create custom failure detector [$fqcn] due to: ${e.toString}", e) + } .get } diff --git a/akka-remote/src/main/scala/akka/remote/MessageSerializer.scala b/akka-remote/src/main/scala/akka/remote/MessageSerializer.scala index dcf54ae6e03..d0b514d2ad7 100644 --- a/akka-remote/src/main/scala/akka/remote/MessageSerializer.scala +++ b/akka-remote/src/main/scala/akka/remote/MessageSerializer.scala @@ -24,9 +24,7 @@ private[akka] object MessageSerializer { class SerializationException(msg: String, cause: Throwable) extends RuntimeException(msg, cause) - /** - * Uses Akka Serialization for the specified ActorSystem to transform the given MessageProtocol to a message - */ + /** Uses Akka Serialization for the 
specified ActorSystem to transform the given MessageProtocol to a message */ def deserialize(system: ExtendedActorSystem, messageProtocol: SerializedMessage): AnyRef = { SerializationExtension(system) .deserialize( diff --git a/akka-remote/src/main/scala/akka/remote/PhiAccrualFailureDetector.scala b/akka-remote/src/main/scala/akka/remote/PhiAccrualFailureDetector.scala index 17b0e990da6..4bb0a002165 100644 --- a/akka-remote/src/main/scala/akka/remote/PhiAccrualFailureDetector.scala +++ b/akka-remote/src/main/scala/akka/remote/PhiAccrualFailureDetector.scala @@ -60,14 +60,10 @@ class PhiAccrualFailureDetector( val minStdDeviation: FiniteDuration, val acceptableHeartbeatPause: FiniteDuration, val firstHeartbeatEstimate: FiniteDuration, - eventStream: Option[EventStream])( - implicit - clock: Clock) + eventStream: Option[EventStream])(implicit clock: Clock) extends FailureDetector { - /** - * Constructor without eventStream to support backwards compatibility - */ + /** Constructor without eventStream to support backwards compatibility */ def this( threshold: Double, maxSampleSize: Int, diff --git a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala index cbd8d0dc67f..e0dcd930e41 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala @@ -38,9 +38,7 @@ import akka.util.ErrorMessages import akka.util.OptionVal import akka.util.unused -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object RemoteActorRefProvider { @@ -62,25 +60,22 @@ private[akka] object RemoteActorRefProvider { startWith(Uninitialized, None) - when(Uninitialized) { - case Event(i: Internals, _) => - systemGuardian ! RegisterTerminationHook - goto(Idle).using(Some(i)) + when(Uninitialized) { case Event(i: Internals, _) => + systemGuardian ! 
RegisterTerminationHook + goto(Idle).using(Some(i)) } - when(Idle) { - case Event(TerminationHook, Some(internals)) => - log.info("Shutting down remote daemon.") - internals.remoteDaemon ! TerminationHook - goto(WaitDaemonShutdown) + when(Idle) { case Event(TerminationHook, Some(internals)) => + log.info("Shutting down remote daemon.") + internals.remoteDaemon ! TerminationHook + goto(WaitDaemonShutdown) } // TODO: state timeout - when(WaitDaemonShutdown) { - case Event(TerminationHookDone, Some(internals)) => - log.info("Remote daemon shut down; proceeding with flushing remote transports.") - internals.transport.shutdown().pipeTo(self) - goto(WaitTransportShutdown) + when(WaitDaemonShutdown) { case Event(TerminationHookDone, Some(internals)) => + log.info("Remote daemon shut down; proceeding with flushing remote transports.") + internals.transport.shutdown().pipeTo(self) + goto(WaitTransportShutdown) } when(WaitTransportShutdown) { @@ -137,7 +132,6 @@ private[akka] object RemoteActorRefProvider { * Depending on this class is not supported, only the [[akka.actor.ActorRefProvider]] interface is supported. * * Remote ActorRefProvider. Starts up actor on remote node and creates a RemoteActorRef representing it. 
- * */ private[akka] class RemoteActorRefProvider( val systemName: String, @@ -153,7 +147,8 @@ private[akka] class RemoteActorRefProvider( val remoteSettings: RemoteSettings = new RemoteSettings(settings.config) - private[akka] final val hasClusterOrUseUnsafe = settings.HasCluster || remoteSettings.UseUnsafeRemoteFeaturesWithoutCluster + private[akka] final val hasClusterOrUseUnsafe = + settings.HasCluster || remoteSettings.UseUnsafeRemoteFeaturesWithoutCluster private val warnOnUnsafeRemote = !settings.HasCluster && @@ -241,7 +236,8 @@ private[akka] class RemoteActorRefProvider( case ArterySettings.AeronUpd => new ArteryAeronUdpTransport(system, this) case ArterySettings.Tcp => new ArteryTcpTransport(system, this, tlsEnabled = false) case ArterySettings.TlsTcp => new ArteryTcpTransport(system, this, tlsEnabled = true) - } else + } + else throw new IllegalArgumentException( "Classic remoting has been removed in Akka 2.8.0. Use default Artery remoting instead.")) _internals = internals @@ -329,14 +325,16 @@ private[akka] class RemoteActorRefProvider( if (warnOnUnsafeRemote) log.warning(message) else log.debug(message) - /** Logs if deathwatch message is intentionally dropped. To disable + /** + * Logs if deathwatch message is intentionally dropped. To disable * warnings set `akka.remote.warn-unsafe-watch-outside-cluster` to `off` * or use Akka Cluster. */ private[akka] def warnIfUnsafeDeathwatchWithoutCluster(watchee: ActorRef, watcher: ActorRef, action: String): Unit = warnOnUnsafe(s"Dropped remote $action: disabled for [$watcher -> $watchee]") - /** If `warnOnUnsafeRemote`, this logs a warning if `actorOf` falls back to `LocalActorRef` + /** + * If `warnOnUnsafeRemote`, this logs a warning if `actorOf` falls back to `LocalActorRef` * versus creating a `RemoteActorRef`. Override to log a more granular reason if using * `RemoteActorRefProvider` as a superclass. 
*/ @@ -400,7 +398,8 @@ private[akka] class RemoteActorRefProvider( case "user" | "system" => deployer.lookup(elems.drop(1)) case "remote" => lookupRemotes(elems) case _ => None - } else None + } + else None val deployment = { deploy.toList ::: lookup.toList match { @@ -555,9 +554,7 @@ private[akka] class RemoteActorRefProvider( } } - /** - * Using (checking out) actor on a specific node. - */ + /** Using (checking out) actor on a specific node. */ def useActorOnNode(ref: ActorRef, props: Props, deploy: Deploy, supervisor: ActorRef): Unit = remoteDeploymentWatcher match { case Some(watcher) => @@ -687,9 +684,7 @@ private[akka] class RemoteActorRef private[akka] ( remote.system.deadLetters.tell(message, sender) } - /** - * Determine if a watch/unwatch message must be handled by the remoteWatcher actor, or sent to this remote ref - */ + /** Determine if a watch/unwatch message must be handled by the remoteWatcher actor, or sent to this remote ref */ def isWatchIntercepted(watchee: ActorRef, watcher: ActorRef): Boolean = { // If watchee != this then watcher should == this. 
This is a reverse watch, and it is not intercepted // If watchee == this, only the watches from remoteWatcher are sent on the wire, on behalf of other watchers @@ -698,7 +693,7 @@ private[akka] class RemoteActorRef private[akka] ( def sendSystemMessage(message: SystemMessage): Unit = try { - //send to remote, unless watch message is intercepted by the remoteWatcher + // send to remote, unless watch message is intercepted by the remoteWatcher message match { case Watch(watchee, watcher) => if (isWatchIntercepted(watchee, watcher)) @@ -708,7 +703,7 @@ private[akka] class RemoteActorRef private[akka] ( else provider.warnIfUnsafeDeathwatchWithoutCluster(watchee, watcher, "Watch") - //Unwatch has a different signature, need to pattern match arguments against InternalActorRef + // Unwatch has a different signature, need to pattern match arguments against InternalActorRef case Unwatch(watchee: InternalActorRef, watcher: InternalActorRef) => if (isWatchIntercepted(watchee, watcher)) provider.remoteWatcher.foreach(_ ! 
RemoteWatcher.UnwatchRemote(watchee, watcher)) diff --git a/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala b/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala index 24870c66791..fcfd782a29c 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala @@ -37,14 +37,10 @@ import akka.dispatch.sysmsg.Unwatch import akka.event.{ AddressTerminatedTopic, LogMarker, MarkerLoggingAdapter } import akka.util.Switch -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] sealed trait DaemonMsg -/** - * INTERNAL API - */ +/** INTERNAL API */ @SerialVersionUID(1L) private[akka] final case class DaemonMsgCreate(props: Props, deploy: Deploy, path: String, supervisor: ActorRef) extends DaemonMsg @@ -158,76 +154,78 @@ private[akka] class RemoteSystemDaemon( } override def !(msg: Any)(implicit sender: ActorRef = Actor.noSender): Unit = - try msg match { - case message: DaemonMsg => - log.debug("Received command [{}] to RemoteSystemDaemon on [{}]", message, path.address) - message match { - case DaemonMsgCreate(_, _, path, _) if untrustedMode => - log.debug("does not accept deployments (untrusted) for [{}]", path) // TODO add security marker? + try + msg match { + case message: DaemonMsg => + log.debug("Received command [{}] to RemoteSystemDaemon on [{}]", message, path.address) + message match { + case DaemonMsgCreate(_, _, path, _) if untrustedMode => + log.debug("does not accept deployments (untrusted) for [{}]", path) // TODO add security marker? 
- case DaemonMsgCreate(props, deploy, path, supervisor) if allowListEnabled => - val name = props.clazz.getCanonicalName - if (remoteDeploymentAllowList.contains(name)) + case DaemonMsgCreate(props, deploy, path, supervisor) if allowListEnabled => + val name = props.clazz.getCanonicalName + if (remoteDeploymentAllowList.contains(name)) + doCreateActor(message, props, deploy, path, supervisor) + else { + val ex = + new NotAllowedClassRemoteDeploymentAttemptException(props.actorClass(), remoteDeploymentAllowList) + log.error( + LogMarker.Security, + ex, + "Received command to create remote Actor, but class [{}] is not allow-listed! " + + "Target path: [{}]", + props.actorClass(), + path) + } + case DaemonMsgCreate(props, deploy, path, supervisor) => doCreateActor(message, props, deploy, path, supervisor) - else { - val ex = - new NotAllowedClassRemoteDeploymentAttemptException(props.actorClass(), remoteDeploymentAllowList) - log.error( - LogMarker.Security, - ex, - "Received command to create remote Actor, but class [{}] is not allow-listed! 
" + - "Target path: [{}]", - props.actorClass(), - path) - } - case DaemonMsgCreate(props, deploy, path, supervisor) => - doCreateActor(message, props, deploy, path, supervisor) - } + } - case sel: ActorSelectionMessage => - val (concatenatedChildNames, m) = { - val iter = sel.elements.iterator - // find child elements, and the message to send, which is a remaining ActorSelectionMessage - // in case of SelectChildPattern, otherwise the actual message of the selection - @tailrec def rec(acc: List[String]): (List[String], Any) = - if (iter.isEmpty) - (acc.reverse, sel.msg) - else { - iter.next() match { - case SelectChildName(name) => rec(name :: acc) - case SelectParent if acc.isEmpty => rec(acc) - case SelectParent => rec(acc.tail) - case pat: SelectChildPattern => (acc.reverse, sel.copy(elements = pat +: iter.toVector)) + case sel: ActorSelectionMessage => + val (concatenatedChildNames, m) = { + val iter = sel.elements.iterator + // find child elements, and the message to send, which is a remaining ActorSelectionMessage + // in case of SelectChildPattern, otherwise the actual message of the selection + @tailrec def rec(acc: List[String]): (List[String], Any) = + if (iter.isEmpty) + (acc.reverse, sel.msg) + else { + iter.next() match { + case SelectChildName(name) => rec(name :: acc) + case SelectParent if acc.isEmpty => rec(acc) + case SelectParent => rec(acc.tail) + case pat: SelectChildPattern => (acc.reverse, sel.copy(elements = pat +: iter.toVector)) + } } - } - rec(Nil) - } - getChild(concatenatedChildNames.iterator) match { - case Nobody => - val emptyRef = - new EmptyLocalActorRef(system.provider, path / sel.elements.map(_.toString), system.eventStream) - emptyRef.tell(sel, sender) - case child => - child.tell(m, sender) - } + rec(Nil) + } + getChild(concatenatedChildNames.iterator) match { + case Nobody => + val emptyRef = + new EmptyLocalActorRef(system.provider, path / sel.elements.map(_.toString), system.eventStream) + emptyRef.tell(sel, sender) + case 
child => + child.tell(m, sender) + } - case Identify(messageId) => sender ! ActorIdentity(messageId, Some(this)) + case Identify(messageId) => sender ! ActorIdentity(messageId, Some(this)) - case TerminationHook => - terminating.switchOn { - terminationHookDoneWhenNoChildren() - foreachChild { system.stop } - } + case TerminationHook => + terminating.switchOn { + terminationHookDoneWhenNoChildren() + foreachChild { system.stop } + } - case AddressTerminated(address) => - foreachChild { - case a: InternalActorRef if a.getParent.path.address == address => system.stop(a) - case _ => // skip, this child doesn't belong to the terminated address - } + case AddressTerminated(address) => + foreachChild { + case a: InternalActorRef if a.getParent.path.address == address => system.stop(a) + case _ => // skip, this child doesn't belong to the terminated address + } - case unknown => log.warning(LogMarker.Security, "Unknown message [{}] received by [{}]", unknown, this) + case unknown => log.warning(LogMarker.Security, "Unknown message [{}] received by [{}]", unknown, this) - } catch { + } + catch { case NonFatal(e) => log.error(e, "exception while processing remote command [{}] from [{}]", msg, sender) } diff --git a/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala b/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala index 85d802a51d0..29d7b23e6fc 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala @@ -18,9 +18,7 @@ final case class RemoteScope(node: Address) extends Scope { def withFallback(other: Scope): Scope = this } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] class RemoteDeployer(_settings: ActorSystem.Settings, _pm: DynamicAccess) extends Deployer(_settings, _pm) { override def parseConfig(path: String, config: Config): Option[Deploy] = { diff --git a/akka-remote/src/main/scala/akka/remote/RemoteDeploymentWatcher.scala 
b/akka-remote/src/main/scala/akka/remote/RemoteDeploymentWatcher.scala index 5b2f0c7e561..190f6dbf98d 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteDeploymentWatcher.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteDeploymentWatcher.scala @@ -11,9 +11,7 @@ import akka.actor.Terminated import akka.dispatch.{ RequiresMessageQueue, UnboundedMessageQueueSemantics } import akka.dispatch.sysmsg.DeathWatchNotification -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] object RemoteDeploymentWatcher { final case class WatchRemote(actor: ActorRef, supervisor: ActorRef) } diff --git a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala index f6a833ff039..1ddd77f6d8f 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala @@ -21,9 +21,7 @@ final class RemoteSettings(val config: Config) { val WarnAboutDirectUse: Boolean = getBoolean("akka.remote.warn-about-direct-use") - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def untrustedMode: Boolean = Artery.UntrustedMode def configureDispatcher(props: Props): Props = diff --git a/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala b/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala index 22197535c4e..eeb20d934cd 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala @@ -24,9 +24,7 @@ class RemoteTransportException(message: String, cause: Throwable) extends AkkaEx def this(msg: String) = this(msg, null) } -/** - * [[RemoteTransportException]] without stack trace. - */ +/** [[RemoteTransportException]] without stack trace. 
*/ @SerialVersionUID(1L) class RemoteTransportExceptionNoStackTrace(message: String, cause: Throwable) extends RemoteTransportException(message, cause) @@ -44,14 +42,10 @@ class RemoteTransportExceptionNoStackTrace(message: String, cause: Throwable) */ private[akka] abstract class RemoteTransport(val system: ExtendedActorSystem, val provider: RemoteActorRefProvider) { - /** - * Shuts down the remoting - */ + /** Shuts down the remoting */ def shutdown(): Future[Done] - /** - * Address to be used in RootActorPath of refs generated for this transport. - */ + /** Address to be used in RootActorPath of refs generated for this transport. */ def addresses: immutable.Set[Address] /** @@ -67,14 +61,10 @@ private[akka] abstract class RemoteTransport(val system: ExtendedActorSystem, va */ def localAddressForRemote(remote: Address): Address - /** - * Start up the transport, i.e. enable incoming connections. - */ + /** Start up the transport, i.e. enable incoming connections. */ def start(): Unit - /** - * Sends the given message to the recipient supplying the sender() if any - */ + /** Sends the given message to the recipient supplying the sender() if any */ def send(message: Any, senderOption: OptionVal[ActorRef], recipient: RemoteActorRef): Unit /** @@ -85,9 +75,7 @@ private[akka] abstract class RemoteTransport(val system: ExtendedActorSystem, va */ def managementCommand(@unused cmd: Any): Future[Boolean] = { Future.successful(false) } - /** - * A Logger that can be used to log issues that may occur - */ + /** A Logger that can be used to log issues that may occur */ def log: LoggingAdapter /** diff --git a/akka-remote/src/main/scala/akka/remote/RemoteWatcher.scala b/akka-remote/src/main/scala/akka/remote/RemoteWatcher.scala index 643423bf7c6..1e4aed5ee4c 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteWatcher.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteWatcher.scala @@ -18,15 +18,11 @@ import akka.remote.artery.ArteryMessage import 
akka.remote.artery.ArteryTransport import akka.util.unused -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object RemoteWatcher { - /** - * Factory method for `RemoteWatcher` [[akka.actor.Props]]. - */ + /** Factory method for `RemoteWatcher` [[akka.actor.Props]]. */ def props(settings: RemoteSettings, failureDetector: FailureDetectorRegistry[Address]): Props = Props( new RemoteWatcher( @@ -89,7 +85,6 @@ private[akka] object RemoteWatcher { * * For bi-directional watch between two nodes the same thing will be established in * both directions, but independent of each other. - * */ @InternalApi private[akka] class RemoteWatcher( @@ -148,11 +143,10 @@ private[akka] class RemoteWatcher( // test purpose case Stats => val watchSet = watching.iterator - .flatMap { - case (wee, wers) => - wers.map { wer => - wee -> wer - } + .flatMap { case (wee, wers) => + wers.map { wer => + wee -> wer + } } .toSet[(ActorRef, ActorRef)] sender() ! Stats(watching = watchSet.size, watchingNodes = watchingNodes.size)(watchSet, watchingNodes.toSet) @@ -199,7 +193,8 @@ private[akka] class RemoteWatcher( } } - /** Returns true if either has cluster or `akka.remote.use-unsafe-remote-features-outside-cluster` + /** + * Returns true if either has cluster or `akka.remote.use-unsafe-remote-features-outside-cluster` * is enabled. Can be overridden when using RemoteWatcher as a superclass. 
*/ protected def shouldWatch(@unused watchee: InternalActorRef): Boolean = { @@ -321,6 +316,8 @@ private[akka] class RemoteWatcher( } { val watcher = self.asInstanceOf[InternalActorRef] log.debug("Re-watch [{} -> {}]", watcher.path, watchee.path) - watchee.sendSystemMessage(Watch(watchee, watcher)) // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ + watchee.sendSystemMessage( + Watch(watchee, watcher) + ) // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ } } diff --git a/akka-remote/src/main/scala/akka/remote/Remoting.scala b/akka-remote/src/main/scala/akka/remote/Remoting.scala index 432b57c868e..5ed38abcfdf 100644 --- a/akka-remote/src/main/scala/akka/remote/Remoting.scala +++ b/akka-remote/src/main/scala/akka/remote/Remoting.scala @@ -6,16 +6,12 @@ package akka.remote import akka.actor._ -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] final case class RARP(provider: RemoteActorRefProvider) extends Extension { def configureDispatcher(props: Props): Props = provider.remoteSettings.configureDispatcher(props) } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] object RARP extends ExtensionId[RARP] with ExtensionIdProvider { override def lookup = RARP @@ -31,7 +27,5 @@ private[akka] object RARP extends ExtensionId[RARP] with ExtensionIdProvider { */ private[akka] trait PriorityMessage -/** - * Failure detector heartbeat messages are marked with this trait. - */ +/** Failure detector heartbeat messages are marked with this trait. 
*/ private[akka] trait HeartbeatMessage extends PriorityMessage diff --git a/akka-remote/src/main/scala/akka/remote/artery/ArterySettings.scala b/akka-remote/src/main/scala/akka/remote/artery/ArterySettings.scala index ff55029bc04..5f551571224 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/ArterySettings.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/ArterySettings.scala @@ -28,10 +28,13 @@ private[akka] final class ArterySettings private (config: Config) { import config._ def withDisabledCompression(): ArterySettings = - ArterySettings(ConfigFactory.parseString("""|akka.remote.artery.advanced.compression { + ArterySettings( + ConfigFactory + .parseString("""|akka.remote.artery.advanced.compression { | actor-refs.max = off | manifests.max = off - |}""".stripMargin).withFallback(config)) + |}""".stripMargin) + .withFallback(config)) val Enabled: Boolean = getBoolean("enabled") diff --git a/akka-remote/src/main/scala/akka/remote/artery/ArteryTransport.scala b/akka-remote/src/main/scala/akka/remote/artery/ArteryTransport.scala index 5efef8477ee..f48a26770b6 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/ArteryTransport.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/ArteryTransport.scala @@ -54,9 +54,7 @@ import akka.util.WildcardIndex */ private[remote] trait InboundContext { - /** - * The local inbound address. - */ + /** The local inbound address. */ def localAddress: UniqueAddress /** @@ -65,9 +63,7 @@ private[remote] trait InboundContext { */ def sendControl(to: Address, message: ControlMessage): Unit - /** - * Lookup the outbound association for a given address. - */ + /** Lookup the outbound association for a given address. 
*/ def association(remoteAddress: Address): OutboundContext /** @@ -85,9 +81,7 @@ private[remote] trait InboundContext { } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] object AssociationState { def apply(): AssociationState = new AssociationState( @@ -113,9 +107,7 @@ private[remote] object AssociationState { } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] final class AssociationState private ( val incarnation: Int, val lastUsedTimestamp: AtomicLong, // System.nanoTime timestamp @@ -228,14 +220,10 @@ private[remote] final class AssociationState private ( */ private[remote] trait OutboundContext { - /** - * The local inbound address. - */ + /** The local inbound address. */ def localAddress: UniqueAddress - /** - * The outbound address for this association. - */ + /** The outbound address for this association. */ def remoteAddress: Address def associationState: AssociationState @@ -248,9 +236,7 @@ private[remote] trait OutboundContext { */ def sendControl(message: ControlMessage): Unit - /** - * @return `true` if any of the streams are active (not stopped due to idle) - */ + /** @return `true` if any of the streams are active (not stopped due to idle) */ def isOrdinaryMessageStreamActive(): Boolean /** @@ -263,9 +249,7 @@ private[remote] trait OutboundContext { } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _provider: RemoteActorRefProvider) extends RemoteTransport(_system, _provider) with InboundContext { @@ -312,9 +296,7 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr override def addresses: Set[Address] = _addresses override def localAddressForRemote(remote: Address): Address = defaultAddress - /** - * Must not be accessed before `start()`, will throw NullPointerException otherwise. - */ + /** Must not be accessed before `start()`, will throw NullPointerException otherwise. 
*/ override def systemUid: Long = _localAddress.uid protected val killSwitch: SharedKillSwitch = KillSwitches.shared("transportKillSwitch") @@ -333,8 +315,8 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr private val priorityMessageDestinations = WildcardIndex[NotUsed]() - // These destinations are not defined in configuration because it should not - // be possible to abuse the control channel + // These destinations are not defined in configuration because it should not + // be possible to abuse the control channel .insert(Array("system", "remote-watcher"), NotUsed) // these belongs to cluster and should come from there .insert(Array("system", "cluster", "core", "daemon", "heartbeatSender"), NotUsed) @@ -354,21 +336,19 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr private val inboundEnvelopePool = ReusableInboundEnvelope.createObjectPool(capacity = 16) // The outboundEnvelopePool is shared among all outbound associations - private val outboundEnvelopePool = ReusableOutboundEnvelope.createObjectPool( - capacity = - settings.Advanced.OutboundMessageQueueSize * settings.Advanced.OutboundLanes * 3) - - private val associationRegistry = new AssociationRegistry( - remoteAddress => - new Association( - this, - materializer, - controlMaterializer, - remoteAddress, - controlSubject, - settings.LargeMessageDestinations, - priorityMessageDestinations, - outboundEnvelopePool)) + private val outboundEnvelopePool = ReusableOutboundEnvelope.createObjectPool(capacity = + settings.Advanced.OutboundMessageQueueSize * settings.Advanced.OutboundLanes * 3) + + private val associationRegistry = new AssociationRegistry(remoteAddress => + new Association( + this, + materializer, + controlMaterializer, + remoteAddress, + controlSubject, + settings.LargeMessageDestinations, + priorityMessageDestinations, + outboundEnvelopePool)) def remoteAddresses: Set[Address] = 
associationRegistry.allAssociations.map(_.remoteAddress) @@ -650,7 +630,7 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr killSwitch.abort(ShutdownSignal) flightRecorder.transportKillSwitchPulled() for { - _ <- streamsCompleted.recover { case _ => Done } + _ <- streamsCompleted.recover { case _ => Done } _ <- shutdownTransport().recover { case _ => Done } } yield { // no need to explicitly shut down the contained access since it's lifecycle is bound to the Decoder @@ -677,8 +657,8 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr implicit val ec = system.dispatchers.internalDispatcher for { _ <- Future.traverse(associationRegistry.allAssociations)(_.streamsCompleted) - _ <- Future.sequence(streamMatValues.get().valuesIterator.map { - case InboundStreamMatValues(_, done) => done + _ <- Future.sequence(streamMatValues.get().valuesIterator.map { case InboundStreamMatValues(_, done) => + done }) } yield Done } @@ -763,8 +743,8 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr } def outboundLarge(outboundContext: OutboundContext): Sink[OutboundEnvelope, Future[Done]] = - createOutboundSink(LargeStreamId, outboundContext, largeEnvelopeBufferPool).mapMaterializedValue { - case (_, d) => d + createOutboundSink(LargeStreamId, outboundContext, largeEnvelopeBufferPool).mapMaterializedValue { case (_, d) => + d } def outbound(outboundContext: OutboundContext): Sink[OutboundEnvelope, (OutboundCompressionAccess, Future[Done])] = @@ -798,15 +778,14 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr Flow .fromGraph(killSwitch.flow[OutboundEnvelope]) - .via( - new OutboundHandshake( - system, - outboundContext, - outboundEnvelopePool, - settings.Advanced.HandshakeTimeout, - settings.Advanced.HandshakeRetryInterval, - settings.Advanced.InjectHandshakeInterval, - Duration.Undefined)) + .via(new OutboundHandshake( + system, + outboundContext, + 
outboundEnvelopePool, + settings.Advanced.HandshakeTimeout, + settings.Advanced.HandshakeRetryInterval, + settings.Advanced.InjectHandshakeInterval, + Duration.Undefined)) .viaMat(createEncoder(bufferPool, streamId))(Keep.right) } @@ -816,15 +795,14 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr (settings.Advanced.QuarantineIdleOutboundAfter / 10).max(settings.Advanced.HandshakeRetryInterval) Flow .fromGraph(killSwitch.flow[OutboundEnvelope]) - .via( - new OutboundHandshake( - system, - outboundContext, - outboundEnvelopePool, - settings.Advanced.HandshakeTimeout, - settings.Advanced.HandshakeRetryInterval, - settings.Advanced.InjectHandshakeInterval, - livenessProbeInterval)) + .via(new OutboundHandshake( + system, + outboundContext, + outboundEnvelopePool, + settings.Advanced.HandshakeTimeout, + settings.Advanced.HandshakeRetryInterval, + settings.Advanced.InjectHandshakeInterval, + livenessProbeInterval)) .via( new SystemMessageDelivery( outboundContext, @@ -962,9 +940,7 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] object ArteryTransport { val ProtocolName = "akka" @@ -1006,7 +982,7 @@ private[remote] object ArteryTransport { hash2 = 31 * hash2 + java.lang.Long.hashCode(System.nanoTime()) hash2 = 31 * hash2 + java.lang.Long.hashCode(System.currentTimeMillis()) - (hash1.toLong << 32) | (hash2 & 0XFFFFFFFFL) + (hash1.toLong << 32) | (hash2 & 0xffffffffL) } } diff --git a/akka-remote/src/main/scala/akka/remote/artery/Association.scala b/akka-remote/src/main/scala/akka/remote/artery/Association.scala index 852f0e82330..9793e952dbe 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/Association.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/Association.scala @@ -62,9 +62,7 @@ import akka.util.Unsafe import akka.util.WildcardIndex import akka.util.ccompat._ -/** - * INTERNAL API - */ +/** INTERNAL API */ 
private[remote] object Association { sealed trait QueueWrapper extends SendQueue.ProducerApi[OutboundEnvelope] { def queue: Queue[OutboundEnvelope] @@ -154,7 +152,8 @@ private[remote] class Association( override def settings = transport.settings private def advancedSettings = transport.settings.Advanced - private val deathWatchNotificationFlushEnabled = advancedSettings.DeathWatchNotificationFlushTimeout > Duration.Zero && transport.provider.settings.HasCluster + private val deathWatchNotificationFlushEnabled = + advancedSettings.DeathWatchNotificationFlushTimeout > Duration.Zero && transport.provider.settings.HasCluster private val restartCounter = new RestartCounter(advancedSettings.OutboundMaxRestarts, advancedSettings.OutboundRestartTimeout) @@ -185,7 +184,8 @@ private[remote] class Association( DisabledQueueWrapper (0 until outboundLanes).foreach { i => - queues(OrdinaryQueueIndex + i) = QueueWrapperImpl(createQueue(queueSize, OrdinaryQueueIndex + i)) // ordinary messages stream + queues(OrdinaryQueueIndex + i) = + QueueWrapperImpl(createQueue(queueSize, OrdinaryQueueIndex + i)) // ordinary messages stream } @volatile private[this] var queuesVisibility = false @@ -248,9 +248,7 @@ private[remote] class Association( override def localAddress: UniqueAddress = transport.localAddress - /** - * Holds reference to shared state of Association - *access only via helper methods* - */ + /** Holds reference to shared state of Association - *access only via helper methods* */ @volatile @nowarn("msg=never used") private[artery] var _sharedStateDoNotCallMeDirectly: AssociationState = AssociationState() @@ -266,9 +264,7 @@ private[remote] class Association( private[artery] def swapState(oldState: AssociationState, newState: AssociationState): Boolean = Unsafe.instance.compareAndSwapObject(this, AbstractAssociation.sharedStateOffset, oldState, newState) - /** - * @return Reference to current shared state - */ + /** @return Reference to current shared state */ def 
associationState: AssociationState = Unsafe.instance.getObjectVolatile(this, AbstractAssociation.sharedStateOffset).asInstanceOf[AssociationState] @@ -594,9 +590,7 @@ private[remote] class Association( } - /** - * After calling this no messages can be sent with this Association instance - */ + /** After calling this no messages can be sent with this Association instance */ def removedAfterQuarantined(): Unit = { if (!isRemovedAfterQuarantined()) { flightRecorder.transportRemoveQuarantined(remoteAddress) @@ -639,15 +633,14 @@ private[remote] class Association( private def abortQuarantined(): Unit = { cancelIdleTimer() - streamMatValues.get.foreach { - case (queueIndex, OutboundStreamMatValues(killSwitch, _, _)) => - killSwitch match { - case OptionVal.Some(k) => - setStopReason(queueIndex, OutboundStreamStopQuarantinedSignal) - clearStreamKillSwitch(queueIndex, k) - k.abort(OutboundStreamStopQuarantinedSignal) - case _ => // already aborted - } + streamMatValues.get.foreach { case (queueIndex, OutboundStreamMatValues(killSwitch, _, _)) => + killSwitch match { + case OptionVal.Some(k) => + setStopReason(queueIndex, OutboundStreamStopQuarantinedSignal) + clearStreamKillSwitch(queueIndex, k) + k.abort(OutboundStreamStopQuarantinedSignal) + case _ => // already aborted + } } } @@ -680,34 +673,33 @@ private[remote] class Association( abortQuarantined() // quarantine ignored due to unknown UID, have to stop this task anyway } } else if (lastUsedDurationNanos >= StopIdleOutboundAfter.toNanos) { - streamMatValues.get.foreach { - case (queueIndex, OutboundStreamMatValues(streamKillSwitch, _, stopping)) => - if (isStreamActive(queueIndex) && stopping.isEmpty) { - if (queueIndex != ControlQueueIndex) { - streamKillSwitch match { - case OptionVal.Some(k) => - // for non-control streams we can stop the entire stream - log.info("Stopping idle outbound stream [{}] to [{}]", queueIndex, remoteAddress) - flightRecorder.transportStopIdleOutbound(remoteAddress, queueIndex) - 
setStopReason(queueIndex, OutboundStreamStopIdleSignal) - clearStreamKillSwitch(queueIndex, k) - k.abort(OutboundStreamStopIdleSignal) - case _ => // already aborted - } - - } else { - // only stop the transport parts of the stream because SystemMessageDelivery stage has - // state (seqno) and system messages might be sent at the same time - associationState.controlIdleKillSwitch match { - case OptionVal.Some(killSwitch) => - log.info("Stopping idle outbound control stream to [{}]", remoteAddress) - flightRecorder.transportStopIdleOutbound(remoteAddress, queueIndex) - setControlIdleKillSwitch(OptionVal.None) - killSwitch.abort(OutboundStreamStopIdleSignal) - case _ => // already stopped - } + streamMatValues.get.foreach { case (queueIndex, OutboundStreamMatValues(streamKillSwitch, _, stopping)) => + if (isStreamActive(queueIndex) && stopping.isEmpty) { + if (queueIndex != ControlQueueIndex) { + streamKillSwitch match { + case OptionVal.Some(k) => + // for non-control streams we can stop the entire stream + log.info("Stopping idle outbound stream [{}] to [{}]", queueIndex, remoteAddress) + flightRecorder.transportStopIdleOutbound(remoteAddress, queueIndex) + setStopReason(queueIndex, OutboundStreamStopIdleSignal) + clearStreamKillSwitch(queueIndex, k) + k.abort(OutboundStreamStopIdleSignal) + case _ => // already aborted + } + + } else { + // only stop the transport parts of the stream because SystemMessageDelivery stage has + // state (seqno) and system messages might be sent at the same time + associationState.controlIdleKillSwitch match { + case OptionVal.Some(killSwitch) => + log.info("Stopping idle outbound control stream to [{}]", remoteAddress) + flightRecorder.transportStopIdleOutbound(remoteAddress, queueIndex) + setControlIdleKillSwitch(OptionVal.None) + killSwitch.abort(OutboundStreamStopIdleSignal) + case _ => // already stopped } } + } } } }(transport.system.dispatcher) @@ -822,7 +814,7 @@ private[remote] class Association( .fromGraph(new 
SendQueue[OutboundEnvelope](sendToDeadLetters)) .via(streamKillSwitch.flow) .viaMat(transport.outboundTestFlow(this))(Keep.both) - .toMat(transport.outbound(this))({ case ((a, b), (c, d)) => (a, b, c, d) }) // "keep all, exploded" + .toMat(transport.outbound(this)) { case ((a, b), (c, d)) => (a, b, c, d) } // "keep all, exploded" .run()(materializer) queueValue.inject(wrapper.queue) @@ -856,8 +848,8 @@ private[remote] class Association( .watchTermination()(Keep.both) // recover to avoid error logging by MergeHub .recoverWithRetries(-1, { case _: Throwable => Source.empty }) - .mapMaterializedValue { - case ((q, c), w) => (q, c, w) + .mapMaterializedValue { case ((q, c), w) => + (q, c, w) } val (mergeHub, transportSinkCompleted) = MergeHub @@ -887,10 +879,9 @@ private[remote] class Association( val allCompleted = Future.sequence(laneCompletedValues).flatMap(_ => transportSinkCompleted) - queueValues.zip(wrappers).zipWithIndex.foreach { - case ((q, w), i) => - q.inject(w.queue) - queues(OrdinaryQueueIndex + i) = q // replace with the materialized value, still same underlying queue + queueValues.zip(wrappers).zipWithIndex.foreach { case ((q, w), i) => + q.inject(w.queue) + queues(OrdinaryQueueIndex + i) = q // replace with the materialized value, still same underlying queue } queuesVisibility = true // volatile write for visibility of the queues array @@ -1059,9 +1050,14 @@ private[remote] class Association( streamKillSwitch: SharedKillSwitch, completed: Future[Done]): Unit = { implicit val ec = materializer.executionContext - updateStreamMatValues(streamId, OutboundStreamMatValues(OptionVal.Some(streamKillSwitch), completed.recover { - case _ => Done - }, stopping = OptionVal.None)) + updateStreamMatValues( + streamId, + OutboundStreamMatValues( + OptionVal.Some(streamKillSwitch), + completed.recover { case _ => + Done + }, + stopping = OptionVal.None)) } @tailrec private def updateStreamMatValues(streamId: Int, values: OutboundStreamMatValues): Unit = { @@ -1109,8 
+1105,8 @@ private[remote] class Association( def streamsCompleted: Future[Done] = { implicit val ec = materializer.executionContext Future - .sequence(streamMatValues.get().values.map { - case OutboundStreamMatValues(_, done, _) => done + .sequence(streamMatValues.get().values.map { case OutboundStreamMatValues(_, done, _) => + done }) .map(_ => Done) } @@ -1120,16 +1116,12 @@ private[remote] class Association( } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] class AssociationRegistry(createAssociation: Address => Association) { private[this] val associationsByAddress = new AtomicReference[Map[Address, Association]](Map.empty) private[this] val associationsByUid = new AtomicReference[ImmutableLongMap[Association]](ImmutableLongMap.empty) - /** - * @throws ShuttingDown if called while the transport is shutting down - */ + /** @throws ShuttingDown if called while the transport is shutting down */ @tailrec final def association(remoteAddress: Address): Association = { val currentMap = associationsByAddress.get currentMap.get(remoteAddress) match { @@ -1148,9 +1140,7 @@ private[remote] class AssociationRegistry(createAssociation: Address => Associat def association(uid: Long): OptionVal[Association] = associationsByUid.get.get(uid) - /** - * @throws ShuttingDown if called while the transport is shutting down - */ + /** @throws ShuttingDown if called while the transport is shutting down */ @tailrec final def setUID(peer: UniqueAddress): Association = { // Don't create a new association via this method. It's supposed to exist unless it was removed after quarantined. 
val a = association(peer.address) @@ -1186,17 +1176,16 @@ private[remote] class AssociationRegistry(createAssociation: Address => Associat val now = System.nanoTime() val afterNanos = after.toNanos val currentMap = associationsByAddress.get - val remove = currentMap.foldLeft(Map.empty[Address, Association]) { - case (acc, (address, association)) => - val state = association.associationState - if ((now - state.lastUsedTimestamp.get) >= afterNanos) { - state.uniqueRemoteAddressState() match { - case AssociationState.UidQuarantined | AssociationState.UidUnknown => acc.updated(address, association) - case AssociationState.UidKnown => acc - } - } else { - acc + val remove = currentMap.foldLeft(Map.empty[Address, Association]) { case (acc, (address, association)) => + val state = association.associationState + if ((now - state.lastUsedTimestamp.get) >= afterNanos) { + state.uniqueRemoteAddressState() match { + case AssociationState.UidQuarantined | AssociationState.UidUnknown => acc.updated(address, association) + case AssociationState.UidKnown => acc } + } else { + acc + } } if (remove.nonEmpty) { val newMap = currentMap -- remove.keysIterator @@ -1211,18 +1200,17 @@ private[remote] class AssociationRegistry(createAssociation: Address => Associat val now = System.nanoTime() val afterNanos = after.toNanos val currentMap = associationsByUid.get - val remove = currentMap.keysIterator.foldLeft(Map.empty[Long, Association]) { - case (acc, uid) => - val association = currentMap.get(uid).get - val state = association.associationState - if ((now - state.lastUsedTimestamp.get) >= afterNanos) { - state.uniqueRemoteAddressState() match { - case AssociationState.UidQuarantined | AssociationState.UidUnknown => acc.updated(uid, association) - case AssociationState.UidKnown => acc - } - } else { - acc + val remove = currentMap.keysIterator.foldLeft(Map.empty[Long, Association]) { case (acc, uid) => + val association = currentMap.get(uid).get + val state = association.associationState 
+ if ((now - state.lastUsedTimestamp.get) >= afterNanos) { + state.uniqueRemoteAddressState() match { + case AssociationState.UidQuarantined | AssociationState.UidUnknown => acc.updated(uid, association) + case AssociationState.UidKnown => acc } + } else { + acc + } } if (remove.nonEmpty) { val newMap = remove.keysIterator.foldLeft(currentMap)((acc, uid) => acc.remove(uid)) diff --git a/akka-remote/src/main/scala/akka/remote/artery/Codecs.scala b/akka-remote/src/main/scala/akka/remote/artery/Codecs.scala index 2bef9fd55c7..fd57718f092 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/Codecs.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/Codecs.scala @@ -37,15 +37,11 @@ import akka.stream.stage._ import akka.util.OptionVal import akka.util.unused -/** - * INTERNAL API - */ +/** INTERNAL API */ @SerialVersionUID(1L) private[remote] class OversizedPayloadException(msg: String) extends AkkaException(msg) with OnlyCauseStackTrace -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] object Encoder { private[remote] trait OutboundCompressionAccess { def changeActorRefCompression(table: CompressionTable[ActorRef]): Future[Done] @@ -54,9 +50,7 @@ private[remote] object Encoder { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] class Encoder( uniqueLocalAddress: UniqueAddress, system: ExtendedActorSystem, @@ -216,21 +210,15 @@ private[remote] class Encoder( override def onPull(): Unit = pull(in) - /** - * External call from ChangeOutboundCompression materialized value - */ + /** External call from ChangeOutboundCompression materialized value */ override def changeActorRefCompression(table: CompressionTable[ActorRef]): Future[Done] = changeActorRefCompressionCb.invokeWithFeedback(table) - /** - * External call from ChangeOutboundCompression materialized value - */ + /** External call from ChangeOutboundCompression materialized value */ override def changeClassManifestCompression(table: CompressionTable[String]): Future[Done] = 
changeClassManifestCompressionCb.invokeWithFeedback(table) - /** - * External call from ChangeOutboundCompression materialized value - */ + /** External call from ChangeOutboundCompression materialized value */ override def clearCompression(): Future[Done] = clearCompressionCb.invokeWithFeedback(()) @@ -241,9 +229,7 @@ private[remote] class Encoder( } } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] object Decoder { private final case class RetryResolveRemoteDeployedRecipient( attemptsLeft: Int, @@ -296,40 +282,28 @@ private[remote] object Decoder { p.success(compressions.currentOriginUids) } - /** - * External call from ChangeInboundCompression materialized value - */ + /** External call from ChangeInboundCompression materialized value */ override def closeCompressionFor(originUid: Long): Future[Done] = closeCompressionForCb.invokeWithFeedback(originUid) - /** - * External call from ChangeInboundCompression materialized value - */ + /** External call from ChangeInboundCompression materialized value */ override def confirmActorRefCompressionAdvertisementAck(ack: ActorRefCompressionAdvertisementAck): Future[Done] = confirmActorRefCompressionAdvertisementCb.invokeWithFeedback(ack) - /** - * External call from ChangeInboundCompression materialized value - */ + /** External call from ChangeInboundCompression materialized value */ override def confirmClassManifestCompressionAdvertisementAck( ack: ClassManifestCompressionAdvertisementAck): Future[Done] = confirmClassManifestCompressionAdvertisementCb.invokeWithFeedback(ack) - /** - * External call from ChangeInboundCompression materialized value - */ + /** External call from ChangeInboundCompression materialized value */ override def runNextActorRefAdvertisement(): Unit = runNextActorRefAdvertisementCb.invoke(()) - /** - * External call from ChangeInboundCompression materialized value - */ + /** External call from ChangeInboundCompression materialized value */ override def 
runNextClassManifestAdvertisement(): Unit = runNextClassManifestAdvertisementCb.invoke(()) - /** - * External call from ChangeInboundCompression materialized value - */ + /** External call from ChangeInboundCompression materialized value */ override def currentCompressionOriginUids: Future[Set[Long]] = { val p = Promise[Set[Long]]() currentCompressionOriginUidsCb.invoke(p) @@ -343,9 +317,7 @@ private[remote] object Decoder { } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] final class ActorRefResolveCacheWithAddress( provider: RemoteActorRefProvider, localAddress: UniqueAddress) @@ -357,9 +329,7 @@ private[remote] final class ActorRefResolveCacheWithAddress( override protected def isKeyCacheable(k: String): Boolean = true } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] class Decoder( inboundContext: InboundContext, system: ExtendedActorSystem, @@ -424,44 +394,51 @@ private[remote] class Decoder( val originUid = headerBuilder.uid val association = inboundContext.association(originUid) - val recipient: OptionVal[InternalActorRef] = try headerBuilder.recipientActorRef(originUid) match { - case OptionVal.Some(ref) => - OptionVal(ref.asInstanceOf[InternalActorRef]) - case OptionVal.None if headerBuilder.recipientActorRefPath.isDefined => - resolveRecipient(headerBuilder.recipientActorRefPath.get) - case _ => - OptionVal.None - } catch { - case NonFatal(e) => - // probably version mismatch due to restarted system - log.warning("Couldn't decompress sender from originUid [{}]. 
{}", originUid, e) - OptionVal.None - } + val recipient: OptionVal[InternalActorRef] = + try + headerBuilder.recipientActorRef(originUid) match { + case OptionVal.Some(ref) => + OptionVal(ref.asInstanceOf[InternalActorRef]) + case OptionVal.None if headerBuilder.recipientActorRefPath.isDefined => + resolveRecipient(headerBuilder.recipientActorRefPath.get) + case _ => + OptionVal.None + } + catch { + case NonFatal(e) => + // probably version mismatch due to restarted system + log.warning("Couldn't decompress sender from originUid [{}]. {}", originUid, e) + OptionVal.None + } - val sender: OptionVal[InternalActorRef] = try headerBuilder.senderActorRef(originUid) match { - case OptionVal.Some(ref) => - OptionVal(ref.asInstanceOf[InternalActorRef]) - case OptionVal.None if headerBuilder.senderActorRefPath.isDefined => - OptionVal(actorRefResolver.resolve(headerBuilder.senderActorRefPath.get)) - case _ => - OptionVal.None - } catch { - case NonFatal(e) => - // probably version mismatch due to restarted system - log.warning("Couldn't decompress sender from originUid [{}]. {}", originUid, e) - OptionVal.None - } + val sender: OptionVal[InternalActorRef] = + try + headerBuilder.senderActorRef(originUid) match { + case OptionVal.Some(ref) => + OptionVal(ref.asInstanceOf[InternalActorRef]) + case OptionVal.None if headerBuilder.senderActorRefPath.isDefined => + OptionVal(actorRefResolver.resolve(headerBuilder.senderActorRefPath.get)) + case _ => + OptionVal.None + } + catch { + case NonFatal(e) => + // probably version mismatch due to restarted system + log.warning("Couldn't decompress sender from originUid [{}]. {}", originUid, e) + OptionVal.None + } - val classManifestOpt = try headerBuilder.manifest(originUid) - catch { - case NonFatal(e) => - // probably version mismatch due to restarted system - log.warning("Couldn't decompress manifest from originUid [{}]. 
{}", originUid, e) - OptionVal.None - } + val classManifestOpt = + try headerBuilder.manifest(originUid) + catch { + case NonFatal(e) => + // probably version mismatch due to restarted system + log.warning("Couldn't decompress manifest from originUid [{}]. {}", originUid, e) + OptionVal.None + } if ((recipient.isEmpty && headerBuilder.recipientActorRefPath.isEmpty && !headerBuilder.isNoRecipient) || - (sender.isEmpty && headerBuilder.senderActorRefPath.isEmpty && !headerBuilder.isNoSender)) { + (sender.isEmpty && headerBuilder.senderActorRefPath.isEmpty && !headerBuilder.isNoSender)) { log.debug( "Dropping message for unknown recipient/sender. It was probably sent from system [{}] with compression " + "table [{}] built for previous incarnation of the destination system, or it was compressed with a table " + @@ -631,9 +608,7 @@ private[remote] class Decoder( } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] class Deserializer( @unused inboundContext: InboundContext, system: ExtendedActorSystem, diff --git a/akka-remote/src/main/scala/akka/remote/artery/Control.scala b/akka-remote/src/main/scala/akka/remote/artery/Control.scala index 5a74a087c3a..e3a0b891a73 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/Control.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/Control.scala @@ -25,9 +25,7 @@ import akka.util.OptionVal @InternalApi private[remote] trait ArteryMessage extends Serializable -/** - * INTERNAL API: Marker trait for reply messages - */ +/** INTERNAL API: Marker trait for reply messages */ @InternalApi private[remote] trait Reply extends ControlMessage @@ -39,39 +37,27 @@ private[remote] trait Reply extends ControlMessage @InternalApi private[remote] trait ControlMessage extends ArteryMessage -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[remote] final case class Quarantined(from: UniqueAddress, to: UniqueAddress) extends ControlMessage -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi 
private[remote] final case class ActorSystemTerminating(from: UniqueAddress) extends ControlMessage -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[remote] final case class ActorSystemTerminatingAck(from: UniqueAddress) extends ArteryMessage -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[remote] case object Flush extends ControlMessage -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[remote] final case class FlushAck(expectedAcks: Int) extends ArteryMessage -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[remote] object InboundControlJunction { @@ -104,9 +90,7 @@ private[remote] object InboundControlJunction { private[InboundControlJunction] final case class Dettach(observer: ControlMessageObserver) extends CallbackMessage } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[remote] class InboundControlJunction extends GraphStageWithMaterializedValue[ @@ -168,9 +152,7 @@ private[remote] class InboundControlJunction } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[remote] object OutboundControlJunction { private[remote] trait OutboundControlIngress { @@ -178,9 +160,7 @@ private[remote] object OutboundControlJunction { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[remote] class OutboundControlJunction( outboundContext: OutboundContext, diff --git a/akka-remote/src/main/scala/akka/remote/artery/EnvelopeBufferPool.scala b/akka-remote/src/main/scala/akka/remote/artery/EnvelopeBufferPool.scala index bf9ea37e3b9..f537a37cbdb 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/EnvelopeBufferPool.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/EnvelopeBufferPool.scala @@ -15,14 +15,10 @@ import akka.remote.artery.compress.{ CompressionTable, InboundCompressions, NoIn import akka.serialization.Serialization import akka.util.{ OptionVal, Unsafe } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] 
class OutOfBuffersException extends RuntimeException("Out of usable ByteBuffers") -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] class EnvelopeBufferPool(maximumPayload: Int, maximumBuffers: Int) { private val availableBuffers = new ManyToManyConcurrentArrayQueue[EnvelopeBuffer](maximumBuffers) @@ -51,9 +47,7 @@ private[remote] final class ByteFlag(val mask: Byte) extends AnyVal { override def toString = s"ByteFlag(${ByteFlag.binaryLeftPad(mask)})" } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] object ByteFlag { def binaryLeftPad(byte: Byte): String = { val string = Integer.toBinaryString(byte) @@ -83,8 +77,8 @@ private[remote] object ByteFlag { */ private[remote] object EnvelopeBuffer { - val TagTypeMask = 0xFF000000 - val TagValueMask = 0x0000FFFF + val TagTypeMask = 0xff000000 + val TagValueMask = 0x0000ffff // Flags (1 byte allocated for them) val MetadataPresentFlag = new ByteFlag(0x1) @@ -122,9 +116,7 @@ private[remote] object HeaderBuilder { final val DeadLettersCode = -1 } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] sealed trait HeaderBuilder { def setVersion(v: Byte): Unit def version: Byte @@ -220,9 +212,7 @@ private[remote] final class SerializationFormatCache override protected def isCacheable(v: String): Boolean = true } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] final class HeaderBuilderImpl( inboundCompression: InboundCompressions, var _outboundActorRefCompression: CompressionTable[ActorRef], @@ -392,9 +382,7 @@ private[remote] final class HeaderBuilderImpl( } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] final class EnvelopeBuffer(val byteBuffer: ByteBuffer) { import EnvelopeBuffer._ val aeronBuffer = new UnsafeBuffer(byteBuffer) diff --git a/akka-remote/src/main/scala/akka/remote/artery/FixedSizePartitionHub.scala b/akka-remote/src/main/scala/akka/remote/artery/FixedSizePartitionHub.scala index fe5275c305a..5a9d925eca1 100644 --- 
a/akka-remote/src/main/scala/akka/remote/artery/FixedSizePartitionHub.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/FixedSizePartitionHub.scala @@ -9,9 +9,7 @@ import org.agrona.concurrent.OneToOneConcurrentArrayQueue import akka.annotation.InternalApi import akka.stream.scaladsl.PartitionHub -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class FixedSizePartitionHub[T](partitioner: T => Int, lanes: Int, bufferSize: Int) extends PartitionHub[T]( // during tear down or restart it's possible that some streams have been removed @@ -26,9 +24,7 @@ import akka.stream.scaladsl.PartitionHub } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class FixedSizePartitionQueue(lanes: Int, capacity: Int) extends PartitionHub.Internal.PartitionQueue { diff --git a/akka-remote/src/main/scala/akka/remote/artery/FlushBeforeDeathWatchNotification.scala b/akka-remote/src/main/scala/akka/remote/artery/FlushBeforeDeathWatchNotification.scala index 177b4c3aaaf..564c0d72d83 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/FlushBeforeDeathWatchNotification.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/FlushBeforeDeathWatchNotification.scala @@ -16,9 +16,7 @@ import akka.actor.ActorLogging import akka.actor.Props import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[remote] object FlushBeforeDeathWatchNotification { private val nameCounter = new AtomicLong(0L) @@ -32,9 +30,7 @@ private[remote] object FlushBeforeDeathWatchNotification { private case object Timeout } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[remote] class FlushBeforeDeathWatchNotification( done: Promise[Done], diff --git a/akka-remote/src/main/scala/akka/remote/artery/FlushOnShutdown.scala b/akka-remote/src/main/scala/akka/remote/artery/FlushOnShutdown.scala index 27d5e489a98..e94c6cabcfc 100644 --- 
a/akka-remote/src/main/scala/akka/remote/artery/FlushOnShutdown.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/FlushOnShutdown.scala @@ -13,9 +13,7 @@ import akka.actor.{ Actor, ActorLogging, Props } import akka.annotation.InternalApi import akka.remote.UniqueAddress -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[remote] object FlushOnShutdown { def props(done: Promise[Done], timeout: FiniteDuration, associations: Set[Association]): Props = { @@ -26,9 +24,7 @@ private[remote] object FlushOnShutdown { private case object Timeout } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[remote] class FlushOnShutdown(done: Promise[Done], timeout: FiniteDuration, associations: Set[Association]) extends Actor diff --git a/akka-remote/src/main/scala/akka/remote/artery/Handshake.scala b/akka-remote/src/main/scala/akka/remote/artery/Handshake.scala index 759b264c18c..8ed3ba71bbb 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/Handshake.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/Handshake.scala @@ -21,9 +21,7 @@ import akka.stream.stage._ import akka.util.OptionVal import akka.util.unused -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] object OutboundHandshake { /** @@ -47,9 +45,7 @@ private[remote] object OutboundHandshake { } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] class OutboundHandshake( @unused system: ActorSystem, outboundContext: OutboundContext, @@ -216,9 +212,7 @@ private[remote] class OutboundHandshake( } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] class InboundHandshake(inboundContext: InboundContext, inControlStream: Boolean) extends GraphStage[FlowShape[InboundEnvelope, InboundEnvelope]] { val in: Inlet[InboundEnvelope] = Inlet("InboundHandshake.in") @@ -257,16 +251,18 @@ private[remote] class InboundHandshake(inboundContext: InboundContext, inControl } }) else - setHandler(in, new InHandler { - override def onPush(): Unit = { - 
val env = grab(in) - env.message match { - case HandshakeReq(from, to) => onHandshakeReq(from, to) - case _ => - onMessage(env) + setHandler( + in, + new InHandler { + override def onPush(): Unit = { + val env = grab(in) + env.message match { + case HandshakeReq(from, to) => onHandshakeReq(from, to) + case _ => + onMessage(env) + } } - } - }) + }) private def onHandshakeReq(from: UniqueAddress, to: Address): Unit = { if (to == inboundContext.localAddress.address) { @@ -298,8 +294,8 @@ private[remote] class InboundHandshake(inboundContext: InboundContext, inControl // periodically. thenInside(result.isSuccess) case None => - first.onComplete(result => runInStage.invoke(() => thenInside(result.isSuccess)))( - ExecutionContexts.parasitic) + first + .onComplete(result => runInStage.invoke(() => thenInside(result.isSuccess)))(ExecutionContexts.parasitic) } } diff --git a/akka-remote/src/main/scala/akka/remote/artery/ImmutableLongMap.scala b/akka-remote/src/main/scala/akka/remote/artery/ImmutableLongMap.scala index 120ee8002db..35394f9dd06 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/ImmutableLongMap.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/ImmutableLongMap.scala @@ -12,9 +12,7 @@ import scala.reflect.ClassTag import akka.util.HashCode import akka.util.OptionVal -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] object ImmutableLongMap { def empty[A >: Null](implicit t: ClassTag[A]): ImmutableLongMap[A] = new ImmutableLongMap(Array.emptyLongArray, Array.empty) @@ -31,18 +29,14 @@ private[akka] class ImmutableLongMap[A >: Null] private (private val keys: Array val size: Int = keys.length - /** - * Worst case `O(log n)`, allocation free. - */ + /** Worst case `O(log n)`, allocation free. */ def get(key: Long): OptionVal[A] = { val i = Arrays.binarySearch(keys, key) if (i >= 0) OptionVal(values(i)) else OptionVal.None } - /** - * Worst case `O(log n)`, allocation free. - */ + /** Worst case `O(log n)`, allocation free. 
*/ def contains(key: Long): Boolean = { Arrays.binarySearch(keys, key) >= 0 } @@ -101,9 +95,7 @@ private[akka] class ImmutableLongMap[A >: Null] private (private val keys: Array this } - /** - * All keys - */ + /** All keys */ def keysIterator: Iterator[Long] = keys.iterator diff --git a/akka-remote/src/main/scala/akka/remote/artery/InboundEnvelope.scala b/akka-remote/src/main/scala/akka/remote/artery/InboundEnvelope.scala index 9308d121c34..4efc3be5ac4 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/InboundEnvelope.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/InboundEnvelope.scala @@ -9,14 +9,10 @@ import akka.actor.InternalActorRef import akka.actor.NoSerializationVerificationNeeded import akka.util.OptionVal -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] object InboundEnvelope { - /** - * Only used in tests - */ + /** Only used in tests */ def apply( recipient: OptionVal[InternalActorRef], message: AnyRef, @@ -29,9 +25,7 @@ private[remote] object InboundEnvelope { } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] trait InboundEnvelope extends NoSerializationVerificationNeeded { def recipient: OptionVal[InternalActorRef] def sender: OptionVal[ActorRef] @@ -56,9 +50,7 @@ private[remote] trait InboundEnvelope extends NoSerializationVerificationNeeded def copyForLane(lane: Int): InboundEnvelope } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] object ReusableInboundEnvelope { def createObjectPool(capacity: Int) = new ObjectPool[ReusableInboundEnvelope]( @@ -67,9 +59,7 @@ private[remote] object ReusableInboundEnvelope { clear = inEnvelope => inEnvelope.asInstanceOf[ReusableInboundEnvelope].clear()) } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] final class ReusableInboundEnvelope extends InboundEnvelope { private var _recipient: OptionVal[InternalActorRef] = OptionVal.None private var _sender: OptionVal[ActorRef] = OptionVal.None diff --git 
a/akka-remote/src/main/scala/akka/remote/artery/InboundQuarantineCheck.scala b/akka-remote/src/main/scala/akka/remote/artery/InboundQuarantineCheck.scala index ac8be4ff96c..2ae95d7d049 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/InboundQuarantineCheck.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/InboundQuarantineCheck.scala @@ -15,9 +15,7 @@ import akka.stream.Outlet import akka.stream.stage._ import akka.util.OptionVal -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] class InboundQuarantineCheck(inboundContext: InboundContext) extends GraphStage[FlowShape[InboundEnvelope, InboundEnvelope]] { val in: Inlet[InboundEnvelope] = Inlet("InboundQuarantineCheck.in") @@ -43,7 +41,7 @@ private[remote] class InboundQuarantineCheck(inboundContext: InboundContext) env.originUid) // avoid starting outbound stream for heartbeats if (!env.message.isInstanceOf[Quarantined] && !isHeartbeat(env.message) && - !association.associationState.isQuarantinedHarmless(env.originUid)) { + !association.associationState.isQuarantinedHarmless(env.originUid)) { log.info("Sending Quarantined to [{}]", association.remoteAddress) inboundContext.sendControl( association.remoteAddress, diff --git a/akka-remote/src/main/scala/akka/remote/artery/LruBoundedCache.scala b/akka-remote/src/main/scala/akka/remote/artery/LruBoundedCache.scala index c90803a9050..4bf53763eb5 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/LruBoundedCache.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/LruBoundedCache.scala @@ -7,9 +7,7 @@ package akka.remote.artery import scala.annotation.tailrec import scala.reflect.ClassTag -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] case class CacheStatistics(entries: Int, maxProbeDistance: Int, averageProbeDistance: Double) /** @@ -38,7 +36,8 @@ private[akka] abstract class LruBoundedCache[K <: AnyRef: ClassTag, V <: AnyRef: private[this] val keys = Array.ofDim[K](capacity) private[this] val values = 
Array.ofDim[V](capacity) private[this] val hashes = new Array[Int](capacity) - private[this] val epochs = Array.fill[Int](capacity)(epoch - evictAgeThreshold) // Guarantee existing "values" are stale + private[this] val epochs = + Array.fill[Int](capacity)(epoch - evictAgeThreshold) // Guarantee existing "values" are stale final def get(k: K): Option[V] = { val h = hash(k) diff --git a/akka-remote/src/main/scala/akka/remote/artery/MessageDispatcher.scala b/akka-remote/src/main/scala/akka/remote/artery/MessageDispatcher.scala index fe07596feb4..21ff31f2468 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/MessageDispatcher.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/MessageDispatcher.scala @@ -17,9 +17,7 @@ import akka.remote.RemoteActorRefProvider import akka.remote.RemoteRef import akka.util.OptionVal -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] class MessageDispatcher(system: ExtendedActorSystem, provider: RemoteActorRefProvider) { private val remoteDaemon = provider.remoteDaemon @@ -61,7 +59,7 @@ private[remote] class MessageDispatcher(system: ExtendedActorSystem, provider: R message match { case sel: ActorSelectionMessage => if (UntrustedMode && (!TrustedSelectionPaths.contains(sel.elements.mkString("/", "/", "")) || - sel.msg.isInstanceOf[PossiblyHarmful] || l != provider.rootGuardian)) { + sel.msg.isInstanceOf[PossiblyHarmful] || l != provider.rootGuardian)) { if (debugLogEnabled) log.debug( LogMarker.Security, diff --git a/akka-remote/src/main/scala/akka/remote/artery/ObjectPool.scala b/akka-remote/src/main/scala/akka/remote/artery/ObjectPool.scala index 0aa3aadb265..36c3912371b 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/ObjectPool.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/ObjectPool.scala @@ -6,9 +6,7 @@ package akka.remote.artery import org.agrona.concurrent.ManyToManyConcurrentArrayQueue -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] class ObjectPool[A <: 
AnyRef](capacity: Int, create: () => A, clear: A => Unit) { private val pool = new ManyToManyConcurrentArrayQueue[A](capacity) @@ -20,6 +18,6 @@ private[remote] class ObjectPool[A <: AnyRef](capacity: Int, create: () => A, cl def release(obj: A): Boolean = { clear(obj) - (!pool.offer(obj)) + !pool.offer(obj) } } diff --git a/akka-remote/src/main/scala/akka/remote/artery/OutboundEnvelope.scala b/akka-remote/src/main/scala/akka/remote/artery/OutboundEnvelope.scala index 6d2a4c74a2f..ca05c218f6c 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/OutboundEnvelope.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/OutboundEnvelope.scala @@ -9,9 +9,7 @@ import akka.actor.NoSerializationVerificationNeeded import akka.remote.RemoteActorRef import akka.util.OptionVal -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] object OutboundEnvelope { def apply(recipient: OptionVal[RemoteActorRef], message: AnyRef, sender: OptionVal[ActorRef]): OutboundEnvelope = { val env = new ReusableOutboundEnvelope @@ -20,9 +18,7 @@ private[remote] object OutboundEnvelope { } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] trait OutboundEnvelope extends NoSerializationVerificationNeeded { def recipient: OptionVal[RemoteActorRef] def message: AnyRef @@ -33,9 +29,7 @@ private[remote] trait OutboundEnvelope extends NoSerializationVerificationNeeded def copy(): OutboundEnvelope } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] object ReusableOutboundEnvelope { def createObjectPool(capacity: Int) = new ObjectPool[ReusableOutboundEnvelope]( @@ -44,9 +38,7 @@ private[remote] object ReusableOutboundEnvelope { clear = outEnvelope => outEnvelope.clear()) } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] final class ReusableOutboundEnvelope extends OutboundEnvelope { private var _recipient: OptionVal[RemoteActorRef] = OptionVal.None private var _message: AnyRef = null diff --git 
a/akka-remote/src/main/scala/akka/remote/artery/RemoteInstrument.scala b/akka-remote/src/main/scala/akka/remote/artery/RemoteInstrument.scala index 44392037a3a..4e9f9275549 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/RemoteInstrument.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/RemoteInstrument.scala @@ -47,9 +47,7 @@ abstract class RemoteInstrument { */ def identifier: Byte - /** - * Should the serialization be timed? Otherwise times are always 0. - */ + /** Should the serialization be timed? Otherwise times are always 0. */ def serializationTimingEnabled: Boolean = false /** @@ -69,9 +67,7 @@ abstract class RemoteInstrument { */ def remoteMessageSent(recipient: ActorRef, message: Object, sender: ActorRef, size: Int, time: Long): Unit - /** - * Called while deserializing the message once a message (containing a metadata field designated for this instrument) is found. - */ + /** Called while deserializing the message once a message (containing a metadata field designated for this instrument) is found. */ def remoteReadMetadata(recipient: ActorRef, message: Object, sender: ActorRef, buffer: ByteBuffer): Unit /** @@ -85,9 +81,7 @@ abstract class RemoteInstrument { def remoteMessageReceived(recipient: ActorRef, message: Object, sender: ActorRef, size: Int, time: Long): Unit } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class LoggingRemoteInstrument(system: ActorSystem) extends RemoteInstrument { private val settings = system @@ -167,7 +161,6 @@ abstract class RemoteInstrument { * | ... metadata entry ... 
| * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * }}} - * */ private[remote] final class RemoteInstruments( private val system: ExtendedActorSystem, diff --git a/akka-remote/src/main/scala/akka/remote/artery/RemotingFlightRecorder.scala b/akka-remote/src/main/scala/akka/remote/artery/RemotingFlightRecorder.scala index 87ba0bb281b..c153e004827 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/RemotingFlightRecorder.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/RemotingFlightRecorder.scala @@ -15,9 +15,7 @@ import akka.annotation.InternalApi import akka.remote.UniqueAddress import akka.util.FlightRecorderLoader -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi object RemotingFlightRecorder extends ExtensionId[RemotingFlightRecorder] with ExtensionIdProvider { @@ -30,9 +28,7 @@ object RemotingFlightRecorder extends ExtensionId[RemotingFlightRecorder] with E override def lookup: ExtensionId[_ <: Extension] = this } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] trait RemotingFlightRecorder extends Extension { diff --git a/akka-remote/src/main/scala/akka/remote/artery/RestartCounter.scala b/akka-remote/src/main/scala/akka/remote/artery/RestartCounter.scala index 35766791a4e..6815547331c 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/RestartCounter.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/RestartCounter.scala @@ -10,24 +10,18 @@ import scala.annotation.tailrec import scala.concurrent.duration.Deadline import scala.concurrent.duration.FiniteDuration -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] object RestartCounter { final case class State(count: Int, deadline: Deadline) } -/** - * INTERNAL API: Thread safe "restarts with duration" counter - */ +/** INTERNAL API: Thread safe "restarts with duration" counter */ private[remote] class RestartCounter(maxRestarts: Int, restartTimeout: FiniteDuration) { import RestartCounter._ private val state = 
new AtomicReference[State](State(0, Deadline.now + restartTimeout)) - /** - * Current number of restarts. - */ + /** Current number of restarts. */ def count(): Int = state.get.count /** diff --git a/akka-remote/src/main/scala/akka/remote/artery/SendQueue.scala b/akka-remote/src/main/scala/akka/remote/artery/SendQueue.scala index 3956a613443..5752d47182e 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/SendQueue.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/SendQueue.scala @@ -19,9 +19,7 @@ import akka.stream.stage.GraphStageLogic import akka.stream.stage.GraphStageWithMaterializedValue import akka.stream.stage.OutHandler -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] object SendQueue { trait ProducerApi[T] { def offer(message: T): Boolean @@ -38,9 +36,7 @@ private[remote] object SendQueue { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] final class SendQueue[T](postStopAction: Vector[T] => Unit) extends GraphStageWithMaterializedValue[SourceShape[T], SendQueue.QueueValue[T]] { import SendQueue._ diff --git a/akka-remote/src/main/scala/akka/remote/artery/SystemMessageDelivery.scala b/akka-remote/src/main/scala/akka/remote/artery/SystemMessageDelivery.scala index c7b4866977e..c9e5ed21a95 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/SystemMessageDelivery.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/SystemMessageDelivery.scala @@ -35,9 +35,7 @@ import akka.stream.stage.TimerGraphStageLogic import akka.util.OptionVal import akka.util.PrettyDuration.PrettyPrintableDuration -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[remote] object SystemMessageDelivery { final case class SystemMessageEnvelope(message: AnyRef, seqNo: Long, ackReplyTo: UniqueAddress) extends ArteryMessage final case class Ack(seqNo: Long, from: UniqueAddress) extends Reply @@ -67,9 +65,7 @@ import akka.util.PrettyDuration.PrettyPrintableDuration } -/** - * INTERNAL API - */ +/** INTERNAL API */ 
@InternalApi private[remote] class SystemMessageDelivery( outboundContext: OutboundContext, deadLetters: ActorRef, @@ -184,7 +180,7 @@ import akka.util.PrettyDuration.PrettyPrintableDuration @tailrec private def clearUnacknowledged(ackedSeqNo: Long): Unit = { if (!unacknowledged.isEmpty && - unacknowledged.peek().message.asInstanceOf[SystemMessageEnvelope].seqNo <= ackedSeqNo) { + unacknowledged.peek().message.asInstanceOf[SystemMessageEnvelope].seqNo <= ackedSeqNo) { unacknowledged.removeFirst() if (unacknowledged.isEmpty) cancelTimer(resendInterval) @@ -313,16 +309,12 @@ import akka.util.PrettyDuration.PrettyPrintableDuration } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object SystemMessageAcker { val MaxNegativeAcknowledgementLogging = 1000 } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[remote] class SystemMessageAcker(inboundContext: InboundContext) extends GraphStage[FlowShape[InboundEnvelope, InboundEnvelope]] { import SystemMessageAcker._ diff --git a/akka-remote/src/main/scala/akka/remote/artery/TestStage.scala b/akka-remote/src/main/scala/akka/remote/artery/TestStage.scala index 3bd6854385c..4f9dbbfd83c 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/TestStage.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/TestStage.scala @@ -103,14 +103,10 @@ private[remote] class SharedTestState { } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] final case class TestState(blackholes: Map[Address, Set[Address]], failInboundStream: Option[Throwable]) -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] class OutboundTestStage(outboundContext: OutboundContext, state: SharedTestState) extends GraphStage[FlowShape[OutboundEnvelope, OutboundEnvelope]] { val in: Inlet[OutboundEnvelope] = Inlet("OutboundTestStage.in") @@ -141,9 +137,7 @@ private[remote] class OutboundTestStage(outboundContext: OutboundContext, state: } -/** - * INTERNAL API - */ +/** INTERNAL API */ 
private[remote] class InboundTestStage(inboundContext: InboundContext, state: SharedTestState) extends GraphStage[FlowShape[InboundEnvelope, InboundEnvelope]] { val in: Inlet[InboundEnvelope] = Inlet("InboundTestStage.in") diff --git a/akka-remote/src/main/scala/akka/remote/artery/aeron/AeronSink.scala b/akka-remote/src/main/scala/akka/remote/artery/aeron/AeronSink.scala index 1fbd060dffd..21b00a99155 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/aeron/AeronSink.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/aeron/AeronSink.scala @@ -29,9 +29,7 @@ import akka.stream.stage.GraphStageWithMaterializedValue import akka.stream.stage.InHandler import akka.util.PrettyDuration.PrettyPrintableDuration -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] object AeronSink { final class GaveUpMessageException(msg: String) extends RuntimeException(msg) with NoStackTrace @@ -72,7 +70,7 @@ private[remote] object AeronSink { onPublicationClosed.invoke(()) true } else if (giveUpAfterNanos >= 0 && (n & TimerCheckMask) == 0 && (System - .nanoTime() - startTime) > giveUpAfterNanos) { + .nanoTime() - startTime) > giveUpAfterNanos) { // the task is invoked by the spinning thread, only check nanoTime each 8192th invocation n = 0L onGiveUp.invoke(()) diff --git a/akka-remote/src/main/scala/akka/remote/artery/aeron/AeronSource.scala b/akka-remote/src/main/scala/akka/remote/artery/aeron/AeronSource.scala index f3bb821c0a9..3cf23a5faae 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/aeron/AeronSource.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/aeron/AeronSource.scala @@ -25,9 +25,7 @@ import akka.stream.stage.GraphStageWithMaterializedValue import akka.stream.stage.OutHandler import akka.stream.stage.StageLogging -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] object AeronSource { private def pollTask( @@ -50,8 +48,8 @@ private[remote] object AeronSource { class MessageHandler(pool: EnvelopeBufferPool) { def reset(): 
Unit = messageReceived = null - private[remote] var messageReceived - : EnvelopeBuffer = null // private to avoid scalac warning about exposing EnvelopeBuffer + private[remote] var messageReceived: EnvelopeBuffer = + null // private to avoid scalac warning about exposing EnvelopeBuffer val fragmentsHandler = new Fragments(data => messageReceived = data, pool) } diff --git a/akka-remote/src/main/scala/akka/remote/artery/aeron/ArteryAeronUdpTransport.scala b/akka-remote/src/main/scala/akka/remote/artery/aeron/ArteryAeronUdpTransport.scala index 583d6a4ec3d..68fcaf56947 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/aeron/ArteryAeronUdpTransport.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/aeron/ArteryAeronUdpTransport.scala @@ -49,9 +49,7 @@ import akka.stream.scaladsl.Sink import akka.stream.scaladsl.Source import akka.util.ccompat._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @ccompatUsedUntil213 private[remote] class ArteryAeronUdpTransport(_system: ExtendedActorSystem, _provider: RemoteActorRefProvider) extends ArteryTransport(_system, _provider) { @@ -191,9 +189,8 @@ private[remote] class ArteryAeronUdpTransport(_system: ExtendedActorSystem, _pro log.debug(s"onUnavailableImage from ${img.sourceIdentity} session ${img.sessionId}") // freeSessionBuffer in AeronSource FragmentAssembler - streamMatValues.get.valuesIterator.foreach { - case InboundStreamMatValues(resourceLife, _) => - resourceLife.onUnavailableImage(img.sessionId) + streamMatValues.get.valuesIterator.foreach { case InboundStreamMatValues(resourceLife, _) => + resourceLife.onUnavailableImage(img.sessionId) } } }) @@ -321,7 +318,7 @@ private[remote] class ArteryAeronUdpTransport(_system: ExtendedActorSystem, _pro private def aeronSourceSpinningStrategy: Int = if (settings.Advanced.InboundLanes > 1 || // spinning was identified to be the cause of massive slowdowns with multiple lanes, see #21365 - settings.Advanced.Aeron.IdleCpuLevel < 5) 0 // also don't spin for small 
IdleCpuLevels + settings.Advanced.Aeron.IdleCpuLevel < 5) 0 // also don't spin for small IdleCpuLevels else 50 * settings.Advanced.Aeron.IdleCpuLevel - 240 override protected def bindInboundStreams(): (Int, Int) = { @@ -356,7 +353,7 @@ private[remote] class ArteryAeronUdpTransport(_system: ExtendedActorSystem, _pro val (resourceLife, ctrl, completed) = aeronSource(ControlStreamId, envelopeBufferPool, inboundChannel) .via(inboundFlow(settings, NoInboundCompressions)) - .toMat(inboundControlSink)({ case (a, (c, d)) => (a, c, d) }) + .toMat(inboundControlSink) { case (a, (c, d)) => (a, c, d) } .run()(controlMaterializer) attachControlMessageObserver(ctrl) @@ -372,7 +369,7 @@ private[remote] class ArteryAeronUdpTransport(_system: ExtendedActorSystem, _pro if (inboundLanes == 1) { aeronSource(OrdinaryStreamId, envelopeBufferPool, inboundChannel) .viaMat(inboundFlow(settings, _inboundCompressions))(Keep.both) - .toMat(inboundSink(envelopeBufferPool))({ case ((a, b), c) => (a, b, c) }) + .toMat(inboundSink(envelopeBufferPool)) { case ((a, b), c) => (a, b, c) } .run()(materializer) } else { @@ -391,9 +388,9 @@ private[remote] class ArteryAeronUdpTransport(_system: ExtendedActorSystem, _pro new FixedSizePartitionHub[InboundEnvelope]( inboundLanePartitioner, inboundLanes, - settings.Advanced.InboundHubBufferSize)))({ - case ((a, b), c) => (a, b, c) - }) + settings.Advanced.InboundHubBufferSize))) { case ((a, b), c) => + (a, b, c) + } .run()(materializer) val lane = inboundSink(envelopeBufferPool) @@ -445,9 +442,13 @@ private[remote] class ArteryAeronUdpTransport(_system: ExtendedActorSystem, _pro aeronSourceLifecycle: AeronSource.AeronLifecycle, completed: Future[Done]): Unit = { implicit val ec = materializer.executionContext - updateStreamMatValues(streamId, InboundStreamMatValues[AeronLifecycle](aeronSourceLifecycle, completed.recover { - case _ => Done - })) + updateStreamMatValues( + streamId, + InboundStreamMatValues[AeronLifecycle]( + aeronSourceLifecycle, + 
completed.recover { case _ => + Done + })) } override protected def shutdownTransport(): Future[Done] = { diff --git a/akka-remote/src/main/scala/akka/remote/artery/aeron/TaskRunner.scala b/akka-remote/src/main/scala/akka/remote/artery/aeron/TaskRunner.scala index 66c6d71654a..19ad54d870f 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/aeron/TaskRunner.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/aeron/TaskRunner.scala @@ -19,9 +19,7 @@ import akka.actor.ExtendedActorSystem import akka.dispatch.{ AbstractNodeQueue, MonitorableThreadFactory } import akka.event.Logging -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] object TaskRunner { type Task = () => Boolean @@ -48,7 +46,7 @@ private[akka] object TaskRunner { } else if (elements(i) eq null) elements(i) = e else - tryAdd(i + 1) //recursive + tryAdd(i + 1) // recursive } tryAdd(0) } @@ -61,7 +59,7 @@ private[akka] object TaskRunner { else if (elements(i) == e) elements(i) = null.asInstanceOf[T] else - tryRemove(i + 1) //recursive + tryRemove(i + 1) // recursive } tryRemove(0) } @@ -110,9 +108,7 @@ private[akka] object TaskRunner { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] class TaskRunner(system: ExtendedActorSystem, val idleCpuLevel: Int) extends Runnable { import TaskRunner._ diff --git a/akka-remote/src/main/scala/akka/remote/artery/compress/CompressionTable.scala b/akka-remote/src/main/scala/akka/remote/artery/compress/CompressionTable.scala index 19bec35fc5a..4c89a7ed59c 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/compress/CompressionTable.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/compress/CompressionTable.scala @@ -55,7 +55,7 @@ private[remote] final class CompressionTable[T]( val mit = _dictionary.entrySet().iterator while (i < tups.length) { val entry = mit.next() - tups(i) = (entry.getKey -> entry.getValue.intValue()) + tups(i) = entry.getKey -> entry.getValue.intValue() i += 1 } util.Arrays.sort(tups, 
CompressionTable.compareBy2ndValue[T]) @@ -105,8 +105,8 @@ private[remote] object CompressionTable { def apply[T](originUid: Long, version: Byte, dictionary: Map[T, Int]): CompressionTable[T] = { val _dictionary = newObject2IntHashMap[T](dictionary.size * 2) - dictionary.foreach { - case (key, value) => _dictionary.put(key, value) + dictionary.foreach { case (key, value) => + _dictionary.put(key, value) } new CompressionTable[T](originUid, version, _dictionary) } diff --git a/akka-remote/src/main/scala/akka/remote/artery/compress/DecompressionTable.scala b/akka-remote/src/main/scala/akka/remote/artery/compress/DecompressionTable.scala index 0df8ef0862d..f3c4b5ab9a1 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/compress/DecompressionTable.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/compress/DecompressionTable.scala @@ -27,7 +27,7 @@ private[remote] final case class DecompressionTable[T](originUid: Long, version: /** Writes complete table as String (heavy operation) */ override def toString = s"DecompressionTable($originUid, $version, " + - s"Map(${table.zipWithIndex.map({ case (t, i) => s"$i -> $t" }).mkString(",")}))" + s"Map(${table.zipWithIndex.map { case (t, i) => s"$i -> $t" }.mkString(",")}))" } /** INTERNAL API */ diff --git a/akka-remote/src/main/scala/akka/remote/artery/compress/InboundCompressions.scala b/akka-remote/src/main/scala/akka/remote/artery/compress/InboundCompressions.scala index e0676281bee..be07f46e3b2 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/compress/InboundCompressions.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/compress/InboundCompressions.scala @@ -43,9 +43,7 @@ private[remote] trait InboundCompressions { def currentOriginUids: Set[Long] - /** - * Remove compression and cancel advertisement scheduling for a specific origin - */ + /** Remove compression and cancel advertisement scheduling for a specific origin */ def close(originUid: Long): Unit } @@ -216,9 +214,7 @@ private[remote] 
final class InboundActorRefCompression( } } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] final class InboundManifestCompression( log: LoggingAdapter, settings: ArterySettings.Compression, @@ -245,9 +241,7 @@ private[remote] final class InboundManifestCompression( decompressInternal(incomingTableVersion, idx, 0) } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] object InboundCompression { final val KeepOldTablesNumber = 3 // TODO could be configurable @@ -298,8 +292,8 @@ private[remote] object InboundCompression { println( s"[compress] Found table [version: ${version}], was [OLD][${t}], old tables: [${oldTables.map(_.version)}]") case OptionVal.None => - println(s"[compress] Did not find table [version: ${version}], old tables: [${oldTables - .map(_.version)}], activeTable: ${activeTable}, nextTable: ${nextTable}") + println(s"[compress] Did not find table [version: ${version}], old tables: [${oldTables.map( + _.version)}], activeTable: ${activeTable}, nextTable: ${nextTable}") } } found @@ -531,9 +525,7 @@ private[remote] abstract class InboundCompression[T >: Null]( } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] final class UnknownCompressedIdException(id: Long) extends RuntimeException( s"Attempted de-compress unknown id [$id]! " + diff --git a/akka-remote/src/main/scala/akka/remote/artery/compress/TopHeavyHitters.scala b/akka-remote/src/main/scala/akka/remote/artery/compress/TopHeavyHitters.scala index 56d10ed2fc9..5742b35d4aa 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/compress/TopHeavyHitters.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/compress/TopHeavyHitters.scala @@ -336,9 +336,7 @@ private[remote] final class TopHeavyHitters[T >: Null](val max: Int)(implicit cl fixHeap(0) } - /** - * Remove value from hash-table based on position. - */ + /** Remove value from hash-table based on position. 
*/ private def removeHash(index: Int): Unit = { if (index >= 0) { items(index) = null @@ -399,9 +397,7 @@ private[remote] final class TopHeavyHitters[T >: Null](val max: Int)(implicit cl s"${getClass.getSimpleName}(max:$max)" } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] object TopHeavyHitters { /** Value class to avoid mixing up count and hashCode in APIs. */ diff --git a/akka-remote/src/main/scala/akka/remote/artery/jfr/Events.scala b/akka-remote/src/main/scala/akka/remote/artery/jfr/Events.scala index 0d666e46cbc..5eeb0bd1559 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/jfr/Events.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/jfr/Events.scala @@ -21,9 +21,7 @@ import akka.remote.UniqueAddress // requires jdk9+ to compile // for editing these in IntelliJ, open module settings, change JDK dependency to 11 for only this module -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object JFREventUtils { @@ -34,41 +32,31 @@ private[akka] object JFREventUtils { // transport events -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @StackTrace(false) @Category(Array("Akka", "Remoting", "Aeron")) @Label("Media driver started") final class TransportMediaDriverStarted(val directoryName: String) extends Event -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @StackTrace(false) @Category(Array("Akka", "Remoting", "Transport")) @Label("Transport started") final class TransportStarted() extends Event -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @StackTrace(false) @Category(Array("Akka", "Remoting", "Aeron")) @Label("Aeron error log started") final class TransportAeronErrorLogStarted() extends Event -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @StackTrace(false) @Category(Array("Akka", "Remoting", "Transport")) @Label("Task runner started") final class TransportTaskRunnerStarted() extends Event -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi 
@StackTrace(false) @Category(Array("Akka", "Remoting", "Transport")) @Label("Unique address set") @@ -76,65 +64,49 @@ final class TransportUniqueAddressSet(_uniqueAddress: UniqueAddress) extends Eve val uniqueAddress = _uniqueAddress.toString() } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @StackTrace(false) @Category(Array("Akka", "Remoting", "Transport")) @Label("Materializer started") final class TransportMaterializerStarted() extends Event -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @StackTrace(false) @Category(Array("Akka", "Remoting", "Transport")) @Label("Startup finished") final class TransportStartupFinished() extends Event -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @StackTrace(false) @Category(Array("Akka", "Remoting", "Transport")) @Label("Kill switch pulled") final class TransportKillSwitchPulled() extends Event -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @StackTrace(false) @Category(Array("Akka", "Remoting", "Transport")) @Label("Stopped") final class TransportStopped() extends Event -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @StackTrace(false) @Category(Array("Akka", "Remoting", "Aeron")) @Label("Aeron log task stopped") final class TransportAeronErrorLogTaskStopped() extends Event -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @StackTrace(false) @Category(Array("Akka", "Remoting", "Transport")) @Label("Media file deleted") final class TransportMediaFileDeleted() extends Event -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @StackTrace(false) @Category(Array("Akka", "Remoting", "Transport")) @Label("Send queue overflow") final class TransportSendQueueOverflow(val queueIndex: Int) extends Event -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @StackTrace(false) @Category(Array("Akka", "Remoting", "Transport")) @Label("Stop idle outbound") @@ -142,9 +114,7 @@ final class TransportStopIdleOutbound(_remoteAddress: Address, val 
queueIndex: I val remoteAddress = _remoteAddress.toString } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @StackTrace(false) @Category(Array("Akka", "Remoting", "Transport")) @Label("Quarantined") @@ -152,9 +122,7 @@ final class TransportQuarantined(_remoteAddress: Address, val uid: Long) extends val remoteAddress = _remoteAddress.toString } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @StackTrace(false) @Category(Array("Akka", "Remoting", "Transport")) @Label("Remove quarantined") @@ -162,9 +130,7 @@ final class TransportRemoveQuarantined(_remoteAddress: Address) extends Event { val remoteAddress = _remoteAddress.toString } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @StackTrace(false) @Category(Array("Akka", "Remoting", "Transport")) @Label("Restart outbound") @@ -172,9 +138,7 @@ final class TransportRestartOutbound(_remoteAddress: Address, val streamName: St val remoteAddress = _remoteAddress.toString } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @StackTrace(false) @Category(Array("Akka", "Remoting", "Transport")) @Label("Restart inbound") @@ -184,84 +148,64 @@ final class TransportRestartInbound(_remoteAddress: UniqueAddress, val streamNam // aeron sink events -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @StackTrace(false) @Category(Array("Akka", "Remoting", "Aeron", "Sink")) @Label("Started") final class AeronSinkStarted(val channel: String, val streamId: Int) extends Event {} -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @StackTrace(false) @Category(Array("Akka", "Remoting", "Aeron", "Sink")) @Label("Task runner removed") final class AeronSinkTaskRunnerRemoved(val channel: String, val streamId: Int) extends Event -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @StackTrace(false) @Category(Array("Akka", "Remoting", "Aeron", "Sink")) @Label("Publication closed") final class AeronSinkPublicationClosed(val channel: String, val streamId: Int) extends Event 
-/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @StackTrace(false) @Category(Array("Akka", "Remoting", "Aeron", "Sink")) @Label("Publication closed unexpectedly") final class AeronSinkPublicationClosedUnexpectedly(val channel: String, val streamId: Int) extends Event -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @StackTrace(false) @Category(Array("Akka", "Remoting", "Aeron", "Sink")) @Label("Stopped") final class AeronSinkStopped(val channel: String, val streamId: Int) extends Event -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @Enabled(false) // hi frequency event @StackTrace(false) @Category(Array("Akka", "Remoting", "Aeron", "Sink")) @Label("Envelope grabbed") final class AeronSinkEnvelopeGrabbed(@DataAmount() val lastMessageSize: Int) extends Event -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @Enabled(false) // hi frequency event @StackTrace(false) @Category(Array("Akka", "Remoting", "Aeron", "Sink")) @Label("Envelope offered") final class AeronSinkEnvelopeOffered(@DataAmount() val lastMessageSize: Int) extends Event -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @StackTrace(false) @Category(Array("Akka", "Remoting", "Aeron", "Sink")) @Label("Gave up envelope") final class AeronSinkGaveUpEnvelope(val cause: String) extends Event -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @Enabled(false) // hi frequency event @StackTrace(false) @Category(Array("Akka", "Remoting", "Aeron", "Sink")) @Label("Delegate to task runner") final class AeronSinkDelegateToTaskRunner(val countBeforeDelegate: Long) extends Event -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @Enabled(false) // hi frequency event @StackTrace(false) @@ -271,43 +215,33 @@ final class AeronSinkReturnFromTaskRunner(@Timespan(Timespan.NANOSECONDS) val na // aeron source events -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @StackTrace(false) @Category(Array("Akka", "Remoting", "Aeron", "Source")) 
@Label("Started") final class AeronSourceStarted(val channel: String, val streamId: Int) extends Event -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @StackTrace(false) @Category(Array("Akka", "Remoting", "Aeron", "Source")) @Label("Stopped") final class AeronSourceStopped(val channel: String, val streamId: Int) extends Event -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @Enabled(false) // hi frequency event @StackTrace(false) @Category(Array("Akka", "Remoting", "Aeron", "Source")) @Label("Received") final class AeronSourceReceived(@DataAmount() val size: Int) extends Event -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @Enabled(false) // hi frequency event @StackTrace(false) @Category(Array("Akka", "Remoting", "Aeron", "Source")) @Label("Delegate to task runner") final class AeronSourceDelegateToTaskRunner(val countBeforeDelegate: Long) extends Event -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @Enabled(false) // hi frequency event @StackTrace(false) @@ -317,17 +251,13 @@ final class AeronSourceReturnFromTaskRunner(@Timespan(Timespan.NANOSECONDS) val // compression events -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @StackTrace(false) @Category(Array("Akka", "Remoting", "Compression")) @Label("ActorRef advertisement") final class CompressionActorRefAdvertisement(val uid: Long) extends Event -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @StackTrace(false) @Category(Array("Akka", "Remoting", "Compression")) @Label("ClassManifest advertisement") @@ -335,9 +265,7 @@ final class CompressionClassManifestAdvertisement(val uid: Long) extends Event // tcp outbound events -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @StackTrace(false) @Category(Array("Akka", "Remoting", "Tcp", "Outbound")) @Label("Connected") @@ -345,9 +273,7 @@ final class TcpOutboundConnected(_remoteAddress: Address, val streamName: String val remoteAddress = _remoteAddress.toString } -/** - * 
INTERNAL API - */ +/** INTERNAL API */ @InternalApi @Enabled(false) // hi frequency event @StackTrace(false) @@ -356,9 +282,7 @@ final class TcpOutboundSent(@DataAmount() val size: Int) extends Event // tcp inbound events -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @StackTrace(false) @Category(Array("Akka", "Remoting", "Tcp", "Inbound")) @Label("Bound") @@ -366,9 +290,7 @@ final class TcpInboundBound(val bindHost: String, _address: InetSocketAddress) e val address = JFREventUtils.stringOf(_address) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @StackTrace(false) @Category(Array("Akka", "Remoting", "Tcp", "Inbound")) @Label("Unbound") @@ -376,9 +298,7 @@ final class TcpInboundUnbound(_localAddress: UniqueAddress) extends Event { val localAddress = _localAddress.toString() } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @StackTrace(false) @Category(Array("Akka", "Remoting", "Tcp", "Inbound")) @Label("Connected") @@ -386,9 +306,7 @@ final class TcpInboundConnected(_remoteAddress: InetSocketAddress) extends Event val remoteAddress = JFREventUtils.stringOf(_remoteAddress) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @Enabled(false) // hi frequency event @StackTrace(false) diff --git a/akka-remote/src/main/scala/akka/remote/artery/jfr/JFRRemotingFlightRecorder.scala b/akka-remote/src/main/scala/akka/remote/artery/jfr/JFRRemotingFlightRecorder.scala index 965142a5dd9..20a0b7e604c 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/jfr/JFRRemotingFlightRecorder.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/jfr/JFRRemotingFlightRecorder.scala @@ -11,9 +11,7 @@ import akka.annotation.InternalApi import akka.remote.UniqueAddress import akka.remote.artery.RemotingFlightRecorder -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class JFRRemotingFlightRecorder() extends RemotingFlightRecorder { override def transportMediaDriverStarted(directoryName: String): Unit 
= diff --git a/akka-remote/src/main/scala/akka/remote/artery/tcp/ArteryTcpTransport.scala b/akka-remote/src/main/scala/akka/remote/artery/tcp/ArteryTcpTransport.scala index 9cdd5300f3f..dee4caf806b 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/tcp/ArteryTcpTransport.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/tcp/ArteryTcpTransport.scala @@ -51,9 +51,7 @@ import akka.stream.scaladsl.Tcp.ServerBinding import akka.util.{ ByteString, OptionVal } import akka.util.ccompat._ -/** - * INTERNAL API - */ +/** INTERNAL API */ private[remote] object ArteryTcpTransport { private val successUnit = Success(()) @@ -64,9 +62,7 @@ private[remote] object ArteryTcpTransport { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @ccompatUsedUntil213 private[remote] class ArteryTcpTransport( _system: ExtendedActorSystem, @@ -98,11 +94,10 @@ private[remote] class ArteryTcpTransport( .createInstanceFor[SSLEngineProvider]( settings.SSLEngineProviderClassName, List((classOf[ActorSystem], system))) - .recover { - case e => - throw new ConfigurationException( - s"Could not create SSLEngineProvider [${settings.SSLEngineProviderClassName}]", - e) + .recover { case e => + throw new ConfigurationException( + s"Could not create SSLEngineProvider [${settings.SSLEngineProviderClassName}]", + e) } .get) } @@ -264,13 +259,12 @@ private[remote] class ArteryTcpTransport( inboundConnectionFlow.map(connection.handleWith(_))(sys.dispatcher) }) .run() - .recoverWith { - case e => - Future.failed( - new RemoteTransportException( - s"Failed to bind TCP to [$bindHost:$bindPort] due to: " + - e.getMessage, - e)) + .recoverWith { case e => + Future.failed( + new RemoteTransportException( + s"Failed to bind TCP to [$bindHost:$bindPort] due to: " + + e.getMessage, + e)) }(ExecutionContexts.parasitic) // only on initial startup, when ActorSystem is starting @@ -315,7 +309,8 @@ private[remote] class ArteryTcpTransport( Flow[EnvelopeBuffer] .map(_ => log.warning("Dropping large message, 
missing large-message-destinations configuration.")) .to(Sink.ignore), - Promise[Done]().future) // never completed, not enabled + Promise[Done]().future + ) // never completed, not enabled } // An inbound connection will only use one of the control, ordinary or large streams, but we have to @@ -326,14 +321,17 @@ private[remote] class ArteryTcpTransport( // overhead. val inboundStream = Sink.fromGraph(GraphDSL.create() { implicit b => import GraphDSL.Implicits._ - val partition = b.add(Partition[EnvelopeBuffer](3, env => { - env.streamId match { - case OrdinaryStreamId => 1 - case ControlStreamId => 0 - case LargeStreamId => 2 - case other => throw new IllegalArgumentException(s"Unexpected streamId [$other]") - } - })) + val partition = b.add( + Partition[EnvelopeBuffer]( + 3, + env => { + env.streamId match { + case OrdinaryStreamId => 1 + case ControlStreamId => 0 + case LargeStreamId => 2 + case other => throw new IllegalArgumentException(s"Unexpected streamId [$other]") + } + })) partition.out(0) ~> controlStream partition.out(1) ~> ordinaryMessagesStream partition.out(2) ~> largeMessagesStream @@ -365,10 +363,12 @@ private[remote] class ArteryTcpTransport( inboundKillSwitch = KillSwitches.shared("inboundKillSwitch") val allStopped: Future[Done] = for { - _ <- controlStreamCompleted.recover { case _ => Done } + _ <- controlStreamCompleted.recover { case _ => Done } _ <- ordinaryMessagesStreamCompleted.recover { case _ => Done } - _ <- if (largeMessageChannelEnabled) - largeMessagesStreamCompleted.recover { case _ => Done } else Future.successful(Done) + _ <- + if (largeMessageChannelEnabled) + largeMessagesStreamCompleted.recover { case _ => Done } + else Future.successful(Done) } yield Done allStopped.foreach(_ => runInboundStreams(port, bindPort)) } @@ -385,7 +385,7 @@ private[remote] class ArteryTcpTransport( .addAttributes(Attributes.logLevels(onFailure = LogLevels.Off)) .via(inboundKillSwitch.flow) .via(inboundFlow(settings, NoInboundCompressions)) - 
.toMat(inboundControlSink)({ case (a, (c, d)) => (a, c, d) }) + .toMat(inboundControlSink) { case (a, (c, d)) => (a, c, d) } .run()(controlMaterializer) attachControlMessageObserver(ctrl) updateStreamMatValues(completed) @@ -403,7 +403,7 @@ private[remote] class ArteryTcpTransport( .addAttributes(Attributes.logLevels(onFailure = LogLevels.Off)) .via(inboundKillSwitch.flow) .viaMat(inboundFlow(settings, _inboundCompressions))(Keep.both) - .toMat(inboundSink(envelopeBufferPool))({ case ((a, b), c) => (a, b, c) }) + .toMat(inboundSink(envelopeBufferPool)) { case ((a, b), c) => (a, b, c) } .run()(materializer) } else { @@ -426,9 +426,9 @@ private[remote] class ArteryTcpTransport( new FixedSizePartitionHub[InboundEnvelope]( inboundLanePartitioner, inboundLanes, - settings.Advanced.InboundHubBufferSize)))({ - case ((a, b), c) => (a, b, c) - }) + settings.Advanced.InboundHubBufferSize))) { case ((a, b), c) => + (a, b, c) + } .run()(materializer) val lane = inboundSink(envelopeBufferPool) diff --git a/akka-remote/src/main/scala/akka/remote/artery/tcp/ConfigSSLEngineProvider.scala b/akka-remote/src/main/scala/akka/remote/artery/tcp/ConfigSSLEngineProvider.scala index 33a967ec4e1..29e04f4ef6b 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/tcp/ConfigSSLEngineProvider.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/tcp/ConfigSSLEngineProvider.scala @@ -88,9 +88,7 @@ class ConfigSSLEngineProvider(protected val config: Config, protected val log: M } } - /** - * Subclass may override to customize loading of `KeyStore` - */ + /** Subclass may override to customize loading of `KeyStore` */ protected def loadKeystore(filename: String, password: String): KeyStore = { val keyStore = KeyStore.getInstance(KeyStore.getDefaultType) val fin = Files.newInputStream(Paths.get(filename)) @@ -99,18 +97,14 @@ class ConfigSSLEngineProvider(protected val config: Config, protected val log: M keyStore } - /** - * Subclass may override to customize `KeyManager` - */ + /** 
Subclass may override to customize `KeyManager` */ protected def keyManagers: Array[KeyManager] = { val factory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm) factory.init(loadKeystore(SSLKeyStore, SSLKeyStorePassword), SSLKeyPassword.toCharArray) factory.getKeyManagers } - /** - * Subclass may override to customize `TrustManager` - */ + /** Subclass may override to customize `TrustManager` */ protected def trustManagers: Array[TrustManager] = { val trustManagerFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm) trustManagerFactory.init(loadKeystore(SSLTrustStore, SSLTrustStorePassword)) diff --git a/akka-remote/src/main/scala/akka/remote/artery/tcp/SecureRandomFactory.scala b/akka-remote/src/main/scala/akka/remote/artery/tcp/SecureRandomFactory.scala index 24e760f8dd1..53b3b3a2761 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/tcp/SecureRandomFactory.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/tcp/SecureRandomFactory.scala @@ -11,16 +11,12 @@ import com.typesafe.config.Config import akka.annotation.InternalApi import akka.event.MarkerLoggingAdapter -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object SecureRandomFactory { val GeneratorJdkSecureRandom = "SecureRandom" - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi // extracted as a method for testing private[tcp] def rngConfig(config: Config) = { diff --git a/akka-remote/src/main/scala/akka/remote/artery/tcp/TcpFraming.scala b/akka-remote/src/main/scala/akka/remote/artery/tcp/TcpFraming.scala index 0497f5e163c..7602665bd81 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/tcp/TcpFraming.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/tcp/TcpFraming.scala @@ -18,9 +18,7 @@ import akka.stream.scaladsl.Framing.FramingException import akka.stream.stage.GraphStageLogic import akka.util.ByteString -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object 
TcpFraming { val Undefined = Int.MinValue @@ -56,9 +54,7 @@ import akka.util.ByteString ((frameLength & 0xff000000) >> 24).toByte)) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class TcpFraming(flightRecorder: RemotingFlightRecorder = NoOpRemotingFlightRecorder) extends ByteStringParser[EnvelopeBuffer] { diff --git a/akka-remote/src/main/scala/akka/remote/artery/tcp/ssl/PemManagersProvider.scala b/akka-remote/src/main/scala/akka/remote/artery/tcp/ssl/PemManagersProvider.scala index c473e23c08c..0852a0896fa 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/tcp/ssl/PemManagersProvider.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/tcp/ssl/PemManagersProvider.scala @@ -24,15 +24,11 @@ import akka.annotation.InternalApi import akka.pki.pem.DERPrivateKeyLoader import akka.pki.pem.PEMDecoder -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[ssl] object PemManagersProvider { - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[ssl] def buildKeyManagers( privateKey: PrivateKey, @@ -52,9 +48,7 @@ private[ssl] object PemManagersProvider { keyManagers } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[ssl] def buildTrustManagers(cacert: Certificate): Array[TrustManager] = { val trustStore = KeyStore.getInstance("JKS") @@ -67,9 +61,7 @@ private[ssl] object PemManagersProvider { tmf.getTrustManagers } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[ssl] def loadPrivateKey(filename: String): PrivateKey = blocking { val bytes = Files.readAllBytes(new File(filename).toPath) @@ -79,9 +71,7 @@ private[ssl] object PemManagersProvider { private val certFactory = CertificateFactory.getInstance("X.509") - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[ssl] def loadCertificate(filename: String): Certificate = blocking { val bytes = Files.readAllBytes(new File(filename).toPath) diff --git 
a/akka-remote/src/main/scala/akka/remote/artery/tcp/ssl/RotatingKeysSSLEngineProvider.scala b/akka-remote/src/main/scala/akka/remote/artery/tcp/ssl/RotatingKeysSSLEngineProvider.scala index 1d195f85e74..29538784d1c 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/tcp/ssl/RotatingKeysSSLEngineProvider.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/tcp/ssl/RotatingKeysSSLEngineProvider.scala @@ -154,15 +154,11 @@ final class RotatingKeysSSLEngineProvider(val config: Config, protected val log: object RotatingKeysSSLEngineProvider { - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private case class CachedContext(cached: ConfiguredContext, expires: Deadline) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private case class ConfiguredContext(context: SSLContext, sessionVerifier: SessionVerifier) diff --git a/akka-remote/src/main/scala/akka/remote/artery/tcp/ssl/SSLEngineConfig.scala b/akka-remote/src/main/scala/akka/remote/artery/tcp/ssl/SSLEngineConfig.scala index 5bcff82ed45..91ca5ff4db8 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/tcp/ssl/SSLEngineConfig.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/tcp/ssl/SSLEngineConfig.scala @@ -12,9 +12,7 @@ import com.typesafe.config.Config import akka.annotation.InternalApi import akka.japi.Util.immutableSeq -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[tcp] class SSLEngineConfig(config: Config) { private[tcp] val SSLRandomNumberGenerator: String = config.getString("random-number-generator") diff --git a/akka-remote/src/main/scala/akka/remote/artery/tcp/ssl/X509Readers.scala b/akka-remote/src/main/scala/akka/remote/artery/tcp/ssl/X509Readers.scala index 9e350e763db..0edd599975b 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/tcp/ssl/X509Readers.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/tcp/ssl/X509Readers.scala @@ -11,9 +11,7 @@ import javax.naming.ldap.LdapName import akka.annotation.InternalApi 
import akka.util.ccompat.JavaConverters._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object X509Readers { diff --git a/akka-remote/src/main/scala/akka/remote/serialization/ActorRefResolveCache.scala b/akka-remote/src/main/scala/akka/remote/serialization/ActorRefResolveCache.scala index 0502d090c03..967314993fd 100644 --- a/akka-remote/src/main/scala/akka/remote/serialization/ActorRefResolveCache.scala +++ b/akka-remote/src/main/scala/akka/remote/serialization/ActorRefResolveCache.scala @@ -21,9 +21,7 @@ import akka.remote.artery.LruBoundedCache import akka.util.Unsafe import akka.util.unused -/** - * INTERNAL API: Thread local cache per actor system - */ +/** INTERNAL API: Thread local cache per actor system */ private[akka] object ActorRefResolveThreadLocalCache extends ExtensionId[ActorRefResolveThreadLocalCache] with ExtensionIdProvider { @@ -37,9 +35,7 @@ private[akka] object ActorRefResolveThreadLocalCache new ActorRefResolveThreadLocalCache(system) } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] class ActorRefResolveThreadLocalCache(val system: ExtendedActorSystem) extends Extension { private val provider = system.provider match { @@ -59,9 +55,7 @@ private[akka] class ActorRefResolveThreadLocalCache(val system: ExtendedActorSys } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] final class ActorRefResolveCache(provider: RemoteActorRefProvider) extends AbstractActorRefResolveCache[ActorRef] { @@ -69,9 +63,7 @@ private[akka] final class ActorRefResolveCache(provider: RemoteActorRefProvider) provider.internalResolveActorRef(k) } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] abstract class AbstractActorRefResolveCache[R <: ActorRef: ClassTag] extends LruBoundedCache[String, R](capacity = 1024, evictAgeThreshold = 600) { diff --git a/akka-remote/src/main/scala/akka/remote/serialization/ArteryMessageSerializer.scala 
b/akka-remote/src/main/scala/akka/remote/serialization/ArteryMessageSerializer.scala index 6b106c51c4e..df3655269ae 100644 --- a/akka-remote/src/main/scala/akka/remote/serialization/ArteryMessageSerializer.scala +++ b/akka-remote/src/main/scala/akka/remote/serialization/ArteryMessageSerializer.scala @@ -104,11 +104,11 @@ private[akka] final class ArteryMessageSerializer(val system: ExtendedActorSyste case HandshakeRspManifest => deserializeWithFromAddress(bytes, HandshakeRsp.apply) case SystemMessageDeliveryNackManifest => deserializeSystemMessageDeliveryAck(bytes, SystemMessageDelivery.Nack.apply) - case QuarantinedManifest => deserializeQuarantined(ArteryControlFormats.Quarantined.parseFrom(bytes)) - case FlushManifest => Flush - case FlushAckManifest => deserializeFlushAck(bytes) - case ActorSystemTerminatingManifest => deserializeWithFromAddress(bytes, ActorSystemTerminating.apply) - case ActorSystemTerminatingAckManifest => deserializeWithFromAddress(bytes, ActorSystemTerminatingAck.apply) + case QuarantinedManifest => deserializeQuarantined(ArteryControlFormats.Quarantined.parseFrom(bytes)) + case FlushManifest => Flush + case FlushAckManifest => deserializeFlushAck(bytes) + case ActorSystemTerminatingManifest => deserializeWithFromAddress(bytes, ActorSystemTerminating.apply) + case ActorSystemTerminatingAckManifest => deserializeWithFromAddress(bytes, ActorSystemTerminatingAck.apply) case ActorRefCompressionAdvertisementManifest => deserializeActorRefCompressionAdvertisement(bytes) case ActorRefCompressionAdvertisementAckManifest => deserializeCompressionTableAdvertisementAck(bytes, ActorRefCompressionAdvertisementAck.apply) @@ -158,9 +158,8 @@ private[akka] final class ArteryMessageSerializer(val system: ExtendedActorSyste .setOriginUid(adv.table.originUid) .setTableVersion(adv.table.version) - adv.table.dictionary.foreach { - case (key, value) => - builder.addKeys(keySerializer(key)).addValues(value) + adv.table.dictionary.foreach { case (key, value) => + 
builder.addKeys(keySerializer(key)).addValues(value) } builder.build diff --git a/akka-remote/src/main/scala/akka/remote/serialization/MessageContainerSerializer.scala b/akka-remote/src/main/scala/akka/remote/serialization/MessageContainerSerializer.scala index 03cd4ab9006..790ee1113a6 100644 --- a/akka-remote/src/main/scala/akka/remote/serialization/MessageContainerSerializer.scala +++ b/akka-remote/src/main/scala/akka/remote/serialization/MessageContainerSerializer.scala @@ -27,7 +27,7 @@ class MessageContainerSerializer(val system: ExtendedActorSystem) extends BaseSe def toBinary(obj: AnyRef): Array[Byte] = obj match { case sel: ActorSelectionMessage => serializeSelection(sel) - case _ => throw new IllegalArgumentException(s"Cannot serialize object of type [${obj.getClass.getName}]") + case _ => throw new IllegalArgumentException(s"Cannot serialize object of type [${obj.getClass.getName}]") } import ContainerFormats.PatternType._ diff --git a/akka-remote/src/main/scala/akka/remote/serialization/MiscMessageSerializer.scala b/akka-remote/src/main/scala/akka/remote/serialization/MiscMessageSerializer.scala index 592011a8ac2..c91ce6d045f 100644 --- a/akka-remote/src/main/scala/akka/remote/serialization/MiscMessageSerializer.scala +++ b/akka-remote/src/main/scala/akka/remote/serialization/MiscMessageSerializer.scala @@ -69,7 +69,7 @@ class MiscMessageSerializer(val system: ExtendedActorSystem) extends SerializerW case sgp: ScatterGatherFirstCompletedPool => serializeScatterGatherFirstCompletedPool(sgp) case tp: TailChoppingPool => serializeTailChoppingPool(tp) case rrc: RemoteRouterConfig => serializeRemoteRouterConfig(rrc) - case _ => throw new IllegalArgumentException(s"Cannot serialize object of type [${obj.getClass.getName}]") + case _ => throw new IllegalArgumentException(s"Cannot serialize object of type [${obj.getClass.getName}]") } private def serializeIdentify(identify: Identify): Array[Byte] = @@ -342,7 +342,7 @@ class MiscMessageSerializer(val system: 
ExtendedActorSystem) extends SerializerW ActorIdentityManifest -> deserializeActorIdentity, StatusSuccessManifest -> deserializeStatusSuccess, StatusFailureManifest -> deserializeStatusFailure, - StatusReplyAckManifest -> ((_) => StatusReply.Ack), + StatusReplyAckManifest -> (_ => StatusReply.Ack), StatusReplySuccessManifest -> deserializeStatusReplySuccess, StatusReplyErrorMessageManifest -> deserializeStatusReplyErrorMessage, StatusReplyErrorExceptionManifest -> deserializeStatusReplyErrorException, @@ -350,17 +350,17 @@ class MiscMessageSerializer(val system: ExtendedActorSystem) extends SerializerW ActorRefManifest -> deserializeActorRefBytes, OptionManifest -> deserializeOption, OptionalManifest -> deserializeOptional, - PoisonPillManifest -> ((_) => PoisonPill), - KillManifest -> ((_) => Kill), - RemoteWatcherHBManifest -> ((_) => RemoteWatcher.Heartbeat), - DoneManifest -> ((_) => Done), - NotUsedManifest -> ((_) => NotUsed), + PoisonPillManifest -> (_ => PoisonPill), + KillManifest -> (_ => Kill), + RemoteWatcherHBManifest -> (_ => RemoteWatcher.Heartbeat), + DoneManifest -> (_ => Done), + NotUsedManifest -> (_ => NotUsed), AddressManifest -> deserializeAddressData, UniqueAddressManifest -> deserializeUniqueAddress, RemoteWatcherHBRespManifest -> deserializeHeartbeatRsp, ActorInitializationExceptionManifest -> deserializeActorInitializationException, ThrowableNotSerializableExceptionManifest -> deserializeThrowableNotSerializableException, - LocalScopeManifest -> ((_) => LocalScope), + LocalScopeManifest -> (_ => LocalScope), RemoteScopeManifest -> deserializeRemoteScope, ConfigManifest -> deserializeConfig, FromConfigManifest -> deserializeFromConfig, diff --git a/akka-remote/src/main/scala/akka/remote/serialization/ProtobufSerializer.scala b/akka-remote/src/main/scala/akka/remote/serialization/ProtobufSerializer.scala index ea0569c7be0..1ec76dc993e 100644 --- a/akka-remote/src/main/scala/akka/remote/serialization/ProtobufSerializer.scala +++ 
b/akka-remote/src/main/scala/akka/remote/serialization/ProtobufSerializer.scala @@ -73,8 +73,8 @@ class ProtobufSerializer(val system: ExtendedActorSystem) extends BaseSerializer if (method eq null) clazz.getDeclaredMethod("parseFrom", ProtobufSerializer.ARRAY_OF_BYTE_ARRAY: _*) else method if (parsingMethodBindingRef.compareAndSet( - parsingMethodBinding, - parsingMethodBinding.updated(clazz, unCachedParsingMethod))) + parsingMethodBinding, + parsingMethodBinding.updated(clazz, unCachedParsingMethod))) unCachedParsingMethod else parsingMethod(unCachedParsingMethod) @@ -99,8 +99,8 @@ class ProtobufSerializer(val system: ExtendedActorSystem) extends BaseSerializer if (method eq null) clazz.getMethod("toByteArray") else method if (toByteArrayMethodBindingRef.compareAndSet( - toByteArrayMethodBinding, - toByteArrayMethodBinding.updated(clazz, unCachedtoByteArrayMethod))) + toByteArrayMethodBinding, + toByteArrayMethodBinding.updated(clazz, unCachedtoByteArrayMethod))) unCachedtoByteArrayMethod else toByteArrayMethod(unCachedtoByteArrayMethod) diff --git a/akka-remote/src/main/scala/akka/remote/serialization/ThrowableSupport.scala b/akka-remote/src/main/scala/akka/remote/serialization/ThrowableSupport.scala index 2dac59fa002..45e1daab778 100644 --- a/akka-remote/src/main/scala/akka/remote/serialization/ThrowableSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/serialization/ThrowableSupport.scala @@ -14,9 +14,7 @@ import akka.remote.ContainerFormats import akka.serialization.DisabledJavaSerializer import akka.serialization.SerializationExtension -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] class ThrowableSupport(system: ExtendedActorSystem) { private lazy val serialization = SerializationExtension(system) diff --git a/akka-remote/src/main/scala/akka/remote/serialization/WrappedPayloadSupport.scala b/akka-remote/src/main/scala/akka/remote/serialization/WrappedPayloadSupport.scala index 6b56b55d174..e377984e7bb 100644 --- 
a/akka-remote/src/main/scala/akka/remote/serialization/WrappedPayloadSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/serialization/WrappedPayloadSupport.scala @@ -17,9 +17,7 @@ import akka.serialization.DisabledJavaSerializer import akka.serialization.Serialization import akka.serialization.SerializerWithStringManifest -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] class WrappedPayloadSupport(system: ExtendedActorSystem) { private lazy val serialization = SerializationExtension(system) diff --git a/akka-remote/src/main/scala/akka/remote/testkit/TestTransportCommands.scala b/akka-remote/src/main/scala/akka/remote/testkit/TestTransportCommands.scala index f1182765367..53ee74816cc 100644 --- a/akka-remote/src/main/scala/akka/remote/testkit/TestTransportCommands.scala +++ b/akka-remote/src/main/scala/akka/remote/testkit/TestTransportCommands.scala @@ -20,19 +20,13 @@ sealed trait Direction { object Direction { - /** - * Java API: get the Direction.Send instance - */ + /** Java API: get the Direction.Send instance */ def sendDirection(): Direction = Direction.Send - /** - * Java API: get the Direction.Receive instance - */ + /** Java API: get the Direction.Receive instance */ def receiveDirection(): Direction = Direction.Receive - /** - * Java API: get the Direction.Both instance - */ + /** Java API: get the Direction.Both instance */ def bothDirection(): Direction = Direction.Both @SerialVersionUID(1L) @@ -42,9 +36,7 @@ object Direction { case _ => false } - /** - * Java API: get the singleton instance - */ + /** Java API: get the singleton instance */ def getInstance: Direction = this } @@ -55,9 +47,7 @@ object Direction { case _ => false } - /** - * Java API: get the singleton instance - */ + /** Java API: get the singleton instance */ def getInstance: Direction = this } @@ -65,9 +55,7 @@ object Direction { case object Both extends Direction { override def includes(other: Direction): Boolean = true - /** - * Java API: get the singleton 
instance - */ + /** Java API: get the singleton instance */ def getInstance: Direction = this } } @@ -78,22 +66,16 @@ final case class SetThrottle(address: Address, direction: Direction, mode: Throt @SerialVersionUID(1L) case object SetThrottleAck { - /** - * Java API: get the singleton instance - */ + /** Java API: get the singleton instance */ def getInstance = this } object ThrottleMode { - /** - * Java API: get the Blackhole instance - */ + /** Java API: get the Blackhole instance */ def blackholeThrottleMode(): ThrottleMode = Blackhole - /** - * Java API: get the Unthrottled instance - */ + /** Java API: get the Unthrottled instance */ def unthrottledThrottleMode(): ThrottleMode = Unthrottled } @@ -107,9 +89,9 @@ final case class TokenBucket(capacity: Int, tokensPerSecond: Double, nanoTimeOfL extends ThrottleMode { private def isAvailable(nanoTimeOfSend: Long, tokens: Int): Boolean = - if ((tokens > capacity && availableTokens > 0)) { + if (tokens > capacity && availableTokens > 0) { true // Allow messages larger than capacity through, it will be recorded as negative tokens - } else min((availableTokens + tokensGenerated(nanoTimeOfSend)), capacity) >= tokens + } else min(availableTokens + tokensGenerated(nanoTimeOfSend), capacity) >= tokens override def tryConsumeTokens(nanoTimeOfSend: Long, tokens: Int): (ThrottleMode, Boolean) = { if (isAvailable(nanoTimeOfSend, tokens)) @@ -135,9 +117,7 @@ case object Unthrottled extends ThrottleMode { override def tryConsumeTokens(nanoTimeOfSend: Long, tokens: Int): (ThrottleMode, Boolean) = (this, true) override def timeToAvailable(currentNanoTime: Long, tokens: Int): FiniteDuration = Duration.Zero - /** - * Java API: get the singleton instance - */ + /** Java API: get the singleton instance */ def getInstance: ThrottleMode = this } @@ -146,23 +126,17 @@ case object Blackhole extends ThrottleMode { override def tryConsumeTokens(nanoTimeOfSend: Long, tokens: Int): (ThrottleMode, Boolean) = (this, false) override def 
timeToAvailable(currentNanoTime: Long, tokens: Int): FiniteDuration = Duration.Zero - /** - * Java API: get the singleton instance - */ + /** Java API: get the singleton instance */ def getInstance: ThrottleMode = this } -/** - * Management Command to force disassociation of an address. - */ +/** Management Command to force disassociation of an address. */ @SerialVersionUID(1L) final case class ForceDisassociate(address: Address) @SerialVersionUID(1L) case object ForceDisassociateAck { - /** - * Java API: get the singleton instance - */ + /** Java API: get the singleton instance */ def getInstance = this } diff --git a/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala b/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala index 4092dc08b44..19e96497a02 100644 --- a/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala @@ -44,19 +44,19 @@ class AccrualFailureDetectorSpec extends AkkaSpec("akka.loglevel = INFO") { "use good enough cumulative distribution function" in { val fd = createFailureDetector() - cdf(fd.phi(0, 0, 10)) should ===(0.5 +- (0.001)) - cdf(fd.phi(6L, 0, 10)) should ===(0.7257 +- (0.001)) - cdf(fd.phi(15L, 0, 10)) should ===(0.9332 +- (0.001)) - cdf(fd.phi(20L, 0, 10)) should ===(0.97725 +- (0.001)) - cdf(fd.phi(25L, 0, 10)) should ===(0.99379 +- (0.001)) - cdf(fd.phi(35L, 0, 10)) should ===(0.99977 +- (0.001)) - cdf(fd.phi(40L, 0, 10)) should ===(0.99997 +- (0.0001)) + cdf(fd.phi(0, 0, 10)) should ===(0.5 +- 0.001) + cdf(fd.phi(6L, 0, 10)) should ===(0.7257 +- 0.001) + cdf(fd.phi(15L, 0, 10)) should ===(0.9332 +- 0.001) + cdf(fd.phi(20L, 0, 10)) should ===(0.97725 +- 0.001) + cdf(fd.phi(25L, 0, 10)) should ===(0.99379 +- 0.001) + cdf(fd.phi(35L, 0, 10)) should ===(0.99977 +- 0.001) + cdf(fd.phi(40L, 0, 10)) should ===(0.99997 +- 0.0001) for (Seq(x, y) <- (0 to 40).toList.sliding(2)) { fd.phi(x, 0, 10) should be < 
(fd.phi(y, 0, 10)) } - cdf(fd.phi(22, 20.0, 3)) should ===(0.7475 +- (0.001)) + cdf(fd.phi(22, 20.0, 3)) should ===(0.7475 +- 0.001) } "handle outliers without losing precision or hitting exceptions" in { @@ -69,12 +69,14 @@ class AccrualFailureDetectorSpec extends AkkaSpec("akka.loglevel = INFO") { val fd = createFailureDetector() val test = TreeMap(0 -> 0.0, 500 -> 0.1, 1000 -> 0.3, 1200 -> 1.6, 1400 -> 4.7, 1600 -> 10.8, 1700 -> 15.3) for ((timeDiff, expectedPhi) <- test) { - fd.phi(timeDiff = timeDiff, mean = 1000.0, stdDeviation = 100.0) should ===(expectedPhi +- (0.1)) + fd.phi(timeDiff = timeDiff, mean = 1000.0, stdDeviation = 100.0) should ===(expectedPhi +- 0.1) } // larger stdDeviation results => lower phi - fd.phi(timeDiff = 1100, mean = 1000.0, stdDeviation = 500.0) should be < (fd - .phi(timeDiff = 1100, mean = 1000.0, stdDeviation = 100.0)) + fd.phi(timeDiff = 1100, mean = 1000.0, stdDeviation = 500.0) should be < (fd.phi( + timeDiff = 1100, + mean = 1000.0, + stdDeviation = 100.0)) } "return phi value of 0.0 on startup for each address, when no heartbeats" in { @@ -90,7 +92,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec("akka.loglevel = INFO") { fd.heartbeat() fd.phi should ===(0.3 +- 0.2) fd.phi should ===(4.5 +- 0.3) - fd.phi should be > (15.0) + fd.phi should be > 15.0 } "return phi value using first interval after second heartbeat" in { @@ -98,9 +100,9 @@ class AccrualFailureDetectorSpec extends AkkaSpec("akka.loglevel = INFO") { val fd = createFailureDetector(clock = fakeTimeGenerator(timeInterval)) fd.heartbeat() - fd.phi should be > (0.0) + fd.phi should be > 0.0 fd.heartbeat() - fd.phi should be > (0.0) + fd.phi should be > 0.0 } "mark node as monitored after a series of successful heartbeats" in { @@ -120,18 +122,19 @@ class AccrualFailureDetectorSpec extends AkkaSpec("akka.loglevel = INFO") { val timeInterval = List[Long](0, 1000, 100, 100, 7000) val fd = createFailureDetector(threshold = 3, clock = fakeTimeGenerator(timeInterval)) 
- fd.heartbeat() //0 - fd.heartbeat() //1000 - fd.heartbeat() //1100 + fd.heartbeat() // 0 + fd.heartbeat() // 1000 + fd.heartbeat() // 1100 - fd.isAvailable should ===(true) //1200 - fd.isAvailable should ===(false) //8200 + fd.isAvailable should ===(true) // 1200 + fd.isAvailable should ===(false) // 8200 } "mark node as available if it starts heartbeat again after being marked dead due to detection of failure" in { // 1000 regular intervals, 5 minute pause, and then a short pause again that should trigger unreachable again val regularIntervals = 0L +: Vector.fill(999)(1000L) - val timeIntervals = regularIntervals :+ (5 * 60 * 1000L) :+ 100L :+ 900L :+ 100L :+ 7000L :+ 100L :+ 900L :+ 100L :+ 900L + val timeIntervals = + regularIntervals :+ (5 * 60 * 1000L) :+ 100L :+ 900L :+ 100L :+ 7000L :+ 100L :+ 900L :+ 100L :+ 900L val fd = createFailureDetector( threshold = 8, acceptableLostDuration = 3.seconds, @@ -182,18 +185,18 @@ class AccrualFailureDetectorSpec extends AkkaSpec("akka.loglevel = INFO") { val fd = createFailureDetector(maxSampleSize = 3, clock = fakeTimeGenerator(timeInterval)) // 100 ms interval - fd.heartbeat() //0 - fd.heartbeat() //100 - fd.heartbeat() //200 - fd.heartbeat() //300 - val phi1 = fd.phi //400 + fd.heartbeat() // 0 + fd.heartbeat() // 100 + fd.heartbeat() // 200 + fd.heartbeat() // 300 + val phi1 = fd.phi // 400 // 500 ms interval, should become same phi when 100 ms intervals have been dropped - fd.heartbeat() //1000 - fd.heartbeat() //1500 - fd.heartbeat() //2000 - fd.heartbeat() //2500 - val phi2 = fd.phi //3000 - phi2 should ===(phi1 +- (0.001)) + fd.heartbeat() // 1000 + fd.heartbeat() // 1500 + fd.heartbeat() // 2000 + fd.heartbeat() // 2500 + val phi2 = fd.phi // 3000 + phi2 should ===(phi1 +- 0.001) } } diff --git a/akka-remote/src/test/scala/akka/remote/DaemonicSpec.scala b/akka-remote/src/test/scala/akka/remote/DaemonicSpec.scala index 9cb120b8b8b..e7ef51ab64c 100644 --- 
a/akka-remote/src/test/scala/akka/remote/DaemonicSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/DaemonicSpec.scala @@ -42,11 +42,13 @@ class DaemonicSpec extends AkkaSpec { selection ! "whatever" // get new non daemonic threads running - awaitAssert({ - val newNonDaemons: Set[Thread] = - Thread.getAllStackTraces.keySet().asScala.filter(t => !origThreads(t) && !t.isDaemon).to(Set) - newNonDaemons should ===(Set.empty[Thread]) - }, 4.seconds) + awaitAssert( + { + val newNonDaemons: Set[Thread] = + Thread.getAllStackTraces.keySet().asScala.filter(t => !origThreads(t) && !t.isDaemon).to(Set) + newNonDaemons should ===(Set.empty[Thread]) + }, + 4.seconds) } finally { shutdown(daemonicSystem) diff --git a/akka-remote/src/test/scala/akka/remote/DeadlineFailureDetectorSpec.scala b/akka-remote/src/test/scala/akka/remote/DeadlineFailureDetectorSpec.scala index 18dc81f3f85..6ef2a53b246 100644 --- a/akka-remote/src/test/scala/akka/remote/DeadlineFailureDetectorSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/DeadlineFailureDetectorSpec.scala @@ -43,18 +43,19 @@ class DeadlineFailureDetectorSpec extends AkkaSpec { val timeInterval = List[Long](0, 1000, 100, 100, 7000) val fd = createFailureDetector(acceptableLostDuration = 4.seconds, clock = fakeTimeGenerator(timeInterval)) - fd.heartbeat() //0 - fd.heartbeat() //1000 - fd.heartbeat() //1100 + fd.heartbeat() // 0 + fd.heartbeat() // 1000 + fd.heartbeat() // 1100 - fd.isAvailable should ===(true) //1200 - fd.isAvailable should ===(false) //8200 + fd.isAvailable should ===(true) // 1200 + fd.isAvailable should ===(false) // 8200 } "mark node as available if it starts heartbeat again after being marked dead due to detection of failure" in { // 1000 regular intervals, 5 minute pause, and then a short pause again that should trigger unreachable again val regularIntervals = 0L +: Vector.fill(999)(1000L) - val timeIntervals = regularIntervals :+ (5 * 60 * 1000L) :+ 100L :+ 900L :+ 100L :+ 7000L :+ 100L :+ 900L :+ 100L 
:+ 900L + val timeIntervals = + regularIntervals :+ (5 * 60 * 1000L) :+ 100L :+ 900L :+ 100L :+ 7000L :+ 100L :+ 900L :+ 100L :+ 900L val fd = createFailureDetector(acceptableLostDuration = 4.seconds, clock = fakeTimeGenerator(timeIntervals)) for (_ <- 0 until 1000) fd.heartbeat() diff --git a/akka-remote/src/test/scala/akka/remote/FailureDetectorRegistrySpec.scala b/akka-remote/src/test/scala/akka/remote/FailureDetectorRegistrySpec.scala index 3b7d29bbf22..6408b4f6aca 100644 --- a/akka-remote/src/test/scala/akka/remote/FailureDetectorRegistrySpec.scala +++ b/akka-remote/src/test/scala/akka/remote/FailureDetectorRegistrySpec.scala @@ -42,15 +42,14 @@ class FailureDetectorRegistrySpec extends AkkaSpec("akka.loglevel = INFO") { acceptableLostDuration: FiniteDuration = Duration.Zero, firstHeartbeatEstimate: FiniteDuration = 1.second, clock: Clock = FailureDetector.defaultClock): FailureDetectorRegistry[String] = { - new DefaultFailureDetectorRegistry[String]( - () => - createFailureDetector( - threshold, - maxSampleSize, - minStdDeviation, - acceptableLostDuration, - firstHeartbeatEstimate, - clock)) + new DefaultFailureDetectorRegistry[String](() => + createFailureDetector( + threshold, + maxSampleSize, + minStdDeviation, + acceptableLostDuration, + firstHeartbeatEstimate, + clock)) } "mark node as available after a series of successful heartbeats" in { @@ -68,13 +67,13 @@ class FailureDetectorRegistrySpec extends AkkaSpec("akka.loglevel = INFO") { val timeInterval = List[Long](0, 1000, 100, 100, 4000, 3000) val fd = createFailureDetectorRegistry(threshold = 3, clock = fakeTimeGenerator(timeInterval)) - fd.heartbeat("resource1") //0 - fd.heartbeat("resource1") //1000 - fd.heartbeat("resource1") //1100 + fd.heartbeat("resource1") // 0 + fd.heartbeat("resource1") // 1000 + fd.heartbeat("resource1") // 1100 - fd.isAvailable("resource1") should ===(true) //1200 - fd.heartbeat("resource2") //5200, but unrelated resource - fd.isAvailable("resource1") should ===(false) 
//8200 + fd.isAvailable("resource1") should ===(true) // 1200 + fd.heartbeat("resource2") // 5200, but unrelated resource + fd.isAvailable("resource1") should ===(false) // 8200 } "accept some configured missing heartbeats" in { @@ -123,25 +122,25 @@ class FailureDetectorRegistrySpec extends AkkaSpec("akka.loglevel = INFO") { val fd = createFailureDetectorRegistry(clock = fakeTimeGenerator(timeInterval)) fd.isMonitoring("resource1") should ===(false) - fd.heartbeat("resource1") //0 + fd.heartbeat("resource1") // 0 - fd.heartbeat("resource1") //1000 - fd.heartbeat("resource1") //1100 + fd.heartbeat("resource1") // 1000 + fd.heartbeat("resource1") // 1100 - fd.isAvailable("resource1") should ===(true) //2200 + fd.isAvailable("resource1") should ===(true) // 2200 fd.isMonitoring("resource1") should ===(true) fd.remove("resource1") fd.isMonitoring("resource1") should ===(false) - fd.isAvailable("resource1") should ===(true) //3300 + fd.isAvailable("resource1") should ===(true) // 3300 // it receives heartbeat from an explicitly removed node - fd.heartbeat("resource1") //4400 - fd.heartbeat("resource1") //5500 - fd.heartbeat("resource1") //6600 + fd.heartbeat("resource1") // 4400 + fd.heartbeat("resource1") // 5500 + fd.heartbeat("resource1") // 6600 - fd.isAvailable("resource1") should ===(true) //6700 + fd.isAvailable("resource1") should ===(true) // 6700 fd.isMonitoring("resource1") should ===(true) } diff --git a/akka-remote/src/test/scala/akka/remote/LogSourceSpec.scala b/akka-remote/src/test/scala/akka/remote/LogSourceSpec.scala index f3996859ead..31c5a23814c 100644 --- a/akka-remote/src/test/scala/akka/remote/LogSourceSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/LogSourceSpec.scala @@ -16,9 +16,8 @@ import akka.testkit.TestProbe object LogSourceSpec { class Reporter extends Actor with ActorLogging { - def receive = { - case s: String => - log.info(s) + def receive = { case s: String => + log.info(s) } } } @@ -33,12 +32,16 @@ class LogSourceSpec extends 
AkkaSpec(""" val reporter = system.actorOf(Props[Reporter](), "reporter") val logProbe = TestProbe() - system.eventStream.subscribe(system.actorOf(Props(new Actor { - def receive = { - case i @ Info(_, _, msg: String) if msg contains "hello" => logProbe.ref ! i - case _ => - } - }).withDeploy(Deploy.local), "logSniffer"), classOf[Logging.Info]) + system.eventStream.subscribe( + system.actorOf( + Props(new Actor { + def receive = { + case i @ Info(_, _, msg: String) if msg contains "hello" => logProbe.ref ! i + case _ => + } + }).withDeploy(Deploy.local), + "logSniffer"), + classOf[Logging.Info]) "Log events" must { diff --git a/akka-remote/src/test/scala/akka/remote/MessageLoggingSpec.scala b/akka-remote/src/test/scala/akka/remote/MessageLoggingSpec.scala index 6fde2ecc6d0..1900dd1229f 100644 --- a/akka-remote/src/test/scala/akka/remote/MessageLoggingSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/MessageLoggingSpec.scala @@ -34,9 +34,8 @@ object MessageLoggingSpec { } class BadActor extends Actor { - override def receive = { - case _ => - sender() ! BadMsg("hah") + override def receive = { case _ => + sender() ! 
BadMsg("hah") } } } diff --git a/akka-remote/src/test/scala/akka/remote/RemoteConsistentHashingRouterSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteConsistentHashingRouterSpec.scala index ff9b06035cd..71589b01614 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteConsistentHashingRouterSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteConsistentHashingRouterSpec.scala @@ -10,8 +10,7 @@ import akka.routing.ConsistentHash import akka.routing.ConsistentRoutee import akka.testkit.AkkaSpec -class RemoteConsistentHashingRouterSpec - extends AkkaSpec(""" +class RemoteConsistentHashingRouterSpec extends AkkaSpec(""" akka.remote.artery.canonical.port = 0 akka.actor.provider = remote """) { diff --git a/akka-remote/src/test/scala/akka/remote/RemoteFeaturesSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteFeaturesSpec.scala index fe1de632b91..db1bdf10e28 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteFeaturesSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteFeaturesSpec.scala @@ -135,7 +135,8 @@ class RemoteFeaturesDisabledSpec extends RemoteFeaturesSpec(RemoteFeaturesSpec.d """)) val masterRef = masterSystem.actorOf(Props[RemoteDeploymentSpec.Echo1](), actorName) - masterRef.path shouldEqual RootActorPath(AddressFromURIString(s"akka://${masterSystem.name}")) / "user" / actorName + masterRef.path shouldEqual RootActorPath( + AddressFromURIString(s"akka://${masterSystem.name}")) / "user" / actorName masterRef.path.address.hasLocalScope shouldBe true masterSystem.actorSelection(RootActorPath(address(system)) / "user" / actorName) ! 
Identify(1) diff --git a/akka-remote/src/test/scala/akka/remote/RemoteRouterSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteRouterSpec.scala index 263cdab0e8d..12978a8e890 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteRouterSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteRouterSpec.scala @@ -16,9 +16,8 @@ import akka.testkit.TestActors.echoActorProps object RemoteRouterSpec { class Parent extends Actor { - def receive = { - case (p: Props, name: String) => - sender() ! context.actorOf(p, name) + def receive = { case (p: Props, name: String) => + sender() ! context.actorOf(p, name) } } } @@ -51,7 +50,8 @@ class RemoteRouterSpec extends AkkaSpec(""" val sysName = system.name val masterSystemName = "Master" + sysName val protocol = "akka" - val conf = ConfigFactory.parseString(s""" + val conf = ConfigFactory + .parseString(s""" akka { actor.deployment { /blub { @@ -84,7 +84,8 @@ class RemoteRouterSpec extends AkkaSpec(""" target.nodes = ["$protocol://${sysName}@localhost:${port}"] } } - }""").withFallback(system.settings.config) + }""") + .withFallback(system.settings.config) val masterSystem = ActorSystem(masterSystemName, conf) override def afterTermination(): Unit = { @@ -226,8 +227,8 @@ class RemoteRouterSpec extends AkkaSpec(""" "set supplied supervisorStrategy" in { val probe = TestProbe()(masterSystem) - val escalator = OneForOneStrategy() { - case e => probe.ref ! e; SupervisorStrategy.Escalate + val escalator = OneForOneStrategy() { case e => + probe.ref ! 
e; SupervisorStrategy.Escalate } val router = masterSystem.actorOf( new RemoteRouterConfig( diff --git a/akka-remote/src/test/scala/akka/remote/artery/ActorRefResolveCacheQuarantineSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/ActorRefResolveCacheQuarantineSpec.scala index 7031cd07c66..084636d4df7 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/ActorRefResolveCacheQuarantineSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/ActorRefResolveCacheQuarantineSpec.scala @@ -13,9 +13,7 @@ import akka.testkit.TestDuration import akka.testkit.TestEvent.Mute import akka.util.Timeout -/** - * Reproducer of issue #29828 - */ +/** Reproducer of issue #29828 */ class ActorRefResolveCacheQuarantineSpec extends ArteryMultiNodeSpec(""" akka.remote.artery.advanced.remove-quarantined-association-after = 2 seconds diff --git a/akka-remote/src/test/scala/akka/remote/artery/ArterySpecSupport.scala b/akka-remote/src/test/scala/akka/remote/artery/ArterySpecSupport.scala index 77e16f0cf59..59556743247 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/ArterySpecSupport.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/ArterySpecSupport.scala @@ -22,9 +22,7 @@ object ArterySpecSupport { } }""") - /** - * Artery enabled, flight recorder enabled, dynamic selection of port on localhost. - */ + /** Artery enabled, flight recorder enabled, dynamic selection of port on localhost. 
*/ def defaultConfig: Config = staticArteryRemotingConfig.withFallback(tlsConfig) // TLS only used if transport=tls-tcp diff --git a/akka-remote/src/test/scala/akka/remote/artery/BindCanonicalAddressSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/BindCanonicalAddressSpec.scala index 618d8d67e63..5731165a29a 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/BindCanonicalAddressSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/BindCanonicalAddressSpec.scala @@ -116,7 +116,8 @@ trait BindCanonicalAddressBehaviors { implicit val sys = ActorSystem("sys", config.withFallback(commonConfig)) getInternal().flatMap(_.port) should contain(getExternal().port.get) - getInternal().map(x => (x.host.get should include).regex("0.0.0.0".r)) // regexp dot is intentional to match IPv4 and 6 addresses + getInternal().map(x => + (x.host.get should include).regex("0.0.0.0".r)) // regexp dot is intentional to match IPv4 and 6 addresses Await.result(sys.terminate(), Duration.Inf) } diff --git a/akka-remote/src/test/scala/akka/remote/artery/DuplicateFlushSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/DuplicateFlushSpec.scala index 8885cdcf363..414875d4ce1 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/DuplicateFlushSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/DuplicateFlushSpec.scala @@ -18,9 +18,11 @@ import akka.testkit.AkkaSpec import akka.testkit.ImplicitSender import akka.util.OptionVal -class DuplicateFlushSpec extends AkkaSpec(""" +class DuplicateFlushSpec + extends AkkaSpec(""" akka.stream.materializer.debug.fuzzing-mode = on - """) with ImplicitSender { + """) + with ImplicitSender { private val pool = new EnvelopeBufferPool(1034 * 1024, 128) private val serialization = SerializationExtension(system) diff --git a/akka-remote/src/test/scala/akka/remote/artery/DuplicateHandshakeSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/DuplicateHandshakeSpec.scala index a4d2ae378e1..d5e3b11af86 100644 --- 
a/akka-remote/src/test/scala/akka/remote/artery/DuplicateHandshakeSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/DuplicateHandshakeSpec.scala @@ -19,9 +19,11 @@ import akka.testkit.AkkaSpec import akka.testkit.ImplicitSender import akka.util.OptionVal -class DuplicateHandshakeSpec extends AkkaSpec(""" +class DuplicateHandshakeSpec + extends AkkaSpec(""" akka.stream.materializer.debug.fuzzing-mode = on - """) with ImplicitSender { + """) + with ImplicitSender { val pool = new EnvelopeBufferPool(1034 * 1024, 128) val serialization = SerializationExtension(system) @@ -56,7 +58,7 @@ class DuplicateHandshakeSpec extends AkkaSpec(""" env } .via(new DuplicateHandshakeReq(numberOfLanes = 3, inboundContext, system.asInstanceOf[ExtendedActorSystem], pool)) - .map { case env: InboundEnvelope => (env.message -> env.lane) } + .map { case env: InboundEnvelope => env.message -> env.lane } .toMat(TestSink[Any]())(Keep.both) .run() } diff --git a/akka-remote/src/test/scala/akka/remote/artery/EnvelopeBufferSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/EnvelopeBufferSpec.scala index 89a0f6b581c..6bbcac61675 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/EnvelopeBufferSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/EnvelopeBufferSpec.scala @@ -74,8 +74,9 @@ class EnvelopeBufferSpec extends AkkaSpec { headerIn.setManifest("manifest1") envelope.writeHeader(headerIn) - envelope.byteBuffer - .position() should ===(EnvelopeBuffer.MetadataContainerAndLiteralSectionOffset) // Fully compressed header + envelope.byteBuffer.position() should ===( + EnvelopeBuffer.MetadataContainerAndLiteralSectionOffset + ) // Fully compressed header envelope.byteBuffer.flip() envelope.parseHeader(headerOut) diff --git a/akka-remote/src/test/scala/akka/remote/artery/FlushOnShutdownSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/FlushOnShutdownSpec.scala index f2c511d6eaf..2ead7e03c84 100644 --- 
a/akka-remote/src/test/scala/akka/remote/artery/FlushOnShutdownSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/FlushOnShutdownSpec.scala @@ -21,24 +21,28 @@ class FlushOnShutdownSpec extends ArteryMultiNodeSpec(ArterySpecSupport.defaultC val probe = TestProbe() val probeRef = probe.ref - localSystem.actorOf(Props(new Actor { - def receive = { - case msg => probeRef ! msg - } - }), "receiver") - - val actorOnSystemB = remoteSystem.actorOf(Props(new Actor { - def receive = { - case "start" => - context.actorSelection(rootActorPath(localSystem) / "user" / "receiver") ! Identify(None) - - case ActorIdentity(_, Some(receiverRef)) => - receiverRef ! "msg1" - receiverRef ! "msg2" - receiverRef ! "msg3" - context.system.terminate() - } - }), "sender") + localSystem.actorOf( + Props(new Actor { + def receive = { case msg => + probeRef ! msg + } + }), + "receiver") + + val actorOnSystemB = remoteSystem.actorOf( + Props(new Actor { + def receive = { + case "start" => + context.actorSelection(rootActorPath(localSystem) / "user" / "receiver") ! Identify(None) + + case ActorIdentity(_, Some(receiverRef)) => + receiverRef ! "msg1" + receiverRef ! "msg2" + receiverRef ! "msg3" + context.system.terminate() + } + }), + "sender") actorOnSystemB ! 
"start" diff --git a/akka-remote/src/test/scala/akka/remote/artery/HandshakeDenySpec.scala b/akka-remote/src/test/scala/akka/remote/artery/HandshakeDenySpec.scala index fb51e3ead2d..fca3f3812ed 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/HandshakeDenySpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/HandshakeDenySpec.scala @@ -14,11 +14,13 @@ import akka.testkit._ object HandshakeDenySpec { - val commonConfig = ConfigFactory.parseString(""" + val commonConfig = ConfigFactory + .parseString(""" akka.loglevel = WARNING akka.remote.artery.advanced.handshake-timeout = 2s akka.remote.artery.advanced.aeron.image-liveness-timeout = 1.9s - """).withFallback(ArterySpecSupport.defaultConfig) + """) + .withFallback(ArterySpecSupport.defaultConfig) } diff --git a/akka-remote/src/test/scala/akka/remote/artery/HandshakeFailureSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/HandshakeFailureSpec.scala index 1c7eebf8d84..5e205cbd6a0 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/HandshakeFailureSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/HandshakeFailureSpec.scala @@ -15,10 +15,12 @@ import akka.testkit.TestProbe object HandshakeFailureSpec { - val commonConfig = ConfigFactory.parseString(""" + val commonConfig = ConfigFactory + .parseString(""" akka.remote.artery.advanced.handshake-timeout = 2s akka.remote.artery.advanced.aeron.image-liveness-timeout = 1.9s - """).withFallback(ArterySpecSupport.defaultConfig) + """) + .withFallback(ArterySpecSupport.defaultConfig) } diff --git a/akka-remote/src/test/scala/akka/remote/artery/HandshakeRetrySpec.scala b/akka-remote/src/test/scala/akka/remote/artery/HandshakeRetrySpec.scala index 0c3d8b0e63a..29c1a621530 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/HandshakeRetrySpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/HandshakeRetrySpec.scala @@ -13,10 +13,12 @@ import akka.testkit.ImplicitSender import akka.testkit.TestActors object 
HandshakeRetrySpec { - val commonConfig = ConfigFactory.parseString(""" + val commonConfig = ConfigFactory + .parseString(""" akka.remote.artery.advanced.handshake-timeout = 10s akka.remote.artery.advanced.aeron.image-liveness-timeout = 7s - """).withFallback(ArterySpecSupport.defaultConfig) + """) + .withFallback(ArterySpecSupport.defaultConfig) } diff --git a/akka-remote/src/test/scala/akka/remote/artery/InboundHandshakeSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/InboundHandshakeSpec.scala index 5831edec871..1b15d12e9a5 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/InboundHandshakeSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/InboundHandshakeSpec.scala @@ -29,9 +29,11 @@ object InboundHandshakeSpec { case object Control3 extends ControlMessage } -class InboundHandshakeSpec extends AkkaSpec(""" +class InboundHandshakeSpec + extends AkkaSpec(""" akka.stream.materializer.debug.fuzzing-mode = on - """) with ImplicitSender { + """) + with ImplicitSender { val addressA = UniqueAddress(Address("akka", "sysA", "hostA", 1001), 1) val addressB = UniqueAddress(Address("akka", "sysB", "hostB", 1002), 2) diff --git a/akka-remote/src/test/scala/akka/remote/artery/LargeMessagesStreamSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/LargeMessagesStreamSpec.scala index f7863772351..ed837b9e3ee 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/LargeMessagesStreamSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/LargeMessagesStreamSpec.scala @@ -18,15 +18,13 @@ object LargeMessagesStreamSpec { case class Pong(bytesReceived: Long) extends JavaSerializable class EchoSize extends Actor { - def receive = { - case Ping(bytes) => sender() ! Pong(bytes.size) + def receive = { case Ping(bytes) => + sender() ! 
Pong(bytes.size) } } } -class LargeMessagesStreamSpec - extends ArteryMultiNodeSpec( - """ +class LargeMessagesStreamSpec extends ArteryMultiNodeSpec(""" akka { remote.artery.large-message-destinations = [ "/user/large1", "/user/large2", "/user/large3" , "/user/largeWildcard*" ] } diff --git a/akka-remote/src/test/scala/akka/remote/artery/LateConnectSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/LateConnectSpec.scala index cc23dc38b29..79b4cec6bb6 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/LateConnectSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/LateConnectSpec.scala @@ -16,10 +16,12 @@ import akka.testkit.TestProbe object LateConnectSpec { - val config = ConfigFactory.parseString(""" + val config = ConfigFactory + .parseString(""" akka.remote.artery.advanced.handshake-timeout = 3s akka.remote.artery.advanced.aeron.image-liveness-timeout = 2.9s - """).withFallback(ArterySpecSupport.defaultConfig) + """) + .withFallback(ArterySpecSupport.defaultConfig) } diff --git a/akka-remote/src/test/scala/akka/remote/artery/MetadataCarryingSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/MetadataCarryingSpec.scala index 914f594ee01..cb65986d774 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/MetadataCarryingSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/MetadataCarryingSpec.scala @@ -101,13 +101,15 @@ object MetadataCarryingSpec { } } -class MetadataCarryingSpec extends ArteryMultiNodeSpec(""" +class MetadataCarryingSpec + extends ArteryMultiNodeSpec(""" akka { remote.artery.advanced { instruments = [ "akka.remote.artery.TestInstrument" ] } } - """) with ImplicitSender { + """) + with ImplicitSender { import MetadataCarryingSpec._ import MetadataCarryingSpy._ diff --git a/akka-remote/src/test/scala/akka/remote/artery/OutboundControlJunctionSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/OutboundControlJunctionSpec.scala index 5744131f4c3..95cf5a6f799 100644 --- 
a/akka-remote/src/test/scala/akka/remote/artery/OutboundControlJunctionSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/OutboundControlJunctionSpec.scala @@ -19,9 +19,11 @@ object OutboundControlJunctionSpec { case object Control3 extends ControlMessage } -class OutboundControlJunctionSpec extends AkkaSpec(""" +class OutboundControlJunctionSpec + extends AkkaSpec(""" akka.stream.materializer.debug.fuzzing-mode = on - """) with ImplicitSender { + """) + with ImplicitSender { import OutboundControlJunctionSpec._ val addressA = UniqueAddress(Address("akka", "sysA", "hostA", 1001), 1) diff --git a/akka-remote/src/test/scala/akka/remote/artery/OutboundHandshakeSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/OutboundHandshakeSpec.scala index e61dd1dc941..b1d0d15c02e 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/OutboundHandshakeSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/OutboundHandshakeSpec.scala @@ -19,9 +19,11 @@ import akka.testkit.AkkaSpec import akka.testkit.ImplicitSender import akka.util.OptionVal -class OutboundHandshakeSpec extends AkkaSpec(""" +class OutboundHandshakeSpec + extends AkkaSpec(""" akka.stream.materializer.debug.fuzzing-mode = on - """) with ImplicitSender { + """) + with ImplicitSender { val addressA = UniqueAddress(Address("akka", "sysA", "hostA", 1001), 1) val addressB = UniqueAddress(Address("akka", "sysB", "hostB", 1002), 2) diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteDeathWatchSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteDeathWatchSpec.scala index b3e1c3d1b05..318fa80d3b8 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/RemoteDeathWatchSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/RemoteDeathWatchSpec.scala @@ -18,7 +18,8 @@ import akka.testkit.SocketUtil object RemoteDeathWatchSpec { val otherPort = ArteryMultiNodeSpec.freePort(ConfigFactory.load()) - val config = ConfigFactory.parseString(s""" + val config = 
ConfigFactory + .parseString(s""" akka { actor { provider = remote @@ -40,7 +41,8 @@ object RemoteDeathWatchSpec { # test is using Java serialization and not priority to rewrite akka.actor.allow-java-serialization = on akka.actor.warn-about-java-serializer-usage = off - """).withFallback(ArterySpecSupport.defaultConfig) + """) + .withFallback(ArterySpecSupport.defaultConfig) } class RemoteDeathWatchSpec @@ -72,8 +74,8 @@ class RemoteDeathWatchSpec system.actorOf(Props(new Actor { context.watch(ref) - def receive = { - case Terminated(r) => testActor ! r + def receive = { case Terminated(r) => + testActor ! r } }).withDeploy(Deploy.local)) @@ -85,15 +87,17 @@ class RemoteDeathWatchSpec "receive Terminated when watched node is unknown host" in { val path = RootActorPath(Address("akka", system.name, "unknownhost", 2552)) / "user" / "subject" - system.actorOf(Props(new Actor { - @nowarn - val watchee = RARP(context.system).provider.resolveActorRef(path) - context.watch(watchee) + system.actorOf( + Props(new Actor { + @nowarn + val watchee = RARP(context.system).provider.resolveActorRef(path) + context.watch(watchee) - def receive = { - case t: Terminated => testActor ! t.actor.path - } - }).withDeploy(Deploy.local), name = "observer2") + def receive = { case t: Terminated => + testActor ! 
t.actor.path + } + }).withDeploy(Deploy.local), + name = "observer2") expectMsg(60.seconds, path) } diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteDeployerSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteDeployerSpec.scala index 9651228d501..348444e850b 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/RemoteDeployerSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/RemoteDeployerSpec.scala @@ -13,7 +13,8 @@ import akka.routing._ import akka.testkit._ object RemoteDeployerSpec { - val deployerConf = ConfigFactory.parseString(""" + val deployerConf = ConfigFactory + .parseString(""" akka.actor.deployment { /service2 { router = round-robin-pool @@ -22,7 +23,8 @@ object RemoteDeployerSpec { dispatcher = mydispatcher } } - """).withFallback(ArterySpecSupport.defaultConfig) + """) + .withFallback(ArterySpecSupport.defaultConfig) class RecipeActor extends Actor { def receive = { case _ => } diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteDeploymentSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteDeploymentSpec.scala index 9c2e1e10b90..a101884304e 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/RemoteDeploymentSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/RemoteDeploymentSpec.scala @@ -44,10 +44,9 @@ object RemoteDeploymentSpec { class Parent(probe: ActorRef) extends Actor { var target: ActorRef = context.system.deadLetters - override val supervisorStrategy = OneForOneStrategy() { - case e: Exception => - probe ! e - SupervisorStrategy.stop + override val supervisorStrategy = OneForOneStrategy() { case e: Exception => + probe ! 
e + SupervisorStrategy.stop } def receive = { @@ -73,7 +72,8 @@ object RemoteDeploymentSpec { class RemoteDeploymentSpec extends ArteryMultiNodeSpec( - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" akka.remote.artery.advanced.inbound-lanes = 10 akka.remote.artery.advanced.outbound-lanes = 3 akka.remote.use-unsafe-remote-features-outside-cluster = on @@ -173,17 +173,17 @@ class RemoteDeploymentSpec val probes = Vector.fill(numParents, numChildren)(TestProbe()(masterSystem)) val childProps = Props[Echo1]() - for (p <- (0 until numParents); c <- (0 until numChildren)) { + for (p <- 0 until numParents; c <- 0 until numChildren) { parents(p).tell((childProps, numMessages), probes(p)(c).ref) } - for (p <- (0 until numParents); c <- (0 until numChildren)) { + for (p <- 0 until numParents; c <- 0 until numChildren) { val probe = probes(p)(c) probe.expectMsgType[ActorRef] // the child } val expectedMessages = (0 until numMessages).toVector - for (p <- (0 until numParents); c <- (0 until numChildren)) { + for (p <- 0 until numParents; c <- 0 until numChildren) { val probe = probes(p)(c) probe.receiveN(numMessages) should equal(expectedMessages) } diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteInstrumentsSerializationSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteInstrumentsSerializationSpec.scala index 5c79681ee4a..511b8b3c4d0 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/RemoteInstrumentsSerializationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/RemoteInstrumentsSerializationSpec.scala @@ -81,7 +81,8 @@ class RemoteInstrumentsSerializationSpec extends AkkaSpec("akka.loglevel = DEBUG } "skip all remote instruments in the message if none are existing" in { - ensureDebugLog("Skipping serialized data in message for RemoteInstrument(s) [1, 10, 31] that has no local match") { + ensureDebugLog( + "Skipping serialized data in message for RemoteInstrument(s) [1, 10, 31] that has no local match") { 
val p = TestProbe() val instruments = Seq(testInstrument(1, "!"), testInstrument(10, ".."), testInstrument(31, "???")) val riS = remoteInstruments(instruments: _*) diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteMessageSerializationSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteMessageSerializationSpec.scala index d30611f5b68..97a8e0738b6 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/RemoteMessageSerializationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/RemoteMessageSerializationSpec.scala @@ -96,12 +96,14 @@ class RemoteMessageSerializationSpec extends ArteryMultiNodeSpec with ImplicitSe private def verifySend(msg: Any)(afterSend: => Unit): Unit = { val bigBounceId = s"bigBounce-${ThreadLocalRandom.current.nextInt()}" - val bigBounceOther = remoteSystem.actorOf(Props(new Actor { - def receive = { - case x: Int => sender() ! byteStringOfSize(x) - case x => sender() ! x - } - }), bigBounceId) + val bigBounceOther = remoteSystem.actorOf( + Props(new Actor { + def receive = { + case x: Int => sender() ! byteStringOfSize(x) + case x => sender() ! x + } + }), + bigBounceId) @nowarn val bigBounceHere = RARP(system).provider.resolveActorRef(s"akka://${remoteSystem.name}@localhost:$remotePort/user/$bigBounceId") diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteRouterSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteRouterSpec.scala index e617140fb3c..553df96fd28 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/RemoteRouterSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/RemoteRouterSpec.scala @@ -17,15 +17,16 @@ import akka.testkit.TestActors.echoActorProps object RemoteRouterSpec { class Parent extends Actor { - def receive = { - case (p: Props, name: String) => - sender() ! context.actorOf(p, name) + def receive = { case (p: Props, name: String) => + sender() ! 
context.actorOf(p, name) } } } class RemoteRouterSpec - extends AkkaSpec(ConfigFactory.parseString(""" + extends AkkaSpec( + ConfigFactory + .parseString(""" akka.remote.use-unsafe-remote-features-outside-cluster = on akka.actor.deployment { /remote-override { @@ -40,13 +41,15 @@ class RemoteRouterSpec router = round-robin-pool nr-of-instances = 6 } - }""").withFallback(ArterySpecSupport.defaultConfig)) { + }""") + .withFallback(ArterySpecSupport.defaultConfig)) { import RemoteRouterSpec._ val port = RARP(system).provider.getDefaultAddress.port.get val sysName = system.name - val conf = ConfigFactory.parseString(s""" + val conf = ConfigFactory + .parseString(s""" akka { actor.deployment { /blub { @@ -79,7 +82,8 @@ class RemoteRouterSpec target.nodes = ["akka://${sysName}@localhost:${port}"] } } - }""").withFallback(system.settings.config) + }""") + .withFallback(system.settings.config) val masterSystem = ActorSystem("Master" + sysName, conf) @@ -223,8 +227,8 @@ class RemoteRouterSpec "set supplied supervisorStrategy" in { val probe = TestProbe()(masterSystem) - val escalator = OneForOneStrategy() { - case e => probe.ref ! e; SupervisorStrategy.Escalate + val escalator = OneForOneStrategy() { case e => + probe.ref ! 
e; SupervisorStrategy.Escalate } val router = masterSystem.actorOf( new RemoteRouterConfig( diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteSendConsistencySpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteSendConsistencySpec.scala index 254a98dea2e..55a0a82412d 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/RemoteSendConsistencySpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/RemoteSendConsistencySpec.scala @@ -23,14 +23,18 @@ import akka.actor.RootActorPath import akka.testkit.{ ImplicitSender, TestActors, TestProbe } class ArteryUpdSendConsistencyWithOneLaneSpec - extends AbstractRemoteSendConsistencySpec(ConfigFactory.parseString(""" + extends AbstractRemoteSendConsistencySpec( + ConfigFactory + .parseString(""" akka.remote.artery.transport = aeron-udp akka.remote.artery.advanced.outbound-lanes = 1 akka.remote.artery.advanced.inbound-lanes = 1 """).withFallback(ArterySpecSupport.defaultConfig)) class ArteryUpdSendConsistencyWithThreeLanesSpec - extends AbstractRemoteSendConsistencySpec(ConfigFactory.parseString(""" + extends AbstractRemoteSendConsistencySpec( + ConfigFactory + .parseString(""" akka.loglevel = DEBUG akka.remote.artery.transport = aeron-udp akka.remote.artery.advanced.outbound-lanes = 3 @@ -38,28 +42,36 @@ class ArteryUpdSendConsistencyWithThreeLanesSpec """).withFallback(ArterySpecSupport.defaultConfig)) class ArteryTcpSendConsistencyWithOneLaneSpec - extends AbstractRemoteSendConsistencySpec(ConfigFactory.parseString(""" + extends AbstractRemoteSendConsistencySpec( + ConfigFactory + .parseString(""" akka.remote.artery.transport = tcp akka.remote.artery.advanced.outbound-lanes = 1 akka.remote.artery.advanced.inbound-lanes = 1 """).withFallback(ArterySpecSupport.defaultConfig)) class ArteryTcpSendConsistencyWithThreeLanesSpec - extends AbstractRemoteSendConsistencySpec(ConfigFactory.parseString(""" + extends AbstractRemoteSendConsistencySpec( + ConfigFactory + .parseString(""" 
akka.remote.artery.transport = tcp akka.remote.artery.advanced.outbound-lanes = 3 akka.remote.artery.advanced.inbound-lanes = 3 """).withFallback(ArterySpecSupport.defaultConfig)) class ArteryTlsTcpSendConsistencyWithOneLaneSpec - extends AbstractRemoteSendConsistencySpec(ConfigFactory.parseString(""" + extends AbstractRemoteSendConsistencySpec( + ConfigFactory + .parseString(""" akka.remote.artery.transport = tls-tcp akka.remote.artery.advanced.outbound-lanes = 1 akka.remote.artery.advanced.inbound-lanes = 1 """).withFallback(ArterySpecSupport.defaultConfig)) class ArteryTlsTcpSendConsistencyWithThreeLanesSpec - extends AbstractRemoteSendConsistencySpec(ConfigFactory.parseString(""" + extends AbstractRemoteSendConsistencySpec( + ConfigFactory + .parseString(""" akka.remote.artery.transport = tls-tcp akka.remote.artery.advanced.outbound-lanes = 1 akka.remote.artery.advanced.inbound-lanes = 1 @@ -87,11 +99,13 @@ abstract class AbstractRemoteSendConsistencySpec(config: Config) "Artery" must { "be able to identify a remote actor and ping it" in { - systemB.actorOf(Props(new Actor { - def receive = { - case "ping" => sender() ! "pong" - } - }), "echo") + systemB.actorOf( + Props(new Actor { + def receive = { case "ping" => + sender() ! "pong" + } + }), + "echo") val actorPath = rootB / "user" / "echo" val echoSel = system.actorSelection(actorPath) @@ -148,16 +162,15 @@ abstract class AbstractRemoteSendConsistencySpec(config: Config) var counter = 1000 remoteRef ! counter - override def receive: Receive = { - case i: Int => - if (i != counter) testActor ! s"Failed, expected $counter got $i" - else if (counter == 0) { - testActor ! "success" - context.stop(self) - } else { - counter -= 1 - remoteRef ! counter - } + override def receive: Receive = { case i: Int => + if (i != counter) testActor ! s"Failed, expected $counter got $i" + else if (counter == 0) { + testActor ! "success" + context.stop(self) + } else { + counter -= 1 + remoteRef ! 
counter + } } }).withDeploy(Deploy.local) @@ -188,16 +201,15 @@ abstract class AbstractRemoteSendConsistencySpec(config: Config) var counter = 1000 sel ! counter - override def receive: Receive = { - case i: Int => - if (i != counter) testActor ! s"Failed, expected $counter got $i" - else if (counter == 0) { - testActor ! "success2" - context.stop(self) - } else { - counter -= 1 - sel ! counter - } + override def receive: Receive = { case i: Int => + if (i != counter) testActor ! s"Failed, expected $counter got $i" + else if (counter == 0) { + testActor ! "success2" + context.stop(self) + } else { + counter -= 1 + sel ! counter + } } }).withDeploy(Deploy.local) diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteWatcherSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteWatcherSpec.scala index a2563159159..6c3ecf479dd 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/RemoteWatcherSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/RemoteWatcherSpec.scala @@ -16,8 +16,8 @@ import akka.testkit._ object RemoteWatcherSpec { class TestActorProxy(testActor: ActorRef) extends Actor { - def receive = { - case msg => testActor.forward(msg) + def receive = { case msg => + testActor.forward(msg) } } diff --git a/akka-remote/src/test/scala/akka/remote/artery/RollingEventLogSimulationSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RollingEventLogSimulationSpec.scala index 2907d87fd2e..7d7d898601a 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/RollingEventLogSimulationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/RollingEventLogSimulationSpec.scala @@ -101,7 +101,7 @@ class RollingEventLogSimulationSpec extends AkkaSpec { val instructions: Array[Instruction] = (Array(AdvanceHeader, TryMarkDirty) :+ - WriteId) ++ + WriteId) ++ Array.fill(EntrySize - 2)(WriteByte) :+ Commit diff --git a/akka-remote/src/test/scala/akka/remote/artery/SendQueueSpec.scala 
b/akka-remote/src/test/scala/akka/remote/artery/SendQueueSpec.scala index 6fe755615cc..0bff81efe12 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/SendQueueSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/SendQueueSpec.scala @@ -49,10 +49,12 @@ object SendQueueSpec { } } -class SendQueueSpec extends AkkaSpec(""" +class SendQueueSpec + extends AkkaSpec(""" akka.stream.materializer.debug.fuzzing-mode = on akka.stream.secret-test-fuzzing-warning-disable = yep - """) with ImplicitSender { + """) + with ImplicitSender { import SendQueueSpec._ def sendToDeadLetters[T](pending: Vector[T]): Unit = diff --git a/akka-remote/src/test/scala/akka/remote/artery/SystemMessageAckerSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/SystemMessageAckerSpec.scala index 6a26cb0b197..bf08ee26b4b 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/SystemMessageAckerSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/SystemMessageAckerSpec.scala @@ -17,9 +17,11 @@ import akka.testkit.ImplicitSender import akka.testkit.TestProbe import akka.util.OptionVal -class SystemMessageAckerSpec extends AkkaSpec(""" +class SystemMessageAckerSpec + extends AkkaSpec(""" akka.stream.materializer.debug.fuzzing-mode = on - """) with ImplicitSender { + """) + with ImplicitSender { val addressA = UniqueAddress(Address("akka", "sysA", "hostA", 1001), 1) val addressB = UniqueAddress(Address("akka", "sysB", "hostB", 1002), 2) diff --git a/akka-remote/src/test/scala/akka/remote/artery/SystemMessageDeliverySpec.scala b/akka-remote/src/test/scala/akka/remote/artery/SystemMessageDeliverySpec.scala index 4377b0f06b7..3430a989e31 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/SystemMessageDeliverySpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/SystemMessageDeliverySpec.scala @@ -37,7 +37,8 @@ object SystemMessageDeliverySpec { case class TestSysMsg(s: String) extends SystemMessageDelivery.AckedDeliveryMessage - val safe = 
ConfigFactory.parseString(""" + val safe = ConfigFactory + .parseString(""" akka.loglevel = INFO akka.remote.artery.advanced.stop-idle-outbound-after = 1000 ms akka.remote.artery.advanced.inject-handshake-interval = 500 ms @@ -45,7 +46,8 @@ object SystemMessageDeliverySpec { akka.remote.artery.log-received-messages = on akka.remote.artery.log-sent-messages = on akka.stream.materializer.debug.fuzzing-mode = on - """).withFallback(ArterySpecSupport.defaultConfig) + """) + .withFallback(ArterySpecSupport.defaultConfig) val config = ConfigFactory.parseString("akka.remote.use-unsafe-remote-features-outside-cluster = on").withFallback(safe) diff --git a/akka-remote/src/test/scala/akka/remote/artery/TransientSerializationErrorSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/TransientSerializationErrorSpec.scala index 778ad1ab612..66ecf67f8fb 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/TransientSerializationErrorSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/TransientSerializationErrorSpec.scala @@ -53,9 +53,7 @@ object TransientSerializationErrorSpec { } class TransientSerializationErrorSpec - extends AkkaSpec( - ArterySpecSupport.defaultConfig.withFallback( - ConfigFactory.parseString(""" + extends AkkaSpec(ArterySpecSupport.defaultConfig.withFallback(ConfigFactory.parseString(""" akka { loglevel = info actor { diff --git a/akka-remote/src/test/scala/akka/remote/artery/UntrustedSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/UntrustedSpec.scala index 1c69aabcaab..b560b0f554b 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/UntrustedSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/UntrustedSpec.scala @@ -46,23 +46,25 @@ object UntrustedSpec { override def postStop(): Unit = { testActor ! 
s"${self.path.name} stopped" } - def receive = { - case msg => testActor.forward(msg) + def receive = { case msg => + testActor.forward(msg) } } class FakeUser(testActor: ActorRef) extends Actor { context.actorOf(Props(classOf[Child], testActor), "receptionist") - def receive = { - case msg => testActor.forward(msg) + def receive = { case msg => + testActor.forward(msg) } } - val config = ConfigFactory.parseString(""" + val config = ConfigFactory + .parseString(""" akka.remote.artery.untrusted-mode = on akka.remote.artery.trusted-selection-paths = ["/user/receptionist", ] akka.loglevel = DEBUG # test verifies debug - """).withFallback(ArterySpecSupport.defaultConfig) + """) + .withFallback(ArterySpecSupport.defaultConfig) } @@ -103,13 +105,17 @@ class UntrustedSpec extends ArteryMultiNodeSpec(UntrustedSpec.config) with Impli "discard harmful messages to /remote" in { val logProbe = TestProbe() // but instead install our own listener - system.eventStream.subscribe(system.actorOf(Props(new Actor { - import Logging._ - def receive = { - case d @ Debug(_, _, msg: String) if msg contains "dropping" => logProbe.ref ! d - case _ => - } - }).withDeploy(Deploy.local), "debugSniffer"), classOf[Logging.Debug]) + system.eventStream.subscribe( + system.actorOf( + Props(new Actor { + import Logging._ + def receive = { + case d @ Debug(_, _, msg: String) if msg contains "dropping" => logProbe.ref ! d + case _ => + } + }).withDeploy(Deploy.local), + "debugSniffer"), + classOf[Logging.Debug]) remoteDaemon ! "hello" logProbe.expectMsgType[Logging.Debug] @@ -126,8 +132,8 @@ class UntrustedSpec extends ArteryMultiNodeSpec(UntrustedSpec.config) with Impli "discard watch messages" in { client.actorOf(Props(new Actor { context.watch(target2) - def receive = { - case x => testActor.forward(x) + def receive = { case x => + testActor.forward(x) } }).withDeploy(Deploy.local)) receptionist ! 
StopChild("child2") diff --git a/akka-remote/src/test/scala/akka/remote/artery/aeron/AeronSinkSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/aeron/AeronSinkSpec.scala index 689338e7da4..65dd45967aa 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/aeron/AeronSinkSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/aeron/AeronSinkSpec.scala @@ -23,9 +23,11 @@ import akka.testkit.AkkaSpec import akka.testkit.ImplicitSender import akka.testkit.SocketUtil -class AeronSinkSpec extends AkkaSpec(""" +class AeronSinkSpec + extends AkkaSpec(""" akka.stream.materializer.debug.fuzzing-mode = on - """) with ImplicitSender { + """) + with ImplicitSender { val driver = MediaDriver.launchEmbedded() diff --git a/akka-remote/src/test/scala/akka/remote/artery/compress/CompressionIntegrationSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/compress/CompressionIntegrationSpec.scala index 0248cf8ad26..db29371dd73 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/compress/CompressionIntegrationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/compress/CompressionIntegrationSpec.scala @@ -79,13 +79,13 @@ class CompressionIntegrationSpec awaitAssert { val a1 = aManifestProbe.expectMsgType[Events.ReceivedClassManifestCompressionTable](2.seconds) info("System [A] received: " + a1) - a1.table.version.toInt should be >= (1) + a1.table.version.toInt should be >= 1 a1.table.dictionary.keySet should contain("TestMessageManifest") } awaitAssert { val a1 = aRefProbe.expectMsgType[Events.ReceivedActorRefCompressionTable](2.seconds) info("System [A] received: " + a1) - a1.table.version.toInt should be >= (1) + a1.table.version.toInt should be >= 1 a1.table.dictionary.keySet should contain(echoRefA) // recipient a1.table.dictionary.keySet should contain(testActor) // sender } @@ -94,13 +94,13 @@ class CompressionIntegrationSpec awaitAssert { val b1 = bManifestProbe.expectMsgType[Events.ReceivedClassManifestCompressionTable](2.seconds) 
info("System [B] received: " + b1) - b1.table.version.toInt should be >= (1) + b1.table.version.toInt should be >= 1 b1.table.dictionary.keySet should contain("TestMessageManifest") } awaitAssert { val b1 = bRefProbe.expectMsgType[Events.ReceivedActorRefCompressionTable](2.seconds) info("System [B] received: " + b1) - b1.table.version.toInt should be >= (1) + b1.table.version.toInt should be >= 1 b1.table.dictionary.keySet should contain(echoRefB) } } @@ -112,26 +112,26 @@ class CompressionIntegrationSpec echoRefA.tell(TestMessage("hello2"), ignore.ref) val a2 = aManifestProbe.expectMsgType[Events.ReceivedClassManifestCompressionTable](2.seconds) info("System [A] received more: " + a2) - a2.table.version.toInt should be >= (3) + a2.table.version.toInt should be >= 3 } awaitAssert { echoRefA.tell(TestMessage("hello2"), ignore.ref) val a2 = aRefProbe.expectMsgType[Events.ReceivedActorRefCompressionTable](2.seconds) info("System [A] received more: " + a2) - a2.table.version.toInt should be >= (3) + a2.table.version.toInt should be >= 3 } awaitAssert { echoRefA.tell(TestMessage("hello3"), ignore.ref) val b2 = bManifestProbe.expectMsgType[Events.ReceivedClassManifestCompressionTable](2.seconds) info("System [B] received more: " + b2) - b2.table.version.toInt should be >= (3) + b2.table.version.toInt should be >= 3 } awaitAssert { echoRefA.tell(TestMessage("hello3"), ignore.ref) val b2 = bRefProbe.expectMsgType[Events.ReceivedActorRefCompressionTable](2.seconds) info("System [B] received more: " + b2) - b2.table.version.toInt should be >= (3) + b2.table.version.toInt should be >= 3 } } } @@ -301,15 +301,15 @@ class CompressionIntegrationSpec awaitAssert { val a2 = aManifestProbe.expectMsgType[Events.ReceivedClassManifestCompressionTable](2.seconds) info("System [A] received: " + a2) - a2.table.version.toInt should be >= (1) - a2.table.version.toInt should be < (3) + a2.table.version.toInt should be >= 1 + a2.table.version.toInt should be < 3 a2.table.dictionary.keySet 
should contain("TestMessageManifest") } awaitAssert { val a2 = aRefProbe.expectMsgType[Events.ReceivedActorRefCompressionTable](2.seconds) info("System [A] received: " + a2) - a2.table.version.toInt should be >= (1) - a2.table.version.toInt should be < (3) + a2.table.version.toInt should be >= 1 + a2.table.version.toInt should be < 3 a2.table.dictionary.keySet should contain(echoRefA) // recipient a2.table.dictionary.keySet should contain(testActor) // sender } @@ -318,13 +318,13 @@ class CompressionIntegrationSpec awaitAssert { val b2 = bManifestProbe.expectMsgType[Events.ReceivedClassManifestCompressionTable](2.seconds) info("System [B2] received: " + b2) - b2.table.version.toInt should be >= (1) + b2.table.version.toInt should be >= 1 b2.table.dictionary.keySet should contain("TestMessageManifest") } awaitAssert { val b2 = bRefProbe.expectMsgType[Events.ReceivedActorRefCompressionTable](2.seconds) info("System [B] received: " + b2) - b2.table.version.toInt should be >= (1) + b2.table.version.toInt should be >= 1 b2.table.dictionary.keySet should contain(echoRefB2) } } @@ -377,14 +377,16 @@ class CompressionIntegrationSpec receiveN(messagesToExchange) // the replies var currentTable: CompressionTable[ActorRef] = null - receivedActorRefCompressionTableProbe.awaitAssert({ - // discard duplicates with awaitAssert until we receive next version - val receivedActorRefCompressionTable = - receivedActorRefCompressionTableProbe.expectMsgType[Events.ReceivedActorRefCompressionTable](10.seconds) - - currentTable = receivedActorRefCompressionTable.table - seenTableVersions = currentTable.version :: seenTableVersions - }, max = 10.seconds) + receivedActorRefCompressionTableProbe.awaitAssert( + { + // discard duplicates with awaitAssert until we receive next version + val receivedActorRefCompressionTable = + receivedActorRefCompressionTableProbe.expectMsgType[Events.ReceivedActorRefCompressionTable](10.seconds) + + currentTable = receivedActorRefCompressionTable.table + 
seenTableVersions = currentTable.version :: seenTableVersions + }, + max = 10.seconds) // debugging: info("Seen versions: " + seenTableVersions) lastTable = currentTable diff --git a/akka-remote/src/test/scala/akka/remote/artery/compress/HandshakeShouldDropCompressionTableSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/compress/HandshakeShouldDropCompressionTableSpec.scala index 5fa715fc529..af3b88ccad8 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/compress/HandshakeShouldDropCompressionTableSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/compress/HandshakeShouldDropCompressionTableSpec.scala @@ -19,7 +19,8 @@ import akka.testkit._ import akka.util.Timeout object HandshakeShouldDropCompressionTableSpec { - val commonConfig = ConfigFactory.parseString(""" + val commonConfig = ConfigFactory + .parseString(""" akka { remote.artery.advanced.handshake-timeout = 10s remote.artery.advanced.aeron.image-liveness-timeout = 7s @@ -31,7 +32,8 @@ object HandshakeShouldDropCompressionTableSpec { } } } - """).withFallback(ArterySpecSupport.defaultConfig) + """) + .withFallback(ArterySpecSupport.defaultConfig) } diff --git a/akka-remote/src/test/scala/akka/remote/artery/tcp/TcpFramingSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/tcp/TcpFramingSpec.scala index cbc7f511122..d332880dbaf 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/tcp/TcpFramingSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/tcp/TcpFramingSpec.scala @@ -15,9 +15,11 @@ import akka.testkit.AkkaSpec import akka.testkit.ImplicitSender import akka.util.ByteString -class TcpFramingSpec extends AkkaSpec(""" +class TcpFramingSpec + extends AkkaSpec(""" akka.stream.materializer.debug.fuzzing-mode = on - """) with ImplicitSender { + """) + with ImplicitSender { import TcpFraming.encodeFrameHeader private val framingFlow = Flow[ByteString].via(new TcpFraming) diff --git a/akka-remote/src/test/scala/akka/remote/artery/tcp/TlsTcpSpec.scala 
b/akka-remote/src/test/scala/akka/remote/artery/tcp/TlsTcpSpec.scala index 36aa8935822..c3df7b52a2c 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/tcp/TlsTcpSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/tcp/TlsTcpSpec.scala @@ -26,24 +26,21 @@ import akka.testkit.TestProbe class TlsTcpWithDefaultConfigSpec extends TlsTcpSpec(ConfigFactory.empty()) -class TlsTcpWithSHA1PRNGSpec - extends TlsTcpSpec(ConfigFactory.parseString(""" +class TlsTcpWithSHA1PRNGSpec extends TlsTcpSpec(ConfigFactory.parseString(""" akka.remote.artery.ssl.config-ssl-engine { random-number-generator = "SHA1PRNG" enabled-algorithms = ["TLS_DHE_RSA_WITH_AES_256_GCM_SHA384"] } """)) -class TlsTcpWithDefaultRNGSecureSpec - extends TlsTcpSpec(ConfigFactory.parseString(""" +class TlsTcpWithDefaultRNGSecureSpec extends TlsTcpSpec(ConfigFactory.parseString(""" akka.remote.artery.ssl.config-ssl-engine { random-number-generator = "" enabled-algorithms = ["TLS_DHE_RSA_WITH_AES_256_GCM_SHA384"] } """)) -class TlsTcpWithCrappyRSAWithMD5OnlyHereToMakeSureThingsWorkSpec - extends TlsTcpSpec(ConfigFactory.parseString(""" +class TlsTcpWithCrappyRSAWithMD5OnlyHereToMakeSureThingsWorkSpec extends TlsTcpSpec(ConfigFactory.parseString(""" akka.remote.artery.ssl.config-ssl-engine { random-number-generator = "" enabled-algorithms = [""SSL_RSA_WITH_NULL_MD5""] @@ -147,7 +144,9 @@ abstract class TlsTcpSpec(config: Config) } class TlsTcpWithHostnameVerificationSpec - extends ArteryMultiNodeSpec(ConfigFactory.parseString(""" + extends ArteryMultiNodeSpec( + ConfigFactory + .parseString(""" akka.remote.artery.ssl.config-ssl-engine { hostname-verification = on } @@ -178,8 +177,7 @@ class TlsTcpWithHostnameVerificationSpec // depending on JRE version. 
EventFilter .warning( - pattern = - "outbound connection to \\[akka://systemB@127.0.0.1:.*" + + pattern = "outbound connection to \\[akka://systemB@127.0.0.1:.*" + "Upstream failed, cause: SSLHandshakeException: .*", occurrences = 3) .intercept { diff --git a/akka-remote/src/test/scala/akka/remote/artery/tcp/ssl/PemManagersProviderSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/tcp/ssl/PemManagersProviderSpec.scala index 02dfd374a67..59286472bef 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/tcp/ssl/PemManagersProviderSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/tcp/ssl/PemManagersProviderSpec.scala @@ -12,7 +12,6 @@ import org.scalatest.matchers.must.Matchers import org.scalatest.wordspec.AnyWordSpec /** - * */ class PemManagersProviderSpec extends AnyWordSpec with Matchers { diff --git a/akka-remote/src/test/scala/akka/remote/artery/tcp/ssl/RotatingKeysSSLEngineProviderSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/tcp/ssl/RotatingKeysSSLEngineProviderSpec.scala index 54b1eb2f8f2..29f7c0ce7b7 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/tcp/ssl/RotatingKeysSSLEngineProviderSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/tcp/ssl/RotatingKeysSSLEngineProviderSpec.scala @@ -294,8 +294,8 @@ class RemoteSystem( val sslContextRef = new AtomicReference[SSLContext]() val sslProviderSetup = - SSLEngineProviderSetup( - sys => new ProbedSSLEngineProvider(sys, sslContextRef, sslProviderServerProbe, sslProviderClientProbe)) + SSLEngineProviderSetup(sys => + new ProbedSSLEngineProvider(sys, sslContextRef, sslProviderServerProbe, sslProviderClientProbe)) val actorSystem = newRemoteSystem(Some(configString), Some(name), Some(ActorSystemSetup(sslProviderSetup))) diff --git a/akka-remote/src/test/scala/akka/remote/artery/tcp/ssl/TlsResourcesSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/tcp/ssl/TlsResourcesSpec.scala index debc6a685ed..30db5d9bf13 100644 --- 
a/akka-remote/src/test/scala/akka/remote/artery/tcp/ssl/TlsResourcesSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/tcp/ssl/TlsResourcesSpec.scala @@ -16,7 +16,6 @@ import org.scalatest.wordspec.AnyWordSpec import akka.util.ccompat.JavaConverters._ /** - * */ class TlsResourcesSpec extends AnyWordSpec with Matchers { @@ -34,7 +33,7 @@ class TlsResourcesSpec extends AnyWordSpec with Matchers { val sameSan = baseServers + baseClient + baseNode + baseRsaClient sameSan.foreach { prefix => val serverCert = loadCert(s"/ssl/$prefix.example.com.crt") - X509Readers.getAllSubjectNames(serverCert).contains("example.com") mustBe (true) + X509Readers.getAllSubjectNames(serverCert).contains("example.com") mustBe true } } @@ -42,7 +41,7 @@ class TlsResourcesSpec extends AnyWordSpec with Matchers { val notExampleSan = arteryNodeSet + baseIslandServer notExampleSan.foreach { prefix => val cert = loadCert(s"/ssl/$prefix.example.com.crt") - X509Readers.getAllSubjectNames(cert).contains("example.com") mustBe (false) + X509Readers.getAllSubjectNames(cert).contains("example.com") mustBe false } } @@ -52,7 +51,7 @@ class TlsResourcesSpec extends AnyWordSpec with Matchers { val clients = Set(baseClient, baseNode, baseRsaClient) ++ arteryNodeSet clients.foreach { prefix => val cert = loadCert(s"/ssl/$prefix.example.com.crt") - cert.getExtendedKeyUsage.asScala.contains(clientAuth) mustBe (true) + cert.getExtendedKeyUsage.asScala.contains(clientAuth) mustBe true } } @@ -60,7 +59,7 @@ class TlsResourcesSpec extends AnyWordSpec with Matchers { val servers = baseServers + baseIslandServer + baseNode ++ arteryNodeSet servers.foreach { prefix => val serverCert = loadCert(s"/ssl/$prefix.example.com.crt") - serverCert.getExtendedKeyUsage.asScala.contains(serverAuth) mustBe (true) + serverCert.getExtendedKeyUsage.asScala.contains(serverAuth) mustBe true } } diff --git a/akka-remote/src/test/scala/akka/remote/artery/tcp/ssl/X509ReadersSpec.scala 
b/akka-remote/src/test/scala/akka/remote/artery/tcp/ssl/X509ReadersSpec.scala index e1f48d20fd2..8d449a6c882 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/tcp/ssl/X509ReadersSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/tcp/ssl/X509ReadersSpec.scala @@ -8,7 +8,6 @@ import org.scalatest.matchers.must.Matchers import org.scalatest.wordspec.AnyWordSpec /** - * */ class X509ReadersSpec extends AnyWordSpec with Matchers { import TlsResourcesSpec._ diff --git a/akka-remote/src/test/scala/akka/remote/serialization/DaemonMsgCreateSerializerAllowJavaSerializationSpec.scala b/akka-remote/src/test/scala/akka/remote/serialization/DaemonMsgCreateSerializerAllowJavaSerializationSpec.scala index 16955b75456..f48f257beac 100644 --- a/akka-remote/src/test/scala/akka/remote/serialization/DaemonMsgCreateSerializerAllowJavaSerializationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/serialization/DaemonMsgCreateSerializerAllowJavaSerializationSpec.scala @@ -52,11 +52,10 @@ private[akka] trait SerializationVerification { self: AkkaSpec => // can't compare props.creator when function got.props.clazz should ===(expected.props.clazz) got.props.args.length should ===(expected.props.args.length) - got.props.args.zip(expected.props.args).foreach { - case (g, e) => - if (e.isInstanceOf[Function0[_]]) () - else if (e.isInstanceOf[Function1[_, _]]) () - else g should ===(e) + got.props.args.zip(expected.props.args).foreach { case (g, e) => + if (e.isInstanceOf[Function0[_]]) () + else if (e.isInstanceOf[Function1[_, _]]) () + else g should ===(e) } got.props.deploy should ===(expected.props.deploy) got.deploy should ===(expected.deploy) @@ -127,9 +126,11 @@ class DaemonMsgCreateSerializerAllowJavaSerializationSpec } } -class DaemonMsgCreateSerializerNoJavaSerializationSpec extends AkkaSpec(""" +class DaemonMsgCreateSerializerNoJavaSerializationSpec + extends AkkaSpec(""" akka.actor.allow-java-serialization=off - """) with SerializationVerification { + """) 
+ with SerializationVerification { import DaemonMsgCreateSerializerAllowJavaSerializationSpec.MyActor diff --git a/akka-remote/src/test/scala/akka/remote/serialization/MiscMessageSerializerSpec.scala b/akka-remote/src/test/scala/akka/remote/serialization/MiscMessageSerializerSpec.scala index 6c0eeac3cd0..0be0aeaa202 100644 --- a/akka-remote/src/test/scala/akka/remote/serialization/MiscMessageSerializerSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/serialization/MiscMessageSerializerSpec.scala @@ -45,8 +45,8 @@ object MiscMessageSerializerSpec { e.getMessage == getMessage && e.getCause == getCause && // on JDK9+ the stacktraces aren't equal, something about how they are constructed // they are alike enough to be roughly equal though - e.stackTrace.zip(stackTrace).forall { - case (t, o) => t.getClassName == o.getClassName && t.getFileName == o.getFileName + e.stackTrace.zip(stackTrace).forall { case (t, o) => + t.getClassName == o.getClassName && t.getFileName == o.getFileName } case _ => false } diff --git a/akka-remote/src/test/scala/akka/remote/serialization/PrimitivesSerializationSpec.scala b/akka-remote/src/test/scala/akka/remote/serialization/PrimitivesSerializationSpec.scala index 2e9290da284..05053ed5356 100644 --- a/akka-remote/src/test/scala/akka/remote/serialization/PrimitivesSerializationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/serialization/PrimitivesSerializationSpec.scala @@ -120,19 +120,18 @@ class PrimitivesSerializationSpec extends AkkaSpec(PrimitivesSerializationSpec.t "StringSerializer" must { val random = Random.nextString(256) Seq("empty string" -> "", "hello" -> "hello", "árvíztűrőütvefúrógép" -> "árvíztűrőütvefúrógép", "random" -> random) - .foreach { - case (scenario, item) => - s"resolve serializer for [$scenario]" in { - serializerFor(item).getClass should ===(classOf[StringSerializer]) - } - - s"serialize and de-serialize [$scenario]" in { - verifySerialization(item) - } - - s"serialize and de-serialize value 
[$scenario] using ByteBuffers" in { - verifySerializationByteBuffer(item) - } + .foreach { case (scenario, item) => + s"resolve serializer for [$scenario]" in { + serializerFor(item).getClass should ===(classOf[StringSerializer]) + } + + s"serialize and de-serialize [$scenario]" in { + verifySerialization(item) + } + + s"serialize and de-serialize value [$scenario] using ByteBuffers" in { + verifySerializationByteBuffer(item) + } } "have right serializer id" in { diff --git a/akka-remote/src/test/scala/akka/remote/serialization/SerializationTransportInformationSpec.scala b/akka-remote/src/test/scala/akka/remote/serialization/SerializationTransportInformationSpec.scala index f615f74d73f..5d8a9da189b 100644 --- a/akka-remote/src/test/scala/akka/remote/serialization/SerializationTransportInformationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/serialization/SerializationTransportInformationSpec.scala @@ -72,8 +72,7 @@ object SerializationTransportInformationSpec { } class SerializationTransportInformationSpec - extends AkkaSpec( - ConfigFactory.parseString(""" + extends AkkaSpec(ConfigFactory.parseString(""" akka { loglevel = info actor { diff --git a/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/ActorRefModule.scala b/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/ActorRefModule.scala index d27e6252767..d778d8dd00f 100644 --- a/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/ActorRefModule.scala +++ b/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/ActorRefModule.scala @@ -17,23 +17,17 @@ import com.fasterxml.jackson.databind.ser.std.StdScalarSerializer import akka.actor.ActorRef import akka.annotation.InternalApi -/** - * INTERNAL API: Adds support for serializing and deserializing [[ActorRef]]. - */ +/** INTERNAL API: Adds support for serializing and deserializing [[ActorRef]]. 
*/ @InternalApi private[akka] trait ActorRefModule extends JacksonModule { addSerializer(classOf[ActorRef], () => ActorRefSerializer.instance, () => ActorRefDeserializer.instance) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ActorRefSerializer { val instance: ActorRefSerializer = new ActorRefSerializer } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class ActorRefSerializer extends StdScalarSerializer[ActorRef](classOf[ActorRef]) with ActorSystemAccess { @@ -43,16 +37,12 @@ import akka.annotation.InternalApi } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ActorRefDeserializer { val instance: ActorRefDeserializer = new ActorRefDeserializer } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class ActorRefDeserializer extends StdScalarDeserializer[ActorRef](classOf[ActorRef]) with ActorSystemAccess { diff --git a/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/ActorSystemAccess.scala b/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/ActorSystemAccess.scala index 290268e2829..7fe276363b3 100644 --- a/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/ActorSystemAccess.scala +++ b/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/ActorSystemAccess.scala @@ -8,9 +8,7 @@ import akka.actor.ExtendedActorSystem import akka.annotation.InternalApi import akka.serialization.Serialization -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] trait ActorSystemAccess { def currentSystem(): ExtendedActorSystem = { Serialization.currentTransportInformation.value match { @@ -22,7 +20,5 @@ import akka.serialization.Serialization } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ActorSystemAccess extends ActorSystemAccess diff --git a/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/AddressModule.scala 
b/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/AddressModule.scala index 6e290ea476d..5993f5a9b8a 100644 --- a/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/AddressModule.scala +++ b/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/AddressModule.scala @@ -16,39 +16,29 @@ import akka.actor.Address import akka.actor.AddressFromURIString import akka.annotation.InternalApi -/** - * INTERNAL API: Adds support for serializing and deserializing [[Address]]. - */ +/** INTERNAL API: Adds support for serializing and deserializing [[Address]]. */ @InternalApi private[akka] trait AddressModule extends JacksonModule { addSerializer(classOf[Address], () => AddressSerializer.instance, () => AddressDeserializer.instance) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object AddressSerializer { val instance: AddressSerializer = new AddressSerializer } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class AddressSerializer extends StdScalarSerializer[Address](classOf[Address]) { override def serialize(value: Address, jgen: JsonGenerator, provider: SerializerProvider): Unit = { jgen.writeString(value.toString) } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object AddressDeserializer { val instance: AddressDeserializer = new AddressDeserializer } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class AddressDeserializer extends StdScalarDeserializer[Address](classOf[Address]) { def deserialize(jp: JsonParser, ctxt: DeserializationContext): Address = { diff --git a/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/AkkaJacksonModule.scala b/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/AkkaJacksonModule.scala index bda02358286..4af2630ddb0 100644 --- a/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/AkkaJacksonModule.scala +++ 
b/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/AkkaJacksonModule.scala @@ -4,9 +4,7 @@ package akka.serialization.jackson -/** - * Complete module with support for all custom serializers. - */ +/** Complete module with support for all custom serializers. */ class AkkaJacksonModule extends JacksonModule with ActorRefModule with AddressModule with FiniteDurationModule { override def getModuleName = "AkkaJacksonModule" } diff --git a/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/FiniteDurationModule.scala b/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/FiniteDurationModule.scala index 3c4892bc540..6bbde927763 100644 --- a/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/FiniteDurationModule.scala +++ b/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/FiniteDurationModule.scala @@ -18,9 +18,7 @@ import com.fasterxml.jackson.datatype.jsr310.ser.DurationSerializer import akka.annotation.InternalApi import akka.util.JavaDurationConverters._ -/** - * INTERNAL API: Adds support for serializing and deserializing [[FiniteDuration]]. - */ +/** INTERNAL API: Adds support for serializing and deserializing [[FiniteDuration]]. 
*/ @InternalApi private[akka] trait FiniteDurationModule extends JacksonModule { addSerializer( classOf[FiniteDuration], @@ -28,16 +26,12 @@ import akka.util.JavaDurationConverters._ () => FiniteDurationDeserializer.instance) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object FiniteDurationSerializer { val instance: FiniteDurationSerializer = new FiniteDurationSerializer } -/** - * INTERNAL API: Delegates to DurationSerializer in `jackson-modules-java8` - */ +/** INTERNAL API: Delegates to DurationSerializer in `jackson-modules-java8` */ @InternalApi private[akka] class FiniteDurationSerializer extends StdScalarSerializer[FiniteDuration](classOf[FiniteDuration]) { override def serialize(value: FiniteDuration, jgen: JsonGenerator, provider: SerializerProvider): Unit = { @@ -45,16 +39,12 @@ import akka.util.JavaDurationConverters._ } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object FiniteDurationDeserializer { val instance: FiniteDurationDeserializer = new FiniteDurationDeserializer } -/** - * INTERNAL API: Delegates to DurationDeserializer in `jackson-modules-java8` - */ +/** INTERNAL API: Delegates to DurationDeserializer in `jackson-modules-java8` */ @InternalApi private[akka] class FiniteDurationDeserializer extends StdScalarDeserializer[FiniteDuration](classOf[FiniteDuration]) { diff --git a/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/JacksonModule.scala b/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/JacksonModule.scala index 59c5e9f9a1c..09aebc5f08e 100644 --- a/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/JacksonModule.scala +++ b/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/JacksonModule.scala @@ -21,9 +21,7 @@ import com.fasterxml.jackson.databind.ser.Serializers import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object JacksonModule { 
lazy val version: Version = { @@ -62,16 +60,12 @@ import akka.annotation.InternalApi } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object VersionExtractor { def unapply(v: Version) = Some((v.getMajorVersion, v.getMinorVersion)) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] trait JacksonModule extends Module { import JacksonModule._ diff --git a/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/JacksonObjectMapperProvider.scala b/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/JacksonObjectMapperProvider.scala index bf5bfd9eaa4..f46be808c7e 100644 --- a/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/JacksonObjectMapperProvider.scala +++ b/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/JacksonObjectMapperProvider.scala @@ -55,9 +55,7 @@ object JacksonObjectMapperProvider extends ExtensionId[JacksonObjectMapperProvid override def createExtension(system: ExtendedActorSystem): JacksonObjectMapperProvider = new JacksonObjectMapperProvider(system) - /** - * The configuration for a given `bindingName`. - */ + /** The configuration for a given `bindingName`. 
*/ def configForBinding(bindingName: String, systemConfig: Config): Config = { val basePath = "akka.serialization.jackson" val baseConf = systemConfig.getConfig("akka.serialization.jackson") @@ -85,43 +83,43 @@ object JacksonObjectMapperProvider extends ExtensionId[JacksonObjectMapperProvid } val configuredStreamReadFeatures = - features(config, "stream-read-features").map { - case (enumName, value) => StreamReadFeature.valueOf(enumName) -> value + features(config, "stream-read-features").map { case (enumName, value) => + StreamReadFeature.valueOf(enumName) -> value } val streamReadFeatures = objectMapperFactory.overrideConfiguredStreamReadFeatures(bindingName, configuredStreamReadFeatures) - streamReadFeatures.foreach { - case (feature, value) => jsonFactory.configure(feature.mappedFeature, value) + streamReadFeatures.foreach { case (feature, value) => + jsonFactory.configure(feature.mappedFeature, value) } val configuredStreamWriteFeatures = - features(config, "stream-write-features").map { - case (enumName, value) => StreamWriteFeature.valueOf(enumName) -> value + features(config, "stream-write-features").map { case (enumName, value) => + StreamWriteFeature.valueOf(enumName) -> value } val streamWriteFeatures = objectMapperFactory.overrideConfiguredStreamWriteFeatures(bindingName, configuredStreamWriteFeatures) - streamWriteFeatures.foreach { - case (feature, value) => jsonFactory.configure(feature.mappedFeature, value) + streamWriteFeatures.foreach { case (feature, value) => + jsonFactory.configure(feature.mappedFeature, value) } val configuredJsonReadFeatures = - features(config, "json-read-features").map { - case (enumName, value) => JsonReadFeature.valueOf(enumName) -> value + features(config, "json-read-features").map { case (enumName, value) => + JsonReadFeature.valueOf(enumName) -> value } val jsonReadFeatures = objectMapperFactory.overrideConfiguredJsonReadFeatures(bindingName, configuredJsonReadFeatures) - jsonReadFeatures.foreach { - case (feature, 
value) => jsonFactory.configure(feature.mappedFeature, value) + jsonReadFeatures.foreach { case (feature, value) => + jsonFactory.configure(feature.mappedFeature, value) } val configuredJsonWriteFeatures = - features(config, "json-write-features").map { - case (enumName, value) => JsonWriteFeature.valueOf(enumName) -> value + features(config, "json-write-features").map { case (enumName, value) => + JsonWriteFeature.valueOf(enumName) -> value } val jsonWriteFeatures = objectMapperFactory.overrideConfiguredJsonWriteFeatures(bindingName, configuredJsonWriteFeatures) - jsonWriteFeatures.foreach { - case (feature, value) => jsonFactory.configure(feature.mappedFeature, value) + jsonWriteFeatures.foreach { case (feature, value) => + jsonFactory.configure(feature.mappedFeature, value) } jsonFactory @@ -135,54 +133,53 @@ object JacksonObjectMapperProvider extends ExtensionId[JacksonObjectMapperProvid config: Config): Unit = { val configuredSerializationFeatures = - features(config, "serialization-features").map { - case (enumName, value) => SerializationFeature.valueOf(enumName) -> value + features(config, "serialization-features").map { case (enumName, value) => + SerializationFeature.valueOf(enumName) -> value } val serializationFeatures = objectMapperFactory.overrideConfiguredSerializationFeatures(bindingName, configuredSerializationFeatures) - serializationFeatures.foreach { - case (feature, value) => objectMapper.configure(feature, value) + serializationFeatures.foreach { case (feature, value) => + objectMapper.configure(feature, value) } val configuredDeserializationFeatures = - features(config, "deserialization-features").map { - case (enumName, value) => DeserializationFeature.valueOf(enumName) -> value + features(config, "deserialization-features").map { case (enumName, value) => + DeserializationFeature.valueOf(enumName) -> value } val deserializationFeatures = objectMapperFactory.overrideConfiguredDeserializationFeatures(bindingName, 
configuredDeserializationFeatures) - deserializationFeatures.foreach { - case (feature, value) => objectMapper.configure(feature, value) + deserializationFeatures.foreach { case (feature, value) => + objectMapper.configure(feature, value) } - val configuredMapperFeatures = features(config, "mapper-features").map { - case (enumName, value) => MapperFeature.valueOf(enumName) -> value + val configuredMapperFeatures = features(config, "mapper-features").map { case (enumName, value) => + MapperFeature.valueOf(enumName) -> value } val mapperFeatures = objectMapperFactory.overrideConfiguredMapperFeatures(bindingName, configuredMapperFeatures) - mapperFeatures.foreach { - case (feature, value) => - // TODO: This is deprecated and should used JsonMapper.Builder, but that would be difficult without - // breaking compatibility for custom JacksonObjectMapperProvider that may create a custom instance - // of the ObjectMapper - objectMapper.configure(feature, value) + mapperFeatures.foreach { case (feature, value) => + // TODO: This is deprecated and should used JsonMapper.Builder, but that would be difficult without + // breaking compatibility for custom JacksonObjectMapperProvider that may create a custom instance + // of the ObjectMapper + objectMapper.configure(feature, value) } - val configuredJsonParserFeatures = features(config, "json-parser-features").map { - case (enumName, value) => JsonParser.Feature.valueOf(enumName) -> value + val configuredJsonParserFeatures = features(config, "json-parser-features").map { case (enumName, value) => + JsonParser.Feature.valueOf(enumName) -> value } val jsonParserFeatures = objectMapperFactory.overrideConfiguredJsonParserFeatures(bindingName, configuredJsonParserFeatures) - jsonParserFeatures.foreach { - case (feature, value) => objectMapper.configure(feature, value) + jsonParserFeatures.foreach { case (feature, value) => + objectMapper.configure(feature, value) } - val configuredJsonGeneratorFeatures = features(config, 
"json-generator-features").map { - case (enumName, value) => JsonGenerator.Feature.valueOf(enumName) -> value + val configuredJsonGeneratorFeatures = features(config, "json-generator-features").map { case (enumName, value) => + JsonGenerator.Feature.valueOf(enumName) -> value } val jsonGeneratorFeatures = objectMapperFactory.overrideConfiguredJsonGeneratorFeatures(bindingName, configuredJsonGeneratorFeatures) - jsonGeneratorFeatures.foreach { - case (feature, value) => objectMapper.configure(feature, value) + jsonGeneratorFeatures.foreach { case (feature, value) => + objectMapper.configure(feature, value) } } @@ -193,14 +190,13 @@ object JacksonObjectMapperProvider extends ExtensionId[JacksonObjectMapperProvid config: Config): Unit = { val configuredVisibility: immutable.Seq[(PropertyAccessor, JsonAutoDetect.Visibility)] = - configPairs(config, "visibility").map { - case (property, visibility) => - PropertyAccessor.valueOf(property) -> JsonAutoDetect.Visibility.valueOf(visibility) + configPairs(config, "visibility").map { case (property, visibility) => + PropertyAccessor.valueOf(property) -> JsonAutoDetect.Visibility.valueOf(visibility) } val visibility = objectMapperFactory.overrideConfiguredVisibility(bindingName, configuredVisibility) - visibility.foreach { - case (property, visibility) => objectMapper.setVisibility(property, visibility) + visibility.foreach { case (property, visibility) => + objectMapper.setVisibility(property, visibility) } } @@ -299,9 +295,7 @@ object JacksonObjectMapperProvider extends ExtensionId[JacksonObjectMapperProvid } } -/** - * Registry of shared `ObjectMapper` instances, each with it's unique `bindingName`. - */ +/** Registry of shared `ObjectMapper` instances, each with it's unique `bindingName`. 
*/ final class JacksonObjectMapperProvider(system: ExtendedActorSystem) extends Extension { private val objectMappers = new ConcurrentHashMap[String, ObjectMapper] diff --git a/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/JacksonSerializer.scala b/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/JacksonSerializer.scala index 22457d1a9f6..cdb2e3f4377 100644 --- a/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/JacksonSerializer.scala +++ b/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/JacksonSerializer.scala @@ -24,9 +24,7 @@ import akka.serialization.{ BaseSerializer, SerializationExtension, SerializerWi import akka.util.Helpers.toRootLowerCase import akka.util.OptionVal -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object JacksonSerializer { /** @@ -65,8 +63,8 @@ import akka.util.OptionVal val name = clazz.getSimpleName // looking for "AbstractBeanFactoryPointcutAdvisor" but no point to allow any is there? 
if ("AbstractPointcutAdvisor".equals(name) - // ditto for "FileSystemXmlApplicationContext": block all ApplicationContexts - || "AbstractApplicationContext".equals(name)) + // ditto for "FileSystemXmlApplicationContext": block all ApplicationContexts + || "AbstractApplicationContext".equals(name)) false else isAllowedSpringClass(clazz.getSuperclass) @@ -198,10 +196,9 @@ import akka.util.OptionVal } private val migrations: Map[String, JacksonMigration] = { import akka.util.ccompat.JavaConverters._ - conf.getConfig("migrations").root.unwrapped.asScala.toMap.map { - case (k, v) => - val transformer = system.dynamicAccess.createInstanceFor[JacksonMigration](v.toString, Nil).get - k -> transformer + conf.getConfig("migrations").root.unwrapped.asScala.toMap.map { case (k, v) => + val transformer = system.dynamicAccess.createInstanceFor[JacksonMigration](v.toString, Nil).get + k -> transformer } } private val denyList: GadgetClassDenyList = new GadgetClassDenyList diff --git a/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/StreamRefModule.scala b/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/StreamRefModule.scala index 76089c8ec10..13f6b7d7fb0 100644 --- a/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/StreamRefModule.scala +++ b/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/StreamRefModule.scala @@ -19,24 +19,18 @@ import akka.stream.SinkRef import akka.stream.SourceRef import akka.stream.StreamRefResolver -/** - * INTERNAL API: Adds support for serializing and deserializing [[akka.stream.SourceRef]] and [[akka.stream.SinkRef]]. - */ +/** INTERNAL API: Adds support for serializing and deserializing [[akka.stream.SourceRef]] and [[akka.stream.SinkRef]]. 
*/ @InternalApi private[akka] trait StreamRefModule extends JacksonModule { addSerializer(classOf[SourceRef[_]], () => SourceRefSerializer.instance, () => SourceRefDeserializer.instance) addSerializer(classOf[SinkRef[_]], () => SinkRefSerializer.instance, () => SinkRefDeserializer.instance) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object SourceRefSerializer { val instance: SourceRefSerializer = new SourceRefSerializer } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class SourceRefSerializer extends StdScalarSerializer[SourceRef[_]](classOf[SourceRef[_]]) with ActorSystemAccess { @@ -48,16 +42,12 @@ import akka.stream.StreamRefResolver } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object SourceRefDeserializer { val instance: SourceRefDeserializer = new SourceRefDeserializer } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class SourceRefDeserializer extends StdScalarDeserializer[SourceRef[_]](classOf[SourceRef[_]]) with ActorSystemAccess { @@ -71,16 +61,12 @@ import akka.stream.StreamRefResolver } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object SinkRefSerializer { val instance: SinkRefSerializer = new SinkRefSerializer } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class SinkRefSerializer extends StdScalarSerializer[SinkRef[_]](classOf[SinkRef[_]]) with ActorSystemAccess { @@ -92,16 +78,12 @@ import akka.stream.StreamRefResolver } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object SinkRefDeserializer { val instance: SinkRefDeserializer = new SinkRefDeserializer } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class SinkRefDeserializer extends StdScalarDeserializer[SinkRef[_]](classOf[SinkRef[_]]) with ActorSystemAccess { diff --git 
a/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/TypedActorRefModule.scala b/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/TypedActorRefModule.scala index a2410085d00..d5e74bd3e03 100644 --- a/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/TypedActorRefModule.scala +++ b/akka-serialization-jackson/src/main/scala/akka/serialization/jackson/TypedActorRefModule.scala @@ -17,23 +17,17 @@ import akka.actor.typed.ActorRefResolver import akka.actor.typed.scaladsl.adapter._ import akka.annotation.InternalApi -/** - * INTERNAL API: Adds support for serializing and deserializing [[akka.actor.typed.ActorRef]]. - */ +/** INTERNAL API: Adds support for serializing and deserializing [[akka.actor.typed.ActorRef]]. */ @InternalApi private[akka] trait TypedActorRefModule extends JacksonModule { addSerializer(classOf[ActorRef[_]], () => TypedActorRefSerializer.instance, () => TypedActorRefDeserializer.instance) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object TypedActorRefSerializer { val instance: TypedActorRefSerializer = new TypedActorRefSerializer } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class TypedActorRefSerializer extends StdScalarSerializer[ActorRef[_]](classOf[ActorRef[_]]) with ActorSystemAccess { @@ -43,16 +37,12 @@ import akka.annotation.InternalApi } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object TypedActorRefDeserializer { val instance: TypedActorRefDeserializer = new TypedActorRefDeserializer } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class TypedActorRefDeserializer extends StdScalarDeserializer[ActorRef[_]](classOf[ActorRef[_]]) with ActorSystemAccess { diff --git a/akka-serialization-jackson/src/test/scala/akka/serialization/jackson/JacksonSerializerSpec.scala b/akka-serialization-jackson/src/test/scala/akka/serialization/jackson/JacksonSerializerSpec.scala 
index 2859ebb27ac..dbf7ab0be99 100644 --- a/akka-serialization-jackson/src/test/scala/akka/serialization/jackson/JacksonSerializerSpec.scala +++ b/akka-serialization-jackson/src/test/scala/akka/serialization/jackson/JacksonSerializerSpec.scala @@ -121,7 +121,7 @@ object ScalaTestMessages { extends TestMessage // #jackson-scala-enumeration - //delegate to AkkaSerialization + // delegate to AkkaSerialization object HasAkkaSerializer { def apply(description: String): HasAkkaSerializer = new HasAkkaSerializer(description) } @@ -462,16 +462,18 @@ class JacksonJsonSerializerSpec extends JacksonSerializerSpec("jackson-json") { "be possible to create custom ObjectMapper" in { val customJavaTimeModule = new SimpleModule() { import com.fasterxml.jackson.databind.ser.std._ - addSerializer(classOf[Instant], new StdSerializer[Instant](classOf[Instant]) { - override def serialize(value: Instant, gen: JsonGenerator, provider: SerializerProvider): Unit = { - gen.writeStartObject() - gen.writeFieldName("nanos") - gen.writeNumber(value.getNano) - gen.writeFieldName("custom") - gen.writeString("field") - gen.writeEndObject() - } - }) + addSerializer( + classOf[Instant], + new StdSerializer[Instant](classOf[Instant]) { + override def serialize(value: Instant, gen: JsonGenerator, provider: SerializerProvider): Unit = { + gen.writeStartObject() + gen.writeFieldName("nanos") + gen.writeNumber(value.getNano) + gen.writeFieldName("custom") + gen.writeString("field") + gen.writeEndObject() + } + }) } val customJacksonObjectMapperFactory = new JacksonObjectMapperFactory { @@ -629,8 +631,7 @@ class JacksonJsonSerializerSpec extends JacksonSerializerSpec("jackson-json") { "JacksonJsonSerializer without type in manifest" should { import ScalaTestMessages._ - "deserialize messages using the serialization bindings" in withSystem( - """ + "deserialize messages using the serialization bindings" in withSystem(""" akka.actor { serializers.animal = "akka.serialization.jackson.JacksonJsonSerializer" 
serialization-identifiers.animal = 9091 @@ -648,8 +649,7 @@ class JacksonJsonSerializerSpec extends JacksonSerializerSpec("jackson-json") { deserialized should ===(msg) } - "deserialize messages using the configured deserialization type" in withSystem( - """ + "deserialize messages using the configured deserialization type" in withSystem(""" akka.actor { serializers.animal = "akka.serialization.jackson.JacksonJsonSerializer" serialization-identifiers.animal = 9091 @@ -861,8 +861,7 @@ abstract class JacksonSerializerSpec(serializerName: String) } // TODO: Consider moving the migrations Specs to a separate Spec - "deserialize with migrations" in withSystem( - """ + "deserialize with migrations" in withSystem(""" akka.serialization.jackson.migrations { ## Usually the key is a FQCN but we're hacking the name to use multiple migrations for the ## same type in a single test. @@ -1088,8 +1087,7 @@ abstract class JacksonSerializerSpec(serializerName: String) } // TODO: Consider moving the migrations Specs to a separate Spec - "deserialize with migrations" in withSystem( - """ + "deserialize with migrations" in withSystem(""" akka.serialization.jackson.migrations { ## Usually the key is a FQCN but we're hacking the name to use multiple migrations for the ## same type in a single test. 
@@ -1257,12 +1255,14 @@ abstract class JacksonSerializerSpec(serializerName: String) intercept[IllegalArgumentException] { val sys = ActorSystem( system.name, - ConfigFactory.parseString(s""" + ConfigFactory + .parseString(s""" akka.actor.serialization-bindings { "$className" = $serializerName "akka.serialization.jackson.ScalaTestMessages$$TestMessage" = $serializerName } - """).withFallback(system.settings.config)) + """) + .withFallback(system.settings.config)) try { SerializationExtension(sys).serialize(SimpleCommand("hi")).get } finally shutdown(sys) diff --git a/akka-serialization-jackson/src/test/scala/doc/akka/serialization/jackson/CustomAdtSerializer.scala b/akka-serialization-jackson/src/test/scala/doc/akka/serialization/jackson/CustomAdtSerializer.scala index 4d16593f433..2c617f00fbf 100644 --- a/akka-serialization-jackson/src/test/scala/doc/akka/serialization/jackson/CustomAdtSerializer.scala +++ b/akka-serialization-jackson/src/test/scala/doc/akka/serialization/jackson/CustomAdtSerializer.scala @@ -8,7 +8,7 @@ import akka.serialization.jackson.JsonSerializable object CustomAdtSerializer { - //#adt-trait-object + // #adt-trait-object import com.fasterxml.jackson.core.JsonGenerator import com.fasterxml.jackson.core.JsonParser import com.fasterxml.jackson.databind.DeserializationContext @@ -57,5 +57,5 @@ object CustomAdtSerializer { } final case class Compass(currentDirection: Direction) extends JsonSerializable - //#adt-trait-object + // #adt-trait-object } diff --git a/akka-serialization-jackson/src/test/scala/doc/akka/serialization/jackson/SerializationDocSpec.scala b/akka-serialization-jackson/src/test/scala/doc/akka/serialization/jackson/SerializationDocSpec.scala index 8a08d83c1c3..0806601d291 100644 --- a/akka-serialization-jackson/src/test/scala/doc/akka/serialization/jackson/SerializationDocSpec.scala +++ b/akka-serialization-jackson/src/test/scala/doc/akka/serialization/jackson/SerializationDocSpec.scala @@ -127,7 +127,7 @@ object 
SerializationDocSpec { object Polymorphism { - //#polymorphism + // #polymorphism final case class Zoo(primaryAttraction: Animal) extends JsonSerializable @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type") @@ -140,12 +140,12 @@ object SerializationDocSpec { final case class Lion(name: String) extends Animal final case class Elephant(name: String, age: Int) extends Animal - //#polymorphism + // #polymorphism } object PolymorphismMixedClassObject { - //#polymorphism-case-object + // #polymorphism-case-object final case class Zoo(primaryAttraction: Animal) extends JsonSerializable @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type") @@ -168,7 +168,7 @@ object SerializationDocSpec { // whenever we need to deserialize an instance of Unicorn trait, we return the object Unicorn override def deserialize(p: JsonParser, ctxt: DeserializationContext): Unicorn = Unicorn } - //#polymorphism-case-object + // #polymorphism-case-object } val configDateTime = """ @@ -193,8 +193,7 @@ class SerializationDocSpec extends TestKit( ActorSystem( "SerializationDocSpec", - ConfigFactory.parseString( - """ + ConfigFactory.parseString(""" akka.serialization.jackson.migrations { # migrations for Java classes "jdoc.akka.serialization.jackson.v2b.ItemAdded" = "jdoc.akka.serialization.jackson.v2b.ItemAddedMigration" diff --git a/akka-slf4j/src/main/scala/akka/event/slf4j/Slf4jLogger.scala b/akka-slf4j/src/main/scala/akka/event/slf4j/Slf4jLogger.scala index d61321d4379..cb5a68c5087 100644 --- a/akka-slf4j/src/main/scala/akka/event/slf4j/Slf4jLogger.scala +++ b/akka-slf4j/src/main/scala/akka/event/slf4j/Slf4jLogger.scala @@ -4,7 +4,7 @@ package akka.event.slf4j -import org.slf4j.{ MDC, Marker, MarkerFactory, Logger => SLFLogger, LoggerFactory => SLFLoggerFactory } +import org.slf4j.{ Logger => SLFLogger, LoggerFactory => SLFLoggerFactory, MDC, Marker, MarkerFactory } import akka.actor._ import akka.dispatch.RequiresMessageQueue @@ -12,17 +12,13 @@ import akka.event.{ LogMarker, _ 
} import akka.event.Logging._ import akka.util.{ unused, Helpers } -/** - * Base trait for all classes that wants to be able use the SLF4J logging infrastructure. - */ +/** Base trait for all classes that wants to be able use the SLF4J logging infrastructure. */ trait SLF4JLogging { @transient lazy val log = Logger(this.getClass.getName) } -/** - * Logger is a factory for obtaining SLF4J-Loggers - */ +/** Logger is a factory for obtaining SLF4J-Loggers */ object Logger { /** @@ -41,9 +37,7 @@ object Logger { case _ => SLFLoggerFactory.getLogger(logClass) } - /** - * Returns the SLF4J Root Logger - */ + /** Returns the SLF4J Root Logger */ def root: SLFLogger = apply(SLFLogger.ROOT_LOGGER_NAME) } diff --git a/akka-slf4j/src/test/scala/akka/event/slf4j/Slf4jLoggingFilterSpec.scala b/akka-slf4j/src/test/scala/akka/event/slf4j/Slf4jLoggingFilterSpec.scala index 01f3923fe6a..b02f7bd7ce3 100644 --- a/akka-slf4j/src/test/scala/akka/event/slf4j/Slf4jLoggingFilterSpec.scala +++ b/akka-slf4j/src/test/scala/akka/event/slf4j/Slf4jLoggingFilterSpec.scala @@ -42,7 +42,7 @@ object Slf4jLoggingFilterSpec { sender() ! LoggerInitialized case SetTarget(ref) => target = Some(ref) - ref ! ("OK") + ref ! "OK" case event: LogEvent => println("# event: " + event) target.foreach { _ ! 
event } @@ -50,21 +50,19 @@ object Slf4jLoggingFilterSpec { } class DebugLevelProducer extends Actor with ActorLogging { - def receive = { - case s: String => - log.warning(s) - log.info(s) - println("# DebugLevelProducer: " + log.isDebugEnabled) - log.debug(s) + def receive = { case s: String => + log.warning(s) + log.info(s) + println("# DebugLevelProducer: " + log.isDebugEnabled) + log.debug(s) } } class WarningLevelProducer extends Actor with ActorLogging { - def receive = { - case s: String => - log.warning(s) - log.info(s) - log.debug(s) + def receive = { case s: String => + log.warning(s) + log.info(s) + log.debug(s) } } diff --git a/akka-stream-testkit/src/main/scala/akka/stream/testkit/StreamTestKit.scala b/akka-stream-testkit/src/main/scala/akka/stream/testkit/StreamTestKit.scala index f52c56feb53..8277db84a51 100644 --- a/akka-stream-testkit/src/main/scala/akka/stream/testkit/StreamTestKit.scala +++ b/akka-stream-testkit/src/main/scala/akka/stream/testkit/StreamTestKit.scala @@ -24,9 +24,7 @@ import akka.testkit.TestActor.AutoPilot import akka.util.JavaDurationConverters import akka.util.ccompat._ -/** - * Provides factory methods for various Publishers. - */ +/** Provides factory methods for various Publishers. */ object TestPublisher { import StreamTestKit._ @@ -38,49 +36,35 @@ object TestPublisher { object SubscriptionDone extends NoSerializationVerificationNeeded - /** - * Publisher that signals complete to subscribers, after handing a void subscription. - */ + /** Publisher that signals complete to subscribers, after handing a void subscription. */ def empty[T](): Publisher[T] = EmptyPublisher[T] - /** - * Publisher that subscribes the subscriber and completes after the first request. - */ + /** Publisher that subscribes the subscriber and completes after the first request. 
*/ def lazyEmpty[T]: Publisher[T] = new Publisher[T] { override def subscribe(subscriber: Subscriber[_ >: T]): Unit = subscriber.onSubscribe(CompletedSubscription(subscriber)) } - /** - * Publisher that signals error to subscribers immediately after handing out subscription. - */ + /** Publisher that signals error to subscribers immediately after handing out subscription. */ def error[T](cause: Throwable): Publisher[T] = ErrorPublisher(cause, "error").asInstanceOf[Publisher[T]] - /** - * Publisher that subscribes the subscriber and signals error after the first request. - */ + /** Publisher that subscribes the subscriber and signals error after the first request. */ def lazyError[T](cause: Throwable): Publisher[T] = new Publisher[T] { override def subscribe(subscriber: Subscriber[_ >: T]): Unit = subscriber.onSubscribe(FailedSubscription(subscriber, cause)) } - /** - * Probe that implements [[org.reactivestreams.Publisher]] interface. - */ + /** Probe that implements [[org.reactivestreams.Publisher]] interface. */ def manualProbe[T](autoOnSubscribe: Boolean = true)(implicit system: ActorSystem): ManualProbe[T] = new ManualProbe(autoOnSubscribe) - /** - * Probe that implements [[org.reactivestreams.Publisher]] interface and tracks demand. - */ + /** Probe that implements [[org.reactivestreams.Publisher]] interface and tracks demand. */ def probe[T](initialPendingRequests: Long = 0)(implicit system: ActorSystem): Probe[T] = new Probe(initialPendingRequests) object ManualProbe { - /** - * Probe that implements [[org.reactivestreams.Publisher]] interface. - */ + /** Probe that implements [[org.reactivestreams.Publisher]] interface. 
*/ def apply[T](autoOnSubscribe: Boolean = true)(implicit system: ClassicActorSystemProvider): ManualProbe[T] = new ManualProbe(autoOnSubscribe)(system.classicSystem) } @@ -98,7 +82,7 @@ object TestPublisher { @ccompatUsedUntil213 private val probe: TestProbe = TestProbe() - //this is a way to pause receiving message from probe until subscription is done + // this is a way to pause receiving message from probe until subscription is done private val subscribed = new CountDownLatch(1) probe.ignoreMsg { case SubscriptionDone => true } probe.setAutoPilot(new TestActor.AutoPilot() { @@ -109,9 +93,7 @@ object TestPublisher { }) private val self = this.asInstanceOf[Self] - /** - * Subscribes a given [[org.reactivestreams.Subscriber]] to this probe publisher. - */ + /** Subscribes a given [[org.reactivestreams.Subscriber]] to this probe publisher. */ def subscribe(subscriber: Subscriber[_ >: I]): Unit = { val subscription: PublisherProbeSubscription[I] = new PublisherProbeSubscription[I](subscriber, probe) probe.ref ! Subscribe(subscription) @@ -126,17 +108,13 @@ object TestPublisher { f } - /** - * Expect a subscription. - */ + /** Expect a subscription. */ def expectSubscription(): PublisherProbeSubscription[I] = executeAfterSubscription { probe.expectMsgType[Subscribe].subscription.asInstanceOf[PublisherProbeSubscription[I]] } - /** - * Expect demand from a given subscription. - */ + /** Expect demand from a given subscription. */ def expectRequest(subscription: Subscription, n: Int): Self = executeAfterSubscription { probe.expectMsg(RequestMore(subscription, n)) self @@ -151,17 +129,13 @@ object TestPublisher { self } - /** - * Expect no messages for a given duration. - */ + /** Expect no messages for a given duration. */ def expectNoMessage(max: FiniteDuration): Self = executeAfterSubscription { probe.expectNoMessage(max) self } - /** - * Receive messages for a given duration or until one does not match a given partial function. 
- */ + /** Receive messages for a given duration or until one does not match a given partial function. */ def receiveWhile[T]( max: Duration = Duration.Undefined, idle: Duration = Duration.Inf, @@ -193,9 +167,7 @@ object TestPublisher { probe.within(min, max)(f) } - /** - * Same as calling `within(0 seconds, max)(f)`. - */ + /** Same as calling `within(0 seconds, max)(f)`. */ def within[T](max: FiniteDuration)(f: => T): T = executeAfterSubscription { probe.within(max)(f) } } @@ -204,9 +176,7 @@ object TestPublisher { new Probe(initialPendingRequests)(system.classicSystem) } - /** - * Single subscription and demand tracking for [[TestPublisher.ManualProbe]]. - */ + /** Single subscription and demand tracking for [[TestPublisher.ManualProbe]]. */ class Probe[T] private[TestPublisher] (initialPendingRequests: Long)(implicit system: ActorSystem) extends ManualProbe[T] { @@ -218,9 +188,7 @@ object TestPublisher { /** Asserts that a subscription has been received or will be received */ def ensureSubscription(): Unit = subscription // initializes lazy val - /** - * Current pending requests. - */ + /** Current pending requests. */ def pending: Long = pendingRequests def sendNext(elem: T): Self = { @@ -268,9 +236,7 @@ object TestPublisher { s"Expected cancellation cause to be of type ${scala.reflect.classTag[E]} but was ${cause.getClass}: $cause") } - /** - * Java API - */ + /** Java API */ def expectCancellationWithCause[E <: Throwable](causeClass: Class[E]): E = expectCancellationWithCause()(ClassTag(causeClass)) } @@ -294,9 +260,7 @@ object TestSubscriber { } } - /** - * Probe that implements [[org.reactivestreams.Subscriber]] interface. - */ + /** Probe that implements [[org.reactivestreams.Subscriber]] interface. 
*/ def manualProbe[T]()(implicit system: ActorSystem): ManualProbe[T] = new ManualProbe() def probe[T]()(implicit system: ActorSystem): Probe[T] = new Probe() @@ -322,23 +286,17 @@ object TestSubscriber { private val self = this.asInstanceOf[Self] - /** - * Expect and return a [[org.reactivestreams.Subscription]]. - */ + /** Expect and return a [[org.reactivestreams.Subscription]]. */ def expectSubscription(): Subscription = { _subscription = probe.expectMsgType[OnSubscribe].subscription _subscription } - /** - * Expect and return [[SubscriberEvent]] (any of: `OnSubscribe`, `OnNext`, `OnError` or `OnComplete`). - */ + /** Expect and return [[SubscriberEvent]] (any of: `OnSubscribe`, `OnNext`, `OnError` or `OnComplete`). */ def expectEvent(): SubscriberEvent = probe.expectMsgType[SubscriberEvent] - /** - * Expect and return [[SubscriberEvent]] (any of: `OnSubscribe`, `OnNext`, `OnError` or `OnComplete`). - */ + /** Expect and return [[SubscriberEvent]] (any of: `OnSubscribe`, `OnNext`, `OnError` or `OnComplete`). */ def expectEvent(max: FiniteDuration): SubscriberEvent = probe.expectMsgType[SubscriberEvent](max) @@ -352,16 +310,12 @@ object TestSubscriber { self } - /** - * Expect and return a stream element. - */ + /** Expect and return a stream element. */ def expectNext(): I = { expectNext(probe.testKitSettings.SingleExpectDefaultTimeout.dilated) } - /** - * Expect and return a stream element during specified time or timeout. - */ + /** Expect and return a stream element during specified time or timeout. */ def expectNext(d: FiniteDuration): I = { val t = probe.remainingOr(d) probe.receiveOne(t) match { @@ -409,9 +363,7 @@ object TestSubscriber { def expectNextUnordered(e1: I, e2: I, es: I*): Self = expectNextUnorderedN((e1 +: e2 +: es).iterator.map(identity).to(immutable.IndexedSeq)) - /** - * Expect and return the next `n` stream elements. - */ + /** Expect and return the next `n` stream elements. 
*/ def expectNextN(n: Long): immutable.Seq[I] = { val b = immutable.Seq.newBuilder[I] var i = 0 @@ -461,9 +413,7 @@ object TestSubscriber { self } - /** - * Expect and return the signalled [[Throwable]]. - */ + /** Expect and return the signalled [[Throwable]]. */ def expectError(): Throwable = probe.expectMsgType[OnError].cause /** @@ -587,9 +537,7 @@ object TestSubscriber { } } - /** - * Expect next element or stream completion - returning whichever was signalled. - */ + /** Expect next element or stream completion - returning whichever was signalled. */ def expectNextOrComplete(): Either[OnComplete.type, I] = { probe.fishForMessage(hint = s"OnNext(_) or OnComplete") { case OnNext(_) => true @@ -636,19 +584,14 @@ object TestSubscriber { self } - /** - * Java API: Assert that no message is received for the specified time. - */ + /** Java API: Assert that no message is received for the specified time. */ def expectNoMessage(remaining: java.time.Duration): Self = { import JavaDurationConverters._ probe.expectNoMessage(remaining.asScala) self } - /** - * Expect a stream element and test it with partial function. - * - */ + /** Expect a stream element and test it with partial function. */ def expectNextPF[T](f: PartialFunction[Any, T]): T = expectNextWithTimeoutPF(Duration.Undefined, f) @@ -658,9 +601,11 @@ object TestSubscriber { * @param max wait no more than max time, otherwise throw AssertionError */ def expectNextWithTimeoutPF[T](max: Duration, f: PartialFunction[Any, T]): T = - expectEventWithTimeoutPF(max, { - case OnNext(n) if f.isDefinedAt(n) => f(n) - }) + expectEventWithTimeoutPF( + max, + { + case OnNext(n) if f.isDefinedAt(n) => f(n) + }) /** * Expect a stream element during specified time or timeout and test it with partial function. 
@@ -686,18 +631,14 @@ object TestSubscriber { def expectEventPF[T](f: PartialFunction[SubscriberEvent, T]): T = expectEventWithTimeoutPF(Duration.Undefined, f) - /** - * Receive messages for a given duration or until one does not match a given partial function. - */ + /** Receive messages for a given duration or until one does not match a given partial function. */ def receiveWhile[T]( max: Duration = Duration.Undefined, idle: Duration = Duration.Inf, messages: Int = Int.MaxValue)(f: PartialFunction[SubscriberEvent, T]): immutable.Seq[T] = probe.receiveWhile(max, idle, messages)(f.asInstanceOf[PartialFunction[AnyRef, T]]) - /** - * Drains a given number of messages - */ + /** Drains a given number of messages */ def receiveWithin(max: FiniteDuration, messages: Int = Int.MaxValue): immutable.Seq[I] = probe .receiveWhile(max, max, messages) { @@ -754,9 +695,7 @@ object TestSubscriber { */ def within[T](min: FiniteDuration, max: FiniteDuration)(f: => T): T = probe.within(min, max)(f) - /** - * Same as calling `within(0 seconds, max)(f)`. - */ + /** Same as calling `within(0 seconds, max)(f)`. */ def within[T](max: FiniteDuration)(f: => T): T = probe.within(max)(f) def onSubscribe(subscription: Subscription): Unit = probe.ref ! OnSubscribe(subscription) @@ -769,9 +708,7 @@ object TestSubscriber { def apply[T]()(implicit system: ClassicActorSystemProvider): Probe[T] = new Probe()(system.classicSystem) } - /** - * Single subscription tracking for [[ManualProbe]]. - */ + /** Single subscription tracking for [[ManualProbe]]. */ class Probe[T] private[TestSubscriber] ()(implicit system: ActorSystem) extends ManualProbe[T] { override type Self = Probe[T] @@ -789,9 +726,7 @@ object TestSubscriber { this } - /** - * Request and expect a stream element. - */ + /** Request and expect a stream element. 
*/ def requestNext(element: T): Self = { subscription.request(1) expectNext(element) @@ -812,17 +747,13 @@ object TestSubscriber { "Tried to cancel with cause but upstream subscription doesn't support cancellation with cause") } - /** - * Request and expect a stream element. - */ + /** Request and expect a stream element. */ def requestNext(): T = { subscription.request(1) expectNext() } - /** - * Request and expect a stream element during the specified time or timeout. - */ + /** Request and expect a stream element during the specified time or timeout. */ def requestNext(d: FiniteDuration): T = { subscription.request(1) expectNext(d) @@ -830,9 +761,7 @@ object TestSubscriber { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[stream] object StreamTestKit { import TestPublisher._ diff --git a/akka-stream-testkit/src/main/scala/akka/stream/testkit/TestGraphStage.scala b/akka-stream-testkit/src/main/scala/akka/stream/testkit/TestGraphStage.scala index e7e978ea79a..0f412bcac39 100644 --- a/akka-stream-testkit/src/main/scala/akka/stream/testkit/TestGraphStage.scala +++ b/akka-stream-testkit/src/main/scala/akka/stream/testkit/TestGraphStage.scala @@ -12,9 +12,7 @@ import akka.stream.scaladsl.{ Sink, Source } import akka.stream.stage.{ GraphStageWithMaterializedValue, InHandler, OutHandler } import akka.testkit.TestProbe -/** - * Messages emitted after the corresponding `stageUnderTest` methods has been invoked. - */ +/** Messages emitted after the corresponding `stageUnderTest` methods has been invoked. 
*/ object GraphStageMessages { sealed trait StageMessage case object Push extends StageMessage with NoSerializationVerificationNeeded diff --git a/akka-stream-testkit/src/main/scala/akka/stream/testkit/javadsl/TestSink.scala b/akka-stream-testkit/src/main/scala/akka/stream/testkit/javadsl/TestSink.scala index 5fb786791e8..da72092f64a 100644 --- a/akka-stream-testkit/src/main/scala/akka/stream/testkit/javadsl/TestSink.scala +++ b/akka-stream-testkit/src/main/scala/akka/stream/testkit/javadsl/TestSink.scala @@ -12,16 +12,12 @@ import akka.stream.testkit._ /** Java API */ object TestSink { - /** - * A Sink that materialized to a [[akka.stream.testkit.TestSubscriber.Probe]]. - */ + /** A Sink that materialized to a [[akka.stream.testkit.TestSubscriber.Probe]]. */ @deprecated("Use `TestSink.create` with ClassicActorSystemProvider instead.", "2.7.0") def probe[T](system: ActorSystem): Sink[T, TestSubscriber.Probe[T]] = create(system) - /** - * A Sink that materialized to a [[akka.stream.testkit.TestSubscriber.Probe]]. - */ + /** A Sink that materialized to a [[akka.stream.testkit.TestSubscriber.Probe]]. */ def create[T](system: ClassicActorSystemProvider): Sink[T, TestSubscriber.Probe[T]] = new Sink(scaladsl.TestSink[T]()(system)) diff --git a/akka-stream-testkit/src/main/scala/akka/stream/testkit/javadsl/TestSource.scala b/akka-stream-testkit/src/main/scala/akka/stream/testkit/javadsl/TestSource.scala index 10e64615dff..a589e8bf1a5 100644 --- a/akka-stream-testkit/src/main/scala/akka/stream/testkit/javadsl/TestSource.scala +++ b/akka-stream-testkit/src/main/scala/akka/stream/testkit/javadsl/TestSource.scala @@ -12,16 +12,12 @@ import akka.stream.testkit._ /** Java API */ object TestSource { - /** - * A Source that materializes to a [[akka.stream.testkit.TestPublisher.Probe]]. - */ + /** A Source that materializes to a [[akka.stream.testkit.TestPublisher.Probe]]. 
*/ @deprecated("Use `TestSource.create` with ClassicActorSystemProvider instead.", "2.7.0") def probe[T](system: ActorSystem): Source[T, TestPublisher.Probe[T]] = create(system) - /** - * A Source that materializes to a [[akka.stream.testkit.TestPublisher.Probe]]. - */ + /** A Source that materializes to a [[akka.stream.testkit.TestPublisher.Probe]]. */ def create[T](system: ClassicActorSystemProvider): Source[T, TestPublisher.Probe[T]] = new Source(scaladsl.TestSource[T]()(system)) diff --git a/akka-stream-testkit/src/main/scala/akka/stream/testkit/scaladsl/StreamTestKit.scala b/akka-stream-testkit/src/main/scala/akka/stream/testkit/scaladsl/StreamTestKit.scala index 3212703f66f..33ef232d1f9 100644 --- a/akka-stream-testkit/src/main/scala/akka/stream/testkit/scaladsl/StreamTestKit.scala +++ b/akka-stream-testkit/src/main/scala/akka/stream/testkit/scaladsl/StreamTestKit.scala @@ -48,11 +48,13 @@ object StreamTestKit { val c = sys.settings.config.getConfig("akka.stream.testkit") val timeout = c.getDuration("all-stages-stopped-timeout", MILLISECONDS).millis probe.within(timeout) { - try probe.awaitAssert { - supervisor.tell(StreamSupervisor.GetChildren, probe.ref) - val children = probe.expectMsgType[StreamSupervisor.Children].children - assert(children.isEmpty, s"expected no StreamSupervisor children, but got [${children.mkString(", ")}]") - } catch { + try + probe.awaitAssert { + supervisor.tell(StreamSupervisor.GetChildren, probe.ref) + val children = probe.expectMsgType[StreamSupervisor.Children].children + assert(children.isEmpty, s"expected no StreamSupervisor children, but got [${children.mkString(", ")}]") + } + catch { case ex: Throwable => import sys.dispatcher printDebugDump(supervisor) diff --git a/akka-stream-testkit/src/main/scala/akka/stream/testkit/scaladsl/TestSink.scala b/akka-stream-testkit/src/main/scala/akka/stream/testkit/scaladsl/TestSink.scala index 6fd1b7f61ee..9ef5053550c 100644 --- 
a/akka-stream-testkit/src/main/scala/akka/stream/testkit/scaladsl/TestSink.scala +++ b/akka-stream-testkit/src/main/scala/akka/stream/testkit/scaladsl/TestSink.scala @@ -13,21 +13,15 @@ import akka.stream.testkit._ import akka.stream.testkit.StreamTestKit.ProbeSink import akka.stream.testkit.TestSubscriber.Probe -/** - * Factory methods for test sinks. - */ +/** Factory methods for test sinks. */ object TestSink { - /** - * A Sink that materialized to a [[akka.stream.testkit.TestSubscriber.Probe]]. - */ + /** A Sink that materialized to a [[akka.stream.testkit.TestSubscriber.Probe]]. */ @deprecated("Use `TestSink()` with implicit ClassicActorSystemProvider instead.", "2.7.0") def probe[T](implicit system: ActorSystem): Sink[T, Probe[T]] = apply() - /** - * A Sink that materialized to a [[akka.stream.testkit.TestSubscriber.Probe]]. - */ + /** A Sink that materialized to a [[akka.stream.testkit.TestSubscriber.Probe]]. */ def apply[T]()(implicit system: ClassicActorSystemProvider): Sink[T, Probe[T]] = { implicit val sys: ActorSystem = system.classicSystem Sink.fromGraph[T, TestSubscriber.Probe[T]](new ProbeSink(none, SinkShape(Inlet("ProbeSink.in")))) diff --git a/akka-stream-testkit/src/main/scala/akka/stream/testkit/scaladsl/TestSource.scala b/akka-stream-testkit/src/main/scala/akka/stream/testkit/scaladsl/TestSource.scala index 40c94a75d8c..3cc7938201a 100644 --- a/akka-stream-testkit/src/main/scala/akka/stream/testkit/scaladsl/TestSource.scala +++ b/akka-stream-testkit/src/main/scala/akka/stream/testkit/scaladsl/TestSource.scala @@ -12,21 +12,15 @@ import akka.stream.scaladsl._ import akka.stream.testkit._ import akka.stream.testkit.StreamTestKit.ProbeSource -/** - * Factory methods for test sources. - */ +/** Factory methods for test sources. */ object TestSource { - /** - * A Source that materializes to a [[akka.stream.testkit.TestPublisher.Probe]]. - */ + /** A Source that materializes to a [[akka.stream.testkit.TestPublisher.Probe]]. 
*/ @deprecated("Use `TestSource()` with implicit ClassicActorSystemProvider instead.", "2.7.0") def probe[T](implicit system: ActorSystem): Source[T, TestPublisher.Probe[T]] = apply() - /** - * A Source that materializes to a [[akka.stream.testkit.TestPublisher.Probe]]. - */ + /** A Source that materializes to a [[akka.stream.testkit.TestPublisher.Probe]]. */ def apply[T]()(implicit system: ClassicActorSystemProvider): Source[T, TestPublisher.Probe[T]] = { implicit val sys: ActorSystem = system.classicSystem Source.fromGraph[T, TestPublisher.Probe[T]](new ProbeSource(none, SourceShape(Outlet("ProbeSource.out")))) diff --git a/akka-stream-testkit/src/test/scala/akka/stream/impl/fusing/GraphInterpreterSpecKit.scala b/akka-stream-testkit/src/test/scala/akka/stream/impl/fusing/GraphInterpreterSpecKit.scala index 1320c33bd55..1a5ec19dc5e 100644 --- a/akka-stream-testkit/src/test/scala/akka/stream/impl/fusing/GraphInterpreterSpecKit.scala +++ b/akka-stream-testkit/src/test/scala/akka/stream/impl/fusing/GraphInterpreterSpecKit.scala @@ -26,9 +26,7 @@ import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler, _ import akka.stream.testkit.StreamSpec import akka.stream.testkit.Utils.TE -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] case class NoMaterializer(system: ActorSystem) extends Materializer { override def withNamePrefix(name: String): Materializer = @@ -167,73 +165,67 @@ object GraphInterpreterSpecKit { (logics, inOwners, outOwners) } - /** - * Create connections given a list of flow logics where each one has one connection to the next one - */ + /** Create connections given a list of flow logics where each one has one connection to the next one */ private[stream] def createLinearFlowConnections(logics: Seq[GraphStageLogic]): Array[Connection] = { require(logics.length >= 2, s"$logics is too short to create a linear flow") logics .sliding(2) .zipWithIndex - .map { - case (window, idx) => - val outOwner = window(0) - val 
inOwner = window(1) - - val connection = new Connection( - id = idx, - outOwner = outOwner, - outHandler = outOwner.outHandler(0), - inOwner = inOwner, - inHandler = inOwner.inHandler(0)) - - outOwner.portToConn(outOwner.inCount) = connection - inOwner.portToConn(0) = connection - - connection + .map { case (window, idx) => + val outOwner = window(0) + val inOwner = window(1) + + val connection = new Connection( + id = idx, + outOwner = outOwner, + outHandler = outOwner.outHandler(0), + inOwner = inOwner, + inHandler = inOwner.inHandler(0)) + + outOwner.portToConn(outOwner.inCount) = connection + inOwner.portToConn(0) = connection + + connection } .toArray } - /** - * Create interpreter connections for all the given `connectedPorts`. - */ + /** Create interpreter connections for all the given `connectedPorts`. */ private[stream] def createConnections( connectedPorts: Seq[(Outlet[_], Inlet[_])], inOwners: SMap[Inlet[_], GraphStageLogic], outOwners: SMap[Outlet[_], GraphStageLogic]): Array[Connection] = { val connections = new Array[Connection](connectedPorts.size) - connectedPorts.zipWithIndex.foreach { - case ((outlet, inlet), idx) => - val outOwner = outOwners(outlet) - val inOwner = inOwners(inlet) - - val connection = new Connection( - id = idx, - outOwner = outOwner, - outHandler = outOwner.outHandler(outlet.id), - inOwner = inOwner, - inHandler = inOwner.inHandler(inlet.id)) - - connections(idx) = connection - inOwner.portToConn(inlet.id) = connection - outOwner.portToConn(outOwner.inCount + outlet.id) = connection + connectedPorts.zipWithIndex.foreach { case ((outlet, inlet), idx) => + val outOwner = outOwners(outlet) + val inOwner = inOwners(inlet) + + val connection = new Connection( + id = idx, + outOwner = outOwner, + outHandler = outOwner.outHandler(outlet.id), + inOwner = inOwner, + inHandler = inOwner.inHandler(inlet.id)) + + connections(idx) = connection + inOwner.portToConn(inlet.id) = connection + outOwner.portToConn(outOwner.inCount + outlet.id) = 
connection } connections } private def setPortIds(shape: Shape): Unit = { - shape.inlets.zipWithIndex.foreach { - case (inlet, idx) => inlet.id = idx + shape.inlets.zipWithIndex.foreach { case (inlet, idx) => + inlet.id = idx } - shape.outlets.zipWithIndex.foreach { - case (outlet, idx) => outlet.id = idx + shape.outlets.zipWithIndex.foreach { case (outlet, idx) => + outlet.id = idx } } private def setPortIds(stage: GraphStageWithMaterializedValue[_ <: Shape, _]): Unit = { - stage.shape.inlets.zipWithIndex.foreach { case (inlet, idx) => inlet.id = idx } + stage.shape.inlets.zipWithIndex.foreach { case (inlet, idx) => inlet.id = idx } stage.shape.outlets.zipWithIndex.foreach { case (inlet, idx) => inlet.id = idx } } @@ -264,25 +256,29 @@ trait GraphInterpreterSpecKit extends StreamSpec { out.id = 0 override def toString = "Upstream" - setHandler(out, new OutHandler { - override def onPull() = { - // TODO handler needed but should it do anything? - } + setHandler( + out, + new OutHandler { + override def onPull() = { + // TODO handler needed but should it do anything? + } - override def toString = "Upstream.OutHandler" - }) + override def toString = "Upstream.OutHandler" + }) } object Downstream extends DownstreamBoundaryStageLogic[Int] { override val in = Inlet[Int]("down") in.id = 0 - setHandler(in, new InHandler { - override def onPush() = { - // TODO handler needed but should it do anything? - } + setHandler( + in, + new InHandler { + override def onPush() = { + // TODO handler needed but should it do anything? 
+ } - override def toString = "Downstream.InHandler" - }) + override def toString = "Downstream.InHandler" + }) override def toString = "Downstream" } @@ -368,11 +364,13 @@ trait GraphInterpreterSpecKit extends StreamSpec { val out = Outlet[T]("out") out.id = 0 - setHandler(out, new OutHandler { - override def onPull(): Unit = lastEvent += RequestOne(UpstreamProbe.this) - override def onDownstreamFinish(cause: Throwable): Unit = lastEvent += Cancel(UpstreamProbe.this, cause) - override def toString = s"${UpstreamProbe.this.toString}.outHandler" - }) + setHandler( + out, + new OutHandler { + override def onPull(): Unit = lastEvent += RequestOne(UpstreamProbe.this) + override def onDownstreamFinish(cause: Throwable): Unit = lastEvent += Cancel(UpstreamProbe.this, cause) + override def toString = s"${UpstreamProbe.this.toString}.outHandler" + }) def onNext(elem: T, eventLimit: Int = Int.MaxValue): Unit = { if (GraphInterpreter.Debug) println(s"----- NEXT: $this $elem") @@ -397,12 +395,14 @@ trait GraphInterpreterSpecKit extends StreamSpec { val in = Inlet[T]("in") in.id = 0 - setHandler(in, new InHandler { - override def onPush(): Unit = lastEvent += OnNext(DownstreamProbe.this, grab(in)) - override def onUpstreamFinish(): Unit = lastEvent += OnComplete(DownstreamProbe.this) - override def onUpstreamFailure(ex: Throwable): Unit = lastEvent += OnError(DownstreamProbe.this, ex) - override def toString = s"${DownstreamProbe.this.toString}.inHandler" - }) + setHandler( + in, + new InHandler { + override def onPush(): Unit = lastEvent += OnNext(DownstreamProbe.this, grab(in)) + override def onUpstreamFinish(): Unit = lastEvent += OnComplete(DownstreamProbe.this) + override def onUpstreamFailure(ex: Throwable): Unit = lastEvent += OnError(DownstreamProbe.this, ex) + override def toString = s"${DownstreamProbe.this.toString}.inHandler" + }) def requestOne(eventLimit: Int = Int.MaxValue): Unit = { if (GraphInterpreter.Debug) println(s"----- REQ $this") @@ -464,21 +464,23 @@ 
trait GraphInterpreterSpecKit extends StreamSpec { def cancel(): Unit = cancel(this.in) def grab(): T = grab(this.in) - setHandler(this.in, new InHandler { + setHandler( + this.in, + new InHandler { - // Modified onPush that does not grab() automatically the element. This accesses some internals. - override def onPush(): Unit = { - val internalEvent = portToConn(DownstreamPortProbe.this.in.id).slot + // Modified onPush that does not grab() automatically the element. This accesses some internals. + override def onPush(): Unit = { + val internalEvent = portToConn(DownstreamPortProbe.this.in.id).slot - internalEvent match { - case Failed(_, elem) => lastEvent += OnNext(DownstreamPortProbe.this, elem) - case elem => lastEvent += OnNext(DownstreamPortProbe.this, elem) + internalEvent match { + case Failed(_, elem) => lastEvent += OnNext(DownstreamPortProbe.this, elem) + case elem => lastEvent += OnNext(DownstreamPortProbe.this, elem) + } } - } - override def onUpstreamFinish() = lastEvent += OnComplete(DownstreamPortProbe.this) - override def onUpstreamFailure(ex: Throwable) = lastEvent += OnError(DownstreamPortProbe.this, ex) - }) + override def onUpstreamFinish() = lastEvent += OnComplete(DownstreamPortProbe.this) + override def onUpstreamFailure(ex: Throwable) = lastEvent += OnError(DownstreamPortProbe.this, ex) + }) } val (logics, connections) = @@ -527,18 +529,22 @@ trait GraphInterpreterSpecKit extends StreamSpec { } } - setHandler(stagein, new InHandler { - override def onPush(): Unit = mayFail(push(stageout, grab(stagein))) - override def onUpstreamFinish(): Unit = mayFail(completeStage()) - override def onUpstreamFailure(ex: Throwable): Unit = mayFail(failStage(ex)) - override def toString = "insideOutStage.stagein" - }) + setHandler( + stagein, + new InHandler { + override def onPush(): Unit = mayFail(push(stageout, grab(stagein))) + override def onUpstreamFinish(): Unit = mayFail(completeStage()) + override def onUpstreamFailure(ex: Throwable): Unit = 
mayFail(failStage(ex)) + override def toString = "insideOutStage.stagein" + }) - setHandler(stageout, new OutHandler { - override def onPull(): Unit = mayFail(pull(stagein)) - override def onDownstreamFinish(cause: Throwable): Unit = mayFail(completeStage()) - override def toString = "insideOutStage.stageout" - }) + setHandler( + stageout, + new OutHandler { + override def onPull(): Unit = mayFail(pull(stagein)) + override def onDownstreamFinish(cause: Throwable): Unit = mayFail(completeStage()) + override def toString = "insideOutStage.stageout" + }) override def preStart(): Unit = mayFail(lastEvent += PreStart(insideOutStage)) override def postStop(): Unit = @@ -643,16 +649,18 @@ trait GraphInterpreterSpecKit extends StreamSpec { val in = Inlet[TT]("in") in.id = 0 - setHandler(in, new InHandler { + setHandler( + in, + new InHandler { - // Modified onPush that does not grab() automatically the element. This accesses some internals. - override def onPush(): Unit = { - lastEvent += OnNext(grab(in)) - } + // Modified onPush that does not grab() automatically the element. This accesses some internals. 
+ override def onPush(): Unit = { + lastEvent += OnNext(grab(in)) + } - override def onUpstreamFinish() = lastEvent += OnComplete - override def onUpstreamFailure(ex: Throwable) = lastEvent += OnError(ex) - }) + override def onUpstreamFinish() = lastEvent += OnComplete + override def onUpstreamFailure(ex: Throwable) = lastEvent += OnError(ex) + }) def requestOne(): Unit = { pull(in) diff --git a/akka-stream-testkit/src/test/scala/akka/stream/testkit/ScriptedTest.scala b/akka-stream-testkit/src/test/scala/akka/stream/testkit/ScriptedTest.scala index de6e78a7e0d..080c5cf8ca5 100644 --- a/akka-stream-testkit/src/test/scala/akka/stream/testkit/ScriptedTest.scala +++ b/akka-stream-testkit/src/test/scala/akka/stream/testkit/ScriptedTest.scala @@ -120,8 +120,8 @@ trait ScriptedTest extends Matchers { def debug: String = s"Script(pending=($pendingIns in, $pendingOuts out), remainingIns=${providedInputs - .drop(inputCursor) - .mkString("/")}, remainingOuts=${expectedOutputs.drop(outputCursor).mkString("/")})" + .drop(inputCursor) + .mkString("/")}, remainingOuts=${expectedOutputs.drop(outputCursor).mkString("/")})" } class ScriptRunner[In, Out, M]( @@ -236,8 +236,8 @@ trait ScriptedTest extends Matchers { } @nowarn("msg=deprecated") - def runScript[In, Out, M](script: Script[In, Out])(op: Flow[In, In, NotUsed] => Flow[In, Out, M])( - implicit system: ActorSystem): Unit = + def runScript[In, Out, M](script: Script[In, Out])(op: Flow[In, In, NotUsed] => Flow[In, Out, M])(implicit + system: ActorSystem): Unit = runScript(script, SystemMaterializer(system).materializer.settings)(op)(system) def runScript[In, Out, M]( diff --git a/akka-stream-testkit/src/test/scala/akka/stream/testkit/StreamTestKitSpec.scala b/akka-stream-testkit/src/test/scala/akka/stream/testkit/StreamTestKitSpec.scala index cd2bbe00e51..ba96c5d89c9 100644 --- a/akka-stream-testkit/src/test/scala/akka/stream/testkit/StreamTestKitSpec.scala +++ 
b/akka-stream-testkit/src/test/scala/akka/stream/testkit/StreamTestKitSpec.scala @@ -103,16 +103,15 @@ class StreamTestKitSpec extends AkkaSpec { } "#expectNextPF should pass with right element" in { - val result = Source.single(1).runWith(TestSink()).request(1).expectNextPF { - case 1 => "success" + val result = Source.single(1).runWith(TestSink()).request(1).expectNextPF { case 1 => + "success" } result should be("success") } "#expectNextPF should fail with wrong element" in { intercept[AssertionError] { - Source.single(1).runWith(TestSink()).request(1).expectNextPF { - case 2 => + Source.single(1).runWith(TestSink()).request(1).expectNextPF { case 2 => } }.getMessage should include("message matching partial function") } @@ -128,17 +127,17 @@ class StreamTestKitSpec extends AkkaSpec { .tick(initialDelay, 1.millis, 1) .runWith(TestSink()) .request(1) - .expectNextWithTimeoutPF(timeout, { - case 1 => + .expectNextWithTimeoutPF( + timeout, + { case 1 => system.log.info("Message received :(") - }) + }) }.getMessage should include("timeout") } "#expectNextChainingPF should pass with right element" in { - Source.single(1).runWith(TestSink()).request(1).expectNextChainingPF { - case 1 => + Source.single(1).runWith(TestSink()).request(1).expectNextChainingPF { case 1 => } } @@ -146,16 +145,14 @@ class StreamTestKitSpec extends AkkaSpec { Source(1 to 2) .runWith(TestSink()) .request(2) - .expectNextChainingPF { - case 1 => + .expectNextChainingPF { case 1 => } .expectNext(2) } "#expectNextChainingPF should fail with wrong element" in { intercept[AssertionError] { - Source.single(1).runWith(TestSink()).request(1).expectNextChainingPF { - case 2 => + Source.single(1).runWith(TestSink()).request(1).expectNextChainingPF { case 2 => } }.getMessage should include("message matching partial function") } @@ -171,10 +168,11 @@ class StreamTestKitSpec extends AkkaSpec { .tick(initialDelay, 1.millis, 1) .runWith(TestSink()) .request(1) - .expectNextChainingPF(timeout, { - case 1 => + 
.expectNextChainingPF( + timeout, + { case 1 => system.log.info("Message received :(") - }) + }) }.getMessage should include("timeout") } diff --git a/akka-stream-testkit/src/test/scala/akka/stream/testkit/TestPublisherSubscriberSpec.scala b/akka-stream-testkit/src/test/scala/akka/stream/testkit/TestPublisherSubscriberSpec.scala index fcb26d96c4f..c2a1dda79c3 100644 --- a/akka-stream-testkit/src/test/scala/akka/stream/testkit/TestPublisherSubscriberSpec.scala +++ b/akka-stream-testkit/src/test/scala/akka/stream/testkit/TestPublisherSubscriberSpec.scala @@ -31,7 +31,7 @@ class TestPublisherSubscriberSpec extends AkkaSpec(""" upstreamSubscription.sendNext(1) downstreamSubscription.request(1) upstream.expectEventPF { case RequestMore(_, e) => e } should ===(1L) - downstream.expectEventPF { case OnNext(e) => e } should ===(1) + downstream.expectEventPF { case OnNext(e) => e } should ===(1) upstreamSubscription.sendNext(1) downstreamSubscription.request(1) @@ -53,7 +53,7 @@ class TestPublisherSubscriberSpec extends AkkaSpec(""" upstreamSubscription.sendNext(1) downstreamSubscription.request(1) - an[AssertionError] should be thrownBy upstream.expectEventPF { case Subscribe(e) => e } + an[AssertionError] should be thrownBy upstream.expectEventPF { case Subscribe(e) => e } an[AssertionError] should be thrownBy downstream.expectNextPF[String] { case e: String => e } upstreamSubscription.sendComplete() diff --git a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaPublisherVerification.scala b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaPublisherVerification.scala index 4c71b253153..b9bb556f9be 100644 --- a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaPublisherVerification.scala +++ b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaPublisherVerification.scala @@ -38,6 +38,7 @@ abstract class AkkaPublisherVerification[T](val env: TestEnvironment, publisherS def iterable(elements: Long): immutable.Iterable[Int] = if (elements > 
Int.MaxValue) - new immutable.Iterable[Int] { override def iterator = Iterator.from(0) } else + new immutable.Iterable[Int] { override def iterator = Iterator.from(0) } + else 0 until elements.toInt } diff --git a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/FanoutPublisherTest.scala b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/FanoutPublisherTest.scala index ff802fa9885..b5c0e8d00c6 100644 --- a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/FanoutPublisherTest.scala +++ b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/FanoutPublisherTest.scala @@ -15,7 +15,8 @@ class FanoutPublisherTest extends AkkaPublisherVerification[Int] { def createPublisher(elements: Long): Publisher[Int] = { val iterable: immutable.Iterable[Int] = - if (elements == 0) new immutable.Iterable[Int] { override def iterator = Iterator.from(0) } else + if (elements == 0) new immutable.Iterable[Int] { override def iterator = Iterator.from(0) } + else 0 until elements.toInt Source(iterable).runWith(Sink.asPublisher(true)) diff --git a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/ForeachSinkSubscriberTest.scala b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/ForeachSinkSubscriberTest.scala index 1f11cc65eed..9341d168ef8 100644 --- a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/ForeachSinkSubscriberTest.scala +++ b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/ForeachSinkSubscriberTest.scala @@ -11,10 +11,7 @@ import akka.stream.scaladsl._ class ForeachSinkSubscriberTest extends AkkaSubscriberBlackboxVerification[Int] { override def createSubscriber(): Subscriber[Int] = - Flow[Int] - .to(Sink.foreach { _ => - }) - .runWith(Source.asSubscriber) + Flow[Int].to(Sink.foreach { _ => }).runWith(Source.asSubscriber) override def createElement(element: Int): Int = element } diff --git a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/Timeouts.scala b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/Timeouts.scala index 
3f5b2a8b731..12f7e71f982 100644 --- a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/Timeouts.scala +++ b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/Timeouts.scala @@ -4,9 +4,7 @@ package akka.stream.tck -/** - * Specifies timeouts for the TCK - */ +/** Specifies timeouts for the TCK */ object Timeouts { def publisherShutdownTimeoutMillis: Int = 3000 diff --git a/akka-stream-tests/src/test/scala/akka/stream/DslConsistencySpec.scala b/akka-stream-tests/src/test/scala/akka/stream/DslConsistencySpec.scala index aeeee4a9f0e..87f96b0b772 100755 --- a/akka-stream-tests/src/test/scala/akka/stream/DslConsistencySpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/DslConsistencySpec.scala @@ -112,37 +112,36 @@ class DslConsistencySpec extends AnyWordSpec with Matchers { ("SubFlow" -> List[Class[_]](sSubFlowClass, jSubFlowClass)) :: ("Sink" -> List[Class[_]](sSinkClass, jSinkClass)) :: ("RunnableFlow" -> List[Class[_]](sRunnableGraphClass, jRunnableGraphClass)) :: - Nil).foreach { - case (element, classes) => - s"provide same $element transforming operators" in { - val allOps = - (for { - c <- classes - m <- c.getMethods - if !Modifier.isStatic(m.getModifiers) - if !ignore(m.getName) - if !m.getName.contains("$") - if !materializing(m) - } yield m.getName).toSet - - for (c <- classes; op <- allOps) - assertHasMethod(c, op) - } - - s"provide same $element materializing operators" in { - val materializingOps = - (for { - c <- classes - m <- c.getMethods - if !Modifier.isStatic(m.getModifiers) - if !ignore(m.getName) - if !m.getName.contains("$") - if materializing(m) - } yield m.getName).toSet - - for (c <- classes; op <- materializingOps) - assertHasMethod(c, op) - } + Nil).foreach { case (element, classes) => + s"provide same $element transforming operators" in { + val allOps = + (for { + c <- classes + m <- c.getMethods + if !Modifier.isStatic(m.getModifiers) + if !ignore(m.getName) + if !m.getName.contains("$") + if !materializing(m) + } yield 
m.getName).toSet + + for (c <- classes; op <- allOps) + assertHasMethod(c, op) + } + + s"provide same $element materializing operators" in { + val materializingOps = + (for { + c <- classes + m <- c.getMethods + if !Modifier.isStatic(m.getModifiers) + if !ignore(m.getName) + if !m.getName.contains("$") + if materializing(m) + } yield m.getName).toSet + + for (c <- classes; op <- materializingOps) + assertHasMethod(c, op) + } } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/DslFactoriesConsistencySpec.scala b/akka-stream-tests/src/test/scala/akka/stream/DslFactoriesConsistencySpec.scala index 93980a6e934..b8ed6d6f816 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/DslFactoriesConsistencySpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/DslFactoriesConsistencySpec.scala @@ -152,21 +152,40 @@ class DslFactoriesConsistencySpec extends AnyWordSpec with Matchers { Ignore(_ == akka.stream.scaladsl.Sink.getClass, _ == "apply", _ == 24, _ => true), Ignore(_ == akka.stream.scaladsl.Sink.getClass, _ == "collection", _ => true, _ => true), Ignore(_ == akka.stream.scaladsl.Sink.getClass, _ == "actorRef", _ => true, _ => true), // Internal in scaladsl - Ignore(_ == akka.stream.scaladsl.Sink.getClass, _ == "actorRefWithAck", _ => true, _ => true), // Internal in scaladsl - Ignore(_ == akka.stream.scaladsl.Sink.getClass, _ == "actorRefWithBackpressure", _ => true, _ => true), // Internal in scaladsl + Ignore( + _ == akka.stream.scaladsl.Sink.getClass, + _ == "actorRefWithAck", + _ => true, + _ => true + ), // Internal in scaladsl + Ignore( + _ == akka.stream.scaladsl.Sink.getClass, + _ == "actorRefWithBackpressure", + _ => true, + _ => true + ), // Internal in scaladsl Ignore(_ == akka.stream.scaladsl.Source.getClass, _ == "actorRef", _ => true, _ => true), // Internal in scaladsl - Ignore(_ == akka.stream.scaladsl.Source.getClass, _ == "actorRefWithAck", _ => true, _ => true), // Internal in scaladsl - Ignore(_ == 
akka.stream.scaladsl.Source.getClass, _ == "actorRefWithBackpressure", _ => true, _ => true), // Internal in scaladsl + Ignore( + _ == akka.stream.scaladsl.Source.getClass, + _ == "actorRefWithAck", + _ => true, + _ => true + ), // Internal in scaladsl + Ignore( + _ == akka.stream.scaladsl.Source.getClass, + _ == "actorRefWithBackpressure", + _ => true, + _ => true + ), // Internal in scaladsl Ignore(_ == akka.stream.scaladsl.BidiFlow.getClass, _ == "apply", _ == 24, _ => true), Ignore(_ == akka.stream.scaladsl.GraphDSL.getClass, _ == "runnable", _ == 24, _ => true), Ignore(_ == akka.stream.scaladsl.GraphDSL.getClass, _ == "create", _ == 24, _ => true), // all generated methods like scaladsl.Sink$.akka$stream$scaladsl$Sink$$newOnCompleteStage$1 Ignore(_ => true, _.contains("$"), _ => true, _ => true)) - ignores.foldLeft(false) { - case (acc, i) => - acc || (i.cls(m.declaringClass) && i.name(m.name) && i.parameters(m.parameterTypes.length) && i.paramTypes( - m.parameterTypes)) + ignores.foldLeft(false) { case (acc, i) => + acc || (i.cls(m.declaringClass) && i.name(m.name) && i.parameters(m.parameterTypes.length) && i.paramTypes( + m.parameterTypes)) } } @@ -176,13 +195,11 @@ class DslFactoriesConsistencySpec extends AnyWordSpec with Matchers { * runnableN => runnable * createN => create */ - private val unspecializeName: PartialFunction[Method, Method] = { - case m => m.copy(name = m.name.filter(Character.isLetter)) + private val unspecializeName: PartialFunction[Method, Method] = { case m => + m.copy(name = m.name.filter(Character.isLetter)) } - /** - * Adapt java side non curried functions to scala side like - */ + /** Adapt java side non curried functions to scala side like */ private val curryLikeJava: PartialFunction[Method, Method] = { case m if m.parameterTypes.size > 1 => m.copy( diff --git a/akka-stream-tests/src/test/scala/akka/stream/FusingSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/FusingSpec.scala index f291cca337b..5a8ba38155e 100644 --- 
a/akka-stream-tests/src/test/scala/akka/stream/FusingSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/FusingSpec.scala @@ -47,7 +47,7 @@ class FusingSpec extends StreamSpec { .futureValue .sorted should ===(0 to 9) val refs = receiveN(20) - refs.toSet should have size (11) // main flow + 10 subflows + refs.toSet should have size 11 // main flow + 10 subflows } "use multiple actors when there are asynchronous boundaries in the subflows (operator)" in { @@ -59,7 +59,7 @@ class FusingSpec extends StreamSpec { .futureValue .sorted should ===(0 to 9) val refs = receiveN(20) - refs.toSet should have size (11) // main flow + 10 subflows + refs.toSet should have size 11 // main flow + 10 subflows } "use one actor per grouped substream when there is an async boundary around the flow (manual)" in { @@ -92,7 +92,7 @@ class FusingSpec extends StreamSpec { refs.toSet should have size (in.size + 1) // outer/main actor + 1 actor per subflow } - //an UnfoldResourceSource equivalent without an async boundary + // an UnfoldResourceSource equivalent without an async boundary case class UnfoldResourceNoAsyncBoundry[T, S](create: () => S, readData: (S) => Option[T], close: (S) => Unit) extends GraphStage[SourceShape[T]] { val stage_ = new UnfoldResourceSource(create, readData, close) @@ -107,7 +107,7 @@ class FusingSpec extends StreamSpec { val slowInitSrc = UnfoldResourceNoAsyncBoundry( () => { Await.result(promise.future, 1.minute); () }, (_: Unit) => Some(1), - (_: Unit) => ()).asSource.watchTermination()(Keep.right).async //commenting this out, makes the test pass + (_: Unit) => ()).asSource.watchTermination()(Keep.right).async // commenting this out, makes the test pass val downstream = Flow[Int] .prepend(Source.single(1)) .flatMapPrefix(0) { @@ -122,13 +122,13 @@ class FusingSpec extends StreamSpec { val (f1, f2) = g.run() f2.failed.futureValue shouldEqual TE("I hate mondays") f1.value should be(empty) - //by now downstream managed to fail, hence it already processed 
the message from Flow.single, - //hence we know for sure that all graph stage locics in the downstream interpreter were initialized(=preStart) - //hence upstream subscription was initiated. - //since we're still blocking upstream's preStart we know for sure it didn't respond to the subscription request - //since a blocked actor can not process additional messages from its inbox. - //so long story short: downstream was able to initialize, subscribe and fail before upstream responded to the subscription request. - //prior to akka#29194, this scenario resulted with cancellation signal rather than the expected error signal. + // by now downstream managed to fail, hence it already processed the message from Flow.single, + // hence we know for sure that all graph stage locics in the downstream interpreter were initialized(=preStart) + // hence upstream subscription was initiated. + // since we're still blocking upstream's preStart we know for sure it didn't respond to the subscription request + // since a blocked actor can not process additional messages from its inbox. + // so long story short: downstream was able to initialize, subscribe and fail before upstream responded to the subscription request. + // prior to akka#29194, this scenario resulted with cancellation signal rather than the expected error signal. 
promise.success(Done) f1.failed.futureValue shouldEqual TE("I hate mondays") } @@ -138,7 +138,7 @@ class FusingSpec extends StreamSpec { val slowInitSrc = UnfoldResourceNoAsyncBoundry( () => { Await.result(promise.future, 1.minute); () }, (_: Unit) => Some(1), - (_: Unit) => ()).asSource.watchTermination()(Keep.right).async //commenting this out, makes the test pass + (_: Unit) => ()).asSource.watchTermination()(Keep.right).async // commenting this out, makes the test pass val failingSrc = Source.failed(TE("I hate mondays")).watchTermination()(Keep.right) @@ -147,13 +147,13 @@ class FusingSpec extends StreamSpec { val (f1, f2) = g.run() f2.failed.futureValue shouldEqual TE("I hate mondays") f1.value should be(empty) - //by now downstream managed to fail, hence it already processed the message from Flow.single, - //hence we know for sure that all graph stage locics in the downstream interpreter were initialized(=preStart) - //hence upstream subscription was initiated. - //since we're still blocking upstream's preStart we know for sure it didn't respond to the subscription request - //since a blocked actor can not process additional messages from its inbox. - //so long story short: downstream was able to initialize, subscribe and fail before upstream responded to the subscription request. - //prior to akka#29194, this scenario resulted with cancellation signal rather than the expected error signal. + // by now downstream managed to fail, hence it already processed the message from Flow.single, + // hence we know for sure that all graph stage locics in the downstream interpreter were initialized(=preStart) + // hence upstream subscription was initiated. + // since we're still blocking upstream's preStart we know for sure it didn't respond to the subscription request + // since a blocked actor can not process additional messages from its inbox. 
+ // so long story short: downstream was able to initialize, subscribe and fail before upstream responded to the subscription request. + // prior to akka#29194, this scenario resulted with cancellation signal rather than the expected error signal. promise.success(Done) f1.failed.futureValue shouldEqual TE("I hate mondays") } diff --git a/akka-stream-tests/src/test/scala/akka/stream/MaterializerWithAttributesSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/MaterializerWithAttributesSpec.scala index 702352804ea..44cd9762ad9 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/MaterializerWithAttributesSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/MaterializerWithAttributesSpec.scala @@ -55,17 +55,16 @@ class MaterializerWithAttributesSpec } class StreamActor extends Actor { - def receive = { - case "do it!" => - implicit val materializer = Materializer(context, Attributes.name("bar")) + def receive = { case "do it!" => + implicit val materializer = Materializer(context, Attributes.name("bar")) - val firstAttributes = Source.fromGraph(attributesSource).to(Sink.ignore).run() - val secondAttributes = Source.fromGraph(attributesSource).to(Sink.ignore).run()(Materializer(context)) + val firstAttributes = Source.fromGraph(attributesSource).to(Sink.ignore).run() + val secondAttributes = Source.fromGraph(attributesSource).to(Sink.ignore).run()(Materializer(context)) - sender() ! firstAttributes - sender() ! secondAttributes + sender() ! firstAttributes + sender() ! 
secondAttributes - context.stop(self) + context.stop(self) } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/StreamAttributeDocSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/StreamAttributeDocSpec.scala index 809a141f8c9..f95962fec8a 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/StreamAttributeDocSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/StreamAttributeDocSpec.scala @@ -29,8 +29,8 @@ class StreamAttributeDocSpec extends StreamSpec("my-stream-dispatcher = \"akka.t .map(_.toString) .toMat(Sink.foreach(println))(Keep.right) .withAttributes(Attributes.inputBuffer(4, 4) and - ActorAttributes.dispatcher("my-stream-dispatcher") and - TcpAttributes.tcpWriteBufferSize(2048)) + ActorAttributes.dispatcher("my-stream-dispatcher") and + TcpAttributes.tcpWriteBufferSize(2048)) stream.run() // #attributes-on-stream diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/ChainedBufferSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/ChainedBufferSpec.scala index 4173126f402..fec5b32be8a 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/ChainedBufferSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/ChainedBufferSpec.scala @@ -136,7 +136,7 @@ class ChainedBufferSpec extends StreamSpec { (1 to numEnqueues).foreach(chainedBuffer.enqueue) // sum of 1 .. 
numEnqueues - val allSum = (numEnqueues * (numEnqueues + 1) / 2) + val allSum = numEnqueues * (numEnqueues + 1) / 2 if (numEnqueues < headCapacity) { head.isFull shouldBe false diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/GraphStageLogicSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/GraphStageLogicSpec.scala index 6cd9c2449e5..ab1004f046f 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/GraphStageLogicSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/GraphStageLogicSpec.scala @@ -61,17 +61,21 @@ class GraphStageLogicSpec extends StreamSpec with GraphInterpreterSpecKit with S val out = Outlet[Int]("out") override val shape = FlowShape(in, out) override def createLogic(attr: Attributes) = new GraphStageLogic(shape) { - setHandler(in, new InHandler { - override def onPush(): Unit = push(out, grab(in)) - override def onUpstreamFinish(): Unit = { - emit(out, 5, () => emit(out, 6)) - emit(out, 7, () => emit(out, 8)) - completeStage() - } - }) - setHandler(out, new OutHandler { - override def onPull(): Unit = pull(in) - }) + setHandler( + in, + new InHandler { + override def onPush(): Unit = push(out, grab(in)) + override def onUpstreamFinish(): Unit = { + emit(out, 5, () => emit(out, 6)) + emit(out, 7, () => emit(out, 8)) + completeStage() + } + }) + setHandler( + out, + new OutHandler { + override def onPull(): Unit = pull(in) + }) } } @@ -80,15 +84,19 @@ class GraphStageLogicSpec extends StreamSpec with GraphInterpreterSpecKit with S val out = Outlet[Int]("out") override val shape = FlowShape(in, out) override def createLogic(attr: Attributes) = new GraphStageLogic(shape) { - setHandler(in, new InHandler { - override def onPush(): Unit = push(out, grab(in)) - override def onUpstreamFinish(): Unit = complete(out) - override def toString = "InHandler" - }) - setHandler(out, new OutHandler { - override def onPull(): Unit = pull(in) - override def toString = "OutHandler" - }) + setHandler( + in, + new 
InHandler { + override def onPush(): Unit = push(out, grab(in)) + override def onUpstreamFinish(): Unit = complete(out) + override def toString = "InHandler" + }) + setHandler( + out, + new OutHandler { + override def onPull(): Unit = pull(in) + override def toString = "OutHandler" + }) override def toString = "GraphStageLogicSpec.passthroughLogic" } override def toString = "GraphStageLogicSpec.passthrough" @@ -99,9 +107,11 @@ class GraphStageLogicSpec extends StreamSpec with GraphInterpreterSpecKit with S override val shape = SourceShape(out) override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { - setHandler(out, new OutHandler { - override def onPull(): Unit = emitMultiple(out, Iterator.empty, () => emit(out, 42, () => completeStage())) - }) + setHandler( + out, + new OutHandler { + override def onPull(): Unit = emitMultiple(out, Iterator.empty, () => emit(out, 42, () => completeStage())) + }) } override def toString = "GraphStageLogicSpec.emitEmptyIterable" } @@ -114,7 +124,7 @@ class GraphStageLogicSpec extends StreamSpec with GraphInterpreterSpecKit with S setHandler(shape.in, EagerTerminateInput) setHandler(shape.out, EagerTerminateOutput) override def preStart(): Unit = - readN(shape.in, n)(e => emitMultiple(shape.out, e.iterator, () => completeStage()), (_) => ()) + readN(shape.in, n)(e => emitMultiple(shape.out, e.iterator, () => completeStage()), _ => ()) } } @@ -168,7 +178,7 @@ class GraphStageLogicSpec extends StreamSpec with GraphInterpreterSpecKit with S .runWith(TestSink()) .request(5) .expectNext(1) - //emitting with callback gives nondeterminism whether 2 or 3 will be pushed first + // emitting with callback gives nondeterminism whether 2 or 3 will be pushed first .expectNextUnordered(2, 3) .expectNext(4) .expectComplete() @@ -188,12 +198,14 @@ class GraphStageLogicSpec extends StreamSpec with GraphInterpreterSpecKit with S override val shape = FlowShape(in, out) override def createLogic(attr: 
Attributes) = new GraphStageLogic(shape) { setHandler(in, eagerTerminateInput) - setHandler(out, new OutHandler { - override def onPull(): Unit = { - completeStage() - testActor ! "pulled" - } - }) + setHandler( + out, + new OutHandler { + override def onPull(): Unit = { + completeStage() + testActor ! "pulled" + } + }) override def preStart(): Unit = testActor ! "preStart" override def postStop(): Unit = testActor ! "postStop" } @@ -295,9 +307,11 @@ class GraphStageLogicSpec extends StreamSpec with GraphInterpreterSpecKit with S override def createLogic(inheritedAttributes: Attributes) = new GraphStageLogic(shape) { - setHandler(in, new InHandler { - override def onPush() = ??? - }) + setHandler( + in, + new InHandler { + override def onPush() = ??? + }) // ups we forgot the out handler } @@ -321,9 +335,11 @@ class GraphStageLogicSpec extends StreamSpec with GraphInterpreterSpecKit with S override def createLogic(inheritedAttributes: Attributes) = new GraphStageLogic(shape) { - setHandler(in, new InHandler { - override def onPush() = ??? - }) + setHandler( + in, + new InHandler { + override def onPush() = ??? + }) // ups we forgot the out handler } @@ -346,9 +362,11 @@ class GraphStageLogicSpec extends StreamSpec with GraphInterpreterSpecKit with S override def createLogic(inheritedAttributes: Attributes) = new GraphStageLogic(shape) { - setHandler(in, new InHandler { - override def onPush() = ??? - }) + setHandler( + in, + new InHandler { + override def onPush() = ??? + }) // ups we forgot the out handler } @@ -370,9 +388,11 @@ class GraphStageLogicSpec extends StreamSpec with GraphInterpreterSpecKit with S override def createLogic(inheritedAttributes: Attributes) = new GraphStageLogic(shape) { - setHandler(in, new InHandler { - override def onPush() = ??? - }) + setHandler( + in, + new InHandler { + override def onPush() = ??? 
+ }) // ups we forgot the out handler } diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/SubInletOutletSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/SubInletOutletSpec.scala index f5cc3a9030b..7a213385165 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/SubInletOutletSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/SubInletOutletSpec.scala @@ -79,12 +79,14 @@ class SubInletOutletSpec extends StreamSpec { } }) - setHandler(out, new OutHandler { - override def onPull(): Unit = { - if (!subIn.hasBeenPulled) - subIn.pull() - } - }) + setHandler( + out, + new OutHandler { + override def onPull(): Unit = { + if (!subIn.hasBeenPulled) + subIn.pull() + } + }) } } @@ -164,18 +166,20 @@ class SubInletOutletSpec extends StreamSpec { }) } - setHandler(in, new InHandler { - override def onPush(): Unit = { - val elem = grab(in) - elem match { - case "completeStage" => completeStage() - case "cancelStage" => cancelStage(NoMoreElementsNeeded) - case "failStage" => failStage(TE("boom")) - case "completeAll" => cancel(in) - case other => subOut.push(other) + setHandler( + in, + new InHandler { + override def onPush(): Unit = { + val elem = grab(in) + elem match { + case "completeStage" => completeStage() + case "cancelStage" => cancelStage(NoMoreElementsNeeded) + case "failStage" => failStage(TE("boom")) + case "completeAll" => cancel(in) + case other => subOut.push(other) + } } - } - }) + }) } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/TimeoutsSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/TimeoutsSpec.scala index 904d45e2872..2865151aea8 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/TimeoutsSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/TimeoutsSpec.scala @@ -126,8 +126,9 @@ class TimeoutsSpec extends StreamSpec { "BackpressureTimeout" must { "pass through elements unmodified" in { - Await.result(Source(1 to 
100).backpressureTimeout(1.second).grouped(200).runWith(Sink.head), 3.seconds) should ===( - 1 to 100) + Await.result( + Source(1 to 100).backpressureTimeout(1.second).grouped(200).runWith(Sink.head), + 3.seconds) should ===(1 to 100) } "succeed if subscriber demand arrives" in { diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/ActorGraphInterpreterSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/ActorGraphInterpreterSpec.scala index b15d8e057d8..a656b1784ed 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/ActorGraphInterpreterSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/ActorGraphInterpreterSpec.scala @@ -57,25 +57,33 @@ class ActorGraphInterpreterSpec extends StreamSpec { val shape = BidiShape(in1, out1, in2, out2) override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { - setHandler(in1, new InHandler { - override def onPush(): Unit = push(out1, grab(in1)) - override def onUpstreamFinish(): Unit = complete(out1) - }) + setHandler( + in1, + new InHandler { + override def onPush(): Unit = push(out1, grab(in1)) + override def onUpstreamFinish(): Unit = complete(out1) + }) - setHandler(in2, new InHandler { - override def onPush(): Unit = push(out2, grab(in2)) - override def onUpstreamFinish(): Unit = complete(out2) - }) + setHandler( + in2, + new InHandler { + override def onPush(): Unit = push(out2, grab(in2)) + override def onUpstreamFinish(): Unit = complete(out2) + }) - setHandler(out1, new OutHandler { - override def onPull(): Unit = pull(in1) - override def onDownstreamFinish(cause: Throwable): Unit = cancel(in1, cause) - }) + setHandler( + out1, + new OutHandler { + override def onPull(): Unit = pull(in1) + override def onDownstreamFinish(cause: Throwable): Unit = cancel(in1, cause) + }) - setHandler(out2, new OutHandler { - override def onPull(): Unit = pull(in2) - override def onDownstreamFinish(cause: Throwable): Unit 
= cancel(in2, cause) - }) + setHandler( + out2, + new OutHandler { + override def onPull(): Unit = pull(in2) + override def onDownstreamFinish(cause: Throwable): Unit = cancel(in2, cause) + }) } override def toString = "IdentityBidi" @@ -100,29 +108,37 @@ class ActorGraphInterpreterSpec extends StreamSpec { val shape = BidiShape(in1, out1, in2, out2) override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { - setHandler(in1, new InHandler { - override def onPush(): Unit = push(out1, grab(in1)) + setHandler( + in1, + new InHandler { + override def onPush(): Unit = push(out1, grab(in1)) - override def onUpstreamFinish(): Unit = complete(out1) - }) + override def onUpstreamFinish(): Unit = complete(out1) + }) - setHandler(in2, new InHandler { - override def onPush(): Unit = push(out2, grab(in2)) + setHandler( + in2, + new InHandler { + override def onPush(): Unit = push(out2, grab(in2)) - override def onUpstreamFinish(): Unit = complete(out2) - }) + override def onUpstreamFinish(): Unit = complete(out2) + }) - setHandler(out1, new OutHandler { - override def onPull(): Unit = pull(in1) + setHandler( + out1, + new OutHandler { + override def onPull(): Unit = pull(in1) - override def onDownstreamFinish(cause: Throwable): Unit = cancel(in1, cause) - }) + override def onDownstreamFinish(cause: Throwable): Unit = cancel(in1, cause) + }) - setHandler(out2, new OutHandler { - override def onPull(): Unit = pull(in2) + setHandler( + out2, + new OutHandler { + override def onPull(): Unit = pull(in2) - override def onDownstreamFinish(cause: Throwable): Unit = cancel(in2, cause) - }) + override def onDownstreamFinish(cause: Throwable): Unit = cancel(in2, cause) + }) } override def toString = "IdentityBidi" @@ -149,29 +165,37 @@ class ActorGraphInterpreterSpec extends StreamSpec { val shape = BidiShape(in1, out1, in2, out2) override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { - 
setHandler(in1, new InHandler { - override def onPush(): Unit = push(out1, grab(in1)) + setHandler( + in1, + new InHandler { + override def onPush(): Unit = push(out1, grab(in1)) - override def onUpstreamFinish(): Unit = complete(out1) - }) + override def onUpstreamFinish(): Unit = complete(out1) + }) - setHandler(in2, new InHandler { - override def onPush(): Unit = push(out2, grab(in2)) + setHandler( + in2, + new InHandler { + override def onPush(): Unit = push(out2, grab(in2)) - override def onUpstreamFinish(): Unit = complete(out2) - }) + override def onUpstreamFinish(): Unit = complete(out2) + }) - setHandler(out1, new OutHandler { - override def onPull(): Unit = pull(in1) + setHandler( + out1, + new OutHandler { + override def onPull(): Unit = pull(in1) - override def onDownstreamFinish(cause: Throwable): Unit = cancel(in1, cause) - }) + override def onDownstreamFinish(cause: Throwable): Unit = cancel(in1, cause) + }) - setHandler(out2, new OutHandler { - override def onPull(): Unit = pull(in2) + setHandler( + out2, + new OutHandler { + override def onPull(): Unit = pull(in2) - override def onDownstreamFinish(cause: Throwable): Unit = cancel(in2, cause) - }) + override def onDownstreamFinish(cause: Throwable): Unit = cancel(in2, cause) + }) } override def toString = "IdentityBidi" @@ -201,29 +225,37 @@ class ActorGraphInterpreterSpec extends StreamSpec { val shape = BidiShape(in1, out1, in2, out2) override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { - setHandler(in1, new InHandler { - override def onPush(): Unit = push(out2, grab(in1)) + setHandler( + in1, + new InHandler { + override def onPush(): Unit = push(out2, grab(in1)) - override def onUpstreamFinish(): Unit = complete(out2) - }) + override def onUpstreamFinish(): Unit = complete(out2) + }) - setHandler(in2, new InHandler { - override def onPush(): Unit = push(out1, grab(in2)) + setHandler( + in2, + new InHandler { + override def onPush(): Unit = 
push(out1, grab(in2)) - override def onUpstreamFinish(): Unit = complete(out1) - }) + override def onUpstreamFinish(): Unit = complete(out1) + }) - setHandler(out1, new OutHandler { - override def onPull(): Unit = pull(in2) + setHandler( + out1, + new OutHandler { + override def onPull(): Unit = pull(in2) - override def onDownstreamFinish(cause: Throwable): Unit = cancel(in2, cause) - }) + override def onDownstreamFinish(cause: Throwable): Unit = cancel(in2, cause) + }) - setHandler(out2, new OutHandler { - override def onPull(): Unit = pull(in1) + setHandler( + out2, + new OutHandler { + override def onPull(): Unit = pull(in1) - override def onDownstreamFinish(cause: Throwable): Unit = cancel(in1, cause) - }) + override def onDownstreamFinish(cause: Throwable): Unit = cancel(in1, cause) + }) } override def toString = "IdentityBidi" @@ -256,13 +288,15 @@ class ActorGraphInterpreterSpec extends StreamSpec { override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { - setHandler(shape.out, new OutHandler { - override def onPull(): Unit = { - completeStage() - // This cannot be propagated now since the stage is already closed - push(shape.out, -1) - } - }) + setHandler( + shape.out, + new OutHandler { + override def onPull(): Unit = { + completeStage() + // This cannot be propagated now since the stage is already closed + push(shape.out, -1) + } + }) } } @@ -304,8 +338,8 @@ class ActorGraphInterpreterSpec extends StreamSpec { fail(shape.out0, te) } - setHandler(shape.out0, ignoreTerminateOutput) //We fail in preStart anyway - setHandler(shape.out1, ignoreTerminateOutput) //We fail in preStart anyway + setHandler(shape.out0, ignoreTerminateOutput) // We fail in preStart anyway + setHandler(shape.out1, ignoreTerminateOutput) // We fail in preStart anyway passAlong(shape.in, shape.out1) } } @@ -408,12 +442,16 @@ class ActorGraphInterpreterSpec extends StreamSpec { object PostStopSnitchFlow extends 
SimpleLinearGraphStage[String] { override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { - setHandler(in, new InHandler { - override def onPush(): Unit = push(out, grab(in)) - }) - setHandler(out, new OutHandler { - override def onPull(): Unit = pull(in) - }) + setHandler( + in, + new InHandler { + override def onPush(): Unit = push(out, grab(in)) + }) + setHandler( + out, + new OutHandler { + override def onPull(): Unit = pull(in) + }) override def postStop(): Unit = { gotStop.countDown() diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/AsyncCallbackSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/AsyncCallbackSpec.scala index 30db0354f3f..8d4c5d4f4c8 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/AsyncCallbackSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/AsyncCallbackSpec.scala @@ -59,17 +59,20 @@ class AsyncCallbackSpec extends AkkaSpec(""" probe ! Stopped } - setHandlers(in, out, new InHandler with OutHandler { - def onPush(): Unit = { - val n = grab(in) - probe ! Elem(n) - push(out, n) - } - - def onPull(): Unit = { - pull(in) - } - }) + setHandlers( + in, + out, + new InHandler with OutHandler { + def onPush(): Unit = { + val n = grab(in) + probe ! 
Elem(n) + push(out, n) + } + + def onPull(): Unit = { + pull(in) + } + }) } (logic, logic.callback) @@ -203,8 +206,8 @@ class AsyncCallbackSpec extends AkkaSpec(""" } probe.expectMsg(Started) - Future.sequence(feedbacks).futureValue should have size (100) - (1 to 100).map(_ => probe.expectMsgType[String]).toSet should have size (100) + Future.sequence(feedbacks).futureValue should have size 100 + (1 to 100).map(_ => probe.expectMsgType[String]).toSet should have size 100 in.sendComplete() probe.expectMsg(Stopped) @@ -250,9 +253,11 @@ class AsyncCallbackSpec extends AkkaSpec(""" def createLogicAndMaterializedValue(inheritedAttributes: Attributes) = { val logic = new GraphStageLogicWithAsyncCallback(shape) { override val callbacks = (0 to 10).map(_ => getAsyncCallback[AnyRef](probe ! _)).toSet - setHandler(out, new OutHandler { - def onPull(): Unit = () - }) + setHandler( + out, + new OutHandler { + def onPull(): Unit = () + }) } (logic, logic.callbacks) } diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/GraphInterpreterPortsSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/GraphInterpreterPortsSpec.scala index 95ba96813d9..a7188c4fdab 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/GraphInterpreterPortsSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/GraphInterpreterPortsSpec.scala @@ -704,7 +704,8 @@ class GraphInterpreterPortsSpec extends StreamSpec with GraphInterpreterSpecKit an[IllegalArgumentException] should be thrownBy { in.grab() } } - s"ignore any completion if they are concurrent (cancel first) (chasing = $chasing)" in new PortTestSetup(chasing) { + s"ignore any completion if they are concurrent (cancel first) (chasing = $chasing)" in new PortTestSetup( + chasing) { in.cancel() out.complete() diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/InterpreterSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/InterpreterSpec.scala index 
7e1d26a8b24..d5a8af690fb 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/InterpreterSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/InterpreterSpec.scala @@ -449,7 +449,8 @@ class InterpreterSpec extends StreamSpec with GraphInterpreterSpecKit { } - "work with pushAndFinish if upstream completes with pushAndFinish" in new OneBoundedSetup[Int](new PushFinishStage) { + "work with pushAndFinish if upstream completes with pushAndFinish" in new OneBoundedSetup[Int]( + new PushFinishStage) { lastEvents() should be(Set.empty) diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/KeepGoingStageSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/KeepGoingStageSpec.scala index 6370043fe1e..28ae3e0f35f 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/KeepGoingStageSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/KeepGoingStageSpec.scala @@ -76,12 +76,14 @@ class KeepGoingStageSpec extends StreamSpec { } finally listener.foreach(_ ! EndOfEventHandler) } - setHandler(shape.in, new InHandler { - override def onPush(): Unit = pull(shape.in) - - // Ignore finish - override def onUpstreamFinish(): Unit = listener.foreach(_ ! UpstreamCompleted) - }) + setHandler( + shape.in, + new InHandler { + override def onPush(): Unit = pull(shape.in) + + // Ignore finish + override def onUpstreamFinish(): Unit = listener.foreach(_ ! UpstreamCompleted) + }) override def postStop(): Unit = listener.foreach(_ ! 
PostStop) } diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/FileSinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/io/FileSinkSpec.scala index dcb6262c23e..276a88f4f5f 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/io/FileSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/io/FileSinkSpec.scala @@ -55,13 +55,15 @@ class FileSinkSpec extends StreamSpec(UnboundedMailboxConfig) with ScalaFutures } "create new file if not exists" in { - targetFile({ f => - val completion = Source(TestByteStrings).runWith(FileIO.toPath(f)) - - val result = Await.result(completion, 3.seconds) - result.count should equal(6006) - checkFileContents(f, TestLines.mkString("")) - }, create = false) + targetFile( + { f => + val completion = Source(TestByteStrings).runWith(FileIO.toPath(f)) + + val result = Await.result(completion, 3.seconds) + result.count should equal(6006) + checkFileContents(f, TestLines.mkString("")) + }, + create = false) } "write into existing file without wiping existing data" in { @@ -195,7 +197,7 @@ class FileSinkSpec extends StreamSpec(UnboundedMailboxConfig) with ScalaFutures } "write single line to a file from lazy sink" in { - //LazySink must wait for result of initialization even if got upstreamComplete + // LazySink must wait for result of initialization even if got upstreamComplete targetFile { f => val completion = Source(List(TestByteStrings.head)).runWith( Sink diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/InputStreamSinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/io/InputStreamSinkSpec.scala index 247e264872f..fa9c020e42e 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/io/InputStreamSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/io/InputStreamSinkSpec.scala @@ -158,7 +158,7 @@ class InputStreamSinkSpec extends StreamSpec(UnboundedMailboxConfig) { val sinkProbe = TestProbe() val inputStream = Source[ByteString](bytes).runWith(testSink(sinkProbe)) - 
//need to wait while all elements arrive to sink + // need to wait while all elements arrive to sink bytes.foreach { _ => sinkProbe.expectMsg(GraphStageMessages.Push) } @@ -175,7 +175,7 @@ class InputStreamSinkSpec extends StreamSpec(UnboundedMailboxConfig) { val sinkProbe = TestProbe() val inputStream = Source(bytes1 :: bytes2 :: Nil).runWith(testSink(sinkProbe)) - //need to wait while both elements arrive to sink + // need to wait while both elements arrive to sink sinkProbe.expectMsgAllOf(GraphStageMessages.Push, GraphStageMessages.Push) readN(inputStream, 15) should ===((15, bytes1 ++ bytes2.take(5))) @@ -233,7 +233,7 @@ class InputStreamSinkSpec extends StreamSpec(UnboundedMailboxConfig) { itself throws an exception when being materialized. If Source.empty is used, the same exception is thrown by Materializer. - */ + */ } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/OutputStreamSourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/io/OutputStreamSourceSpec.scala index d95f90ba3d8..9a1153eb464 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/io/OutputStreamSourceSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/io/OutputStreamSourceSpec.scala @@ -44,12 +44,11 @@ class OutputStreamSourceSpec extends StreamSpec(UnboundedMailboxConfig) { ManagementFactory.getThreadMXBean .dumpAllThreads(true, true) .toSeq - .filter( - t => - t.getThreadName.startsWith("OutputStreamSourceSpec") && - t.getLockName != null && - t.getLockName.startsWith("java.util.concurrent.locks.AbstractQueuedSynchronizer") && - t.getStackTrace.exists(s => s.getClassName.startsWith(classOf[OutputStreamSourceStage].getName))) + .filter(t => + t.getThreadName.startsWith("OutputStreamSourceSpec") && + t.getLockName != null && + t.getLockName.startsWith("java.util.concurrent.locks.AbstractQueuedSynchronizer") && + t.getStackTrace.exists(s => s.getClassName.startsWith(classOf[OutputStreamSourceStage].getName))) awaitAssert(threadsBlocked should ===(Seq()), 
5.seconds, interval = 500.millis) } @@ -110,7 +109,7 @@ class OutputStreamSourceSpec extends StreamSpec(UnboundedMailboxConfig) { outputStream.write(bytesArray) } - //blocked call + // blocked call val f = Future(outputStream.write(bytesArray)) expectTimeout(f, timeout) @@ -157,7 +156,7 @@ class OutputStreamSourceSpec extends StreamSpec(UnboundedMailboxConfig) { itself throws an exception when being materialized. If Sink.ignore is used, the same exception is thrown by Materializer. - */ + */ } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/TcpSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/io/TcpSpec.scala index 72c7c7fedff..6fce836bbb3 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/io/TcpSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/io/TcpSpec.scala @@ -51,18 +51,16 @@ import akka.util.ByteString @nowarn("msg=never used") class NonResolvingDnsActor(cache: SimpleDnsCache, config: Config) extends Actor { - def receive = { - case msg => - throw new RuntimeException(s"Unexpected resolve message $msg") + def receive = { case msg => + throw new RuntimeException(s"Unexpected resolve message $msg") } } @nowarn("msg=never used") class NonResolvingDnsManager(ext: akka.io.DnsExt) extends Actor { - def receive = { - case msg => - throw new RuntimeException(s"Unexpected resolve message $msg") + def receive = { case msg => + throw new RuntimeException(s"Unexpected resolve message $msg") } } @@ -89,14 +87,17 @@ class FailingDnsResolver extends DnsProvider { override def managerClass = classOf[NonResolvingDnsManager] } -class TcpSpec extends StreamSpec(""" +class TcpSpec + extends StreamSpec(""" akka.loglevel = debug akka.loggers = ["akka.testkit.SilenceAllTestEventListener"] akka.io.tcp.trace-logging = true akka.stream.materializer.subscription-timeout.timeout = 2s akka.stream.materializer.initial-input-buffer-size = 2 akka.stream.materializer.max-input-buffer-size = 2 - """) with TcpHelper with WithLogCapturing { + """) + 
with TcpHelper + with WithLogCapturing { "Outgoing TCP stream" must { @@ -264,10 +265,12 @@ class TcpSpec extends StreamSpec(""" tcpWriteProbe.close() // Need a write on the server side to detect the close event - awaitAssert({ - serverConnection.write(testData) - serverConnection.expectClosed(_.isErrorClosed, 500.millis) - }, max = 5.seconds) + awaitAssert( + { + serverConnection.write(testData) + serverConnection.expectClosed(_.isErrorClosed, 500.millis) + }, + max = 5.seconds) serverConnection.expectTerminated() } @@ -301,10 +304,12 @@ class TcpSpec extends StreamSpec(""" tcpReadProbe.tcpReadSubscription.cancel() // Need a write on the server side to detect the close event - awaitAssert({ - serverConnection.write(testData) - serverConnection.expectClosed(_.isErrorClosed, 500.millis) - }, max = 5.seconds) + awaitAssert( + { + serverConnection.write(testData) + serverConnection.expectClosed(_.isErrorClosed, 500.millis) + }, + max = 5.seconds) serverConnection.expectTerminated() } @@ -540,9 +545,11 @@ class TcpSpec extends StreamSpec(""" "handle when connection actor terminates unexpectedly" in { val system2 = ActorSystem( "TcpSpec-unexpected-system2", - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" akka.loglevel = DEBUG # issue #21660 - """).withFallback(system.settings.config)) + """) + .withFallback(system.settings.config)) try { implicit val materializer = SystemMaterializer(system2).materializer @@ -590,9 +597,11 @@ class TcpSpec extends StreamSpec(""" "provide full exceptions when connection attempt fails because name cannot be resolved" in { val systemWithBrokenDns = ActorSystem( "TcpSpec-resolution-failure", - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" akka.io.dns.inet-address.provider-object = akka.stream.io.FailingDnsResolver - """).withFallback(system.settings.config)) + """) + .withFallback(system.settings.config)) try { val unknownHostName = "abcdefghijklmnopkuh" diff --git 
a/akka-stream-tests/src/test/scala/akka/stream/io/TlsSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/io/TlsSpec.scala index f5ffa3a8746..2f1523a7ec9 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/io/TlsSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/io/TlsSpec.scala @@ -72,15 +72,19 @@ object TlsSpec { override def preStart(): Unit = scheduleOnce((), duration) var last: ByteString = _ - setHandler(in, new InHandler { - override def onPush(): Unit = { - last = grab(in) - push(out, last) - } - }) - setHandler(out, new OutHandler { - override def onPull(): Unit = pull(in) - }) + setHandler( + in, + new InHandler { + override def onPush(): Unit = { + last = grab(in) + push(out, last) + } + }) + setHandler( + out, + new OutHandler { + override def onPull(): Unit = pull(in) + }) override def onTimer(x: Any): Unit = { failStage(new TimeoutException(s"timeout expired, last element was $last")) } @@ -567,7 +571,7 @@ class TlsSpec extends StreamSpec(TlsSpec.configOverrides) with WithLogCapturing Await.result(run("unknown.example.org"), 3.seconds) } - cause.getClass should ===(classOf[SSLHandshakeException]) //General SSLEngine problem + cause.getClass should ===(classOf[SSLHandshakeException]) // General SSLEngine problem val rootCause = rootCauseOf(cause.getCause) rootCause.getClass should ===(classOf[CertificateException]) diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/compression/CodecSpecSupport.scala b/akka-stream-tests/src/test/scala/akka/stream/io/compression/CodecSpecSupport.scala index 10bbbedd2cc..9c1d829b96b 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/io/compression/CodecSpecSupport.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/io/compression/CodecSpecSupport.scala @@ -20,7 +20,7 @@ trait CodecSpecSupport extends Matchers with BeforeAndAfterAll { self: Suite => def fromHexDump(dump: String) = dump.grouped(2).toArray.map(chars => Integer.parseInt(new String(chars), 16).toByte) def 
printBytes(i: Int, id: String) = { - def byte(i: Int) = (i & 0xFF).toHexString + def byte(i: Int) = (i & 0xff).toHexString println(id + ": " + byte(i) + ":" + byte(i >> 8) + ":" + byte(i >> 16) + ":" + byte(i >> 24)) i } diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/compression/CoderSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/io/compression/CoderSpec.scala index 0a87ddf42c5..f6415ded500 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/io/compression/CoderSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/io/compression/CoderSpec.scala @@ -90,8 +90,8 @@ abstract class CoderSpec(codecName: String) extends AnyWordSpec with CodecSpecSu val chunks = largeTextBytes.grouped(512).toVector val comp = newCompressor() val compressedChunks = chunks.map { chunk => - comp.compressAndFlush(chunk) - } :+ comp.finish() + comp.compressAndFlush(chunk) + } :+ comp.finish() val uncompressed = decodeFromIterator(() => compressedChunks.iterator) uncompressed should readAs(largeText) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ActorRefBackpressureSourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ActorRefBackpressureSourceSpec.scala index 2f03c61a36a..c96a86f0f18 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ActorRefBackpressureSourceSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ActorRefBackpressureSourceSpec.scala @@ -26,7 +26,8 @@ class ActorRefBackpressureSourceSpec extends StreamSpec { val probe = TestProbe() val (ref, s) = Source .actorRefWithBackpressure[Int]( - AckMsg, { case "ok" => CompletionStrategy.draining }: PartialFunction[Any, CompletionStrategy], + AckMsg, + { case "ok" => CompletionStrategy.draining }: PartialFunction[Any, CompletionStrategy], PartialFunction.empty) .toMat(TestSink[Int]())(Keep.both) .run() @@ -79,7 +80,8 @@ class ActorRefBackpressureSourceSpec extends StreamSpec { val probe = TestProbe() val (ref, s) = Source 
.actorRefWithBackpressure[Int]( - AckMsg, { case "ok" => CompletionStrategy.draining }: PartialFunction[Any, CompletionStrategy], + AckMsg, + { case "ok" => CompletionStrategy.draining }: PartialFunction[Any, CompletionStrategy], PartialFunction.empty) .toMat(TestSink[Int]())(Keep.both) .run() @@ -100,7 +102,8 @@ class ActorRefBackpressureSourceSpec extends StreamSpec { val (ref, s) = Source .actorRefWithBackpressure[Int]( AckMsg, - PartialFunction.empty, { case Status.Failure(f) => f }: PartialFunction[Any, Throwable]) + PartialFunction.empty, + { case Status.Failure(f) => f }: PartialFunction[Any, Throwable]) .toMat(TestSink[Int]())(Keep.both) .run() @@ -119,7 +122,8 @@ class ActorRefBackpressureSourceSpec extends StreamSpec { val probe = TestProbe() val (ref, s) = Source .actorRefWithBackpressure[Int]( - AckMsg, { case "ok" => CompletionStrategy.draining }: PartialFunction[Any, CompletionStrategy], + AckMsg, + { case "ok" => CompletionStrategy.draining }: PartialFunction[Any, CompletionStrategy], PartialFunction.empty) .toMat(TestSink[Int]())(Keep.both) .run() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ActorRefSinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ActorRefSinkSpec.scala index fbae4d1fce2..e5da2e43662 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ActorRefSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ActorRefSinkSpec.scala @@ -13,8 +13,8 @@ import akka.testkit.TestProbe object ActorRefSinkSpec { case class Fw(ref: ActorRef) extends Actor { - def receive = { - case msg => ref.forward(msg) + def receive = { case msg => + ref.forward(msg) } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/AggregateWithBoundarySpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/AggregateWithBoundarySpec.scala index fce051f784b..0b5011a135d 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/AggregateWithBoundarySpec.scala +++ 
b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/AggregateWithBoundarySpec.scala @@ -24,10 +24,13 @@ class AggregateWithBoundarySpec extends StreamSpec { val stream = collection.immutable.Seq(1, 2, 3, 4, 5, 6, 7) val groupSize = 3 val result = Source(stream) - .aggregateWithBoundary(allocate = () => ListBuffer.empty[Int])(aggregate = (buffer, i) => { - buffer += i - (buffer, buffer.size >= groupSize) - }, harvest = buffer => buffer.toSeq, emitOnTimer = None) + .aggregateWithBoundary(allocate = () => ListBuffer.empty[Int])( + aggregate = (buffer, i) => { + buffer += i + (buffer, buffer.size >= groupSize) + }, + harvest = buffer => buffer.toSeq, + emitOnTimer = None) .runWith(Sink.collection) Await.result(result, 10.seconds) should be(stream.grouped(groupSize).toSeq) @@ -56,10 +59,13 @@ class AggregateWithBoundarySpec extends StreamSpec { val weight = 10 val result = Source(stream) - .aggregateWithBoundary(allocate = () => ListBuffer.empty[Int])(aggregate = (buffer, i) => { - buffer += i - (buffer, buffer.sum >= weight) - }, harvest = buffer => buffer.toSeq, emitOnTimer = None) + .aggregateWithBoundary(allocate = () => ListBuffer.empty[Int])( + aggregate = (buffer, i) => { + buffer += i + (buffer, buffer.sum >= weight) + }, + harvest = buffer => buffer.toSeq, + emitOnTimer = None) .runWith(Sink.collection) Await.result(result, 10.seconds) should be(Seq(Seq(1, 2, 3, 4), Seq(5, 6), Seq(7))) @@ -82,7 +88,7 @@ class AggregateWithTimeBoundaryAndSimulatedTimeSpec extends AnyWordSpecLike with private def getEts(actor: ActorSystem): ExplicitlyTriggeredScheduler = { actor.scheduler match { case ets: ExplicitlyTriggeredScheduler => ets - case other => throw new Exception(s"expecting ${classOf[ExplicitlyTriggeredScheduler]} but got ${other.getClass}") + case other => throw new Exception(s"expecting ${classOf[ExplicitlyTriggeredScheduler]} but got ${other.getClass}") } } @@ -120,17 +126,23 @@ class AggregateWithTimeBoundaryAndSimulatedTimeSpec extends AnyWordSpecLike with 
} } - source.aggregateWithBoundary(allocate = () => new ValueTimeWrapper(value = allocate))(aggregate = (agg, in) => { - agg.updateTime(currentTimeMs) - // user provided Agg type must be mutable - val (updated, result) = aggregate(agg.value, in) - agg.value = updated - (agg, result) - }, harvest = agg => harvest(agg.value), emitOnTimer = Some((agg => { - val currentTime = currentTimeMs - maxDuration.exists(md => currentTime - agg.firstTime >= md.toMillis) || - maxGap.exists(mg => currentTime - agg.lastTime >= mg.toMillis) - }, interval))) + source.aggregateWithBoundary(allocate = () => new ValueTimeWrapper(value = allocate))( + aggregate = (agg, in) => { + agg.updateTime(currentTimeMs) + // user provided Agg type must be mutable + val (updated, result) = aggregate(agg.value, in) + agg.value = updated + (agg, result) + }, + harvest = agg => harvest(agg.value), + emitOnTimer = Some( + ( + agg => { + val currentTime = currentTimeMs + maxDuration.exists(md => currentTime - agg.firstTime >= md.toMillis) || + maxGap.exists(mg => currentTime - agg.lastTime >= mg.toMillis) + }, + interval))) } } @@ -244,10 +256,16 @@ class AggregateWithTimeBoundaryAndSimulatedTimeSpec extends AnyWordSpecLike with .run() downstream.ensureSubscription() - upstream.sendNext(1) // onPush(1) -> aggregator=Seq(1), due to the preStart pull, will pull upstream again since queue is empty + upstream.sendNext( + 1 + ) // onPush(1) -> aggregator=Seq(1), due to the preStart pull, will pull upstream again since queue is empty timePasses(maxGap) // harvest onTimer, queue=Queue(Seq(1)), aggregator=null - upstream.sendNext(2) // onPush(2) -> aggregator=Seq(2), due to the previous pull, even the queue is already full at this point due to timer, but it won't pull upstream again - timePasses(maxGap) // harvest onTimer, queue=(Seq(1), Seq(2)), aggregator=null, note queue size can be 1 more than the threshold + upstream.sendNext( + 2 + ) // onPush(2) -> aggregator=Seq(2), due to the previous pull, even the 
queue is already full at this point due to timer, but it won't pull upstream again + timePasses( + maxGap + ) // harvest onTimer, queue=(Seq(1), Seq(2)), aggregator=null, note queue size can be 1 more than the threshold upstream.sendNext(3) // 3 will not be pushed to the stage until the stage pull upstream timePasses(maxGap) // since 3 stayed outside of the stage, this gap will not cause 3 to be emitted downstream.request(1).expectNext(Seq(1)) // onPull emit Seq(1), queue=(Seq(2)) @@ -262,8 +280,12 @@ class AggregateWithTimeBoundaryAndSimulatedTimeSpec extends AnyWordSpecLike with timePasses(maxGap) // emit Seq(3) onTimer downstream.expectNext(Seq(3)) upstream.sendNext(4) // onPush(4) -> aggregator=Seq(4) will follow, and pull upstream again - upstream.sendNext(5) // onPush(5) -> aggregator=Seq(4,5) will happen right after due to the previous pull from onPush(4), eagerly pull even out is not available - upstream.sendNext(6) // onPush(6) -> aggregator=Seq(4,5,6) will happen right after due to the previous pull from onPush(5), even the queue is full at this point + upstream.sendNext( + 5 + ) // onPush(5) -> aggregator=Seq(4,5) will happen right after due to the previous pull from onPush(4), eagerly pull even out is not available + upstream.sendNext( + 6 + ) // onPush(6) -> aggregator=Seq(4,5,6) will happen right after due to the previous pull from onPush(5), even the queue is full at this point timePasses(maxGap) // harvest queue=(Seq(4,5,6)) upstream.sendNext(7) // onPush(7), aggregator=Seq(7), queue=(Seq(4,5,6) no pulling upstream due to queue is full // if sending another message it will stay in upstream, prevent the upstream completion from happening diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/AttributesSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/AttributesSpec.scala index 0a4196b700f..b3d6396c593 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/AttributesSpec.scala +++ 
b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/AttributesSpec.scala @@ -32,9 +32,11 @@ object AttributesSpec { override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Attributes) = { val logic = new GraphStageLogic(shape) { - setHandler(out, new OutHandler { - def onPull(): Unit = {} - }) + setHandler( + out, + new OutHandler { + def onPull(): Unit = {} + }) } (logic, inheritedAttributes) } @@ -52,10 +54,13 @@ object AttributesSpec { override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Attributes) = { val logic = new GraphStageLogic(shape) { - setHandlers(in, out, new InHandler with OutHandler { - override def onPush(): Unit = push(out, grab(in)) - override def onPull(): Unit = pull(in) - }) + setHandlers( + in, + out, + new InHandler with OutHandler { + override def onPush(): Unit = push(out, grab(in)) + override def onPull(): Unit = pull(in) + }) } (logic, inheritedAttributes) @@ -74,12 +79,14 @@ object AttributesSpec { override def preStart(): Unit = { pull(in) } - setHandler(in, new InHandler { - override def onPush(): Unit = { - grab(in) - pull(in) - } - }) + setHandler( + in, + new InHandler { + override def onPush(): Unit = { + grab(in) + pull(in) + } + }) } (logic, inheritedAttributes) @@ -93,12 +100,14 @@ object AttributesSpec { override protected def initialAttributes: Attributes = initialDispatcher.fold(Attributes.none)(name => ActorAttributes.dispatcher(name)) def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { - setHandler(out, new OutHandler { - def onPull(): Unit = { - push(out, Thread.currentThread.getName) - completeStage() - } - }) + setHandler( + out, + new OutHandler { + def onPull(): Unit = { + push(out, Thread.currentThread.getName) + completeStage() + } + }) } } @@ -367,7 +376,7 @@ class AttributesSpec val streamSnapshot = awaitAssert { val snapshot = 
MaterializerState.streamSnapshots(materializer).futureValue - snapshot should have size (1) // just the one island in this case + snapshot should have size 1 // just the one island in this case snapshot.head } @@ -386,9 +395,10 @@ class AttributesSpec "make the attributes on fromGraph(flow-stage) Flow behave the same as the stage itself" in { val attributes = Source.empty - .viaMat(Flow - .fromGraph(new AttributesFlow(Attributes.name("original-name"))) - .withAttributes(Attributes.name("replaced")) // this actually replaces now + .viaMat( + Flow + .fromGraph(new AttributesFlow(Attributes.name("original-name"))) + .withAttributes(Attributes.name("replaced")) // this actually replaces now )(Keep.right) .withAttributes(Attributes.name("source-flow")) .toMat(Sink.ignore)(Keep.left) @@ -445,7 +455,7 @@ class AttributesSpec val snapshot = awaitAssert { val snapshot = MaterializerState.streamSnapshots(materializer).futureValue - snapshot should have size (2) // two stream "islands", one on blocking dispatcher and one on default + snapshot should have size 2 // two stream "islands", one on blocking dispatcher and one on default snapshot } @@ -480,7 +490,7 @@ class AttributesSpec val snapshot = awaitAssert { val snapshot = MaterializerState.streamSnapshots(system).futureValue - snapshot should have size (2) // two stream "islands", one on blocking dispatcher and one on default + snapshot should have size 2 // two stream "islands", one on blocking dispatcher and one on default snapshot } @@ -503,18 +513,19 @@ class AttributesSpec val materializer = Materializer(system) // for isolation try { val (sourcePromise, complete) = Source.maybe - .viaMat(Flow[Int] - .map { n => - // something else than identity so it's not optimized away - n - } - .async(Dispatchers.DefaultBlockingDispatcherId, 1))(Keep.left) + .viaMat( + Flow[Int] + .map { n => + // something else than identity so it's not optimized away + n + } + .async(Dispatchers.DefaultBlockingDispatcherId, 1))(Keep.left) 
.toMat(Sink.ignore)(Keep.both) .run()(SystemMaterializer(system).materializer) val snapshot = awaitAssert { val snapshot = MaterializerState.streamSnapshots(system).futureValue - snapshot should have size (2) // two stream "islands", one on blocking dispatcher and one on default + snapshot should have size 2 // two stream "islands", one on blocking dispatcher and one on default snapshot } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/BoundedSourceQueueSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/BoundedSourceQueueSpec.scala index 1666afd6771..e3e1af5200c 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/BoundedSourceQueueSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/BoundedSourceQueueSpec.scala @@ -14,9 +14,11 @@ import akka.stream.testkit.{ StreamSpec, TestSubscriber } import akka.stream.testkit.scaladsl.TestSink import akka.testkit.WithLogCapturing -class BoundedSourceQueueSpec extends StreamSpec("""akka.loglevel = debug +class BoundedSourceQueueSpec + extends StreamSpec("""akka.loglevel = debug |akka.loggers = ["akka.testkit.SilenceAllTestEventListener"] - |""".stripMargin) with WithLogCapturing { + |""".stripMargin) + with WithLogCapturing { override implicit def patienceConfig: PatienceConfig = PatienceConfig(5.seconds) @@ -37,8 +39,8 @@ class BoundedSourceQueueSpec extends StreamSpec("""akka.loglevel = debug queue.complete() val subIt = Iterator.continually(sub.requestNext()) - subIt.zip(elements.iterator).foreach { - case (subEle, origEle) => subEle should be(origEle) + subIt.zip(elements.iterator).foreach { case (subEle, origEle) => + subEle should be(origEle) } sub.expectComplete() } @@ -122,7 +124,9 @@ class BoundedSourceQueueSpec extends StreamSpec("""akka.loglevel = debug val numThreads = Runtime.getRuntime.availableProcessors() * 4 val stopProb = 10000 // specifies run time of test indirectly val expected = 1d / (1d - math.pow(1d - 1d / stopProb, numThreads)) - 
log.debug(s"Expected elements per thread: $expected") // variance might be quite high depending on number of threads + log.debug( + s"Expected elements per thread: $expected" + ) // variance might be quite high depending on number of threads val startBarrier = new CountDownLatch(numThreads) val stopBarrier = new CountDownLatch(numThreads) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CancellationStrategySpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CancellationStrategySpec.scala index 4253d79e1e8..9f76c074d81 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CancellationStrategySpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CancellationStrategySpec.scala @@ -31,8 +31,10 @@ import akka.stream.testkit.Utils.TE import akka.testkit._ import akka.testkit.WithLogCapturing -class CancellationStrategySpec extends StreamSpec("""akka.loglevel = DEBUG - akka.loggers = ["akka.testkit.SilenceAllTestEventListener"]""") with WithLogCapturing { +class CancellationStrategySpec + extends StreamSpec("""akka.loglevel = DEBUG + akka.loggers = ["akka.testkit.SilenceAllTestEventListener"]""") + with WithLogCapturing { "CancellationStrategyAttribute" should { "support strategies" should { "CompleteStage" should { diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CollectionSinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CollectionSinkSpec.scala index 3d6dee2b29b..f2c3f2bd5ed 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CollectionSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CollectionSinkSpec.scala @@ -20,7 +20,7 @@ class CollectionSinkSpec extends StreamSpec(""" "Sink.collection" when { "using Seq as Collection" must { "return a Seq[T] from a Source" in { - val input = (1 to 6) + val input = 1 to 6 val future: Future[immutable.Seq[Int]] = Source(input).runWith(Sink.collection) val result: immutable.Seq[Int] = 
Await.result(future, remainingOrDefault) result should be(input.toSeq) @@ -43,7 +43,7 @@ class CollectionSinkSpec extends StreamSpec(""" } "using Vector as Collection" must { "return a Vector[T] from a Source" in { - val input = (1 to 6) + val input = 1 to 6 val future: Future[immutable.Vector[Int]] = Source(input).runWith(Sink.collection) val result: immutable.Vector[Int] = Await.result(future, remainingOrDefault) result should be(input.toVector) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CoupledTerminationFlowSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CoupledTerminationFlowSpec.scala index a6f7e88af48..4f31ec9bc2e 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CoupledTerminationFlowSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CoupledTerminationFlowSpec.scala @@ -22,9 +22,11 @@ import akka.stream.testkit._ import akka.stream.testkit.scaladsl.TestSource import akka.testkit.TestProbe -class CoupledTerminationFlowSpec extends StreamSpec(""" +class CoupledTerminationFlowSpec + extends StreamSpec(""" akka.stream.materializer.initial-input-buffer-size = 2 - """) with ScriptedTest { + """) + with ScriptedTest { import system.dispatcher @@ -90,7 +92,9 @@ class CoupledTerminationFlowSpec extends StreamSpec(""" "completed out:Source => complete in:Sink" in { val probe = TestProbe() - val f = Flow.fromSinkAndSourceCoupledMat(Sink.onComplete(_ => probe.ref ! "done"), Source.empty)(Keep.none) // completes right away, should complete the sink as well + val f = Flow.fromSinkAndSourceCoupledMat(Sink.onComplete(_ => probe.ref ! "done"), Source.empty)( + Keep.none + ) // completes right away, should complete the sink as well f.runWith(Source.maybe, Sink.ignore) // these do nothing. 
@@ -99,15 +103,17 @@ class CoupledTerminationFlowSpec extends StreamSpec(""" "cancel in:Sink => cancel out:Source" in { val probe = TestProbe() - val f = Flow.fromSinkAndSourceCoupledMat(Sink.cancelled, Source.fromPublisher(new Publisher[String] { - override def subscribe(subscriber: Subscriber[_ >: String]): Unit = { - subscriber.onSubscribe(new Subscription { - override def cancel(): Unit = probe.ref ! "cancelled" - - override def request(l: Long): Unit = () // do nothing - }) - } - }))(Keep.none) // completes right away, should complete the sink as well + val f = Flow.fromSinkAndSourceCoupledMat( + Sink.cancelled, + Source.fromPublisher(new Publisher[String] { + override def subscribe(subscriber: Subscriber[_ >: String]): Unit = { + subscriber.onSubscribe(new Subscription { + override def cancel(): Unit = probe.ref ! "cancelled" + + override def request(l: Long): Unit = () // do nothing + }) + } + }))(Keep.none) // completes right away, should complete the sink as well f.runWith(Source.maybe, Sink.ignore) // these do nothing. 
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowAlsoToAllSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowAlsoToAllSpec.scala index 7a7f5bdb28f..e2cca8498f4 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowAlsoToAllSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowAlsoToAllSpec.scala @@ -7,9 +7,11 @@ package akka.stream.scaladsl import akka.stream.testkit._ import akka.stream.testkit.scaladsl.TestSink -class FlowAlsoToAllSpec extends StreamSpec(""" +class FlowAlsoToAllSpec + extends StreamSpec(""" akka.stream.materializer.initial-input-buffer-size = 2 - """) with ScriptedTest { + """) + with ScriptedTest { "An also to all" must { "publish elements to all its downstream" in { diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowAskSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowAskSpec.scala index 3cc828ea3fd..cf979c28980 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowAskSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowAskSpec.scala @@ -28,35 +28,33 @@ object FlowAskSpec { case class Reply(payload: Int) class Replier extends Actor { - override def receive: Receive = { - case msg: Int => sender() ! Reply(msg) + override def receive: Receive = { case msg: Int => + sender() ! Reply(msg) } } class ReplyAndProxy(to: ActorRef) extends Actor { - override def receive: Receive = { - case msg: Int => - to ! msg - sender() ! Reply(msg) + override def receive: Receive = { case msg: Int => + to ! msg + sender() ! Reply(msg) } } class RandomDelaysReplier extends Actor { - override def receive: Receive = { - case msg: Int => - import context.dispatcher - - val replyTo = sender() - Future { - Thread.sleep(ThreadLocalRandom.current().nextInt(1, 10)) - replyTo ! 
Reply(msg) - } + override def receive: Receive = { case msg: Int => + import context.dispatcher + + val replyTo = sender() + Future { + Thread.sleep(ThreadLocalRandom.current().nextInt(1, 10)) + replyTo ! Reply(msg) + } } } class StatusReplier extends Actor { - override def receive: Receive = { - case msg: Int => sender() ! akka.actor.Status.Success(Reply(msg)) + override def receive: Receive = { case msg: Int => + sender() ! akka.actor.Status.Success(Reply(msg)) } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowConcatAllLazySpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowConcatAllLazySpec.scala index ff3c370e8ab..a57dc18d9f6 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowConcatAllLazySpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowConcatAllLazySpec.scala @@ -81,11 +81,11 @@ class FlowConcatAllLazySpec extends StreamSpec(""" .expectNext(1, 2) .cancel() .expectNoMessage() - materialized.get() shouldBe (false) + materialized.get() shouldBe false } "work in example" in { - //#concatAllLazy + // #concatAllLazy val sourceA = Source(List(1, 2, 3)) val sourceB = Source(List(4, 5, 6)) val sourceC = Source(List(7, 8, 9)) @@ -93,8 +93,8 @@ class FlowConcatAllLazySpec extends StreamSpec(""" .concatAllLazy(sourceB, sourceC) .fold(new StringJoiner(","))((joiner, input) => joiner.add(String.valueOf(input))) .runWith(Sink.foreach(println)) - //prints 1,2,3,4,5,6,7,8,9 - //#concatAllLazy + // prints 1,2,3,4,5,6,7,8,9 + // #concatAllLazy } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowConcatSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowConcatSpec.scala index d101509647b..5d110dd33d6 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowConcatSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowConcatSpec.scala @@ -157,7 +157,7 @@ abstract class AbstractFlowConcatSpec extends BaseTwoStreamsSetup 
{ m1.isInstanceOf[NotUsed] should be(true) m2.isInstanceOf[NotUsed] should be(true) - runnable.mapMaterializedValue((_) => "boo").run() should be("boo") + runnable.mapMaterializedValue(_ => "boo").run() should be("boo") } "work with Flow DSL" in { @@ -174,7 +174,7 @@ abstract class AbstractFlowConcatSpec extends BaseTwoStreamsSetup { m2.isInstanceOf[NotUsed] should be(true) m3.isInstanceOf[NotUsed] should be(true) - runnable.mapMaterializedValue((_) => "boo").run() should be("boo") + runnable.mapMaterializedValue(_ => "boo").run() should be("boo") } "work with Flow DSL2" in { @@ -244,14 +244,14 @@ class FlowConcatSpec extends AbstractFlowConcatSpec with ScalaFutures { "concat" must { "work in example" in { - //#concat + // #concat val sourceA = Source(List(1, 2, 3, 4)) val sourceB = Source(List(10, 20, 30, 40)) sourceA.concat(sourceB).runWith(Sink.foreach(println)) - //prints 1, 2, 3, 4, 10, 20, 30, 40 - //#concat + // prints 1, 2, 3, 4, 10, 20, 30, 40 + // #concat } } } @@ -282,12 +282,12 @@ class FlowConcatLazySpec extends AbstractFlowConcatSpec { } "work in example" in { - //#concatLazy + // #concatLazy val sourceA = Source(List(1, 2, 3, 4)) val sourceB = Source(List(10, 20, 30, 40)) sourceA.concatLazy(sourceB).runWith(Sink.foreach(println)) - //#concatLazy + // #concatLazy } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDelaySpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDelaySpec.scala index 0ace5fbe500..415e1c4baf6 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDelaySpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDelaySpec.scala @@ -48,12 +48,12 @@ class FlowDelaySpec extends StreamSpec { .delay(300.millis) .runWith(TestSink[Int]()) .request(2) - .expectNoMessage(200.millis) //delay - .expectNext(200.millis, 1) //delayed element - .expectNext(100.millis, 2) //buffered element + .expectNoMessage(200.millis) // delay + .expectNext(200.millis, 1) // delayed 
element + .expectNext(100.millis, 2) // buffered element .expectNoMessage(200.millis) .request(1) - .expectNext(3) //buffered element + .expectNext(3) // buffered element .expectComplete() } @@ -167,7 +167,7 @@ class FlowDelaySpec extends StreamSpec { c.expectNoMessage(300.millis) pSub.sendNext(17) c.expectNext(100.millis, 1) - //fail will terminate despite of non empty internal buffer + // fail will terminate despite of non empty internal buffer pSub.sendError(new RuntimeException() with NoStackTrace) } @@ -241,9 +241,8 @@ class FlowDelaySpec extends StreamSpec { } .delay(delayMillis.millis, DelayOverflowStrategy.backpressure) .withAttributes(Attributes.inputBuffer(4, 4)) - .map { - case (startTimestamp, elem) => - (System.nanoTime() - startTimestamp) / 1e6 -> elem + .map { case (startTimestamp, elem) => + (System.nanoTime() - startTimestamp) / 1e6 -> elem } .runWith(Sink.seq) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDropSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDropSpec.scala index 246279c49ae..d37a80c1bff 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDropSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDropSpec.scala @@ -8,9 +8,11 @@ import java.util.concurrent.ThreadLocalRandom.{ current => random } import akka.stream.testkit._ -class FlowDropSpec extends StreamSpec(""" +class FlowDropSpec + extends StreamSpec(""" akka.stream.materializer.initial-input-buffer-size = 2 - """) with ScriptedTest { + """) + with ScriptedTest { "A Drop" must { diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFilterSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFilterSpec.scala index d5f754bf09e..e1b27484162 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFilterSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFilterSpec.scala @@ -16,9 +16,11 @@ import akka.stream.testkit._ import 
akka.stream.testkit.scaladsl.TestSink import akka.stream.testkit.scaladsl.TestSource -class FlowFilterSpec extends StreamSpec(""" +class FlowFilterSpec + extends StreamSpec(""" akka.stream.materializer.initial-input-buffer-size = 2 - """) with ScriptedTest { + """) + with ScriptedTest { "A Filter" must { diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFlatMapPrefixSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFlatMapPrefixSpec.scala index 57d1fa94f31..b70e5511f78 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFlatMapPrefixSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFlatMapPrefixSpec.scala @@ -190,7 +190,7 @@ class FlowFlatMapPrefixSpec extends StreamSpec("akka.loglevel = debug") { downstream.request(1000) upstream.expectRequest() - //completing publisher + // completing publisher upstream.sendComplete() matValue.futureValue should ===(Nil) @@ -299,7 +299,7 @@ class FlowFlatMapPrefixSpec extends StreamSpec("akka.loglevel = debug") { // inner stream was materialized innerMatVal.futureValue should ===(NotUsed) - subUpstream.expectRequest() should be >= (1L) + subUpstream.expectRequest() should be >= 1L subDownstream.request(1) subscriber.expectNext(2) subUpstream.sendNext(22) @@ -335,16 +335,16 @@ class FlowFlatMapPrefixSpec extends StreamSpec("akka.loglevel = debug") { val subDownstream = subscriber.expectSubscription() val subUpstream = publisher.expectSubscription() subDownstream.request(1) - subUpstream.expectRequest() should be >= (1L) + subUpstream.expectRequest() should be >= 1L subUpstream.sendNext(0) subUpstream.sendNext(1) - //subflow not materialized yet, hence mat value (future) isn't ready yet + // subflow not materialized yet, hence mat value (future) isn't ready yet matFlowWatchTerm.value should be(empty) if (delayDownstreamCancellation) { srcWatchTermF.value should be(empty) - //this one is sent AFTER downstream cancellation + // this one is sent 
AFTER downstream cancellation subUpstream.sendNext(2) subDownstream.cancel() @@ -381,7 +381,7 @@ class FlowFlatMapPrefixSpec extends StreamSpec("akka.loglevel = debug") { val subDownstream = subscriber.expectSubscription() val subUpstream = publisher.expectSubscription() subDownstream.request(1) - subUpstream.expectRequest() should be >= (1L) + subUpstream.expectRequest() should be >= 1L subUpstream.sendNext(0) subUpstream.sendNext(1) subDownstream.asInstanceOf[SubscriptionWithCancelException].cancel(TE("that again?!")) @@ -431,7 +431,7 @@ class FlowFlatMapPrefixSpec extends StreamSpec("akka.loglevel = debug") { notUsedF.futureValue should ===(NotUsed) - subUpstream.expectRequest() should be >= (1L) + subUpstream.expectRequest() should be >= 1L subDownstream.request(1) subscriber.expectNext(2) subUpstream.sendNext(2) @@ -482,8 +482,8 @@ class FlowFlatMapPrefixSpec extends StreamSpec("akka.loglevel = debug") { prefix should ===(0 until 2) Flow[Int] .concat(Source.repeat(3)) - .fold(0L) { - case (acc, _) => acc + 1 + .fold(0L) { case (acc, _) => + acc + 1 } .alsoToMat(Sink.head)(Keep.right) }(Keep.right) @@ -491,9 +491,9 @@ class FlowFlatMapPrefixSpec extends StreamSpec("akka.loglevel = debug") { .withAttributes(attributes) .run()(mat) val countF = countFF.futureValue - //at this point we know the flow was materialized, now we can stop the materializer + // at this point we know the flow was materialized, now we can stop the materializer mat.shutdown() - //expect the nested flow to be terminated abruptly. + // expect the nested flow to be terminated abruptly. 
countF.failed.futureValue should be(a[AbruptStageTerminationException]) } @@ -579,12 +579,12 @@ class FlowFlatMapPrefixSpec extends StreamSpec("akka.loglevel = debug") { subscriber.expectNoMessage() subsc.sendNext(1) val sinkSubscription = subscriber.expectSubscription() - //this indicates + // this indicates fHeadOpt.futureValue should be(empty) - //materialize flow immediately cancels upstream + // materialize flow immediately cancels upstream subsc.expectCancellation() - //at this point both ends of the 'external' fow are closed + // at this point both ends of the 'external' fow are closed sinkSubscription.request(10) subscriber.expectNext("a", "b", "c") @@ -602,11 +602,11 @@ class FlowFlatMapPrefixSpec extends StreamSpec("akka.loglevel = debug") { log.debug("closing sink") closeSink() log.debug("sink closed") - //closing the sink before returning means that it's higly probably - //for the flatMapPrefix stage to receive the downstream cancellation before the actor graph interpreter - //gets a chance to complete the new interpreter shell's registration. - //this in turn exposes a bug in the actor graph interpreter when all active flows complete - //but there are pending new interpreter shells to be registered. + // closing the sink before returning means that it's higly probably + // for the flatMapPrefix stage to receive the downstream cancellation before the actor graph interpreter + // gets a chance to complete the new interpreter shell's registration. + // this in turn exposes a bug in the actor graph interpreter when all active flows complete + // but there are pending new interpreter shells to be registered. 
Flow[Int].prepend(Source(seq)) }(Keep.right) .toMat(Sink.queue(10))(Keep.both) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFlattenMergeSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFlattenMergeSpec.scala index 8e047d1c2ca..5485e2480ad 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFlattenMergeSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFlattenMergeSpec.scala @@ -139,12 +139,14 @@ class FlowFlattenMergeSpec extends StreamSpec { val ex = new Exception("buh") val latch = TestLatch() Source(1 to 3) - .flatMapMerge(10, { - case 1 => Source.fromPublisher(p) - case 2 => - Await.ready(latch, 3.seconds) - throw ex - }) + .flatMapMerge( + 10, + { + case 1 => Source.fromPublisher(p) + case 2 => + Await.ready(latch, 3.seconds) + throw ex + }) .toMat(Sink.head)(Keep.right) .withAttributes(ActorAttributes.syncProcessingLimit(1) and Attributes.inputBuffer(1, 1)) .run() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFoldAsyncSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFoldAsyncSpec.scala index 31230a443f9..d5ecca1c1b6 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFoldAsyncSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFoldAsyncSpec.scala @@ -129,12 +129,11 @@ class FlowFoldAsyncSpec extends StreamSpec { val probe = TestSubscriber.probe[(Int, Int)]() implicit val ec = system.dispatcher Source(1 to 5) - .foldAsync(0 -> 1) { - case ((i, res), n) => - Future { - if (n == 3) throw new RuntimeException("err3") with NoStackTrace - else n -> (i + (res * n)) - } + .foldAsync(0 -> 1) { case ((i, res), n) => + Future { + if (n == 3) throw new RuntimeException("err3") with NoStackTrace + else n -> (i + (res * n)) + } } .withAttributes(supervisionStrategy(resumingDecider)) .to(Sink.fromSubscriber(probe)) @@ -150,12 +149,11 @@ class FlowFoldAsyncSpec extends StreamSpec { val probe = 
TestSubscriber.probe[(Int, Int)]() implicit val ec = system.dispatcher Source(1 to 5) - .foldAsync(0 -> 1) { - case ((i, res), n) => - Future { - if (n == 3) throw new RuntimeException("err3") with NoStackTrace - else n -> (i + (res * n)) - } + .foldAsync(0 -> 1) { case ((i, res), n) => + Future { + if (n == 3) throw new RuntimeException("err3") with NoStackTrace + else n -> (i + (res * n)) + } } .withAttributes(supervisionStrategy(restartingDecider)) .to(Sink.fromSubscriber(probe)) @@ -203,10 +201,9 @@ class FlowFoldAsyncSpec extends StreamSpec { val c = TestSubscriber.manualProbe[(Int, Int)]() implicit val ec = system.dispatcher Source(1 to 5) - .foldAsync(0 -> 1) { - case ((i, res), n) => - if (n == 3) throw new RuntimeException("err4") with NoStackTrace - else Future(n -> (i + (res * n))) + .foldAsync(0 -> 1) { case ((i, res), n) => + if (n == 3) throw new RuntimeException("err4") with NoStackTrace + else Future(n -> (i + (res * n))) } .withAttributes(supervisionStrategy(resumingDecider)) .to(Sink.fromSubscriber(c)) @@ -221,10 +218,9 @@ class FlowFoldAsyncSpec extends StreamSpec { val c = TestSubscriber.manualProbe[(Int, Int)]() implicit val ec = system.dispatcher Source(1 to 5) - .foldAsync(0 -> 1) { - case ((i, res), n) => - if (n == 3) throw new RuntimeException("err4") with NoStackTrace - else Future(n -> (i + (res * n))) + .foldAsync(0 -> 1) { case ((i, res), n) => + if (n == 3) throw new RuntimeException("err4") with NoStackTrace + else Future(n -> (i + (res * n))) } .withAttributes(supervisionStrategy(restartingDecider)) .to(Sink.fromSubscriber(c)) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFutureFlowSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFutureFlowSpec.scala index b845c9ae7ec..270b958771b 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFutureFlowSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFutureFlowSpec.scala @@ -19,9 +19,9 @@ class 
FlowFutureFlowSpec extends StreamSpec { case x => x } - //this stage's behaviour in case of an 'early' downstream cancellation is governed by an attribute - //so we run all tests cases using both modes of the attributes. - //please notice most of the cases don't exhibit any difference in behaviour between the two modes + // this stage's behaviour in case of an 'early' downstream cancellation is governed by an attribute + // so we run all tests cases using both modes of the attributes. + // please notice most of the cases don't exhibit any difference in behaviour between the two modes for { (att, name) <- List( (Attributes.NestedMaterializationCancellationPolicy.EagerCancellation, "EagerCancellation"), @@ -110,8 +110,8 @@ class FlowFutureFlowSpec extends StreamSpec { .viaMat { Flow.futureFlow { Future.successful { - Flow[Int].recover { - case TE("fail on 5") => 99 + Flow[Int].recover { case TE("fail on 5") => + 99 } } } @@ -138,8 +138,8 @@ class FlowFutureFlowSpec extends StreamSpec { fSeq.value should be(empty) prFlow.success { - Flow[Int].recover { - case TE("fail on 5") => 99 + Flow[Int].recover { case TE("fail on 5") => + 99 } } @@ -192,8 +192,8 @@ class FlowFutureFlowSpec extends StreamSpec { Flow.futureFlow { Future.successful { Flow[Int] - .recover { - case TE("not today my friend") => 99 + .recover { case TE("not today my friend") => + 99 } .concat(src10()) } @@ -224,8 +224,8 @@ class FlowFutureFlowSpec extends StreamSpec { prFlow.success { Flow[Int] - .recover { - case TE("not today my friend") => 99 + .recover { case TE("not today my friend") => + 99 } .concat(src10()) } @@ -516,11 +516,10 @@ class FlowFutureFlowSpec extends StreamSpec { "NestedMaterializationCancellationPolicy" must { "default to false" in { - val fl = Flow.fromMaterializer { - case (_, attributes) => - val att = attributes.mandatoryAttribute[Attributes.NestedMaterializationCancellationPolicy] - att.propagateToNestedMaterialization should be(false) - Flow[Any] + val fl = 
Flow.fromMaterializer { case (_, attributes) => + val att = attributes.mandatoryAttribute[Attributes.NestedMaterializationCancellationPolicy] + att.propagateToNestedMaterialization should be(false) + Flow[Any] } Source.empty.via(fl).runWith(Sink.headOption).futureValue should be(empty) } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupBySpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupBySpec.scala index 116c8bfd672..c1fc0713372 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupBySpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupBySpec.scala @@ -146,7 +146,7 @@ class FlowGroupBySpec extends StreamSpec(""" .runWith(TestSink[Seq[String]]()) down.request(1) val ex = down.expectError() - ex.getMessage.indexOf("Key cannot be null") should not be (-1) + ex.getMessage.indexOf("Key cannot be null") should not be -1 ex.isInstanceOf[IllegalArgumentException] should be(true) } @@ -585,7 +585,7 @@ class FlowGroupBySpec extends StreamSpec(""" state.probe.request(1) - //need to verify elements that are first element in subFlow or is in nextElement buffer before + // need to verify elements that are first element in subFlow or is in nextElement buffer before // pushing next element from upstream if (state.firstElement != null) { state.probe.expectNext() should ===(state.firstElement) @@ -622,10 +622,10 @@ class FlowGroupBySpec extends StreamSpec(""" val probe: TestSubscriber.Probe[ByteString] = Await.result(probes(probeIndex).future, 300.millis) probeIndex += 1 map.put(index, SubFlowState(probe, false, byteString)) - //stream automatically requests next element + // stream automatically requests next element } else { val state = map(index) - if (state.firstElement != null) { //first element in subFlow + if (state.firstElement != null) { // first element in subFlow if (!state.hasDemand) blockingNextElement = byteString randomDemand() } else if (state.hasDemand) { @@ 
-665,14 +665,23 @@ class FlowGroupBySpec extends StreamSpec(""" val threeProcessed = Promise[Done]() val blockSubStream1 = TestLatch() - List(Elem(1, 1, () => { - // timeout just to not wait forever if something is wrong, not really relevant for test - Await.result(blockSubStream1, 10.seconds) - 1 - }), Elem(2, 1, () => 2), Elem(3, 2, () => { - threeProcessed.success(Done) - 3 - })).foreach(queue.offer) + List( + Elem( + 1, + 1, + () => { + // timeout just to not wait forever if something is wrong, not really relevant for test + Await.result(blockSubStream1, 10.seconds) + 1 + }), + Elem(2, 1, () => 2), + Elem( + 3, + 2, + () => { + threeProcessed.success(Done) + 3 + })).foreach(queue.offer) // two and three are processed as fast as possible, not blocked by substream 1 being clogged threeProcessed.future.futureValue should ===(Done) // let 1 pass so stream can complete diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedSpec.scala index 74db5798b1f..e26662a408b 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedSpec.scala @@ -11,9 +11,11 @@ import scala.collection.immutable import akka.stream.testkit.ScriptedTest import akka.stream.testkit.StreamSpec -class FlowGroupedSpec extends StreamSpec(""" +class FlowGroupedSpec + extends StreamSpec(""" akka.stream.materializer.initial-input-buffer-size = 2 - """) with ScriptedTest { + """) + with ScriptedTest { "A Grouped" must { diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedWeightedSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedWeightedSpec.scala index d5b995e0983..1b922354b3f 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedWeightedSpec.scala +++ 
b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedWeightedSpec.scala @@ -12,9 +12,11 @@ import akka.stream.testkit.{ ScriptedTest, StreamSpec, TestPublisher, TestSubscr import akka.testkit.TimingTest import akka.util.unused -class FlowGroupedWeightedSpec extends StreamSpec(""" +class FlowGroupedWeightedSpec + extends StreamSpec(""" akka.stream.materializer.initial-input-buffer-size = 2 - """) with ScriptedTest { + """) + with ScriptedTest { "A GroupedWeighted" must { "produce no group (empty sink sequence) when source is empty" in { @@ -26,7 +28,7 @@ class FlowGroupedWeightedSpec extends StreamSpec(""" } "always exhaust a source into a single group if cost is 0" in { - val input = (1 to 15) + val input = 1 to 15 def costFn(@unused e: Int): Long = 0L val minWeight = 1 // chose the least possible value for minWeight val future = Source(input).groupedWeighted(minWeight)(costFn).runWith(Sink.seq) @@ -35,7 +37,7 @@ class FlowGroupedWeightedSpec extends StreamSpec(""" } "exhaust source into one group if minWeight equals the accumulated cost of the source" in { - val input = (1 to 16) + val input = 1 to 16 def costFn(@unused e: Int): Long = 1L val minWeight = input.length val future = Source(input).groupedWeighted(minWeight)(costFn).runWith(Sink.seq) @@ -75,18 +77,14 @@ class FlowGroupedWeightedSpec extends StreamSpec(""" } "fail during stream initialization when minWeight is negative" in { - val ex = the[IllegalArgumentException] thrownBy Source(1 to 5) - .groupedWeighted(-1)(_ => 1L) - .to(Sink.collection) - .run() + val ex = + the[IllegalArgumentException] thrownBy Source(1 to 5).groupedWeighted(-1)(_ => 1L).to(Sink.collection).run() ex.getMessage should be("requirement failed: minWeight must be greater than 0") } "fail during stream initialization when minWeight is 0" in { - val ex = the[IllegalArgumentException] thrownBy Source(1 to 5) - .groupedWeighted(0)(_ => 1L) - .to(Sink.collection) - .run() + val ex = + the[IllegalArgumentException] thrownBy 
Source(1 to 5).groupedWeighted(0)(_ => 1L).to(Sink.collection).run() ex.getMessage should be("requirement failed: minWeight must be greater than 0") } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedWithinSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedWithinSpec.scala index 19d0ec519e5..395cb8ab9b9 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedWithinSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedWithinSpec.scala @@ -168,10 +168,11 @@ class FlowGroupedWithinSpec extends StreamSpec with ScriptedTest { "group with rest" taggedAs TimingTest in { def script = - Script((TestConfig.RandomTestRange.map { _ => - val x, y, z = random.nextInt(); Seq(x, y, z) -> Seq(immutable.Seq(x, y, z)) - } - :+ { val x = random.nextInt(); Seq(x) -> Seq(immutable.Seq(x)) }): _*) + Script( + (TestConfig.RandomTestRange.map { _ => + val x, y, z = random.nextInt(); Seq(x, y, z) -> Seq(immutable.Seq(x, y, z)) + } + :+ { val x = random.nextInt(); Seq(x) -> Seq(immutable.Seq(x)) }): _*) TestConfig.RandomTestRange.foreach(_ => runScript(script)(_.groupedWithin(3, 10.minutes))) } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowIdleInjectSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowIdleInjectSpec.scala index 3cf038916de..715d3046982 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowIdleInjectSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowIdleInjectSpec.scala @@ -25,8 +25,9 @@ class FlowIdleInjectSpec extends StreamSpec(""" "emit elements periodically after silent periods" in { val sourceWithIdleGap = Source(1 to 5) ++ Source(6 to 10).initialDelay(2.second) - Await.result(sourceWithIdleGap.keepAlive(0.6.seconds, () => 0).grouped(1000).runWith(Sink.head), 3.seconds) should ===( - List(1, 2, 3, 4, 5, 0, 0, 0, 6, 7, 8, 9, 10)) + Await.result( + 
sourceWithIdleGap.keepAlive(0.6.seconds, () => 0).grouped(1000).runWith(Sink.head), + 3.seconds) should ===(List(1, 2, 3, 4, 5, 0, 0, 0, 6, 7, 8, 9, 10)) } "immediately pull upstream" in { diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowInterleaveAllSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowInterleaveAllSpec.scala index 39e8b3a91e2..d8a4c189e89 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowInterleaveAllSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowInterleaveAllSpec.scala @@ -100,7 +100,7 @@ class FlowInterleaveAllSpec extends StreamSpec(""" } "work in example" in { - //#interleaveAll + // #interleaveAll val sourceA = Source(List(1, 2, 7, 8)) val sourceB = Source(List(3, 4, 9)) val sourceC = Source(List(5, 6)) @@ -109,8 +109,8 @@ class FlowInterleaveAllSpec extends StreamSpec(""" .interleaveAll(List(sourceB, sourceC), 2, eagerClose = false) .fold(new StringJoiner(","))((joiner, input) => joiner.add(String.valueOf(input))) .runWith(Sink.foreach(println)) - //prints 1,2,3,4,5,6,7,8,9 - //#interleaveAll + // prints 1,2,3,4,5,6,7,8,9 + // #interleaveAll } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowInterleaveSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowInterleaveSpec.scala index 1ac0b8fc516..fe31bd51c9f 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowInterleaveSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowInterleaveSpec.scala @@ -240,7 +240,7 @@ class FlowInterleaveSpec extends BaseTwoStreamsSetup { } "work in example" in { - //#interleave + // #interleave import akka.stream.scaladsl.Sink import akka.stream.scaladsl.Source @@ -248,8 +248,8 @@ class FlowInterleaveSpec extends BaseTwoStreamsSetup { val sourceB = Source(List(10, 20, 30, 40)) sourceA.interleave(sourceB, segmentSize = 2).runWith(Sink.foreach(println)) - //prints 1, 2, 10, 20, 3, 4, 30, 40 - 
//#interleave + // prints 1, 2, 10, 20, 3, 4, 30, 40 + // #interleave } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLimitSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLimitSpec.scala index b05f1327555..b043c40a4b4 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLimitSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLimitSpec.scala @@ -23,7 +23,7 @@ class FlowLimitSpec extends StreamSpec(""" } "produce output that is identical to the input when n = input.length" in { - val input = (1 to 6) + val input = 1 to 6 val n = input.length val future = Source(input).limit(n).grouped(Integer.MAX_VALUE).runWith(Sink.head) val result = Await.result(future, remainingOrDefault) @@ -31,7 +31,7 @@ class FlowLimitSpec extends StreamSpec(""" } "produce output that is identical to the input when n > input.length" in { - val input = (1 to 6) + val input = 1 to 6 val n = input.length + 2 // n > input.length val future = Source(input).limit(n).grouped(Integer.MAX_VALUE).runWith(Sink.head) val result = Await.result(future, remainingOrDefault) @@ -40,7 +40,7 @@ class FlowLimitSpec extends StreamSpec(""" "produce n messages before throwing a StreamLimitReachedException when n < input.size" in { // TODO: check if it actually produces n messages - val input = (1 to 6) + val input = 1 to 6 val n = input.length - 2 // n < input.length val future = Source(input).limit(n).grouped(Integer.MAX_VALUE).runWith(Sink.head) @@ -51,7 +51,7 @@ class FlowLimitSpec extends StreamSpec(""" } "throw a StreamLimitReachedException when n < 0" in { - val input = (1 to 6) + val input = 1 to 6 val n = -1 val future = Source(input).limit(n).grouped(Integer.MAX_VALUE).runWith(Sink.head) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLimitWeightedSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLimitWeightedSpec.scala index 4355df54b58..5ab232e47c0 100644 --- 
a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLimitWeightedSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLimitWeightedSpec.scala @@ -25,7 +25,7 @@ class FlowLimitWeightedSpec extends StreamSpec(""" } "always exhaust a source regardless of n (as long as n > 0) if cost is 0" in { - val input = (1 to 15) + val input = 1 to 15 def costFn(@unused e: Int): Long = 0L val n = 1 // must not matter since costFn always evaluates to 0 val future = Source(input).limitWeighted(n)(costFn).grouped(Integer.MAX_VALUE).runWith(Sink.head) @@ -34,7 +34,7 @@ class FlowLimitWeightedSpec extends StreamSpec(""" } "exhaust source if n equals to input length and cost is 1" in { - val input = (1 to 16) + val input = 1 to 16 def costFn(@unused e: Int): Long = 1L val n = input.length val future = Source(input).limitWeighted(n)(costFn).grouped(Integer.MAX_VALUE).runWith(Sink.head) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLogSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLogSpec.scala index 482b7b47349..b724753baa9 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLogSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLogSpec.scala @@ -17,9 +17,11 @@ import akka.stream.Supervision._ import akka.stream.testkit.{ ScriptedTest, StreamSpec } import akka.testkit.TestProbe -class FlowLogSpec extends StreamSpec(""" +class FlowLogSpec + extends StreamSpec(""" akka.loglevel = DEBUG # test verifies logging - """) with ScriptedTest { + """) + with ScriptedTest { val logProbe = { val p = TestProbe() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLogWithMarkerSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLogWithMarkerSpec.scala index 5c54a7b6c37..83725825015 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLogWithMarkerSpec.scala +++ 
b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLogWithMarkerSpec.scala @@ -17,9 +17,11 @@ import akka.stream.Supervision._ import akka.stream.testkit.{ ScriptedTest, StreamSpec } import akka.testkit.TestProbe -class FlowLogWithMarkerSpec extends StreamSpec(""" +class FlowLogWithMarkerSpec + extends StreamSpec(""" akka.loglevel = DEBUG # test verifies logging - """) with ScriptedTest { + """) + with ScriptedTest { val logProbe = { val p = TestProbe() @@ -64,12 +66,19 @@ class FlowLogWithMarkerSpec extends StreamSpec(""" val debugging: javadsl.Flow[Integer, Integer, NotUsed] = javadsl.Flow .of(classOf[Integer]) .logWithMarker("log-1", _ => LogMarker("marker-1")) - .logWithMarker("log-2", _ => LogMarker("marker-2"), new akka.japi.function.Function[Integer, Integer] { - def apply(i: Integer) = i - }) - .logWithMarker("log-3", _ => LogMarker("marker-3"), new akka.japi.function.Function[Integer, Integer] { - def apply(i: Integer) = i - }, log) + .logWithMarker( + "log-2", + _ => LogMarker("marker-2"), + new akka.japi.function.Function[Integer, Integer] { + def apply(i: Integer) = i + }) + .logWithMarker( + "log-3", + _ => LogMarker("marker-3"), + new akka.japi.function.Function[Integer, Integer] { + def apply(i: Integer) = i + }, + log) .logWithMarker("log-4", _ => LogMarker("marker-4"), log) javadsl.Source.single[Integer](1).via(debugging).runWith(javadsl.Sink.ignore[Integer](), system) @@ -168,12 +177,19 @@ class FlowLogWithMarkerSpec extends StreamSpec(""" javadsl.Source .single[Integer](1) .logWithMarker("log-1", _ => LogMarker("marker-1")) - .logWithMarker("log-2", _ => LogMarker("marker-2"), new akka.japi.function.Function[Integer, Integer] { - def apply(i: Integer) = i - }) - .logWithMarker("log-3", _ => LogMarker("marker-3"), new akka.japi.function.Function[Integer, Integer] { - def apply(i: Integer) = i - }, log) + .logWithMarker( + "log-2", + _ => LogMarker("marker-2"), + new akka.japi.function.Function[Integer, Integer] { + def apply(i: Integer) = 
i + }) + .logWithMarker( + "log-3", + _ => LogMarker("marker-3"), + new akka.japi.function.Function[Integer, Integer] { + def apply(i: Integer) = i + }, + log) .logWithMarker("log-4", _ => LogMarker("marker-4"), log) .runWith(javadsl.Sink.ignore[Integer](), system) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncPartitionedSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncPartitionedSpec.scala index 36f564dc8c1..03576b6e227 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncPartitionedSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncPartitionedSpec.scala @@ -125,11 +125,10 @@ class FlowMapAsyncPartitionedSpec extends StreamSpec with WithLogCapturing { case class Elem(n: Int, promise: Promise[Done]) val (sourceProbe, result) = TestSource[Int]() - .viaMat(Flow[Int].mapAsyncPartitioned(10, 1)(_ < 9) { - case (n, _) => - val promise = Promise[Done]() - processingProbe.ref ! Elem(n, promise) - promise.future.map(_ => n)(ExecutionContexts.parasitic) + .viaMat(Flow[Int].mapAsyncPartitioned(10, 1)(_ < 9) { case (n, _) => + val promise = Promise[Done]() + processingProbe.ref ! 
Elem(n, promise) + promise.future.map(_ => n)(ExecutionContexts.parasitic) })(Keep.left) .toMat(Sink.seq[Int])(Keep.both) .run() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncSpec.scala index b2ba3c14492..6c856fd2592 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncSpec.scala @@ -49,14 +49,13 @@ class FlowMapAsyncSpec extends StreamSpec { val c = TestSubscriber.manualProbe[Int]() implicit val ec = system.dispatcher Source(1 to 50) - .mapAsync(4)( - n => - if (n % 3 == 0) Future.successful(n) - else - Future { - Thread.sleep(ThreadLocalRandom.current().nextInt(1, 10)) - n - }) + .mapAsync(4)(n => + if (n % 3 == 0) Future.successful(n) + else + Future { + Thread.sleep(ThreadLocalRandom.current().nextInt(1, 10)) + n + }) .to(Sink.fromSubscriber(c)) .run() val sub = c.expectSubscription() @@ -98,14 +97,13 @@ class FlowMapAsyncSpec extends StreamSpec { val c = TestSubscriber.manualProbe[Int]() implicit val ec = system.dispatcher Source(1 to 5) - .mapAsync(4)( - n => - if (n == 3) Future.failed[Int](new TE("err1")) - else - Future { - Await.ready(latch, 10.seconds) - n - }) + .mapAsync(4)(n => + if (n == 3) Future.failed[Int](new TE("err1")) + else + Future { + Await.ready(latch, 10.seconds) + n + }) .to(Sink.fromSubscriber(c)) .run() val sub = c.expectSubscription() @@ -437,17 +435,18 @@ class FlowMapAsyncSpec extends StreamSpec { val delay = 50000 // nanoseconds var count = 0 @tailrec final override def run(): Unit = { - val cont = try { - val (promise, enqueued) = queue.take() - val wakeup = enqueued + delay - while (System.nanoTime() < wakeup) {} - counter.decrementAndGet() - promise.success(count) - count += 1 - true - } catch { - case _: InterruptedException => false - } + val cont = + try { + val (promise, enqueued) = queue.take() + val wakeup 
= enqueued + delay + while (System.nanoTime() < wakeup) {} + counter.decrementAndGet() + promise.success(count) + count += 1 + true + } catch { + case _: InterruptedException => false + } if (cont) run() } } @@ -516,13 +515,12 @@ class FlowMapAsyncSpec extends StreamSpec { import system.dispatcher val failCount = new AtomicInteger(0) val result = Source(List(true, false)) - .mapAsync(1)( - elem => - if (elem) throw TE("this has gone too far") - else - Future { - elem - }) + .mapAsync(1)(elem => + if (elem) throw TE("this has gone too far") + else + Future { + elem + }) .addAttributes(supervisionStrategy { case TE("this has gone too far") => failCount.incrementAndGet() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncUnorderedSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncUnorderedSpec.scala index 592014fabb9..0ed56bf8570 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncUnorderedSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncUnorderedSpec.scala @@ -320,17 +320,18 @@ class FlowMapAsyncUnorderedSpec extends StreamSpec { val delay = 50000 // nanoseconds var count = 0 @tailrec final override def run(): Unit = { - val cont = try { - val (promise, enqueued) = queue.take() - val wakeup = enqueued + delay - while (System.nanoTime() < wakeup) {} - counter.decrementAndGet() - promise.success(count) - count += 1 - true - } catch { - case _: InterruptedException => false - } + val cont = + try { + val (promise, enqueued) = queue.take() + val wakeup = enqueued + delay + while (System.nanoTime() < wakeup) {} + counter.decrementAndGet() + promise.success(count) + count += 1 + true + } catch { + case _: InterruptedException => false + } if (cont) run() } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapConcatSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapConcatSpec.scala index 8e906bce4ef..23ff0207ec4 
100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapConcatSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapConcatSpec.scala @@ -11,9 +11,11 @@ import akka.stream.Supervision import akka.stream.testkit._ import akka.stream.testkit.scaladsl.TestSink -class FlowMapConcatSpec extends StreamSpec(""" +class FlowMapConcatSpec + extends StreamSpec(""" akka.stream.materializer.initial-input-buffer-size = 2 - """) with ScriptedTest { + """) + with ScriptedTest { "A MapConcat" must { diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapSpec.scala index 5995db52534..67217e8609f 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapSpec.scala @@ -8,9 +8,11 @@ import java.util.concurrent.ThreadLocalRandom.{ current => random } import akka.stream.testkit._ -class FlowMapSpec extends StreamSpec(""" +class FlowMapSpec + extends StreamSpec(""" akka.stream.materializer.initial-input-buffer-size = 2 - """) with ScriptedTest { + """) + with ScriptedTest { "A Map" must { diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMergeAllSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMergeAllSpec.scala index 8ddc2b6d5c1..1ef829f2de2 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMergeAllSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMergeAllSpec.scala @@ -7,9 +7,11 @@ package akka.stream.scaladsl import akka.stream.testkit._ import akka.stream.testkit.scaladsl.TestSink -class FlowMergeAllSpec extends StreamSpec(""" +class FlowMergeAllSpec + extends StreamSpec(""" akka.stream.materializer.initial-input-buffer-size = 2 - """) with ScriptedTest { + """) + with ScriptedTest { "Flow mergeAll" must { "merge all upstream elements to its downstream" in { diff --git 
a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMergeSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMergeSpec.scala index 0a8353d893d..49e2063c982 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMergeSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMergeSpec.scala @@ -130,7 +130,7 @@ class FlowMergeSpec extends BaseTwoStreamsSetup { } "works in number example for mergePreferred" in { - //#mergePreferred + // #mergePreferred import akka.stream.scaladsl.{ Sink, Source } val sourceA = Source(List(1, 2, 3, 4)) @@ -141,11 +141,11 @@ class FlowMergeSpec extends BaseTwoStreamsSetup { sourceA.mergePreferred(sourceB, true).runWith(Sink.foreach(println)) // prints 10, 1, ... since both sources have their first element ready and the right source is preferred - //#mergePreferred + // #mergePreferred } "works in number example for mergePrioritized" in { - //#mergePrioritized + // #mergePrioritized import akka.stream.scaladsl.{ Sink, Source } val sourceA = Source(List(1, 2, 3, 4)) @@ -155,11 +155,11 @@ class FlowMergeSpec extends BaseTwoStreamsSetup { // prints e.g. 1, 10, 2, 3, 4, 20, 30, 40 since both sources have their first element ready and the left source // has higher priority – if both sources have elements ready, sourceA has a 99% chance of being picked next // while sourceB has a 1% chance - //#mergePrioritized + // #mergePrioritized } "works in number example for mergePrioritizedN" in { - //#mergePrioritizedN + // #mergePrioritizedN import akka.stream.scaladsl.{ Sink, Source } val sourceA = Source(List(1, 2, 3, 4)) @@ -172,28 +172,28 @@ class FlowMergeSpec extends BaseTwoStreamsSetup { // prints e.g. 
1, 100, 2, 3, 4, 10, 20, 30, 40, 200, 300, 400 since both sources have their first element ready and // the left sourceA has higher priority - if both sources have elements ready, sourceA has a 99% chance of being picked next // while sourceB has a 0.99% chance and sourceC has a 0.01% chance - //#mergePrioritizedN + // #mergePrioritizedN } "works in number example for merge sorted" in { - //#merge-sorted + // #merge-sorted import akka.stream.scaladsl.{ Sink, Source } val sourceA = Source(List(1, 3, 5, 7)) val sourceB = Source(List(2, 4, 6, 8)) sourceA.mergeSorted(sourceB).runWith(Sink.foreach(println)) - //prints 1, 2, 3, 4, 5, 6, 7, 8 + // prints 1, 2, 3, 4, 5, 6, 7, 8 val sourceC = Source(List(20, 1, 1, 1)) sourceA.mergeSorted(sourceC).runWith(Sink.foreach(println)) - //prints 1, 3, 5, 7, 20, 1, 1, 1 - //#merge-sorted + // prints 1, 3, 5, 7, 20, 1, 1, 1 + // #merge-sorted } "works in number example for merge" in { - //#merge + // #merge import akka.stream.scaladsl.{ Sink, Source } val sourceA = Source(List(1, 2, 3, 4)) @@ -201,7 +201,7 @@ class FlowMergeSpec extends BaseTwoStreamsSetup { sourceA.merge(sourceB).runWith(Sink.foreach(println)) // merging is not deterministic, can for example print 1, 2, 3, 4, 10, 20, 30, 40 - //#merge + // #merge } } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowOnCompleteSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowOnCompleteSpec.scala index 3750d275119..690245b6e20 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowOnCompleteSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowOnCompleteSpec.scala @@ -14,9 +14,11 @@ import akka.stream.Materializer import akka.stream.testkit._ import akka.testkit.TestProbe -class FlowOnCompleteSpec extends StreamSpec(""" +class FlowOnCompleteSpec + extends StreamSpec(""" akka.stream.materializer.initial-input-buffer-size = 2 - """) with ScriptedTest { + """) + with ScriptedTest { "A Flow with onComplete" 
must { diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowOrElseSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowOrElseSpec.scala index 739c5849dd1..0844f6de7f4 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowOrElseSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowOrElseSpec.scala @@ -133,7 +133,7 @@ class FlowOrElseSpec extends AkkaSpec { } "work in the example" in { - //#or-else + // #or-else val source1 = Source(List("First source")) val source2 = Source(List("Second source")) val emptySource = Source.empty[String] @@ -143,7 +143,7 @@ class FlowOrElseSpec extends AkkaSpec { emptySource.orElse(source2).runWith(Sink.foreach(println)) // this will print "Second source" - //#or-else + // #or-else } trait OrElseProbedFlow { diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowPrependSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowPrependSpec.scala index c01c32de20b..5899d16093f 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowPrependSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowPrependSpec.scala @@ -17,23 +17,23 @@ class FlowPrependSpec extends AkkaSpec { "An Prepend flow" should { "work in entrance example" in { - //#prepend + // #prepend val ladies = Source(List("Emma", "Emily")) val gentlemen = Source(List("Liam", "William")) gentlemen.prepend(ladies).runWith(Sink.foreach(println)) // this will print "Emma", "Emily", "Liam", "William" - //#prepend + // #prepend } "work in lazy entrance example" in { - //#prependLazy + // #prependLazy val ladies = Source(List("Emma", "Emily")) val gentlemen = Source(List("Liam", "William")) gentlemen.prependLazy(ladies).runWith(Sink.foreach(println)) // this will print "Emma", "Emily", "Liam", "William" - //#prependLazy + // #prependLazy } } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowRecoverWithSpec.scala 
b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowRecoverWithSpec.scala index df0837c0782..428087b1ffe 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowRecoverWithSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowRecoverWithSpec.scala @@ -140,9 +140,8 @@ class FlowRecoverWithSpec extends StreamSpec { .map { a => if (a == 3) throw new IndexOutOfBoundsException() else a } - .recoverWith { - case t: IndexOutOfBoundsException => - Source(List(11, 22)).map(m => if (m == 22) throw ex else m) + .recoverWith { case t: IndexOutOfBoundsException => + Source(List(11, 22)).map(m => if (m == 22) throw ex else m) } .runWith(TestSink[Int]()) .request(2) @@ -158,10 +157,11 @@ class FlowRecoverWithSpec extends StreamSpec { .map { a => if (a == 3) throw new IndexOutOfBoundsException() else a } - .recoverWithRetries(3, { - case t: Throwable => + .recoverWithRetries( + 3, + { case t: Throwable => Source(List(11, 22, 33)).map(m => if (m == 33) throw ex else m) - }) + }) .runWith(TestSink[Int]()) .request(100) .expectNextN(List(1, 2)) @@ -221,9 +221,11 @@ class FlowRecoverWithSpec extends StreamSpec { val result = Source .failed(TE("trigger")) - .recoverWithRetries(1, { - case _: TE => Source.fromGraph(FailingInnerMat) - }) + .recoverWithRetries( + 1, + { case _: TE => + Source.fromGraph(FailingInnerMat) + }) .runWith(Sink.ignore) result.failed.futureValue should ===(matFail) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSectionSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSectionSpec.scala index 8b209861b1d..2be16bb0b8a 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSectionSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSectionSpec.scala @@ -57,7 +57,7 @@ class FlowSectionSpec extends StreamSpec(FlowSectionSpec.config) { } "include name in toString" in { - pending //FIXME: Flow has no simple toString anymore + pending // FIXME: 
Flow has no simple toString anymore val n = "Uppercase reverser" val f1 = Flow[String].map(_.toLowerCase) val f2 = Flow[String].map(_.toUpperCase).map(_.reverse).named(n).map(_.toLowerCase) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSlidingSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSlidingSpec.scala index 4ce0b030a7a..3848c5c9c18 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSlidingSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSlidingSpec.scala @@ -23,16 +23,12 @@ class FlowSlidingSpec extends StreamSpec with ScalaCheckPropertyChecks { "Sliding" must { import org.scalacheck.Shrink.shrinkAny def check(gen: Gen[(Int, Int, Int)]): Unit = - forAll(gen, minSize(1000), sizeRange(0)) { - case (len, win, step) => - val af = Source - .fromIterator(() => Iterator.from(0).take(len)) - .sliding(win, step) - .runFold(Seq.empty[Seq[Int]])(_ :+ _) - val cf = Source - .fromIterator(() => Iterator.from(0).take(len).sliding(win, step)) - .runFold(Seq.empty[Seq[Int]])(_ :+ _) - af.futureValue should be(cf.futureValue) + forAll(gen, minSize(1000), sizeRange(0)) { case (len, win, step) => + val af = + Source.fromIterator(() => Iterator.from(0).take(len)).sliding(win, step).runFold(Seq.empty[Seq[Int]])(_ :+ _) + val cf = + Source.fromIterator(() => Iterator.from(0).take(len).sliding(win, step)).runFold(Seq.empty[Seq[Int]])(_ :+ _) + af.futureValue should be(cf.futureValue) } "behave just like collections sliding with step < window" in { diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSpec.scala index e71c786b432..a4c61a1c502 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSpec.scala @@ -407,7 +407,9 @@ class FlowSpec extends StreamSpec(ConfigFactory.parseString("akka.actor.debug.re 
upstreamSubscription.sendNext("a3") downstream.expectNext("a3") - downstream2.expectNoMessage(100.millis.dilated) // as nothing was requested yet, fanOutBox needs to cache element in this case + downstream2.expectNoMessage( + 100.millis.dilated + ) // as nothing was requested yet, fanOutBox needs to cache element in this case downstream2Subscription.request(1) downstream2.expectNext("a3") @@ -481,7 +483,9 @@ class FlowSpec extends StreamSpec(ConfigFactory.parseString("akka.actor.debug.re downstream.expectNext("a3") downstream.expectComplete() - downstream2.expectNoMessage(100.millis.dilated) // as nothing was requested yet, fanOutBox needs to cache element in this case + downstream2.expectNoMessage( + 100.millis.dilated + ) // as nothing was requested yet, fanOutBox needs to cache element in this case downstream2Subscription.request(1) downstream2.expectNext("a3") @@ -531,9 +535,7 @@ class FlowSpec extends StreamSpec(ConfigFactory.parseString("akka.actor.debug.re } } - /** - * Count elements that passing by this flow - * */ + /** Count elements that passing by this flow */ private class CounterFlow[T] extends GraphStageWithMaterializedValue[FlowShape[T, T], AtomicLong] { private val in = Inlet[T]("ElementCounterFlow.in") private val out = Outlet[T]("ElementCounterFlow.out") diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSplitWhenSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSplitWhenSpec.scala index 6acaed2bf84..5f486c51960 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSplitWhenSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSplitWhenSpec.scala @@ -104,7 +104,9 @@ class FlowSplitWhenSpec extends StreamSpec(""" .mapAsync(1)(_.runWith(Sink.headOption)) .grouped(10) .runWith(Sink.headOption), - 3.seconds) should ===(None) // rather tricky way of saying that no empty substream should be emitted (vs. 
Some(None)) + 3.seconds) should ===( + None + ) // rather tricky way of saying that no empty substream should be emitted (vs. Some(None)) } @@ -267,10 +269,10 @@ class FlowSplitWhenSpec extends StreamSpec(""" import system.dispatcher val stream = Source(1 to 5) - // Need to drop to internal API to get a plain Source[Source[Int]] instead of a SubFlow. - // `lift` doesn't cut here because it will prevent the behavior we'd like to see. - // In fact, this test is somewhat useless, as a user cannot trigger double materialization using - // the public splitWhen => SubFlow API. + // Need to drop to internal API to get a plain Source[Source[Int]] instead of a SubFlow. + // `lift` doesn't cut here because it will prevent the behavior we'd like to see. + // In fact, this test is somewhat useless, as a user cannot trigger double materialization using + // the public splitWhen => SubFlow API. .via(Split.when(_ => true, SubstreamCancelStrategy.drain)) .map { source => // run twice, but make sure we return the result of the materialization that ran second diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowStatefulMapConcatSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowStatefulMapConcatSpec.scala index 7e27846c644..18e83cf4a84 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowStatefulMapConcatSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowStatefulMapConcatSpec.scala @@ -11,9 +11,11 @@ import akka.stream.Supervision import akka.stream.testkit._ import akka.stream.testkit.scaladsl.TestSink -class FlowStatefulMapConcatSpec extends StreamSpec(""" +class FlowStatefulMapConcatSpec + extends StreamSpec(""" akka.stream.materializer.initial-input-buffer-size = 2 - """) with ScriptedTest { + """) + with ScriptedTest { val ex = new Exception("TEST") with NoStackTrace diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowStatefulMapSpec.scala 
b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowStatefulMapSpec.scala index b57d963a4dd..6eb54dc1276 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowStatefulMapSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowStatefulMapSpec.scala @@ -32,9 +32,11 @@ class FlowStatefulMapSpec extends StreamSpec { "A StatefulMap" must { "work in the happy case" in { val sinkProb = Source(List(1, 2, 3, 4, 5)) - .statefulMap(() => 0)((agg, elem) => { - (agg + elem, (agg, elem)) - }, _ => None) + .statefulMap(() => 0)( + (agg, elem) => { + (agg + elem, (agg, elem)) + }, + _ => None) .runWith(TestSink[(Int, Int)]()) sinkProb.expectSubscription().request(6) sinkProb @@ -50,7 +52,7 @@ class FlowStatefulMapSpec extends StreamSpec { val sinkProb = Source(1 to 10) .statefulMap(() => List.empty[Int])( (state, elem) => { - //grouped 3 elements into a list + // grouped 3 elements into a list val newState = elem :: state if (newState.size == 3) (Nil, newState.reverse) @@ -66,12 +68,14 @@ class FlowStatefulMapSpec extends StreamSpec { "be able to resume" in { val testSink = Source(List(1, 2, 3, 4, 5)) - .statefulMap(() => 0)((agg, elem) => { - if (elem % 2 == 0) - throw ex - else - (agg + elem, (agg, elem)) - }, _ => None) + .statefulMap(() => 0)( + (agg, elem) => { + if (elem % 2 == 0) + throw ex + else + (agg + elem, (agg, elem)) + }, + _ => None) .withAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider)) .runWith(TestSink[(Int, Int)]()) @@ -81,12 +85,14 @@ class FlowStatefulMapSpec extends StreamSpec { "be able to restart" in { val testSink = Source(List(1, 2, 3, 4, 5)) - .statefulMap(() => 0)((agg, elem) => { - if (elem % 3 == 0) - throw ex - else - (agg + elem, (agg, elem)) - }, _ => None) + .statefulMap(() => 0)( + (agg, elem) => { + if (elem % 3 == 0) + throw ex + else + (agg + elem, (agg, elem)) + }, + _ => None) .withAttributes(ActorAttributes.supervisionStrategy(Supervision.restartingDecider)) 
.runWith(TestSink[(Int, Int)]()) @@ -96,12 +102,14 @@ class FlowStatefulMapSpec extends StreamSpec { "be able to stop" in { val testSink = Source(List(1, 2, 3, 4, 5)) - .statefulMap(() => 0)((agg, elem) => { - if (elem % 3 == 0) - throw ex - else - (agg + elem, (agg, elem)) - }, _ => None) + .statefulMap(() => 0)( + (agg, elem) => { + if (elem % 3 == 0) + throw ex + else + (agg + elem, (agg, elem)) + }, + _ => None) .withAttributes(ActorAttributes.supervisionStrategy(Supervision.stoppingDecider)) .runWith(TestSink[(Int, Int)]()) @@ -111,9 +119,11 @@ class FlowStatefulMapSpec extends StreamSpec { "fail on upstream failure" in { val (testSource, testSink) = TestSource[Int]() - .statefulMap(() => 0)((agg, elem) => { - (agg + elem, (agg, elem)) - }, _ => None) + .statefulMap(() => 0)( + (agg, elem) => { + (agg + elem, (agg, elem)) + }, + _ => None) .toMat(TestSink[(Int, Int)]())(Keep.both) .run() @@ -153,12 +163,14 @@ class FlowStatefulMapSpec extends StreamSpec { "cancel upstream when downstream cancel" in { val promise = Promise[Done]() val testSource = TestSource[Int]() - .statefulMap(() => 100)((agg, elem) => { - (agg + elem, (agg, elem)) - }, (state: Int) => { - promise.complete(Success(Done)) - Some((state, -1)) - }) + .statefulMap(() => 100)( + (agg, elem) => { + (agg + elem, (agg, elem)) + }, + (state: Int) => { + promise.complete(Success(Done)) + Some((state, -1)) + }) .toMat(Sink.cancelled)(Keep.left) .run() testSource.expectSubscription().expectCancellation() @@ -169,12 +181,14 @@ class FlowStatefulMapSpec extends StreamSpec { val promise = Promise[Done]() val testProb = TestSubscriber.probe[(Int, Int)]() val testSource = TestSource[Int]() - .statefulMap(() => 100)((agg, elem) => { - (agg + elem, (agg, elem)) - }, (state: Int) => { - promise.complete(Success(Done)) - Some((state, -1)) - }) + .statefulMap(() => 100)( + (agg, elem) => { + (agg + elem, (agg, elem)) + }, + (state: Int) => { + promise.complete(Success(Done)) + Some((state, -1)) + }) 
.toMat(Sink.fromSubscriber(testProb))(Keep.left) .run() testProb.cancel(ex) @@ -189,10 +203,12 @@ class FlowStatefulMapSpec extends StreamSpec { val matVal = Source .single(1) - .statefulMap(() => -1)((_, elem) => (elem, elem), _ => { - promise.complete(Success(Done)) - None - }) + .statefulMap(() => -1)( + (_, elem) => (elem, elem), + _ => { + promise.complete(Success(Done)) + None + }) .runWith(Sink.never)(mat) mat.shutdown() matVal.failed.futureValue shouldBe an[AbruptStageTerminationException] @@ -203,10 +219,12 @@ class FlowStatefulMapSpec extends StreamSpec { val promise = Promise[Done]() Source .single(1) - .statefulMap(() => -1)((_, _) => throw ex, _ => { - promise.complete(Success(Done)) - None - }) + .statefulMap(() => -1)( + (_, _) => throw ex, + _ => { + promise.complete(Success(Done)) + None + }) .runWith(Sink.ignore) Await.result(promise.future, 3.seconds) shouldBe Done } @@ -252,7 +270,7 @@ class FlowStatefulMapSpec extends StreamSpec { case _ => (Some(elem), Some(elem)) }, _ => None) - .collect({ case Some(elem) => elem }) + .collect { case Some(elem) => elem } .runWith(TestSink[String]()) .request(4) .expectNext("A") @@ -283,13 +301,15 @@ class FlowStatefulMapSpec extends StreamSpec { val closedCounter = new AtomicInteger(0) val probe = Source .repeat(1) - .statefulMap(() => 23)((_, _) => throw TE("failing read"), _ => { - closedCounter.incrementAndGet() - if (closedCounter.get == 1) { - throw TE("boom") - } - None - }) + .statefulMap(() => 23)( + (_, _) => throw TE("failing read"), + _ => { + closedCounter.incrementAndGet() + if (closedCounter.get == 1) { + throw TE("boom") + } + None + }) .runWith(TestSink[Int]()) EventFilter[TE](occurrences = 1).intercept { @@ -302,10 +322,13 @@ class FlowStatefulMapSpec extends StreamSpec { "will not call onComplete twice on cancel when `onComplete` fails" in { val closedCounter = new AtomicInteger(0) val (source, sink) = TestSource() - .viaMat(Flow[Int].statefulMap(() => 23)((s, elem) => (s, elem), _ => { - 
closedCounter.incrementAndGet() - throw TE("boom") - }))(Keep.left) + .viaMat( + Flow[Int].statefulMap(() => 23)( + (s, elem) => (s, elem), + _ => { + closedCounter.incrementAndGet() + throw TE("boom") + }))(Keep.left) .toMat(TestSink[Int]())(Keep.both) .run() @@ -322,10 +345,12 @@ class FlowStatefulMapSpec extends StreamSpec { "will not call onComplete twice if `onComplete` fail on upstream complete" in { val closedCounter = new AtomicInteger(0) val (pub, sub) = TestSource[Int]() - .statefulMap(() => 23)((state, value) => (state, value), _ => { - closedCounter.incrementAndGet() - throw TE("boom") - }) + .statefulMap(() => 23)( + (state, value) => (state, value), + _ => { + closedCounter.incrementAndGet() + throw TE("boom") + }) .toMat(TestSink[Int]())(Keep.both) .run() @@ -344,10 +369,12 @@ class FlowStatefulMapSpec extends StreamSpec { "emit onClose return value before restarting" in { val stateCounter = new AtomicInteger(0) val (source, sink) = TestSource[String]() - .viaMat(Flow[String].statefulMap(() => stateCounter.incrementAndGet())({ (s, elem) => - if (elem == "boom") throw TE("boom") - else (s, elem + s.toString) - }, _ => Some("onClose")))(Keep.left) + .viaMat(Flow[String].statefulMap(() => stateCounter.incrementAndGet())( + { (s, elem) => + if (elem == "boom") throw TE("boom") + else (s, elem + s.toString) + }, + _ => Some("onClose")))(Keep.left) .withAttributes(ActorAttributes.supervisionStrategy(Supervision.restartingDecider)) .toMat(TestSink())(Keep.both) .run() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowTakeSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowTakeSpec.scala index 8ac7aa2fc9c..cf4bbbf2027 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowTakeSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowTakeSpec.scala @@ -14,9 +14,11 @@ import akka.stream.impl.ActorSubscriberMessage.OnNext import akka.stream.impl.RequestMore import akka.stream.testkit._ 
-class FlowTakeSpec extends StreamSpec(""" +class FlowTakeSpec + extends StreamSpec(""" akka.stream.materializer.initial-input-buffer-size = 2 - """) with ScriptedTest { + """) + with ScriptedTest { muteDeadLetters(classOf[OnNext], OnComplete.getClass, classOf[RequestMore[_]])() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowThrottleSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowThrottleSpec.scala index 9386f0836e2..bc293576b69 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowThrottleSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowThrottleSpec.scala @@ -31,7 +31,7 @@ class FlowThrottleSpec extends StreamSpec(""" "Throttle for single cost elements" must { "work for the happy case" in { - //Source(1 to 5).throttle(1, 100.millis, 0, Shaping) + // Source(1 to 5).throttle(1, 100.millis, 0, Shaping) Source(1 to 5) .throttle(19, 1000.millis, -1, Shaping) .runWith(TestSink[Int]()) @@ -123,8 +123,8 @@ class FlowThrottleSpec extends StreamSpec(""" val startMs = elementsAndTimestampsMs.head._1 val elemsAndTimeFromStart = elementsAndTimestampsMs.map { case (ts, n) => (ts - startMs, n) } - val perThrottleInterval = elemsAndTimeFromStart.groupBy { - case (fromStart, _) => math.round(fromStart.toDouble / throttleInterval.toMillis).toInt + val perThrottleInterval = elemsAndTimeFromStart.groupBy { case (fromStart, _) => + math.round(fromStart.toDouble / throttleInterval.toMillis).toInt } withClue(perThrottleInterval) { perThrottleInterval.forall { case (_, entries) => entries.size == 1 } should ===(true) @@ -162,7 +162,7 @@ class FlowThrottleSpec extends StreamSpec(""" upstream.sendNext(6) downstream.expectNoMessage(100.millis) downstream.expectNext(6) - downstream.expectNoMessage(500.millis) //wait to receive 2 in burst afterwards + downstream.expectNoMessage(500.millis) // wait to receive 2 in burst afterwards downstream.request(5) for (i <- 7 to 10) upstream.sendNext(i) 
downstream.receiveWithin(100.millis, 2) should be(Seq(7, 8)) @@ -170,7 +170,9 @@ class FlowThrottleSpec extends StreamSpec(""" } "throw exception when exceeding throughput in enforced mode" in { - Await.result(Source(1 to 5).throttle(1, 200.millis, 5, Enforcing).runWith(Sink.seq), 2.seconds) should ===(1 to 5) // Burst is 5 so this will not fail + Await.result(Source(1 to 5).throttle(1, 200.millis, 5, Enforcing).runWith(Sink.seq), 2.seconds) should ===( + 1 to 5 + ) // Burst is 5 so this will not fail an[RateExceededException] shouldBe thrownBy { Await.result(Source(1 to 6).throttle(1, 200.millis, 5, Enforcing).runWith(Sink.ignore), 2.seconds) @@ -191,7 +193,7 @@ class FlowThrottleSpec extends StreamSpec(""" "Throttle for various cost elements" must { "work for happy case" in { Source(1 to 5) - .throttle(1, 100.millis, 0, (_) => 1, Shaping) + .throttle(1, 100.millis, 0, _ => 1, Shaping) .runWith(TestSink[Int]()) .request(5) .expectNext(1, 2, 3, 4, 5) @@ -251,8 +253,8 @@ class FlowThrottleSpec extends StreamSpec(""" val startMs = elementsAndTimestampsMs.head._1 val elemsAndTimeFromStart = elementsAndTimestampsMs.map { case (ts, n) => (ts - startMs, n) } - val perThrottleInterval = elemsAndTimeFromStart.groupBy { - case (fromStart, _) => math.round(fromStart.toDouble / throttleInterval.toMillis).toInt + val perThrottleInterval = elemsAndTimeFromStart.groupBy { case (fromStart, _) => + math.round(fromStart.toDouble / throttleInterval.toMillis).toInt } withClue(perThrottleInterval) { perThrottleInterval.forall { case (_, entries) => entries.size == 1 } should ===(true) @@ -264,7 +266,7 @@ class FlowThrottleSpec extends StreamSpec(""" val downstream = TestSubscriber.probe[Int]() Source .fromPublisher(upstream) - .throttle(2, 400.millis, 5, (_) => 1, Shaping) + .throttle(2, 400.millis, 5, _ => 1, Shaping) .runWith(Sink.fromSubscriber(downstream)) // Exhaust bucket first @@ -288,7 +290,7 @@ class FlowThrottleSpec extends StreamSpec(""" val downstream = 
TestSubscriber.probe[Int]() Source .fromPublisher(upstream) - .throttle(2, 400.millis, 5, (e) => if (e < 9) 1 else 20, Shaping) + .throttle(2, 400.millis, 5, e => if (e < 9) 1 else 20, Shaping) .runWith(Sink.fromSubscriber(downstream)) // Exhaust bucket first @@ -300,7 +302,7 @@ class FlowThrottleSpec extends StreamSpec(""" upstream.sendNext(6) downstream.expectNoMessage(100.millis) downstream.expectNext(6) - downstream.expectNoMessage(500.millis) //wait to receive 2 in burst afterwards + downstream.expectNoMessage(500.millis) // wait to receive 2 in burst afterwards downstream.request(5) for (i <- 7 to 9) upstream.sendNext(i) downstream.receiveWithin(200.millis, 2) should be(Seq(7, 8)) @@ -308,8 +310,9 @@ class FlowThrottleSpec extends StreamSpec(""" } "throw exception when exceeding throughput in enforced mode" in { - Await.result(Source(1 to 4).throttle(2, 200.millis, 10, identity, Enforcing).runWith(Sink.seq), 2.seconds) should ===( - 1 to 4) // Burst is 10 so this will not fail + Await.result( + Source(1 to 4).throttle(2, 200.millis, 10, identity, Enforcing).runWith(Sink.seq), + 2.seconds) should ===(1 to 4) // Burst is 10 so this will not fail an[RateExceededException] shouldBe thrownBy { Await.result(Source(1 to 6).throttle(2, 200.millis, 0, identity, Enforcing).runWith(Sink.ignore), 2.seconds) @@ -329,7 +332,7 @@ class FlowThrottleSpec extends StreamSpec(""" "handle rate calculation function exception" in { val ex = new RuntimeException with NoStackTrace Source(1 to 5) - .throttle(2, 200.millis, 0, (_) => { throw ex }, Shaping) + .throttle(2, 200.millis, 0, _ => { throw ex }, Shaping) .throttle(1, 100.millis, 5, Enforcing) .runWith(TestSink[Int]()) .request(5) @@ -361,10 +364,10 @@ class FlowThrottleSpec extends StreamSpec(""" counter1.set(0) if (rate < expectedMinRate.get) throw new RuntimeException(s"Too low rate, got $rate, expected min ${expectedMinRate.get}, " + - s"after ${(now - startTime).nanos.toMillis} ms at element $elem") + s"after ${(now - 
startTime).nanos.toMillis} ms at element $elem") if (rate > expectedMaxRate.get) throw new RuntimeException(s"Too high rate, got $rate, expected max ${expectedMaxRate.get}, " + - s"after ${(now - startTime).nanos.toMillis} ms at element $elem") + s"after ${(now - startTime).nanos.toMillis} ms at element $elem") } })(Keep.both) .run() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWatchSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWatchSpec.scala index 3d80b94aee0..234baace79c 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWatchSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWatchSpec.scala @@ -17,8 +17,8 @@ object FlowWatchSpec { case class Reply(payload: Int) class Replier extends Actor { - override def receive: Receive = { - case msg: Int => sender() ! Reply(msg) + override def receive: Receive = { case msg: Int => + sender() ! Reply(msg) } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWatchTerminationSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWatchTerminationSpec.scala index e6891727fcc..ae60b35074c 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWatchTerminationSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWatchTerminationSpec.scala @@ -36,7 +36,7 @@ class FlowWatchTerminationSpec extends StreamSpec { val (p, future) = TestSource[Int]().watchTermination()(Keep.both).to(Sink.ignore).run() p.sendNext(1) p.sendError(ex) - whenReady(future.failed) { _ shouldBe (ex) } + whenReady(future.failed) { _ shouldBe ex } } "complete the future for an empty stream" in { diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWithContextLogSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWithContextLogSpec.scala index 4ade395f676..792597e87e8 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWithContextLogSpec.scala 
+++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWithContextLogSpec.scala @@ -11,9 +11,11 @@ import akka.stream.testkit.ScriptedTest import akka.stream.testkit.StreamSpec import akka.testkit.TestProbe -class FlowWithContextLogSpec extends StreamSpec(""" +class FlowWithContextLogSpec + extends StreamSpec(""" akka.loglevel = DEBUG # test verifies logging - """) with ScriptedTest { + """) + with ScriptedTest { val logProbe = { val p = TestProbe() @@ -23,7 +25,7 @@ class FlowWithContextLogSpec extends StreamSpec(""" "log() from FlowWithContextOps" must { - val supervisorPath = (SystemMaterializer(system).materializer).supervisor.path + val supervisorPath = SystemMaterializer(system).materializer.supervisor.path val LogSrc = s"akka.stream.Log($supervisorPath)" val LogClazz = classOf[Materializer] diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWithContextSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWithContextSpec.scala index 78ee09f288e..1e96be227d0 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWithContextSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWithContextSpec.scala @@ -26,7 +26,7 @@ class FlowWithContextSpec extends StreamSpec { .asSource .runWith(TestSink[(Message, Long)]()) .request(1) - .expectNext(((Message("az", 1L), 1L))) + .expectNext((Message("az", 1L), 1L)) .expectComplete() } @@ -42,15 +42,15 @@ class FlowWithContextSpec extends StreamSpec { .toMat(TestSink[(Message, Long)]())(Keep.both) .run() matValue shouldBe (42 -> materializedValue) - probe.request(1).expectNext(((Message("a", 1L), 1L))).expectComplete() + probe.request(1).expectNext((Message("a", 1L), 1L)).expectComplete() } "be able to map error via FlowWithContext.mapError" in { val ex = new RuntimeException("ex") with NoStackTrace val boom = new Exception("BOOM!") with NoStackTrace val mapErrorFlow = FlowWithContext[Message, Long] - .map { - case m @ Message(_, offset) => if 
(offset == 3) throw ex else m + .map { case m @ Message(_, offset) => + if (offset == 3) throw ex else m } .mapError { case _: Throwable => boom } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowZipSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowZipSpec.scala index b8316f31838..82543bfa37e 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowZipSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowZipSpec.scala @@ -79,12 +79,12 @@ class FlowZipSpec extends BaseTwoStreamsSetup { } "work in fruits example" in { - //#zip + // #zip val sourceFruits = Source(List("apple", "orange", "banana")) val sourceFirstLetters = Source(List("A", "O", "B")) sourceFruits.zip(sourceFirstLetters).runWith(Sink.foreach(println)) // this will print ('apple', 'A'), ('orange', 'O'), ('banana', 'B') - //#zip + // #zip } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowZipWithIndexSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowZipWithIndexSpec.scala index bff879857b1..b4b34f146b3 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowZipWithIndexSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowZipWithIndexSpec.scala @@ -40,10 +40,10 @@ class FlowZipWithIndexSpec extends StreamSpec { } "work in fruit example" in { - //#zip-with-index + // #zip-with-index Source(List("apple", "orange", "banana")).zipWithIndex.runWith(Sink.foreach(println)) // this will print ('apple', 0), ('orange', 1), ('banana', 2) - //#zip-with-index + // #zip-with-index } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowZipWithSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowZipWithSpec.scala index f85a8b0c207..2bba5f2604b 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowZipWithSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowZipWithSpec.scala @@ -101,7 +101,7 
@@ class FlowZipWithSpec extends BaseTwoStreamsSetup { } "work in fruits example" in { - //#zip-with + // #zip-with val sourceCount = Source(List("one", "two", "three")) val sourceFruits = Source(List("apple", "orange", "banana")) @@ -111,7 +111,7 @@ class FlowZipWithSpec extends BaseTwoStreamsSetup { } .runWith(Sink.foreach(println)) // this will print 'one apple', 'two orange', 'three banana' - //#zip-with + // #zip-with } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FramingSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FramingSpec.scala index 01578a4de53..4fe69106f96 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FramingSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FramingSpec.scala @@ -102,11 +102,10 @@ class FramingSpec extends StreamSpec { } val futureResults = Future.sequence(resultFutures) - futureResults.futureValue.foreach { - case (result, expected, delimiter) => - withClue(s"delimiter: $delimiter") { - result should ===(expected) - } + futureResults.futureValue.foreach { case (result, expected, delimiter) => + withClue(s"delimiter: $delimiter") { + result should ===(expected) + } } } @@ -169,7 +168,7 @@ class FramingSpec extends StreamSpec { val referenceChunk = ByteString(scala.util.Random.nextString(0x100001)) val byteOrders = List(ByteOrder.BIG_ENDIAN, ByteOrder.LITTLE_ENDIAN) - val frameLengths = List(0, 1, 2, 3, 0xFF, 0x100, 0x101, 0xFFF, 0x1000, 0x1001, 0xFFFF, 0x10000, 0x10001) + val frameLengths = List(0, 1, 2, 3, 0xff, 0x100, 0x101, 0xfff, 0x1000, 0x1001, 0xffff, 0x10000, 0x10001) val fieldLengths = List(1, 2, 3, 4) val fieldOffsets = List(0, 1, 2, 3, 15, 16, 31, 32, 44, 107) @@ -217,11 +216,10 @@ class FramingSpec extends StreamSpec { } val futureResults = Future.sequence(resultFutures) - futureResults.futureValue.foreach { - case (result, expected, (byteOrder, fieldOffset, fieldLength)) => - withClue(s"byteOrder: $byteOrder, fieldOffset: $fieldOffset, 
fieldLength: $fieldLength") { - result should ===(expected) - } + futureResults.futureValue.foreach { case (result, expected, (byteOrder, fieldOffset, fieldLength)) => + withClue(s"byteOrder: $byteOrder, fieldOffset: $fieldOffset, fieldLength: $fieldLength") { + result should ===(expected) + } } } @@ -261,11 +259,10 @@ class FramingSpec extends StreamSpec { } val futureResults = Future.sequence(resultFutures) - futureResults.futureValue.foreach { - case (result, encodedFrames, (byteOrder, fieldOffset, fieldLength)) => - withClue(s"byteOrder: $byteOrder, fieldOffset: $fieldOffset, fieldLength: $fieldLength") { - result should ===(encodedFrames) - } + futureResults.futureValue.foreach { case (result, encodedFrames, (byteOrder, fieldOffset, fieldLength)) => + withClue(s"byteOrder: $byteOrder, fieldOffset: $fieldOffset, fieldLength: $fieldLength") { + result should ===(encodedFrames) + } } } @@ -322,7 +319,7 @@ class FramingSpec extends StreamSpec { "report truncated frames" in { import system.dispatcher val resultFutures: List[Future[(Throwable, (ByteOrder, Int, Int, Int))]] = for { - //_ <- 1 to 10 + // _ <- 1 to 10 byteOrder <- byteOrders fieldOffset <- fieldOffsets fieldLength <- fieldLengths @@ -342,12 +339,11 @@ class FramingSpec extends StreamSpec { } val futureResults = Future.sequence(resultFutures) - futureResults.futureValue.foreach { - case (ex, (byteOrder, fieldOffset, fieldLength, frameLength)) => - withClue( - s"byteOrder: $byteOrder, fieldOffset: $fieldOffset, fieldLength: $fieldLength, frameLength: $frameLength") { - ex shouldBe a[FramingException] - } + futureResults.futureValue.foreach { case (ex, (byteOrder, fieldOffset, fieldLength, frameLength)) => + withClue( + s"byteOrder: $byteOrder, fieldOffset: $fieldOffset, fieldLength: $fieldLength, frameLength: $frameLength") { + ex shouldBe a[FramingException] + } } } @@ -386,7 +382,7 @@ class FramingSpec extends StreamSpec { def computeFrameSize(@unused arr: Array[Byte], @unused l: Int): Int = 8 - val bs 
= ByteString.newBuilder.putInt(0xFF010203).putInt(0x04050607).result() + val bs = ByteString.newBuilder.putInt(0xff010203).putInt(0x04050607).result() val res = Source diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphDSLCompileSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphDSLCompileSpec.scala index f2cab059316..f6f8087ef25 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphDSLCompileSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphDSLCompileSpec.scala @@ -418,8 +418,8 @@ class GraphDSLCompileSpec extends StreamSpec { "support mapMaterializedValue" in { val anOp = op[String, String] - val anOpWithMappedMatVal = anOp.mapMaterializedValue { - case NotUsed => (NotUsed, NotUsed) + val anOpWithMappedMatVal = anOp.mapMaterializedValue { case NotUsed => + (NotUsed, NotUsed) } val g = Source.empty[String].viaMat(anOpWithMappedMatVal)(Keep.right).to(Sink.cancelled) val matVal = g.run() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMatValueSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMatValueSpec.scala index 8591d3fe176..f4099c43f92 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMatValueSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMatValueSpec.scala @@ -64,11 +64,11 @@ class GraphMatValueSpec extends StreamSpec { } // Exposes the materialized value as a stream value - val foldFeedbackSource: Source[Future[Int], Future[Int]] = Source.fromGraph(GraphDSL.createGraph(foldSink) { - implicit b => fold => + val foldFeedbackSource: Source[Future[Int], Future[Int]] = + Source.fromGraph(GraphDSL.createGraph(foldSink) { implicit b => fold => Source(1 to 10) ~> fold SourceShape(b.materializedValue) - }) + }) "allow exposing the materialized value as port" in { val (f1, f2) = foldFeedbackSource.mapAsync(4)(identity).map(_ + 100).toMat(Sink.head)(Keep.both).run() @@ -78,7 +78,7 @@ class 
GraphMatValueSpec extends StreamSpec { "allow exposing the materialized value as port even if wrapped and the final materialized value is Unit" in { val noMatSource: Source[Int, Unit] = - foldFeedbackSource.mapAsync(4)(identity).map(_ + 100).mapMaterializedValue((_) => ()) + foldFeedbackSource.mapAsync(4)(identity).map(_ + 100).mapMaterializedValue(_ => ()) Await.result(noMatSource.runWith(Sink.head), 3.seconds) should ===(155) } @@ -92,13 +92,13 @@ class GraphMatValueSpec extends StreamSpec { SourceShape(zip.out) }) - val compositeSource2 = Source.fromGraph(GraphDSL.createGraph(compositeSource1, compositeSource1)(Keep.both) { - implicit b => (s1, s2) => + val compositeSource2 = + Source.fromGraph(GraphDSL.createGraph(compositeSource1, compositeSource1)(Keep.both) { implicit b => (s1, s2) => val zip = b.add(ZipWith[Int, Int, Int](_ + _)) s1.out ~> zip.in0 s2.out.map(_ * 10000) ~> zip.in1 SourceShape(zip.out) - }) + }) val (((f1, f2), (f3, f4)), result) = compositeSource2.toMat(Sink.head)(Keep.both).run() @@ -111,21 +111,21 @@ class GraphMatValueSpec extends StreamSpec { } "work also when the source’s module is copied" in { - val foldFlow: Flow[Int, Int, Future[Int]] = Flow.fromGraph(GraphDSL.createGraph(foldSink) { - implicit builder => fold => + val foldFlow: Flow[Int, Int, Future[Int]] = + Flow.fromGraph(GraphDSL.createGraph(foldSink) { implicit builder => fold => FlowShape(fold.in, builder.materializedValue.mapAsync(4)(identity).outlet) - }) + }) Await.result(Source(1 to 10).via(foldFlow).runWith(Sink.head), 3.seconds) should ===(55) } "work also when the source’s module is copied and the graph is extended before using the matValSrc" in { - val foldFlow: Flow[Int, Int, Future[Int]] = Flow.fromGraph(GraphDSL.createGraph(foldSink) { - implicit builder => fold => + val foldFlow: Flow[Int, Int, Future[Int]] = + Flow.fromGraph(GraphDSL.createGraph(foldSink) { implicit builder => fold => val map = builder.add(Flow[Future[Int]].mapAsync(4)(identity)) 
builder.materializedValue ~> map FlowShape(fold.in, map.outlet) - }) + }) Await.result(Source(1 to 10).via(foldFlow).runWith(Sink.head), 3.seconds) should ===(55) } @@ -137,7 +137,7 @@ class GraphMatValueSpec extends StreamSpec { Source.empty.mapMaterializedValue(_ => done = true) ~> Sink.ignore ClosedShape } - val r = RunnableGraph.fromGraph(GraphDSL.createGraph(Sink.ignore) { implicit b => (s) => + val r = RunnableGraph.fromGraph(GraphDSL.createGraph(Sink.ignore) { implicit b => s => b.add(g) Source(1 to 10) ~> s ClosedShape @@ -226,7 +226,7 @@ class GraphMatValueSpec extends StreamSpec { val nest3 = Flow[String].via(nest2) val nest4 = Flow[String].via(nest3) - //fails + // fails val matValue = Source(List("")).via(nest4).to(Sink.ignore).run() matValue should ===(NotUsed) @@ -248,20 +248,20 @@ class GraphMatValueSpec extends StreamSpec { "build more complicated graph with flows optimized for identity flows" in { val flow1 = Flow.fromSinkAndSourceMat(Sink.ignore, Source.single(1).viaMat(Flow[Int])(Keep.both))(Keep.both) val (mA, (m1, m2)) = Source.single(8).viaMat(flow1)(Keep.right).to(Sink.ignore).run() - Await.result(mA, 1.second) should ===(Done) //from Sink.ignore - m1 should ===(NotUsed) //from Source.single(1) - m2 should ===(NotUsed) //from Flow[Int] + Await.result(mA, 1.second) should ===(Done) // from Sink.ignore + m1 should ===(NotUsed) // from Source.single(1) + m2 should ===(NotUsed) // from Flow[Int] val flow2 = Flow.fromSinkAndSourceMat(Sink.ignore, Source.maybe[Int].viaMat(Flow[Int])(Keep.left))(Keep.both) val (mB, m3) = Source.single(8).viaMat(flow2)(Keep.right).to(Sink.ignore).run() - Await.result(mB, 1.second) should ===(Done) //from Sink.ignore + Await.result(mB, 1.second) should ===(Done) // from Sink.ignore // Fails with ClassCastException if value is wrong - m3.success(None) //from Source.maybe[Int] + m3.success(None) // from Source.maybe[Int] val flow3 = Flow.fromSinkAndSourceMat(Sink.ignore, 
Source.single(1).viaMat(Flow[Int])(Keep.right))(Keep.both) val (mC, m4) = Source.single(8).viaMat(flow3)(Keep.right).to(Sink.ignore).run() - Await.result(mC, 1.second) should ===(Done) //from Sink.ignore - m4 should ===(NotUsed) //from Flow[Int] + Await.result(mC, 1.second) should ===(Done) // from Sink.ignore + m4 should ===(NotUsed) // from Flow[Int] } "provide a new materialized value for each materialization" in { diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergePreferredSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergePreferredSpec.scala index 7405d14b89e..920c67bdfaf 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergePreferredSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergePreferredSpec.scala @@ -67,7 +67,7 @@ class GraphMergePreferredSpec extends TwoStreamsSetup { val resultSeq = Await.result(result, 3.seconds) resultSeq.toSet should ===((1 to 400).toSet) - //test ordering of elements coming from each of the flows + // test ordering of elements coming from each of the flows resultSeq.filter(_ <= 100) should ===(1 to 100) resultSeq.filter(e => e > 100 && e <= 200) should ===(101 to 200) resultSeq.filter(e => e > 200 && e <= 300) should ===(201 to 300) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergeSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergeSpec.scala index 06c862ffd92..ee5f8ac9d4a 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergeSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergeSpec.scala @@ -56,7 +56,7 @@ class GraphMergeSpec extends TwoStreamsSetup { subscription.request(1) collected :+= probe.expectNext() } - //test ordering of elements coming from each of nonempty flows + // test ordering of elements coming from each of nonempty flows collected.filter(_ <= 4) should ===(1 to 4) collected.filter(_ >= 5) should ===(5 to 10) 
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphOpsIntegrationSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphOpsIntegrationSpec.scala index 3f6332a229b..2c80163833c 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphOpsIntegrationSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphOpsIntegrationSpec.scala @@ -48,7 +48,7 @@ class GraphOpsIntegrationSpec extends StreamSpec(""" "support broadcast - merge layouts" in { val resultFuture = RunnableGraph - .fromGraph(GraphDSL.createGraph(Sink.head[Seq[Int]]) { implicit b => (sink) => + .fromGraph(GraphDSL.createGraph(Sink.head[Seq[Int]]) { implicit b => sink => import GraphDSL.Implicits._ val bcast = b.add(Broadcast[Int](2)) val merge = b.add(Merge[Int](2)) @@ -67,7 +67,7 @@ class GraphOpsIntegrationSpec extends StreamSpec(""" "support balance - merge (parallelization) layouts" in { val elements = 0 to 10 val out = RunnableGraph - .fromGraph(GraphDSL.createGraph(Sink.head[Seq[Int]]) { implicit b => (sink) => + .fromGraph(GraphDSL.createGraph(Sink.head[Seq[Int]]) { implicit b => sink => import GraphDSL.Implicits._ val balance = b.add(Balance[Int](5)) val merge = b.add(Merge[Int](5)) @@ -139,7 +139,7 @@ class GraphOpsIntegrationSpec extends StreamSpec(""" "allow adding of flows to sources and sinks to flows" in { val resultFuture = RunnableGraph - .fromGraph(GraphDSL.createGraph(Sink.head[Seq[Int]]) { implicit b => (sink) => + .fromGraph(GraphDSL.createGraph(Sink.head[Seq[Int]]) { implicit b => sink => import GraphDSL.Implicits._ val bcast = b.add(Broadcast[Int](2)) val merge = b.add(Merge[Int](2)) @@ -208,13 +208,13 @@ class GraphOpsIntegrationSpec extends StreamSpec(""" "be possible to use with generated components" in { implicit val ex = system.dispatcher - //#graph-from-list + // #graph-from-list val sinks = immutable .Seq("a", "b", "c") .map(prefix => Flow[String].filter(str => 
str.startsWith(prefix)).toMat(Sink.head[String])(Keep.right)) - val g: RunnableGraph[Seq[Future[String]]] = RunnableGraph.fromGraph(GraphDSL.create(sinks) { - implicit b => sinkList => + val g: RunnableGraph[Seq[Future[String]]] = + RunnableGraph.fromGraph(GraphDSL.create(sinks) { implicit b => sinkList => import GraphDSL.Implicits._ val broadcast = b.add(Broadcast[String](sinkList.size)) @@ -222,10 +222,10 @@ class GraphOpsIntegrationSpec extends StreamSpec(""" sinkList.foreach(sink => broadcast ~> sink) ClosedShape - }) + }) val matList: Seq[Future[String]] = g.run() - //#graph-from-list + // #graph-from-list val result: Seq[String] = Await.result(Future.sequence(matList), 3.seconds) @@ -240,8 +240,8 @@ class GraphOpsIntegrationSpec extends StreamSpec(""" val sinks = immutable.Seq(Sink.seq[Int]) - val g: RunnableGraph[Seq[Future[immutable.Seq[Int]]]] = RunnableGraph.fromGraph(GraphDSL.create(sinks) { - implicit b => sinkList => + val g: RunnableGraph[Seq[Future[immutable.Seq[Int]]]] = + RunnableGraph.fromGraph(GraphDSL.create(sinks) { implicit b => sinkList => import GraphDSL.Implicits._ val broadcast = b.add(Broadcast[Int](sinkList.size)) @@ -249,7 +249,7 @@ class GraphOpsIntegrationSpec extends StreamSpec(""" sinkList.foreach(sink => broadcast ~> sink) ClosedShape - }) + }) val matList: Seq[Future[immutable.Seq[Int]]] = g.run() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphPartitionSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphPartitionSpec.scala index b778cd0f43c..a2b7a2bb270 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphPartitionSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphPartitionSpec.scala @@ -26,11 +26,14 @@ class GraphPartitionSpec extends StreamSpec(""" val (s1, s2, s3) = RunnableGraph .fromGraph(GraphDSL.createGraph(Sink.seq[Int], Sink.seq[Int], Sink.seq[Int])(Tuple3.apply) { implicit b => (sink1, sink2, sink3) => - val partition = 
b.add(Partition[Int](3, { - case g if (g > 3) => 0 - case l if (l < 3) => 1 - case e if (e == 3) => 2 - })) + val partition = b.add( + Partition[Int]( + 3, + { + case g if g > 3 => 0 + case l if l < 3 => 1 + case e if e == 3 => 2 + })) Source(List(1, 2, 3, 4, 5)) ~> partition.in partition.out(0) ~> sink1.in partition.out(1) ~> sink2.in @@ -51,10 +54,13 @@ class GraphPartitionSpec extends StreamSpec(""" RunnableGraph .fromGraph(GraphDSL.create() { implicit b => - val partition = b.add(Partition[String](2, { - case s if (s.length > 4) => 0 - case _ => 1 - })) + val partition = b.add( + Partition[String]( + 2, + { + case s if s.length > 4 => 0 + case _ => 1 + })) Source(List("this", "is", "just", "another", "test")) ~> partition.in partition.out(0) ~> Sink.fromSubscriber(c1) partition.out(1) ~> Sink.fromSubscriber(c2) @@ -164,10 +170,13 @@ class GraphPartitionSpec extends StreamSpec(""" RunnableGraph .fromGraph(GraphDSL.create() { implicit b => - val partition = b.add(Partition[String](2, { - case s if s == "a" || s == "b" => 0 - case _ => 1 - })) + val partition = b.add( + Partition[String]( + 2, + { + case s if s == "a" || s == "b" => 0 + case _ => 1 + })) Source(List("a", "b", "c", "d")) ~> partition.in partition.out(0) ~> Sink.fromSubscriber(c1) partition.out(1) ~> Sink.fromSubscriber(c2) @@ -190,10 +199,13 @@ class GraphPartitionSpec extends StreamSpec(""" RunnableGraph .fromGraph(GraphDSL.create() { implicit b => - val partition = b.add(Partition[String](2, { - case s if s == "a" || s == "b" => 0 - case _ => 1 - })) + val partition = b.add( + Partition[String]( + 2, + { + case s if s == "a" || s == "b" => 0 + case _ => 1 + })) Source(List("a", "b", "c")) ~> partition.in partition.out(0) ~> Sink.fromSubscriber(c1) partition.out(1) ~> Sink.fromSubscriber(c2) @@ -281,11 +293,14 @@ class GraphPartitionSpec extends StreamSpec(""" val (s1, s2, s3) = RunnableGraph .fromGraph(GraphDSL.createGraph(Sink.seq[Int], Sink.seq[Int], Sink.seq[Int])(Tuple3.apply) { implicit b => 
(sink1, sink2, sink3) => - val partition = b.add(Partition[Int](3, { - case g if g > 3 => 0 - case l if l < 3 => 1 - case e if e == 3 => throw TE("Resume") - })) + val partition = b.add( + Partition[Int]( + 3, + { + case g if g > 3 => 0 + case l if l < 3 => 1 + case e if e == 3 => throw TE("Resume") + })) Source(List(1, 2, 3, 4, 5)) ~> partition.in partition.out(0) ~> sink1.in partition.out(1) ~> sink2.in @@ -304,11 +319,14 @@ class GraphPartitionSpec extends StreamSpec(""" val (s1, s2, s3) = RunnableGraph .fromGraph(GraphDSL.createGraph(Sink.seq[Int], Sink.seq[Int], Sink.seq[Int])(Tuple3.apply) { implicit b => (sink1, sink2, sink3) => - val partition = b.add(Partition[Int](3, { - case g if g > 3 => 0 - case l if l < 3 => 1 - case e if e == 3 => throw TE("Restart") - })) + val partition = b.add( + Partition[Int]( + 3, + { + case g if g > 3 => 0 + case l if l < 3 => 1 + case e if e == 3 => throw TE("Restart") + })) Source(List(1, 2, 3, 4, 5)) ~> partition.in partition.out(0) ~> sink1.in partition.out(1) ~> sink2.in @@ -328,11 +346,14 @@ class GraphPartitionSpec extends StreamSpec(""" val (s1, s2, s3) = RunnableGraph .fromGraph(GraphDSL.createGraph(Sink.seq[Int], Sink.seq[Int], Sink.seq[Int])(Tuple3.apply) { implicit b => (sink1, sink2, sink3) => - val partition = b.add(Partition[Int](3, { - case g if g > 3 => 0 - case l if l < 3 => 1 - case e if e == 3 => -1 // out of bounds - })) + val partition = b.add( + Partition[Int]( + 3, + { + case g if g > 3 => 0 + case l if l < 3 => 1 + case e if e == 3 => -1 // out of bounds + })) Source(List(1, 2, 3, 4, 5)) ~> partition.in partition.out(0) ~> sink1.in partition.out(1) ~> sink2.in diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphStageTimersSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphStageTimersSpec.scala index 82d3085f412..a306bea2e38 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphStageTimersSpec.scala +++ 
b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphStageTimersSpec.scala @@ -45,13 +45,17 @@ class GraphStageTimersSpec extends StreamSpec { override def createLogic(inheritedAttributes: Attributes) = new TimerGraphStageLogic(shape) { val tickCount = Iterator.from(1) - setHandler(in, new InHandler { - override def onPush() = push(out, grab(in)) - }) + setHandler( + in, + new InHandler { + override def onPush() = push(out, grab(in)) + }) - setHandler(out, new OutHandler { - override def onPull(): Unit = pull(in) - }) + setHandler( + out, + new OutHandler { + override def onPull(): Unit = pull(in) + }) override def preStart() = { sideChannel.asyncCallback = getAsyncCallback(onTestEvent) @@ -143,8 +147,8 @@ class GraphStageTimersSpec extends StreamSpec { val driver = setupIsolatedStage driver ! TestRepeatedTimer - val seq = receiveWhile(2.seconds) { - case t: Tick => t + val seq = receiveWhile(2.seconds) { case t: Tick => + t } (seq should have).length(5) expectNoMessage(1.second) @@ -158,16 +162,20 @@ class GraphStageTimersSpec extends StreamSpec { override def preStart(): Unit = scheduleWithFixedDelay("tick", 100.millis, 100.millis) - setHandler(out, new OutHandler { - override def onPull() = () // Do nothing - override def onDownstreamFinish(cause: Throwable) = completeStage() - }) - - setHandler(in, new InHandler { - override def onPush() = () // Do nothing - override def onUpstreamFinish() = completeStage() - override def onUpstreamFailure(ex: Throwable) = failStage(ex) - }) + setHandler( + out, + new OutHandler { + override def onPull() = () // Do nothing + override def onDownstreamFinish(cause: Throwable) = completeStage() + }) + + setHandler( + in, + new InHandler { + override def onPush() = () // Do nothing + override def onUpstreamFinish() = completeStage() + override def onUpstreamFailure(ex: Throwable) = failStage(ex) + }) override def onTimer(timerKey: Any) = { tickCount += 1 @@ -205,13 +213,17 @@ class GraphStageTimersSpec extends StreamSpec { 
override def createLogic(inheritedAttributes: Attributes) = new TimerGraphStageLogic(shape) { override def preStart(): Unit = scheduleOnce("tick", 100.millis) - setHandler(in, new InHandler { - override def onPush() = () // Ignore - }) - - setHandler(out, new OutHandler { - override def onPull(): Unit = pull(in) - }) + setHandler( + in, + new InHandler { + override def onPush() = () // Ignore + }) + + setHandler( + out, + new OutHandler { + override def onPull(): Unit = pull(in) + }) override def onTimer(timerKey: Any) = throw exception } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphUnzipWithSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphUnzipWithSpec.scala index d6ea3f19e30..d7ae69b0143 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphUnzipWithSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphUnzipWithSpec.scala @@ -278,7 +278,7 @@ class GraphUnzipWithSpec extends StreamSpec(""" RunnableGraph .fromGraph(GraphDSL.create() { implicit b => - val split22 = (a: (List[Int])) => + val split22 = (a: List[Int]) => ( a(0), a(0).toString, diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/HeadSinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/HeadSinkSpec.scala index 031e19ba3bf..b6e02357e6c 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/HeadSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/HeadSinkSpec.scala @@ -12,20 +12,22 @@ import akka.stream.AbruptTerminationException import akka.stream.Materializer import akka.stream.testkit._ -class HeadSinkSpec extends StreamSpec(""" +class HeadSinkSpec + extends StreamSpec(""" akka.stream.materializer.initial-input-buffer-size = 2 - """) with ScriptedTest { + """) + with ScriptedTest { "A Flow with Sink.head" must { "yield the first value for simple source" in { implicit val ec = system.dispatcher - //#head-operator-example + // #head-operator-example 
val source = Source(1 to 10) val result: Future[Int] = source.runWith(Sink.head) result.map(println) // 1 - //#head-operator-example + // #head-operator-example result.futureValue shouldEqual 1 } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/HubSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/HubSpec.scala index fbdca22e978..043aa9852ac 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/HubSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/HubSpec.scala @@ -562,14 +562,18 @@ class HubSpec extends StreamSpec { } "be able to use as round-robin router" in { - val source = Source(0 until 10).runWith(PartitionHub.statefulSink(() => { - var n = 0L + val source = Source(0 until 10).runWith( + PartitionHub.statefulSink( + () => { + var n = 0L - (info, _) => { - n += 1 - info.consumerIdByIdx((n % info.size).toInt) - } - }, startAfterNrOfConsumers = 2, bufferSize = 8)) + (info, _) => { + n += 1 + info.consumerIdByIdx((n % info.size).toInt) + } + }, + startAfterNrOfConsumers = 2, + bufferSize = 8)) val result1 = source.runWith(Sink.seq) val result2 = source.runWith(Sink.seq) result1.futureValue should ===(1 to 9 by 2) @@ -577,21 +581,25 @@ class HubSpec extends StreamSpec { } "be able to use as sticky session router" in { - val source = Source(List("usr-1", "usr-2", "usr-1", "usr-3")).runWith(PartitionHub.statefulSink(() => { - var sessions = Map.empty[String, Long] - var n = 0L - - (info, elem) => { - sessions.get(elem) match { - case Some(id) if info.consumerIds.exists(_ == id) => id - case _ => - n += 1 - val id = info.consumerIdByIdx((n % info.size).toInt) - sessions = sessions.updated(elem, id) - id - } - } - }, startAfterNrOfConsumers = 2, bufferSize = 8)) + val source = Source(List("usr-1", "usr-2", "usr-1", "usr-3")).runWith( + PartitionHub.statefulSink( + () => { + var sessions = Map.empty[String, Long] + var n = 0L + + (info, elem) => { + sessions.get(elem) match { + case Some(id) if 
info.consumerIds.exists(_ == id) => id + case _ => + n += 1 + val id = info.consumerIdByIdx((n % info.size).toInt) + sessions = sessions.updated(elem, id) + id + } + } + }, + startAfterNrOfConsumers = 2, + bufferSize = 8)) val result1 = source.runWith(Sink.seq) val result2 = source.runWith(Sink.seq) result1.futureValue should ===(List("usr-2")) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/JsonFramingSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/JsonFramingSpec.scala index 1c7f004c5d7..e53372ee4bc 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/JsonFramingSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/JsonFramingSpec.scala @@ -82,8 +82,8 @@ class JsonFramingSpec extends AkkaSpec { val result = Source(List(ByteString(input1), ByteString(input2))) .via(JsonFraming.objectScanner(Int.MaxValue)) - .runFold(Seq.empty[String]) { - case (acc, entry) => acc ++ Seq(entry.utf8String) + .runFold(Seq.empty[String]) { case (acc, entry) => + acc ++ Seq(entry.utf8String) } result.futureValue shouldBe Seq("""{ "name" : "john" }""", """{ "name" : "jack" }""") @@ -99,8 +99,8 @@ class JsonFramingSpec extends AkkaSpec { .single(ByteString(input)) .via(JsonFraming.objectScanner(Int.MaxValue)) .take(1) - .runFold(Seq.empty[String]) { - case (acc, entry) => acc ++ Seq(entry.utf8String) + .runFold(Seq.empty[String]) { case (acc, entry) => + acc ++ Seq(entry.utf8String) } Await.result(result, 3.seconds) shouldBe Seq("""{ "name": "john" }""") @@ -519,8 +519,8 @@ class JsonFramingSpec extends AkkaSpec { .single(ByteString(input)) .via(JsonFraming.objectScanner(5)) .map(_.utf8String) - .runFold(Seq.empty[String]) { - case (acc, entry) => acc ++ Seq(entry) + .runFold(Seq.empty[String]) { case (acc, entry) => + acc ++ Seq(entry) } a[FramingException] shouldBe thrownBy { diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LastSinkSpec.scala 
b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LastSinkSpec.scala index 2073b2dcf21..46ed6abb51e 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LastSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LastSinkSpec.scala @@ -18,12 +18,12 @@ class LastSinkSpec extends StreamSpec with ScriptedTest { "A Flow with Sink.last" must { "yield the last value" in { - //#last-operator-example + // #last-operator-example val source = Source(1 to 10) val result: Future[Int] = source.runWith(Sink.last) result.map(println) // 10 - //#last-operator-example + // #last-operator-example result.futureValue shouldEqual 10 } @@ -55,12 +55,12 @@ class LastSinkSpec extends StreamSpec with ScriptedTest { } "yield None for empty stream" in { - //#lastOption-operator-example + // #lastOption-operator-example val source = Source.empty[Int] val result: Future[Option[Int]] = source.runWith(Sink.lastOption) result.map(println) // None - //#lastOption-operator-example + // #lastOption-operator-example result.futureValue shouldEqual None } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LazyFlowSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LazyFlowSpec.scala index 57347a349bc..f2d53286079 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LazyFlowSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LazyFlowSpec.scala @@ -217,7 +217,7 @@ class LazyFlowSpec extends StreamSpec(""" val deferredMatVal = result._1 val list = result._2 list.failed.futureValue shouldBe a[TE] - //futureFlow's behaviour in case of mat failure (follows flatMapPrefix) + // futureFlow's behaviour in case of mat failure (follows flatMapPrefix) deferredMatVal.failed.futureValue shouldBe a[NeverMaterializedException] deferredMatVal.failed.futureValue.getCause shouldEqual TE("mat-failed") } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/MapWithResourceSpec.scala 
b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/MapWithResourceSpec.scala index 07371edbc67..d5e069dbabd 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/MapWithResourceSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/MapWithResourceSpec.scala @@ -84,12 +84,14 @@ class MapWithResourceSpec extends StreamSpec(UnboundedMailboxConfig) { "MapWithResource" must { "can read contents from a file" in { val p = Source(List(1, 10, 20, 30)) - .mapWithResource(() => newBufferedReader())((reader, count) => { - readLines(reader, count) - }, reader => { - reader.close() - None - }) + .mapWithResource(() => newBufferedReader())( + (reader, count) => { + readLines(reader, count) + }, + reader => { + reader.close() + None + }) .mapConcat(identity) .runWith(Sink.asPublisher(false)) @@ -118,13 +120,15 @@ class MapWithResourceSpec extends StreamSpec(UnboundedMailboxConfig) { val p = Source .repeat(1) .take(100) - .mapWithResource(() => newBufferedReader())((reader, _) => { - val s = reader.readLine() - if (s != null && s.contains("b")) throw TE("") else s - }, reader => { - reader.close() - None - }) + .mapWithResource(() => newBufferedReader())( + (reader, _) => { + val s = reader.readLine() + if (s != null && s.contains("b")) throw TE("") else s + }, + reader => { + reader.close() + None + }) .withAttributes(supervisionStrategy(resumingDecider)) .runWith(Sink.asPublisher(false)) val c = TestSubscriber.manualProbe[String]() @@ -144,13 +148,15 @@ class MapWithResourceSpec extends StreamSpec(UnboundedMailboxConfig) { val p = Source .repeat(1) .take(100) - .mapWithResource(() => newBufferedReader())((reader, _) => { - val s = reader.readLine() - if (s != null && s.contains("b")) throw TE("") else s - }, reader => { - reader.close() - None - }) + .mapWithResource(() => newBufferedReader())( + (reader, _) => { + val s = reader.readLine() + if (s != null && s.contains("b")) throw TE("") else s + }, + reader => { + reader.close() + None + }) 
.withAttributes(supervisionStrategy(restartingDecider)) .runWith(Sink.asPublisher(false)) val c = TestSubscriber.manualProbe[String]() @@ -170,16 +176,18 @@ class MapWithResourceSpec extends StreamSpec(UnboundedMailboxConfig) { val buffer = new Array[Char](chunkSize) val p = Source .repeat(1) - .mapWithResource(() => newBufferedReader())((reader, _) => { - val s = reader.read(buffer) - if (s > 0) Some(ByteString(buffer.mkString("")).take(s)) else None - }, reader => { - reader.close() - None - }) + .mapWithResource(() => newBufferedReader())( + (reader, _) => { + val s = reader.read(buffer) + if (s > 0) Some(ByteString(buffer.mkString("")).take(s)) else None + }, + reader => { + reader.close() + None + }) .takeWhile(_.isDefined) - .collect { - case Some(bytes) => bytes + .collect { case Some(bytes) => + bytes } .runWith(Sink.asPublisher(false)) val c = TestSubscriber.manualProbe[ByteString]() @@ -205,10 +213,12 @@ class MapWithResourceSpec extends StreamSpec(UnboundedMailboxConfig) { "use dedicated blocking-io-dispatcher by default" in { val p = Source .single(1) - .mapWithResource(() => newBufferedReader())((reader, _) => Option(reader.readLine()), reader => { - reader.close() - None - }) + .mapWithResource(() => newBufferedReader())( + (reader, _) => Option(reader.readLine()), + reader => { + reader.close() + None + }) .runWith(TestSink()) SystemMaterializer(system).materializer @@ -224,10 +234,12 @@ class MapWithResourceSpec extends StreamSpec(UnboundedMailboxConfig) { EventFilter[TE](occurrences = 1).intercept { val p = Source .single(1) - .mapWithResource[BufferedReader, String](() => throw TE(""))((reader, _) => reader.readLine(), reader => { - reader.close() - None - }) + .mapWithResource[BufferedReader, String](() => throw TE(""))( + (reader, _) => reader.readLine(), + reader => { + reader.close() + None + }) .runWith(Sink.asPublisher(false)) val c = TestSubscriber.manualProbe[String]() p.subscribe(c) @@ -274,11 +286,13 @@ class MapWithResourceSpec extends 
StreamSpec(UnboundedMailboxConfig) { val closedCounter = new AtomicInteger(0) val probe = Source .repeat(1) - .mapWithResource(() => 23)((_, _) => throw TE("failing read"), _ => { - closedCounter.incrementAndGet() - if (closedCounter.get == 1) throw TE("boom") - None - }) + .mapWithResource(() => 23)( + (_, _) => throw TE("failing read"), + _ => { + closedCounter.incrementAndGet() + if (closedCounter.get == 1) throw TE("boom") + None + }) .runWith(TestSink[Int]()) EventFilter[TE](occurrences = 1).intercept { @@ -291,11 +305,13 @@ class MapWithResourceSpec extends StreamSpec(UnboundedMailboxConfig) { "will close the resource when upstream complete" in { val closedCounter = new AtomicInteger(0) val (pub, sub) = TestSource[Int]() - .mapWithResource(() => newBufferedReader())((reader, count) => readLines(reader, count), reader => { - reader.close() - closedCounter.incrementAndGet() - Some(List("End")) - }) + .mapWithResource(() => newBufferedReader())( + (reader, count) => readLines(reader, count), + reader => { + reader.close() + closedCounter.incrementAndGet() + Some(List("End")) + }) .mapConcat(identity) .toMat(TestSink())(Keep.both) .run() @@ -305,17 +321,19 @@ class MapWithResourceSpec extends StreamSpec(UnboundedMailboxConfig) { pub.sendComplete() sub.expectNext("End") sub.expectComplete() - closedCounter.get shouldBe (1) + closedCounter.get shouldBe 1 } "will close the resource when upstream fail" in { val closedCounter = new AtomicInteger(0) val (pub, sub) = TestSource[Int]() - .mapWithResource(() => newBufferedReader())((reader, count) => readLines(reader, count), reader => { - reader.close() - closedCounter.incrementAndGet() - Some(List("End")) - }) + .mapWithResource(() => newBufferedReader())( + (reader, count) => readLines(reader, count), + reader => { + reader.close() + closedCounter.incrementAndGet() + Some(List("End")) + }) .mapConcat(identity) .toMat(TestSink())(Keep.both) .run() @@ -325,17 +343,19 @@ class MapWithResourceSpec extends 
StreamSpec(UnboundedMailboxConfig) { pub.sendError(ex) sub.expectNext("End") sub.expectError(ex) - closedCounter.get shouldBe (1) + closedCounter.get shouldBe 1 } "will close the resource when downstream cancel" in { val closedCounter = new AtomicInteger(0) val (pub, sub) = TestSource[Int]() - .mapWithResource(() => newBufferedReader())((reader, count) => readLines(reader, count), reader => { - reader.close() - closedCounter.incrementAndGet() - Some(List("End")) - }) + .mapWithResource(() => newBufferedReader())( + (reader, count) => readLines(reader, count), + reader => { + reader.close() + closedCounter.incrementAndGet() + Some(List("End")) + }) .mapConcat(identity) .toMat(TestSink())(Keep.both) .run() @@ -345,17 +365,19 @@ class MapWithResourceSpec extends StreamSpec(UnboundedMailboxConfig) { sub.expectNext(manyLinesArray(0)) subscription.cancel() pub.expectCancellation() - closedCounter.get shouldBe (1) + closedCounter.get shouldBe 1 } "will close the resource when downstream fail" in { val closedCounter = new AtomicInteger(0) val (pub, sub) = TestSource[Int]() - .mapWithResource(() => newBufferedReader())((reader, count) => readLines(reader, count), reader => { - reader.close() - closedCounter.incrementAndGet() - Some(List("End")) - }) + .mapWithResource(() => newBufferedReader())( + (reader, count) => readLines(reader, count), + reader => { + reader.close() + closedCounter.incrementAndGet() + Some(List("End")) + }) .mapConcat(identity) .toMat(TestSink())(Keep.both) .run() @@ -365,7 +387,7 @@ class MapWithResourceSpec extends StreamSpec(UnboundedMailboxConfig) { sub.expectNext(manyLinesArray(1)) sub.cancel(ex) pub.expectCancellationWithCause(ex) - closedCounter.get shouldBe (1) + closedCounter.get shouldBe 1 } "will close the resource on abrupt materializer termination" in { @@ -376,11 +398,13 @@ class MapWithResourceSpec extends StreamSpec(UnboundedMailboxConfig) { .single(1) .mapWithResource(() => { newBufferedReader() - })((reader, count) => 
readLines(reader, count), reader => { - reader.close() - promise.complete(Success(Done)) - Some(List("End")) - }) + })( + (reader, count) => readLines(reader, count), + reader => { + reader.close() + promise.complete(Success(Done)) + Some(List("End")) + }) .mapConcat(identity) .runWith(Sink.never)(mat) mat.shutdown() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/QueueSinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/QueueSinkSpec.scala index 831bb08020e..42c15e3168b 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/QueueSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/QueueSinkSpec.scala @@ -130,9 +130,9 @@ class QueueSinkSpec extends StreamSpec { "fail future immediately if stream already canceled" in { val queue = Source.empty[Int].runWith(Sink.queue()) // race here because no way to observe that queue sink saw termination - awaitAssert({ + awaitAssert { queue.pull().failed.futureValue shouldBe a[StreamDetachedException] - }) + } } "timeout future when stream cannot provide data" in { diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/QueueSourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/QueueSourceSpec.scala index f3e5a5d6cce..769af009fc6 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/QueueSourceSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/QueueSourceSpec.scala @@ -211,7 +211,7 @@ class QueueSourceSpec extends StreamSpec { val s = TestSubscriber.manualProbe[Int]() val queue = Source.queue(1, OverflowStrategy.fail).to(Sink.fromSubscriber(s)).run() queue.watchCompletion().pipeTo(testActor) - queue.offer(1) //need to wait when first offer is done as initialization can be done in this moment + queue.offer(1) // need to wait when first offer is done as initialization can be done in this moment queue.offer(2) expectMsgClass(classOf[Status.Failure]) } diff --git 
a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/RestartSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/RestartSpec.scala index 596b269a1cb..0c2beadd5f6 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/RestartSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/RestartSpec.scala @@ -330,7 +330,9 @@ class RestartSpec probe.requestNext("a") probe.requestNext("a") - Thread.sleep((shortMinBackoff + (shortMinBackoff * 2) + shortMinBackoff).toMillis) // if using shortMinBackoff as deadline cause reset + Thread.sleep( + (shortMinBackoff + (shortMinBackoff * 2) + shortMinBackoff).toMillis + ) // if using shortMinBackoff as deadline cause reset probe.requestNext("a") @@ -593,7 +595,9 @@ class RestartSpec sinkProbe.requestNext("cancel") // The probe should now be backing off for 2 * shortMinBackoff - Thread.sleep((shortMinBackoff + (shortMinBackoff * 2) + minBackoff).toMillis) // if using shortMinBackoff as deadline cause reset + Thread.sleep( + (shortMinBackoff + (shortMinBackoff * 2) + minBackoff).toMillis + ) // if using shortMinBackoff as deadline cause reset probe.sendNext("cancel") sinkProbe.requestNext("cancel") diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/RetryFlowSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/RetryFlowSpec.scala index 3364f9277fa..9642c46822a 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/RetryFlowSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/RetryFlowSpec.scala @@ -14,10 +14,12 @@ import akka.stream.OverflowStrategy import akka.stream.testkit.{ StreamSpec, TestPublisher, TestSubscriber } import akka.stream.testkit.scaladsl.{ TestSink, TestSource } -class RetryFlowSpec extends StreamSpec(""" +class RetryFlowSpec + extends StreamSpec(""" akka.stream.materializer.initial-input-buffer-size = 1 akka.stream.materializer.max-input-buffer-size = 1 - """) with CustomMatchers { + """) + with CustomMatchers { 
final val Failed = new Exception("prepared failure") final val FailedElem: Try[Int] = Failure(Failed) @@ -29,8 +31,8 @@ class RetryFlowSpec extends StreamSpec(""" }) val failAllValuesFlow: FlowWithContext[Int, Int, Try[Int], Int, NotUsed] = - FlowWithContext.fromTuples(Flow.fromFunction { - case (_, j) => (FailedElem, j) + FlowWithContext.fromTuples(Flow.fromFunction { case (_, j) => + (FailedElem, j) }) val alwaysRecoveringFunc: ((Int, Int), (Try[Int], Int)) => Option[(Int, Int)] = { @@ -170,14 +172,14 @@ class RetryFlowSpec extends StreamSpec(""" "allow retrying a successful element" in { class SomeContext - //#retry-success + // #retry-success val flow: FlowWithContext[Int, SomeContext, Int, SomeContext, NotUsed] = // ??? - //#retry-success - FlowWithContext.fromTuples[Int, SomeContext, Int, SomeContext, NotUsed](Flow.fromFunction { - case (i, ctx) => i / 2 -> ctx + // #retry-success + FlowWithContext.fromTuples[Int, SomeContext, Int, SomeContext, NotUsed](Flow.fromFunction { case (i, ctx) => + i / 2 -> ctx }) - //#retry-success + // #retry-success val retryFlow: FlowWithContext[Int, SomeContext, Int, SomeContext, NotUsed] = RetryFlow.withBackoffAndContext( @@ -189,7 +191,7 @@ class RetryFlowSpec extends StreamSpec(""" case ((_, _), (result, ctx)) if result > 0 => Some(result -> ctx) case _ => None }) - //#retry-success + // #retry-success val (source, sink) = TestSource[(Int, SomeContext)]().via(retryFlow).toMat(TestSink())(Keep.both).run() @@ -269,8 +271,8 @@ class RetryFlowSpec extends StreamSpec(""" }) val (source, sink) = TestSource[(State, NotUsed)]() - .via(RetryFlow.withBackoffAndContext(10.millis, 5.seconds, 0d, NumRetries, flow) { - case (_, (s, _)) => Some(s -> NotUsed) + .via(RetryFlow.withBackoffAndContext(10.millis, 5.seconds, 0d, NumRetries, flow) { case (_, (s, _)) => + Some(s -> NotUsed) }) .toMat(TestSink())(Keep.both) .run() @@ -282,8 +284,8 @@ class RetryFlowSpec extends StreamSpec(""" val timesBetweenRetries = retriedAt .sliding(2) - 
.collect { - case before :: after :: Nil => before - after + .collect { case before :: after :: Nil => + before - after } .toIndexedSeq @@ -370,7 +372,8 @@ class RetryFlowSpec extends StreamSpec(""" externalIn.expectCancellation() } - "propagate error before the RetryFlow, while on retry spin" in new ConstructBench[Int, Int, Int]((v, _) => Some(v)) { + "propagate error before the RetryFlow, while on retry spin" in new ConstructBench[Int, Int, Int]((v, _) => + Some(v)) { externalOut.request(92) // spinning message externalIn.sendNext(1 -> 0) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SeqSinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SeqSinkSpec.scala index 8f8810b175b..94bbea4b8f4 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SeqSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SeqSinkSpec.scala @@ -19,7 +19,7 @@ class SeqSinkSpec extends StreamSpec(""" "Sink.toSeq" must { "return a Seq[T] from a Source" in { - val input = (1 to 6) + val input = 1 to 6 val future: Future[immutable.Seq[Int]] = Source(input).runWith(Sink.seq) val result: immutable.Seq[Int] = Await.result(future, remainingOrDefault) result should be(input.toSeq) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkSpec.scala index 9eb1a4266b3..631aba3cb4e 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkSpec.scala @@ -36,7 +36,7 @@ class SinkSpec extends StreamSpec with DefaultTimeout with ScalaFutures { s.request(3) } probes.zipWithIndex.foreach { case (p, i) => p.expectNext(i) } - probes.foreach { case p => p.expectComplete() } + probes.foreach { case p => p.expectComplete() } } "be composable with importing 1 module" in { @@ -54,7 +54,7 @@ class SinkSpec extends StreamSpec with DefaultTimeout with ScalaFutures { s.request(3) 
} probes.zipWithIndex.foreach { case (p, i) => p.expectNext(i) } - probes.foreach { case p => p.expectComplete() } + probes.foreach { case p => p.expectComplete() } } "be composable with importing 2 modules" in { @@ -76,7 +76,7 @@ class SinkSpec extends StreamSpec with DefaultTimeout with ScalaFutures { s.request(3) } probes.zipWithIndex.foreach { case (p, i) => p.expectNext(i) } - probes.foreach { case p => p.expectComplete() } + probes.foreach { case p => p.expectComplete() } } "be composable with importing 3 modules" in { @@ -99,7 +99,7 @@ class SinkSpec extends StreamSpec with DefaultTimeout with ScalaFutures { s.request(3) } probes.zipWithIndex.foreach { case (p, i) => p.expectNext(i) } - probes.foreach { case p => p.expectComplete() } + probes.foreach { case p => p.expectComplete() } } "combine to many outputs with simplified API" in { @@ -268,13 +268,13 @@ class SinkSpec extends StreamSpec with DefaultTimeout with ScalaFutures { "The reduce sink" must { "sum up 1 to 10 correctly" in { - //#reduce-operator-example + // #reduce-operator-example val source = Source(1 to 10) val result = source.runWith(Sink.reduce[Int]((a, b) => a + b)) result.map(println)(system.dispatcher) // will print // 55 - //#reduce-operator-example + // #reduce-operator-example assert(result.futureValue == (1 to 10).sum) } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SourceSpec.scala index 1763739cc9e..ace72ad317d 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SourceSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SourceSpec.scala @@ -34,11 +34,11 @@ class SourceSpec extends StreamSpec with DefaultTimeout { "produce exactly one element" in { implicit val ec = system.dispatcher - //#source-single + // #source-single val s: Future[immutable.Seq[Int]] = Source.single(1).runWith(Sink.seq) s.foreach(list => println(s"Collected elements: $list")) // prints: 
Collected elements: List(1) - //#source-single + // #source-single s.futureValue should ===(immutable.Seq(1)) @@ -185,14 +185,16 @@ class SourceSpec extends StreamSpec with DefaultTimeout { // compiler to check the correct materialized value of type = SourceQueueWithComplete[Int] available val combined1: Source[Int, BoundedSourceQueue[Int]] = - Source.combineMat(queueSource, intSeqSource)(Concat(_))(Keep.left) //Keep.left (i.e. preserve queueSource's materialized value) + Source.combineMat(queueSource, intSeqSource)(Concat(_))( + Keep.left + ) // Keep.left (i.e. preserve queueSource's materialized value) val (queue1, sinkProbe1) = combined1.toMat(TestSink[Int]())(Keep.both).run() sinkProbe1.request(6) queue1.offer(10) queue1.offer(20) queue1.offer(30) - queue1.complete() //complete queueSource so that combined1 with `Concat` then pulls elements from intSeqSource + queue1.complete() // complete queueSource so that combined1 with `Concat` then pulls elements from intSeqSource sinkProbe1.expectNext(10) sinkProbe1.expectNext(20) sinkProbe1.expectNext(30) @@ -202,19 +204,23 @@ class SourceSpec extends StreamSpec with DefaultTimeout { // compiler to check the correct materialized value of type = SourceQueueWithComplete[Int] available val combined2: Source[Int, BoundedSourceQueue[Int]] = - //queueSource to be the second of combined source - Source.combineMat(intSeqSource, queueSource)(Concat(_))(Keep.right) //Keep.right (i.e. preserve queueSource's materialized value) + // queueSource to be the second of combined source + Source.combineMat(intSeqSource, queueSource)(Concat(_))( + Keep.right + ) // Keep.right (i.e. 
preserve queueSource's materialized value) val (queue2, sinkProbe2) = combined2.toMat(TestSink[Int]())(Keep.both).run() sinkProbe2.request(6) queue2.offer(10) queue2.offer(20) queue2.offer(30) - queue2.complete() //complete queueSource so that combined1 with `Concat` then pulls elements from queueSource - sinkProbe2.expectNext(1) //as intSeqSource iss the first in combined source, elements from intSeqSource come first + queue2.complete() // complete queueSource so that combined1 with `Concat` then pulls elements from queueSource + sinkProbe2.expectNext( + 1 + ) // as intSeqSource iss the first in combined source, elements from intSeqSource come first sinkProbe2.expectNext(2) sinkProbe2.expectNext(3) - sinkProbe2.expectNext(10) //after intSeqSource run out elements, queueSource elements come + sinkProbe2.expectNext(10) // after intSeqSource run out elements, queueSource elements come sinkProbe2.expectNext(20) sinkProbe2.expectNext(30) } @@ -258,13 +264,14 @@ class SourceSpec extends StreamSpec with DefaultTimeout { "terminate with a failure if there is an exception thrown" in { val t = new RuntimeException("expected") EventFilter[RuntimeException](message = "expected", occurrences = 1).intercept( - whenReady(Source - .unfold((0, 1)) { - case (a, _) if a > 10000000 => throw t - case (a, b) => Some((b, a + b) -> a) - } - .runFold(List.empty[Int]) { case (xs, x) => x :: xs } - .failed) { x => + whenReady( + Source + .unfold((0, 1)) { + case (a, _) if a > 10000000 => throw t + case (a, b) => Some((b, a + b) -> a) + } + .runFold(List.empty[Int]) { case (xs, x) => x :: xs } + .failed) { x => (x should be).theSameInstanceAs(t) }) } @@ -281,7 +288,7 @@ class SourceSpec extends StreamSpec with DefaultTimeout { "generate an unbounded fibonacci sequence" in { Source - .unfold((0, 1))({ case (a, b) => Some((b, a + b) -> a) }) + .unfold((0, 1)) { case (a, b) => Some((b, a + b) -> a) } .take(36) .runFold(List.empty[Int]) { case (xs, x) => x :: xs } .futureValue should 
===(expected) @@ -389,24 +396,24 @@ class SourceSpec extends StreamSpec with DefaultTimeout { "continuously generate the same sequence" in { val expected = Seq(1, 2, 3, 1, 2, 3, 1, 2, 3) - //#cycle + // #cycle Source .cycle(() => List(1, 2, 3).iterator) .grouped(9) .runWith(Sink.head) // This will produce the Seq(1, 2, 3, 1, 2, 3, 1, 2, 3) - //#cycle + // #cycle .futureValue should ===(expected) } "throw an exception in case of empty iterator" in { - //#cycle-error + // #cycle-error val empty = Iterator.empty Source .cycle(() => empty) .runWith(Sink.head) // This will return a failed future with an `IllegalArgumentException` - //#cycle-error + // #cycle-error .failed .futureValue shouldBe an[IllegalArgumentException] } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SourceWithContextSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SourceWithContextSpec.scala index 4741a0b7cc9..e23d8ca6a83 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SourceWithContextSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SourceWithContextSpec.scala @@ -124,8 +124,8 @@ class SourceWithContextSpec extends StreamSpec { .map { offset => Message("a", offset) } - .map { - case m @ Message(_, offset) => if (offset == 3) throw ex else m + .map { case m @ Message(_, offset) => + if (offset == 3) throw ex else m } .asSourceWithContext(_.offset) .mapError { case _: Throwable => boom } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StageActorRefSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StageActorRefSpec.scala index 091dc97d045..1e6cb241afc 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StageActorRefSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StageActorRefSpec.scala @@ -155,15 +155,16 @@ class StageActorRefSpec extends StreamSpec with ImplicitSender { stageRef ! PoisonPill // should log a warning, and NOT stop the stage. 
val actorName = """StageActorRef-[\d+]""" - val expectedMsg = s"[PoisonPill|Kill] message sent to StageActorRef($actorName) will be ignored,since it is not a real Actor. " + + val expectedMsg = + s"[PoisonPill|Kill] message sent to StageActorRef($actorName) will be ignored,since it is not a real Actor. " + "Use a custom message type to communicate with it instead." - expectMsgPF(1.second, expectedMsg) { - case Logging.Warning(_, _, msg) => expectedMsg.r.pattern.matcher(msg.toString).matches() + expectMsgPF(1.second, expectedMsg) { case Logging.Warning(_, _, msg) => + expectedMsg.r.pattern.matcher(msg.toString).matches() } stageRef ! Kill // should log a warning, and NOT stop the stage. - expectMsgPF(1.second, expectedMsg) { - case Logging.Warning(_, _, msg) => expectedMsg.r.pattern.matcher(msg.toString).matches() + expectMsgPF(1.second, expectedMsg) { case Logging.Warning(_, _, msg) => + expectedMsg.r.pattern.matcher(msg.toString).matches() } source.success(Some(2)) @@ -209,8 +210,8 @@ object StageActorRefSpec { case (_, PullNow) => pull(in) case (sender, CallInitStageActorRef) => sender ! getStageActor(behavior).ref case (_, BecomeStringEcho) => - getStageActor { - case (theSender, msg) => theSender ! msg.toString + getStageActor { case (theSender, msg) => + theSender ! 
msg.toString } case (_, StopNow) => p.trySuccess(sum) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StreamConvertersSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StreamConvertersSpec.scala index 2761a9160b4..8d27b59ea11 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StreamConvertersSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StreamConvertersSpec.scala @@ -37,9 +37,11 @@ class StreamConvertersSpec extends StreamSpec with DefaultTimeout { import scala.compat.java8.FunctionConverters._ def javaStreamInts = - IntStream.iterate(1, { (i: Int) => - i + 1 - }.asJava) + IntStream.iterate( + 1, + { (i: Int) => + i + 1 + }.asJava) "work with Java collections" in { val list = new java.util.LinkedList[Integer]() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StreamRefsSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StreamRefsSpec.scala index 09663fc93b1..87e6f36e9e7 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StreamRefsSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StreamRefsSpec.scala @@ -114,11 +114,10 @@ object StreamRefsSpec { .sinkRef[String]() .viaMat(KillSwitches.single)(Keep.both) .alsoToMat(Sink.head)(Keep.both) - .mapMaterializedValue { - case ((sink, ks), firstF) => - // shutdown the stream after first element - firstF.foreach(_ => ks.shutdown())(context.dispatcher) - sink + .mapMaterializedValue { case ((sink, ks), firstF) => + // shutdown the stream after first element + firstF.foreach(_ => ks.shutdown())(context.dispatcher) + sink } .watchTermination()(Keep.both) .to(Sink.actorRef(probe, "", f => ": " + f.getMessage)) @@ -168,7 +167,8 @@ object StreamRefsSpec { final case class BulkSinkMsg(dataSink: SinkRef[ByteString]) def config(): Config = { - ConfigFactory.parseString(""" + ConfigFactory + .parseString(""" akka { loglevel = DEBUG @@ -189,8 +189,8 @@ object StreamRefsSpec { def 
props(probe: ActorRef) = Props(new SnitchActor(probe)) } class SnitchActor(probe: ActorRef) extends Actor { - def receive = { - case msg => probe ! msg + def receive = { case msg => + probe ! msg } } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/TakeLastSinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/TakeLastSinkSpec.scala index 052969515bd..2d84773c6f4 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/TakeLastSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/TakeLastSinkSpec.scala @@ -28,7 +28,7 @@ class TakeLastSinkSpec extends StreamSpec { "return top three student based on GPA correctly" in { implicit val ex = system.dispatcher - //#takeLast-operator-example + // #takeLast-operator-example case class Student(name: String, gpa: Double) val students = List( @@ -56,7 +56,7 @@ class TakeLastSinkSpec extends StreamSpec { Name: Kendra, GPA: 4.2 */ - //#takeLast-operator-example + // #takeLast-operator-example result.futureValue shouldEqual students.takeRight(3) } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceAsyncSourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceAsyncSourceSpec.scala index 269ada7e83a..830c7bed358 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceAsyncSourceSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceAsyncSourceSpec.scala @@ -371,13 +371,16 @@ class UnfoldResourceAsyncSourceSpec extends StreamSpec(UnboundedMailboxConfig) { "close resource when stream is quickly cancelled reproducer 2" in { val closed = Promise[Done]() Source - .unfoldResourceAsync[String, Iterator[String]]({ () => - Future(Iterator("a", "b", "c")) - }, { m => - Future(if (m.hasNext) Some(m.next()) else None) - }, { _ => - closed.success(Done).future - }) + .unfoldResourceAsync[String, Iterator[String]]( + { () => + Future(Iterator("a", "b", "c")) + }, + { 
m => + Future(if (m.hasNext) Some(m.next()) else None) + }, + { _ => + closed.success(Done).future + }) .map(m => println(s"Elem=> $m")) .runWith(Sink.cancelled) @@ -388,10 +391,13 @@ class UnfoldResourceAsyncSourceSpec extends StreamSpec(UnboundedMailboxConfig) { val closeProbe = TestProbe() val probe = TestSubscriber.probe[Unit]() Source - .unfoldResourceAsync[Unit, Unit](() => Future.successful(()), _ => Future.failed(TE("read failed")), { _ => - closeProbe.ref ! "closed" - Future.successful(Done) - }) + .unfoldResourceAsync[Unit, Unit]( + () => Future.successful(()), + _ => Future.failed(TE("read failed")), + { _ => + closeProbe.ref ! "closed" + Future.successful(Done) + }) .runWith(Sink.fromSubscriber(probe)) probe.ensureSubscription() probe.request(1L) @@ -403,10 +409,13 @@ class UnfoldResourceAsyncSourceSpec extends StreamSpec(UnboundedMailboxConfig) { val closeProbe = TestProbe() val probe = TestSubscriber.probe[Unit]() Source - .unfoldResourceAsync[Unit, Unit](() => Future.successful(()), _ => throw TE("read failed"), { _ => - closeProbe.ref ! "closed" - Future.successful(Done) - }) + .unfoldResourceAsync[Unit, Unit]( + () => Future.successful(()), + _ => throw TE("read failed"), + { _ => + closeProbe.ref ! 
"closed" + Future.successful(Done) + }) .runWith(Sink.fromSubscriber(probe)) probe.ensureSubscription() probe.request(1L) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceSourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceSourceSpec.scala index f3e4546a54c..d38cd0af3d2 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceSourceSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceSourceSpec.scala @@ -79,10 +79,13 @@ class UnfoldResourceSourceSpec extends StreamSpec(UnboundedMailboxConfig) { "continue when Strategy is Resume and exception happened" in { val p = Source - .unfoldResource[String, BufferedReader](() => newBufferedReader(), reader => { - val s = reader.readLine() - if (s != null && s.contains("b")) throw TE("") else Option(s) - }, reader => reader.close()) + .unfoldResource[String, BufferedReader]( + () => newBufferedReader(), + reader => { + val s = reader.readLine() + if (s != null && s.contains("b")) throw TE("") else Option(s) + }, + reader => reader.close()) .withAttributes(supervisionStrategy(resumingDecider)) .runWith(Sink.asPublisher(false)) val c = TestSubscriber.manualProbe[String]() @@ -100,10 +103,13 @@ class UnfoldResourceSourceSpec extends StreamSpec(UnboundedMailboxConfig) { "close and open stream again when Strategy is Restart" in { val p = Source - .unfoldResource[String, BufferedReader](() => newBufferedReader(), reader => { - val s = reader.readLine() - if (s != null && s.contains("b")) throw TE("") else Option(s) - }, reader => reader.close()) + .unfoldResource[String, BufferedReader]( + () => newBufferedReader(), + reader => { + val s = reader.readLine() + if (s != null && s.contains("b")) throw TE("") else Option(s) + }, + reader => reader.close()) .withAttributes(supervisionStrategy(restartingDecider)) .runWith(Sink.asPublisher(false)) val c = TestSubscriber.manualProbe[String]() @@ -122,10 +128,13 @@ class 
UnfoldResourceSourceSpec extends StreamSpec(UnboundedMailboxConfig) { val chunkSize = 50 val buffer = new Array[Char](chunkSize) val p = Source - .unfoldResource[ByteString, Reader](() => newBufferedReader(), reader => { - val s = reader.read(buffer) - if (s > 0) Some(ByteString(buffer.mkString("")).take(s)) else None - }, reader => reader.close()) + .unfoldResource[ByteString, Reader]( + () => newBufferedReader(), + reader => { + val s = reader.read(buffer) + if (s > 0) Some(ByteString(buffer.mkString("")).take(s)) else None + }, + reader => reader.close()) .runWith(Sink.asPublisher(false)) val c = TestSubscriber.manualProbe[ByteString]() @@ -218,7 +227,8 @@ class UnfoldResourceSourceSpec extends StreamSpec(UnboundedMailboxConfig) { val probe = Source .unfoldResource[Int, Int]( () => 23, // the best resource there is - _ => throw TE("failing read"), { _ => + _ => throw TE("failing read"), + { _ => closedCounter.incrementAndGet() if (closedCounter.get == 1) throw TE("boom") }) diff --git a/akka-stream-tests/src/test/scala/akka/stream/snapshot/MaterializerStateSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/snapshot/MaterializerStateSpec.scala index 5e4e3423c53..b5043d50107 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/snapshot/MaterializerStateSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/snapshot/MaterializerStateSpec.scala @@ -23,13 +23,15 @@ class MaterializerStateSpec extends AkkaSpec() { try { Source.maybe[Int].map(_.toString).zipWithIndex.runWith(Sink.seq) - awaitAssert({ - val snapshot = MaterializerState.streamSnapshots(mat).futureValue - - snapshot should have size (1) - snapshot.head.activeInterpreters should have size (1) - snapshot.head.activeInterpreters.head.logics should have size (4) // all 4 operators - }, remainingOrDefault) + awaitAssert( + { + val snapshot = MaterializerState.streamSnapshots(mat).futureValue + + snapshot should have size 1 + snapshot.head.activeInterpreters should have size 1 + 
snapshot.head.activeInterpreters.head.logics should have size 4 // all 4 operators + }, + remainingOrDefault) } finally { mat.shutdown() } @@ -39,23 +41,27 @@ class MaterializerStateSpec extends AkkaSpec() { val promise = Promise[Int]() Source.future(promise.future).map(_.toString).zipWithIndex.runWith(Sink.seq) - awaitAssert({ - val snapshot = MaterializerState.streamSnapshots(system).futureValue + awaitAssert( + { + val snapshot = MaterializerState.streamSnapshots(system).futureValue - snapshot should have size (1) - snapshot.head.activeInterpreters should have size (1) - snapshot.head.activeInterpreters.head.logics should have size (4) // all 4 operators - }, remainingOrDefault) + snapshot should have size 1 + snapshot.head.activeInterpreters should have size 1 + snapshot.head.activeInterpreters.head.logics should have size 4 // all 4 operators + }, + remainingOrDefault) promise.success(1) } "snapshot a running stream that includes a TLSActor" in { Source.never - .via(Tcp(system).outgoingConnectionWithTls(InetSocketAddress.createUnresolved("akka.io", 443), () => { - val engine = SSLContext.getDefault.createSSLEngine("akka.io", 443) - engine.setUseClientMode(true) - engine - })) + .via(Tcp(system).outgoingConnectionWithTls( + InetSocketAddress.createUnresolved("akka.io", 443), + () => { + val engine = SSLContext.getDefault.createSSLEngine("akka.io", 443) + engine.setUseClientMode(true) + engine + })) .runWith(Sink.seq) val snapshots = MaterializerState.streamSnapshots(system).futureValue @@ -72,12 +78,14 @@ class MaterializerStateSpec extends AkkaSpec() { .concat(Source.maybe[String]) // make sure we leave it running .runWith(probe) out.requestNext("one") - awaitAssert({ - val snapshot = MaterializerState.streamSnapshots(mat).futureValue - snapshot should have size (1) - snapshot.head.activeInterpreters should have size (1) - snapshot.head.activeInterpreters.head.stoppedLogics should have size (2) // Source.single and a detach - }, remainingOrDefault) + 
awaitAssert( + { + val snapshot = MaterializerState.streamSnapshots(mat).futureValue + snapshot should have size 1 + snapshot.head.activeInterpreters should have size 1 + snapshot.head.activeInterpreters.head.stoppedLogics should have size 2 // Source.single and a detach + }, + remainingOrDefault) } finally { mat.shutdown() @@ -92,12 +100,15 @@ class MaterializerStateSpec extends AkkaSpec() { val graph = Flow.fromGraph(GraphDSL.create() { implicit b => import GraphDSL.Implicits._ - val partition = b.add(Partition[String](4, { - case "green" => 0 - case "red" => 1 - case "blue" => 2 - case _ => 3 - })) + val partition = b.add( + Partition[String]( + 4, + { + case "green" => 0 + case "red" => 1 + case "blue" => 2 + case _ => 3 + })) val merge = b.add(Merge[String](4, eagerComplete = false)) val discard = b.add(Sink.ignore.async) val one = b.add(Source.single("purple")) diff --git a/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorFlow.scala b/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorFlow.scala index d4513f55039..772cbba12f4 100644 --- a/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorFlow.scala +++ b/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorFlow.scala @@ -15,9 +15,7 @@ import akka.pattern.StatusReply import akka.stream.javadsl.Flow import akka.util.JavaDurationConverters -/** - * Collection of Flows aimed at integrating with typed Actors. - */ +/** Collection of Flows aimed at integrating with typed Actors. */ object ActorFlow { /** @@ -136,9 +134,7 @@ object ActorFlow { .askWithStatus[I, Q, A](parallelism)(ref)((i, ref) => makeMessage(i, ref))(timeout.toMillis.millis) .asJava - /** - * Use the `ask` pattern to send a request-reply message to the target `ref` actor without including the context. - */ + /** Use the `ask` pattern to send a request-reply message to the target `ref` actor without including the context. 
*/ def askWithContext[I, Q, A, Ctx]( ref: ActorRef[Q], timeout: java.time.Duration, @@ -172,9 +168,7 @@ object ActorFlow { .map { case (a, ctx) => Pair(a, ctx) }) .asJava - /** - * Use the `ask` pattern to send a request-reply message to the target `ref` actor without including the context. - */ + /** Use the `ask` pattern to send a request-reply message to the target `ref` actor without including the context. */ def askWithContext[I, Q, A, Ctx]( parallelism: Int, ref: ActorRef[Q], diff --git a/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorSink.scala b/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorSink.scala index 461dce4be7c..af2231ff28d 100644 --- a/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorSink.scala +++ b/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorSink.scala @@ -9,9 +9,7 @@ import akka.actor.typed._ import akka.stream.javadsl._ import akka.stream.typed -/** - * Collection of Sinks aimed at integrating with typed Actors. - */ +/** Collection of Sinks aimed at integrating with typed Actors. */ object ActorSink { /** diff --git a/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorSource.scala b/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorSource.scala index 2d7c19aa2e7..6024625695b 100644 --- a/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorSource.scala +++ b/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorSource.scala @@ -11,9 +11,7 @@ import akka.japi.JavaPartialFunction import akka.stream.{ CompletionStrategy, OverflowStrategy } import akka.stream.javadsl._ -/** - * Collection of Sources aimed at integrating with typed Actors. - */ +/** Collection of Sources aimed at integrating with typed Actors. 
*/ object ActorSource { /** diff --git a/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorFlow.scala b/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorFlow.scala index e17f97702ac..40ad3549b1d 100644 --- a/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorFlow.scala +++ b/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorFlow.scala @@ -15,16 +15,14 @@ import akka.stream._ import akka.stream.scaladsl._ import akka.util.Timeout -/** - * Collection of Flows aimed at integrating with typed Actors. - */ +/** Collection of Flows aimed at integrating with typed Actors. */ object ActorFlow { // TODO would be nice to provide Implicits to allow .ask() directly on Flow/Source - private def askImpl[I, Q, A, O](parallelism: Int)(ref: ActorRef[Q])( - makeMessage: (I, ActorRef[A]) => Q, - makeOut: (I, Future[A]) => Future[O])(implicit timeout: Timeout): Flow[I, O, NotUsed] = { + private def askImpl[I, Q, A, O](parallelism: Int)( + ref: ActorRef[Q])(makeMessage: (I, ActorRef[A]) => Q, makeOut: (I, Future[A]) => Future[O])(implicit + timeout: Timeout): Flow[I, O, NotUsed] = { import akka.actor.typed.scaladsl.adapter._ val classicRef = ref.toClassic @@ -91,8 +89,8 @@ object ActorFlow { * @tparam A Answer type that the Actor is expected to reply with, it will become the Output type of this Flow */ @implicitNotFound("Missing an implicit akka.util.Timeout for the ask() stage") - def ask[I, Q, A](ref: ActorRef[Q])(makeMessage: (I, ActorRef[A]) => Q)( - implicit timeout: Timeout): Flow[I, A, NotUsed] = + def ask[I, Q, A](ref: ActorRef[Q])(makeMessage: (I, ActorRef[A]) => Q)(implicit + timeout: Timeout): Flow[I, A, NotUsed] = ask(parallelism = 2)(ref)(makeMessage)(timeout) /** @@ -130,16 +128,16 @@ object ActorFlow { * @tparam A answer type that the Actor is expected to reply with, it will become the Output type of this Flow */ @implicitNotFound("Missing an implicit akka.util.Timeout for the ask() stage") - def ask[I, Q, 
A](parallelism: Int)(ref: ActorRef[Q])(makeMessage: (I, ActorRef[A]) => Q)( - implicit timeout: Timeout): Flow[I, A, NotUsed] = askImpl(parallelism)(ref)(makeMessage, (_, o: Future[A]) => o) + def ask[I, Q, A](parallelism: Int)(ref: ActorRef[Q])(makeMessage: (I, ActorRef[A]) => Q)(implicit + timeout: Timeout): Flow[I, A, NotUsed] = askImpl(parallelism)(ref)(makeMessage, (_, o: Future[A]) => o) /** * Use for messages whose response is known to be a [[akka.pattern.StatusReply]]. When a [[akka.pattern.StatusReply#success]] response * arrives the future is completed with the wrapped value, if a [[akka.pattern.StatusReply#error]] arrives the future is instead * failed. */ - def askWithStatus[I, Q, A](ref: ActorRef[Q])(makeMessage: (I, ActorRef[StatusReply[A]]) => Q)( - implicit timeout: Timeout): Flow[I, A, NotUsed] = + def askWithStatus[I, Q, A](ref: ActorRef[Q])(makeMessage: (I, ActorRef[StatusReply[A]]) => Q)(implicit + timeout: Timeout): Flow[I, A, NotUsed] = askWithStatus(2)(ref)(makeMessage) /** @@ -157,20 +155,16 @@ object ActorFlow { } - /** - * Use the `ask` pattern to send a request-reply message to the target `ref` actor without including the context. - */ + /** Use the `ask` pattern to send a request-reply message to the target `ref` actor without including the context. */ @implicitNotFound("Missing an implicit akka.util.Timeout for the ask() stage") - def askWithContext[I, Q, A, Ctx](ref: ActorRef[Q])(makeMessage: (I, ActorRef[A]) => Q)( - implicit timeout: Timeout): Flow[(I, Ctx), (A, Ctx), NotUsed] = + def askWithContext[I, Q, A, Ctx](ref: ActorRef[Q])(makeMessage: (I, ActorRef[A]) => Q)(implicit + timeout: Timeout): Flow[(I, Ctx), (A, Ctx), NotUsed] = askWithContext(parallelism = 2)(ref)(makeMessage) - /** - * Use the `ask` pattern to send a request-reply message to the target `ref` actor without including the context. - */ + /** Use the `ask` pattern to send a request-reply message to the target `ref` actor without including the context. 
*/ @implicitNotFound("Missing an implicit akka.util.Timeout for the ask() stage") - def askWithContext[I, Q, A, Ctx](parallelism: Int)(ref: ActorRef[Q])(makeMessage: (I, ActorRef[A]) => Q)( - implicit timeout: Timeout): Flow[(I, Ctx), (A, Ctx), NotUsed] = + def askWithContext[I, Q, A, Ctx](parallelism: Int)(ref: ActorRef[Q])(makeMessage: (I, ActorRef[A]) => Q)(implicit + timeout: Timeout): Flow[(I, Ctx), (A, Ctx), NotUsed] = askImpl[(I, Ctx), Q, A, (A, Ctx)](parallelism)(ref)( (in, r) => makeMessage(in._1, r), (in, o: Future[A]) => o.map(a => a -> in._2)(ExecutionContexts.parasitic)) @@ -180,8 +174,8 @@ object ActorFlow { * arrives the future is completed with the wrapped value, if a [[akka.pattern.StatusReply#error]] arrives the future is instead * failed. */ - def askWithStatusAndContext[I, Q, A, Ctx](ref: ActorRef[Q])(makeMessage: (I, ActorRef[StatusReply[A]]) => Q)( - implicit timeout: Timeout): Flow[(I, Ctx), (A, Ctx), NotUsed] = + def askWithStatusAndContext[I, Q, A, Ctx](ref: ActorRef[Q])(makeMessage: (I, ActorRef[StatusReply[A]]) => Q)(implicit + timeout: Timeout): Flow[(I, Ctx), (A, Ctx), NotUsed] = askWithStatusAndContext(2)(ref)(makeMessage) /** diff --git a/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorSink.scala b/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorSink.scala index 7b993088e48..364bbdb35a6 100644 --- a/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorSink.scala +++ b/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorSink.scala @@ -8,9 +8,7 @@ import akka.NotUsed import akka.actor.typed._ import akka.stream.scaladsl._ -/** - * Collection of Sinks aimed at integrating with typed Actors. - */ +/** Collection of Sinks aimed at integrating with typed Actors. 
*/ object ActorSink { import akka.actor.typed.scaladsl.adapter._ diff --git a/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorSource.scala b/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorSource.scala index 72a8a9a43ac..2697140c76f 100644 --- a/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorSource.scala +++ b/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorSource.scala @@ -8,9 +8,7 @@ import akka.actor.typed._ import akka.stream.{ CompletionStrategy, OverflowStrategy } import akka.stream.scaladsl._ -/** - * Collection of Sources aimed at integrating with typed Actors. - */ +/** Collection of Sources aimed at integrating with typed Actors. */ object ActorSource { import akka.actor.typed.scaladsl.adapter._ diff --git a/akka-stream-typed/src/test/scala/akka/stream/typed/scaladsl/ActorSourceSinkSpec.scala b/akka-stream-typed/src/test/scala/akka/stream/typed/scaladsl/ActorSourceSinkSpec.scala index 5e05c7e49de..4697f767b97 100644 --- a/akka-stream-typed/src/test/scala/akka/stream/typed/scaladsl/ActorSourceSinkSpec.scala +++ b/akka-stream-typed/src/test/scala/akka/stream/typed/scaladsl/ActorSourceSinkSpec.scala @@ -150,7 +150,8 @@ class ActorSourceSinkSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike val (in, out) = ActorSource .actorRefWithBackpressure[String, String]( p.ref, - "ack", { case "complete" => CompletionStrategy.draining }, + "ack", + { case "complete" => CompletionStrategy.draining }, PartialFunction.empty) .toMat(Sink.seq)(Keep.both) .run() diff --git a/akka-stream-typed/src/test/scala/akka/stream/typed/scaladsl/MaterializerForTypedSpec.scala b/akka-stream-typed/src/test/scala/akka/stream/typed/scaladsl/MaterializerForTypedSpec.scala index 84c64ee5535..2b2c530042f 100644 --- a/akka-stream-typed/src/test/scala/akka/stream/typed/scaladsl/MaterializerForTypedSpec.scala +++ b/akka-stream-typed/src/test/scala/akka/stream/typed/scaladsl/MaterializerForTypedSpec.scala @@ -39,11 +39,10 
@@ class MaterializerForTypedSpec extends ScalaTestWithActorTestKit with AnyWordSpe val actor = testKit.spawn(Behaviors.setup[String] { context => val materializerForActor = Materializer(context) - Behaviors.receiveMessagePartial[String] { - case "run" => - val f = Source.single("hello").runWith(Sink.head)(materializerForActor) - f.onComplete(probe.ref ! _)(system.executionContext) - Behaviors.same + Behaviors.receiveMessagePartial[String] { case "run" => + val f = Source.single("hello").runWith(Sink.head)(materializerForActor) + f.onComplete(probe.ref ! _)(system.executionContext) + Behaviors.same } }) actor ! "run" diff --git a/akka-stream-typed/src/test/scala/docs/akka/stream/typed/ActorSourceSinkExample.scala b/akka-stream-typed/src/test/scala/docs/akka/stream/typed/ActorSourceSinkExample.scala index f1ea5309977..2ea8eec3bd6 100644 --- a/akka-stream-typed/src/test/scala/docs/akka/stream/typed/ActorSourceSinkExample.scala +++ b/akka-stream-typed/src/test/scala/docs/akka/stream/typed/ActorSourceSinkExample.scala @@ -24,15 +24,18 @@ object ActorSourceSinkExample { case object Complete extends Protocol case class Fail(ex: Exception) extends Protocol - val source: Source[Protocol, ActorRef[Protocol]] = ActorSource.actorRef[Protocol](completionMatcher = { - case Complete => - }, failureMatcher = { - case Fail(ex) => ex - }, bufferSize = 8, overflowStrategy = OverflowStrategy.fail) + val source: Source[Protocol, ActorRef[Protocol]] = ActorSource.actorRef[Protocol]( + completionMatcher = { case Complete => + }, + failureMatcher = { case Fail(ex) => + ex + }, + bufferSize = 8, + overflowStrategy = OverflowStrategy.fail) val ref = source - .collect { - case Message(msg) => msg + .collect { case Message(msg) => + msg } .to(Sink.foreach(println)) .run() @@ -74,16 +77,16 @@ object ActorSourceSinkExample { ackTo = ackReceiver, ackMessage = Emitted, // complete when we send ReachedEnd - completionMatcher = { - case ReachedEnd => CompletionStrategy.draining + completionMatcher 
= { case ReachedEnd => + CompletionStrategy.draining }, - failureMatcher = { - case FailureOccured(ex) => ex + failureMatcher = { case FailureOccured(ex) => + ex }) val streamActor: ActorRef[Event] = source - .collect { - case Element(msg) => msg + .collect { case Element(msg) => + msg } .to(Sink.foreach(println)) .run() @@ -166,7 +169,7 @@ object ActorSourceSinkExample { onInitMessage = (responseActorRef: ActorRef[Ack]) => Init(responseActorRef), ackMessage = Ack, onCompleteMessage = Complete, - onFailureMessage = (exception) => Fail(exception)) + onFailureMessage = exception => Fail(exception)) Source.single("msg1").runWith(sink) // #actor-sink-ref-with-backpressure diff --git a/akka-stream-typed/src/test/scala/docs/scaladsl/ActorFlowSpec.scala b/akka-stream-typed/src/test/scala/docs/scaladsl/ActorFlowSpec.scala index b5dfb94c64f..ffcae8224e1 100644 --- a/akka-stream-typed/src/test/scala/docs/scaladsl/ActorFlowSpec.scala +++ b/akka-stream-typed/src/test/scala/docs/scaladsl/ActorFlowSpec.scala @@ -24,13 +24,13 @@ import scala.concurrent.duration._ import scala.concurrent.{ Await, Future } object ActorFlowSpec { - //#ask-actor + // #ask-actor final case class Asking(s: String, replyTo: ActorRef[Reply]) final case class Reply(msg: String) final case class AskingWithStatus(s: String, replyTo: ActorRef[StatusReply[String]]) - //#ask-actor + // #ask-actor } class ActorFlowSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike { @@ -143,15 +143,15 @@ class ActorFlowSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike { } "produce asked elements in order" in { - //#ask-actor + // #ask-actor val ref = spawn(Behaviors.receiveMessage[Asking] { asking => asking.replyTo ! 
Reply(asking.s + "!!!") Behaviors.same }) - //#ask-actor + // #ask-actor - //#ask + // #ask implicit val timeout: Timeout = 1.second val askFlow: Flow[String, Reply, NotUsed] = @@ -163,7 +163,7 @@ class ActorFlowSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike { val in: Future[immutable.Seq[String]] = Source(1 to 50).map(_.toString).via(askFlow).map(_.msg).runWith(Sink.seq) - //#ask + // #ask askFlowExplicit.map(identity) in.futureValue shouldEqual List.tabulate(51)(i => s"$i!!!").drop(1) diff --git a/akka-stream/src/main/scala/akka/stream/ActorMaterializer.scala b/akka-stream/src/main/scala/akka/stream/ActorMaterializer.scala index 3fbc6bab20a..a0cf504af34 100644 --- a/akka-stream/src/main/scala/akka/stream/ActorMaterializer.scala +++ b/akka-stream/src/main/scala/akka/stream/ActorMaterializer.scala @@ -44,8 +44,8 @@ object ActorMaterializer { @deprecated( "Use the system wide materializer with stream attributes or configuration settings to change defaults", "2.6.0") - def apply(materializerSettings: Option[ActorMaterializerSettings] = None, namePrefix: Option[String] = None)( - implicit context: ActorRefFactory): ActorMaterializer = { + def apply(materializerSettings: Option[ActorMaterializerSettings] = None, namePrefix: Option[String] = None)(implicit + context: ActorRefFactory): ActorMaterializer = { val system = actorSystemOf(context) val settings = materializerSettings.getOrElse(SystemMaterializer(system).materializerSettings) @@ -67,8 +67,8 @@ object ActorMaterializer { @deprecated( "Use the system wide materializer with stream attributes or configuration settings to change defaults", "2.6.0") - def apply(materializerSettings: ActorMaterializerSettings, namePrefix: String)( - implicit context: ActorRefFactory): ActorMaterializer = { + def apply(materializerSettings: ActorMaterializerSettings, namePrefix: String)(implicit + context: ActorRefFactory): ActorMaterializer = { context match { case system: ActorSystem => @@ -186,14 +186,10 @@ object 
ActorMaterializer { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] object ActorMaterializerHelper { - /** - * INTERNAL API - */ + /** INTERNAL API */ @deprecated("The Materializer now has all methods the ActorMaterializer used to have", "2.6.0") private[akka] def downcast(materializer: Materializer): ActorMaterializer = materializer match { @@ -205,9 +201,7 @@ private[akka] object ActorMaterializerHelper { } } -/** - * An ActorMaterializer takes a stream blueprint and turns it into a running stream. - */ +/** An ActorMaterializer takes a stream blueprint and turns it into a running stream. */ @deprecated("The Materializer now has all methods the ActorMaterializer used to have", "2.6.0") abstract class ActorMaterializer extends Materializer with MaterializerLoggingProvider { @@ -223,29 +217,19 @@ abstract class ActorMaterializer extends Materializer with MaterializerLoggingPr */ def shutdown(): Unit - /** - * Indicates if the materializer has been shut down. - */ + /** Indicates if the materializer has been shut down. */ def isShutdown: Boolean - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] def actorOf(context: MaterializationContext, props: Props): ActorRef - /** - * INTERNAL API - */ + /** INTERNAL API */ def system: ActorSystem - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] def logger: LoggingAdapter - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] def supervisor: ActorRef } @@ -393,9 +377,7 @@ object ActorMaterializerSettings { config.getString(ActorAttributes.IODispatcher.dispatcher)) } - /** - * Create [[ActorMaterializerSettings]] from the settings of an [[akka.actor.ActorSystem]] (Java). - */ + /** Create [[ActorMaterializerSettings]] from the settings of an [[akka.actor.ActorSystem]] (Java). */ @deprecated( "Use config or attributes to configure the materializer. 
See migration guide for details https://doc.akka.io/docs/akka/2.6/project/migration-guide-2.5.x-2.6.x.html", "2.6.0") @@ -584,25 +566,19 @@ final class ActorMaterializerSettings @InternalApi private ( if (enable == this.fuzzingMode) this else copy(fuzzingMode = enable) - /** - * Maximum number of elements emitted in batch if downstream signals large demand. - */ + /** Maximum number of elements emitted in batch if downstream signals large demand. */ @deprecated("Use attribute 'ActorAttributes.OutputBurstLimit' to change setting value", "2.6.0") def withOutputBurstLimit(limit: Int): ActorMaterializerSettings = if (limit == this.outputBurstLimit) this else copy(outputBurstLimit = limit) - /** - * Limit for number of messages that can be processed synchronously in stream to substream communication - */ + /** Limit for number of messages that can be processed synchronously in stream to substream communication */ @deprecated("Use attribute 'ActorAttributes.SyncProcessingLimit' to change setting value", "2.6.0") def withSyncProcessingLimit(limit: Int): ActorMaterializerSettings = if (limit == this.syncProcessingLimit) this else copy(syncProcessingLimit = limit) - /** - * Enable to log all elements that are dropped due to failures (at DEBUG level). - */ + /** Enable to log all elements that are dropped due to failures (at DEBUG level). */ @deprecated("Use attribute 'ActorAttributes.DebugLogging' to change setting value", "2.6.0") def withDebugLogging(enable: Boolean): ActorMaterializerSettings = if (enable == this.debugLogging) this @@ -662,9 +638,7 @@ final class ActorMaterializerSettings @InternalApi private ( case _ => false } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def toAttributes: Attributes = asAttributes @@ -747,38 +721,32 @@ final class IOSettings private ( object StreamSubscriptionTimeoutSettings { import akka.stream.StreamSubscriptionTimeoutTerminationMode._ - /** - * Create settings from individual values (Java). 
- */ + /** Create settings from individual values (Java). */ def create( mode: StreamSubscriptionTimeoutTerminationMode, timeout: FiniteDuration): StreamSubscriptionTimeoutSettings = new StreamSubscriptionTimeoutSettings(mode, timeout) - /** - * Create settings from individual values (Scala). - */ + /** Create settings from individual values (Scala). */ def apply( mode: StreamSubscriptionTimeoutTerminationMode, timeout: FiniteDuration): StreamSubscriptionTimeoutSettings = new StreamSubscriptionTimeoutSettings(mode, timeout) - /** - * Create settings from a Config subsection (Java). - */ + /** Create settings from a Config subsection (Java). */ def create(config: Config): StreamSubscriptionTimeoutSettings = apply(config) - /** - * Create settings from a Config subsection (Scala). - */ + /** Create settings from a Config subsection (Scala). */ def apply(config: Config): StreamSubscriptionTimeoutSettings = { val c = config.getConfig("subscription-timeout") - StreamSubscriptionTimeoutSettings(mode = toRootLowerCase(c.getString("mode")) match { - case "no" | "off" | "false" | "noop" => NoopTermination - case "warn" => WarnTermination - case "cancel" => CancelTermination - }, timeout = c.getDuration("timeout", TimeUnit.MILLISECONDS).millis) + StreamSubscriptionTimeoutSettings( + mode = toRootLowerCase(c.getString("mode")) match { + case "no" | "off" | "false" | "noop" => NoopTermination + case "warn" => WarnTermination + case "cancel" => CancelTermination + }, + timeout = c.getDuration("timeout", TimeUnit.MILLISECONDS).millis) } } @@ -812,19 +780,13 @@ object StreamSubscriptionTimeoutTerminationMode { case object WarnTermination extends StreamSubscriptionTimeoutTerminationMode case object CancelTermination extends StreamSubscriptionTimeoutTerminationMode - /** - * Do not do anything when timeout expires. - */ + /** Do not do anything when timeout expires. 
*/ def noop: StreamSubscriptionTimeoutTerminationMode = NoopTermination - /** - * Log a warning when the timeout expires. - */ + /** Log a warning when the timeout expires. */ def warn: StreamSubscriptionTimeoutTerminationMode = WarnTermination - /** - * When the timeout expires attach a Subscriber that will immediately cancel its subscription. - */ + /** When the timeout expires attach a Subscriber that will immediately cancel its subscription. */ def cancel: StreamSubscriptionTimeoutTerminationMode = CancelTermination } diff --git a/akka-stream/src/main/scala/akka/stream/Attributes.scala b/akka-stream/src/main/scala/akka/stream/Attributes.scala index 2b18253c418..f5c8e12d933 100644 --- a/akka-stream/src/main/scala/akka/stream/Attributes.scala +++ b/akka-stream/src/main/scala/akka/stream/Attributes.scala @@ -54,16 +54,15 @@ final class Attributes private[akka] ( def this(attributeList: List[Attributes.Attribute] = Nil) = this( attributeList, - (attributeList.reverseIterator - .foldLeft(Map.newBuilder[Class[AnyRef], Attributes.MandatoryAttribute]) { - case (builder, attribute) => - attribute match { - case m: Attributes.MandatoryAttribute => - builder += (m.getClass.asInstanceOf[Class[AnyRef]] -> m) - builder - case _ => builder - } - }) + attributeList.reverseIterator + .foldLeft(Map.newBuilder[Class[AnyRef], Attributes.MandatoryAttribute]) { case (builder, attribute) => + attribute match { + case m: Attributes.MandatoryAttribute => + builder += (m.getClass.asInstanceOf[Class[AnyRef]] -> m) + builder + case _ => builder + } + } .result()) /** @@ -193,9 +192,7 @@ final class Attributes private[akka] ( } - /** - * Extracts Name attributes and concatenates them. - */ + /** Extracts Name attributes and concatenates them. 
*/ def nameLifted: Option[String] = { @tailrec def concatNames(i: Iterator[Attribute], first: String, buf: java.lang.StringBuilder): String = if (i.hasNext) @@ -207,15 +204,14 @@ final class Attributes private[akka] ( concatNames(i, null, b.append(first).append('-').append(n)) } else concatNames(i, n, null) case _ => concatNames(i, first, buf) - } else if (buf eq null) first + } + else if (buf eq null) first else buf.toString Option(concatNames(attributeList.reverseIterator, null, null)) } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private def getName(): Option[String] = { @tailrec def find(attrs: List[Attribute]): Option[String] = attrs match { case Attributes.Name(name) :: _ => Some(name) @@ -290,9 +286,7 @@ final class Attributes private[akka] ( attributeList.collect { case attr if c.isAssignableFrom(attr.getClass) => c.cast(attr) } } - /** - * Scala API: Get the least specific attribute (added first) of a given type parameter T `Class` or subclass thereof. - */ + /** Scala API: Get the least specific attribute (added first) of a given type parameter T `Class` or subclass thereof. */ // deprecated but used by Akka HTTP so needs to stay @deprecated("Attributes should always be most specific, use get[T]", "2.5.7") def getFirst[T <: Attribute: ClassTag]: Option[T] = { @@ -330,9 +324,7 @@ final class Attributes private[akka] ( } } -/** - * Note that more attributes for the [[Materializer]] are defined in [[ActorAttributes]]. - */ +/** Note that more attributes for the [[Materializer]] are defined in [[ActorAttributes]]. 
*/ object Attributes { trait Attribute @@ -353,7 +345,7 @@ object Attributes { // for binary compatibility @deprecated("Use explicit methods on Attributes to interact, not the synthetic case class ones", "2.8.0") - def unapply(attrs: Attributes): Option[(List[Attribute])] = + def unapply(attrs: Attributes): Option[List[Attribute]] = Some(attrs.attributeList) final case class Name(n: String) extends Attribute @@ -363,18 +355,19 @@ object Attributes { * for debugging. Included in the default toString of GraphStageLogic if present */ final class SourceLocation(lambda: AnyRef) extends Attribute { - lazy val locationName: String = try { - val locationName = LineNumbers(lambda) match { - case LineNumbers.NoSourceInfo => "unknown" - case LineNumbers.UnknownSourceFormat(_) => "unknown" - case LineNumbers.SourceFile(filename) => filename - case LineNumbers.SourceFileLines(filename, from, _) => - s"$filename:$from" + lazy val locationName: String = + try { + val locationName = LineNumbers(lambda) match { + case LineNumbers.NoSourceInfo => "unknown" + case LineNumbers.UnknownSourceFormat(_) => "unknown" + case LineNumbers.SourceFile(filename) => filename + case LineNumbers.SourceFileLines(filename, from, _) => + s"$filename:$from" + } + s"${lambda.getClass.getPackage.getName}-$locationName" + } catch { + case NonFatal(_) => "unknown" // location is not critical so give up without failing } - s"${lambda.getClass.getPackage.getName}-$locationName" - } catch { - case NonFatal(_) => "unknown" // location is not critical so give up without failing - } override def toString: String = locationName } @@ -411,15 +404,11 @@ object Attributes { object CancellationStrategy { - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[stream] val Default: CancellationStrategy = CancellationStrategy(PropagateFailure) - /** - * Not for user extension - */ + /** Not for user extension */ @DoNotInherit sealed trait Strategy @@ -707,9 +696,7 @@ object Attributes { /** Java API: Use 
to enable logging at DEBUG level for certain operations when configuring [[Attributes#createLogLevels]] */ def logLevelDebug: Logging.LogLevel = LogLevels.Debug - /** - * INTERNAL API - */ + /** INTERNAL API */ def apply(attribute: Attribute): Attributes = apply(attribute :: Nil) @@ -737,7 +724,6 @@ object Attributes { * * Configures `log()` operator log-levels to be used when logging. * Logging a certain operation can be completely disabled by using [[Attributes#logLevelOff]]. - * */ def createLogLevels( onElement: Logging.LogLevel, @@ -750,7 +736,6 @@ object Attributes { * * Configures `log()` operator log-levels to be used when logging onElement. * Logging a certain operation can be completely disabled by using [[Attributes#logLevelOff]]. - * */ def createLogLevels(onElement: Logging.LogLevel): Attributes = logLevels(onElement) @@ -794,9 +779,7 @@ object ActorAttributes { val IODispatcher: Dispatcher = ActorAttributes.Dispatcher("akka.stream.materializer.blocking-io-dispatcher") - /** - * Specifies the name of the dispatcher. This also adds an async boundary. - */ + /** Specifies the name of the dispatcher. This also adds an async boundary. */ def dispatcher(dispatcher: String): Attributes = Attributes(Dispatcher(dispatcher)) /** @@ -826,7 +809,6 @@ object ActorAttributes { * * Configures `log()` operator log-levels to be used when logging. * Logging a certain operation can be completely disabled by using [[Attributes#logLevelOff]]. - * */ def createLogLevels( onElement: Logging.LogLevel, @@ -839,7 +821,6 @@ object ActorAttributes { * * Configures `log()` operator log-levels to be used when logging onElement. * Logging a certain operation can be completely disabled by using [[Attributes#logLevelOff]]. 
- * */ def createLogLevels(onElement: Logging.LogLevel): Attributes = logLevels(onElement) @@ -863,9 +844,7 @@ object ActorAttributes { */ final case class DebugLogging(enabled: Boolean) extends MandatoryAttribute - /** - * Enables additional low level troubleshooting logging at DEBUG log level - */ + /** Enables additional low level troubleshooting logging at DEBUG log level */ def debugLogging(enabled: Boolean): Attributes = Attributes(DebugLogging(enabled)) @@ -877,15 +856,11 @@ object ActorAttributes { final case class StreamSubscriptionTimeout(timeout: FiniteDuration, mode: StreamSubscriptionTimeoutTerminationMode) extends MandatoryAttribute - /** - * Scala API: Defines a timeout for stream subscription and what action to take when that hits. - */ + /** Scala API: Defines a timeout for stream subscription and what action to take when that hits. */ def streamSubscriptionTimeout(timeout: FiniteDuration, mode: StreamSubscriptionTimeoutTerminationMode): Attributes = Attributes(StreamSubscriptionTimeout(timeout, mode)) - /** - * Java API: Defines a timeout for stream subscription and what action to take when that hits. - */ + /** Java API: Defines a timeout for stream subscription and what action to take when that hits. */ def streamSubscriptionTimeout(timeout: Duration, mode: StreamSubscriptionTimeoutTerminationMode): Attributes = streamSubscriptionTimeout(timeout.asScala, mode) @@ -896,9 +871,7 @@ object ActorAttributes { */ final case class OutputBurstLimit(limit: Int) extends MandatoryAttribute - /** - * Maximum number of elements emitted in batch if downstream signals large demand. - */ + /** Maximum number of elements emitted in batch if downstream signals large demand. 
*/ def outputBurstLimit(limit: Int): Attributes = Attributes(OutputBurstLimit(limit)) @@ -941,9 +914,7 @@ object ActorAttributes { */ final case class SyncProcessingLimit(limit: Int) extends MandatoryAttribute - /** - * Limit for number of messages that can be processed synchronously in stream to substream communication - */ + /** Limit for number of messages that can be processed synchronously in stream to substream communication */ def syncProcessingLimit(limit: Int): Attributes = Attributes(SyncProcessingLimit(limit)) @@ -956,7 +927,8 @@ object ActorAttributes { object StreamRefAttributes { import Attributes._ - /** Attributes specific to stream refs. + /** + * Attributes specific to stream refs. * * Not for user extension. */ @@ -970,42 +942,28 @@ object StreamRefAttributes { final case class DemandRedeliveryInterval(timeout: FiniteDuration) extends StreamRefAttribute final case class FinalTerminationSignalDeadline(timeout: FiniteDuration) extends StreamRefAttribute - /** - * Scala API: Specifies the subscription timeout within which the remote side MUST subscribe to the handed out stream reference. - */ + /** Scala API: Specifies the subscription timeout within which the remote side MUST subscribe to the handed out stream reference. */ def subscriptionTimeout(timeout: FiniteDuration): Attributes = Attributes(SubscriptionTimeout(timeout)) - /** - * Java API: Specifies the subscription timeout within which the remote side MUST subscribe to the handed out stream reference. - */ + /** Java API: Specifies the subscription timeout within which the remote side MUST subscribe to the handed out stream reference. */ def subscriptionTimeout(timeout: Duration): Attributes = subscriptionTimeout(timeout.asScala) - /** - * Specifies the size of the buffer on the receiving side that is eagerly filled even without demand. - */ + /** Specifies the size of the buffer on the receiving side that is eagerly filled even without demand. 
*/ def bufferCapacity(capacity: Int): Attributes = Attributes(BufferCapacity(capacity)) - /** - * Scala API: If no new elements arrive within this timeout, demand is redelivered. - */ + /** Scala API: If no new elements arrive within this timeout, demand is redelivered. */ def demandRedeliveryInterval(timeout: FiniteDuration): Attributes = Attributes(DemandRedeliveryInterval(timeout)) - /** - * Java API: If no new elements arrive within this timeout, demand is redelivered. - */ + /** Java API: If no new elements arrive within this timeout, demand is redelivered. */ def demandRedeliveryInterval(timeout: Duration): Attributes = demandRedeliveryInterval(timeout.asScala) - /** - * Scala API: The time between the Terminated signal being received and when the local SourceRef determines to fail itself - */ + /** Scala API: The time between the Terminated signal being received and when the local SourceRef determines to fail itself */ def finalTerminationSignalDeadline(timeout: FiniteDuration): Attributes = Attributes(FinalTerminationSignalDeadline(timeout)) - /** - * Java API: The time between the Terminated signal being received and when the local SourceRef determines to fail itself - */ + /** Java API: The time between the Terminated signal being received and when the local SourceRef determines to fail itself */ def finalTerminationSignalDeadline(timeout: Duration): Attributes = finalTerminationSignalDeadline(timeout.asScala) diff --git a/akka-stream/src/main/scala/akka/stream/BoundedSourceQueue.scala b/akka-stream/src/main/scala/akka/stream/BoundedSourceQueue.scala index acfa2bd402f..cc6fea7b878 100644 --- a/akka-stream/src/main/scala/akka/stream/BoundedSourceQueue.scala +++ b/akka-stream/src/main/scala/akka/stream/BoundedSourceQueue.scala @@ -23,18 +23,12 @@ trait BoundedSourceQueue[T] { */ def offer(elem: T): QueueOfferResult - /** - * Completes the stream normally. - */ + /** Completes the stream normally. 
*/ def complete(): Unit - /** - * Completes the stream with a failure. - */ + /** Completes the stream with a failure. */ def fail(ex: Throwable): Unit - /** - * Returns the approximate number of elements in this queue. - */ + /** Returns the approximate number of elements in this queue. */ def size(): Int } diff --git a/akka-stream/src/main/scala/akka/stream/CompletionStrategy.scala b/akka-stream/src/main/scala/akka/stream/CompletionStrategy.scala index 18b5f6abf48..8e7e5e28ac1 100644 --- a/akka-stream/src/main/scala/akka/stream/CompletionStrategy.scala +++ b/akka-stream/src/main/scala/akka/stream/CompletionStrategy.scala @@ -11,25 +11,17 @@ sealed trait CompletionStrategy case object CompletionStrategy { - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] case object Immediately extends CompletionStrategy - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] case object Draining extends CompletionStrategy - /** - * The completion will be signaled immediately even if elements are still buffered. - */ + /** The completion will be signaled immediately even if elements are still buffered. */ def immediately: CompletionStrategy = Immediately - /** - * Already buffered elements will be signaled before signaling completion. - */ + /** Already buffered elements will be signaled before signaling completion. */ def draining: CompletionStrategy = Draining } diff --git a/akka-stream/src/main/scala/akka/stream/FanInShape.scala b/akka-stream/src/main/scala/akka/stream/FanInShape.scala index 7a1d473257c..98e6547a2c3 100644 --- a/akka-stream/src/main/scala/akka/stream/FanInShape.scala +++ b/akka-stream/src/main/scala/akka/stream/FanInShape.scala @@ -35,14 +35,10 @@ abstract class FanInShape[+O] private ( final def out: Outlet[O @uncheckedVariance] = _out final override def outlets: immutable.Seq[Outlet[O @uncheckedVariance]] = _out :: Nil - /** - * Not meant for overriding outside of Akka. 
- */ + /** Not meant for overriding outside of Akka. */ override def inlets: immutable.Seq[Inlet[_]] = _inlets - /** - * Performance of subclass `UniformFanInShape` relies on `_inlets` being a `Vector`, not a `List`. - */ + /** Performance of subclass `UniformFanInShape` relies on `_inlets` being a `Vector`, not a `List`. */ private var _inlets: Vector[Inlet[_]] = Vector.empty protected def newInlet[T](name: String): Inlet[T] = { val p = if (_registered.hasNext) _registered.next().asInstanceOf[Inlet[T]] else Inlet[T](s"${_name}.$name") diff --git a/akka-stream/src/main/scala/akka/stream/FanOutShape.scala b/akka-stream/src/main/scala/akka/stream/FanOutShape.scala index 4629581ec3c..442e75327d6 100644 --- a/akka-stream/src/main/scala/akka/stream/FanOutShape.scala +++ b/akka-stream/src/main/scala/akka/stream/FanOutShape.scala @@ -34,15 +34,11 @@ abstract class FanOutShape[-I] private ( final def in: Inlet[I @uncheckedVariance] = _in - /** - * Not meant for overriding outside of Akka. - */ + /** Not meant for overriding outside of Akka. */ override def outlets: immutable.Seq[Outlet[_]] = _outlets final override def inlets: immutable.Seq[Inlet[I @uncheckedVariance]] = in :: Nil - /** - * Performance of subclass `UniformFanOutShape` relies on `_outlets` being a `Vector`, not a `List`. - */ + /** Performance of subclass `UniformFanOutShape` relies on `_outlets` being a `Vector`, not a `List`. 
*/ private var _outlets: Vector[Outlet[_]] = Vector.empty protected def newOutlet[T](name: String): Outlet[T] = { val p = if (_registered.hasNext) _registered.next().asInstanceOf[Outlet[T]] else Outlet[T](s"${_name}.$name") diff --git a/akka-stream/src/main/scala/akka/stream/FlowMonitor.scala b/akka-stream/src/main/scala/akka/stream/FlowMonitor.scala index 3b37b6c9b29..4bfee14e02b 100644 --- a/akka-stream/src/main/scala/akka/stream/FlowMonitor.scala +++ b/akka-stream/src/main/scala/akka/stream/FlowMonitor.scala @@ -18,14 +18,10 @@ trait FlowMonitor[+T] { object FlowMonitorState { sealed trait StreamState[+U] - /** - * Stream was created, but no events have passed through it - */ + /** Stream was created, but no events have passed through it */ case object Initialized extends StreamState[Nothing] - /** - * Java API - */ + /** Java API */ def initialized[U](): StreamState[U] = Initialized /** @@ -35,9 +31,7 @@ object FlowMonitorState { */ final case class Received[+U](msg: U) extends StreamState[U] - /** - * Java API - */ + /** Java API */ def received[U](msg: U): StreamState[U] = Received(msg) /** @@ -47,18 +41,12 @@ object FlowMonitorState { */ final case class Failed(cause: Throwable) extends StreamState[Nothing] - /** - * Java API - */ + /** Java API */ def failed[U](cause: Throwable): StreamState[U] = Failed(cause) - /** - * Stream completed successfully - */ + /** Stream completed successfully */ case object Finished extends StreamState[Nothing] - /** - * Java API - */ + /** Java API */ def finished[U](): StreamState[U] = Finished } diff --git a/akka-stream/src/main/scala/akka/stream/Graph.scala b/akka-stream/src/main/scala/akka/stream/Graph.scala index c8138daf9ba..fcc13344cea 100644 --- a/akka-stream/src/main/scala/akka/stream/Graph.scala +++ b/akka-stream/src/main/scala/akka/stream/Graph.scala @@ -17,14 +17,10 @@ import akka.stream.scaladsl.GenericGraph */ trait Graph[+S <: Shape, +M] { - /** - * Type-level accessor for the shape parameter of this graph. 
- */ + /** Type-level accessor for the shape parameter of this graph. */ type Shape = S @uncheckedVariance - /** - * The shape of a graph is all that is externally visible: its inlets and outlets. - */ + /** The shape of a graph is all that is externally visible: its inlets and outlets. */ def shape: S /** @@ -47,9 +43,7 @@ trait Graph[+S <: Shape, +M] { */ def named(name: String): Graph[S, M] = addAttributes(Attributes.name(name)) - /** - * Put an asynchronous boundary around this `Graph` - */ + /** Put an asynchronous boundary around this `Graph` */ def async: Graph[S, M] = addAttributes(Attributes.asyncBoundary) /** diff --git a/akka-stream/src/main/scala/akka/stream/IOResult.scala b/akka-stream/src/main/scala/akka/stream/IOResult.scala index 60645a65432..627757bf7dd 100644 --- a/akka-stream/src/main/scala/akka/stream/IOResult.scala +++ b/akka-stream/src/main/scala/akka/stream/IOResult.scala @@ -26,14 +26,10 @@ final case class IOResult( @deprecated("status is always set to Success(Done)", "2.6.0") def withStatus(value: Try[Done]): IOResult = copy(status = value) - /** - * Java API: Numeric value depending on context, for example IO operations performed or bytes processed. - */ + /** Java API: Numeric value depending on context, for example IO operations performed or bytes processed. */ def getCount: Long = count - /** - * Java API: Indicates whether IO operation completed successfully or not. - */ + /** Java API: Indicates whether IO operation completed successfully or not. */ @deprecated("status is always set to Success(Done)", "2.6.0") def wasSuccessful: Boolean = status.isSuccess diff --git a/akka-stream/src/main/scala/akka/stream/KillSwitch.scala b/akka-stream/src/main/scala/akka/stream/KillSwitch.scala index 96ff219ea66..8e2b2b4c168 100644 --- a/akka-stream/src/main/scala/akka/stream/KillSwitch.scala +++ b/akka-stream/src/main/scala/akka/stream/KillSwitch.scala @@ -23,7 +23,6 @@ import akka.stream.stage._ * to that materialized Flow itself. 
* * Creates a [[SharedKillSwitch]] that can be used to externally control the completion of various streams. - * */ object KillSwitches { @@ -60,8 +59,8 @@ object KillSwitches { case Some(status) => onSwitch(status) case _ => // callback.invoke is a simple actor send, so it is fine to run on the invoking thread - terminationSignal.onComplete(getAsyncCallback[Try[Done]](onSwitch).invoke)( - akka.dispatch.ExecutionContexts.parasitic) + terminationSignal + .onComplete(getAsyncCallback[Try[Done]](onSwitch).invoke)(akka.dispatch.ExecutionContexts.parasitic) } } @@ -108,21 +107,27 @@ object KillSwitches { val logic = new KillableGraphStageLogic(promise.future, shape) { - setHandlers(shape.in1, shape.out1, new InHandler with OutHandler { - override def onPush(): Unit = push(shape.out1, grab(shape.in1)) - override def onUpstreamFinish(): Unit = complete(shape.out1) - override def onUpstreamFailure(ex: Throwable): Unit = fail(shape.out1, ex) - override def onPull(): Unit = pull(shape.in1) - override def onDownstreamFinish(cause: Throwable): Unit = cancel(shape.in1, cause) - }) - - setHandlers(shape.in2, shape.out2, new InHandler with OutHandler { - override def onPush(): Unit = push(shape.out2, grab(shape.in2)) - override def onUpstreamFinish(): Unit = complete(shape.out2) - override def onUpstreamFailure(ex: Throwable): Unit = fail(shape.out2, ex) - override def onPull(): Unit = pull(shape.in2) - override def onDownstreamFinish(cause: Throwable): Unit = cancel(shape.in2, cause) - }) + setHandlers( + shape.in1, + shape.out1, + new InHandler with OutHandler { + override def onPush(): Unit = push(shape.out1, grab(shape.in1)) + override def onUpstreamFinish(): Unit = complete(shape.out1) + override def onUpstreamFailure(ex: Throwable): Unit = fail(shape.out1, ex) + override def onPull(): Unit = pull(shape.in1) + override def onDownstreamFinish(cause: Throwable): Unit = cancel(shape.in1, cause) + }) + + setHandlers( + shape.in2, + shape.out2, + new InHandler with OutHandler { + 
override def onPush(): Unit = push(shape.out2, grab(shape.in2)) + override def onUpstreamFinish(): Unit = complete(shape.out2) + override def onUpstreamFailure(ex: Throwable): Unit = fail(shape.out2, ex) + override def onPull(): Unit = pull(shape.in2) + override def onDownstreamFinish(cause: Throwable): Unit = cancel(shape.in2, cause) + }) } @@ -141,14 +146,10 @@ object KillSwitches { //#kill-switch trait KillSwitch { - /** - * After calling [[KillSwitch#shutdown]] the linked [[Graph]]s of [[FlowShape]] are completed normally. - */ + /** After calling [[KillSwitch#shutdown]] the linked [[Graph]]s of [[FlowShape]] are completed normally. */ def shutdown(): Unit - /** - * After calling [[KillSwitch#abort]] the linked [[Graph]]s of [[FlowShape]] are failed. - */ + /** After calling [[KillSwitch#abort]] the linked [[Graph]]s of [[FlowShape]] are failed. */ def abort(ex: Throwable): Unit } //#kill-switch diff --git a/akka-stream/src/main/scala/akka/stream/Materializer.scala b/akka-stream/src/main/scala/akka/stream/Materializer.scala index 724ca97e16b..6ea3dcab638 100644 --- a/akka-stream/src/main/scala/akka/stream/Materializer.scala +++ b/akka-stream/src/main/scala/akka/stream/Materializer.scala @@ -26,7 +26,9 @@ import akka.event.LoggingAdapter * Not for user extension */ @implicitNotFound("A Materializer is required. You may want to have the ActorSystem in implicit scope") -@nowarn("msg=deprecated") // Name(symbol) is deprecated but older Scala versions don't have a string signature, since "2.5.8" +@nowarn( + "msg=deprecated" +) // Name(symbol) is deprecated but older Scala versions don't have a string signature, since "2.5.8" @DoNotInherit abstract class Materializer { @@ -154,9 +156,7 @@ abstract class Materializer { */ def shutdown(): Unit - /** - * Indicates if the materializer has been shut down. - */ + /** Indicates if the materializer has been shut down. 
*/ def isShutdown: Boolean /** @@ -174,15 +174,11 @@ abstract class Materializer { @InternalApi private[akka] def logger: LoggingAdapter - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def supervisor: ActorRef - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def actorOf(context: MaterializationContext, props: Props): ActorRef @@ -192,9 +188,7 @@ abstract class Materializer { object Materializer { - /** - * Implicitly provides the system wide materializer from a classic or typed `ActorSystem` - */ + /** Implicitly provides the system wide materializer from a classic or typed `ActorSystem` */ implicit def matFromSystem(implicit provider: ClassicActorSystemProvider): Materializer = SystemMaterializer(provider.classicSystem).materializer diff --git a/akka-stream/src/main/scala/akka/stream/MaterializerLoggingProvider.scala b/akka-stream/src/main/scala/akka/stream/MaterializerLoggingProvider.scala index ac82a082163..e0621d83320 100644 --- a/akka-stream/src/main/scala/akka/stream/MaterializerLoggingProvider.scala +++ b/akka-stream/src/main/scala/akka/stream/MaterializerLoggingProvider.scala @@ -7,9 +7,7 @@ package akka.stream import akka.annotation.DoNotInherit import akka.event.LoggingAdapter -/** - * Not for user extension - */ +/** Not for user extension */ @DoNotInherit trait MaterializerLoggingProvider { this: Materializer => diff --git a/akka-stream/src/main/scala/akka/stream/OverflowStrategy.scala b/akka-stream/src/main/scala/akka/stream/OverflowStrategy.scala index 36f9189aa3b..9b92a2b080e 100644 --- a/akka-stream/src/main/scala/akka/stream/OverflowStrategy.scala +++ b/akka-stream/src/main/scala/akka/stream/OverflowStrategy.scala @@ -37,57 +37,43 @@ sealed abstract class OverflowStrategy extends DelayOverflowStrategy { private[akka] object OverflowStrategies { - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] case class DropHead(logLevel: LogLevel) extends OverflowStrategy { override def 
withLogLevel(logLevel: LogLevel): DropHead = DropHead(logLevel) private[akka] override def isBackpressure: Boolean = false } - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] case class DropTail(logLevel: LogLevel) extends OverflowStrategy { override def withLogLevel(logLevel: LogLevel): DropTail = DropTail(logLevel) private[akka] override def isBackpressure: Boolean = false } - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] case class DropBuffer(logLevel: LogLevel) extends OverflowStrategy { override def withLogLevel(logLevel: LogLevel): DropBuffer = DropBuffer(logLevel) private[akka] override def isBackpressure: Boolean = false } - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] case class DropNew(logLevel: LogLevel) extends OverflowStrategy { override def withLogLevel(logLevel: LogLevel): DropNew = DropNew(logLevel) private[akka] override def isBackpressure: Boolean = false } - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] case class Backpressure(logLevel: LogLevel) extends OverflowStrategy { override def withLogLevel(logLevel: LogLevel): Backpressure = Backpressure(logLevel) private[akka] override def isBackpressure: Boolean = true } - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] case class Fail(logLevel: LogLevel) extends OverflowStrategy { override def withLogLevel(logLevel: LogLevel): Fail = Fail(logLevel) private[akka] override def isBackpressure: Boolean = false } - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] case object EmitEarly extends DelayOverflowStrategy { private[akka] override def isBackpressure: Boolean = true } @@ -107,9 +93,7 @@ object OverflowStrategy { */ def dropTail: OverflowStrategy = DropTail(Logging.DebugLevel) - /** - * If the buffer is full when a new element arrives, drops all the buffered elements to make space for the new element. 
- */ + /** If the buffer is full when a new element arrives, drops all the buffered elements to make space for the new element. */ def dropBuffer: OverflowStrategy = DropBuffer(Logging.DebugLevel) /** @@ -127,9 +111,7 @@ object OverflowStrategy { */ def backpressure: OverflowStrategy = Backpressure(Logging.DebugLevel) - /** - * If the buffer is full when a new element is available this strategy completes the stream with failure. - */ + /** If the buffer is full when a new element is available this strategy completes the stream with failure. */ def fail: OverflowStrategy = Fail(Logging.ErrorLevel) } @@ -153,14 +135,10 @@ object DelayOverflowStrategy { */ def dropTail: DelayOverflowStrategy = DropTail(Logging.DebugLevel) - /** - * If the buffer is full when a new element arrives, drops all the buffered elements to make space for the new element. - */ + /** If the buffer is full when a new element arrives, drops all the buffered elements to make space for the new element. */ def dropBuffer: DelayOverflowStrategy = DropBuffer(Logging.DebugLevel) - /** - * If the buffer is full when a new element arrives, drops the new element. - */ + /** If the buffer is full when a new element arrives, drops the new element. */ def dropNew: DelayOverflowStrategy = DropNew(Logging.DebugLevel) /** @@ -169,8 +147,6 @@ object DelayOverflowStrategy { */ def backpressure: DelayOverflowStrategy = Backpressure(Logging.DebugLevel) - /** - * If the buffer is full when a new element is available this strategy completes the stream with failure. - */ + /** If the buffer is full when a new element is available this strategy completes the stream with failure. 
*/ def fail: DelayOverflowStrategy = Fail(Logging.ErrorLevel) } diff --git a/akka-stream/src/main/scala/akka/stream/QueueOfferResult.scala b/akka-stream/src/main/scala/akka/stream/QueueOfferResult.scala index 4072366dfa8..609e91fa258 100644 --- a/akka-stream/src/main/scala/akka/stream/QueueOfferResult.scala +++ b/akka-stream/src/main/scala/akka/stream/QueueOfferResult.scala @@ -6,51 +6,35 @@ package akka.stream import akka.annotation.DoNotInherit -/** - * Not for user extension - */ +/** Not for user extension */ @DoNotInherit sealed abstract class QueueOfferResult { - /** - * Return ture if the element was already enqueued, otherwise false. - * */ + /** Return ture if the element was already enqueued, otherwise false. */ def isEnqueued: Boolean } -/** - * Not for user extension - */ +/** Not for user extension */ @DoNotInherit sealed abstract class QueueCompletionResult extends QueueOfferResult -/** - * Contains types that is used as return types for streams Source queues - */ +/** Contains types that is used as return types for streams Source queues */ object QueueOfferResult { - /** - * Type is used to indicate that stream is successfully enqueued an element - */ + /** Type is used to indicate that stream is successfully enqueued an element */ case object Enqueued extends QueueOfferResult { override def isEnqueued: Boolean = true } - /** - * Java API: The `Enqueued` singleton instance - */ + /** Java API: The `Enqueued` singleton instance */ def enqueued: QueueOfferResult = Enqueued - /** - * Type is used to indicate that stream is dropped an element - */ + /** Type is used to indicate that stream is dropped an element */ case object Dropped extends QueueOfferResult { override def isEnqueued: Boolean = false } - /** - * Java API: The `Dropped` singleton instance - */ + /** Java API: The `Dropped` singleton instance */ def dropped: QueueOfferResult = Dropped /** @@ -61,9 +45,7 @@ object QueueOfferResult { override def isEnqueued: Boolean = false } - /** - * Type 
is used to indicate that stream is completed before call - */ + /** Type is used to indicate that stream is completed before call */ case object QueueClosed extends QueueCompletionResult { override def isEnqueued: Boolean = false } diff --git a/akka-stream/src/main/scala/akka/stream/Shape.scala b/akka-stream/src/main/scala/akka/stream/Shape.scala index 57042e30410..6c2c509c298 100644 --- a/akka-stream/src/main/scala/akka/stream/Shape.scala +++ b/akka-stream/src/main/scala/akka/stream/Shape.scala @@ -21,19 +21,13 @@ sealed abstract class InPort { self: Inlet[_] => final override def hashCode: Int = super.hashCode final override def equals(that: Any): Boolean = this eq that.asInstanceOf[AnyRef] - /** - * INTERNAL API - */ + /** INTERNAL API */ @volatile private[stream] var id: Int = -1 - /** - * INTERNAL API - */ + /** INTERNAL API */ @volatile private[stream] var mappedTo: InPort = this - /** - * INTERNAL API - */ + /** INTERNAL API */ private[stream] def inlet: Inlet[_] = this } @@ -47,19 +41,13 @@ sealed abstract class OutPort { self: Outlet[_] => final override def hashCode: Int = super.hashCode final override def equals(that: Any): Boolean = this eq that.asInstanceOf[AnyRef] - /** - * INTERNAL API - */ + /** INTERNAL API */ @volatile private[stream] var id: Int = -1 - /** - * INTERNAL API - */ + /** INTERNAL API */ @volatile private[stream] var mappedTo: OutPort = this - /** - * INTERNAL API - */ + /** INTERNAL API */ private[stream] def outlet: Outlet[_] = this } @@ -94,9 +82,7 @@ final class Inlet[T] private (val s: String) extends InPort { in } - /** - * INTERNAL API. - */ + /** INTERNAL API. */ def as[U]: Inlet[U] = this.asInstanceOf[Inlet[U]] override def toString: String = @@ -136,9 +122,7 @@ final class Outlet[T] private (val s: String) extends OutPort { out } - /** - * INTERNAL API. - */ + /** INTERNAL API. 
*/ def as[U]: Outlet[U] = this.asInstanceOf[Outlet[U]] override def toString: String = @@ -147,9 +131,7 @@ final class Outlet[T] private (val s: String) extends OutPort { else s" mapped to $mappedTo") } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object Shape { /** @@ -172,14 +154,10 @@ final class Outlet[T] private (val s: String) extends OutPort { */ abstract class Shape { - /** - * Scala API: get a list of all input ports - */ + /** Scala API: get a list of all input ports */ def inlets: immutable.Seq[Inlet[_]] - /** - * Scala API: get a list of all output ports - */ + /** Scala API: get a list of all output ports */ def outlets: immutable.Seq[Outlet[_]] /** @@ -189,56 +167,38 @@ abstract class Shape { */ def deepCopy(): Shape - /** - * Java API: get a list of all input ports - */ + /** Java API: get a list of all input ports */ def getInlets: java.util.List[Inlet[_]] = inlets.asJava - /** - * Java API: get a list of all output ports - */ + /** Java API: get a list of all output ports */ def getOutlets: java.util.List[Outlet[_]] = outlets.asJava - /** - * Compare this to another shape and determine whether the set of ports is the same (ignoring their ordering). - */ + /** Compare this to another shape and determine whether the set of ports is the same (ignoring their ordering). */ def hasSamePortsAs(s: Shape): Boolean = inlets.toSet == s.inlets.toSet && outlets.toSet == s.outlets.toSet - /** - * Compare this to another shape and determine whether the arrangement of ports is the same (including their ordering). - */ + /** Compare this to another shape and determine whether the arrangement of ports is the same (including their ordering). */ def hasSamePortsAndShapeAs(s: Shape): Boolean = inlets == s.inlets && outlets == s.outlets - /** - * Asserting version of [[#hasSamePortsAs]]. - */ + /** Asserting version of [[#hasSamePortsAs]]. 
*/ def requireSamePortsAs(s: Shape): Unit = require(hasSamePortsAs(s), nonCorrespondingMessage(s)) - /** - * Asserting version of [[#hasSamePortsAndShapeAs]]. - */ + /** Asserting version of [[#hasSamePortsAndShapeAs]]. */ def requireSamePortsAndShapeAs(s: Shape): Unit = require(hasSamePortsAndShapeAs(s), nonCorrespondingMessage(s)) private def nonCorrespondingMessage(s: Shape) = - s"The inlets [${s.inlets.mkString(", ")}] and outlets [${s.outlets.mkString(", ")}] must correspond to the inlets [${inlets - .mkString(", ")}] and outlets [${outlets.mkString(", ")}]" + s"The inlets [${s.inlets.mkString(", ")}] and outlets [${s.outlets.mkString( + ", ")}] must correspond to the inlets [${inlets.mkString(", ")}] and outlets [${outlets.mkString(", ")}]" } -/** - * Java API for creating custom [[Shape]] types. - */ +/** Java API for creating custom [[Shape]] types. */ abstract class AbstractShape extends Shape { - /** - * Provide the list of all input ports of this shape. - */ + /** Provide the list of all input ports of this shape. */ def allInlets: java.util.List[Inlet[_]] - /** - * Provide the list of all output ports of this shape. - */ + /** Provide the list of all output ports of this shape. */ def allOutlets: java.util.List[Outlet[_]] final override lazy val inlets: immutable.Seq[Inlet[_]] = allInlets.asScala.toList @@ -258,9 +218,7 @@ object ClosedShape extends ClosedShape { override val outlets: immutable.Seq[Outlet[_]] = EmptyImmutableSeq override def deepCopy() = this - /** - * Java API: obtain ClosedShape instance - */ + /** Java API: obtain ClosedShape instance */ def getInstance: ClosedShape = this override def toString: String = "ClosedShape" @@ -311,9 +269,7 @@ object FlowShape { FlowShape(inlet, outlet) } -/** - * A Sink [[Shape]] has exactly one input and no outputs, it models a data sink. - */ +/** A Sink [[Shape]] has exactly one input and no outputs, it models a data sink. 
*/ final case class SinkShape[-T](in: Inlet[T @uncheckedVariance]) extends Shape { override val inlets: immutable.Seq[Inlet[_]] = in :: Nil override val outlets: immutable.Seq[Outlet[_]] = EmptyImmutableSeq @@ -346,19 +302,17 @@ final case class BidiShape[-In1, +Out1, -In2, +Out2]( in2: Inlet[In2 @uncheckedVariance], out2: Outlet[Out2 @uncheckedVariance]) extends Shape { - //#implementation-details-elided + // #implementation-details-elided override val inlets: immutable.Seq[Inlet[_]] = in1 :: in2 :: Nil override val outlets: immutable.Seq[Outlet[_]] = out1 :: out2 :: Nil - /** - * Java API for creating from a pair of unidirectional flows. - */ + /** Java API for creating from a pair of unidirectional flows. */ def this(top: FlowShape[In1, Out1], bottom: FlowShape[In2, Out2]) = this(top.in, top.out, bottom.in, bottom.out) override def deepCopy(): BidiShape[In1, Out1, In2, Out2] = BidiShape(in1.carbonCopy(), out1.carbonCopy(), in2.carbonCopy(), out2.carbonCopy()) - //#implementation-details-elided + // #implementation-details-elided } //#bidi-shape object BidiShape { diff --git a/akka-stream/src/main/scala/akka/stream/SslTlsOptions.scala b/akka-stream/src/main/scala/akka/stream/SslTlsOptions.scala index cf5b39a73fc..c0a406005b3 100644 --- a/akka-stream/src/main/scala/akka/stream/SslTlsOptions.scala +++ b/akka-stream/src/main/scala/akka/stream/SslTlsOptions.scala @@ -18,14 +18,10 @@ import akka.util.ByteString */ object TLSRole { - /** - * Java API: obtain the [[Client]] singleton value. - */ + /** Java API: obtain the [[Client]] singleton value. */ def client: TLSRole = Client - /** - * Java API: obtain the [[Server]] singleton value. - */ + /** Java API: obtain the [[Server]] singleton value. */ def server: TLSRole = Server } sealed abstract class TLSRole @@ -82,57 +78,41 @@ sealed abstract class TLSClosing { } object TLSClosing { - /** - * Java API: obtain the [[EagerClose]] singleton value. - */ + /** Java API: obtain the [[EagerClose]] singleton value. 
*/ def eagerClose: TLSClosing = EagerClose - /** - * Java API: obtain the [[IgnoreCancel]] singleton value. - */ + /** Java API: obtain the [[IgnoreCancel]] singleton value. */ def ignoreCancel: TLSClosing = IgnoreCancel - /** - * Java API: obtain the [[IgnoreComplete]] singleton value. - */ + /** Java API: obtain the [[IgnoreComplete]] singleton value. */ def ignoreComplete: TLSClosing = IgnoreComplete - /** - * Java API: obtain the [[IgnoreBoth]] singleton value. - */ + /** Java API: obtain the [[IgnoreBoth]] singleton value. */ def ignoreBoth: TLSClosing = IgnoreBoth } -/** - * see [[TLSClosing]] - */ +/** see [[TLSClosing]] */ sealed abstract class EagerClose extends TLSClosing { override def ignoreCancel = false override def ignoreComplete = false } case object EagerClose extends EagerClose -/** - * see [[TLSClosing]] - */ +/** see [[TLSClosing]] */ sealed abstract class IgnoreCancel extends TLSClosing { override def ignoreCancel = true override def ignoreComplete = false } case object IgnoreCancel extends IgnoreCancel -/** - * see [[TLSClosing]] - */ +/** see [[TLSClosing]] */ sealed abstract class IgnoreComplete extends TLSClosing { override def ignoreCancel = false override def ignoreComplete = true } case object IgnoreComplete extends IgnoreComplete -/** - * see [[TLSClosing]] - */ +/** see [[TLSClosing]] */ sealed abstract class IgnoreBoth extends TLSClosing { override def ignoreCancel = true override def ignoreComplete = true @@ -204,26 +184,18 @@ object TLSProtocol { sslParameters: Option[SSLParameters]) extends SslTlsOutbound { - /** - * Java API: Make a copy of this message with the given `enabledCipherSuites`. - */ + /** Java API: Make a copy of this message with the given `enabledCipherSuites`. */ @varargs def withCipherSuites(s: String*): NegotiateNewSession = copy(enabledCipherSuites = Some(s.toList)) - /** - * Java API: Make a copy of this message with the given `enabledProtocols`. 
- */ + /** Java API: Make a copy of this message with the given `enabledProtocols`. */ @varargs def withProtocols(p: String*): NegotiateNewSession = copy(enabledProtocols = Some(p.toList)) - /** - * Java API: Make a copy of this message with the given [[TLSClientAuth]] setting. - */ + /** Java API: Make a copy of this message with the given [[TLSClientAuth]] setting. */ def withClientAuth(ca: TLSClientAuth): NegotiateNewSession = copy(clientAuth = Some(ca)) - /** - * Java API: Make a copy of this message with the given [[SSLParameters]]. - */ + /** Java API: Make a copy of this message with the given [[SSLParameters]]. */ def withParameters(p: SSLParameters): NegotiateNewSession = copy(sslParameters = Some(p)) } diff --git a/akka-stream/src/main/scala/akka/stream/StreamRefs.scala b/akka-stream/src/main/scala/akka/stream/StreamRefs.scala index 649cc937094..56098d328ec 100644 --- a/akka-stream/src/main/scala/akka/stream/StreamRefs.scala +++ b/akka-stream/src/main/scala/akka/stream/StreamRefs.scala @@ -17,9 +17,7 @@ import akka.annotation.DoNotInherit import akka.stream.impl.streamref.StreamRefResolverImpl import akka.stream.scaladsl.{ Sink, Source } -/** - * See full documentation on [[SinkRef]]. - */ +/** See full documentation on [[SinkRef]]. */ object SinkRef { /** Implicitly converts a [[SinkRef]] to a [[Sink]]. The same can be achieved by calling `.sink` on the reference. */ @@ -51,9 +49,7 @@ trait SinkRef[In] { final def getSink(): javadsl.Sink[In, NotUsed] = sink().asJava } -/** - * See full documentation on [[SourceRef]]. - */ +/** See full documentation on [[SourceRef]]. */ object SourceRef { /** Implicitly converts a SourceRef to a Source. The same can be achieved by calling `.source` on the SourceRef itself. */ @@ -117,9 +113,7 @@ final case class InvalidPartnerActorException(expectedRef: ActorRef, gotRef: Act s"Do note that stream refs are one-shot references and have to be paired up in 1:1 pairs. 
" + s"Multi-cast such as broadcast etc can be implemented by sharing multiple new stream references. ") -/** - * The stream ref resolver extension provides a way to serialize and deserialize streamrefs in user serializers. - */ +/** The stream ref resolver extension provides a way to serialize and deserialize streamrefs in user serializers. */ object StreamRefResolver extends ExtensionId[StreamRefResolver] { override def get(system: ActorSystem): StreamRefResolver = super.get(system) override def get(system: ClassicActorSystemProvider): StreamRefResolver = super.get(system) @@ -147,13 +141,9 @@ object StreamRefResolver extends ExtensionId[StreamRefResolver] { */ def toSerializationFormat[T](ref: SinkRef[T]): String - /** - * Deserialize an `SourceRef` in the [[#toSerializationFormat]]. - */ + /** Deserialize an `SourceRef` in the [[#toSerializationFormat]]. */ def resolveSourceRef[T](serializedSourceRef: String): SourceRef[T] - /** - * Deserialize an `SinkRef` in the [[#toSerializationFormat]]. - */ + /** Deserialize an `SinkRef` in the [[#toSerializationFormat]]. 
*/ def resolveSinkRef[T](serializedSinkRef: String): SinkRef[T] } diff --git a/akka-stream/src/main/scala/akka/stream/SubscriptionWithCancelException.scala b/akka-stream/src/main/scala/akka/stream/SubscriptionWithCancelException.scala index 31f05da35fa..5a714060eff 100644 --- a/akka-stream/src/main/scala/akka/stream/SubscriptionWithCancelException.scala +++ b/akka-stream/src/main/scala/akka/stream/SubscriptionWithCancelException.scala @@ -25,9 +25,7 @@ trait SubscriptionWithCancelException extends Subscription { } object SubscriptionWithCancelException { - /** - * Not for user extension - */ + /** Not for user extension */ @DoNotInherit sealed abstract class NonFailureCancellation extends RuntimeException with NoStackTrace case object NoMoreElementsNeeded extends NonFailureCancellation diff --git a/akka-stream/src/main/scala/akka/stream/SubstreamCancelStrategy.scala b/akka-stream/src/main/scala/akka/stream/SubstreamCancelStrategy.scala index 39805d6bed0..77a1e53b9c3 100644 --- a/akka-stream/src/main/scala/akka/stream/SubstreamCancelStrategy.scala +++ b/akka-stream/src/main/scala/akka/stream/SubstreamCancelStrategy.scala @@ -6,33 +6,23 @@ package akka.stream import SubstreamCancelStrategies._ -/** - * Represents a strategy that decides how to deal with substream events. - */ +/** Represents a strategy that decides how to deal with substream events. */ sealed abstract class SubstreamCancelStrategy private[akka] object SubstreamCancelStrategies { - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] case object Propagate extends SubstreamCancelStrategy - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] case object Drain extends SubstreamCancelStrategy } object SubstreamCancelStrategy { - /** - * Cancel the stream of streams if any substream is cancelled. - */ + /** Cancel the stream of streams if any substream is cancelled. 
*/ def propagate: SubstreamCancelStrategy = Propagate - /** - * Drain substream on cancellation in order to prevent stalling of the stream of streams. - */ + /** Drain substream on cancellation in order to prevent stalling of the stream of streams. */ def drain: SubstreamCancelStrategy = Drain } diff --git a/akka-stream/src/main/scala/akka/stream/Supervision.scala b/akka-stream/src/main/scala/akka/stream/Supervision.scala index 225190f2f9b..c918cc9d784 100644 --- a/akka-stream/src/main/scala/akka/stream/Supervision.scala +++ b/akka-stream/src/main/scala/akka/stream/Supervision.scala @@ -51,43 +51,31 @@ object Supervision { type Decider = Function[Throwable, Directive] - /** - * Scala API: [[Decider]] that returns [[Stop]] for all exceptions. - */ + /** Scala API: [[Decider]] that returns [[Stop]] for all exceptions. */ val stoppingDecider: Decider with japi.Function[Throwable, Directive] = new Decider with japi.Function[Throwable, Directive] { override def apply(e: Throwable) = Stop } - /** - * Java API: Decider function that returns [[#stop]] for all exceptions. - */ + /** Java API: Decider function that returns [[#stop]] for all exceptions. */ val getStoppingDecider: japi.Function[Throwable, Directive] = stoppingDecider - /** - * Scala API: [[Decider]] that returns [[Resume]] for all exceptions. - */ + /** Scala API: [[Decider]] that returns [[Resume]] for all exceptions. */ val resumingDecider: Decider with japi.Function[Throwable, Directive] = new Decider with japi.Function[Throwable, Directive] { override def apply(e: Throwable) = Resume } - /** - * Java API: Decider function that returns [[#resume]] for all exceptions. - */ + /** Java API: Decider function that returns [[#resume]] for all exceptions. */ val getResumingDecider: japi.Function[Throwable, Directive] = resumingDecider - /** - * Scala API: [[Decider]] that returns [[Restart]] for all exceptions. - */ + /** Scala API: [[Decider]] that returns [[Restart]] for all exceptions. 
*/ val restartingDecider: Decider with japi.Function[Throwable, Directive] = new Decider with japi.Function[Throwable, Directive] { override def apply(e: Throwable) = Restart } - /** - * Java API: Decider function that returns [[#restart]] for all exceptions. - */ + /** Java API: Decider function that returns [[#restart]] for all exceptions. */ val getRestartingDecider: japi.Function[Throwable, Directive] = restartingDecider } diff --git a/akka-stream/src/main/scala/akka/stream/SystemMaterializer.scala b/akka-stream/src/main/scala/akka/stream/SystemMaterializer.scala index b8b599169ca..dced52b7c4c 100644 --- a/akka-stream/src/main/scala/akka/stream/SystemMaterializer.scala +++ b/akka-stream/src/main/scala/akka/stream/SystemMaterializer.scala @@ -42,9 +42,7 @@ final class SystemMaterializer(system: ExtendedActorSystem) extends Extension { private val systemMaterializerPromise = Promise[Materializer]() // load these here so we can share the same instance across materializer guardian and other uses - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi @nowarn("msg=deprecated") private[akka] val materializerSettings = ActorMaterializerSettings(system) @@ -60,9 +58,7 @@ final class SystemMaterializer(system: ExtendedActorSystem) extends Extension { .withDeploy(Deploy.local), "Materializers") - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def createAdditionalSystemMaterializer(): Materializer = { val started = @@ -80,9 +76,7 @@ final class SystemMaterializer(system: ExtendedActorSystem) extends Extension { Await.result(started, materializerTimeout.duration).materializer } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def createAdditionalLegacySystemMaterializer( namePrefix: String, diff --git a/akka-stream/src/main/scala/akka/stream/ThrottleMode.scala b/akka-stream/src/main/scala/akka/stream/ThrottleMode.scala index 9a62bc8a9b5..f0efb9d6f46 100644 --- 
a/akka-stream/src/main/scala/akka/stream/ThrottleMode.scala +++ b/akka-stream/src/main/scala/akka/stream/ThrottleMode.scala @@ -4,35 +4,23 @@ package akka.stream -/** - * Represents a mode that decides how to deal exceed rate for Throttle operator - */ +/** Represents a mode that decides how to deal exceed rate for Throttle operator */ sealed abstract class ThrottleMode object ThrottleMode { - /** - * Tells throttle to make pauses before emitting messages to meet throttle rate - */ + /** Tells throttle to make pauses before emitting messages to meet throttle rate */ case object Shaping extends ThrottleMode - /** - * Makes throttle fail with exception when upstream is faster than throttle rate - */ + /** Makes throttle fail with exception when upstream is faster than throttle rate */ case object Enforcing extends ThrottleMode - /** - * Java API: Tells throttle to make pauses before emitting messages to meet throttle rate - */ + /** Java API: Tells throttle to make pauses before emitting messages to meet throttle rate */ def shaping = Shaping - /** - * Java API: Makes throttle fail with exception when upstream is faster than throttle rate - */ + /** Java API: Makes throttle fail with exception when upstream is faster than throttle rate */ def enforcing = Enforcing } -/** - * Exception that is thrown when rated controlled by stream is exceeded - */ +/** Exception that is thrown when rated controlled by stream is exceeded */ class RateExceededException(msg: String) extends RuntimeException(msg) diff --git a/akka-stream/src/main/scala/akka/stream/UniformFanInShape.scala b/akka-stream/src/main/scala/akka/stream/UniformFanInShape.scala index 704731c0eaa..97aef05609b 100644 --- a/akka-stream/src/main/scala/akka/stream/UniformFanInShape.scala +++ b/akka-stream/src/main/scala/akka/stream/UniformFanInShape.scala @@ -20,7 +20,7 @@ object UniformFanInShape { class UniformFanInShape[-T, +O](val n: Int, _init: FanInShape.Init[O]) extends FanInShape[O](_init) { - //ports get added 
to `FanInShape.inlets` as a side-effect of calling `newInlet` + // ports get added to `FanInShape.inlets` as a side-effect of calling `newInlet` for (i <- 0 until n) newInlet[T](s"in$i") def this(n: Int) = this(n, FanInShape.Name[O]("UniformFanIn")) diff --git a/akka-stream/src/main/scala/akka/stream/UniformFanOutShape.scala b/akka-stream/src/main/scala/akka/stream/UniformFanOutShape.scala index 2690f54b40a..9a6f865fcfe 100644 --- a/akka-stream/src/main/scala/akka/stream/UniformFanOutShape.scala +++ b/akka-stream/src/main/scala/akka/stream/UniformFanOutShape.scala @@ -14,7 +14,7 @@ object UniformFanOutShape { class UniformFanOutShape[-I, +O](n: Int, _init: FanOutShape.Init[I @uncheckedVariance]) extends FanOutShape[I](_init) { - //initialize by side-effect + // initialize by side-effect for (i <- 0 until n) newOutlet[O](s"out$i") def this(n: Int) = this(n, FanOutShape.Name[I]("UniformFanOut")) diff --git a/akka-stream/src/main/scala/akka/stream/impl/ActorMaterializerImpl.scala b/akka-stream/src/main/scala/akka/stream/impl/ActorMaterializerImpl.scala index 4c8ee189099..e5137822cfc 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/ActorMaterializerImpl.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/ActorMaterializerImpl.scala @@ -46,9 +46,7 @@ import akka.util.OptionVal defaultPhase: Phase[Any], phases: Map[IslandTag, Phase[Any]]): Mat - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] override def actorOf(context: MaterializationContext, props: Props): ActorRef = { val effectiveProps = props.dispatcher match { case Dispatchers.DefaultDispatcherId => @@ -62,9 +60,7 @@ import akka.util.OptionVal actorOf(effectiveProps, context.islandName) } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def actorOf(props: Props, name: String): ActorRef = { supervisor match { case ref: LocalActorRef => @@ -74,14 +70,10 @@ import akka.util.OptionVal } } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi 
private[akka] override def logger: LoggingAdapter - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] override def supervisor: ActorRef } @@ -167,9 +159,7 @@ private[akka] class SubFusingActorMaterializerImpl( override def settings: ActorMaterializerSettings = delegate.settings } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object FlowNames extends ExtensionId[FlowNames] with ExtensionIdProvider { override def get(system: ActorSystem): FlowNames = super.get(system) override def get(system: ClassicActorSystemProvider): FlowNames = super.get(system) @@ -177,16 +167,12 @@ private[akka] class SubFusingActorMaterializerImpl( override def createExtension(system: ExtendedActorSystem): FlowNames = new FlowNames } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class FlowNames extends Extension { val name = SeqActorName("Flow") } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object StreamSupervisor { def props(attributes: Attributes, haveShutDown: AtomicBoolean): Props = Props(new StreamSupervisor(haveShutDown)) @@ -220,9 +206,7 @@ private[akka] class SubFusingActorMaterializerImpl( case object StoppedChildren } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class StreamSupervisor(haveShutDown: AtomicBoolean) extends Actor { import akka.stream.impl.StreamSupervisor._ implicit val ec: ExecutionContextExecutor = context.dispatcher diff --git a/akka-stream/src/main/scala/akka/stream/impl/ActorProcessor.scala b/akka-stream/src/main/scala/akka/stream/impl/ActorProcessor.scala index 824751156ea..75bbe1cfd79 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/ActorProcessor.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/ActorProcessor.scala @@ -14,9 +14,7 @@ import akka.stream.ActorAttributes import akka.stream.impl.ActorSubscriberMessage.{ OnComplete, OnError, OnNext, OnSubscribe } import akka.util.unused -/** - * INTERNAL API - 
*/ +/** INTERNAL API */ @InternalApi private[akka] object ActorProcessor { def apply[I, O](impl: ActorRef): ActorProcessor[I, O] = { @@ -27,9 +25,7 @@ import akka.util.unused } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class ActorProcessor[I, O](impl: ActorRef) extends ActorPublisher[O](impl) with Processor[I, O] { @@ -48,9 +44,7 @@ import akka.util.unused } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] abstract class BatchingInputBuffer(val size: Int, val pump: Pump) extends DefaultInputTransferStates { if (size < 1) throw new IllegalArgumentException(s"buffer size must be positive (was: $size)") @@ -151,8 +145,8 @@ import akka.util.unused case OnSubscribe(subscription) => subscription.cancel() // spec rule 2.5 } - protected def completed: Actor.Receive = { - case OnSubscribe(_) => throw new IllegalStateException("onSubscribe called after onError or onComplete") + protected def completed: Actor.Receive = { case OnSubscribe(_) => + throw new IllegalStateException("onSubscribe called after onError or onComplete") } protected def inputOnError(@unused e: Throwable): Unit = { @@ -161,9 +155,7 @@ import akka.util.unused } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class SimpleOutputs(val actor: ActorRef, val pump: Pump) extends DefaultOutputTransferStates { import ReactiveStreamsCompliance._ @@ -255,9 +247,7 @@ private[akka] object ActorProcessorImpl { case object SubscriptionTimeout } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] abstract class ActorProcessorImpl(attributes: Attributes) extends Actor with ActorLogging @@ -275,9 +265,7 @@ private[akka] object ActorProcessorImpl { protected val primaryOutputs: Outputs = new SimpleOutputs(self, this) def subTimeoutHandling: Receive - /** - * Subclass may override [[#activeReceive]] - */ + /** Subclass may override [[#activeReceive]] */ final override def receive = new 
ExposedPublisherReceive(activeReceive, unhandled) { override def receiveExposedPublisher(ep: ExposedPublisher): Unit = { primaryOutputs.subreceive(ep) diff --git a/akka-stream/src/main/scala/akka/stream/impl/ActorPublisher.scala b/akka-stream/src/main/scala/akka/stream/impl/ActorPublisher.scala index 1c197d6e724..6d2018fdba6 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/ActorPublisher.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/ActorPublisher.scala @@ -16,9 +16,7 @@ import org.reactivestreams.Subscription import akka.actor.{ Actor, ActorRef, Terminated } import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ActorPublisher { val NormalShutdownReasonMessage = "Cannot subscribe to shut-down Publisher" class NormalShutdownException extends IllegalStateException(NormalShutdownReasonMessage) with NoStackTrace @@ -85,23 +83,23 @@ import akka.annotation.InternalApi @volatile private var shutdownReason: Option[Throwable] = None private def reportSubscribeFailure(subscriber: Subscriber[_ >: T]): Unit = - try shutdownReason match { - case Some(_: SpecViolation) => // ok, not allowed to call onError - case Some(e) => - tryOnSubscribe(subscriber, CancelledSubscription) - tryOnError(subscriber, e) - case None => - tryOnSubscribe(subscriber, CancelledSubscription) - tryOnComplete(subscriber) - } catch { + try + shutdownReason match { + case Some(_: SpecViolation) => // ok, not allowed to call onError + case Some(e) => + tryOnSubscribe(subscriber, CancelledSubscription) + tryOnError(subscriber, e) + case None => + tryOnSubscribe(subscriber, CancelledSubscription) + tryOnComplete(subscriber) + } + catch { case _: SpecViolation => // nothing to do } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class ActorSubscription[T]( final val impl: ActorRef, final val subscriber: Subscriber[_ >: T]) @@ -110,16 +108,12 @@ import akka.annotation.InternalApi override def cancel(): 
Unit = impl ! Cancel(this) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class ActorSubscriptionWithCursor[T](_impl: ActorRef, _subscriber: Subscriber[_ >: T]) extends ActorSubscription[T](_impl, _subscriber) with SubscriptionWithCursor[T] -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] trait SoftShutdown { this: Actor => def softShutdown(): Unit = { val children = context.children diff --git a/akka-stream/src/main/scala/akka/stream/impl/ActorRefBackpressureSinkStage.scala b/akka-stream/src/main/scala/akka/stream/impl/ActorRefBackpressureSinkStage.scala index 0f64eaf3346..2e03acc1990 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/ActorRefBackpressureSinkStage.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/ActorRefBackpressureSinkStage.scala @@ -13,9 +13,7 @@ import akka.stream.Attributes.InputBuffer import akka.stream.impl.Stages.DefaultAttributes import akka.stream.stage._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class ActorRefBackpressureSinkStage[In]( ref: ActorRef, messageAdapter: ActorRef => In => Any, @@ -53,7 +51,7 @@ import akka.stream.stage._ if (buffer.size() == maxBuffer) tryPull(in) dequeueAndSend() } - case _ => //ignore all other messages + case _ => // ignore all other messages } } diff --git a/akka-stream/src/main/scala/akka/stream/impl/ActorRefBackpressureSource.scala b/akka-stream/src/main/scala/akka/stream/impl/ActorRefBackpressureSource.scala index 2f8a4f722d1..f024c298e9b 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/ActorRefBackpressureSource.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/ActorRefBackpressureSource.scala @@ -15,9 +15,7 @@ private object ActorRefBackpressureSource { private sealed trait ActorRefStage { def ref: ActorRef } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class ActorRefBackpressureSource[T]( ackTo: Option[ActorRef], ackMessage: Any, diff --git 
a/akka-stream/src/main/scala/akka/stream/impl/ActorRefSinkStage.scala b/akka-stream/src/main/scala/akka/stream/impl/ActorRefSinkStage.scala index 4889035b11d..bc925019b5d 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/ActorRefSinkStage.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/ActorRefSinkStage.scala @@ -10,9 +10,7 @@ import akka.stream.{ AbruptStageTerminationException, Attributes, Inlet, SinkSha import akka.stream.impl.Stages.DefaultAttributes import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler, StageLogging } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi final private[akka] class ActorRefSinkStage[T]( ref: ActorRef, @@ -34,12 +32,12 @@ final private[akka] class ActorRefSinkStage[T]( var completionSignalled = false override def preStart(): Unit = { - getStageActor({ + getStageActor { case (_, Terminated(`ref`)) => completeStage() case msg => log.error("Unexpected message to stage actor {}", msg.getClass) - }).watch(ref) + }.watch(ref) pull(in) } diff --git a/akka-stream/src/main/scala/akka/stream/impl/ActorRefSource.scala b/akka-stream/src/main/scala/akka/stream/impl/ActorRefSource.scala index f87219c0033..9c86c871909 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/ActorRefSource.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/ActorRefSource.scala @@ -15,9 +15,7 @@ private object ActorRefSource { private sealed trait ActorRefStage { def ref: ActorRef } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class ActorRefSource[T]( maxBuffer: Int, overflowStrategy: OverflowStrategy, diff --git a/akka-stream/src/main/scala/akka/stream/impl/ActorSubscriberMessage.scala b/akka-stream/src/main/scala/akka/stream/impl/ActorSubscriberMessage.scala index 78f61f83203..7d7d3abc252 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/ActorSubscriberMessage.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/ActorSubscriberMessage.scala @@ -10,16 +10,12 @@ import 
akka.actor.DeadLetterSuppression import akka.actor.NoSerializationVerificationNeeded import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] sealed abstract class ActorSubscriberMessage extends DeadLetterSuppression with NoSerializationVerificationNeeded -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ActorSubscriberMessage { final case class OnNext(element: Any) extends ActorSubscriberMessage final case class OnError(cause: Throwable) extends ActorSubscriberMessage diff --git a/akka-stream/src/main/scala/akka/stream/impl/BoundedSourceQueue.scala b/akka-stream/src/main/scala/akka/stream/impl/BoundedSourceQueue.scala index 33280069c33..93025abfd8b 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/BoundedSourceQueue.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/BoundedSourceQueue.scala @@ -13,9 +13,7 @@ import akka.dispatch.AbstractBoundedNodeQueue import akka.stream._ import akka.stream.stage.{ GraphStageLogic, GraphStageWithMaterializedValue, OutHandler, StageLogging } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object BoundedSourceQueueStage { sealed trait State case object NeedsActivation extends State @@ -23,9 +21,7 @@ import akka.stream.stage.{ GraphStageLogic, GraphStageWithMaterializedValue, Out case class Done(result: QueueCompletionResult) extends State } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class BoundedSourceQueueStage[T](bufferSize: Int) extends GraphStageWithMaterializedValue[SourceShape[T], BoundedSourceQueue[T]] { import BoundedSourceQueueStage._ diff --git a/akka-stream/src/main/scala/akka/stream/impl/Buffers.scala b/akka-stream/src/main/scala/akka/stream/impl/Buffers.scala index f840a564b4b..a98f4bf013d 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/Buffers.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/Buffers.scala @@ -11,9 +11,7 @@ import 
scala.collection.mutable import akka.annotation.{ InternalApi, InternalStableApi } import akka.stream._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] trait Buffer[T] { def capacity: Int def used: Int @@ -42,9 +40,7 @@ private[akka] object Buffer { else new BoundedBuffer(size) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object FixedSizeBuffer { /** @@ -141,9 +137,7 @@ private[akka] object Buffer { } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class BoundedBuffer[T](val capacity: Int) extends Buffer[T] { import BoundedBuffer._ @@ -171,9 +165,7 @@ private[akka] object Buffer { private var q: Buffer[T] = new FixedQueue[T](capacity, newBuffer => q = newBuffer) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object BoundedBuffer { private final class FixedQueue[T](override val capacity: Int, switchBuffer: Buffer[T] => Unit) extends Buffer[T] { import Buffer._ @@ -386,8 +378,8 @@ private[impl] final class PartitionedBuffer[K, V](size: Int) { def clear(): Unit = { linearBuffer.clear() // ensure that all sub-buffers are cleared - partitionBuffers.foreach { - case (_, buf) => buf.clear() + partitionBuffers.foreach { case (_, buf) => + buf.clear() } partitionBuffers.clear() } diff --git a/akka-stream/src/main/scala/akka/stream/impl/CompletedPublishers.scala b/akka-stream/src/main/scala/akka/stream/impl/CompletedPublishers.scala index 90d73394830..e17b7343340 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/CompletedPublishers.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/CompletedPublishers.scala @@ -8,9 +8,7 @@ import org.reactivestreams.{ Publisher, Subscriber, Subscription } import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] case object EmptyPublisher extends Publisher[Nothing] { import ReactiveStreamsCompliance._ override def subscribe(subscriber: Subscriber[_ >: 
Nothing]): Unit = @@ -25,9 +23,7 @@ import akka.annotation.InternalApi override def toString: String = "already-completed-publisher" } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class ErrorPublisher(t: Throwable, name: String) extends Publisher[Nothing] { ReactiveStreamsCompliance.requireNonNullElement(t) @@ -54,9 +50,7 @@ import akka.annotation.InternalApi override def cancel(): Unit = () } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class CancellingSubscriber[T] extends Subscriber[T] { override def onError(t: Throwable): Unit = () override def onSubscribe(s: Subscription): Unit = s.cancel() @@ -64,9 +58,7 @@ import akka.annotation.InternalApi override def onNext(t: T): Unit = () } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] case object RejectAdditionalSubscribers extends Publisher[Nothing] { import ReactiveStreamsCompliance._ override def subscribe(subscriber: Subscriber[_ >: Nothing]): Unit = diff --git a/akka-stream/src/main/scala/akka/stream/impl/ContextPropagation.scala b/akka-stream/src/main/scala/akka/stream/impl/ContextPropagation.scala index cd59310e831..ba22409a0de 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/ContextPropagation.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/ContextPropagation.scala @@ -6,9 +6,7 @@ package akka.stream.impl import akka.annotation.InternalStableApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalStableApi trait ContextPropagation { def suspendContext(): Unit def resumeContext(): Unit @@ -16,14 +14,10 @@ import akka.annotation.InternalStableApi def resumeContext(context: AnyRef): Unit } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalStableApi object ContextPropagation { - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalStableApi def apply(): ContextPropagation = new ContextPropagationImpl } diff --git a/akka-stream/src/main/scala/akka/stream/impl/EmptySource.scala 
b/akka-stream/src/main/scala/akka/stream/impl/EmptySource.scala index 2add8804861..d4c935aff84 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/EmptySource.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/EmptySource.scala @@ -9,9 +9,7 @@ import akka.stream._ import akka.stream.impl.Stages.DefaultAttributes import akka.stream.stage._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object EmptySource extends GraphStage[SourceShape[Nothing]] { val out = Outlet[Nothing]("EmptySource.out") override val shape = SourceShape(out) diff --git a/akka-stream/src/main/scala/akka/stream/impl/ExposedPublisherReceive.scala b/akka-stream/src/main/scala/akka/stream/impl/ExposedPublisherReceive.scala index 08bc079914e..5e44b93b943 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/ExposedPublisherReceive.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/ExposedPublisherReceive.scala @@ -7,9 +7,7 @@ package akka.stream.impl import akka.actor.Actor import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] abstract class ExposedPublisherReceive(activeReceive: Actor.Receive, unhandled: Any => Unit) extends Actor.Receive { private var stash = List.empty[Any] diff --git a/akka-stream/src/main/scala/akka/stream/impl/FailedSource.scala b/akka-stream/src/main/scala/akka/stream/impl/FailedSource.scala index 389b3773f6d..376650fd5e1 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/FailedSource.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/FailedSource.scala @@ -9,9 +9,7 @@ import akka.stream.{ Attributes, Outlet, SourceShape } import akka.stream.impl.Stages.DefaultAttributes import akka.stream.stage.{ GraphStage, GraphStageLogic, OutHandler } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class FailedSource[T](failure: Throwable) extends GraphStage[SourceShape[T]] { val out = Outlet[T]("FailedSource.out") override val shape = 
SourceShape(out) diff --git a/akka-stream/src/main/scala/akka/stream/impl/FanIn.scala b/akka-stream/src/main/scala/akka/stream/impl/FanIn.scala index bf138f141de..344e7168aa3 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/FanIn.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/FanIn.scala @@ -13,9 +13,7 @@ import akka.stream.ActorAttributes import akka.stream.Attributes import akka.util.unused -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object FanIn { final case class OnError(id: Int, cause: Throwable) @@ -259,9 +257,7 @@ import akka.util.unused } -/** - * INTERNAL API - */ +/** INTERNAL API */ @DoNotInherit private[akka] class FanIn(attributes: Attributes, val inputCount: Int) extends Actor with ActorLogging diff --git a/akka-stream/src/main/scala/akka/stream/impl/FanOut.scala b/akka-stream/src/main/scala/akka/stream/impl/FanOut.scala index 358836f41ab..29c3ec77e7e 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/FanOut.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/FanOut.scala @@ -15,9 +15,7 @@ import akka.stream.ActorAttributes import akka.stream.Attributes import akka.util.unused -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object FanOut { final case class SubstreamRequestMore(id: Int, demand: Long) @@ -228,9 +226,8 @@ import akka.util.unused def subreceive: SubReceive = new SubReceive({ case ExposedPublishers(publishers) => - publishers.zip(outputs).foreach { - case (pub, output) => - output.subreceive(ExposedPublisher(pub)) + publishers.zip(outputs).foreach { case (pub, output) => + output.subreceive(ExposedPublisher(pub)) } case SubstreamRequestMore(id, demand) => @@ -257,9 +254,7 @@ import akka.util.unused } -/** - * INTERNAL API - */ +/** INTERNAL API */ @DoNotInherit private[akka] abstract class FanOut(attributes: Attributes, val outputCount: Int) extends Actor with ActorLogging @@ -304,17 +299,13 @@ import akka.util.unused def receive = 
primaryInputs.subreceive.orElse[Any, Unit](outputBunch.subreceive) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object Unzip { def props(attributes: Attributes): Props = Props(new Unzip(attributes)).withDeploy(Deploy.local) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class Unzip(attributes: Attributes) extends FanOut(attributes, outputCount = 2) { outputBunch.markAllOutputs() diff --git a/akka-stream/src/main/scala/akka/stream/impl/FanoutProcessor.scala b/akka-stream/src/main/scala/akka/stream/impl/FanoutProcessor.scala index 69c636264f7..f6aec5eaca4 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/FanoutProcessor.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/FanoutProcessor.scala @@ -16,9 +16,7 @@ import akka.stream.Attributes import akka.stream.StreamSubscriptionTimeoutTerminationMode import akka.util.OptionVal -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] abstract class FanoutOutputs( val maxBufferSize: Int, val initialBufferSize: Int, @@ -109,17 +107,13 @@ import akka.util.OptionVal } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object FanoutProcessorImpl { def props(attributes: Attributes): Props = Props(new FanoutProcessorImpl(attributes)).withDeploy(Deploy.local) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class FanoutProcessorImpl(attributes: Attributes) extends ActorProcessorImpl(attributes) { val StreamSubscriptionTimeout(timeout, timeoutMode) = attributes.mandatoryAttribute[StreamSubscriptionTimeout] @@ -156,18 +150,17 @@ import akka.util.OptionVal initialPhase(1, running) - def subTimeoutHandling: Receive = { - case ActorProcessorImpl.SubscriptionTimeout => - import StreamSubscriptionTimeoutTerminationMode._ - if (!primaryOutputs.subscribed) { - timeoutMode match { - case CancelTermination => - primaryInputs.cancel() - context.stop(self) - case WarnTermination => - 
log.warning("Subscription timeout for {}", this) - case NoopTermination => // won't happen - } + def subTimeoutHandling: Receive = { case ActorProcessorImpl.SubscriptionTimeout => + import StreamSubscriptionTimeoutTerminationMode._ + if (!primaryOutputs.subscribed) { + timeoutMode match { + case CancelTermination => + primaryInputs.cancel() + context.stop(self) + case WarnTermination => + log.warning("Subscription timeout for {}", this) + case NoopTermination => // won't happen } + } } } diff --git a/akka-stream/src/main/scala/akka/stream/impl/JavaFlowAndRsConverters.scala b/akka-stream/src/main/scala/akka/stream/impl/JavaFlowAndRsConverters.scala index f38ab7b9bf4..5250d27360c 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/JavaFlowAndRsConverters.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/JavaFlowAndRsConverters.scala @@ -65,7 +65,7 @@ private[akka] object JavaFlowAndRsConverters { final def asJava[T](p: rs.Publisher[T]): Flow.Publisher[T] = p match { case null => null // null remains null case adapter: JavaFlowPublisherToRsAdapter[T] => adapter.delegate // unwrap adapter instead of wrapping again - case delegate => new RsPublisherToJavaFlowAdapter(delegate) // adapt, it is a real Publisher + case delegate => new RsPublisherToJavaFlowAdapter(delegate) // adapt, it is a real Publisher } final def asRs[T](p: Flow.Publisher[T]): rs.Publisher[T] = p match { case null => null // null remains null diff --git a/akka-stream/src/main/scala/akka/stream/impl/JsonObjectParser.scala b/akka-stream/src/main/scala/akka/stream/impl/JsonObjectParser.scala index 1780ab252d1..8dbf42558ea 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/JsonObjectParser.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/JsonObjectParser.scala @@ -10,9 +10,7 @@ import akka.annotation.InternalApi import akka.stream.scaladsl.Framing.FramingException import akka.util.ByteString -/** - * INTERNAL API: Use [[akka.stream.scaladsl.JsonFraming]] instead. 
- */ +/** INTERNAL API: Use [[akka.stream.scaladsl.JsonFraming]] instead. */ @InternalApi private[akka] object JsonObjectParser { final val SquareBraceStart = '['.toByte @@ -53,7 +51,8 @@ import akka.util.ByteString private[this] var buffer: Array[Byte] = Array.empty private[this] var pos = 0 // latest position of pointer while scanning for json object end - private[this] var start = 0 // number of chars to drop from the front of the bytestring before emitting (skip whitespace etc) + private[this] var start = + 0 // number of chars to drop from the front of the bytestring before emitting (skip whitespace etc) private[this] var depth = 0 // counter of object-nesting depth, once hits 0 an object should be emitted private[this] var completedObject = false diff --git a/akka-stream/src/main/scala/akka/stream/impl/LazySource.scala b/akka-stream/src/main/scala/akka/stream/impl/LazySource.scala index 66a2f7a4efa..ab240e2c7b6 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/LazySource.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/LazySource.scala @@ -14,16 +14,12 @@ import akka.stream.impl.Stages.DefaultAttributes import akka.stream.scaladsl.{ Keep, Source } import akka.stream.stage._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object LazySource { def apply[T, M](sourceFactory: () => Source[T, M]) = new LazySource[T, M](sourceFactory) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class LazySource[T, M](sourceFactory: () => Source[T, M]) extends GraphStageWithMaterializedValue[SourceShape[T], Future[M]] { val out = Outlet[T]("LazySource.out") @@ -41,26 +37,29 @@ import akka.stream.stage._ } override def onPull(): Unit = { - val source = try { - sourceFactory() - } catch { - case NonFatal(ex) => - matPromise.tryFailure(ex) - throw ex - } + val source = + try { + sourceFactory() + } catch { + case NonFatal(ex) => + matPromise.tryFailure(ex) + throw ex + } val subSink = new 
SubSinkInlet[T]("LazySource") subSink.pull() - setHandler(out, new OutHandler { - override def onPull(): Unit = { - subSink.pull() - } - - override def onDownstreamFinish(cause: Throwable): Unit = { - subSink.cancel(cause) - completeStage() - } - }) + setHandler( + out, + new OutHandler { + override def onPull(): Unit = { + subSink.pull() + } + + override def onDownstreamFinish(cause: Throwable): Unit = { + subSink.cancel(cause) + completeStage() + } + }) subSink.setHandler(new InHandler { override def onPush(): Unit = { diff --git a/akka-stream/src/main/scala/akka/stream/impl/MaterializerGuardian.scala b/akka-stream/src/main/scala/akka/stream/impl/MaterializerGuardian.scala index 2d809a84086..26cbca8cd30 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/MaterializerGuardian.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/MaterializerGuardian.scala @@ -35,9 +35,7 @@ private[akka] object MaterializerGuardian { Props(new MaterializerGuardian(systemMaterializer, materializerSettings)) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @nowarn("msg=deprecated") @InternalApi private[akka] final class MaterializerGuardian( diff --git a/akka-stream/src/main/scala/akka/stream/impl/MaybeSource.scala b/akka-stream/src/main/scala/akka/stream/impl/MaybeSource.scala index 754c4492b0f..4d5322b7717 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/MaybeSource.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/MaybeSource.scala @@ -14,9 +14,7 @@ import akka.stream.impl.Stages.DefaultAttributes import akka.stream.stage.{ GraphStageLogic, GraphStageWithMaterializedValue, OutHandler } import akka.util.OptionVal -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object MaybeSource extends GraphStageWithMaterializedValue[SourceShape[AnyRef], Promise[Option[AnyRef]]] { val out = Outlet[AnyRef]("MaybeSource.out") diff --git a/akka-stream/src/main/scala/akka/stream/impl/Messages.scala 
b/akka-stream/src/main/scala/akka/stream/impl/Messages.scala index be3b5a16101..a7fb97ad618 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/Messages.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/Messages.scala @@ -7,30 +7,22 @@ package akka.stream.impl import akka.actor.{ DeadLetterSuppression, NoSerializationVerificationNeeded } import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] case object SubscribePending extends DeadLetterSuppression with NoSerializationVerificationNeeded -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class RequestMore[T](subscription: ActorSubscription[T], demand: Long) extends DeadLetterSuppression with NoSerializationVerificationNeeded -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class Cancel[T](subscription: ActorSubscription[T]) extends DeadLetterSuppression with NoSerializationVerificationNeeded -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class ExposedPublisher(publisher: ActorPublisher[Any]) extends DeadLetterSuppression with NoSerializationVerificationNeeded diff --git a/akka-stream/src/main/scala/akka/stream/impl/Modules.scala b/akka-stream/src/main/scala/akka/stream/impl/Modules.scala index b00573d24ff..b3ab55eb85f 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/Modules.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/Modules.scala @@ -14,9 +14,7 @@ import akka.event.Logging import akka.stream._ import akka.stream.impl.StreamLayout.AtomicModule -/** - * INTERNAL API - */ +/** INTERNAL API */ @DoNotInherit private[akka] abstract class SourceModule[+Out, +Mat](val shape: SourceShape[Out]) extends AtomicModule[SourceShape[Out], Mat] { diff --git a/akka-stream/src/main/scala/akka/stream/impl/PhasedFusingActorMaterializer.scala b/akka-stream/src/main/scala/akka/stream/impl/PhasedFusingActorMaterializer.scala index 
b4bc54b9e83..c4bdfa675f7 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/PhasedFusingActorMaterializer.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/PhasedFusingActorMaterializer.scala @@ -45,9 +45,7 @@ import akka.stream.stage.InHandler import akka.stream.stage.OutHandler import akka.util.OptionVal -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object PhasedFusingActorMaterializer { val Debug = false @@ -612,9 +610,7 @@ private final case class SavedIslandData( override def makeLogger(logSource: Class[Any]): LoggingAdapter = Logging(system, logSource) - /** - * INTERNAL API - */ + /** INTERNAL API */ @nowarn("msg=deprecated") @InternalApi private[akka] override def actorOf(context: MaterializationContext, props: Props): ActorRef = { val effectiveProps = props.dispatcher match { @@ -630,14 +626,10 @@ private final case class SavedIslandData( } -/** - * INTERNAL API - */ +/** INTERNAL API */ @DoNotInherit private[akka] trait IslandTag -/** - * INTERNAL API - */ +/** INTERNAL API */ @DoNotInherit private[akka] trait Phase[M] { def apply( settings: ActorMaterializerSettings, @@ -646,9 +638,7 @@ private final case class SavedIslandData( islandName: String): PhaseIsland[M] } -/** - * INTERNAL API - */ +/** INTERNAL API */ @DoNotInherit private[akka] trait PhaseIsland[M] { def name: String @@ -672,14 +662,10 @@ private final case class SavedIslandData( } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object GraphStageTag extends IslandTag -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class GraphStageIsland( effectiveAttributes: Attributes, materializer: PhasedFusingActorMaterializer, @@ -841,21 +827,18 @@ private final case class SavedIslandData( if (isIn) s"in port id [$missingHandlerIdx]" else s"out port id [$missingHandlerIdx]" } - throw new IllegalStateException(s"No handler defined in stage [${logic.toString}] for $portLabel." 
+ - " All inlets and outlets must be assigned a handler with setHandler in the constructor of your graph stage logic.") + throw new IllegalStateException( + s"No handler defined in stage [${logic.toString}] for $portLabel." + + " All inlets and outlets must be assigned a handler with setHandler in the constructor of your graph stage logic.") } override def toString: String = "GraphStagePhase" } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object SourceModuleIslandTag extends IslandTag -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class SourceModulePhase( materializer: PhasedFusingActorMaterializer, islandName: String) @@ -880,14 +863,10 @@ private final case class SavedIslandData( override def onIslandReady(): Unit = () } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object SinkModuleIslandTag extends IslandTag -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class SinkModulePhase(materializer: PhasedFusingActorMaterializer, islandName: String) extends PhaseIsland[AnyRef] { override def name: String = s"SinkModule phase" @@ -915,21 +894,17 @@ private final case class SavedIslandData( subscriberOrVirtualPublisher match { case v: VirtualPublisher[_] => v.registerPublisher(publisher) case s: Subscriber[Any] @unchecked => publisher.subscribe(s) - case _ => throw new IllegalStateException() // won't happen, compiler exhaustiveness check pleaser + case _ => throw new IllegalStateException() // won't happen, compiler exhaustiveness check pleaser } } override def onIslandReady(): Unit = () } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ProcessorModuleIslandTag extends IslandTag -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class ProcessorModulePhase() extends PhaseIsland[Processor[Any, Any]] { override def name: String = "ProcessorModulePhase" private[this] var processor: 
Processor[Any, Any] = _ @@ -950,14 +925,10 @@ private final case class SavedIslandData( override def onIslandReady(): Unit = () } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object TlsModuleIslandTag extends IslandTag -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class TlsModulePhase(materializer: PhasedFusingActorMaterializer, islandName: String) extends PhaseIsland[NotUsed] { def name: String = "TlsModulePhase" diff --git a/akka-stream/src/main/scala/akka/stream/impl/QueueSource.scala b/akka-stream/src/main/scala/akka/stream/impl/QueueSource.scala index 9978073f58f..9e080ea6cfd 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/QueueSource.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/QueueSource.scala @@ -13,9 +13,7 @@ import akka.stream.OverflowStrategies._ import akka.stream.scaladsl.SourceQueueWithComplete import akka.stream.stage._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object QueueSource { sealed trait Input[+T] @@ -25,9 +23,7 @@ import akka.stream.stage._ } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class QueueSource[T]( maxBuffer: Int, overflowStrategy: OverflowStrategy, diff --git a/akka-stream/src/main/scala/akka/stream/impl/ReactiveStreamsCompliance.scala b/akka-stream/src/main/scala/akka/stream/impl/ReactiveStreamsCompliance.scala index 99df20ad2c0..1f326438f0a 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/ReactiveStreamsCompliance.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/ReactiveStreamsCompliance.scala @@ -11,9 +11,7 @@ import org.reactivestreams.{ Subscriber, Subscription } import akka.annotation.InternalApi import akka.stream.SubscriptionWithCancelException -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[stream] object ReactiveStreamsCompliance { final val CanNotSubscribeTheSameSubscriberMultipleTimes = @@ -130,10 +128,12 @@ import 
akka.stream.SubscriptionWithCancelException final def tryCancel(subscription: Subscription, cause: Throwable): Unit = { if (subscription eq null) throw new IllegalStateException("Subscription must be not null on cancel() call, rule 1.3") - try subscription match { - case s: SubscriptionWithCancelException => s.cancel(cause) - case s => s.cancel() - } catch { + try + subscription match { + case s: SubscriptionWithCancelException => s.cancel(cause) + case s => s.cancel() + } + catch { case NonFatal(t) => throw new SignalThrewException("It is illegal to throw exceptions from cancel(), rule 3.15", t) } diff --git a/akka-stream/src/main/scala/akka/stream/impl/ResizableMultiReaderRingBuffer.scala b/akka-stream/src/main/scala/akka/stream/impl/ResizableMultiReaderRingBuffer.scala index ea13263e30d..3e7672bb7e2 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/ResizableMultiReaderRingBuffer.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/ResizableMultiReaderRingBuffer.scala @@ -45,33 +45,23 @@ import akka.annotation.InternalApi // bit mask for converting a cursor into an array index private def mask: Int = Int.MaxValue >> (31 - lenBit) - /** - * The number of elements currently in the buffer. - */ + /** The number of elements currently in the buffer. */ def size: Int = writeIx - readIx def isEmpty: Boolean = size == 0 def nonEmpty: Boolean = !isEmpty - /** - * The number of elements the buffer can still take without having to be resized. - */ + /** The number of elements the buffer can still take without having to be resized. */ def immediatelyAvailable: Int = array.length - size - /** - * The maximum number of elements the buffer can still take. - */ + /** The maximum number of elements the buffer can still take. */ def maxAvailable: Int = (1 << maxSizeBit) - size - /** - * Returns the number of elements that the buffer currently contains for the given cursor. - */ + /** Returns the number of elements that the buffer currently contains for the given cursor. 
*/ def count(cursor: Cursor): Int = writeIx - cursor.cursor - /** - * Initializes the given Cursor to the oldest buffer entry that is still available. - */ + /** Initializes the given Cursor to the oldest buffer entry that is still available. */ def initCursor(cursor: Cursor): Unit = cursor.cursor = readIx /** @@ -143,9 +133,7 @@ import akka.annotation.InternalApi s"ResizableMultiReaderRingBuffer(size=$size, writeIx=$writeIx, readIx=$readIx, cursors=${cursors.cursors.size})" } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ResizableMultiReaderRingBuffer { object NothingToReadException extends RuntimeException with NoStackTrace diff --git a/akka-stream/src/main/scala/akka/stream/impl/RetryFlowCoordinator.scala b/akka-stream/src/main/scala/akka/stream/impl/RetryFlowCoordinator.scala index e514bc67104..627efc27636 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/RetryFlowCoordinator.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/RetryFlowCoordinator.scala @@ -112,11 +112,13 @@ import akka.util.OptionVal } }) - setHandler(externalOut, new OutHandler { - override def onPull(): Unit = - // external demand - if (!hasBeenPulled(internalIn)) pull(internalIn) - }) + setHandler( + externalOut, + new OutHandler { + override def onPull(): Unit = + // external demand + if (!hasBeenPulled(internalIn)) pull(internalIn) + }) private def pushInternal(element: In): Unit = { push(internalOut, element) diff --git a/akka-stream/src/main/scala/akka/stream/impl/SeqActorName.scala b/akka-stream/src/main/scala/akka/stream/impl/SeqActorName.scala index 99584c6331b..06aeba224b5 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/SeqActorName.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/SeqActorName.scala @@ -20,16 +20,12 @@ import akka.annotation.{ DoNotInherit, InternalApi } def copy(name: String): SeqActorName } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object SeqActorName { def 
apply(prefix: String) = new SeqActorNameImpl(prefix, new AtomicLong(0)) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class SeqActorNameImpl(val prefix: String, counter: AtomicLong) extends SeqActorName { def next(): String = prefix + '-' + counter.getAndIncrement() diff --git a/akka-stream/src/main/scala/akka/stream/impl/SinkholeSubscriber.scala b/akka-stream/src/main/scala/akka/stream/impl/SinkholeSubscriber.scala index 9d714daec58..0ce931297c9 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/SinkholeSubscriber.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/SinkholeSubscriber.scala @@ -11,9 +11,7 @@ import org.reactivestreams.{ Subscriber, Subscription } import akka.Done import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class SinkholeSubscriber[T](whenComplete: Promise[Done]) extends Subscriber[T] { private[this] var running: Boolean = false diff --git a/akka-stream/src/main/scala/akka/stream/impl/Sinks.scala b/akka-stream/src/main/scala/akka/stream/impl/Sinks.scala index de2c2496e98..667bfe7b446 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/Sinks.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/Sinks.scala @@ -35,9 +35,7 @@ import akka.stream.scaladsl.{ Keep, Sink, SinkQueueWithCancel, Source } import akka.stream.stage._ import akka.util.ccompat._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @DoNotInherit private[akka] abstract class SinkModule[-In, Mat](val shape: SinkShape[In]) extends AtomicModule[SinkShape[In], Mat] { @@ -98,9 +96,7 @@ import akka.util.ccompat._ new PublisherSink[In](attr, amendShape(attr)) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class FanoutPublisherSink[In](val attributes: Attributes, shape: SinkShape[In]) extends SinkModule[In, Publisher[In]](shape) { @@ -143,9 +139,7 @@ import akka.util.ccompat._ override def withAttributes(attr: Attributes): 
SinkModule[Any, NotUsed] = new CancelSink(attr, amendShape(attr)) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class TakeLastStage[T](n: Int) extends GraphStageWithMaterializedValue[SinkShape[T], Future[immutable.Seq[T]]] { if (n <= 0) @@ -157,43 +151,43 @@ import akka.util.ccompat._ override def createLogicAndMaterializedValue(inheritedAttributes: Attributes) = { val p: Promise[immutable.Seq[T]] = Promise() - (new GraphStageLogic(shape) with InHandler { - private[this] val buffer = mutable.Queue.empty[T] - private[this] var count = 0 - - override def preStart(): Unit = pull(in) - - override def onPush(): Unit = { - buffer.enqueue(grab(in)) - if (count < n) - count += 1 - else - buffer.dequeue() - pull(in) - } + ( + new GraphStageLogic(shape) with InHandler { + private[this] val buffer = mutable.Queue.empty[T] + private[this] var count = 0 + + override def preStart(): Unit = pull(in) + + override def onPush(): Unit = { + buffer.enqueue(grab(in)) + if (count < n) + count += 1 + else + buffer.dequeue() + pull(in) + } - override def onUpstreamFinish(): Unit = { - val elements = buffer.toList - buffer.clear() - p.trySuccess(elements) - completeStage() - } + override def onUpstreamFinish(): Unit = { + val elements = buffer.toList + buffer.clear() + p.trySuccess(elements) + completeStage() + } - override def onUpstreamFailure(ex: Throwable): Unit = { - p.tryFailure(ex) - failStage(ex) - } + override def onUpstreamFailure(ex: Throwable): Unit = { + p.tryFailure(ex) + failStage(ex) + } - setHandler(in, this) - }, p.future) + setHandler(in, this) + }, + p.future) } override def toString: String = "TakeLastStage" } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class HeadOptionStage[T] extends GraphStageWithMaterializedValue[SinkShape[T], Future[Option[T]]] { @@ -203,38 +197,38 @@ import akka.util.ccompat._ override def createLogicAndMaterializedValue(inheritedAttributes: Attributes) = { val p: 
Promise[Option[T]] = Promise() - (new GraphStageLogic(shape) with InHandler { - override def preStart(): Unit = pull(in) + ( + new GraphStageLogic(shape) with InHandler { + override def preStart(): Unit = pull(in) - def onPush(): Unit = { - p.trySuccess(Option(grab(in))) - completeStage() - } + def onPush(): Unit = { + p.trySuccess(Option(grab(in))) + completeStage() + } - override def onUpstreamFinish(): Unit = { - p.trySuccess(None) - completeStage() - } + override def onUpstreamFinish(): Unit = { + p.trySuccess(None) + completeStage() + } - override def onUpstreamFailure(ex: Throwable): Unit = { - p.tryFailure(ex) - failStage(ex) - } + override def onUpstreamFailure(ex: Throwable): Unit = { + p.tryFailure(ex) + failStage(ex) + } - override def postStop(): Unit = { - if (!p.isCompleted) p.failure(new AbruptStageTerminationException(this)) - } + override def postStop(): Unit = { + if (!p.isCompleted) p.failure(new AbruptStageTerminationException(this)) + } - setHandler(in, this) - }, p.future) + setHandler(in, this) + }, + p.future) } override def toString: String = "HeadOptionStage" } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class SeqStage[T, That](implicit cbf: Factory[T, That with immutable.Iterable[_]]) extends GraphStageWithMaterializedValue[SinkShape[T], Future[That]] { val in = Inlet[T]("seq.in") @@ -279,18 +273,14 @@ import akka.util.ccompat._ } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object QueueSink { sealed trait Output[+T] final case class Pull[T](promise: Promise[Option[T]]) extends Output[T] case object Cancel extends Output[Nothing] } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class QueueSink[T](maxConcurrentPulls: Int) extends GraphStageWithMaterializedValue[SinkShape[T], SinkQueueWithCancel[T]] { @@ -322,7 +312,7 @@ import akka.util.ccompat._ if (currentRequests.isFull) pullPromise.failure( new IllegalStateException(s"Too many 
concurrent pulls. Specified maximum is $maxConcurrentPulls. " + - "You have to wait for one previous future to be resolved to send another request")) + "You have to wait for one previous future to be resolved to send another request")) else if (buffer.isEmpty) currentRequests.enqueue(pullPromise) else { if (buffer.used == maxBuffer) tryPull(in) @@ -335,7 +325,7 @@ import akka.util.ccompat._ val e = buffer.dequeue() promise.complete(e) e match { - case Success(_: Some[_]) => //do nothing + case Success(_: Some[_]) => // do nothing case Success(None) => completeStage() case Failure(t) => failStage(t) } @@ -509,9 +499,7 @@ import akka.util.ccompat._ def finish(): R = collector.finisher().apply(reduced) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi final private[stream] class LazySink[T, M](sinkFactory: T => Future[Sink[T, M]]) extends GraphStageWithMaterializedValue[SinkShape[T], Future[M]] { val in = Inlet[T]("lazySink.in") diff --git a/akka-stream/src/main/scala/akka/stream/impl/Stages.scala b/akka-stream/src/main/scala/akka/stream/impl/Stages.scala index 13c9a3e0bca..9656de2f2d4 100755 --- a/akka-stream/src/main/scala/akka/stream/impl/Stages.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/Stages.scala @@ -8,9 +8,7 @@ import akka.annotation.InternalApi import akka.stream._ import akka.stream.Attributes._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object Stages { object DefaultAttributes { diff --git a/akka-stream/src/main/scala/akka/stream/impl/StreamLayout.scala b/akka-stream/src/main/scala/akka/stream/impl/StreamLayout.scala index 0a3c2dbca25..cb019abc99f 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/StreamLayout.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/StreamLayout.scala @@ -19,9 +19,7 @@ import akka.stream._ import akka.stream.impl.Stages.DefaultAttributes import akka.util.OptionVal -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[stream] object StreamLayout { 
// compile-time constant @@ -35,9 +33,7 @@ import akka.util.OptionVal trait AtomicModule[+S <: Shape, +M] extends Graph[S, M] } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[stream] object VirtualProcessor { // intentional syntax to make compile time constant @@ -69,7 +65,6 @@ import akka.util.OptionVal * downstream and upstream, this needs an atomic state machine which looks a * little like this: * - * * +--------+ (2) +---------------+ * | null +------------>+ Subscriber | * +---+----+ +-----+---------+ @@ -92,7 +87,6 @@ import akka.util.OptionVal * | Publisher +-----> | Inert | | (5, *) * +--------------+ +---------------+ <-- * - * * The idea is to keep the major state in only one atomic reference. The actions * that can happen are: * @@ -324,10 +318,12 @@ import akka.util.OptionVal if (!compareAndSet(x, ErrorPublisher(ex, "failed-VirtualProcessor"))) rec() case s: Subscriber[_] => try s.onError(ex) - catch { case NonFatal(_) => } finally set(Inert) + catch { case NonFatal(_) => } + finally set(Inert) case Both(s) => try s.onError(ex) - catch { case NonFatal(_) => } finally set(Inert) + catch { case NonFatal(_) => } + finally set(Inert) case _ => // spec violation or cancellation race, but nothing we can do } rec() @@ -525,9 +521,7 @@ import akka.util.OptionVal override def toString: String = s"VirtualPublisher(state = ${get()})" } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class ProcessorModule[In, Out, Mat]( val createProcessor: () => (Processor[In, Out], Mat), attributes: Attributes = DefaultAttributes.processor) diff --git a/akka-stream/src/main/scala/akka/stream/impl/StreamSubscriptionTimeout.scala b/akka-stream/src/main/scala/akka/stream/impl/StreamSubscriptionTimeout.scala index 251e636a6cd..8ea38bfd657 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/StreamSubscriptionTimeout.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/StreamSubscriptionTimeout.scala @@ -15,14 +15,10 
@@ import akka.annotation.InternalApi import akka.stream.StreamSubscriptionTimeoutSettings import akka.stream.StreamSubscriptionTimeoutTerminationMode.{ CancelTermination, NoopTermination, WarnTermination } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object StreamSubscriptionTimeoutSupport { - /** - * A subscriber who calls `cancel` directly from `onSubscribe` and ignores all other callbacks. - */ + /** A subscriber who calls `cancel` directly from `onSubscribe` and ignores all other callbacks. */ case object CancelingSubscriber extends Subscriber[Any] { override def onSubscribe(s: Subscription): Unit = { ReactiveStreamsCompliance.requireNonNullSubscription(s) @@ -64,9 +60,7 @@ import akka.stream.StreamSubscriptionTimeoutTerminationMode.{ CancelTermination, import StreamSubscriptionTimeoutSupport._ - /** - * Default settings for subscription timeouts. - */ + /** Default settings for subscription timeouts. */ protected def subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings /** @@ -111,9 +105,7 @@ import akka.stream.StreamSubscriptionTimeoutTerminationMode.{ CancelTermination, target.getClass.getCanonicalName) } - /** - * Called by the actor when a subscription has timed out. Expects the actual `Publisher` or `Processor` target. - */ + /** Called by the actor when a subscription has timed out. Expects the actual `Publisher` or `Processor` target. */ @nowarn("msg=deprecated") protected def subscriptionTimedOut(target: Publisher[_]): Unit = subscriptionTimeoutSettings.mode match { case NoopTermination => // ignore... @@ -121,13 +113,9 @@ import akka.stream.StreamSubscriptionTimeoutTerminationMode.{ CancelTermination, case CancelTermination => cancel(target, subscriptionTimeoutSettings.timeout) } - /** - * Callback that should ensure that the target is canceled with the given cause. - */ + /** Callback that should ensure that the target is canceled with the given cause. 
*/ protected def handleSubscriptionTimeout(target: Publisher[_], cause: Exception): Unit } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class SubscriptionTimeoutException(msg: String) extends RuntimeException(msg) diff --git a/akka-stream/src/main/scala/akka/stream/impl/SubFlowImpl.scala b/akka-stream/src/main/scala/akka/stream/impl/SubFlowImpl.scala index 5219f612c4b..21d784bf603 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/SubFlowImpl.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/SubFlowImpl.scala @@ -9,18 +9,14 @@ import akka.annotation.InternalApi import akka.stream._ import akka.stream.scaladsl._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object SubFlowImpl { trait MergeBack[In, F[+_]] { def apply[T](f: Flow[In, T, NotUsed], breadth: Int): F[T] } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class SubFlowImpl[In, Out, Mat, F[+_], C]( val subFlow: Flow[In, Out, NotUsed], mergeBackFunction: SubFlowImpl.MergeBack[In, F], diff --git a/akka-stream/src/main/scala/akka/stream/impl/SubscriberManagement.scala b/akka-stream/src/main/scala/akka/stream/impl/SubscriberManagement.scala index a35f983e1fe..00814d8549a 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/SubscriberManagement.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/SubscriberManagement.scala @@ -8,9 +8,7 @@ import scala.annotation.tailrec import org.reactivestreams.{ Subscriber, Subscription } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] object SubscriberManagement { sealed trait EndOfStream { @@ -34,9 +32,7 @@ private[akka] object SubscriberManagement { val ShutDown = new ErrorCompleted(ActorPublisher.NormalShutdownReason) } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] trait SubscriptionWithCursor[T] extends Subscription with ResizableMultiReaderRingBuffer.Cursor { import ReactiveStreamsCompliance._ @@ -51,9 +47,7 @@ private[akka] trait 
SubscriptionWithCursor[T] extends Subscription with Resizabl var cursor: Int = 0 // buffer cursor, managed by buffer } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] trait SubscriberManagement[T] extends ResizableMultiReaderRingBuffer.Cursors { import SubscriberManagement._ type S <: SubscriptionWithCursor[T] @@ -74,14 +68,10 @@ private[akka] trait SubscriberManagement[T] extends ResizableMultiReaderRingBuff */ protected def cancelUpstream(): Unit - /** - * called when the spi.Publisher/Processor is ready to be shut down - */ + /** called when the spi.Publisher/Processor is ready to be shut down */ protected def shutdown(completed: Boolean): Unit - /** - * Use to register a subscriber - */ + /** Use to register a subscriber */ protected def createSubscription(subscriber: Subscriber[_ >: T]): S private[this] val buffer = new ResizableMultiReaderRingBuffer[T](initialBufferSize, maxBufferSize, this) @@ -99,9 +89,7 @@ private[akka] trait SubscriberManagement[T] extends ResizableMultiReaderRingBuff def cursors = subscriptions - /** - * more demand was signaled from a given subscriber - */ + /** more demand was signaled from a given subscriber */ protected def moreRequested(subscription: S, elements: Long): Unit = if (subscription.active) { import ReactiveStreamsCompliance._ @@ -122,14 +110,15 @@ private[akka] trait SubscriberManagement[T] extends ResizableMultiReaderRingBuff // if we are at end-of-stream and have nothing more to read we complete now rather than after the next `requestMore` if ((eos ne NotReached) && buffer.count(subscription) == 0) Long.MinValue else 0 } else if (buffer.count(subscription) > 0) { - val goOn = try { - subscription.dispatch(buffer.read(subscription)) - true - } catch { - case _: SpecViolation => - unregisterSubscriptionInternal(subscription) - false - } + val goOn = + try { + subscription.dispatch(buffer.read(subscription)) + true + } catch { + case _: SpecViolation => + unregisterSubscriptionInternal(subscription) + false 
+ } if (goOn) dispatchFromBufferAndReturnRemainingRequested(requested - 1, eos) else Long.MinValue } else if (eos ne NotReached) Long.MinValue @@ -162,9 +151,7 @@ private[akka] trait SubscriberManagement[T] extends ResizableMultiReaderRingBuff } } - /** - * this method must be called by the implementing class whenever a new value is available to be pushed downstream - */ + /** this method must be called by the implementing class whenever a new value is available to be pushed downstream */ protected def pushToDownstream(value: T): Unit = { @tailrec def dispatch(remaining: Subscriptions, sent: Boolean = false): Boolean = remaining match { @@ -210,18 +197,14 @@ private[akka] trait SubscriberManagement[T] extends ResizableMultiReaderRingBuff } // else ignore, we need to be idempotent } - /** - * this method must be called by the implementing class to push an error downstream - */ + /** this method must be called by the implementing class to push an error downstream */ protected def abortDownstream(cause: Throwable): Unit = { endOfStream = ErrorCompleted(cause) subscriptions.foreach(s => endOfStream(s.subscriber)) subscriptions = Nil } - /** - * Register a new subscriber. - */ + /** Register a new subscriber. 
*/ protected def registerSubscriber(subscriber: Subscriber[_ >: T]): Unit = endOfStream match { case NotReached if subscriptions.exists(_.subscriber == subscriber) => ReactiveStreamsCompliance.rejectDuplicateSubscriber(subscriber) diff --git a/akka-stream/src/main/scala/akka/stream/impl/Throttle.scala b/akka-stream/src/main/scala/akka/stream/impl/Throttle.scala index eb992973cd7..ee188861af5 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/Throttle.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/Throttle.scala @@ -13,17 +13,13 @@ import akka.stream.impl.fusing.GraphStages.SimpleLinearGraphStage import akka.stream.stage._ import akka.util.NanoTimeTokenBucket -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object Throttle { final val AutomaticMaximumBurst = -1 private case object TimerKey } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class Throttle[T]( val cost: Int, val per: FiniteDuration, @@ -42,7 +38,7 @@ import akka.util.NanoTimeTokenBucket // 100 ms is a realistic minimum between tokens, otherwise the maximumBurst is adjusted // to be able to support higher rates val effectiveMaximumBurst: Long = - if (maximumBurst == Throttle.AutomaticMaximumBurst) math.max(1, ((100 * 1000 * 1000) / nanosBetweenTokens)) + if (maximumBurst == Throttle.AutomaticMaximumBurst) math.max(1, (100 * 1000 * 1000) / nanosBetweenTokens) else maximumBurst require(!(mode == ThrottleMode.Enforcing && effectiveMaximumBurst < 0), "maximumBurst must be > 0 in Enforcing mode") diff --git a/akka-stream/src/main/scala/akka/stream/impl/Transfer.scala b/akka-stream/src/main/scala/akka/stream/impl/Transfer.scala index 1cede3ca1cb..193aab12adb 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/Transfer.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/Transfer.scala @@ -9,9 +9,7 @@ import scala.util.control.NonFatal import akka.actor.Actor import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL 
API */ @InternalApi private[akka] class SubReceive(initial: Actor.Receive) extends Actor.Receive { private var currentReceive = initial @@ -23,9 +21,7 @@ import akka.annotation.InternalApi } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] trait Inputs { def NeedsInput: TransferState def NeedsInputOrComplete: TransferState @@ -42,9 +38,7 @@ import akka.annotation.InternalApi def inputsAvailable: Boolean } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] trait DefaultInputTransferStates extends Inputs { override val NeedsInput: TransferState = new TransferState { def isReady = inputsAvailable @@ -56,9 +50,7 @@ import akka.annotation.InternalApi } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] trait Outputs { def NeedsDemand: TransferState def NeedsDemandOrCancel: TransferState @@ -78,9 +70,7 @@ import akka.annotation.InternalApi def isOpen: Boolean = !isClosed } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] trait DefaultOutputTransferStates extends Outputs { override val NeedsDemand: TransferState = new TransferState { def isReady = demandAvailable @@ -93,9 +83,7 @@ import akka.annotation.InternalApi } // States of the operation that is executed by this processor -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] trait TransferState { def isReady: Boolean def isCompleted: Boolean @@ -112,47 +100,35 @@ import akka.annotation.InternalApi } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object Completed extends TransferState { def isReady = false def isCompleted = true } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object NotInitialized extends TransferState { def isReady = false def isCompleted = false } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] case class WaitingForUpstreamSubscription(remaining: Int, andThen: TransferPhase) extends 
TransferState { def isReady = false def isCompleted = false } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object Always extends TransferState { def isReady = true def isCompleted = false } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class TransferPhase(precondition: TransferState)(val action: () => Unit) -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] trait Pump { private var transferState: TransferState = NotInitialized private var currentAction: () => Unit = @@ -199,9 +175,11 @@ import akka.annotation.InternalApi // Exchange input buffer elements and output buffer "requests" until one of them becomes empty. // Generate upstream requestMore for every Nth consumed input element final def pump(): Unit = { - try while (transferState.isExecutable) { - currentAction() - } catch { case NonFatal(e) => pumpFailed(e) } + try + while (transferState.isExecutable) { + currentAction() + } + catch { case NonFatal(e) => pumpFailed(e) } if (isPumpFinished) pumpFinished() } diff --git a/akka-stream/src/main/scala/akka/stream/impl/TraversalBuilder.scala b/akka-stream/src/main/scala/akka/stream/impl/TraversalBuilder.scala index a2b707630e0..04875a21dc4 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/TraversalBuilder.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/TraversalBuilder.scala @@ -39,9 +39,7 @@ import akka.util.unused */ @InternalApi private[akka] sealed trait Traversal { - /** - * Concatenates two traversals building a new Traversal which traverses both. - */ + /** Concatenates two traversals building a new Traversal which traverses both. 
*/ def concat(that: Traversal): Traversal = { Concat.normalizeConcat(this, that) } @@ -49,9 +47,7 @@ import akka.util.unused def rewireFirstTo(@unused relativeOffset: Int): Traversal = null } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object Concat { /** @@ -136,31 +132,21 @@ import akka.util.unused override def concat(that: Traversal): Traversal = that } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] sealed trait MaterializedValueOp extends Traversal -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] case object Pop extends MaterializedValueOp -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] case object PushNotUsed extends MaterializedValueOp -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class Transform(mapper: AnyFunction1) extends MaterializedValueOp { def apply(arg: Any): Any = mapper.asInstanceOf[Any => Any](arg) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class Compose(composer: AnyFunction2, reverse: Boolean = false) extends MaterializedValueOp { def apply(arg1: Any, arg2: Any): Any = { @@ -171,29 +157,19 @@ import akka.util.unused } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class PushAttributes(attributes: Attributes) extends Traversal -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] case object PopAttributes extends Traversal -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class EnterIsland(islandTag: IslandTag) extends Traversal -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] case object ExitIsland extends Traversal -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object TraversalBuilder { // The most generic function1 and function2 (also completely useless, as we have thrown away all types) // needs to 
be casted once to be useful (pending runtime exception in cases of bugs). @@ -239,9 +215,7 @@ import akka.util.unused } } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def empty(attributes: Attributes = Attributes.none): TraversalBuilder = { if (attributes eq Attributes.none) cachedEmptyCompleted else CompletedTraversalBuilder(PushNotUsed, 0, Map.empty, attributes) @@ -274,9 +248,7 @@ import akka.util.unused builder.setAttributes(attributes) } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[impl] def printTraversal(t: Traversal, indent: Int = 0): Unit = { var current: Traversal = t @@ -307,9 +279,7 @@ import akka.util.unused } } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[impl] def printWiring(t: Traversal, baseSlot: Int = 0): Int = { var current: Traversal = t var slot = baseSlot @@ -370,9 +340,7 @@ import akka.util.unused } } - /** - * Test if a Graph is an empty Source. - * */ + /** Test if a Graph is an empty Source. */ def isEmptySource(graph: Graph[SourceShape[_], _]): Boolean = graph match { case source: scaladsl.Source[_, _] if source eq scaladsl.Source.empty => true case source: javadsl.Source[_, _] if source eq javadsl.Source.empty() => true @@ -436,14 +404,10 @@ import akka.util.unused */ def offsetOfModule(out: OutPort): Int - /** - * Returns whether the given output port has been wired in the graph or not. - */ + /** Returns whether the given output port has been wired in the graph or not. */ def isUnwired(out: OutPort): Boolean - /** - * Returns whether the given input port has been wired in the graph or not. - */ + /** Returns whether the given input port has been wired in the graph or not. */ def isUnwired(in: InPort): Boolean /** @@ -472,14 +436,10 @@ import akka.util.unused */ def inSlots: Int - /** - * Returns the Traversal if ready for this (sub)graph. - */ + /** Returns the Traversal if ready for this (sub)graph. 
*/ def traversal: Traversal = throw new IllegalStateException("Traversal can be only acquired from a completed builder") - /** - * The number of output ports that have not been wired. - */ + /** The number of output ports that have not been wired. */ def unwiredOuts: Int /** @@ -621,9 +581,7 @@ import akka.util.unused TraversalBuilder.empty().add(this, module.shape, Keep.right).makeIsland(islandTag) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object LinearTraversalBuilder { // TODO: Remove @@ -761,9 +719,7 @@ import akka.util.unused "composite builder instead and add the second module to that.") } - /** - * This builder can always return a traversal. - */ + /** This builder can always return a traversal. */ override def traversal: Traversal = { if (outPort.isDefined) throw new IllegalStateException("Traversal cannot be acquired until all output ports have been wired") @@ -880,8 +836,8 @@ import akka.util.unused if (toAppend.isEmpty) { copy(traversalSoFar = PushNotUsed.concat(LinearTraversalBuilder.addMatCompose(traversalSoFar, matCompose))) } else if (this.isEmpty) { - toAppend.copy( - traversalSoFar = toAppend.traversalSoFar.concat(LinearTraversalBuilder.addMatCompose(traversal, matCompose))) + toAppend.copy(traversalSoFar = + toAppend.traversalSoFar.concat(LinearTraversalBuilder.addMatCompose(traversal, matCompose))) } else { if (outPort.isDefined) { if (toAppend.inPort.isEmpty) @@ -1114,9 +1070,7 @@ import akka.util.unused } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @DoNotInherit private[akka] sealed trait TraversalBuildStep /** @@ -1132,9 +1086,7 @@ import akka.util.unused override def toString = s"K:$hashCode" } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class AppendTraversal(traversal: Traversal) extends TraversalBuildStep /** @@ -1197,9 +1149,7 @@ import akka.util.unused override def internalSetAttributes(attributes: Attributes): TraversalBuilder = copy(attributes = 
attributes) - /** - * Convert this builder to a [[CompletedTraversalBuilder]] if there are no more unwired outputs. - */ + /** Convert this builder to a [[CompletedTraversalBuilder]] if there are no more unwired outputs. */ def completeIfPossible: TraversalBuilder = { if (unwiredOuts == 0) { var traversal: Traversal = finalSteps @@ -1226,9 +1176,7 @@ import akka.util.unused } else this } - /** - * Assign an output port a relative slot (relative to the base input slot of its module, see [[MaterializeAtomic]]) - */ + /** Assign an output port a relative slot (relative to the base input slot of its module, see [[MaterializeAtomic]]) */ override def assign(out: OutPort, relativeSlot: Int): TraversalBuilder = { // Which module out belongs to (indirection via BuilderKey and pendingBuilders) val builderKey = outOwners(out) diff --git a/akka-stream/src/main/scala/akka/stream/impl/Unfold.scala b/akka-stream/src/main/scala/akka/stream/impl/Unfold.scala index d9cf43a606d..52bce550723 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/Unfold.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/Unfold.scala @@ -16,9 +16,7 @@ import akka.stream._ import akka.stream.impl.Stages.DefaultAttributes import akka.stream.stage.{ GraphStage, GraphStageLogic, OutHandler } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class Unfold[S, E](s: S, f: S => Option[(S, E)]) extends GraphStage[SourceShape[E]] { val out: Outlet[E] = Outlet("Unfold.out") override val shape: SourceShape[E] = SourceShape(out) @@ -39,9 +37,7 @@ import akka.stream.stage.{ GraphStage, GraphStageLogic, OutHandler } } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class UnfoldAsync[S, E](s: S, f: S => Future[Option[(S, E)]]) extends GraphStage[SourceShape[E]] { val out: Outlet[E] = Outlet("UnfoldAsync.out") diff --git a/akka-stream/src/main/scala/akka/stream/impl/UnfoldResourceSource.scala 
b/akka-stream/src/main/scala/akka/stream/impl/UnfoldResourceSource.scala index 285ebae6092..caad2cdcddc 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/UnfoldResourceSource.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/UnfoldResourceSource.scala @@ -14,9 +14,7 @@ import akka.stream.Attributes.SourceLocation import akka.stream.impl.Stages.DefaultAttributes import akka.stream.stage._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class UnfoldResourceSource[R, T]( create: () => R, readData: R => Option[T], diff --git a/akka-stream/src/main/scala/akka/stream/impl/UnfoldResourceSourceAsync.scala b/akka-stream/src/main/scala/akka/stream/impl/UnfoldResourceSourceAsync.scala index f8e2b45f043..22ad172bb12 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/UnfoldResourceSourceAsync.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/UnfoldResourceSourceAsync.scala @@ -18,9 +18,7 @@ import akka.stream.impl.Stages.DefaultAttributes import akka.stream.stage._ import akka.util.OptionVal -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class UnfoldResourceSourceAsync[R, T]( create: () => Future[R], readData: R => Future[Option[T]], @@ -45,19 +43,18 @@ import akka.util.OptionVal case Failure(t) => failStage(t) }.invokeWithFeedback _ - private val errorHandler: PartialFunction[Throwable, Unit] = { - case NonFatal(ex) => - decider(ex) match { - case Supervision.Stop => - failStage(ex) - case Supervision.Restart => - try { - restartResource() - } catch { - case NonFatal(ex) => failStage(ex) - } - case Supervision.Resume => onPull() - } + private val errorHandler: PartialFunction[Throwable, Unit] = { case NonFatal(ex) => + decider(ex) match { + case Supervision.Stop => + failStage(ex) + case Supervision.Restart => + try { + restartResource() + } catch { + case NonFatal(ex) => failStage(ex) + } + case Supervision.Resume => onPull() + } } private val readCallback = 
getAsyncCallback[Try[Option[T]]](handle).invoke _ @@ -104,7 +101,7 @@ import akka.util.OptionVal override def postStop(): Unit = maybeResource match { case OptionVal.Some(resource) => close(resource) - case _ => //do nothing + case _ => // do nothing } private def restartResource(): Unit = { diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/ActorGraphInterpreter.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/ActorGraphInterpreter.scala index f07a504ff07..eb0616e76dc 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/fusing/ActorGraphInterpreter.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/ActorGraphInterpreter.scala @@ -35,9 +35,7 @@ import akka.stream.stage.InHandler import akka.stream.stage.OutHandler import akka.util.OptionVal -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ActorGraphInterpreter { object Resume extends DeadLetterSuppression with NoSerializationVerificationNeeded @@ -356,15 +354,17 @@ import akka.util.OptionVal @volatile private var shutdownReason: OptionVal[Throwable] = OptionVal.None private def reportSubscribeFailure(subscriber: Subscriber[Any]): Unit = - try shutdownReason match { - case OptionVal.Some(_: SpecViolation) => // ok, not allowed to call onError - case OptionVal.Some(e) => - tryOnSubscribe(subscriber, CancelledSubscription) - tryOnError(subscriber, e) - case _ => - tryOnSubscribe(subscriber, CancelledSubscription) - tryOnComplete(subscriber) - } catch { + try + shutdownReason match { + case OptionVal.Some(_: SpecViolation) => // ok, not allowed to call onError + case OptionVal.Some(e) => + tryOnSubscribe(subscriber, CancelledSubscription) + tryOnError(subscriber, e) + case _ => + tryOnSubscribe(subscriber, CancelledSubscription) + tryOnComplete(subscriber) + } + catch { case _: SpecViolation => // nothing to do } @@ -484,9 +484,7 @@ import akka.util.OptionVal } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class 
GraphInterpreterShell( var connections: Array[Connection], var logics: Array[GraphStageLogic], @@ -558,13 +556,20 @@ import akka.util.OptionVal private var enqueueToShortCircuit: (Any) => Unit = _ lazy val interpreter: GraphInterpreter = - new GraphInterpreter(mat, log, logics, connections, (logic, event, promise, handler) => { - val asyncInput = AsyncInput(this, logic, event, promise, handler) - val currentInterpreter = GraphInterpreter.currentInterpreterOrNull - if (currentInterpreter == null || (currentInterpreter.context ne self)) - self ! asyncInput - else enqueueToShortCircuit(asyncInput) - }, attributes.mandatoryAttribute[ActorAttributes.FuzzingMode].enabled, self) + new GraphInterpreter( + mat, + log, + logics, + connections, + (logic, event, promise, handler) => { + val asyncInput = AsyncInput(this, logic, event, promise, handler) + val currentInterpreter = GraphInterpreter.currentInterpreterOrNull + if (currentInterpreter == null || (currentInterpreter.context ne self)) + self ! asyncInput + else enqueueToShortCircuit(asyncInput) + }, + attributes.mandatoryAttribute[ActorAttributes.FuzzingMode].enabled, + self) // TODO: really needed? private var subscribesPending = 0 @@ -654,9 +659,11 @@ import akka.util.OptionVal else { waitingForShutdown = true val subscriptionTimeout = attributes.mandatoryAttribute[ActorAttributes.StreamSubscriptionTimeout].timeout - mat.scheduleOnce(subscriptionTimeout, new Runnable { - override def run(): Unit = self ! Abort(GraphInterpreterShell.this) - }) + mat.scheduleOnce( + subscriptionTimeout, + new Runnable { + override def run(): Unit = self ! 
Abort(GraphInterpreterShell.this) + }) } } else if (interpreter.isSuspended && !resumeScheduled) sendResume(!usingShellLimit) @@ -702,17 +709,14 @@ import akka.util.OptionVal def toSnapshot: InterpreterSnapshot = { if (!isInitialized) - UninitializedInterpreterImpl(logics.zipWithIndex.map { - case (logic, idx) => - LogicSnapshotImpl(idx, logic.toString, logic.attributes) + UninitializedInterpreterImpl(logics.zipWithIndex.map { case (logic, idx) => + LogicSnapshotImpl(idx, logic.toString, logic.attributes) }.toVector) else interpreter.toSnapshot } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class ActorGraphInterpreter(_initial: GraphInterpreterShell) extends Actor with ActorLogging { @@ -738,10 +742,10 @@ import akka.util.OptionVal false } - //this limits number of messages that can be processed synchronously during one actor receive. + // this limits number of messages that can be processed synchronously during one actor receive. private val eventLimit: Int = _initial.attributes.mandatoryAttribute[ActorAttributes.SyncProcessingLimit].limit private var currentLimit: Int = eventLimit - //this is a var in order to save the allocation when no short-circuiting actually happens + // this is a var in order to save the allocation when no short-circuiting actually happens private var shortCircuitBuffer: util.ArrayDeque[Any] = null def enqueueToShortCircuit(input: Any): Unit = { diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/AggregateWithBoundary.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/AggregateWithBoundary.scala index 050fc4d0eec..c5548a2fca6 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/fusing/AggregateWithBoundary.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/AggregateWithBoundary.scala @@ -10,9 +10,7 @@ import akka.annotation.InternalApi import akka.stream.{ Attributes, FlowShape, Inlet, Outlet } import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler, 
OutHandler, TimerGraphStageLogic } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class AggregateWithBoundary[In, Agg, Out]( allocate: () => Agg, @@ -21,8 +19,8 @@ private[akka] final case class AggregateWithBoundary[In, Agg, Out]( emitOnTimer: Option[(Agg => Boolean, FiniteDuration)]) extends GraphStage[FlowShape[In, Out]] { - emitOnTimer.foreach { - case (_, interval) => require(interval.gteq(1.milli), s"timer(${interval.toCoarsest}) must not be smaller than 1ms") + emitOnTimer.foreach { case (_, interval) => + require(interval.gteq(1.milli), s"timer(${interval.toCoarsest}) must not be smaller than 1ms") } val in: Inlet[In] = Inlet[In](s"${this.getClass.getName}.in") @@ -35,14 +33,14 @@ private[akka] final case class AggregateWithBoundary[In, Agg, Out]( private[this] var aggregated: Agg = null.asInstanceOf[Agg] override def preStart(): Unit = { - emitOnTimer.foreach { - case (_, interval) => scheduleWithFixedDelay(s"${this.getClass.getSimpleName}Timer", interval, interval) + emitOnTimer.foreach { case (_, interval) => + scheduleWithFixedDelay(s"${this.getClass.getSimpleName}Timer", interval, interval) } } override protected def onTimer(timerKey: Any): Unit = { - emitOnTimer.foreach { - case (isReadyOnTimer, _) => if (aggregated != null && isReadyOnTimer(aggregated)) harvestAndEmit() + emitOnTimer.foreach { case (isReadyOnTimer, _) => + if (aggregated != null && isReadyOnTimer(aggregated)) harvestAndEmit() } } diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/FlatMapPrefix.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/FlatMapPrefix.scala index 6478c1155b9..41752127241 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/fusing/FlatMapPrefix.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/FlatMapPrefix.scala @@ -44,7 +44,7 @@ import akka.util.OptionVal setHandlers(in, out, this) override def postStop(): Unit = { - //this covers the case when the nested flow was never materialized + 
// this covers the case when the nested flow was never materialized if (!matPromise.isCompleted) { matPromise.failure(new AbruptStageTerminationException(this)) } @@ -59,7 +59,7 @@ import akka.util.OptionVal if (accumulated.size == n) { materializeFlow() } else { - //gi'me some more! + // gi'me some more! pull(in) } } @@ -76,7 +76,7 @@ import akka.util.OptionVal subSource match { case OptionVal.Some(s) => s.fail(ex) case _ => - //flow won't be materialized, so we have to complete the future with a failure indicating this + // flow won't be materialized, so we have to complete the future with a failure indicating this matPromise.failure(new NeverMaterializedException(ex)) super.onUpstreamFailure(ex) } @@ -85,12 +85,12 @@ import akka.util.OptionVal override def onPull(): Unit = { subSink match { case OptionVal.Some(s) => - //delegate to subSink + // delegate to subSink s.pull() case _ => if (accumulated.size < n) pull(in) else if (accumulated.size == n) { - //corner case for n = 0, can be handled in FlowOps + // corner case for n = 0, can be handled in FlowOps materializeFlow() } else { throw new IllegalStateException(s"Unexpected accumulated size: ${accumulated.size} (n: $n)") @@ -105,9 +105,9 @@ import akka.util.OptionVal if (propagateToNestedMaterialization) { downstreamCause = OptionVal.Some(cause) if (accumulated.size == n) { - //corner case for n = 0, can be handled in FlowOps + // corner case for n = 0, can be handled in FlowOps materializeFlow() - } else if (!hasBeenPulled(in)) { //if in was already closed, nested flow would have already been materialized + } else if (!hasBeenPulled(in)) { // if in was already closed, nested flow would have already been materialized pull(in) } } else { @@ -154,31 +154,32 @@ import akka.util.OptionVal } } } - val matVal = try { - val flow = f(prefix) - val runnableGraph = Source.fromGraph(theSubSource.source).viaMat(flow)(Keep.right).to(theSubSink.sink) - interpreter.subFusingMaterializer.materialize(runnableGraph, 
inheritedAttributes) - } catch { - case NonFatal(ex) => - matPromise.failure(new NeverMaterializedException(ex)) - subSource = OptionVal.None - subSink = OptionVal.None - throw ex - } + val matVal = + try { + val flow = f(prefix) + val runnableGraph = Source.fromGraph(theSubSource.source).viaMat(flow)(Keep.right).to(theSubSink.sink) + interpreter.subFusingMaterializer.materialize(runnableGraph, inheritedAttributes) + } catch { + case NonFatal(ex) => + matPromise.failure(new NeverMaterializedException(ex)) + subSource = OptionVal.None + subSink = OptionVal.None + throw ex + } matPromise.success(matVal) - //in case downstream was closed + // in case downstream was closed downstreamCause match { case OptionVal.Some(ex) => theSubSink.cancel(ex) case _ => } - //in case we've materialized due to upstream completion + // in case we've materialized due to upstream completion if (isClosed(in)) { theSubSource.complete() } - //in case we've been pulled by downstream + // in case we've been pulled by downstream if (isAvailable(out)) { theSubSink.pull() } diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/FutureFlow.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/FutureFlow.scala index f313cf64ff0..75c0e8b7c7b 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/fusing/FutureFlow.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/FutureFlow.scala @@ -36,7 +36,7 @@ import akka.util.OptionVal val innerMatValue = Promise[M]() val logic = new GraphStageLogic(shape) { - //seems like we must set handlers BEFORE preStart + // seems like we must set handlers BEFORE preStart setHandlers(in, out, Initializing) override def preStart(): Unit = { @@ -46,7 +46,7 @@ import akka.util.OptionVal case None => val cb = getAsyncCallback(Initializing.onFuture) futureFlow.onComplete(cb.invoke)(ExecutionContexts.parasitic) - //in case both ports are closed before future completion + // in case both ports are closed before future completion setKeepGoing(true) } } 
@@ -66,10 +66,10 @@ import akka.util.OptionVal upstreamFailure = OptionVal.Some(ex) } - //will later be propagated to the materialized flow (by examining isClosed(in)) + // will later be propagated to the materialized flow (by examining isClosed(in)) override def onUpstreamFinish(): Unit = {} - //will later be propagated to the materialized flow (by examining isAvailable(out)) + // will later be propagated to the materialized flow (by examining isAvailable(out)) override def onPull(): Unit = {} var downstreamCause = OptionVal.none[Throwable] @@ -88,7 +88,7 @@ import akka.util.OptionVal innerMatValue.failure(new NeverMaterializedException(exception)) failStage(exception) case Success(flow) => - //materialize flow, connect inlet and outlet, feed with potential events and set handlers + // materialize flow, connect inlet and outlet, feed with potential events and set handlers connect(flow) setKeepGoing(false) } @@ -123,13 +123,16 @@ import akka.util.OptionVal case OptionVal.Some(cause) => subSink.cancel(cause) case _ => if (isAvailable(out)) subSink.pull() } - setHandlers(in, out, new InHandler with OutHandler { - override def onPull(): Unit = subSink.pull() - override def onDownstreamFinish(cause: Throwable): Unit = subSink.cancel(cause) - override def onPush(): Unit = subSource.push(grab(in)) - override def onUpstreamFinish(): Unit = subSource.complete() - override def onUpstreamFailure(ex: Throwable): Unit = subSource.fail(ex) - }) + setHandlers( + in, + out, + new InHandler with OutHandler { + override def onPull(): Unit = subSink.pull() + override def onDownstreamFinish(cause: Throwable): Unit = subSink.cancel(cause) + override def onPush(): Unit = subSource.push(grab(in)) + override def onUpstreamFinish(): Unit = subSource.complete() + override def onUpstreamFailure(ex: Throwable): Unit = subSource.fail(ex) + }) } catch { case NonFatal(ex) => innerMatValue.failure(new NeverMaterializedException(ex)) diff --git 
a/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphInterpreter.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphInterpreter.scala index b80877f85b4..8ddb9ccf10d 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphInterpreter.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphInterpreter.scala @@ -26,9 +26,7 @@ import akka.stream.stage._ */ @InternalApi private[akka] object GraphInterpreter { - /** - * Compile time constant, enable it for debug logging to the console. - */ + /** Compile time constant, enable it for debug logging to the console. */ final val Debug = false final val NoEvent = null @@ -44,8 +42,8 @@ import akka.stream.stage._ final val PullStartFlip = 3 // 0011 final val PullEndFlip = 10 // 1010 - final val PushStartFlip = 12 //1100 - final val PushEndFlip = 5 //0101 + final val PushStartFlip = 12 // 1100 + final val PushEndFlip = 5 // 0101 final val KeepGoingFlag = 0x4000000 final val KeepGoingMask = 0x3ffffff @@ -113,16 +111,12 @@ import akka.stream.stage._ override def initialValue: Array[AnyRef] = new Array(1) } - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] def currentInterpreter: GraphInterpreter = _currentInterpreter.get()(0).asInstanceOf[GraphInterpreter].nonNull // nonNull is just a debug helper to find nulls more timely - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] def currentInterpreterOrNull: GraphInterpreter = _currentInterpreter.get()(0).asInstanceOf[GraphInterpreter] @@ -217,9 +211,7 @@ import akka.stream.stage._ private[this] val ChaseLimit = if (fuzzingMode) 0 else 16 - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[stream] var activeStage: GraphStageLogic = _ // The number of currently running stages. 
Once this counter reaches zero, the interpreter is considered to be @@ -261,35 +253,25 @@ import akka.stream.stage._ _Name } else _Name - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[stream] def nonNull: GraphInterpreter = this - /** - * Dynamic handler changes are communicated from a GraphStageLogic by this method. - */ + /** Dynamic handler changes are communicated from a GraphStageLogic by this method. */ def setHandler(connection: Connection, handler: InHandler): Unit = { if (Debug) println(s"$Name SETHANDLER ${inOwnerName(connection)} (in) $handler") connection.inHandler = handler } - /** - * Dynamic handler changes are communicated from a GraphStageLogic by this method. - */ + /** Dynamic handler changes are communicated from a GraphStageLogic by this method. */ def setHandler(connection: Connection, handler: OutHandler): Unit = { if (Debug) println(s"$Name SETHANDLER ${outOwnerName(connection)} (out) $handler") connection.outHandler = handler } - /** - * Returns true if there are pending unprocessed events in the event queue. - */ + /** Returns true if there are pending unprocessed events in the event queue. */ def isSuspended: Boolean = queueHead != queueTail - /** - * Returns true if there are no more running operators and pending events. - */ + /** Returns true if there are no more running operators and pending events. */ def isCompleted: Boolean = runningStages == 0 && !isSuspended /** @@ -318,9 +300,7 @@ import akka.stream.stage._ } } - /** - * Finalizes the state of all operators by calling postStop() (if necessary). - */ + /** Finalizes the state of all operators by calling postStop() (if necessary). 
*/ def finish(): Unit = { var i = 0 while (i < logics.length) { @@ -518,8 +498,8 @@ import akka.stream.stage._ } else if ((code & (OutClosed | InClosed)) == InClosed) { activeStage = connection.outOwner if (Debug) - println( - s"$Name CANCEL ${inOwnerName(connection)} -> ${outOwnerName(connection)} (${connection.outHandler}) [${outLogicName(connection)}]") + println(s"$Name CANCEL ${inOwnerName(connection)} -> ${outOwnerName( + connection)} (${connection.outHandler}) [${outLogicName(connection)}]") connection.portState |= OutClosed completeConnection(connection.outOwner.stageId) val cause = connection.slot.asInstanceOf[Cancelled].cause @@ -531,8 +511,8 @@ import akka.stream.stage._ if ((code & Pushing) == 0) { // Normal completion (no push pending) if (Debug) - println( - s"$Name COMPLETE ${outOwnerName(connection)} -> ${inOwnerName(connection)} (${connection.inHandler}) [${inLogicName(connection)}]") + println(s"$Name COMPLETE ${outOwnerName(connection)} -> ${inOwnerName( + connection)} (${connection.inHandler}) [${inLogicName(connection)}]") connection.portState |= InClosed activeStage = connection.inOwner completeConnection(connection.inOwner.stageId) @@ -550,8 +530,8 @@ import akka.stream.stage._ @InternalStableApi private def processPush(connection: Connection): Unit = { if (Debug) - println( - s"$Name PUSH ${outOwnerName(connection)} -> ${inOwnerName(connection)}, ${connection.slot} (${connection.inHandler}) [${inLogicName(connection)}]") + println(s"$Name PUSH ${outOwnerName(connection)} -> ${inOwnerName( + connection)}, ${connection.slot} (${connection.inHandler}) [${inLogicName(connection)}]") activeStage = connection.inOwner connection.portState ^= PushEndFlip connection.inHandler.onPush() @@ -560,8 +540,8 @@ import akka.stream.stage._ @InternalStableApi private def processPull(connection: Connection): Unit = { if (Debug) - println( - s"$Name PULL ${inOwnerName(connection)} -> ${outOwnerName(connection)} (${connection.outHandler}) 
[${outLogicName(connection)}]") + println(s"$Name PULL ${inOwnerName(connection)} -> ${outOwnerName( + connection)} (${connection.outHandler}) [${outLogicName(connection)}]") activeStage = connection.outOwner connection.portState ^= PullEndFlip connection.outHandler.onPull() @@ -697,9 +677,8 @@ import akka.stream.stage._ */ def toSnapshot: RunningInterpreter = { - val logicSnapshots = logics.zipWithIndex.map { - case (logic, idx) => - LogicSnapshotImpl(idx, logic.toString, logic.attributes) + val logicSnapshots = logics.zipWithIndex.map { case (logic, idx) => + LogicSnapshotImpl(idx, logic.toString, logic.attributes) } val logicIndexes = logics.zipWithIndex.map { case (stage, idx) => stage -> idx }.toMap val connectionSnapshots = connections.filter(_ != null).map { connection => diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphStages.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphStages.scala index 341c06df396..65fdd77fedd 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphStages.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphStages.scala @@ -32,9 +32,7 @@ import akka.stream.impl.StreamLayout._ import akka.stream.scaladsl._ import akka.stream.stage._ -/** - * INTERNAL API - */ +/** INTERNAL API */ // TODO: Fix variance issues @InternalApi private[akka] final case class GraphStageModule[+S <: Shape @uncheckedVariance, +M]( shape: S, @@ -51,14 +49,10 @@ import akka.stream.stage._ override def toString: String = f"GraphStage($stage) [${System.identityHashCode(this)}%08x]" } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object GraphStages { - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] abstract class SimpleLinearGraphStage[T] extends GraphStage[FlowShape[T, T]] { val in = Inlet[T](Logging.simpleName(this) + ".in") val out = Outlet[T](Logging.simpleName(this) + ".out") @@ -82,9 +76,7 @@ import akka.stream.stage._ def identity[T] = 
Identity.asInstanceOf[SimpleLinearGraphStage[T]] - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] object Detacher extends SimpleLinearGraphStage[Any] { override def initialAttributes = DefaultAttributes.detacher @@ -133,37 +125,39 @@ import akka.stream.stage._ override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Future[Done]) = { val finishPromise = Promise[Done]() - (new GraphStageLogic(shape) with InHandler with OutHandler { - def onPush(): Unit = push(out, grab(in)) + ( + new GraphStageLogic(shape) with InHandler with OutHandler { + def onPush(): Unit = push(out, grab(in)) - override def onUpstreamFinish(): Unit = { - finishPromise.success(Done) - completeStage() - } + override def onUpstreamFinish(): Unit = { + finishPromise.success(Done) + completeStage() + } - override def onUpstreamFailure(ex: Throwable): Unit = { - finishPromise.failure(ex) - failStage(ex) - } + override def onUpstreamFailure(ex: Throwable): Unit = { + finishPromise.failure(ex) + failStage(ex) + } - def onPull(): Unit = pull(in) + def onPull(): Unit = pull(in) - override def onDownstreamFinish(cause: Throwable): Unit = { - cause match { - case _: SubscriptionWithCancelException.NonFailureCancellation => - finishPromise.success(Done) - case ex => - finishPromise.failure(ex) + override def onDownstreamFinish(cause: Throwable): Unit = { + cause match { + case _: SubscriptionWithCancelException.NonFailureCancellation => + finishPromise.success(Done) + case ex => + finishPromise.failure(ex) + } + cancelStage(cause) } - cancelStage(cause) - } - override def postStop(): Unit = { - if (!finishPromise.isCompleted) finishPromise.failure(new AbruptStageTerminationException(this)) - } + override def postStop(): Unit = { + if (!finishPromise.isCompleted) finishPromise.failure(new AbruptStageTerminationException(this)) + } - setHandlers(in, out, this) - }, finishPromise.future) + setHandlers(in, out, this) + }, + 
finishPromise.future) } override def toString = "TerminationWatcher" @@ -411,11 +405,10 @@ import akka.stream.stage._ } } - .recover { - case t => - sinkIn.cancel() - materialized.failure(t) - failStage(t) + .recover { case t => + sinkIn.cancel() + materialized.failure(t) + failStage(t) } } } diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/MapAsyncPartitioned.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/MapAsyncPartitioned.scala index 70b78c1308a..0da5e218c4c 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/fusing/MapAsyncPartitioned.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/MapAsyncPartitioned.scala @@ -20,9 +20,7 @@ import akka.stream.impl.PartitionedBuffer import akka.stream.impl.Stages.DefaultAttributes import akka.stream.stage._ -/** - * Internal API - */ +/** Internal API */ @InternalApi private[akka] object MapAsyncPartitioned { final class Holder[P, I, O]( @@ -51,9 +49,7 @@ private[akka] object MapAsyncPartitioned { val NotYetThere = MapAsync.NotYetThere } -/** - * Internal API - */ +/** Internal API */ @InternalApi private[akka] final case class MapAsyncPartitioned[In, Out, Partition]( parallelism: Int, diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/Ops.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/Ops.scala index 92c4ddbd415..652e18c12f1 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/fusing/Ops.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/Ops.scala @@ -27,7 +27,7 @@ import akka.stream.Attributes.{ InputBuffer, LogLevels } import akka.stream.Attributes.SourceLocation import akka.stream.OverflowStrategies._ import akka.stream.Supervision.Decider -import akka.stream.impl.{ ContextPropagation, ReactiveStreamsCompliance, Buffer => BufferImpl } +import akka.stream.impl.{ Buffer => BufferImpl, ContextPropagation, ReactiveStreamsCompliance } import akka.stream.impl.Stages.DefaultAttributes import akka.stream.impl.TraversalBuilder import 
akka.stream.impl.fusing.GraphStages.SimpleLinearGraphStage @@ -38,9 +38,7 @@ import akka.util.ccompat._ // This file is perhaps getting long (Github Issue #31619), please add new operators in other files -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class Map[In, Out](f: In => Out) extends GraphStage[FlowShape[In, Out]] { val in = Inlet[In]("Map.in") val out = Outlet[Out]("Map.out") @@ -71,9 +69,7 @@ import akka.util.ccompat._ } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class Filter[T](p: T => Boolean) extends SimpleLinearGraphStage[T] { override def initialAttributes: Attributes = DefaultAttributes.filter and SourceLocation.forLambda(p) @@ -97,7 +93,8 @@ import akka.util.ccompat._ } else { buffer = OptionVal.Some(elem) contextPropagation.suspendContext() - } else pull(in) + } + else pull(in) } catch { case NonFatal(ex) => decider(ex) match { @@ -125,9 +122,7 @@ import akka.util.ccompat._ } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class TakeWhile[T](p: T => Boolean, inclusive: Boolean = false) extends SimpleLinearGraphStage[T] { override def initialAttributes: Attributes = DefaultAttributes.takeWhile and SourceLocation.forLambda(p) @@ -164,9 +159,7 @@ import akka.util.ccompat._ } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class DropWhile[T](p: T => Boolean) extends SimpleLinearGraphStage[T] { override def initialAttributes: Attributes = DefaultAttributes.dropWhile and SourceLocation.forLambda(p) @@ -199,9 +192,7 @@ import akka.util.ccompat._ override def toString = "DropWhile" } -/** - * INTERNAL API - */ +/** INTERNAL API */ @DoNotInherit private[akka] abstract class SupervisedGraphStageLogic(inheritedAttributes: Attributes, shape: Shape) extends GraphStageLogic(shape) { private lazy val decider = inheritedAttributes.mandatoryAttribute[SupervisionStrategy].decider @@ -233,9 +224,7 @@ 
private[stream] object Collect { final val NotApplied: Any => Any = _ => Collect.NotApplied } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class Collect[In, Out](pf: PartialFunction[In, Out]) extends GraphStage[FlowShape[In, Out]] { val in = Inlet[In]("Collect.in") @@ -256,9 +245,9 @@ private[stream] object Collect { result match { case NotApplied => pull(in) case result: Out @unchecked => push(out, result) - case _ => throw new RuntimeException() // won't happen, compiler exhaustiveness check pleaser + case _ => throw new RuntimeException() // won't happen, compiler exhaustiveness check pleaser } - case _ => //do nothing + case _ => // do nothing } override def onResume(t: Throwable): Unit = if (!hasBeenPulled(in)) pull(in) @@ -271,9 +260,7 @@ private[stream] object Collect { override def toString = "Collect" } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class Recover[T](pf: PartialFunction[Throwable, T]) extends SimpleLinearGraphStage[T] { override protected def initialAttributes: Attributes = DefaultAttributes.recover and SourceLocation.forLambda(pf) @@ -295,18 +282,20 @@ private[stream] object Collect { } override def onUpstreamFailure(ex: Throwable): Unit = - try pf.applyOrElse(ex, NotApplied) match { - case NotApplied => failStage(ex) - case result: T @unchecked => - ReactiveStreamsCompliance.requireNonNullElement(result) - if (isAvailable(out)) { - push(out, result) - completeStage() - } else { - recovered = OptionVal.Some(result) - } - case _ => throw new IllegalStateException() // won't happen, compiler exhaustiveness check pleaser - } catch { + try + pf.applyOrElse(ex, NotApplied) match { + case NotApplied => failStage(ex) + case result: T @unchecked => + ReactiveStreamsCompliance.requireNonNullElement(result) + if (isAvailable(out)) { + push(out, result) + completeStage() + } else { + recovered = OptionVal.Some(result) + } + case _ => throw new IllegalStateException() // 
won't happen, compiler exhaustiveness check pleaser + } + catch { case NonFatal(ex) => failStage(ex) } @@ -343,9 +332,7 @@ private[stream] object Collect { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class Take[T](count: Long) extends SimpleLinearGraphStage[T] { override def initialAttributes: Attributes = DefaultAttributes.take @@ -372,9 +359,7 @@ private[stream] object Collect { override def toString: String = "Take" } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class Drop[T](count: Long) extends SimpleLinearGraphStage[T] { override def initialAttributes: Attributes = DefaultAttributes.drop @@ -397,9 +382,7 @@ private[stream] object Collect { override def toString: String = "Drop" } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class Scan[In, Out](zero: Out, f: (Out, In) => Out) extends GraphStage[FlowShape[In, Out]] { override val shape = FlowShape[In, Out](Inlet("Scan.in"), Outlet("Scan.out")) @@ -410,7 +393,6 @@ private[stream] object Collect { override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler { self => - private var aggregator = zero private lazy val decider = inheritedAttributes.mandatoryAttribute[SupervisionStrategy].decider @@ -424,12 +406,14 @@ private[stream] object Collect { new InHandler with OutHandler { override def onPush(): Unit = () override def onUpstreamFinish(): Unit = - setHandler(out, new OutHandler { - override def onPull(): Unit = { - push(out, aggregator) - completeStage() - } - }) + setHandler( + out, + new OutHandler { + override def onPull(): Unit = { + push(out, aggregator) + completeStage() + } + }) override def onPull(): Unit = { push(out, aggregator) setHandlers(in, out, self) @@ -456,9 +440,7 @@ private[stream] object Collect { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class 
ScanAsync[In, Out](zero: Out, f: (Out, In) => Future[Out]) extends GraphStage[FlowShape[In, Out]] { @@ -474,7 +456,6 @@ private[stream] object Collect { override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler { self => - private var current: Out = zero private var elementHandled: Boolean = false @@ -491,12 +472,14 @@ private[stream] object Collect { } override def onUpstreamFinish(): Unit = - setHandler(out, new OutHandler { - override def onPull(): Unit = { - push(out, current) - completeStage() - } - }) + setHandler( + out, + new OutHandler { + override def onPull(): Unit = { + push(out, current) + completeStage() + } + }) } private def onRestart(): Unit = { @@ -578,9 +561,7 @@ private[stream] object Collect { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class Fold[In, Out](zero: Out, f: (Out, In) => Out) extends GraphStage[FlowShape[In, Out]] { @@ -635,9 +616,7 @@ private[stream] object Collect { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class FoldAsync[In, Out](zero: Out, f: (Out, In) => Future[Out]) extends GraphStage[FlowShape[In, Out]] { @@ -732,9 +711,7 @@ private[stream] object Collect { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class Intersperse[T](start: Option[T], inject: T, end: Option[T]) extends SimpleLinearGraphStage[T] { ReactiveStreamsCompliance.requireNonNullElement(inject) @@ -772,9 +749,7 @@ private[stream] object Collect { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class GroupedWeighted[T](minWeight: Long, costFn: T => Long) extends GraphStage[FlowShape[T, immutable.Seq[T]]] { require(minWeight > 0, "minWeight must be greater than 0") @@ -829,9 +804,7 @@ private[stream] object Collect { } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class 
LimitWeighted[T](val n: Long, val costFn: T => Long) extends SimpleLinearGraphStage[T] { override def initialAttributes: Attributes = DefaultAttributes.limitWeighted and SourceLocation.forLambda(costFn) @@ -846,7 +819,7 @@ private[stream] object Collect { case OptionVal.Some(weight) => left -= weight if (left >= 0) push(out, elem) else failStage(new StreamLimitReachedException(n)) - case _ => //do nothing + case _ => // do nothing } } @@ -865,9 +838,7 @@ private[stream] object Collect { override def toString = "LimitWeighted" } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class Sliding[T](val n: Int, val step: Int) extends GraphStage[FlowShape[T, immutable.Seq[T]]] { require(n > 0, "n must be greater than 0") @@ -925,9 +896,7 @@ private[stream] object Collect { } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class Buffer[T](size: Int, overflowStrategy: OverflowStrategy) extends SimpleLinearGraphStage[T] { @@ -1040,9 +1009,7 @@ private[stream] object Collect { } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class Batch[In, Out]( val max: Long, val costFn: In => Long, @@ -1175,9 +1142,7 @@ private[stream] object Collect { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class Expand[In, Out](val extrapolate: In => Iterator[Out]) extends GraphStage[FlowShape[In, Out]] { private val in = Inlet[In]("expand.in") @@ -1233,9 +1198,7 @@ private[stream] object Collect { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object MapAsync { final class Holder[T](var elem: Try[T], val cb: AsyncCallback[Holder[T]]) extends (Try[T] => Unit) { @@ -1267,9 +1230,7 @@ private[stream] object Collect { val NotYetThere = Failure(new Exception with NoStackTrace) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class MapAsync[In, Out](parallelism: Int, f: In => 
Future[Out]) extends GraphStage[FlowShape[In, Out]] { @@ -1318,7 +1279,7 @@ private[stream] object Collect { v match { // this optimization also requires us to stop the stage to fail fast if the decider says so: case Failure(ex) if holder.supervisionDirectiveFor(decider, ex) == Supervision.Stop => failStage(ex) - case _ => pushNextIfPossible() + case _ => pushNextIfPossible() } } @@ -1374,9 +1335,7 @@ private[stream] object Collect { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class MapAsyncUnordered[In, Out](parallelism: Int, f: In => Future[Out]) extends GraphStage[FlowShape[In, Out]] { @@ -1479,9 +1438,7 @@ private[stream] object Collect { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class Log[T](name: String, extract: T => Any, logAdapter: Option[LoggingAdapter]) extends SimpleLinearGraphStage[T] { @@ -1566,9 +1523,7 @@ private[stream] object Collect { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object Log { /** @@ -1595,9 +1550,7 @@ private[stream] object Collect { LogLevels(onElement = Logging.DebugLevel, onFinish = Logging.DebugLevel, onFailure = Logging.ErrorLevel) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class LogWithMarker[T]( name: String, marker: T => LogMarker, @@ -1686,9 +1639,7 @@ private[stream] object Collect { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object LogWithMarker { /** @@ -1719,9 +1670,7 @@ private[stream] object Collect { val groupedWeightedWithinTimer = "GroupedWeightedWithinTimer" } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class GroupedWeightedWithin[T]( val maxWeight: Long, val maxNumber: Int, @@ -1868,17 +1817,13 @@ private[stream] object Collect { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi object Delay { private val TimerName = "DelayedTimer" private val 
DelayPrecisionMS = 10 } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class Delay[T]( delayStrategySupplier: () => DelayStrategy[T], overflowStrategy: DelayOverflowStrategy) @@ -2008,17 +1953,13 @@ private[stream] object Collect { override def toString = "Delay" } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object TakeWithin { val takeWithinTimer = "TakeWithinTimer" } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class TakeWithin[T](val timeout: FiniteDuration) extends SimpleLinearGraphStage[T] { override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = @@ -2037,9 +1978,7 @@ private[akka] object TakeWithin { override def toString = "TakeWithin" } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class DropWithin[T](val timeout: FiniteDuration) extends SimpleLinearGraphStage[T] { override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler { @@ -2053,9 +1992,11 @@ private[akka] object TakeWithin { } else { push(out, grab(in)) // change the in handler to avoid System.nanoTime call after timeout - setHandler(in, new InHandler { - def onPush() = push(out, grab(in)) - }) + setHandler( + in, + new InHandler { + def onPush() = push(out, grab(in)) + }) } } @@ -2068,9 +2009,7 @@ private[akka] object TakeWithin { override def toString = "DropWithin" } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class Reduce[T](val f: (T, T) => T) extends SimpleLinearGraphStage[T] { override def initialAttributes: Attributes = DefaultAttributes.reduce and SourceLocation.forLambda(f) @@ -2086,16 +2025,18 @@ private[akka] object TakeWithin { def setInitialInHandler(): Unit = { // Initial input handler - setHandler(in, new InHandler { - override def onPush(): Unit = { - aggregator = grab(in) - pull(in) - setHandler(in, self) - } + 
setHandler( + in, + new InHandler { + override def onPush(): Unit = { + aggregator = grab(in) + pull(in) + setHandler(in, self) + } - override def onUpstreamFinish(): Unit = - failStage(new NoSuchElementException("reduce over empty stream")) - }) + override def onUpstreamFinish(): Unit = + failStage(new NoSuchElementException("reduce over empty stream")) + }) } @nowarn // compiler complaining about aggregator = _: T @@ -2132,9 +2073,7 @@ private[akka] object TakeWithin { override def toString = "Reduce" } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[stream] object RecoverWith @InternalApi private[akka] final class RecoverWith[T, M]( @@ -2194,17 +2133,13 @@ private[akka] object TakeWithin { override def toString: String = "RecoverWith" } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object StatefulMap { private final class NullStateException(msg: String) extends NullPointerException(msg) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class StatefulMap[S, In, Out](create: () => S, f: (S, In) => (S, Out), onComplete: S => Option[Out]) extends GraphStage[FlowShape[In, Out]] { @@ -2312,9 +2247,7 @@ private[akka] final class StatefulMap[S, In, Out](create: () => S, f: (S, In) => override def toString = "StatefulMap" } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi @ccompatUsedUntil213 private[akka] final class StatefulMapConcat[In, Out](val f: () => In => IterableOnce[Out]) @@ -2361,20 +2294,19 @@ private[akka] final class StatefulMapConcat[In, Out](val f: () => In => Iterable try pushPull(shouldResumeContext = true) catch handleException - private def handleException: Catcher[Unit] = { - case NonFatal(ex) => - decider(ex) match { - case Supervision.Stop => failStage(ex) - case Supervision.Resume => - if (isClosed(in)) completeStage() - else if (!hasBeenPulled(in)) pull(in) - case Supervision.Restart => - if (isClosed(in)) completeStage() - else { - restartState() - if 
(!hasBeenPulled(in)) pull(in) - } - } + private def handleException: Catcher[Unit] = { case NonFatal(ex) => + decider(ex) match { + case Supervision.Stop => failStage(ex) + case Supervision.Resume => + if (isClosed(in)) completeStage() + else if (!hasBeenPulled(in)) pull(in) + case Supervision.Restart => + if (isClosed(in)) completeStage() + else { + restartState() + if (!hasBeenPulled(in)) pull(in) + } + } } private def restartState(): Unit = { diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/StreamOfStreams.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/StreamOfStreams.scala index 5245629b358..070e4e58b1f 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/fusing/StreamOfStreams.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/StreamOfStreams.scala @@ -30,9 +30,7 @@ import akka.stream.stage._ import akka.util.OptionVal import akka.util.ccompat.JavaConverters._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class FlattenMerge[T, M](val breadth: Int) extends GraphStage[FlowShape[Graph[SourceShape[T], M], T]] { private val in = Inlet[Graph[SourceShape[T], M]]("flatten.in") @@ -76,12 +74,14 @@ import akka.util.ccompat.JavaConverters._ override def onUpstreamFinish(): Unit = if (activeSources == 0) completeStage() override def onPull(): Unit = { pull(in) - setHandler(out, new OutHandler { - override def onPull(): Unit = { - // could be unavailable due to async input having been executed before this notification - if (queue.nonEmpty && isAvailable(out)) pushOut() - } - }) + setHandler( + out, + new OutHandler { + override def onPull(): Unit = { + // could be unavailable due to async input having been executed before this notification + if (queue.nonEmpty && isAvailable(out)) pushOut() + } + }) } setHandlers(in, out, this) @@ -137,9 +137,7 @@ import akka.util.ccompat.JavaConverters._ override def toString: String = s"FlattenMerge($breadth)" } -/** - * INTERNAL API - */ +/** INTERNAL API */ 
@InternalApi private[akka] final class PrefixAndTail[T](val n: Int) extends GraphStage[FlowShape[T, (immutable.Seq[T], Source[T, NotUsed])]] { val in: Inlet[T] = Inlet("PrefixAndTail.in") @@ -253,9 +251,7 @@ import akka.util.ccompat.JavaConverters._ override def toString: String = s"PrefixAndTail($n)" } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class GroupBy[T, K]( val maxSubstreams: Int, val keyFor: T => K, @@ -269,7 +265,6 @@ import akka.util.ccompat.JavaConverters._ override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new TimerGraphStageLogic(shape) with OutHandler with InHandler { parent => - lazy val decider = inheritedAttributes.mandatoryAttribute[SupervisionStrategy].decider private val activeSubstreamsMap = new java.util.HashMap[Any, SubstreamSource]() private val closedSubstreams = @@ -447,9 +442,7 @@ import akka.util.ccompat.JavaConverters._ } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object Split { sealed abstract class SplitDecision @@ -470,9 +463,7 @@ import akka.util.ccompat.JavaConverters._ new Split(Split.SplitAfter, p, substreamCancelStrategy) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class Split[T]( val decision: Split.SplitDecision, val p: T => Boolean, @@ -507,7 +498,7 @@ import akka.util.ccompat.JavaConverters._ new OutHandler { override def onPull(): Unit = { if (substreamSource eq null) { - //can be already pulled from substream in case split after + // can be already pulled from substream in case split after if (!hasBeenPulled(in)) pull(in) } else if (substreamWaitingToBePushed) pushSubstreamSource() } @@ -646,9 +637,7 @@ import akka.util.ccompat.JavaConverters._ } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[stream] object SubSink { sealed trait State @@ -675,9 +664,7 @@ import akka.util.ccompat.JavaConverters._ case class Cancel(cause: Throwable) extends Command } -/** - * INTERNAL 
API - */ +/** INTERNAL API */ @InternalApi private[stream] final class SubSink[T](name: String, externalCallback: ActorSubscriberMessage => Unit) extends GraphStage[SinkShape[T]] { import SubSink._ @@ -764,9 +751,7 @@ import akka.util.ccompat.JavaConverters._ override def toString: String = name } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class SubSource[T]( name: String, private[fusing] val externalCallback: AsyncCallback[SubSink.Command]) @@ -791,7 +776,7 @@ import akka.util.ccompat.JavaConverters._ status.get.asInstanceOf[AsyncCallback[Any]].invoke(ActorSubscriberMessage.OnComplete) case OnError(_) => // already failed out, keep the exception as that happened first case ActorSubscriberMessage.OnComplete => // it was already completed - case _ => throw new RuntimeException() // won't happen, compiler exhaustiveness check pleaser + case _ => throw new RuntimeException() // won't happen, compiler exhaustiveness check pleaser } def failSubstream(ex: Throwable): Unit = status.get match { @@ -802,7 +787,7 @@ import akka.util.ccompat.JavaConverters._ status.get.asInstanceOf[AsyncCallback[Any]].invoke(failure) case ActorSubscriberMessage.OnComplete => // it was already completed, ignore failure as completion happened first case OnError(_) => // already failed out, keep the exception as that happened first - case _ => throw new RuntimeException() // won't happen, compiler exhaustiveness check pleaser + case _ => throw new RuntimeException() // won't happen, compiler exhaustiveness check pleaser } def timeout(d: FiniteDuration): Boolean = diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/ByteStringParser.scala b/akka-stream/src/main/scala/akka/stream/impl/io/ByteStringParser.scala index 7522d424c18..759b016b473 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/ByteStringParser.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/ByteStringParser.scala @@ -12,9 +12,7 @@ import akka.stream._ import 
akka.stream.stage._ import akka.util.ByteString -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] abstract class ByteStringParser[T] extends GraphStage[FlowShape[ByteString, T]] { import ByteStringParser._ @@ -142,9 +140,7 @@ import akka.util.ByteString } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ByteStringParser { val CompactionThreshold = 16 @@ -210,15 +206,15 @@ import akka.util.ByteString if (off < input.length) { val x = input(off) off += 1 - x & 0xFF + x & 0xff } else throw NeedMoreData def readShortLE(): Int = readByte() | (readByte() << 8) def readIntLE(): Int = readShortLE() | (readShortLE() << 16) - def readLongLE(): Long = (readIntLE() & 0XFFFFFFFFL) | ((readIntLE() & 0XFFFFFFFFL) << 32) + def readLongLE(): Long = (readIntLE() & 0xffffffffL) | ((readIntLE() & 0xffffffffL) << 32) def readShortBE(): Int = (readByte() << 8) | readByte() def readIntBE(): Int = (readShortBE() << 16) | readShortBE() - def readLongBE(): Long = ((readIntBE() & 0XFFFFFFFFL) << 32) | (readIntBE() & 0XFFFFFFFFL) + def readLongBE(): Long = ((readIntBE() & 0xffffffffL) << 32) | (readIntBE() & 0xffffffffL) def skip(numBytes: Int): Unit = if (off + numBytes <= input.length) off += numBytes diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/FileOutputStage.scala b/akka-stream/src/main/scala/akka/stream/impl/io/FileOutputStage.scala index 8c909404354..bee08084675 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/FileOutputStage.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/FileOutputStage.scala @@ -26,9 +26,7 @@ import akka.stream.stage.{ GraphStageLogic, GraphStageWithMaterializedValue, InH import akka.util.ByteString import akka.util.ccompat.JavaConverters._ -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class FileOutputStage(path: Path, startPosition: Long, openOptions: immutable.Set[OpenOption]) extends 
GraphStageWithMaterializedValue[SinkShape[ByteString], Future[IOResult]] { diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/IOSources.scala b/akka-stream/src/main/scala/akka/stream/impl/io/IOSources.scala index 861c658db96..3b4575da9d8 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/IOSources.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/IOSources.scala @@ -19,9 +19,7 @@ import akka.stream.Attributes.InputBuffer import akka.stream.stage._ import akka.util.ByteString -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] object FileSource { val completionHandler = new CompletionHandler[Integer, Try[Int] => Unit] { @@ -82,7 +80,7 @@ private[akka] final class FileSource(path: Path, chunkSize: Int, startPosition: override def onPull(): Unit = { if (availableChunks.size < maxReadAhead && !eofEncountered) availableChunks = readAhead(maxReadAhead, availableChunks) - //if already read something and try + // if already read something and try if (availableChunks.nonEmpty) { emitMultiple(out, availableChunks.iterator, () => if (eofEncountered) success() else setHandler(out, handler)) availableChunks = Vector.empty[ByteString] @@ -97,13 +95,14 @@ private[akka] final class FileSource(path: Path, chunkSize: Int, startPosition: /** BLOCKING I/O READ */ @tailrec def readAhead(maxChunks: Int, chunks: Vector[ByteString]): Vector[ByteString] = if (chunks.size < maxChunks && !eofEncountered) { - val readBytes = try channel.read(buffer, position) - catch { - case NonFatal(ex) => - failStage(ex) - ioResultPromise.trySuccess(IOResult(position, Failure(ex))) - throw ex - } + val readBytes = + try channel.read(buffer, position) + catch { + case NonFatal(ex) => + failStage(ex) + ioResultPromise.trySuccess(IOResult(position, Failure(ex))) + throw ex + } if (readBytes > 0) { buffer.flip() diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamSinkStage.scala b/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamSinkStage.scala 
index 5c3abb50cbc..53cc252af03 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamSinkStage.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamSinkStage.scala @@ -37,9 +37,7 @@ private[stream] object InputStreamSinkStage { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi final private[stream] class InputStreamSinkStage(readTimeout: FiniteDuration) extends GraphStageWithMaterializedValue[SinkShape[ByteString], InputStream] { @@ -75,7 +73,7 @@ private[stream] object InputStreamSinkStage { } def onPush(): Unit = { - //1 is buffer for Finished or Failed callback + // 1 is buffer for Finished or Failed callback require(dataQueue.remainingCapacity() > 1) val bs = grab(in) if (bs.nonEmpty) { @@ -221,8 +219,8 @@ private[stream] object InputStreamSinkStage { if (!isInitialized) { sharedBuffer.poll(readTimeout.toMillis, TimeUnit.MILLISECONDS) match { case Initialized => isInitialized = true - case null => throw new IOException(s"Timeout after $readTimeout waiting for Initialized message from stage") - case entry => require(false, s"First message must be Initialized notification, got $entry") + case null => throw new IOException(s"Timeout after $readTimeout waiting for Initialized message from stage") + case entry => require(false, s"First message must be Initialized notification, got $entry") } } } diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamSource.scala b/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamSource.scala index c78f2253b7e..adebc390ce4 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamSource.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamSource.scala @@ -23,9 +23,7 @@ import akka.stream.impl.Stages.DefaultAttributes import akka.stream.stage.{ GraphStageLogic, GraphStageLogicWithLogging, GraphStageWithMaterializedValue, OutHandler } import akka.util.ByteString -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi 
private[akka] final class InputStreamSource(factory: () => InputStream, chunkSize: Int) extends GraphStageWithMaterializedValue[SourceShape[ByteString], Future[IOResult]] { diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/OutputStreamGraphStage.scala b/akka-stream/src/main/scala/akka/stream/impl/io/OutputStreamGraphStage.scala index da4057534f0..6478c9bf0ca 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/OutputStreamGraphStage.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/OutputStreamGraphStage.scala @@ -15,9 +15,7 @@ import akka.stream.impl.Stages.DefaultAttributes import akka.stream.stage.{ GraphStageLogic, GraphStageLogicWithLogging, GraphStageWithMaterializedValue, InHandler } import akka.util.ByteString -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class OutputStreamGraphStage(factory: () => OutputStream, autoFlush: Boolean) extends GraphStageWithMaterializedValue[SinkShape[ByteString], Future[IOResult]] { diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/TLSActor.scala b/akka-stream/src/main/scala/akka/stream/impl/io/TLSActor.scala index b6ad35b4ebd..843781d285f 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/TLSActor.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/TLSActor.scala @@ -25,9 +25,7 @@ import akka.stream.impl.fusing.ActorGraphInterpreter import akka.stream.snapshot.StreamSnapshotImpl import akka.util.ByteString -/** - * INTERNAL API. - */ +/** INTERNAL API. */ @InternalApi private[stream] object TLSActor { def props( @@ -45,9 +43,7 @@ import akka.util.ByteString final val UserIn = 1 } -/** - * INTERNAL API. - */ +/** INTERNAL API. */ @InternalApi private[stream] class TLSActor( maxInputBufferSize: Int, createSSLEngine: () => SSLEngine, @@ -83,9 +79,7 @@ import akka.util.ByteString private var buffer = ByteString.empty - /** - * Whether there are no bytes lying on this chopping block. 
- */ + /** Whether there are no bytes lying on this chopping block. */ def isEmpty: Boolean = buffer.isEmpty /** @@ -129,9 +123,7 @@ import akka.util.ByteString prepare(b) } - /** - * Prepare a fresh ByteBuffer for receiving a chop of data. - */ + /** Prepare a fresh ByteBuffer for receiving a chop of data. */ def prepare(b: ByteBuffer): Unit = { b.clear() b.limit(0) @@ -371,7 +363,7 @@ import akka.util.ByteString if (tracing) log.debug( s"wrap: status=${result.getStatus} handshake=$lastHandshakeStatus remaining=${userInBuffer.remaining} out=${transportOutBuffer - .position()}") + .position()}") if (lastHandshakeStatus == FINISHED) handshakeFinished() runDelegatedTasks() @@ -405,7 +397,7 @@ import akka.util.ByteString if (tracing) log.debug( s"unwrap: status=${result.getStatus} handshake=$lastHandshakeStatus remaining=${transportInBuffer.remaining} out=${userOutBuffer - .position()}") + .position()}") runDelegatedTasks() result.getStatus match { case OK => @@ -476,10 +468,10 @@ import akka.util.ByteString } } - override def receive = inputBunch.subreceive.orElse[Any, Unit](outputBunch.subreceive).orElse { - case ActorGraphInterpreter.Snapshot => + override def receive = + inputBunch.subreceive.orElse[Any, Unit](outputBunch.subreceive).orElse { case ActorGraphInterpreter.Snapshot => sender() ! 
StreamSnapshotImpl(self.path, Seq.empty, Seq.empty) - } + } initialPhase(2, bidirectional) @@ -520,9 +512,7 @@ import akka.util.ByteString } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object TlsUtils { def applySessionParameters(engine: SSLEngine, sessionParameters: NegotiateNewSession): Unit = { sessionParameters.enabledCipherSuites.foreach(cs => engine.setEnabledCipherSuites(cs.toArray)) diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/TcpStages.scala b/akka-stream/src/main/scala/akka/stream/impl/io/TcpStages.scala index b0e978df601..2fe0f67869c 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/TcpStages.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/TcpStages.scala @@ -23,15 +23,13 @@ import akka.io.Tcp._ import akka.stream._ import akka.stream.impl.ReactiveStreamsCompliance import akka.stream.impl.fusing.GraphStages.detacher -import akka.stream.scaladsl.{ BidiFlow, Flow, TcpIdleTimeoutException, Tcp => StreamTcp } +import akka.stream.scaladsl.{ BidiFlow, Flow, Tcp => StreamTcp, TcpIdleTimeoutException } import akka.stream.scaladsl.Tcp.{ OutgoingConnection, ServerBinding } import akka.stream.scaladsl.TcpAttributes import akka.stream.stage._ import akka.util.ByteString -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[stream] class ConnectionSourceStage( val tcpManager: ActorRef, val endpoint: InetSocketAddress, @@ -40,7 +38,9 @@ import akka.util.ByteString val halfClose: Boolean, val idleTimeout: Duration, val bindShutdownTimeout: FiniteDuration) - extends GraphStageWithMaterializedValue[SourceShape[StreamTcp.IncomingConnection], Future[StreamTcp.ServerBinding]] { + extends GraphStageWithMaterializedValue[ + SourceShape[StreamTcp.IncomingConnection], + Future[StreamTcp.ServerBinding]] { import ConnectionSourceStage._ val out: Outlet[StreamTcp.IncomingConnection] = Outlet("IncomingConnections.out") @@ -77,16 +77,19 @@ import akka.util.ByteString stageActor.watch(listener) if 
(isAvailable(out)) listener ! ResumeAccepting(1) val thisStage = self - bindingPromise.success(ServerBinding(localAddress)(() => { - // To allow unbind() to be invoked multiple times with minimal chance of dead letters, we check if - // it's already unbound before sending the message. - if (!unbindPromise.isCompleted) { - // Beware, sender must be explicit since stageActor.ref will be invalid to access after the stage - // stopped. - thisStage.tell(Unbind, thisStage) - } - unbindPromise.future - }, unbindPromise.future.map(_ => Done)(ExecutionContexts.parasitic))) + bindingPromise.success( + ServerBinding(localAddress)( + () => { + // To allow unbind() to be invoked multiple times with minimal chance of dead letters, we check if + // it's already unbound before sending the message. + if (!unbindPromise.isCompleted) { + // Beware, sender must be explicit since stageActor.ref will be invalid to access after the stage + // stopped. + thisStage.tell(Unbind, thisStage) + } + unbindPromise.future + }, + unbindPromise.future.map(_ => Done)(ExecutionContexts.parasitic))) case f: CommandFailed => val ex = new BindFailedException { // cannot modify the actual exception class for compatibility reasons @@ -207,9 +210,7 @@ private[stream] object ConnectionSourceStage { val BindShutdownTimer = "BindTimer" } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[stream] object TcpConnectionStage { case object WriteAck extends Tcp.Event @@ -497,9 +498,7 @@ private[stream] object ConnectionSourceStage { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class IncomingConnectionStage( connection: ActorRef, remoteAddress: InetSocketAddress, @@ -535,9 +534,7 @@ private[stream] object ConnectionSourceStage { override def toString = s"TCP-from($remoteAddress)" } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[stream] class OutgoingConnectionStage( manager: ActorRef, remoteAddress: InetSocketAddress, @@ -596,11 +593,10 @@ 
private[stream] object ConnectionSourceStage { val toNetTimeout: BidiFlow[ByteString, ByteString, ByteString, ByteString, NotUsed] = BidiFlow.fromFlows( - Flow[ByteString].mapError { - case _: TimeoutException => - new TcpIdleTimeoutException( - s"TCP idle-timeout encountered$connectionToString, no bytes passed in the last $idleTimeout", - idleTimeout) + Flow[ByteString].mapError { case _: TimeoutException => + new TcpIdleTimeoutException( + s"TCP idle-timeout encountered$connectionToString, no bytes passed in the last $idleTimeout", + idleTimeout) }, Flow[ByteString]) val fromNetTimeout: BidiFlow[ByteString, ByteString, ByteString, ByteString, NotUsed] = diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/TlsModule.scala b/akka-stream/src/main/scala/akka/stream/impl/io/TlsModule.scala index d15a7843220..61fec0c15ec 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/TlsModule.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/TlsModule.scala @@ -16,9 +16,7 @@ import akka.stream.impl.{ TlsModuleIslandTag, TraversalBuilder } import akka.stream.impl.StreamLayout.AtomicModule import akka.util.ByteString -/** - * INTERNAL API. - */ +/** INTERNAL API. */ @InternalApi private[stream] final case class TlsModule( plainIn: Inlet[SslTlsOutbound], plainOut: Outlet[SslTlsInbound], @@ -39,9 +37,7 @@ import akka.util.ByteString TraversalBuilder.atomic(this, attributes).makeIsland(TlsModuleIslandTag) } -/** - * INTERNAL API. - */ +/** INTERNAL API. 
*/ @InternalApi private[stream] object TlsModule { def apply( attributes: Attributes, diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/compression/CompressionUtils.scala b/akka-stream/src/main/scala/akka/stream/impl/io/compression/CompressionUtils.scala index 156abb1ba85..afdfbd7717a 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/compression/CompressionUtils.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/compression/CompressionUtils.scala @@ -15,9 +15,7 @@ import akka.util.ByteString /** INTERNAL API */ @InternalApi private[stream] object CompressionUtils { - /** - * Creates a flow from a compressor constructor. - */ + /** Creates a flow from a compressor constructor. */ def compressorFlow(newCompressor: () => Compressor): Flow[ByteString, ByteString, NotUsed] = Flow.fromGraph { new SimpleLinearGraphStage[ByteString] { diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/compression/Compressor.scala b/akka-stream/src/main/scala/akka/stream/impl/io/compression/Compressor.scala index b451ea85e55..4317b927209 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/compression/Compressor.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/compression/Compressor.scala @@ -22,9 +22,7 @@ import akka.util.ByteString */ def compress(input: ByteString): ByteString - /** - * Flushes any output data and returns the currently remaining compressed data. - */ + /** Flushes any output data and returns the currently remaining compressed data. 
*/ def flush(): ByteString /** diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/compression/GzipDecompressor.scala b/akka-stream/src/main/scala/akka/stream/impl/io/compression/GzipDecompressor.scala index 7f81af75df9..a503628fa7b 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/compression/GzipDecompressor.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/compression/GzipDecompressor.scala @@ -34,7 +34,7 @@ import akka.util.ByteString case object ReadHeaders extends Step { override def parse(reader: ByteStringParser.ByteReader): ParseResult[ByteString] = { import reader._ - if (readByte() != 0x1F || readByte() != 0x8B) fail("Not in GZIP format") // check magic header + if (readByte() != 0x1f || readByte() != 0x8b) fail("Not in GZIP format") // check magic header if (readByte() != 8) fail("Unsupported GZIP compression method") // check compression method val flags = readByte() skip(6) // skip MTIME, XFL and OS fields @@ -65,15 +65,15 @@ import akka.util.ByteString private def crc16(data: ByteString) = { val crc = new CRC32 crc.update(data.toArrayUnsafe()) - crc.getValue.toInt & 0xFFFF + crc.getValue.toInt & 0xffff } } /** INTERNAL API */ @InternalApi private[akka] object GzipDecompressor { // RFC 1952: https://www.rfc-editor.org/rfc/rfc1952.html section 2.2 - private[impl] val Header = ByteString(0x1F, // ID1 - 0x8B, // ID2 + private[impl] val Header = ByteString(0x1f, // ID1 + 0x8b, // ID2 8, // CM = Deflate 0, // FLG 0, // MTIME 1 @@ -82,5 +82,5 @@ import akka.util.ByteString 0, // MTIME 4 0, // XFL 0 // OS - ) + ) } diff --git a/akka-stream/src/main/scala/akka/stream/impl/package.scala b/akka-stream/src/main/scala/akka/stream/impl/package.scala index 655ff8ea62f..8238c76cba8 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/package.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/package.scala @@ -458,6 +458,5 @@ package akka.stream * * [[akka.stream.impl.TraversalBuilder.printTraversal]]: Prints the Traversal in a 
readable format * * [[akka.stream.impl.TraversalBuilder.printWiring]]: Prints the calculated port assignments. Useful for * debugging if everything is wired to the right thing. - * */ package object impl {} diff --git a/akka-stream/src/main/scala/akka/stream/impl/streamref/SinkRefImpl.scala b/akka-stream/src/main/scala/akka/stream/impl/streamref/SinkRefImpl.scala index d689413772b..acfa85359d7 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/streamref/SinkRefImpl.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/streamref/SinkRefImpl.scala @@ -24,9 +24,7 @@ private[stream] final case class SinkRefImpl[In](initialPartnerRef: ActorRef) ex Sink.fromGraph(new SinkRefStageImpl[In](OptionVal.Some(initialPartnerRef))).mapMaterializedValue(_ => NotUsed) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[stream] object SinkRefStageImpl { private sealed trait ActorRefStage { def ref: ActorRef } } @@ -224,7 +222,9 @@ private[stream] final class SinkRefStageImpl[In] private[akka] (val initialPartn case OptionVal.Some(ref) => ref ! StreamRefsProtocol.RemoteStreamFailure(ex.getMessage) finishedWithAwaitingPartnerTermination = OptionVal(Failure(ex)) - setKeepGoing(true) // we will terminate once partner ref has Terminated (to avoid racing Terminated with completion message) + setKeepGoing( + true + ) // we will terminate once partner ref has Terminated (to avoid racing Terminated with completion message) case _ => completedBeforeRemoteConnected = OptionVal(scala.util.Failure(ex)) @@ -240,7 +240,9 @@ private[stream] final class SinkRefStageImpl[In] private[akka] (val initialPartn case OptionVal.Some(ref) => ref ! 
StreamRefsProtocol.RemoteStreamCompleted(remoteCumulativeDemandConsumed) finishedWithAwaitingPartnerTermination = OptionVal(Success(Done)) - setKeepGoing(true) // we will terminate once partner ref has Terminated (to avoid racing Terminated with completion message) + setKeepGoing( + true + ) // we will terminate once partner ref has Terminated (to avoid racing Terminated with completion message) case _ => completedBeforeRemoteConnected = OptionVal(scala.util.Success(Done)) // not terminating on purpose, since other side may subscribe still and then we want to complete it @@ -264,13 +266,17 @@ private[stream] final class SinkRefStageImpl[In] private[akka] (val initialPartn ex) partner ! StreamRefsProtocol.RemoteStreamFailure(ex.getMessage) finishedWithAwaitingPartnerTermination = OptionVal(Failure(ex)) - setKeepGoing(true) // we will terminate once partner ref has Terminated (to avoid racing Terminated with completion message) + setKeepGoing( + true + ) // we will terminate once partner ref has Terminated (to avoid racing Terminated with completion message) case OptionVal.Some(scala.util.Success(Done)) => log.warning("[{}] Stream already completed before remote side materialized, failing now.", stageActorName) partner ! 
StreamRefsProtocol.RemoteStreamCompleted(remoteCumulativeDemandConsumed) finishedWithAwaitingPartnerTermination = OptionVal(Success(Done)) - setKeepGoing(true) // we will terminate once partner ref has Terminated (to avoid racing Terminated with completion message) + setKeepGoing( + true + ) // we will terminate once partner ref has Terminated (to avoid racing Terminated with completion message) case _ => if (partner != getPartnerRef) { diff --git a/akka-stream/src/main/scala/akka/stream/impl/streamref/SourceRefImpl.scala b/akka-stream/src/main/scala/akka/stream/impl/streamref/SourceRefImpl.scala index f2ee06103a1..7a682507d08 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/streamref/SourceRefImpl.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/streamref/SourceRefImpl.scala @@ -23,9 +23,7 @@ private[stream] final case class SourceRefImpl[T](initialPartnerRef: ActorRef) e Source.fromGraph(new SourceRefStageImpl(OptionVal.Some(initialPartnerRef))).mapMaterializedValue(_ => NotUsed) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[stream] object SourceRefStageImpl { private sealed trait ActorRefStage { def ref: ActorRef } diff --git a/akka-stream/src/main/scala/akka/stream/impl/streamref/StreamRefResolverImpl.scala b/akka-stream/src/main/scala/akka/stream/impl/streamref/StreamRefResolverImpl.scala index 3bf0ab11861..79402fd45ef 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/streamref/StreamRefResolverImpl.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/streamref/StreamRefResolverImpl.scala @@ -10,9 +10,7 @@ import akka.stream.SinkRef import akka.stream.SourceRef import akka.stream.StreamRefResolver -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class StreamRefResolverImpl(system: ExtendedActorSystem) extends StreamRefResolver { def toSerializationFormat[T](ref: SourceRef[T]): String = ref match { diff --git 
a/akka-stream/src/main/scala/akka/stream/impl/streamref/StreamRefsProtocol.scala b/akka-stream/src/main/scala/akka/stream/impl/streamref/StreamRefsProtocol.scala index 9ac50741ac6..68d0159d9cb 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/streamref/StreamRefsProtocol.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/streamref/StreamRefsProtocol.scala @@ -30,9 +30,7 @@ private[akka] object StreamRefsProtocol { if (payload == null) throw ReactiveStreamsCompliance.elementMustNotBeNullException } - /** - * INTERNAL API: Initial message sent to remote side to establish partnership between origin and remote stream refs. - */ + /** INTERNAL API: Initial message sent to remote side to establish partnership between origin and remote stream refs. */ @InternalApi private[akka] final case class OnSubscribeHandshake(targetRef: ActorRef) extends StreamRefsProtocol diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/BidiFlow.scala b/akka-stream/src/main/scala/akka/stream/javadsl/BidiFlow.scala index 81665cb2301..f6865b537f5 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/BidiFlow.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/BidiFlow.scala @@ -42,7 +42,6 @@ object BidiFlow { * | +----------------------+ | * +----------------------------+ * }}} - * */ def fromFlowsMat[I1, O1, I2, O2, M1, M2, M]( flow1: Graph[FlowShape[I1, O1], M1], @@ -67,7 +66,6 @@ object BidiFlow { * | +----------------------+ | * +----------------------------+ * }}} - * */ def fromFlows[I1, O1, I2, O2, M1, M2]( flow1: Graph[FlowShape[I1, O1], M1], @@ -194,14 +192,10 @@ final class BidiFlow[I1, O1, I2, O2, Mat](delegate: scaladsl.BidiFlow[I1, O1, I2 def join[Mat2, M](flow: Flow[O1, I2, Mat2], combine: function.Function2[Mat, Mat2, M]): Flow[I1, O2, M] = new Flow(delegate.joinMat(flow.asScala)(combinerToScala(combine))) - /** - * Turn this BidiFlow around by 180 degrees, logically flipping it upside down in a protocol stack. 
- */ + /** Turn this BidiFlow around by 180 degrees, logically flipping it upside down in a protocol stack. */ def reversed: BidiFlow[I2, O2, I1, O1, Mat] = new BidiFlow(delegate.reversed) - /** - * Transform only the materialized value of this BidiFlow, leaving all other properties as they were. - */ + /** Transform only the materialized value of this BidiFlow, leaving all other properties as they were. */ def mapMaterializedValue[Mat2](f: function.Function[Mat, Mat2]): BidiFlow[I1, O1, I2, O2, Mat2] = new BidiFlow(delegate.mapMaterializedValue(f.apply _)) @@ -224,15 +218,11 @@ final class BidiFlow[I1, O1, I2, O2, Mat](delegate: scaladsl.BidiFlow[I1, O1, I2 override def addAttributes(attr: Attributes): BidiFlow[I1, O1, I2, O2, Mat] = new BidiFlow(delegate.addAttributes(attr)) - /** - * Add a ``name`` attribute to this Flow. - */ + /** Add a ``name`` attribute to this Flow. */ override def named(name: String): BidiFlow[I1, O1, I2, O2, Mat] = new BidiFlow(delegate.named(name)) - /** - * Put an asynchronous boundary around this `Flow` - */ + /** Put an asynchronous boundary around this `Flow` */ override def async: BidiFlow[I1, O1, I2, O2, Mat] = new BidiFlow(delegate.async) diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/DelayStrategy.scala b/akka-stream/src/main/scala/akka/stream/javadsl/DelayStrategy.scala index 281a57978f2..7c0cbc8ce04 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/DelayStrategy.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/DelayStrategy.scala @@ -16,9 +16,7 @@ import akka.util.JavaDurationConverters.JavaDurationOps */ trait DelayStrategy[T] { - /** - * Returns delay for ongoing element, `Duration.Zero` means passing without delay - */ + /** Returns delay for ongoing element, `Duration.Zero` means passing without delay */ def nextDelay(elem: T): java.time.Duration } diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/FileIO.scala b/akka-stream/src/main/scala/akka/stream/javadsl/FileIO.scala index 
49b246dccb8..c73773bd3ef 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/FileIO.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/FileIO.scala @@ -15,9 +15,7 @@ import akka.stream.scaladsl.SourceToCompletionStage import akka.util.ByteString import akka.util.ccompat.JavaConverters._ -/** - * Java API: Factories to create sinks and sources from files - */ +/** Java API: Factories to create sinks and sources from files */ object FileIO { /** diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Flow.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Flow.scala index 641e7ff17d7..59b48ca9852 100755 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Flow.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Flow.scala @@ -61,9 +61,7 @@ object Flow { /** Create a `Flow` which can process elements of type `T`. */ def of[T](@unused clazz: Class[T]): javadsl.Flow[T, T, NotUsed] = create[T]() - /** - * A graph with the shape of a flow logically is a flow, this method makes it so also in type. - */ + /** A graph with the shape of a flow logically is a flow, this method makes it so also in type. */ def fromGraph[I, O, M](g: Graph[FlowShape[I, O], M]): Flow[I, O, M] = g match { case f: Flow[I, O, M] @unchecked => f @@ -386,16 +384,15 @@ object Flow { * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * * */ + * * + */ def flattenOptional[Out, In <: Optional[Out]](): Flow[In, Out, NotUsed] = new Flow(scaladsl.Flow[In].collect { case optional: Optional[Out @unchecked] if optional.isPresent => optional.get() }) } -/** - * A `Flow` is a set of stream processing steps that has one open input and one open output. - */ +/** A `Flow` is a set of stream processing steps that has one open input and one open output. 
*/ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Graph[FlowShape[In, Out], Mat] { import akka.util.ccompat.JavaConverters._ @@ -407,9 +404,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr /** Converts this Flow to its Scala DSL counterpart */ def asScala: scaladsl.Flow[In, Out, Mat] = delegate - /** - * Transform only the materialized value of this Flow, leaving all other properties as they were. - */ + /** Transform only the materialized value of this Flow, leaving all other properties as they were. */ def mapMaterializedValue[Mat2](f: function.Function[Mat, Mat2]): Flow[In, Out, Mat2] = new Flow(delegate.mapMaterializedValue(f.apply _)) @@ -695,7 +690,6 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def wireTap(f: function.Procedure[Out]): javadsl.Flow[In, Out, Mat] = new Flow(delegate.wireTap(f(_))) @@ -1037,7 +1031,6 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def filter(p: function.Predicate[Out]): javadsl.Flow[In, Out, Mat] = new Flow(delegate.filter(p.test)) @@ -1678,7 +1671,6 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def mapError(pf: PartialFunction[Throwable, Throwable]): javadsl.Flow[In, Out, Mat] = new Flow(delegate.mapError(pf)) @@ -1700,7 +1692,6 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def mapError[E <: Throwable](clazz: Class[E], f: function.Function[E, Throwable]): 
javadsl.Flow[In, Out, Mat] = mapError { @@ -1725,7 +1716,6 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def recoverWith(pf: PartialFunction[Throwable, _ <: Graph[SourceShape[Out], NotUsed]]): javadsl.Flow[In, Out, Mat] = new Flow(delegate.recoverWith(pf)) @@ -1870,9 +1860,11 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr attempts: Int, clazz: Class[_ <: Throwable], supplier: Supplier[Graph[SourceShape[Out], NotUsed]]): javadsl.Flow[In, Out, Mat] = - recoverWithRetries(attempts, { - case elem if clazz.isInstance(elem) => supplier.get() - }) + recoverWithRetries( + attempts, + { + case elem if clazz.isInstance(elem) => supplier.get() + }) /** * Terminate processing (and cancel the upstream publisher) after the given @@ -1943,7 +1935,6 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * * @param seed Provides the first state for a conflated value using the first unconsumed element as a start * @param aggregate Takes the currently aggregated value and the current pending element to produce a new aggregate - * */ def conflateWithSeed[S]( seed: function.Function[Out, S], @@ -1974,7 +1965,6 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * see also [[Flow.conflateWithSeed]] [[Flow.batch]] [[Flow.batchWeighted]] * * @param aggregate Takes the currently aggregated value and the current pending element to produce a new aggregate - * */ def conflate(aggregate: function.Function2[Out, Out, Out]): javadsl.Flow[In, Out, Mat] = new Flow(delegate.conflate(aggregate.apply)) @@ -2198,7 +2188,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * * @param n the number of elements to accumulate before materializing the downstream flow. 
* @param f a function that produces the downstream flow based on the upstream's prefix. - **/ + */ def flatMapPrefix[Out2, Mat2]( n: Int, f: function.Function[java.lang.Iterable[Out], javadsl.Flow[Out, Out2, Mat2]]): javadsl.Flow[In, Out2, Mat] = { @@ -2942,7 +2932,8 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr val seq = if (those != null) Util.immutableSeq(those).collect { case source: Source[Out @unchecked, _] => source.asScala case other => other - } else immutable.Seq() + } + else immutable.Seq() new Flow(delegate.interleaveAll(seq, segmentSize, eagerClose)) } @@ -3023,7 +3014,8 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr val seq = if (those != null) Util.immutableSeq(those).collect { case source: Source[Out @unchecked, _] => source.asScala case other => other - } else immutable.Seq() + } + else immutable.Seq() new javadsl.Flow(delegate.mergeAll(seq, eagerComplete)) } @@ -3178,13 +3170,15 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out Pair T, M2] = this.viaMat( Flow.fromGraph( - GraphDSL.create(that, new function.Function2[GraphDSL.Builder[M], SourceShape[T], FlowShape[Out, Out Pair T]] { - def apply(b: GraphDSL.Builder[M], s: SourceShape[T]): FlowShape[Out, Out Pair T] = { - val zip: FanInShape2[Out, T, Out Pair T] = b.add(Zip.create[Out, T]) - b.from(s).toInlet(zip.in1) - FlowShape(zip.in0, zip.out) - } - })), + GraphDSL.create( + that, + new function.Function2[GraphDSL.Builder[M], SourceShape[T], FlowShape[Out, Out Pair T]] { + def apply(b: GraphDSL.Builder[M], s: SourceShape[T]): FlowShape[Out, Out Pair T] = { + val zip: FanInShape2[Out, T, Out Pair T] = b.add(Zip.create[Out, T]) + b.from(s).toInlet(zip.in1) + FlowShape(zip.in0, zip.out) + } + })), matF) /** @@ -3250,13 +3244,15 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr matF: 
function.Function2[Mat, M, M2]): javadsl.Flow[In, Out Pair T, M2] = this.viaMat( Flow.fromGraph( - GraphDSL.create(that, new function.Function2[GraphDSL.Builder[M], SourceShape[T], FlowShape[Out, Out Pair T]] { - def apply(b: GraphDSL.Builder[M], s: SourceShape[T]): FlowShape[Out, Out Pair T] = { - val zip: FanInShape2[Out, T, Out Pair T] = b.add(ZipLatest.create[Out, T]) - b.from(s).toInlet(zip.in1) - FlowShape(zip.in0, zip.out) - } - })), + GraphDSL.create( + that, + new function.Function2[GraphDSL.Builder[M], SourceShape[T], FlowShape[Out, Out Pair T]] { + def apply(b: GraphDSL.Builder[M], s: SourceShape[T]): FlowShape[Out, Out Pair T] = { + val zip: FanInShape2[Out, T, Out Pair T] = b.add(ZipLatest.create[Out, T]) + b.from(s).toInlet(zip.in1) + FlowShape(zip.in0, zip.out) + } + })), matF) /** @@ -3494,7 +3490,6 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle(elements: Int, per: java.time.Duration): javadsl.Flow[In, Out, Mat] = new Flow(delegate.throttle(elements, per.asScala)) @@ -3533,7 +3528,6 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle( elements: Int, @@ -3572,7 +3566,6 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle( cost: Int, @@ -3617,7 +3610,6 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle( cost: Int, @@ -3707,15 +3699,11 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr override def addAttributes(attr: Attributes): javadsl.Flow[In, Out, Mat] = new 
Flow(delegate.addAttributes(attr)) - /** - * Add a ``name`` attribute to this Flow. - */ + /** Add a ``name`` attribute to this Flow. */ override def named(name: String): javadsl.Flow[In, Out, Mat] = new Flow(delegate.named(name)) - /** - * Put an asynchronous boundary around this `Flow` - */ + /** Put an asynchronous boundary around this `Flow` */ override def async: javadsl.Flow[In, Out, Mat] = new Flow(delegate.async) @@ -3930,7 +3918,6 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * * @param collapseContext turn each incoming pair of element and context value into an element of this Flow * @param extractContext turn each outgoing element of this Flow into an outgoing context value - * */ def asFlowWithContext[U, CtxU, CtxOut]( collapseContext: function.Function2[U, CtxU, In], @@ -3964,8 +3951,8 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr .aggregateWithBoundary(() => allocate.get())( aggregate = (agg, out) => aggregate.apply(agg, out).toScala, harvest = agg => harvest.apply(agg), - emitOnTimer = Option(emitOnTimer).map { - case Pair(predicate, duration) => (agg => predicate.test(agg), duration.asScala) + emitOnTimer = Option(emitOnTimer).map { case Pair(predicate, duration) => + (agg => predicate.test(agg), duration.asScala) }) .asJava @@ -4030,9 +4017,7 @@ abstract class RunnableGraph[+Mat] extends Graph[ClosedShape, Mat] { */ def run(materializer: Materializer): Mat - /** - * Transform only the materialized value of this RunnableGraph, leaving all other properties as they were. - */ + /** Transform only the materialized value of this RunnableGraph, leaving all other properties as they were. 
*/ def mapMaterializedValue[Mat2](f: function.Function[Mat, Mat2]): RunnableGraph[Mat2] override def withAttributes(attr: Attributes): RunnableGraph[Mat] @@ -4043,8 +4028,6 @@ abstract class RunnableGraph[+Mat] extends Graph[ClosedShape, Mat] { override def named(name: String): RunnableGraph[Mat] = withAttributes(Attributes.name(name)) - /** - * Converts this Java DSL element to its Scala DSL counterpart. - */ + /** Converts this Java DSL element to its Scala DSL counterpart. */ def asScala: scaladsl.RunnableGraph[Mat] } diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/FlowWithContext.scala b/akka-stream/src/main/scala/akka/stream/javadsl/FlowWithContext.scala index 5fa7f447bed..e5e49e2c548 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/FlowWithContext.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/FlowWithContext.scala @@ -23,9 +23,7 @@ object FlowWithContext { def create[In, Ctx](): FlowWithContext[In, Ctx, In, Ctx, akka.NotUsed] = new FlowWithContext(Flow.create[Pair[In, Ctx]]()) - /** - * Creates a FlowWithContext from a regular flow that operates on `Pair` elements. - */ + /** Creates a FlowWithContext from a regular flow that operates on `Pair` elements. */ def fromPairs[In, CtxIn, Out, CtxOut, Mat]( under: Flow[Pair[In, CtxIn], Pair[Out, CtxOut], Mat]): FlowWithContext[In, CtxIn, Out, CtxOut, Mat] = new FlowWithContext(under) @@ -39,7 +37,6 @@ object FlowWithContext { * operations. * * An "empty" flow can be created by calling `FlowWithContext[Ctx, T]`. - * */ final class FlowWithContext[In, CtxIn, Out, CtxOut, +Mat]( delegate: javadsl.Flow[Pair[In, CtxIn], Pair[Out, CtxOut], Mat]) @@ -106,9 +103,7 @@ final class FlowWithContext[In, CtxIn, Out, CtxOut, +Mat]( def mapMaterializedValue[Mat2](f: function.Function[Mat, Mat2]): FlowWithContext[In, CtxIn, Out, CtxOut, Mat2] = new FlowWithContext(delegate.mapMaterializedValue[Mat2](f)) - /** - * Creates a regular flow of pairs (data, context). 
- */ + /** Creates a regular flow of pairs (data, context). */ def asFlow(): Flow[Pair[In, CtxIn], Pair[Out, CtxOut], Mat] @uncheckedVariance = delegate @@ -223,9 +218,7 @@ final class FlowWithContext[In, CtxIn, Out, CtxOut, +Mat]( f: function.Function[Out, _ <: java.lang.Iterable[Out2]]): FlowWithContext[In, CtxIn, Out2, CtxOut, Mat] = viaScala(_.mapConcat(elem => Util.immutableSeq(f.apply(elem)))) - /** - * Apply the given function to each context element (leaving the data elements unchanged). - */ + /** Apply the given function to each context element (leaving the data elements unchanged). */ def mapContext[CtxOut2]( extractContext: function.Function[CtxOut, CtxOut2]): FlowWithContext[In, CtxIn, Out, CtxOut2, Mat] = { viaScala(_.mapContext(extractContext.apply)) diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Framing.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Framing.scala index 7e1bbcdc54c..9c8a945cc73 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Framing.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Framing.scala @@ -111,7 +111,6 @@ object Framing { * For example, frame can have a shape like this: `[offset bytes][body size bytes][body bytes][footer bytes]`. * Then computeFrameSize can be used to compute the frame size: `(offset bytes, computed size) => (actual frame size)`. * ''Actual frame size'' must be equal or bigger than sum of `fieldOffset` and `fieldLength`, the operator fails otherwise. - * */ def lengthField( fieldLength: Int, diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Graph.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Graph.scala index fb8d31c538f..565d233ca2c 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Graph.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Graph.scala @@ -30,15 +30,11 @@ import akka.util.unused */ object Merge { - /** - * Create a new `Merge` operator with the specified output type. 
- */ + /** Create a new `Merge` operator with the specified output type. */ def create[T](inputPorts: Int): Graph[UniformFanInShape[T, T], NotUsed] = scaladsl.Merge(inputPorts) - /** - * Create a new `Merge` operator with the specified output type. - */ + /** Create a new `Merge` operator with the specified output type. */ def create[T](@unused clazz: Class[T], inputPorts: Int): Graph[UniformFanInShape[T, T], NotUsed] = create(inputPorts) /** @@ -78,15 +74,11 @@ object Merge { */ object MergePreferred { - /** - * Create a new `MergePreferred` operator with the specified output type. - */ + /** Create a new `MergePreferred` operator with the specified output type. */ def create[T](secondaryPorts: Int): Graph[scaladsl.MergePreferred.MergePreferredShape[T], NotUsed] = scaladsl.MergePreferred(secondaryPorts) - /** - * Create a new `MergePreferred` operator with the specified output type. - */ + /** Create a new `MergePreferred` operator with the specified output type. */ def create[T]( @unused clazz: Class[T], secondaryPorts: Int): Graph[scaladsl.MergePreferred.MergePreferredShape[T], NotUsed] = @@ -136,15 +128,11 @@ object MergePreferred { */ object MergePrioritized { - /** - * Create a new `MergePrioritized` operator with the specified output type. - */ + /** Create a new `MergePrioritized` operator with the specified output type. */ def create[T](priorities: Array[Int]): Graph[UniformFanInShape[T, T], NotUsed] = scaladsl.MergePrioritized(priorities.toIndexedSeq) - /** - * Create a new `MergePrioritized` operator with the specified output type. - */ + /** Create a new `MergePrioritized` operator with the specified output type. 
*/ def create[T](@unused clazz: Class[T], priorities: Array[Int]): Graph[UniformFanInShape[T, T], NotUsed] = create(priorities) @@ -203,9 +191,7 @@ object Broadcast { */ def create[T](outputCount: Int): Graph[UniformFanOutShape[T, T], NotUsed] = create(outputCount, eagerCancel = false) - /** - * Create a new `Broadcast` operator with the specified input type. - */ + /** Create a new `Broadcast` operator with the specified input type. */ def create[T](@unused clazz: Class[T], outputCount: Int): Graph[UniformFanOutShape[T, T], NotUsed] = create(outputCount) @@ -477,15 +463,11 @@ object ZipWithN { */ object Unzip { - /** - * Creates a new `Unzip` operator with the specified output types. - */ + /** Creates a new `Unzip` operator with the specified output types. */ def create[A, B](): Graph[FanOutShape2[A Pair B, A, B], NotUsed] = UnzipWith.create(ConstantFun.javaIdentityFunction[Pair[A, B]]) - /** - * Creates a new `Unzip` operator with the specified output types. - */ + /** Creates a new `Unzip` operator with the specified output types. */ def create[A, B](@unused left: Class[A], @unused right: Class[B]): Graph[FanOutShape2[A Pair B, A, B], NotUsed] = create[A, B]() @@ -506,25 +488,17 @@ object Unzip { */ object Concat { - /** - * Create a new anonymous `Concat` operator with the specified input types. - */ + /** Create a new anonymous `Concat` operator with the specified input types. */ def create[T](): Graph[UniformFanInShape[T, T], NotUsed] = scaladsl.Concat[T]() - /** - * Create a new anonymous `Concat` operator with the specified input types. - */ + /** Create a new anonymous `Concat` operator with the specified input types. */ def create[T](inputCount: Int): Graph[UniformFanInShape[T, T], NotUsed] = scaladsl.Concat[T](inputCount) - /** - * Create a new anonymous `Concat` operator with the specified input types. - */ + /** Create a new anonymous `Concat` operator with the specified input types. 
*/ def create[T](inputCount: Int, detachedInputs: Boolean): Graph[UniformFanInShape[T, T], NotUsed] = scaladsl.Concat[T](inputCount, detachedInputs) - /** - * Create a new anonymous `Concat` operator with the specified input types. - */ + /** Create a new anonymous `Concat` operator with the specified input types. */ def create[T](@unused clazz: Class[T]): Graph[UniformFanInShape[T, T], NotUsed] = create() } @@ -621,12 +595,13 @@ object GraphDSL extends GraphCreate { } val sListH = gbuilder.delegate.add(graphs.get(0), toList) val sListT = graphs.subList(1, graphs.size()).asScala.map(g => gbuilder.delegate.add(g, combine)).asJava - val s = buildBlock(gbuilder, { - val newList = new util.ArrayList[IS] - newList.add(sListH) - newList.addAll(sListT) - newList - }) + val s = buildBlock( + gbuilder, { + val newList = new util.ArrayList[IS] + newList.add(sListH) + newList.addAll(sListT) + newList + }) new GenericGraph(s, gbuilder.delegate.result(s)) } diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Hub.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Hub.scala index 4f456b5b367..ff9bb4fd881 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Hub.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Hub.scala @@ -79,9 +79,8 @@ object MergeHub { perProducerBufferSize: Int): Source[T, akka.japi.Pair[Sink[T, NotUsed], DrainingControl]] = { akka.stream.scaladsl.MergeHub .sourceWithDraining[T](perProducerBufferSize) - .mapMaterializedValue { - case (sink, draining) => - akka.japi.Pair(sink.asJava[T], new DrainingControlImpl(draining): DrainingControl) + .mapMaterializedValue { case (sink, draining) => + akka.japi.Pair(sink.asJava[T], new DrainingControlImpl(draining): DrainingControl) } .asJava } @@ -393,9 +392,7 @@ object PartitionHub { */ def queueSize(consumerId: Long): Int - /** - * Number of attached consumers. - */ + /** Number of attached consumers. 
*/ def size: Int } } diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/MergeLatest.scala b/akka-stream/src/main/scala/akka/stream/javadsl/MergeLatest.scala index 0fd04ff53fa..88717e48050 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/MergeLatest.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/MergeLatest.scala @@ -19,7 +19,6 @@ import akka.util.ccompat.JavaConverters._ * '''Completes when''' all upstreams complete (eagerClose=false) or one upstream completes (eagerClose=true) * * '''Cancels when''' downstream cancels - * */ object MergeLatest { diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Queue.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Queue.scala index 33c237c116f..37ec22113a5 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Queue.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Queue.scala @@ -15,9 +15,7 @@ import akka.Done import akka.dispatch.ExecutionContexts import akka.stream.QueueOfferResult -/** - * This trait allows to have a queue as a data source for some stream. - */ +/** This trait allows to have a queue as a data source for some stream. */ trait SourceQueue[T] { /** @@ -43,9 +41,7 @@ trait SourceQueue[T] { def watchCompletion(): CompletionStage[Done] } -/** - * This trait adds completion support to [[SourceQueue]]. - */ +/** This trait adds completion support to [[SourceQueue]]. 
*/ trait SourceQueueWithComplete[T] extends SourceQueue[T] { /** @@ -76,9 +72,7 @@ trait SourceQueueWithComplete[T] extends SourceQueue[T] { object SourceQueueWithComplete { - /** - * Converts the queue into a `scaladsl.SourceQueueWithComplete` - */ + /** Converts the queue into a `scaladsl.SourceQueueWithComplete` */ def asScala[T](queue: SourceQueueWithComplete[T]): akka.stream.scaladsl.SourceQueueWithComplete[T] = { // would have been better to add `asScala` in SourceQueueWithComplete trait, but not doing // that for backwards compatibility reasons @@ -109,22 +103,16 @@ trait SinkQueue[T] { def pull(): CompletionStage[Optional[T]] } -/** - * This trait adds cancel support to [[SinkQueue]]. - */ +/** This trait adds cancel support to [[SinkQueue]]. */ trait SinkQueueWithCancel[T] extends SinkQueue[T] { - /** - * Cancels the stream. This method returns right away without waiting for actual finalizing the stream. - */ + /** Cancels the stream. This method returns right away without waiting for actual finalizing the stream. */ def cancel(): Unit } object SinkQueueWithCancel { - /** - * Converts the queue into a `scaladsl.SinkQueueWithCancel` - */ + /** Converts the queue into a `scaladsl.SinkQueueWithCancel` */ def asScala[T](queue: SinkQueueWithCancel[T]): akka.stream.scaladsl.SinkQueueWithCancel[T] = { // would have been better to add `asScala` in SinkQueueWithCancel trait, but not doing // that for backwards compatibility reasons diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/RestartSource.scala b/akka-stream/src/main/scala/akka/stream/javadsl/RestartSource.scala index 2631217c033..d28f8b00884 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/RestartSource.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/RestartSource.scala @@ -124,7 +124,6 @@ object RestartSource { * random delay based on this factor is added, e.g. `0.2` adds up to `20%` delay. * In order to skip this additional delay pass in `0`. 
* @param sourceFactory A factory for producing the [[Source]] to wrap. - * */ @Deprecated @deprecated("Use the overloaded method which accepts akka.stream.RestartSettings instead.", since = "2.6.10") @@ -157,7 +156,6 @@ object RestartSource { * @param maxRestarts the amount of restarts is capped to this amount within a time frame of minBackoff. * Passing `0` will cause no restarts and a negative number will not cap the amount of restarts. * @param sourceFactory A factory for producing the [[Source]] to wrap. - * */ @Deprecated @deprecated("Use the overloaded method which accepts akka.stream.RestartSettings instead.", since = "2.6.10") @@ -184,7 +182,6 @@ object RestartSource { * * @param settings [[RestartSettings]] defining restart configuration * @param sourceFactory A factory for producing the [[Source]] to wrap. - * */ def onFailuresWithBackoff[T](settings: RestartSettings, sourceFactory: Creator[Source[T, _]]): Source[T, NotUsed] = akka.stream.scaladsl.RestartSource diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Sink.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Sink.scala index 6cec5e49171..63eab5a722e 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Sink.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Sink.scala @@ -84,27 +84,19 @@ object Sink { def reduce[In](f: function.Function2[In, In, In]): Sink[In, CompletionStage[In]] = new Sink(scaladsl.Sink.reduce[In](f.apply).toCompletionStage()) - /** - * Helper to create [[Sink]] from `Subscriber`. - */ + /** Helper to create [[Sink]] from `Subscriber`. */ def fromSubscriber[In](subs: Subscriber[In]): Sink[In, NotUsed] = new Sink(scaladsl.Sink.fromSubscriber(subs)) - /** - * A `Sink` that immediately cancels its upstream after materialization. - */ + /** A `Sink` that immediately cancels its upstream after materialization. */ def cancelled[T](): Sink[T, NotUsed] = new Sink(scaladsl.Sink.cancelled) - /** - * A `Sink` that will consume the stream and discard the elements. 
- */ + /** A `Sink` that will consume the stream and discard the elements. */ def ignore[T](): Sink[T, CompletionStage[Done]] = new Sink(scaladsl.Sink.ignore.toCompletionStage()) - /** - * A [[Sink]] that will always backpressure never cancel and never consume any elements from the stream. - * */ + /** A [[Sink]] that will always backpressure never cancel and never consume any elements from the stream. */ def never[T]: Sink[T, CompletionStage[Done]] = new Sink(scaladsl.Sink.never.toCompletionStage()) @@ -237,7 +229,6 @@ object Sink { * of the actor will grow. For potentially slow consumer actors it is recommended * to use a bounded mailbox with zero `mailbox-push-timeout-time` or use a rate * limiting operator in front of this `Sink`. - * */ def actorRef[In](ref: ActorRef, onCompleteMessage: Any): Sink[In, NotUsed] = new Sink(scaladsl.Sink.actorRef[In](ref, onCompleteMessage, (t: Throwable) => Status.Failure(t))) @@ -340,9 +331,7 @@ object Sink { def setup[T, M](factory: BiFunction[ActorMaterializer, Attributes, Sink[T, M]]): Sink[T, CompletionStage[M]] = scaladsl.Sink.setup((mat, attr) => factory(mat, attr).asScala).mapMaterializedValue(_.toJava).asJava - /** - * Combine several sinks with fan-out strategy like `Broadcast` or `Balance` and returns `Sink`. - */ + /** Combine several sinks with fan-out strategy like `Broadcast` or `Balance` and returns `Sink`. */ def combine[T, U]( output1: Sink[U, _], output2: Sink[U, _], @@ -356,9 +345,7 @@ object Sink { new Sink(scaladsl.Sink.combine(output1.asScala, output2.asScala, seq: _*)(num => fanOutStrategy.apply(num))) } - /** - * Combine two sinks with fan-out strategy like `Broadcast` or `Balance` and returns `Sink` with 2 outlets. - */ + /** Combine two sinks with fan-out strategy like `Broadcast` or `Balance` and returns `Sink` with 2 outlets. 
*/ def combineMat[T, U, M1, M2, M]( first: Sink[U, M1], second: Sink[U, M2], @@ -379,7 +366,8 @@ object Sink { val seq = if (sinks != null) Util.immutableSeq(sinks).collect { case sink: Sink[U @unchecked, M @unchecked] => sink.asScala case other => other - } else immutable.Seq() + } + else immutable.Seq() import akka.util.ccompat.JavaConverters._ new Sink(scaladsl.Sink.combine(seq)(size => fanOutStrategy(size)).mapMaterializedValue(_.asJava)) } @@ -496,7 +484,7 @@ object Sink { */ def lazyCompletionStageSink[T, M](create: Creator[CompletionStage[Sink[T, M]]]): Sink[T, CompletionStage[M]] = new Sink(scaladsl.Sink.lazyFutureSink { () => - create.create().toScala.map(_.asScala)((ExecutionContexts.parasitic)) + create.create().toScala.map(_.asScala)(ExecutionContexts.parasitic) }).mapMaterializedValue(_.toJava) } @@ -513,9 +501,7 @@ final class Sink[In, Mat](delegate: scaladsl.Sink[In, Mat]) extends Graph[SinkSh override def toString: String = delegate.toString - /** - * Converts this Sink to its Scala DSL counterpart. - */ + /** Converts this Sink to its Scala DSL counterpart. */ def asScala: scaladsl.Sink[In, Mat] = delegate /** @@ -526,9 +512,7 @@ final class Sink[In, Mat](delegate: scaladsl.Sink[In, Mat]) extends Graph[SinkSh def runWith[M](source: Graph[SourceShape[In], M], systemProvider: ClassicActorSystemProvider): M = asScala.runWith(source)(SystemMaterializer(systemProvider.classicSystem).materializer) - /** - * Connect this `Sink` to a `Source` and run it. - */ + /** Connect this `Sink` to a `Source` and run it. 
*/ def runWith[M](source: Graph[SourceShape[In], M], materializer: Materializer): M = asScala.runWith(source)(materializer) @@ -543,9 +527,7 @@ final class Sink[In, Mat](delegate: scaladsl.Sink[In, Mat]) extends Graph[SinkSh def contramap[In2](f: function.Function[In2, In]): Sink[In2, Mat] = javadsl.Flow.fromFunction(f).toMat(this, Keep.right[NotUsed, Mat]) - /** - * Transform only the materialized value of this Sink, leaving all other properties as they were. - */ + /** Transform only the materialized value of this Sink, leaving all other properties as they were. */ def mapMaterializedValue[Mat2](f: function.Function[Mat, Mat2]): Sink[In, Mat2] = new Sink(delegate.mapMaterializedValue(f.apply _)) @@ -597,15 +579,11 @@ final class Sink[In, Mat](delegate: scaladsl.Sink[In, Mat]) extends Graph[SinkSh override def addAttributes(attr: Attributes): javadsl.Sink[In, Mat] = new Sink(delegate.addAttributes(attr)) - /** - * Add a ``name`` attribute to this Sink. - */ + /** Add a ``name`` attribute to this Sink. */ override def named(name: String): javadsl.Sink[In, Mat] = new Sink(delegate.named(name)) - /** - * Put an asynchronous boundary around this `Sink` - */ + /** Put an asynchronous boundary around this `Sink` */ override def async: javadsl.Sink[In, Mat] = new Sink(delegate.async) diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Source.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Source.scala index 2159617eec7..b6db721eafe 100755 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Source.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Source.scala @@ -42,9 +42,7 @@ object Source { */ def empty[O](): Source[O, NotUsed] = _empty.asInstanceOf[Source[O, NotUsed]] - /** - * Create a `Source` with no elements. The result is the same as calling `Source.empty()` - */ + /** Create a `Source` with no elements. 
The result is the same as calling `Source.empty()` */ def empty[T](@unused clazz: Class[T]): Source[T, NotUsed] = empty[T]() /** @@ -233,9 +231,7 @@ object Source { def single[T](element: T): Source[T, NotUsed] = new Source(scaladsl.Source.single(element)) - /** - * Create a `Source` that will continually emit the given element. - */ + /** Create a `Source` that will continually emit the given element. */ def repeat[T](element: T): Source[T, NotUsed] = new Source(scaladsl.Source.repeat(element)) @@ -246,15 +242,11 @@ object Source { def unfold[S, E](s: S, f: function.Function[S, Optional[Pair[S, E]]]): Source[E, NotUsed] = new Source(scaladsl.Source.unfold(s)((s: S) => f.apply(s).asScala.map(_.toScala))) - /** - * Same as [[unfold]], but uses an async function to generate the next state-element tuple. - */ + /** Same as [[unfold]], but uses an async function to generate the next state-element tuple. */ def unfoldAsync[S, E](s: S, f: function.Function[S, CompletionStage[Optional[Pair[S, E]]]]): Source[E, NotUsed] = new Source(scaladsl.Source.fromGraph(new UnfoldAsyncJava[S, E](s, f))) - /** - * Create a `Source` that immediately ends the stream with the `cause` failure to every connected `Sink`. - */ + /** Create a `Source` that immediately ends the stream with the `cause` failure to every connected `Sink`. 
*/ def failed[T](cause: Throwable): Source[T, NotUsed] = new Source(scaladsl.Source.failed(cause)) @@ -386,9 +378,7 @@ object Source { lazySource[T, CompletionStage[M]](() => completionStageSource(create.create())) .mapMaterializedValue(_.thenCompose(csm => csm)) - /** - * Creates a `Source` that is materialized as a [[org.reactivestreams.Subscriber]] - */ + /** Creates a `Source` that is materialized as a [[org.reactivestreams.Subscriber]] */ def asSubscriber[T](): Source[T, Subscriber[T]] = new Source(scaladsl.Source.asSubscriber) @@ -435,19 +425,24 @@ object Source { failureMatcher: akka.japi.function.Function[Any, java.util.Optional[Throwable]], bufferSize: Int, overflowStrategy: OverflowStrategy): Source[T, ActorRef] = - new Source(scaladsl.Source.actorRef(new JavaPartialFunction[Any, CompletionStrategy] { - override def apply(x: Any, isCheck: Boolean): CompletionStrategy = { - val result = completionMatcher(x) - if (!result.isPresent) throw JavaPartialFunction.noMatch() - else result.get() - } - }, new JavaPartialFunction[Any, Throwable] { - override def apply(x: Any, isCheck: Boolean): Throwable = { - val result = failureMatcher(x) - if (!result.isPresent) throw JavaPartialFunction.noMatch() - else result.get() - } - }, bufferSize, overflowStrategy)) + new Source( + scaladsl.Source.actorRef( + new JavaPartialFunction[Any, CompletionStrategy] { + override def apply(x: Any, isCheck: Boolean): CompletionStrategy = { + val result = completionMatcher(x) + if (!result.isPresent) throw JavaPartialFunction.noMatch() + else result.get() + } + }, + new JavaPartialFunction[Any, Throwable] { + override def apply(x: Any, isCheck: Boolean): Throwable = { + val result = failureMatcher(x) + if (!result.isPresent) throw JavaPartialFunction.noMatch() + else result.get() + } + }, + bufferSize, + overflowStrategy)) /** * Creates a `Source` that is materialized as an [[akka.actor.ActorRef]]. 
@@ -494,11 +489,16 @@ object Source { @Deprecated @deprecated("Use variant accepting completion and failure matchers", "2.6.0") def actorRef[T](bufferSize: Int, overflowStrategy: OverflowStrategy): Source[T, ActorRef] = - new Source(scaladsl.Source.actorRef({ - case akka.actor.Status.Success(s: CompletionStrategy) => s - case akka.actor.Status.Success(_) => CompletionStrategy.Draining - case akka.actor.Status.Success => CompletionStrategy.Draining - }, { case akka.actor.Status.Failure(cause) => cause }, bufferSize, overflowStrategy)) + new Source( + scaladsl.Source.actorRef( + { + case akka.actor.Status.Success(s: CompletionStrategy) => s + case akka.actor.Status.Success(_) => CompletionStrategy.Draining + case akka.actor.Status.Success => CompletionStrategy.Draining + }, + { case akka.actor.Status.Failure(cause) => cause }, + bufferSize, + overflowStrategy)) /** * Creates a `Source` that is materialized as an [[akka.actor.ActorRef]]. @@ -518,19 +518,23 @@ object Source { ackMessage: Any, completionMatcher: akka.japi.function.Function[Any, java.util.Optional[CompletionStrategy]], failureMatcher: akka.japi.function.Function[Any, java.util.Optional[Throwable]]): Source[T, ActorRef] = - new Source(scaladsl.Source.actorRefWithBackpressure(ackMessage, new JavaPartialFunction[Any, CompletionStrategy] { - override def apply(x: Any, isCheck: Boolean): CompletionStrategy = { - val result = completionMatcher(x) - if (!result.isPresent) throw JavaPartialFunction.noMatch() - else result.get() - } - }, new JavaPartialFunction[Any, Throwable] { - override def apply(x: Any, isCheck: Boolean): Throwable = { - val result = failureMatcher(x) - if (!result.isPresent) throw JavaPartialFunction.noMatch() - else result.get() - } - })) + new Source( + scaladsl.Source.actorRefWithBackpressure( + ackMessage, + new JavaPartialFunction[Any, CompletionStrategy] { + override def apply(x: Any, isCheck: Boolean): CompletionStrategy = { + val result = completionMatcher(x) + if (!result.isPresent) 
throw JavaPartialFunction.noMatch() + else result.get() + } + }, + new JavaPartialFunction[Any, Throwable] { + override def apply(x: Any, isCheck: Boolean): Throwable = { + val result = failureMatcher(x) + if (!result.isPresent) throw JavaPartialFunction.noMatch() + else result.get() + } + })) /** * Creates a `Source` that is materialized as an [[akka.actor.ActorRef]]. @@ -554,19 +558,23 @@ object Source { ackMessage: Any, completionMatcher: akka.japi.function.Function[Any, java.util.Optional[CompletionStrategy]], failureMatcher: akka.japi.function.Function[Any, java.util.Optional[Throwable]]): Source[T, ActorRef] = - new Source(scaladsl.Source.actorRefWithBackpressure(ackMessage, new JavaPartialFunction[Any, CompletionStrategy] { - override def apply(x: Any, isCheck: Boolean): CompletionStrategy = { - val result = completionMatcher(x) - if (!result.isPresent) throw JavaPartialFunction.noMatch() - else result.get() - } - }, new JavaPartialFunction[Any, Throwable] { - override def apply(x: Any, isCheck: Boolean): Throwable = { - val result = failureMatcher(x) - if (!result.isPresent) throw JavaPartialFunction.noMatch() - else result.get() - } - })) + new Source( + scaladsl.Source.actorRefWithBackpressure( + ackMessage, + new JavaPartialFunction[Any, CompletionStrategy] { + override def apply(x: Any, isCheck: Boolean): CompletionStrategy = { + val result = completionMatcher(x) + if (!result.isPresent) throw JavaPartialFunction.noMatch() + else result.get() + } + }, + new JavaPartialFunction[Any, Throwable] { + override def apply(x: Any, isCheck: Boolean): Throwable = { + val result = failureMatcher(x) + if (!result.isPresent) throw JavaPartialFunction.noMatch() + else result.get() + } + })) /** * Creates a `Source` that is materialized as an [[akka.actor.ActorRef]]. 
@@ -590,11 +598,15 @@ object Source { @Deprecated @deprecated("Use actorRefWithBackpressure accepting completion and failure matchers", "2.6.0") def actorRefWithAck[T](ackMessage: Any): Source[T, ActorRef] = - new Source(scaladsl.Source.actorRefWithBackpressure(ackMessage, { - case akka.actor.Status.Success(s: CompletionStrategy) => s - case akka.actor.Status.Success(_) => CompletionStrategy.Draining - case akka.actor.Status.Success => CompletionStrategy.Draining - }, { case akka.actor.Status.Failure(cause) => cause })) + new Source( + scaladsl.Source.actorRefWithBackpressure( + ackMessage, + { + case akka.actor.Status.Success(s: CompletionStrategy) => s + case akka.actor.Status.Success(_) => CompletionStrategy.Draining + case akka.actor.Status.Success => CompletionStrategy.Draining + }, + { case akka.actor.Status.Failure(cause) => cause })) /** * A graph with the shape of a source logically is a source, this method makes @@ -625,9 +637,7 @@ object Source { def setup[T, M](factory: BiFunction[ActorMaterializer, Attributes, Source[T, M]]): Source[T, CompletionStage[M]] = scaladsl.Source.setup((mat, attr) => factory(mat, attr).asScala).mapMaterializedValue(_.toJava).asJava - /** - * Combines several sources with fan-in strategy like [[Merge]] or [[Concat]] into a single [[Source]]. - */ + /** Combines several sources with fan-in strategy like [[Merge]] or [[Concat]] into a single [[Source]]. */ def combine[T, U]( first: Source[T, _ <: Any], second: Source[T, _ <: Any], @@ -640,9 +650,7 @@ object Source { new Source(scaladsl.Source.combine(first.asScala, second.asScala, seq: _*)(num => fanInStrategy.apply(num))) } - /** - * Combines two sources with fan-in strategy like `Merge` or `Concat` and returns `Source` with a materialized value. - */ + /** Combines two sources with fan-in strategy like `Merge` or `Concat` and returns `Source` with a materialized value. 
*/ def combineMat[T, U, M1, M2, M]( first: Source[T, M1], second: Source[T, M2], @@ -654,9 +662,7 @@ object Source { scaladsl.Source.combineMat(first.asScala, second.asScala)(num => fanInStrategy.apply(num))(combinerToScala(matF))) } - /** - * Combines several sources with fan-in strategy like [[Merge]] or [[Concat]] into a single [[Source]]. - */ + /** Combines several sources with fan-in strategy like [[Merge]] or [[Concat]] into a single [[Source]]. */ def combine[T, U, M]( sources: java.util.List[_ <: Graph[SourceShape[T], M]], fanInStrategy: function.Function[java.lang.Integer, Graph[UniformFanInShape[T, U], NotUsed]]) @@ -664,14 +670,13 @@ object Source { val seq = if (sources != null) Util.immutableSeq(sources).collect { case source: Source[T @unchecked, M @unchecked] => source.asScala case other => other - } else immutable.Seq() + } + else immutable.Seq() import akka.util.ccompat.JavaConverters._ new Source(scaladsl.Source.combine(seq)(size => fanInStrategy(size)).mapMaterializedValue(_.asJava)) } - /** - * Combine the elements of multiple streams into a stream of lists. - */ + /** Combine the elements of multiple streams into a stream of lists. */ def zipN[T](sources: java.util.List[Source[T, _ <: Any]]): Source[java.util.List[T], NotUsed] = { val seq = if (sources != null) Util.immutableSeq(sources).map(_.asScala) else immutable.Seq() new Source(scaladsl.Source.zipN(seq).map(_.asJava)) @@ -918,14 +923,10 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ override def toString: String = delegate.toString - /** - * Converts this Java DSL element to its Scala DSL counterpart. - */ + /** Converts this Java DSL element to its Scala DSL counterpart. */ def asScala: scaladsl.Source[Out, Mat] = delegate - /** - * Transform only the materialized value of this Source, leaving all other properties as they were. - */ + /** Transform only the materialized value of this Source, leaving all other properties as they were. 
*/ def mapMaterializedValue[Mat2](f: function.Function[Mat, Mat2]): Source[Out, Mat2] = new Source(delegate.mapMaterializedValue(f.apply _)) @@ -1523,7 +1524,6 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def wireTap(that: Graph[SinkShape[Out], _]): javadsl.Source[Out, Mat] = new Source(delegate.wireTap(that)) @@ -1665,7 +1665,8 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ val seq = if (those != null) Util.immutableSeq(those).collect { case source: Source[Out @unchecked, _] => source.asScala case other => other - } else immutable.Seq() + } + else immutable.Seq() new Source(delegate.interleaveAll(seq, segmentSize, eagerClose)) } @@ -1744,7 +1745,8 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ val seq = if (those != null) Util.immutableSeq(those).collect { case source: Source[Out @unchecked, _] => source.asScala case other => other - } else immutable.Seq() + } + else immutable.Seq() new Source(delegate.mergeAll(seq, eagerComplete)) } @@ -2214,7 +2216,6 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def mapError(pf: PartialFunction[Throwable, Throwable]): javadsl.Source[Out, Mat] = new Source(delegate.mapError(pf)) @@ -2236,7 +2237,6 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def mapError[E <: Throwable](clazz: Class[E], f: function.Function[E, Throwable]): javadsl.Source[Out, Mat] = mapError { @@ -2368,7 +2368,6 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * '''Completes 
when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def recoverWithRetries( attempts: Int, @@ -2405,9 +2404,11 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ attempts: Int, clazz: Class[_ <: Throwable], supplier: Supplier[Graph[SourceShape[Out], NotUsed]]): Source[Out, Mat] = - recoverWithRetries(attempts, { - case elem if clazz.isInstance(elem) => supplier.get() - }: PartialFunction[Throwable, Graph[SourceShape[Out], NotUsed]]) + recoverWithRetries( + attempts, + { + case elem if clazz.isInstance(elem) => supplier.get() + }: PartialFunction[Throwable, Graph[SourceShape[Out], NotUsed]]) /** * Transform each input element into an `Iterable` of output elements that is @@ -2580,9 +2581,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ def mapAsync[T](parallelism: Int, f: function.Function[Out, CompletionStage[T]]): javadsl.Source[T, Mat] = new Source(delegate.mapAsync(parallelism)(x => f(x).toScala)) - /** - * @see [[akka.stream.javadsl.Flow.mapAsyncPartitioned]] - */ + /** @see [[akka.stream.javadsl.Flow.mapAsyncPartitioned]] */ def mapAsyncPartitioned[T, P]( parallelism: Int, perPartition: Int, @@ -2716,7 +2715,6 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def filter(p: function.Predicate[Out]): javadsl.Source[Out, Mat] = new Source(delegate.filter(p.test)) @@ -3613,7 +3611,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * * @param n the number of elements to accumulate before materializing the downstream flow. * @param f a function that produces the downstream flow based on the upstream's prefix. 
- **/ + */ def flatMapPrefix[Out2, Mat2]( n: Int, f: function.Function[java.lang.Iterable[Out], javadsl.Flow[Out, Out2, Mat2]]): javadsl.Source[Out2, Mat] = { @@ -4001,7 +3999,6 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle(elements: Int, per: java.time.Duration): javadsl.Source[Out, Mat] = new Source(delegate.throttle(elements, per.asScala)) @@ -4040,7 +4037,6 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle( elements: Int, @@ -4079,7 +4075,6 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle( cost: Int, @@ -4124,7 +4119,6 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle( cost: Int, @@ -4210,15 +4204,11 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ override def addAttributes(attr: Attributes): javadsl.Source[Out, Mat] = new Source(delegate.addAttributes(attr)) - /** - * Add a ``name`` attribute to this Source. - */ + /** Add a ``name`` attribute to this Source. 
*/ override def named(name: String): javadsl.Source[Out, Mat] = new Source(delegate.named(name)) - /** - * Put an asynchronous boundary around this `Source` - */ + /** Put an asynchronous boundary around this `Source` */ override def async: javadsl.Source[Out, Mat] = new Source(delegate.async) @@ -4417,9 +4407,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ def logWithMarker(name: String, marker: function.Function[Out, LogMarker]): javadsl.Source[Out, Mat] = this.logWithMarker(name, marker, ConstantFun.javaIdentityFunction[Out], null) - /** - * Transform this source whose element is ``e`` into a source producing tuple ``(e, f(e))`` - **/ + /** Transform this source whose element is ``e`` into a source producing tuple ``(e, f(e))`` */ def asSourceWithContext[Ctx](extractContext: function.Function[Out, Ctx]): SourceWithContext[Out, Ctx, Mat] = new scaladsl.SourceWithContext(this.asScala.map(x => (x, extractContext.apply(x)))).asJava @@ -4450,8 +4438,8 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ .aggregateWithBoundary(() => allocate.get())( aggregate = (agg, out) => aggregate.apply(agg, out).toScala, harvest = agg => harvest.apply(agg), - emitOnTimer = Option(emitOnTimer).map { - case Pair(predicate, duration) => (agg => predicate.test(agg), duration.asScala) + emitOnTimer = Option(emitOnTimer).map { case Pair(predicate, duration) => + (agg => predicate.test(agg), duration.asScala) }) .asJava diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/SourceWithContext.scala b/akka-stream/src/main/scala/akka/stream/javadsl/SourceWithContext.scala index 44c7df02e0e..d79f622740f 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/SourceWithContext.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/SourceWithContext.scala @@ -23,9 +23,7 @@ import akka.util.ccompat.JavaConverters._ object SourceWithContext { - /** - * Creates a SourceWithContext from a regular flow that operates 
on `Pair` elements. - */ + /** Creates a SourceWithContext from a regular flow that operates on `Pair` elements. */ def fromPairs[Out, CtxOut, Mat](under: Source[Pair[Out, CtxOut], Mat]): SourceWithContext[Out, CtxOut, Mat] = { new SourceWithContext(scaladsl.SourceWithContext.fromTuples(under.asScala.map(_.toScala))) } @@ -214,9 +212,7 @@ final class SourceWithContext[Out, Ctx, +Mat](delegate: scaladsl.SourceWithConte def mapConcat[Out2](f: function.Function[Out, _ <: java.lang.Iterable[Out2]]): SourceWithContext[Out2, Ctx, Mat] = viaScala(_.mapConcat(elem => Util.immutableSeq(f.apply(elem)))) - /** - * Apply the given function to each context element (leaving the data elements unchanged). - */ + /** Apply the given function to each context element (leaving the data elements unchanged). */ def mapContext[Ctx2](extractContext: function.Function[Ctx, Ctx2]): SourceWithContext[Out, Ctx2, Mat] = viaScala(_.mapContext(extractContext.apply)) diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/StreamConverters.scala b/akka-stream/src/main/scala/akka/stream/javadsl/StreamConverters.scala index 5f83d86f0da..f2e4c4caa87 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/StreamConverters.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/StreamConverters.scala @@ -16,9 +16,7 @@ import akka.stream.scaladsl.SinkToCompletionStage import akka.stream.scaladsl.SourceToCompletionStage import akka.util.ByteString -/** - * Converters for interacting with the blocking `java.io` streams APIs and Java 8 Streams - */ +/** Converters for interacting with the blocking `java.io` streams APIs and Java 8 Streams */ object StreamConverters { /** diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/StreamRefs.scala b/akka-stream/src/main/scala/akka/stream/javadsl/StreamRefs.scala index 8f43ff7ca65..dc68658251c 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/StreamRefs.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/StreamRefs.scala @@ -6,9 
+6,7 @@ package akka.stream.javadsl import akka.stream._ -/** - * Factories for creating stream refs. - */ +/** Factories for creating stream refs. */ object StreamRefs { /** diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/SubFlow.scala b/akka-stream/src/main/scala/akka/stream/javadsl/SubFlow.scala index 2362dd311d5..81f0aefdb18 100755 --- a/akka-stream/src/main/scala/akka/stream/javadsl/SubFlow.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/SubFlow.scala @@ -164,7 +164,6 @@ final class SubFlow[In, Out, Mat]( * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def wireTap(f: function.Procedure[Out]): SubFlow[In, Out, Mat] = new SubFlow(delegate.wireTap(f(_))) @@ -342,9 +341,7 @@ final class SubFlow[In, Out, Mat]( def mapAsync[T](parallelism: Int, f: function.Function[Out, CompletionStage[T]]): SubFlow[In, T, Mat] = new SubFlow(delegate.mapAsync(parallelism)(x => f(x).toScala)) - /** - * @see [[akka.stream.javadsl.Flow.mapAsyncPartitioned]] - */ + /** @see [[akka.stream.javadsl.Flow.mapAsyncPartitioned]] */ def mapAsyncPartitioned[T, P]( parallelism: Int, perPartition: Int, @@ -402,7 +399,6 @@ final class SubFlow[In, Out, Mat]( * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def filter(p: function.Predicate[Out]): SubFlow[In, Out, Mat] = new SubFlow(delegate.filter(p.test)) @@ -990,7 +986,6 @@ final class SubFlow[In, Out, Mat]( * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def recover(pf: PartialFunction[Throwable, Out]): SubFlow[In, Out, Mat] = new SubFlow(delegate.recover(pf)) @@ -1013,7 +1008,6 @@ final class SubFlow[In, Out, Mat]( * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def recoverWith( pf: PartialFunction[Throwable, Graph[SourceShape[Out], NotUsed]]): SubFlow[In, Out, 
Mat @uncheckedVariance] = @@ -1091,7 +1085,6 @@ final class SubFlow[In, Out, Mat]( * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def recoverWithRetries( attempts: Int, @@ -1115,7 +1108,6 @@ final class SubFlow[In, Out, Mat]( * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def mapError(pf: PartialFunction[Throwable, Throwable]): SubFlow[In, Out, Mat @uncheckedVariance] = new SubFlow(delegate.mapError(pf)) @@ -1137,7 +1129,6 @@ final class SubFlow[In, Out, Mat]( * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def mapError[E <: Throwable]( clazz: Class[E], @@ -1211,7 +1202,6 @@ final class SubFlow[In, Out, Mat]( * * @param seed Provides the first state for a conflated value using the first unconsumed element as a start * @param aggregate Takes the currently aggregated value and the current pending element to produce a new aggregate - * */ def conflateWithSeed[S]( seed: function.Function[Out, S], @@ -1242,7 +1232,6 @@ final class SubFlow[In, Out, Mat]( * see also [[SubFlow.conflateWithSeed]] [[SubFlow.batch]] [[SubFlow.batchWeighted]] * * @param aggregate Takes the currently aggregated value and the current pending element to produce a new aggregate - * */ def conflate(aggregate: function.Function2[Out, Out, Out]): SubFlow[In, Out, Mat] = new SubFlow(delegate.conflate(aggregate.apply)) @@ -1470,7 +1459,7 @@ final class SubFlow[In, Out, Mat]( * * @param n the number of elements to accumulate before materializing the downstream flow. * @param f a function that produces the downstream flow based on the upstream's prefix. 
- **/ + */ def flatMapPrefix[Out2, Mat2]( n: Int, f: function.Function[java.lang.Iterable[Out], javadsl.Flow[Out, Out2, Mat2]]): SubFlow[In, Out2, Mat] = { @@ -1490,7 +1479,6 @@ final class SubFlow[In, Out, Mat]( * '''Completes when''' upstream completes and all consumed substreams complete * * '''Cancels when''' downstream cancels - * */ def flatMapConcat[T, M](f: function.Function[Out, _ <: Graph[SourceShape[T], M]]): SubFlow[In, T, Mat] = new SubFlow(delegate.flatMapConcat(x => f(x))) @@ -1764,7 +1752,8 @@ final class SubFlow[In, Out, Mat]( val seq = if (those != null) Util.immutableSeq(those).collect { case source: Source[Out @unchecked, _] => source.asScala case other => other - } else immutable.Seq() + } + else immutable.Seq() new SubFlow(delegate.mergeAll(seq, eagerComplete)) } @@ -1821,7 +1810,8 @@ final class SubFlow[In, Out, Mat]( val seq = if (those != null) Util.immutableSeq(those).collect { case source: Source[Out @unchecked, _] => source.asScala case other => other - } else immutable.Seq() + } + else immutable.Seq() new SubFlow(delegate.interleaveAll(seq, segmentSize, eagerClose)) } @@ -2095,7 +2085,6 @@ final class SubFlow[In, Out, Mat]( * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle(elements: Int, per: java.time.Duration): javadsl.SubFlow[In, Out, Mat] = new SubFlow(delegate.throttle(elements, per.asScala)) @@ -2134,7 +2123,6 @@ final class SubFlow[In, Out, Mat]( * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle( elements: Int, @@ -2173,7 +2161,6 @@ final class SubFlow[In, Out, Mat]( * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle( cost: Int, @@ -2218,7 +2205,6 @@ final class SubFlow[In, Out, Mat]( * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle( cost: Int, @@ -2275,15 +2261,11 @@ final class SubFlow[In, Out, Mat]( def 
addAttributes(attr: Attributes): SubFlow[In, Out, Mat] = new SubFlow(delegate.addAttributes(attr)) - /** - * Add a ``name`` attribute to this Flow. - */ + /** Add a ``name`` attribute to this Flow. */ def named(name: String): SubFlow[In, Out, Mat] = new SubFlow(delegate.named(name)) - /** - * Put an asynchronous boundary around this `SubFlow` - */ + /** Put an asynchronous boundary around this `SubFlow` */ def async: SubFlow[In, Out, Mat] = new SubFlow(delegate.async) @@ -2492,8 +2474,8 @@ final class SubFlow[In, Out, Mat]( asScala.aggregateWithBoundary(() => allocate.get())( aggregate = (agg, out) => aggregate.apply(agg, out).toScala, harvest = agg => harvest.apply(agg), - emitOnTimer = Option(emitOnTimer).map { - case Pair(predicate, duration) => (agg => predicate.test(agg), duration.asScala) + emitOnTimer = Option(emitOnTimer).map { case Pair(predicate, duration) => + (agg => predicate.test(agg), duration.asScala) })) } diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/SubSource.scala b/akka-stream/src/main/scala/akka/stream/javadsl/SubSource.scala index ded06e246aa..7452ebd7f19 100755 --- a/akka-stream/src/main/scala/akka/stream/javadsl/SubSource.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/SubSource.scala @@ -155,7 +155,6 @@ final class SubSource[Out, Mat]( * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def wireTap(f: function.Procedure[Out]): SubSource[Out, Mat] = new SubSource(delegate.wireTap(f(_))) @@ -333,9 +332,7 @@ final class SubSource[Out, Mat]( def mapAsync[T](parallelism: Int, f: function.Function[Out, CompletionStage[T]]): SubSource[T, Mat] = new SubSource(delegate.mapAsync(parallelism)(x => f(x).toScala)) - /** - * @see [[akka.stream.javadsl.Source.mapAsyncPartitioned]] - */ + /** @see [[akka.stream.javadsl.Source.mapAsyncPartitioned]] */ def mapAsyncPartitioned[T, P]( parallelism: Int, perPartition: Int, @@ -393,7 +390,6 @@ final class SubSource[Out, Mat]( * '''Completes when''' 
upstream completes * * '''Cancels when''' downstream cancels - * */ def filter(p: function.Predicate[Out]): SubSource[Out, Mat] = new SubSource(delegate.filter(p.test)) @@ -977,7 +973,6 @@ final class SubSource[Out, Mat]( * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def recover(pf: PartialFunction[Throwable, Out]): SubSource[Out, Mat] = new SubSource(delegate.recover(pf)) @@ -998,7 +993,6 @@ final class SubSource[Out, Mat]( * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def recoverWith(pf: PartialFunction[Throwable, Graph[SourceShape[Out], NotUsed]]): SubSource[Out, Mat] = new SubSource(delegate.recoverWith(pf)) @@ -1073,7 +1067,6 @@ final class SubSource[Out, Mat]( * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def recoverWithRetries( attempts: Int, @@ -1097,7 +1090,6 @@ final class SubSource[Out, Mat]( * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def mapError(pf: PartialFunction[Throwable, Throwable]): SubSource[Out, Mat] = new SubSource(delegate.mapError(pf)) @@ -1119,7 +1111,6 @@ final class SubSource[Out, Mat]( * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def mapError[E <: Throwable](clazz: Class[E], f: function.Function[E, Throwable]): javadsl.SubSource[Out, Mat] = mapError { @@ -1191,7 +1182,6 @@ final class SubSource[Out, Mat]( * * @param seed Provides the first state for a conflated value using the first unconsumed element as a start * @param aggregate Takes the currently aggregated value and the current pending element to produce a new aggregate - * */ def conflateWithSeed[S]( seed: 
function.Function[Out, S], @@ -1222,7 +1212,6 @@ final class SubSource[Out, Mat]( * see also [[SubSource.conflateWithSeed]] [[SubSource.batch]] [[SubSource.batchWeighted]] * * @param aggregate Takes the currently aggregated value and the current pending element to produce a new aggregate - * */ def conflate(aggregate: function.Function2[Out, Out, Out]): SubSource[Out, Mat] = new SubSource(delegate.conflate(aggregate.apply)) @@ -1448,7 +1437,7 @@ final class SubSource[Out, Mat]( * * @param n the number of elements to accumulate before materializing the downstream flow. * @param f a function that produces the downstream flow based on the upstream's prefix. - **/ + */ def flatMapPrefix[Out2, Mat2]( n: Int, f: function.Function[java.lang.Iterable[Out], javadsl.Flow[Out, Out2, Mat2]]): javadsl.SubSource[Out2, Mat] = { @@ -1468,7 +1457,6 @@ final class SubSource[Out, Mat]( * '''Completes when''' upstream completes and all consumed substreams complete * * '''Cancels when''' downstream cancels - * */ def flatMapConcat[T, M](f: function.Function[Out, _ <: Graph[SourceShape[T], M]]): SubSource[T, Mat] = new SubSource(delegate.flatMapConcat(x => f(x))) @@ -1742,7 +1730,8 @@ final class SubSource[Out, Mat]( val seq = if (those != null) Util.immutableSeq(those).collect { case source: Source[Out @unchecked, _] => source.asScala case other => other - } else immutable.Seq() + } + else immutable.Seq() new SubSource(delegate.mergeAll(seq, eagerComplete)) } @@ -1800,7 +1789,8 @@ final class SubSource[Out, Mat]( val seq = if (those != null) Util.immutableSeq(those).collect { case source: Source[Out @unchecked, _] => source.asScala case other => other - } else immutable.Seq() + } + else immutable.Seq() new SubSource(delegate.interleaveAll(seq, segmentSize, eagerClose)) } @@ -2074,7 +2064,6 @@ final class SubSource[Out, Mat]( * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle(elements: Int, per: java.time.Duration): 
javadsl.SubSource[Out, Mat] = new SubSource(delegate.throttle(elements, per.asScala)) @@ -2113,7 +2102,6 @@ final class SubSource[Out, Mat]( * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle( elements: Int, @@ -2152,7 +2140,6 @@ final class SubSource[Out, Mat]( * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle( cost: Int, @@ -2197,7 +2184,6 @@ final class SubSource[Out, Mat]( * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle( cost: Int, @@ -2254,15 +2240,11 @@ final class SubSource[Out, Mat]( def addAttributes(attr: Attributes): SubSource[Out, Mat] = new SubSource(delegate.addAttributes(attr)) - /** - * Add a ``name`` attribute to this Flow. - */ + /** Add a ``name`` attribute to this Flow. */ def named(name: String): SubSource[Out, Mat] = new SubSource(delegate.named(name)) - /** - * Put an asynchronous boundary around this `SubSource` - */ + /** Put an asynchronous boundary around this `SubSource` */ def async: SubSource[Out, Mat] = new SubSource(delegate.async) @@ -2471,7 +2453,7 @@ final class SubSource[Out, Mat]( asScala.aggregateWithBoundary(() => allocate.get())( aggregate = (agg, out) => aggregate.apply(agg, out).toScala, harvest = agg => harvest.apply(agg), - emitOnTimer = Option(emitOnTimer).map { - case Pair(predicate, duration) => (agg => predicate.test(agg), duration.asScala) + emitOnTimer = Option(emitOnTimer).map { case Pair(predicate, duration) => + (agg => predicate.test(agg), duration.asScala) })) } diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/TLS.scala b/akka-stream/src/main/scala/akka/stream/javadsl/TLS.scala index c4991bcfce8..30d528f16fd 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/TLS.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/TLS.scala @@ -92,9 +92,7 @@ object TLS { */ object TLSPlacebo { - /** - * Returns a reusable [[BidiFlow]] 
instance representing a [[TLSPlacebo$]]. - */ + /** Returns a reusable [[BidiFlow]] instance representing a [[TLSPlacebo$]]. */ def getInstance(): javadsl.BidiFlow[SslTlsOutbound, ByteString, ByteString, SessionBytes, NotUsed] = forJava private val forJava: javadsl.BidiFlow[SslTlsOutbound, ByteString, ByteString, SessionBytes, NotUsed] = diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Tcp.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Tcp.scala index b19df4793c7..863dcd18462 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Tcp.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Tcp.scala @@ -44,9 +44,7 @@ object Tcp extends ExtensionId[Tcp] with ExtensionIdProvider { */ final class ServerBinding @InternalApi private[akka] (delegate: scaladsl.Tcp.ServerBinding) { - /** - * The local address of the endpoint bound by the materialization of the `connections` [[Source]]. - */ + /** The local address of the endpoint bound by the materialization of the `connections` [[Source]]. */ def localAddress: InetSocketAddress = delegate.localAddress /** @@ -57,25 +55,17 @@ object Tcp extends ExtensionId[Tcp] with ExtensionIdProvider { */ def unbind(): CompletionStage[Unit] = delegate.unbind().toJava - /** - * @return A completion operator that is completed when manually unbound, or failed if the server fails - */ + /** @return A completion operator that is completed when manually unbound, or failed if the server fails */ def whenUnbound(): CompletionStage[Done] = delegate.whenUnbound.toJava } - /** - * Represents an accepted incoming TCP connection. - */ + /** Represents an accepted incoming TCP connection. */ class IncomingConnection private[akka] (delegate: scaladsl.Tcp.IncomingConnection) { - /** - * The local address this connection is bound to. - */ + /** The local address this connection is bound to. */ def localAddress: InetSocketAddress = delegate.localAddress - /** - * The remote address this connection is bound to. 
- */ + /** The remote address this connection is bound to. */ def remoteAddress: InetSocketAddress = delegate.remoteAddress /** @@ -107,19 +97,13 @@ object Tcp extends ExtensionId[Tcp] with ExtensionIdProvider { def flow: Flow[ByteString, ByteString, NotUsed] = new Flow(delegate.flow) } - /** - * Represents a prospective outgoing TCP connection. - */ + /** Represents a prospective outgoing TCP connection. */ class OutgoingConnection private[akka] (delegate: scaladsl.Tcp.OutgoingConnection) { - /** - * The remote address this connection is or will be bound to. - */ + /** The remote address this connection is or will be bound to. */ def remoteAddress: InetSocketAddress = delegate.remoteAddress - /** - * The local address of the endpoint bound by the materialization of the connection materialization. - */ + /** The local address of the endpoint bound by the materialization of the connection materialization. */ def localAddress: InetSocketAddress = delegate.localAddress } diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/BidiFlow.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/BidiFlow.scala index a0feb4723ac..e2f61308f28 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/BidiFlow.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/BidiFlow.scala @@ -159,15 +159,11 @@ final class BidiFlow[-I1, +O1, -I2, +O2, +Mat]( new Flow(LinearTraversalBuilder.fromBuilder(resultBuilder, newShape, Keep.right), newShape) } - /** - * Turn this BidiFlow around by 180 degrees, logically flipping it upside down in a protocol stack. - */ + /** Turn this BidiFlow around by 180 degrees, logically flipping it upside down in a protocol stack. */ def reversed: BidiFlow[I2, O2, I1, O1, Mat] = new BidiFlow(traversalBuilder, BidiShape(shape.in2, shape.out2, shape.in1, shape.out1)) - /** - * Transform only the materialized value of this BidiFlow, leaving all other properties as they were. 
- */ + /** Transform only the materialized value of this BidiFlow, leaving all other properties as they were. */ def mapMaterializedValue[Mat2](f: Mat => Mat2): BidiFlow[I1, O1, I2, O2, Mat2] = new BidiFlow(traversalBuilder.transformMat(f.asInstanceOf[Any => Any]), shape) @@ -190,15 +186,11 @@ final class BidiFlow[-I1, +O1, -I2, +O2, +Mat]( override def addAttributes(attr: Attributes): BidiFlow[I1, O1, I2, O2, Mat] = withAttributes(traversalBuilder.attributes and attr) - /** - * Add a ``name`` attribute to this Flow. - */ + /** Add a ``name`` attribute to this Flow. */ override def named(name: String): BidiFlow[I1, O1, I2, O2, Mat] = addAttributes(Attributes.name(name)) - /** - * Put an asynchronous boundary around this `BidiFlow` - */ + /** Put an asynchronous boundary around this `BidiFlow` */ override def async: BidiFlow[I1, O1, I2, O2, Mat] = super.async.asInstanceOf[BidiFlow[I1, O1, I2, O2, Mat]] @@ -258,7 +250,6 @@ object BidiFlow { * | +----------------------+ | * +----------------------------+ * }}} - * */ def fromFlowsMat[I1, O1, I2, O2, M1, M2, M](flow1: Graph[FlowShape[I1, O1], M1], flow2: Graph[FlowShape[I2, O2], M2])( combine: (M1, M2) => M): BidiFlow[I1, O1, I2, O2, M] = { @@ -289,7 +280,6 @@ object BidiFlow { * | +----------------------+ | * +----------------------------+ * }}} - * */ def fromFlows[I1, O1, I2, O2, M1, M2]( flow1: Graph[FlowShape[I1, O1], M1], diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Compression.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Compression.scala index 3f879a19144..06349c5bb6a 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Compression.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Compression.scala @@ -54,7 +54,6 @@ object Compression { * * @param level Compression level (0-9) * @param nowrap if true then use GZIP compatible compression - * */ def deflate(level: Int, nowrap: Boolean): Flow[ByteString, ByteString, NotUsed] = CompressionUtils.compressorFlow(() => new 
DeflateCompressor(level, nowrap)) diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/DelayStrategy.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/DelayStrategy.scala index 2de3f98bf4d..72c7a2928e2 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/DelayStrategy.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/DelayStrategy.scala @@ -14,9 +14,7 @@ import scala.concurrent.duration.{ Duration, FiniteDuration } */ trait DelayStrategy[-T] { - /** - * Returns delay for ongoing element, `Duration.Zero` means passing without delay - */ + /** Returns delay for ongoing element, `Duration.Zero` means passing without delay */ def nextDelay(elem: T): FiniteDuration } diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/FileIO.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/FileIO.scala index ed13aaaf2a2..a897cf0f0a9 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/FileIO.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/FileIO.scala @@ -15,9 +15,7 @@ import akka.stream.impl.Stages.DefaultAttributes import akka.stream.impl.io._ import akka.util.ByteString -/** - * Factories to create sinks and sources from files - */ +/** Factories to create sinks and sources from files */ object FileIO { /** diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala index 4a851f64406..5f54e224310 100755 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala @@ -45,9 +45,7 @@ import akka.util.OptionVal import akka.util.Timeout import akka.util.ccompat._ -/** - * A `Flow` is a set of stream processing steps that has one open input and one open output. - */ +/** A `Flow` is a set of stream processing steps that has one open input and one open output. 
*/ final class Flow[-In, +Out, +Mat]( override val traversalBuilder: LinearTraversalBuilder, override val shape: FlowShape[In, Out]) @@ -148,9 +146,7 @@ final class Flow[-In, +Out, +Mat]( } } - /** - * Transform the materialized value of this Flow, leaving all other properties as they were. - */ + /** Transform the materialized value of this Flow, leaving all other properties as they were. */ override def mapMaterializedValue[Mat2](f: Mat => Mat2): ReprMat[Out, Mat2] = new Flow(traversalBuilder.transformMat(f), shape) @@ -290,14 +286,10 @@ final class Flow[-In, +Out, +Mat]( */ override def addAttributes(attr: Attributes): Repr[Out] = withAttributes(traversalBuilder.attributes and attr) - /** - * Add a ``name`` attribute to this Flow. - */ + /** Add a ``name`` attribute to this Flow. */ override def named(name: String): Repr[Out] = addAttributes(Attributes.name(name)) - /** - * Put an asynchronous boundary around this `Flow` - */ + /** Put an asynchronous boundary around this `Flow` */ override def async: Repr[Out] = super.async.asInstanceOf[Repr[Out]] /** @@ -325,8 +317,8 @@ final class Flow[-In, +Out, +Mat]( * Note that the `ActorSystem` can be used as the implicit `materializer` parameter to use the * [[akka.stream.SystemMaterializer]] for running the stream. 
*/ - def runWith[Mat1, Mat2](source: Graph[SourceShape[In], Mat1], sink: Graph[SinkShape[Out], Mat2])( - implicit materializer: Materializer): (Mat1, Mat2) = + def runWith[Mat1, Mat2](source: Graph[SourceShape[In], Mat1], sink: Graph[SinkShape[Out], Mat2])(implicit + materializer: Materializer): (Mat1, Mat2) = Source.fromGraph(source).via(this).toMat(sink)(Keep.both).run() /** @@ -341,15 +333,14 @@ final class Flow[-In, +Out, +Mat]( .asSubscriber[In] .via(this) .toMat(Sink.asPublisher[Out](false))(Keep.both[Subscriber[In], Publisher[Out]]) - .mapMaterializedValue { - case (sub, pub) => - new Processor[In, Out] { - override def onError(t: Throwable): Unit = sub.onError(t) - override def onSubscribe(s: Subscription): Unit = sub.onSubscribe(s) - override def onComplete(): Unit = sub.onComplete() - override def onNext(t: In): Unit = sub.onNext(t) - override def subscribe(s: Subscriber[_ >: Out]): Unit = pub.subscribe(s) - } + .mapMaterializedValue { case (sub, pub) => + new Processor[In, Out] { + override def onError(t: Throwable): Unit = sub.onError(t) + override def onSubscribe(s: Subscription): Unit = sub.onSubscribe(s) + override def onComplete(): Unit = sub.onComplete() + override def onNext(t: In): Unit = sub.onNext(t) + override def subscribe(s: Subscriber[_ >: Out]): Unit = pub.subscribe(s) + } } /** @@ -362,9 +353,8 @@ final class Flow[-In, +Out, +Mat]( extractContext: Out => CtxOut): FlowWithContext[U, CtxU, Out, CtxOut, Mat] = new FlowWithContext( Flow[(U, CtxU)] - .map { - case (e, ctx) => - collapseContext(e, ctx) + .map { case (e, ctx) => + collapseContext(e, ctx) } .viaMat(this)(Keep.right) .map(e => (e, extractContext(e)))) @@ -381,22 +371,16 @@ object Flow { private[this] val identity: Flow[Any, Any, NotUsed] = new Flow[Any, Any, NotUsed](identityTraversalBuilder, GraphStages.identity.shape) - /** - * Creates a Flow from a Reactive Streams [[org.reactivestreams.Processor]] - */ + /** Creates a Flow from a Reactive Streams 
[[org.reactivestreams.Processor]] */ def fromProcessor[I, O](processorFactory: () => Processor[I, O]): Flow[I, O, NotUsed] = { fromProcessorMat(() => (processorFactory(), NotUsed)) } - /** - * Creates a Flow from a Reactive Streams [[org.reactivestreams.Processor]] and returns a materialized value. - */ + /** Creates a Flow from a Reactive Streams [[org.reactivestreams.Processor]] and returns a materialized value. */ def fromProcessorMat[I, O, M](processorFactory: () => (Processor[I, O], M)): Flow[I, O, M] = fromGraph(ProcessorModule(processorFactory)) - /** - * Returns a `Flow` which outputs all its inputs. - */ + /** Returns a `Flow` which outputs all its inputs. */ def apply[T]: Flow[T, T, NotUsed] = identity.asInstanceOf[Flow[T, T, NotUsed]] /** @@ -758,15 +742,11 @@ object RunnableGraph { } } -/** - * Flow with attached input and output, can be executed. - */ +/** Flow with attached input and output, can be executed. */ final case class RunnableGraph[+Mat](override val traversalBuilder: TraversalBuilder) extends Graph[ClosedShape, Mat] { override def shape = ClosedShape - /** - * Transform only the materialized value of this RunnableGraph, leaving all other properties as they were. - */ + /** Transform only the materialized value of this RunnableGraph, leaving all other properties as they were. 
*/ def mapMaterializedValue[Mat2](f: Mat => Mat2): RunnableGraph[Mat2] = copy(traversalBuilder.transformMat(f.asInstanceOf[Any => Any])) @@ -787,21 +767,15 @@ final case class RunnableGraph[+Mat](override val traversalBuilder: TraversalBui override def named(name: String): RunnableGraph[Mat] = addAttributes(Attributes.name(name)) - /** - * Note that an async boundary around a runnable graph does not make sense - */ + /** Note that an async boundary around a runnable graph does not make sense */ override def async: RunnableGraph[Mat] = super.async.asInstanceOf[RunnableGraph[Mat]] - /** - * Note that an async boundary around a runnable graph does not make sense - */ + /** Note that an async boundary around a runnable graph does not make sense */ override def async(dispatcher: String): RunnableGraph[Mat] = super.async(dispatcher).asInstanceOf[RunnableGraph[Mat]] - /** - * Note that an async boundary around a runnable graph does not make sense - */ + /** Note that an async boundary around a runnable graph does not make sense */ override def async(dispatcher: String, inputBufferSize: Int): RunnableGraph[Mat] = super.async(dispatcher, inputBufferSize).asInstanceOf[RunnableGraph[Mat]] @@ -869,7 +843,6 @@ trait FlowOps[+Out, +Mat] { * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def recover[T >: Out](pf: PartialFunction[Throwable, T]): Repr[T] = via(Recover(pf)) @@ -891,7 +864,6 @@ trait FlowOps[+Out, +Mat] { * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def recoverWith[T >: Out](pf: PartialFunction[Throwable, Graph[SourceShape[T], NotUsed]]): Repr[T] = via(new RecoverWith(-1, pf)) @@ -931,8 +903,8 @@ trait FlowOps[+Out, +Mat] { def onErrorComplete(pf: PartialFunction[Throwable, Boolean]): Repr[Out] = via( Flow[Out] - .recoverWith(pf.andThen({ - case true => Source.empty[Out] + 
.recoverWith(pf.andThen({ case true => + Source.empty[Out] }: PartialFunction[Boolean, Graph[SourceShape[Out], NotUsed]])) .withAttributes(DefaultAttributes.onErrorComplete and SourceLocation.forLambda(pf))) @@ -960,7 +932,6 @@ trait FlowOps[+Out, +Mat] { * * @param attempts Maximum number of retries or -1 to retry indefinitely * @param pf Receives the failure cause and returns the new Source to be materialized if any - * */ def recoverWithRetries[T >: Out]( attempts: Int, @@ -984,7 +955,6 @@ trait FlowOps[+Out, +Mat] { * '''Completes when''' upstream completes or upstream failed with exception pf can handle * * '''Cancels when''' downstream cancels - * */ def mapError(pf: PartialFunction[Throwable, Throwable]): Repr[Out] = via(MapError(pf)) @@ -1001,7 +971,6 @@ trait FlowOps[+Out, +Mat] { * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def map[T](f: Out => T): Repr[T] = via(Map(f)) @@ -1026,7 +995,6 @@ trait FlowOps[+Out, +Mat] { * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def wireTap(f: Out => Unit): Repr[Out] = wireTap(Sink.foreach(f)).named("wireTap") @@ -1047,7 +1015,6 @@ trait FlowOps[+Out, +Mat] { * '''Completes when''' upstream completes and all remaining elements have been emitted * * '''Cancels when''' downstream cancels - * */ def mapConcat[T](f: Out => IterableOnce[T]): Repr[T] = statefulMapConcat(() => f) @@ -1227,7 +1194,8 @@ trait FlowOps[+Out, +Mat] { f: (Out, P) => Future[T]): Repr[T] = if (parallelism == 1) mapAsyncUnordered[T](parallelism = 1) { elem => f(elem, partitioner(elem)) - } else via(MapAsyncPartitioned(parallelism, perPartition, partitioner, f)) + } + else via(MapAsyncPartitioned(parallelism, perPartition, partitioner, f)) /** * Transform this stream by applying the given function to each of the elements @@ -2238,7 +2206,7 @@ trait FlowOps[+Out, +Mat] { * * @param n the number of elements to accumulate before materializing the downstream flow. 
* @param f a function that produces the downstream flow based on the upstream's prefix. - **/ + */ def flatMapPrefix[Out2, Mat2](n: Int)(f: immutable.Seq[Out] => Flow[Out, Out2, Mat2]): Repr[Out2] = { via(new FlatMapPrefix(n, f)) } @@ -2650,7 +2618,6 @@ trait FlowOps[+Out, +Mat] { * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle(elements: Int, per: FiniteDuration, maximumBurst: Int, mode: ThrottleMode): Repr[Out] = throttle(elements, per, maximumBurst, ConstantFun.oneInt, mode) @@ -2685,7 +2652,6 @@ trait FlowOps[+Out, +Mat] { * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle(cost: Int, per: FiniteDuration, costCalculation: (Out) => Int): Repr[Out] = via(new Throttle(cost, per, Throttle.AutomaticMaximumBurst, costCalculation, ThrottleMode.Shaping)) @@ -2727,7 +2693,6 @@ trait FlowOps[+Out, +Mat] { * '''Completes when''' upstream completes * * '''Cancels when''' downstream cancels - * */ def throttle( cost: Int, @@ -2783,8 +2748,8 @@ trait FlowOps[+Out, +Mat] { * * '''Cancels when''' downstream cancels */ - def log(name: String, extract: Out => Any = ConstantFun.scalaIdentityFunction)( - implicit log: LoggingAdapter = null): Repr[Out] = + def log(name: String, extract: Out => Any = ConstantFun.scalaIdentityFunction)(implicit + log: LoggingAdapter = null): Repr[Out] = via(Log(name, extract.asInstanceOf[Any => Any], Option(log))) /** @@ -3224,8 +3189,8 @@ trait FlowOps[+Out, +Mat] { def mergeSorted[U >: Out, M](that: Graph[SourceShape[U], M])(implicit ord: Ordering[U]): Repr[U] = via(mergeSortedGraph(that)) - protected def mergeSortedGraph[U >: Out, M](that: Graph[SourceShape[U], M])( - implicit ord: Ordering[U]): Graph[FlowShape[Out @uncheckedVariance, U], M] = + protected def mergeSortedGraph[U >: Out, M](that: Graph[SourceShape[U], M])(implicit + ord: Ordering[U]): Graph[FlowShape[Out @uncheckedVariance, U], M] = GraphDSL.createGraph(that) { implicit b => r 
=> val merge = b.add(new MergeSorted[U]) r ~> merge.in1 @@ -3357,7 +3322,6 @@ trait FlowOps[+Out, +Mat] { * * When needing a prepend operator that is not detached use [[#prependLazy]] * - * * '''Emits when''' element is available from the given [[Source]] or from current stream when the [[Source]] is completed * * '''Backpressures when''' downstream backpressures @@ -3876,8 +3840,8 @@ trait FlowOpsMat[+Out, +Mat] extends FlowOps[Out, Mat] { * It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners * where appropriate instead of manually writing functions that pass through one of the values. */ - def mergeSortedMat[U >: Out, Mat2, Mat3](that: Graph[SourceShape[U], Mat2])(matF: (Mat, Mat2) => Mat3)( - implicit ord: Ordering[U]): ReprMat[U, Mat3] = + def mergeSortedMat[U >: Out, Mat2, Mat3](that: Graph[SourceShape[U], Mat2])(matF: (Mat, Mat2) => Mat3)(implicit + ord: Ordering[U]): ReprMat[U, Mat3] = viaMat(mergeSortedGraph(that))(matF) /** @@ -4042,9 +4006,7 @@ trait FlowOpsMat[+Out, +Mat] extends FlowOps[Out, Mat] { def watchTermination[Mat2]()(matF: (Mat, Future[Done]) => Mat2): ReprMat[Out, Mat2] = viaMat(GraphStages.terminationWatcher)(matF) - /** - * Transform the materialized value of this graph, leaving all other properties as they were. - */ + /** Transform the materialized value of this graph, leaving all other properties as they were. */ def mapMaterializedValue[Mat2](f: Mat => Mat2): ReprMat[Out, Mat2] /** diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/FlowWithContext.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/FlowWithContext.scala index 23b49ee8504..5a6af9305b7 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/FlowWithContext.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/FlowWithContext.scala @@ -12,17 +12,13 @@ import akka.stream._ object FlowWithContext { - /** - * Creates an "empty" FlowWithContext that passes elements through with their context unchanged. 
- */ + /** Creates an "empty" FlowWithContext that passes elements through with their context unchanged. */ def apply[In, Ctx]: FlowWithContext[In, Ctx, In, Ctx, akka.NotUsed] = { val under = Flow[(In, Ctx)] new FlowWithContext[In, Ctx, In, Ctx, akka.NotUsed](under) } - /** - * Creates a FlowWithContext from a regular flow that operates on a tuple of `(data, context)` elements. - */ + /** Creates a FlowWithContext from a regular flow that operates on a tuple of `(data, context)` elements. */ def fromTuples[In, CtxIn, Out, CtxOut, Mat]( flow: Flow[(In, CtxIn), (Out, CtxOut), Mat]): FlowWithContext[In, CtxIn, Out, CtxOut, Mat] = new FlowWithContext(flow) @@ -35,7 +31,6 @@ object FlowWithContext { * operations. * * An "empty" flow can be created by calling `FlowWithContext[Ctx, T]`. - * */ final class FlowWithContext[-In, -CtxIn, +Out, +CtxOut, +Mat](delegate: Flow[(In, CtxIn), (Out, CtxOut), Mat]) extends GraphDelegate(delegate) @@ -89,8 +84,9 @@ final class FlowWithContext[-In, -CtxIn, +Out, +CtxOut, +Mat](delegate: Flow[(In javadsl.Flow .create[Pair[JIn, JCtxIn]]() .map(_.toScala) - .viaMat(delegate.map { - case (first, second) => + .viaMat( + delegate.map { case (first, second) => Pair[JOut, JCtxOut](first, second) - }.asJava, javadsl.Keep.right[NotUsed, JMat])) + }.asJava, + javadsl.Keep.right[NotUsed, JMat])) } diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/FlowWithContextOps.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/FlowWithContextOps.scala index c37f9b2f4e3..c2be9ba35c2 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/FlowWithContextOps.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/FlowWithContextOps.scala @@ -20,7 +20,6 @@ import akka.util.ConstantFun /** * Shared stream operations for [[FlowWithContext]] and [[SourceWithContext]] that automatically propagate a context * element with each data element. 
- * */ @ccompatUsedUntil213 trait FlowWithContextOps[+Out, +Ctx, +Mat] { @@ -100,8 +99,8 @@ trait FlowWithContextOps[+Out, +Ctx, +Mat] { * @see [[akka.stream.scaladsl.FlowOps.mapAsync]] */ def mapAsync[Out2](parallelism: Int)(f: Out => Future[Out2]): Repr[Out2, Ctx] = - via(flow.mapAsync(parallelism) { - case (e, ctx) => f(e).map(o => (o, ctx))(ExecutionContexts.parasitic) + via(flow.mapAsync(parallelism) { case (e, ctx) => + f(e).map(o => (o, ctx))(ExecutionContexts.parasitic) }) /** @@ -209,13 +208,11 @@ trait FlowWithContextOps[+Out, +Ctx, +Mat] { * @see [[akka.stream.scaladsl.FlowOps.mapConcat]] */ def mapConcat[Out2](f: Out => IterableOnce[Out2]): Repr[Out2, Ctx] = - via(flow.mapConcat { - case (e, ctx) => f(e).iterator.map(_ -> ctx) + via(flow.mapConcat { case (e, ctx) => + f(e).iterator.map(_ -> ctx) }) - /** - * Apply the given function to each context element (leaving the data elements unchanged). - */ + /** Apply the given function to each context element (leaving the data elements unchanged). 
*/ def mapContext[Ctx2](f: Ctx => Ctx2): Repr[Out, Ctx2] = via(flow.map { case (e, ctx) => (e, f(ctx)) }) @@ -224,8 +221,8 @@ trait FlowWithContextOps[+Out, +Ctx, +Mat] { * * @see [[akka.stream.scaladsl.FlowOps.log]] */ - def log(name: String, extract: Out => Any = ConstantFun.scalaIdentityFunction)( - implicit log: LoggingAdapter = null): Repr[Out, Ctx] = { + def log(name: String, extract: Out => Any = ConstantFun.scalaIdentityFunction)(implicit + log: LoggingAdapter = null): Repr[Out, Ctx] = { val extractWithContext: ((Out, Ctx)) => Any = { case (e, _) => extract(e) } via(flow.log(name, extractWithContext)(log)) } @@ -238,8 +235,8 @@ trait FlowWithContextOps[+Out, +Ctx, +Mat] { def logWithMarker( name: String, marker: (Out, Ctx) => LogMarker, - extract: Out => Any = ConstantFun.scalaIdentityFunction)( - implicit log: MarkerLoggingAdapter = null): Repr[Out, Ctx] = { + extract: Out => Any = ConstantFun.scalaIdentityFunction)(implicit + log: MarkerLoggingAdapter = null): Repr[Out, Ctx] = { val extractWithContext: ((Out, Ctx)) => Any = { case (e, _) => extract(e) } via(flow.logWithMarker(name, marker.tupled, extractWithContext)(log)) } diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Framing.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Framing.scala index 19541625658..ce54da207e4 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Framing.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Framing.scala @@ -84,7 +84,6 @@ object Framing { * Then computeFrameSize can be used to compute the frame size: `(offset bytes, computed size) => (actual frame size)`. * ''Actual frame size'' must be equal or bigger than sum of `fieldOffset` and `fieldLength`, the operator fails otherwise. * Must not mutate the given byte array. 
- * */ def lengthField( fieldLength: Int, @@ -135,15 +134,11 @@ object Framing { simpleFramingProtocolDecoder(maximumMessageLength))(Keep.left) } - /** - * Protocol decoder that is used by [[Framing#simpleFramingProtocol]] - */ + /** Protocol decoder that is used by [[Framing#simpleFramingProtocol]] */ def simpleFramingProtocolDecoder(maximumMessageLength: Int): Flow[ByteString, ByteString, NotUsed] = lengthField(4, 0, maximumMessageLength + 4, ByteOrder.BIG_ENDIAN).map(_.drop(4)) - /** - * Protocol encoder that is used by [[Framing#simpleFramingProtocol]] - */ + /** Protocol encoder that is used by [[Framing#simpleFramingProtocol]] */ def simpleFramingProtocolEncoder(maximumMessageLength: Int): Flow[ByteString, ByteString, NotUsed] = Flow[ByteString].via(new SimpleFramingProtocolEncoder(maximumMessageLength)) @@ -154,7 +149,7 @@ object Framing { var decoded = 0 while (count > 0) { decoded <<= 8 - decoded |= bs.next().toInt & 0xFF + decoded |= bs.next().toInt & 0xff count -= 1 } decoded @@ -167,7 +162,7 @@ object Framing { var decoded = 0 while (count > 0) { decoded >>>= 8 - decoded += (bs.next().toInt & 0xFF) << highestOctet + decoded += (bs.next().toInt & 0xff) << highestOctet count -= 1 } decoded & Mask @@ -188,7 +183,7 @@ object Framing { s"Maximum allowed message size is $maximumMessageLength but tried to send $msgSize bytes")) else { val header = - ByteString((msgSize >> 24) & 0xFF, (msgSize >> 16) & 0xFF, (msgSize >> 8) & 0xFF, msgSize & 0xFF) + ByteString((msgSize >> 24) & 0xff, (msgSize >> 16) & 0xff, (msgSize >> 8) & 0xff, msgSize & 0xff) push(out, header ++ message) } } @@ -307,10 +302,13 @@ object Framing { if (isClosed(in) && buffer.isEmpty) completeStage() } else { // Emit results and compact buffer - emitMultiple(out, new FrameIterator(), () => { - reset() - if (isClosed(in) && buffer.isEmpty) completeStage() - }) + emitMultiple( + out, + new FrameIterator(), + () => { + reset() + if (isClosed(in) && buffer.isEmpty) completeStage() + }) } private def 
reset(): Unit = { @@ -371,7 +369,7 @@ object Framing { computeFrameSize: Option[(Array[Byte], Int) => Int]) extends GraphStage[FlowShape[ByteString, ByteString]] { - //for the sake of binary compatibility + // for the sake of binary compatibility def this(lengthFieldLength: Int, lengthFieldOffset: Int, maximumFrameLength: Int, byteOrder: ByteOrder) = this(lengthFieldLength, lengthFieldOffset, maximumFrameLength, byteOrder, None) @@ -379,7 +377,7 @@ object Framing { private val intDecoder = byteOrder match { case ByteOrder.BIG_ENDIAN => bigEndianDecoder case ByteOrder.LITTLE_ENDIAN => littleEndianDecoder - case _ => throw new RuntimeException() // won't happen, compiler exhaustiveness check pleaser + case _ => throw new RuntimeException() // won't happen, compiler exhaustiveness check pleaser } val in = Inlet[ByteString]("LengthFieldFramingStage.in") @@ -391,10 +389,7 @@ object Framing { private var buffer = ByteString.empty private var frameSize = Int.MaxValue - /** - * push, and reset frameSize and buffer - * - */ + /** push, and reset frameSize and buffer */ private def pushFrame() = { val emit = buffer.take(frameSize).compact buffer = buffer.drop(frameSize) @@ -405,10 +400,7 @@ object Framing { } } - /** - * try to push downstream, if failed then try to pull upstream - * - */ + /** try to push downstream, if failed then try to pull upstream */ private def tryPushFrame() = { val buffSize = buffer.size if (buffSize >= frameSize) { diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Graph.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Graph.scala index 27aa28ece53..fd481cacb8e 100755 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Graph.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Graph.scala @@ -229,7 +229,7 @@ final class MergePreferred[T](val secondaryPorts: Int, val eagerComplete: Boolea } override def preStart(): Unit = { - //while initializing this `MergePreferredShape`, the `preferred` port gets added to `inlets` by 
side-effect. + // while initializing this `MergePreferredShape`, the `preferred` port gets added to `inlets` by side-effect. shape.inlets.foreach(tryPull) } @@ -451,7 +451,6 @@ object Interleave { * '''Completes when''' all upstreams complete (eagerClose=false) or one upstream completes (eagerClose=true) * * '''Cancels when''' downstream cancels - * */ final class Interleave[T](val inputPorts: Int, val segmentSize: Int, val eagerClose: Boolean) extends GraphStage[UniformFanInShape[T, T]] { @@ -567,10 +566,12 @@ final class MergeSorted[T: Ordering] extends GraphStage[FanInShape2[T, T, T]] { override def preStart(): Unit = { // all fan-in stages need to eagerly pull all inputs to get cycles started pull(right) - read(left)(l => { - other = l - readR() - }, () => passAlong(right, out)) + read(left)( + l => { + other = l + readR() + }, + () => passAlong(right, out)) } } } @@ -599,7 +600,6 @@ object Broadcast { * * '''Cancels when''' * If eagerCancel is enabled: when any downstream cancels; otherwise: when all downstreams cancel - * */ final class Broadcast[T](val outputPorts: Int, val eagerCancel: Boolean) extends GraphStage[UniformFanOutShape[T, T]] { // one output might seem counter intuitive but saves us from special handling in other places @@ -677,9 +677,7 @@ final class Broadcast[T](val outputPorts: Int, val eagerCancel: Boolean) extends object WireTap { private val singleton = new WireTap[Nothing] - /** - * @see [[WireTap]] - */ + /** @see [[WireTap]] */ def apply[T](): WireTap[T] = singleton.asInstanceOf[WireTap[T]] } @@ -696,7 +694,6 @@ object WireTap { * '''Completes when''' upstream completes * * '''Cancels when''' the 'main' output cancels - * */ @InternalApi private[stream] final class WireTap[T] extends GraphStage[FanOutShape2[T, T, T]] { @@ -741,11 +738,13 @@ private[stream] final class WireTap[T] extends GraphStage[FanOutShape2[T, T, T]] } override def onDownstreamFinish(cause: Throwable): Unit = { - setHandler(in, new InHandler { - override def 
onPush() = { - push(outMain, grab(in)) - } - }) + setHandler( + in, + new InHandler { + override def onPush() = { + push(outMain, grab(in)) + } + }) // Allow any outstanding element to be garbage-collected pendingTap = None } @@ -769,7 +768,8 @@ object Partition { * * @param outputPorts number of output ports * @param partitioner function deciding which output each element will be targeted - */ // FIXME BC add `eagerCancel: Boolean = false` parameter + */ + // FIXME BC add `eagerCancel: Boolean = false` parameter def apply[T](outputPorts: Int, partitioner: T => Int): Partition[T] = new Partition(outputPorts, partitioner, false) } @@ -791,7 +791,8 @@ final class Partition[T](val outputPorts: Int, val partitioner: T => Int, val ea extends GraphStage[UniformFanOutShape[T, T]] { val in: Inlet[T] = Inlet[T]("Partition.in") - val out: Seq[Outlet[T]] = Seq.tabulate(outputPorts)(i => Outlet[T]("Partition.out" + i)) // FIXME BC make this immutable.IndexedSeq as type + Vector as concrete impl + val out: Seq[Outlet[T]] = Seq.tabulate(outputPorts)(i => + Outlet[T]("Partition.out" + i)) // FIXME BC make this immutable.IndexedSeq as type + Vector as concrete impl override val shape: UniformFanOutShape[T, T] = UniformFanOutShape[T, T](in, out: _*) override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = @@ -847,41 +848,40 @@ final class Partition[T](val outputPorts: Int, val partitioner: T => Int, val ea setHandler(in, this) - out.iterator.zipWithIndex.foreach { - case (o, idx) => - setHandler( - o, - new OutHandler { - override def onPull() = { - if (outPendingElem != null) { - val elem = outPendingElem.asInstanceOf[T] - if (idx == outPendingIdx) { - push(o, elem) - outPendingElem = null - if (isClosed(in)) - completeStage() - else if (!hasBeenPulled(in)) - pull(in) - } - } else if (!hasBeenPulled(in)) - pull(in) - } + out.iterator.zipWithIndex.foreach { case (o, idx) => + setHandler( + o, + new OutHandler { + override def onPull() = { + if (outPendingElem != 
null) { + val elem = outPendingElem.asInstanceOf[T] + if (idx == outPendingIdx) { + push(o, elem) + outPendingElem = null + if (isClosed(in)) + completeStage() + else if (!hasBeenPulled(in)) + pull(in) + } + } else if (!hasBeenPulled(in)) + pull(in) + } - override def onDownstreamFinish(cause: Throwable) = - if (eagerCancel) cancelStage(cause) - else { - downstreamRunning -= 1 - if (downstreamRunning == 0) + override def onDownstreamFinish(cause: Throwable) = + if (eagerCancel) cancelStage(cause) + else { + downstreamRunning -= 1 + if (downstreamRunning == 0) + cancelStage(cause) + else if (outPendingElem != null && idx == outPendingIdx) { + outPendingElem = null + if (isClosed(in)) cancelStage(cause) - else if (outPendingElem != null && idx == outPendingIdx) { - outPendingElem = null - if (isClosed(in)) - cancelStage(cause) - else if (!hasBeenPulled(in)) - pull(in) - } + else if (!hasBeenPulled(in)) + pull(in) } - }) + } + }) } } @@ -1001,9 +1001,7 @@ final class Balance[T](val outputPorts: Int, val waitForAllDownstreams: Boolean, object Zip { - /** - * Create a new `Zip`. - */ + /** Create a new `Zip`. */ def apply[A, B](): Zip[A, B] = new Zip() } @@ -1026,14 +1024,10 @@ final class Zip[A, B] extends ZipWith2[A, B, (A, B)](Tuple2.apply) { object ZipLatest { - /** - * Create a new `ZipLatest`. - */ + /** Create a new `ZipLatest`. */ def apply[A, B](): ZipLatest[A, B] = new ZipLatest() - /** - * Create a new `ZipLatest`. - */ + /** Create a new `ZipLatest`. */ def apply[A, B](eagerComplete: Boolean): ZipLatest[A, B] = new ZipLatest(eagerComplete) } @@ -1111,9 +1105,7 @@ object ZipLatestWith extends ZipLatestWithApply */ object Unzip { - /** - * Create a new `Unzip`. - */ + /** Create a new `Unzip`. */ def apply[A, B](): Unzip[A, B] = new Unzip() } @@ -1149,9 +1141,7 @@ object UnzipWith extends UnzipWithApply object ZipN { - /** - * Create a new `ZipN`. - */ + /** Create a new `ZipN`. 
*/ def apply[A](n: Int) = new ZipN[A](n) } @@ -1175,9 +1165,7 @@ final class ZipN[A](n: Int) extends ZipWithN[A, immutable.Seq[A]](ConstantFun.sc object ZipWithN { - /** - * Create a new `ZipWithN`. - */ + /** Create a new `ZipWithN`. */ def apply[A, O](zipper: immutable.Seq[A] => O)(n: Int) = new ZipWithN[A, O](zipper)(n) } @@ -1221,9 +1209,10 @@ class ZipWithN[A, O](zipper: immutable.Seq[A] => O)(n: Int) extends GraphStage[U shape.inlets.foreach(pullInlet) } - shape.inlets.zipWithIndex.foreach { - case (in, i) => - setHandler(in, new InHandler { + shape.inlets.zipWithIndex.foreach { case (in, i) => + setHandler( + in, + new InHandler { override def onPush(): Unit = { // Only one context can be propagated. Picked the first element as an arbitrary but deterministic choice. if (i == 0) contextPropagation.suspendContext() @@ -1344,9 +1333,7 @@ final class Concat[T](val inputPorts: Int) extends GraphStage[UniformFanInShape[ object OrElse { private val singleton = new OrElse[Nothing] - /** - * @see [[OrElse]] - */ + /** @see [[OrElse]] */ def apply[T]() = singleton.asInstanceOf[OrElse[T]] } @@ -1408,15 +1395,17 @@ private[stream] final class OrElse[T] extends GraphStage[UniformFanInShape[T, T] } } - setHandler(secondary, new InHandler { - override def onPush(): Unit = { - push(out, grab(secondary)) - } + setHandler( + secondary, + new InHandler { + override def onPush(): Unit = { + push(out, grab(secondary)) + } - override def onUpstreamFinish(): Unit = { - if (isClosed(primary)) completeStage() - } - }) + override def onUpstreamFinish(): Unit = { + if (isClosed(primary)) completeStage() + } + }) setHandlers(primary, out, this) } @@ -1431,7 +1420,7 @@ object MergeSequence { private implicit def ordering[T]: Ordering[Pushed[T]] = Ordering.by[Pushed[T], Long](_.sequence).reverse - /** @see [[MergeSequence]] **/ + /** @see [[MergeSequence]] * */ def apply[T](inputPorts: Int = 2)(extractSequence: T => Long): Graph[UniformFanInShape[T, T], NotUsed] = 
GraphStages.withDetachedInputs(new MergeSequence[T](inputPorts)(extractSequence)) } @@ -1483,36 +1472,35 @@ final class MergeSequence[T](val inputPorts: Int)(extractSequence: T => Long) setHandler(out, this) - in.zipWithIndex.foreach { - case (inPort, idx) => - setHandler( - inPort, - new InHandler { - override def onPush(): Unit = { - val elem = grab(inPort) - val sequence = extractSequence(elem) - if (sequence < nextSequence) { - failStage( - new IllegalStateException(s"Sequence regression from $nextSequence to $sequence on port $idx")) - } else if (sequence == nextSequence && isAvailable(out)) { - push(out, elem) - tryPull(inPort) - nextSequence += 1 - } else { - available.enqueue(Pushed(inPort, sequence, elem)) - detectMissedSequence() - } + in.zipWithIndex.foreach { case (inPort, idx) => + setHandler( + inPort, + new InHandler { + override def onPush(): Unit = { + val elem = grab(inPort) + val sequence = extractSequence(elem) + if (sequence < nextSequence) { + failStage( + new IllegalStateException(s"Sequence regression from $nextSequence to $sequence on port $idx")) + } else if (sequence == nextSequence && isAvailable(out)) { + push(out, elem) + tryPull(inPort) + nextSequence += 1 + } else { + available.enqueue(Pushed(inPort, sequence, elem)) + detectMissedSequence() } + } - override def onUpstreamFinish(): Unit = { - complete += 1 - if (complete == inputPorts && available.isEmpty) { - completeStage() - } else { - detectMissedSequence() - } + override def onUpstreamFinish(): Unit = { + complete += 1 + if (complete == inputPorts && available.isEmpty) { + completeStage() + } else { + detectMissedSequence() } - }) + } + }) } def onPull(): Unit = @@ -1581,9 +1569,7 @@ object GraphDSL extends GraphApply { private var traversalBuilderInProgress: TraversalBuilder = TraversalBuilder.empty() - /** - * INTERNAL API - */ + /** INTERNAL API */ private[GraphDSL] def addEdge[T, U >: T](from: Outlet[T], to: Inlet[U]): Unit = try { traversalBuilderInProgress = 
traversalBuilderInProgress.wire(from, to) @@ -1661,9 +1647,11 @@ object GraphDSL extends GraphApply { * @return The outlet that will emit the materialized value. */ def materializedValue: Outlet[M @uncheckedVariance] = - add(Source.maybe[M], { (prev: M, prom: Promise[Option[M]]) => - prom.success(Some(prev)); prev - }).out + add( + Source.maybe[M], + { (prev: M, prom: Promise[Option[M]]) => + prom.success(Some(prev)); prev + }).out private[GraphDSL] def traversalBuilder: TraversalBuilder = traversalBuilderInProgress @@ -1872,8 +1860,8 @@ object GraphDSL extends GraphApply { implicit final class FlowShapeArrow[I, O](val f: FlowShape[I, O]) extends AnyVal with ReverseCombinerBase[I] { override def importAndGetPortReverse(b: Builder[_]): Inlet[I] = f.in - def <~>[I2, O2, Mat](bidi: Graph[BidiShape[O, O2, I2, I], Mat])( - implicit b: Builder[_]): BidiShape[O, O2, I2, I] = { + def <~>[I2, O2, Mat](bidi: Graph[BidiShape[O, O2, I2, I], Mat])(implicit + b: Builder[_]): BidiShape[O, O2, I2, I] = { val shape = b.add(bidi) b.addEdge(f.out, shape.in1) b.addEdge(shape.out2, f.in) @@ -1894,8 +1882,8 @@ object GraphDSL extends GraphApply { } implicit final class FlowArrow[I, O, M](val f: Graph[FlowShape[I, O], M]) extends AnyVal { - def <~>[I2, O2, Mat](bidi: Graph[BidiShape[O, O2, I2, I], Mat])( - implicit b: Builder[_]): BidiShape[O, O2, I2, I] = { + def <~>[I2, O2, Mat](bidi: Graph[BidiShape[O, O2, I2, I], Mat])(implicit + b: Builder[_]): BidiShape[O, O2, I2, I] = { val shape = b.add(bidi) val flow = b.add(f) b.addEdge(flow.out, shape.in1) @@ -1925,8 +1913,8 @@ object GraphDSL extends GraphApply { other } - def <~>[I3, O3, M](otherFlow: Graph[BidiShape[O1, O3, I3, I2], M])( - implicit b: Builder[_]): BidiShape[O1, O3, I3, I2] = { + def <~>[I3, O3, M](otherFlow: Graph[BidiShape[O1, O3, I3, I2], M])(implicit + b: Builder[_]): BidiShape[O1, O3, I3, I2] = { val other = b.add(otherFlow) b.addEdge(bidi.out1, other.in1) b.addEdge(other.out2, bidi.in2) diff --git 
a/akka-stream/src/main/scala/akka/stream/scaladsl/Hub.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Hub.scala index f4f5e8640c3..88a1ce68d13 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Hub.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Hub.scala @@ -44,7 +44,6 @@ object MergeHub { /** * Set the operation mode of the linked MergeHub to draining. In this mode the Hub will cancel any new producer and * will complete as soon as all the currently connected producers complete. - * */ def drainAndComplete(): Unit } @@ -117,9 +116,7 @@ object MergeHub { final class ProducerFailed(msg: String, cause: Throwable) extends RuntimeException(msg, cause) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final class MergeHubDrainingControlImpl(drainAction: () => Unit) extends MergeHub.DrainingControl { override def drainAndComplete(): Unit = { @@ -174,10 +171,9 @@ private[akka] class MergeHub[T](perProducerBufferSize: Int, drainingEnabled: Boo @volatile private[this] var draining = false private[this] val demands = scala.collection.mutable.LongMap.empty[InputState] - private[this] val wakeupCallback = getAsyncCallback[NotUsed]( - (_) => - // We are only allowed to dequeue if we are not backpressured. See comment in tryProcessNext() for details. - if (isAvailable(out)) tryProcessNext(firstAttempt = true)) + private[this] val wakeupCallback = getAsyncCallback[NotUsed](_ => + // We are only allowed to dequeue if we are not backpressured. See comment in tryProcessNext() for details. + if (isAvailable(out)) tryProcessNext(firstAttempt = true)) private[MergeHub] val drainingCallback: Option[AsyncCallback[NotUsed]] = { // Only create an async callback if the draining support is enabled in order to avoid book-keeping costs. 
@@ -392,9 +388,7 @@ private[akka] class MergeHub[T](perProducerBufferSize: Int, drainingEnabled: Boo */ object BroadcastHub { - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] val defaultBufferSize = 256 /** @@ -457,15 +451,12 @@ object BroadcastHub { * all corresponding [[Source]]s are completed. Both failure and normal completion is "remembered" and later * materializations of the [[Source]] will see the same (failure or completion) state. [[Source]]s that are * cancelled are simply removed from the dynamic set of consumers. - * */ def sink[T]: Sink[T, Source[T, NotUsed]] = sink(bufferSize = defaultBufferSize) } -/** - * INTERNAL API - */ +/** INTERNAL API */ private[akka] class BroadcastHub[T](startAfterNrOfConsumers: Int, bufferSize: Int) extends GraphStageWithMaterializedValue[SinkShape[T], Source[T, NotUsed]] { require(startAfterNrOfConsumers >= 0, "startAfterNrOfConsumers must >= 0") @@ -640,7 +631,7 @@ private[akka] class BroadcastHub[T](startAfterNrOfConsumers: Int, bufferSize: In // TODO: Try to eliminate modulo division somehow... val wheelSlot = offset & WheelMask var consumersInSlot = consumerWheel(wheelSlot) - //debug(s"consumers before removal $consumersInSlot") + // debug(s"consumers before removal $consumersInSlot") var remainingConsumersInSlot: List[Consumer] = Nil var removedConsumer: Consumer = null @@ -868,9 +859,7 @@ private[akka] class BroadcastHub[T](startAfterNrOfConsumers: Int, bufferSize: In */ object PartitionHub { - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] val defaultBufferSize = 256 /** @@ -975,16 +964,12 @@ object PartitionHub { */ def queueSize(consumerId: Long): Int - /** - * Number of attached consumers. - */ + /** Number of attached consumers. 
*/ def size: Int } - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] object Internal { sealed trait ConsumerEvent case object Wakeup extends ConsumerEvent @@ -1135,9 +1120,7 @@ object PartitionHub { } } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] class PartitionHub[T]( partitioner: () => (PartitionHub.ConsumerInfo, T) => Long, startAfterNrOfConsumers: Int, diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/JavaFlowSupport.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/JavaFlowSupport.scala index fd4a04aefad..f2f4fb65d06 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/JavaFlowSupport.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/JavaFlowSupport.scala @@ -21,9 +21,7 @@ import akka.stream.scaladsl object JavaFlowSupport { import JavaFlowAndRsConverters.Implicits._ - /** - * [[akka.stream.scaladsl.Source]] factories operating with `java.util.concurrent.Flow.*` interfaces. - */ + /** [[akka.stream.scaladsl.Source]] factories operating with `java.util.concurrent.Flow.*` interfaces. */ object Source { /** @@ -38,9 +36,9 @@ object JavaFlowSupport { * (which carries the same semantics, however existed before RS's inclusion in Java 9). */ final - //#fromPublisher + // #fromPublisher def fromPublisher[T](publisher: java.util.concurrent.Flow.Publisher[T]): Source[T, NotUsed] = - //#fromPublisher + // #fromPublisher scaladsl.Source.fromPublisher(publisher.asRs) /** @@ -50,27 +48,21 @@ object JavaFlowSupport { * (which carries the same semantics, however existed before RS's inclusion in Java 9). */ final - //#asSubscriber + // #asSubscriber def asSubscriber[T]: Source[T, java.util.concurrent.Flow.Subscriber[T]] = - //#asSubscriber + // #asSubscriber scaladsl.Source.asSubscriber[T].mapMaterializedValue(_.asJava) } - /** - * [[akka.stream.scaladsl.Flow]] factories operating with `java.util.concurrent.Flow.*` interfaces. 
- */ + /** [[akka.stream.scaladsl.Flow]] factories operating with `java.util.concurrent.Flow.*` interfaces. */ object Flow { - /** - * Creates a Flow from a Reactive Streams [[org.reactivestreams.Processor]] - */ + /** Creates a Flow from a Reactive Streams [[org.reactivestreams.Processor]] */ def fromProcessor[I, O](processorFactory: () => juc.Flow.Processor[I, O]): Flow[I, O, NotUsed] = { fromProcessorMat(() => (processorFactory(), NotUsed)) } - /** - * Creates a Flow from a Reactive Streams [[java.util.concurrent.Flow.Processor]] and returns a materialized value. - */ + /** Creates a Flow from a Reactive Streams [[java.util.concurrent.Flow.Processor]] and returns a materialized value. */ def fromProcessorMat[I, O, M](processorFactory: () => (juc.Flow.Processor[I, O], M)): Flow[I, O, M] = scaladsl.Flow.fromProcessorMat { () => val (processor, mat) = processorFactory() @@ -98,9 +90,7 @@ object JavaFlowSupport { } } - /** - * [[akka.stream.scaladsl.Sink]] factories operating with `java.util.concurrent.Flow.*` interfaces. - */ + /** [[akka.stream.scaladsl.Sink]] factories operating with `java.util.concurrent.Flow.*` interfaces. */ object Sink { /** @@ -117,9 +107,7 @@ object JavaFlowSupport { final def asPublisher[T](fanout: Boolean): Sink[T, juc.Flow.Publisher[T]] = scaladsl.Sink.asPublisher[T](fanout).mapMaterializedValue(_.asJava) - /** - * Helper to create [[Sink]] from [[java.util.concurrent.Flow.Subscriber]]. - */ + /** Helper to create [[Sink]] from [[java.util.concurrent.Flow.Subscriber]]. 
*/ final def fromSubscriber[T](s: juc.Flow.Subscriber[T]): Sink[T, NotUsed] = scaladsl.Sink.fromSubscriber(s.asRs) } diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/JsonFraming.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/JsonFraming.scala index 7f9092456b8..ea476e1e060 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/JsonFraming.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/JsonFraming.scala @@ -74,10 +74,12 @@ object JsonFraming { } def tryPopBuffer(): Unit = { - try buffer.poll() match { - case Some(json) => push(out, json) - case _ => if (isClosed(in)) complete() else pull(in) - } catch { + try + buffer.poll() match { + case Some(json) => push(out, json) + case _ => if (isClosed(in)) complete() else pull(in) + } + catch { case NonFatal(ex) => failStage(ex) } } diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/MergeLatest.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/MergeLatest.scala index 391d0235814..4916ccc7ebb 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/MergeLatest.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/MergeLatest.scala @@ -20,7 +20,6 @@ import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler } * '''Completes when''' all upstreams complete (eagerClose=false) or one upstream completes (eagerClose=true) * * '''Cancels when''' downstream cancels - * */ object MergeLatest { @@ -53,25 +52,24 @@ final class MergeLatest[T, M](val inputPorts: Int, val eagerClose: Boolean)(buil override def preStart(): Unit = in.foreach(tryPull) - in.zipWithIndex.foreach { - case (input, index) => - setHandler( - input, - new InHandler { - override def onPush(): Unit = { - messages.update(index, grab(input)) - activeStreams.add(index) - if (allMessagesReady) emit(out, buildElem(messages.asInstanceOf[Array[T]])) - tryPull(input) - } + in.zipWithIndex.foreach { case (input, index) => + setHandler( + input, + new InHandler { + override def onPush(): Unit = 
{ + messages.update(index, grab(input)) + activeStreams.add(index) + if (allMessagesReady) emit(out, buildElem(messages.asInstanceOf[Array[T]])) + tryPull(input) + } - override def onUpstreamFinish(): Unit = { - if (!eagerClose) { - runningUpstreams -= 1 - if (upstreamsClosed) completeStage() - } else completeStage() - } - }) + override def onUpstreamFinish(): Unit = { + if (!eagerClose) { + runningUpstreams -= 1 + if (upstreamsClosed) completeStage() + } else completeStage() + } + }) } override def onPull(): Unit = { diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Queue.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Queue.scala index 80f874e032f..cbf3330c184 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Queue.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Queue.scala @@ -16,9 +16,7 @@ import akka.annotation.InternalApi import akka.dispatch.ExecutionContexts import akka.stream.QueueOfferResult -/** - * This trait allows to have a queue as a data source for some stream. - */ +/** This trait allows to have a queue as a data source for some stream. */ trait SourceQueue[T] { /** @@ -47,9 +45,7 @@ trait SourceQueue[T] { def watchCompletion(): Future[Done] } -/** - * This trait adds completion support to [[SourceQueue]]. - */ +/** This trait adds completion support to [[SourceQueue]]. 
*/ trait SourceQueueWithComplete[T] extends SourceQueue[T] { /** @@ -83,16 +79,12 @@ object SourceQueueWithComplete { // would have been better to add `asJava` in SourceQueueWithComplete trait, but not doing // that for backwards compatibility reasons - /** - * Converts the queue into a `javadsl.SourceQueueWithComplete` - */ + /** Converts the queue into a `javadsl.SourceQueueWithComplete` */ def asJava: akka.stream.javadsl.SourceQueueWithComplete[T] = SourceQueueWithComplete.asJava(queue) } - /** - * INTERNAL API: Converts the queue into a `javadsl.SourceQueueWithComplete` - */ + /** INTERNAL API: Converts the queue into a `javadsl.SourceQueueWithComplete` */ @InternalApi private[akka] def asJava[T]( queue: SourceQueueWithComplete[T]): akka.stream.javadsl.SourceQueueWithComplete[T] = new akka.stream.javadsl.SourceQueueWithComplete[T] { @@ -120,14 +112,10 @@ trait SinkQueue[T] { def pull(): Future[Option[T]] } -/** - * This trait adds cancel support to [[SinkQueue]]. - */ +/** This trait adds cancel support to [[SinkQueue]]. */ trait SinkQueueWithCancel[T] extends SinkQueue[T] { - /** - * Cancels the stream. This method returns right away without waiting for actual finalizing the stream. - */ + /** Cancels the stream. This method returns right away without waiting for actual finalizing the stream. 
*/ def cancel(): Unit } @@ -140,9 +128,7 @@ object SinkQueueWithCancel { SinkQueueWithCancel.asJava(queue) } - /** - * INTERNAL API: Converts the queue into a `javadsl.SinkQueueWithCancel` - */ + /** INTERNAL API: Converts the queue into a `javadsl.SinkQueueWithCancel` */ @InternalApi private[akka] def asJava[T](queue: SinkQueueWithCancel[T]): akka.stream.javadsl.SinkQueueWithCancel[T] = new akka.stream.javadsl.SinkQueueWithCancel[T] { override def pull(): CompletionStage[Optional[T]] = diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/RestartFlow.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/RestartFlow.scala index 8a6adc1e3d4..17e4af85949 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/RestartFlow.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/RestartFlow.scala @@ -220,15 +220,14 @@ private final class RestartWithBackoffFlow[In, Out]( // We need to ensure that the other end of the sub flow is also completed, so that we don't // receive any callbacks from it. - activeOutIn.foreach { - case (sourceOut, sinkIn) => - if (!sourceOut.isClosed) { - sourceOut.complete() - } - if (!sinkIn.isClosed) { - sinkIn.cancel() - } - activeOutIn = None + activeOutIn.foreach { case (sourceOut, sinkIn) => + if (!sourceOut.isClosed) { + sourceOut.complete() + } + if (!sinkIn.isClosed) { + sinkIn.cancel() + } + activeOutIn = None } } @@ -236,9 +235,7 @@ private final class RestartWithBackoffFlow[In, Out]( } } -/** - * Shared logic for all restart with backoff logics. - */ +/** Shared logic for all restart with backoff logics. 
*/ private abstract class RestartWithBackoffLogic[S <: Shape]( name: String, shape: S, @@ -301,13 +298,15 @@ private abstract class RestartWithBackoffLogic[S <: Shape]( } }) - setHandler(out, new OutHandler { - override def onPull() = sinkIn.pull() - override def onDownstreamFinish(cause: Throwable) = { - finishing = true - sinkIn.cancel(cause) - } - }) + setHandler( + out, + new OutHandler { + override def onPull() = sinkIn.pull() + override def onDownstreamFinish(cause: Throwable) = { + finishing = true + sinkIn.cancel(cause) + } + }) sinkIn } diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/RestartSource.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/RestartSource.scala index faca4c99b3c..1f2aeb18ac7 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/RestartSource.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/RestartSource.scala @@ -116,7 +116,6 @@ object RestartSource { * random delay based on this factor is added, e.g. `0.2` adds up to `20%` delay. * In order to skip this additional delay pass in `0`. * @param sourceFactory A factory for producing the [[Source]] to wrap. - * */ @Deprecated @deprecated("Use the overloaded method which accepts akka.stream.RestartSettings instead.", since = "2.6.10") @@ -147,7 +146,6 @@ object RestartSource { * @param maxRestarts the amount of restarts is capped to this amount within a time frame of minBackoff. * Passing `0` will cause no restarts and a negative number will not cap the amount of restarts. * @param sourceFactory A factory for producing the [[Source]] to wrap. - * */ @Deprecated @deprecated("Use the overloaded method which accepts akka.stream.RestartSettings instead.", since = "2.6.10") @@ -174,7 +172,6 @@ object RestartSource { * * @param settings [[RestartSettings]] defining restart configuration * @param sourceFactory A factory for producing the [[Source]] to wrap. 
- * */ def onFailuresWithBackoff[T](settings: RestartSettings)(sourceFactory: () => Source[T, _]): Source[T, NotUsed] = Source.fromGraph(new RestartWithBackoffSource(sourceFactory, settings, onlyOnFailures = true)) diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala index 00d15fcfe2d..c66e16cf2df 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala @@ -60,9 +60,7 @@ final class Sink[-In, +Mat](override val traversalBuilder: LinearTraversalBuilde def runWith[Mat2](source: Graph[SourceShape[In], Mat2])(implicit materializer: Materializer): Mat2 = Source.fromGraph(source).to(this).run() - /** - * Transform only the materialized value of this Sink, leaving all other properties as they were. - */ + /** Transform only the materialized value of this Sink, leaving all other properties as they were. */ def mapMaterializedValue[Mat2](f: Mat => Mat2): Sink[In, Mat2] = new Sink(traversalBuilder.transformMat(f.asInstanceOf[Any => Any]), shape) @@ -97,14 +95,10 @@ final class Sink[-In, +Mat](override val traversalBuilder: LinearTraversalBuilde override def addAttributes(attr: Attributes): Sink[In, Mat] = withAttributes(traversalBuilder.attributes and attr) - /** - * Add a ``name`` attribute to this Sink. - */ + /** Add a ``name`` attribute to this Sink. 
*/ override def named(name: String): Sink[In, Mat] = addAttributes(Attributes.name(name)) - /** - * Put an asynchronous boundary around this `Source` - */ + /** Put an asynchronous boundary around this `Source` */ override def async: Sink[In, Mat] = super.async.asInstanceOf[Sink[In, Mat]] /** @@ -124,9 +118,7 @@ final class Sink[-In, +Mat](override val traversalBuilder: LinearTraversalBuilde override def async(dispatcher: String, inputBufferSize: Int): Sink[In, Mat] = super.async(dispatcher, inputBufferSize).asInstanceOf[Sink[In, Mat]] - /** - * Converts this Scala DSL element to it's Java DSL counterpart. - */ + /** Converts this Scala DSL element to it's Java DSL counterpart. */ def asJava[JIn <: In]: javadsl.Sink[JIn, Mat @uncheckedVariance] = new javadsl.Sink(this) override def getAttributes: Attributes = traversalBuilder.attributes @@ -166,11 +158,11 @@ object Sink { */ def fromMaterializer[T, M](factory: (Materializer, Attributes) => Sink[T, M]): Sink[T, Future[M]] = Flow - .fromMaterializer({ (mat, attr) => + .fromMaterializer { (mat, attr) => Flow.fromGraph(GraphDSL.createGraph(factory(mat, attr)) { b => sink => FlowShape(sink.in, b.materializedValue.outlet) }) - }) + } .to(Sink.head) /** @@ -184,15 +176,11 @@ object Sink { factory(ActorMaterializerHelper.downcast(mat), attr) } - /** - * Helper to create [[Sink]] from `Subscriber`. - */ + /** Helper to create [[Sink]] from `Subscriber`. */ def fromSubscriber[T](subscriber: Subscriber[T]): Sink[T, NotUsed] = fromGraph(new SubscriberSink(subscriber, DefaultAttributes.subscriberSink, shape("SubscriberSink"))) - /** - * A `Sink` that immediately cancels its upstream after materialization. - */ + /** A `Sink` that immediately cancels its upstream after materialization. 
*/ def cancelled[T]: Sink[T, NotUsed] = fromGraph[Any, NotUsed](new CancelSink(DefaultAttributes.cancelledSink, shape("CancelledSink"))) @@ -299,14 +287,10 @@ object Sink { if (fanout) new FanoutPublisherSink[T](DefaultAttributes.fanoutPublisherSink, shape("FanoutPublisherSink")) else new PublisherSink[T](DefaultAttributes.publisherSink, shape("PublisherSink"))) - /** - * A `Sink` that will consume the stream and discard the elements. - */ + /** A `Sink` that will consume the stream and discard the elements. */ def ignore: Sink[Any, Future[Done]] = fromGraph(GraphStages.IgnoreSink) - /** - * A [[Sink]] that will always backpressure never cancel and never consume any elements from the stream. - * */ + /** A [[Sink]] that will always backpressure never cancel and never consume any elements from the stream. */ def never: Sink[Any, Future[Done]] = _never private[this] val _never: Sink[Any, Future[Done]] = fromGraph(GraphStages.NeverSink) @@ -328,9 +312,7 @@ object Sink { def foreachAsync[T](parallelism: Int)(f: T => Future[Unit]): Sink[T, Future[Done]] = Flow[T].mapAsyncUnordered(parallelism)(f).toMat(Sink.ignore)(Keep.right).named("foreachAsyncSink") - /** - * Combine several sinks with fan-out strategy like `Broadcast` or `Balance` and returns `Sink`. - */ + /** Combine several sinks with fan-out strategy like `Broadcast` or `Balance` and returns `Sink`. */ def combine[T, U](first: Sink[U, _], second: Sink[U, _], rest: Sink[U, _]*)( @nowarn @deprecatedName(Symbol("strategy")) @@ -350,9 +332,7 @@ object Sink { combineRest(2, rest.iterator) }) - /** - * Combine two sinks with fan-out strategy like `Broadcast` or `Balance` and returns `Sink` with 2 outlets. - */ + /** Combine two sinks with fan-out strategy like `Broadcast` or `Balance` and returns `Sink` with 2 outlets. 
*/ def combineMat[T, U, M1, M2, M](first: Sink[U, M1], second: Sink[U, M2])( fanOutStrategy: Int => Graph[UniformFanOutShape[T, U], NotUsed])(matF: (M1, M2) => M): Sink[T, M] = { Sink.fromGraph(GraphDSL.createGraph(first, second)(matF) { implicit b => (shape1, shape2) => diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala index 0fc5030fdd7..81cc9c13861 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala @@ -53,10 +53,10 @@ final class Source[+Out, +Mat]( combine: (Mat, Mat2) => Mat3): Source[T, Mat3] = { if (flow.traversalBuilder eq Flow.identityTraversalBuilder) if (combine == Keep.left) - //optimization by returning this - this.asInstanceOf[Source[T, Mat3]] //Mat == Mat3, due to Keep.left + // optimization by returning this + this.asInstanceOf[Source[T, Mat3]] // Mat == Mat3, due to Keep.left else if (combine == Keep.right || combine == Keep.none) // Mat3 = NotUsed - //optimization with LinearTraversalBuilder.empty() + // optimization with LinearTraversalBuilder.empty() new Source[T, Mat3]( traversalBuilder.append(LinearTraversalBuilder.empty(), flow.shape, combine), SourceShape(shape.out).asInstanceOf[SourceShape[T]]) @@ -84,9 +84,7 @@ final class Source[+Out, +Mat]( RunnableGraph(traversalBuilder.append(sink.traversalBuilder, sink.shape, combine)) } - /** - * Transform only the materialized value of this Source, leaving all other properties as they were. - */ + /** Transform only the materialized value of this Source, leaving all other properties as they were. 
*/ override def mapMaterializedValue[Mat2](f: Mat => Mat2): ReprMat[Out, Mat2] = new Source[Out, Mat2](traversalBuilder.transformMat(f.asInstanceOf[Any => Any]), shape) @@ -197,14 +195,10 @@ final class Source[+Out, +Mat]( */ override def addAttributes(attr: Attributes): Repr[Out] = withAttributes(traversalBuilder.attributes and attr) - /** - * Add a ``name`` attribute to this Source. - */ + /** Add a ``name`` attribute to this Source. */ override def named(name: String): Repr[Out] = addAttributes(Attributes.name(name)) - /** - * Put an asynchronous boundary around this `Source` - */ + /** Put an asynchronous boundary around this `Source` */ override def async: Repr[Out] = super.async.asInstanceOf[Repr[Out]] /** @@ -224,14 +218,10 @@ final class Source[+Out, +Mat]( override def async(dispatcher: String, inputBufferSize: Int): Repr[Out] = super.async(dispatcher, inputBufferSize).asInstanceOf[Repr[Out]] - /** - * Converts this Scala DSL element to it's Java DSL counterpart. - */ + /** Converts this Scala DSL element to it's Java DSL counterpart. */ def asJava: javadsl.Source[Out @uncheckedVariance, Mat @uncheckedVariance] = new javadsl.Source(this) - /** - * Transform this source whose element is ``e`` into a source producing tuple ``(e, f(e))`` - **/ + /** Transform this source whose element is ``e`` into a source producing tuple ``(e, f(e))`` */ def asSourceWithContext[Ctx](f: Out => Ctx): SourceWithContext[Out, Ctx, Mat] = new SourceWithContext(this.map(e => (e, f(e)))) @@ -401,9 +391,7 @@ object Source { def single[T](element: T): Source[T, NotUsed] = fromGraph(new GraphStages.SingleSource(element)) - /** - * Create a `Source` that will continually emit the given element. - */ + /** Create a `Source` that will continually emit the given element. 
*/ def repeat[T](element: T): Source[T, NotUsed] = { fromIterator(() => Iterator.continually(element)).withAttributes(DefaultAttributes.repeat) } @@ -442,9 +430,7 @@ object Source { def unfoldAsync[S, E](s: S)(f: S => Future[Option[(S, E)]]): Source[E, NotUsed] = Source.fromGraph(new UnfoldAsync(s, f)) - /** - * A `Source` with no elements, i.e. an empty stream that is completed immediately for every connected `Sink`. - */ + /** A `Source` with no elements, i.e. an empty stream that is completed immediately for every connected `Sink`. */ def empty[T]: Source[T, NotUsed] = _empty private[this] val _empty: Source[Nothing, NotUsed] = Source.fromGraph(EmptySource) @@ -463,9 +449,7 @@ object Source { def maybe[T]: Source[T, Promise[Option[T]]] = Source.fromGraph(MaybeSource.asInstanceOf[Graph[SourceShape[T], Promise[Option[T]]]]) - /** - * Create a `Source` that immediately ends the stream with the `cause` error to every connected `Sink`. - */ + /** Create a `Source` that immediately ends the stream with the `cause` error to every connected `Sink`. */ def failed[T](cause: Throwable): Source[T, NotUsed] = Source.fromGraph(new FailedSource[T](cause)) @@ -581,9 +565,7 @@ object Source { def lazyFutureSource[T, M](create: () => Future[Source[T, M]]): Source[T, Future[M]] = lazySource(() => futureSource(create())).mapMaterializedValue(_.flatten) - /** - * Creates a `Source` that is materialized as a [[org.reactivestreams.Subscriber]] - */ + /** Creates a `Source` that is materialized as a [[org.reactivestreams.Subscriber]] */ def asSubscriber[T]: Source[T, Subscriber[T]] = fromGraph(new SubscriberSource[T](DefaultAttributes.subscriberSource, shape("SubscriberSource"))) @@ -667,21 +649,22 @@ object Source { * * See also [[akka.stream.scaladsl.Source.queue]]. 
* - * * @param bufferSize The size of the buffer in element count * @param overflowStrategy Strategy that is used when incoming elements cannot fit inside the buffer */ @deprecated("Use variant accepting completion and failure matchers instead", "2.6.0") def actorRef[T](bufferSize: Int, overflowStrategy: OverflowStrategy): Source[T, ActorRef] = - actorRef({ - case akka.actor.Status.Success(s: CompletionStrategy) => s - case akka.actor.Status.Success(_) => CompletionStrategy.Draining - case akka.actor.Status.Success => CompletionStrategy.Draining - }, { case akka.actor.Status.Failure(cause) => cause }, bufferSize, overflowStrategy) + actorRef( + { + case akka.actor.Status.Success(s: CompletionStrategy) => s + case akka.actor.Status.Success(_) => CompletionStrategy.Draining + case akka.actor.Status.Success => CompletionStrategy.Draining + }, + { case akka.actor.Status.Failure(cause) => cause }, + bufferSize, + overflowStrategy) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] def actorRefWithAck[T]( ackTo: Option[ActorRef], ackMessage: Any, @@ -732,15 +715,17 @@ object Source { */ @deprecated("Use actorRefWithBackpressure accepting completion and failure matchers instead", "2.6.0") def actorRefWithAck[T](ackMessage: Any): Source[T, ActorRef] = - actorRefWithAck(None, ackMessage, { - case akka.actor.Status.Success(s: CompletionStrategy) => s - case akka.actor.Status.Success(_) => CompletionStrategy.Draining - case akka.actor.Status.Success => CompletionStrategy.Draining - }, { case akka.actor.Status.Failure(cause) => cause }) - - /** - * Combines several sources with fan-in strategy like [[Merge]] or [[Concat]] into a single [[Source]]. 
- */ + actorRefWithAck( + None, + ackMessage, + { + case akka.actor.Status.Success(s: CompletionStrategy) => s + case akka.actor.Status.Success(_) => CompletionStrategy.Draining + case akka.actor.Status.Success => CompletionStrategy.Draining + }, + { case akka.actor.Status.Failure(cause) => cause }) + + /** Combines several sources with fan-in strategy like [[Merge]] or [[Concat]] into a single [[Source]]. */ def combine[T, U](first: Source[T, _], second: Source[T, _], rest: Source[T, _]*)( @nowarn @deprecatedName(Symbol("strategy")) @@ -760,9 +745,7 @@ object Source { combineRest(2, rest.iterator) }) - /** - * Combines several sources with fan-in strategy like [[Merge]] or [[Concat]] into a single [[Source]]. - */ + /** Combines several sources with fan-in strategy like [[Merge]] or [[Concat]] into a single [[Source]]. */ def combine[T, U, M](sources: immutable.Seq[Graph[SourceShape[T], M]])( fanInStrategy: Int => Graph[UniformFanInShape[T, U], NotUsed]): Source[U, immutable.Seq[M]] = sources match { @@ -779,9 +762,7 @@ object Source { }) } - /** - * Combines two sources with fan-in strategy like [[Merge]] or [[Concat]] into a single [[Source]] with a materialized value. - */ + /** Combines two sources with fan-in strategy like [[Merge]] or [[Concat]] into a single [[Source]] with a materialized value. */ def combineMat[T, U, M1, M2, M](first: Source[T, M1], second: Source[T, M2])( @nowarn @deprecatedName(Symbol("strategy")) @@ -794,9 +775,7 @@ object Source { SourceShape(c.out) }) - /** - * Combine the elements of multiple streams into a stream of sequences. - */ + /** Combine the elements of multiple streams into a stream of sequences. 
*/ def zipN[T](sources: immutable.Seq[Source[T, _]]): Source[immutable.Seq[T], NotUsed] = zipWithN(ConstantFun.scalaIdentityFunction[immutable.Seq[T]])(sources).addAttributes(DefaultAttributes.zipN) diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/SourceWithContext.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/SourceWithContext.scala index 43270f28df6..d22eb66139e 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/SourceWithContext.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/SourceWithContext.scala @@ -10,9 +10,7 @@ import akka.stream._ object SourceWithContext { - /** - * Creates a SourceWithContext from a regular source that operates on a tuple of `(data, context)` elements. - */ + /** Creates a SourceWithContext from a regular source that operates on a tuple of `(data, context)` elements. */ def fromTuples[Out, CtxOut, Mat](source: Source[(Out, CtxOut), Mat]): SourceWithContext[Out, CtxOut, Mat] = new SourceWithContext(source) } diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala index 067b6a8fdb5..3ace463eb0c 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala @@ -19,9 +19,7 @@ import akka.stream.impl.Stages.DefaultAttributes import akka.stream.impl.io.{ InputStreamSinkStage, InputStreamSource, OutputStreamGraphStage, OutputStreamSourceStage } import akka.util.ByteString -/** - * Converters for interacting with the blocking `java.io` streams APIs and Java 8 Streams - */ +/** Converters for interacting with the blocking `java.io` streams APIs and Java 8 Streams */ object StreamConverters { /** @@ -108,9 +106,8 @@ object StreamConverters { def javaCollector[T, R](collectorFactory: () => java.util.stream.Collector[T, _ <: Any, R]): Sink[T, Future[R]] = Flow[T] .fold { - new FirstCollectorState[T, 
R](collectorFactory.asInstanceOf[() => java.util.stream.Collector[T, Any, R]]): CollectorState[ - T, - R] + new FirstCollectorState[T, R]( + collectorFactory.asInstanceOf[() => java.util.stream.Collector[T, Any, R]]): CollectorState[T, R] } { (state, elem) => state.update(elem) } @@ -178,29 +175,28 @@ object StreamConverters { // TODO removing the QueueSink name, see issue #22523 Sink .fromGraph(new QueueSink[T](1).withAttributes(Attributes.none)) - .mapMaterializedValue( - queue => - StreamSupport - .stream( - Spliterators.spliteratorUnknownSize( - new java.util.Iterator[T] { - var nextElementFuture: Future[Option[T]] = queue.pull() - var nextElement: Option[T] = _ - - override def hasNext: Boolean = { - nextElement = Await.result(nextElementFuture, Inf) - nextElement.isDefined - } - - override def next(): T = { - val next = nextElement.get - nextElementFuture = queue.pull() - next - } - }, - 0), - false) - .onClose(new Runnable { def run = queue.cancel() })) + .mapMaterializedValue(queue => + StreamSupport + .stream( + Spliterators.spliteratorUnknownSize( + new java.util.Iterator[T] { + var nextElementFuture: Future[Option[T]] = queue.pull() + var nextElement: Option[T] = _ + + override def hasNext: Boolean = { + nextElement = Await.result(nextElementFuture, Inf) + nextElement.isDefined + } + + override def next(): T = { + val next = nextElement.get + nextElementFuture = queue.pull() + next + } + }, + 0), + false) + .onClose(new Runnable { def run = queue.cancel() })) .withAttributes(DefaultAttributes.asJavaStream) } diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/StreamRefs.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/StreamRefs.scala index 370f72539f9..50c814a22c3 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/StreamRefs.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/StreamRefs.scala @@ -8,9 +8,7 @@ import akka.stream.{ SinkRef, SourceRef } import akka.stream.impl.streamref.{ SinkRefStageImpl, 
SourceRefStageImpl } import akka.util.OptionVal -/** - * Factories for creating stream refs. - */ +/** Factories for creating stream refs. */ object StreamRefs { /** diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/TLS.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/TLS.scala index 52d5a9b7426..655325e05e9 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/TLS.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/TLS.scala @@ -66,7 +66,9 @@ object TLS { */ def apply( createSSLEngine: () => SSLEngine, // we don't offer the internal `ActorSystem => SSLEngine` API here, see #21753 - verifySession: SSLSession => Try[Unit], // we don't offer the internal API that provides `ActorSystem` here, see #21753 + verifySession: SSLSession => Try[ + Unit + ], // we don't offer the internal API that provides `ActorSystem` here, see #21753 closing: TLSClosing): scaladsl.BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] = scaladsl.BidiFlow.fromGraph(TlsModule(Attributes.none, createSSLEngine, verifySession, closing)) @@ -111,9 +113,7 @@ import javax.net.ssl.SSLPeerUnverifiedException /** Allows access to an SSLSession with Scala types */ trait ScalaSessionAPI { - /** - * The underlying [[javax.net.ssl.SSLSession]]. - */ + /** The underlying [[javax.net.ssl.SSLSession]]. */ def session: SSLSession /** diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Tcp.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Tcp.scala index fb9d9f3613e..afa1727b4e2 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Tcp.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Tcp.scala @@ -52,9 +52,7 @@ object Tcp extends ExtensionId[Tcp] with ExtensionIdProvider { def unbind(): Future[Unit] = unbindAction() } - /** - * Represents an accepted incoming TCP connection. - */ + /** Represents an accepted incoming TCP connection. 
*/ final case class IncomingConnection( localAddress: InetSocketAddress, remoteAddress: InetSocketAddress, @@ -71,9 +69,7 @@ object Tcp extends ExtensionId[Tcp] with ExtensionIdProvider { } - /** - * Represents a prospective outgoing TCP connection. - */ + /** Represents a prospective outgoing TCP connection. */ final case class OutgoingConnection(remoteAddress: InetSocketAddress, localAddress: InetSocketAddress) def apply()(implicit system: ActorSystem): Tcp = super.apply(system) @@ -87,14 +83,14 @@ object Tcp extends ExtensionId[Tcp] with ExtensionIdProvider { // just wraps/unwraps the TLS byte events to provide ByteString, ByteString flows private val tlsWrapping: BidiFlow[ByteString, TLSProtocol.SendBytes, TLSProtocol.SslTlsInbound, ByteString, NotUsed] = - BidiFlow.fromFlows(Flow[ByteString].map(TLSProtocol.SendBytes.apply), Flow[TLSProtocol.SslTlsInbound].collect { - case sb: TLSProtocol.SessionBytes => sb.bytes + BidiFlow.fromFlows( + Flow[ByteString].map(TLSProtocol.SendBytes.apply), + Flow[TLSProtocol.SslTlsInbound].collect { case sb: TLSProtocol.SessionBytes => + sb.bytes // ignore other kinds of inbounds (currently only Truncated) - }) + }) - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] val defaultBacklog = 100 } diff --git a/akka-stream/src/main/scala/akka/stream/snapshot/MaterializerState.scala b/akka-stream/src/main/scala/akka/stream/snapshot/MaterializerState.scala index 6e97de8ec00..da9f6293a92 100644 --- a/akka-stream/src/main/scala/akka/stream/snapshot/MaterializerState.scala +++ b/akka-stream/src/main/scala/akka/stream/snapshot/MaterializerState.scala @@ -28,9 +28,7 @@ import akka.util.Timeout */ object MaterializerState { - /** - * Dump stream snapshots of all streams of the default system materializer. - */ + /** Dump stream snapshots of all streams of the default system materializer. 
*/ def streamSnapshots(system: ActorSystem): Future[immutable.Seq[StreamSnapshot]] = { SystemMaterializer(system).materializer match { case impl: PhasedFusingActorMaterializer => @@ -39,9 +37,7 @@ object MaterializerState { } } - /** - * Dump stream snapshots of all streams of the given materializer. - */ + /** Dump stream snapshots of all streams of the given materializer. */ def streamSnapshots(mat: Materializer): Future[immutable.Seq[StreamSnapshot]] = { mat match { case impl: PhasedFusingActorMaterializer => @@ -52,8 +48,8 @@ object MaterializerState { /** INTERNAL API */ @InternalApi - private[akka] def requestFromSupervisor(supervisor: ActorRef)( - implicit ec: ExecutionContext): Future[immutable.Seq[StreamSnapshot]] = { + private[akka] def requestFromSupervisor(supervisor: ActorRef)(implicit + ec: ExecutionContext): Future[immutable.Seq[StreamSnapshot]] = { // Arbitrary timeout: operation should always be quick, when it times out it will be because the materializer stopped implicit val timeout: Timeout = 10.seconds supervisor @@ -80,14 +76,10 @@ object MaterializerState { @DoNotInherit sealed trait StreamSnapshot { - /** - * Running interpreters - */ + /** Running interpreters */ def activeInterpreters: Seq[RunningInterpreter] - /** - * Interpreters that has been created but not yet initialized - the stream is not yet running - */ + /** Interpreters that has been created but not yet initialized - the stream is not yet running */ def newShells: Seq[UninitializedInterpreter] } @@ -110,36 +102,24 @@ sealed trait InterpreterSnapshot { @DoNotInherit sealed trait UninitializedInterpreter extends InterpreterSnapshot -/** - * A stream interpreter that is running/has been started - */ +/** A stream interpreter that is running/has been started */ @DoNotInherit sealed trait RunningInterpreter extends InterpreterSnapshot { - /** - * Each of the materialized graph stage logics running inside the interpreter - */ + /** Each of the materialized graph stage logics running 
inside the interpreter */ def logics: immutable.Seq[LogicSnapshot] - /** - * Each connection between logics in the interpreter - */ + /** Each connection between logics in the interpreter */ def connections: immutable.Seq[ConnectionSnapshot] - /** - * Total number of non-stopped logics in the interpreter - */ + /** Total number of non-stopped logics in the interpreter */ def runningLogicsCount: Int - /** - * All logics that has completed and is no longer executing - */ + /** All logics that has completed and is no longer executing */ def stoppedLogics: immutable.Seq[LogicSnapshot] } -/** - * Not for user extension - */ +/** Not for user extension */ @DoNotInherit sealed trait LogicSnapshot { def label: String @@ -148,9 +128,7 @@ sealed trait LogicSnapshot { object ConnectionSnapshot { - /** - * Not for user extension - */ + /** Not for user extension */ @DoNotInherit sealed trait ConnectionState case object ShouldPull extends ConnectionState @@ -158,9 +136,7 @@ object ConnectionSnapshot { case object Closed extends ConnectionState } -/** - * Not for user extension - */ +/** Not for user extension */ @DoNotInherit sealed trait ConnectionSnapshot { def in: LogicSnapshot @@ -168,9 +144,7 @@ sealed trait ConnectionSnapshot { def state: ConnectionSnapshot.ConnectionState } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi final private[akka] case class StreamSnapshotImpl( self: ActorPath, @@ -180,16 +154,12 @@ final private[akka] case class StreamSnapshotImpl( override def toString: String = s"StreamSnapshot($self, $activeInterpreters, $newShells)" } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class UninitializedInterpreterImpl(logics: immutable.Seq[LogicSnapshot]) extends UninitializedInterpreter -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class RunningInterpreterImpl( logics: immutable.Seq[LogicSnapshot], @@ -200,9 +170,7 @@ private[akka] final case class 
RunningInterpreterImpl( extends RunningInterpreter with HideImpl -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class LogicSnapshotImpl(index: Int, label: String, attributes: Attributes) extends LogicSnapshot @@ -211,9 +179,7 @@ private[akka] final case class LogicSnapshotImpl(index: Int, label: String, attr override def toString: String = s"Logic($label)" } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] final case class ConnectionSnapshotImpl( id: Int, @@ -223,9 +189,7 @@ private[akka] final case class ConnectionSnapshotImpl( extends ConnectionSnapshot with HideImpl -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi trait HideImpl { override def toString: String = super.toString.replaceFirst("Impl", "") diff --git a/akka-stream/src/main/scala/akka/stream/stage/GraphStage.scala b/akka-stream/src/main/scala/akka/stream/stage/GraphStage.scala index 8b8d0da5742..0e47003f578 100644 --- a/akka-stream/src/main/scala/akka/stream/stage/GraphStage.scala +++ b/akka-stream/src/main/scala/akka/stream/stage/GraphStage.scala @@ -58,9 +58,7 @@ abstract class GraphStageWithMaterializedValue[+S <: Shape, +M] extends Graph[S, private var _traversalBuilder: TraversalBuilder = _ - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] final override def traversalBuilder: TraversalBuilder = { // _traversalBuilder instance is cached to avoid allocations, no need for volatile or synchronization if (_traversalBuilder eq null) { @@ -159,17 +157,13 @@ object GraphStageLogic { override def onUpstreamFailure(ex: Throwable): Unit = () } - /** - * Output handler that terminates the operator upon cancellation. - */ + /** Output handler that terminates the operator upon cancellation. */ object EagerTerminateOutput extends OutHandler { override def onPull(): Unit = () override def toString = "EagerTerminateOutput" } - /** - * Output handler that does not terminate the operator upon cancellation. 
- */ + /** Output handler that does not terminate the operator upon cancellation. */ object IgnoreTerminateOutput extends OutHandler { override def onPull(): Unit = () override def onDownstreamFinish(cause: Throwable): Unit = () @@ -273,9 +267,7 @@ object GraphStageLogic { private[stream] val NoPromise: Promise[Done] = Promise.successful(Done) } -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object ConcurrentAsyncCallbackState { sealed trait State[+E] @@ -314,14 +306,10 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: def this(shape: Shape) = this(shape.inlets.size, shape.outlets.size) - /** - * INTERNAL API - */ + /** INTERNAL API */ private[stream] var stageId: Int = Int.MinValue - /** - * INTERNAL API - */ + /** INTERNAL API */ private[stream] var attributes: Attributes = Attributes.none /** @@ -339,9 +327,7 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: */ private[stream] val handlers = new Array[Any](inCount + outCount) - /** - * INTERNAL API - */ + /** INTERNAL API */ private[stream] def inHandler(id: Int): InHandler = { if (id > inCount) throw new IllegalArgumentException(s"$id not in inHandler range $inCount in $this") if (inCount < 1) @@ -356,25 +342,17 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: handlers(inCount + id).asInstanceOf[OutHandler] } - /** - * INTERNAL API - */ + /** INTERNAL API */ // Using common array to reduce overhead for small port counts private[stream] val portToConn = new Array[Connection](handlers.length) - /** - * INTERNAL API - */ + /** INTERNAL API */ private[this] var _interpreter: GraphInterpreter = _ - /** - * INTERNAL API - */ + /** INTERNAL API */ private[stream] def interpreter_=(gi: GraphInterpreter): Unit = _interpreter = gi - /** - * INTERNAL API - */ + /** INTERNAL API */ private[akka] def interpreter: GraphInterpreter = if (_interpreter == null) throw new IllegalStateException( @@ 
-423,14 +401,10 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: */ final protected def totallyIgnorantInput: InHandler = TotallyIgnorantInput - /** - * Output handler that terminates the operator upon cancellation. - */ + /** Output handler that terminates the operator upon cancellation. */ final protected def eagerTerminateOutput: OutHandler = EagerTerminateOutput - /** - * Output handler that does not terminate the operator upon cancellation. - */ + /** Output handler that does not terminate the operator upon cancellation. */ final protected def ignoreTerminateOutput: OutHandler = IgnoreTerminateOutput /** @@ -440,32 +414,24 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: final protected def conditionalTerminateOutput(predicate: () => Boolean): OutHandler = new ConditionalTerminateOutput(predicate) - /** - * Assigns callbacks for the events for an [[Inlet]] - */ + /** Assigns callbacks for the events for an [[Inlet]] */ final protected def setHandler(in: Inlet[_], handler: InHandler): Unit = { handlers(in.id) = handler if (_interpreter != null) _interpreter.setHandler(conn(in), handler) } - /** - * Assign callbacks for linear operator for both [[Inlet]] and [[Outlet]] - */ + /** Assign callbacks for linear operator for both [[Inlet]] and [[Outlet]] */ final protected def setHandlers(in: Inlet[_], out: Outlet[_], handler: InHandler with OutHandler): Unit = { setHandler(in, handler) setHandler(out, handler) } - /** - * Retrieves the current callback for the events on the given [[Inlet]] - */ + /** Retrieves the current callback for the events on the given [[Inlet]] */ final protected def getHandler(in: Inlet[_]): InHandler = { handlers(in.id).asInstanceOf[InHandler] } - /** - * Assigns callbacks for the events for an [[Outlet]] - */ + /** Assigns callbacks for the events for an [[Outlet]] */ final protected def setHandler(out: Outlet[_], handler: OutHandler): Unit = { handlers(out.id + inCount) = 
handler if (_interpreter != null) _interpreter.setHandler(conn(out), handler) @@ -474,9 +440,7 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: private def conn(in: Inlet[_]): Connection = portToConn(in.id) private def conn(out: Outlet[_]): Connection = portToConn(out.id + inCount) - /** - * Retrieves the current callback for the events on the given [[Outlet]] - */ + /** Retrieves the current callback for the events on the given [[Outlet]] */ final protected def getHandler(out: Outlet[_]): OutHandler = { handlers(out.id + inCount).asInstanceOf[OutHandler] } @@ -527,9 +491,7 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: */ final protected def cancel[T](in: Inlet[T]): Unit = cancel(in, SubscriptionWithCancelException.NoMoreElementsNeeded) - /** - * Requests to stop receiving events from a given input port. Cancelling clears any ungrabbed elements from the port. - */ + /** Requests to stop receiving events from a given input port. Cancelling clears any ungrabbed elements from the port. 
*/ final protected def cancel[T](in: Inlet[T], cause: Throwable): Unit = cancel(conn(in), cause) private def cancel[T](connection: Connection, cause: Throwable): Unit = @@ -539,8 +501,8 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: // ignore pushs now, since the stage wanted it cancelled already // do not ignore termination signals connection.inHandler = EagerTerminateInput - val callback = getAsyncCallback[(Connection, Throwable)] { - case (connection, cause) => doCancel(connection, cause) + val callback = getAsyncCallback[(Connection, Throwable)] { case (connection, cause) => + doCancel(connection, cause) } materializer.scheduleOnce(delay, () => callback.invoke((connection, cause))) case _ => @@ -609,17 +571,17 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: connection.slot match { case Empty | _ @(_: Cancelled) => false // cancelled (element is discarded when cancelled) case _ => true // completed but element still there to grab - } else if ((connection.portState & (InReady | InFailed)) == (InReady | InFailed)) + } + else if ((connection.portState & (InReady | InFailed)) == (InReady | InFailed)) connection.slot match { case Failed(_, elem) => elem.asInstanceOf[AnyRef] ne Empty // failed but element still there to grab case _ => false - } else false + } + else false } } - /** - * Indicates whether the port has been closed. A closed port cannot be pulled. - */ + /** Indicates whether the port has been closed. A closed port cannot be pulled. */ final protected def isClosed[T](in: Inlet[T]): Boolean = (conn(in).portState & InClosed) != 0 /** @@ -662,18 +624,14 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: final protected def setKeepGoing(enabled: Boolean): Unit = interpreter.setKeepGoing(this, enabled) - /** - * Signals that there will be no more elements emitted on the given port. 
- */ + /** Signals that there will be no more elements emitted on the given port. */ final protected def complete[T](out: Outlet[T]): Unit = getHandler(out) match { case e: Emitting[T @unchecked] => e.addFollowUp(new EmittingCompletion[T](e.out, e.previous)) case _ => interpreter.complete(conn(out)) } - /** - * Signals failure through the given port. - */ + /** Signals failure through the given port. */ final protected def fail[T](out: Outlet[T], ex: Throwable): Unit = interpreter.fail(conn(out), ex) /** @@ -685,9 +643,7 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: // Variable used from `OutHandler.onDownstreamFinish` to carry over cancellation cause in cases where // `OutHandler` implementations call `super.onDownstreamFinished()`. - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[stream] var lastCancellationCause: Throwable = _ /** @@ -764,15 +720,11 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: _subInletsAndOutlets = Set.empty } - /** - * Return true if the given output port is ready to be pushed. - */ + /** Return true if the given output port is ready to be pushed. */ final def isAvailable[T](out: Outlet[T]): Boolean = (conn(out).portState & (OutReady | OutClosed)) == OutReady - /** - * Indicates whether the port has been closed. A closed port cannot be pushed. - */ + /** Indicates whether the port has been closed. A closed port cannot be pushed. */ final protected def isClosed[T](out: Outlet[T]): Boolean = (conn(out).portState & OutClosed) != 0 /** @@ -785,14 +737,14 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: * the `onClose` function is invoked with the elements which were read. 
*/ final protected def readN[T](in: Inlet[T], n: Int)(andThen: Seq[T] => Unit, onClose: Seq[T] => Unit): Unit = - //FIXME `onClose` is a poor name for `onComplete` rename this at the earliest possible opportunity + // FIXME `onClose` is a poor name for `onComplete` rename this at the earliest possible opportunity if (n < 0) throw new IllegalArgumentException("cannot read negative number of elements") else if (n == 0) andThen(Nil) else { val result = new Array[AnyRef](n).asInstanceOf[Array[T]] var pos = 0 - if (isAvailable(in)) { //If we already have data available, then shortcircuit and read the first + if (isAvailable(in)) { // If we already have data available, then shortcircuit and read the first result(pos) = grab(in) pos += 1 } @@ -800,11 +752,15 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: if (n != pos) { // If we aren't already done requireNotReading(in) if (!hasBeenPulled(in)) pull(in) - setHandler(in, new Reading(in, n - pos, getHandler(in))((elem: T) => { - result(pos) = elem - pos += 1 - if (pos == n) andThen(result.toSeq) - }, () => onClose(result.take(pos).toSeq))) + setHandler( + in, + new Reading(in, n - pos, getHandler(in))( + (elem: T) => { + result(pos) = elem + pos += 1 + if (pos == n) andThen(result.toSeq) + }, + () => onClose(result.take(pos).toSeq))) } else andThen(result.toSeq) } @@ -819,7 +775,7 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: n: Int, andThen: Procedure[java.util.List[T]], onClose: Procedure[java.util.List[T]]): Unit = { - //FIXME `onClose` is a poor name for `onComplete` rename this at the earliest possible opportunity + // FIXME `onClose` is a poor name for `onComplete` rename this at the earliest possible opportunity import akka.util.ccompat.JavaConverters._ readN(in, n)(seq => andThen(seq.asJava), seq => onClose(seq.asJava)) } @@ -1237,7 +1193,7 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: 
Future.failed(streamDetachedException) } - //external call + // external call override def invoke(event: T): Unit = invokeWithPromise(event, NoPromise) @tailrec @@ -1391,15 +1347,11 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: private def streamDetachedException = new StreamDetachedException(s"Stage with GraphStageLogic ${this} stopped before async invocation was processed") - /** - * Invoked before any external events are processed, at the startup of the operator. - */ + /** Invoked before any external events are processed, at the startup of the operator. */ @throws(classOf[Exception]) def preStart(): Unit = () - /** - * Invoked after processing of external events stopped because the operator is about to stop or fail. - */ + /** Invoked after processing of external events stopped because the operator is about to stop or fail. */ @throws(classOf[Exception]) def postStop(): Unit = () @@ -1422,22 +1374,24 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: private var closed = false private var pulled = false - private val _sink = new SubSink[T](name, getAsyncCallback[ActorSubscriberMessage] { msg => - if (!closed) msg match { - case OnNext(e) => - elem = e.asInstanceOf[T] - pulled = false - handler.onPush() - case OnComplete => - closed = true - handler.onUpstreamFinish() - GraphStageLogic.this.completedOrFailed(this) - case OnError(ex) => - closed = true - handler.onUpstreamFailure(ex) - GraphStageLogic.this.completedOrFailed(this) - } - }.invoke _) + private val _sink = new SubSink[T]( + name, + getAsyncCallback[ActorSubscriberMessage] { msg => + if (!closed) msg match { + case OnNext(e) => + elem = e.asInstanceOf[T] + pulled = false + handler.onPush() + case OnComplete => + closed = true + handler.onUpstreamFinish() + GraphStageLogic.this.completedOrFailed(this) + case OnError(ex) => + closed = true + handler.onUpstreamFailure(ex) + GraphStageLogic.this.completedOrFailed(this) + } + }.invoke _) 
GraphStageLogic.this.created(this) @@ -1518,15 +1472,11 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: private val _source = new SubSource[T](name, callback) GraphStageLogic.this.created(this) - /** - * Set the source into timed-out mode if it has not yet been materialized. - */ + /** Set the source into timed-out mode if it has not yet been materialized. */ def timeout(d: FiniteDuration): Unit = if (_source.timeout(d)) closed = true - /** - * Get the Source for this dynamic output port. - */ + /** Get the Source for this dynamic output port. */ def source: Graph[SourceShape[T], NotUsed] = _source /** @@ -1535,9 +1485,7 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: */ def setHandler(handler: OutHandler): Unit = this.handler = handler - /** - * Returns `true` if this output port can be pushed. - */ + /** Returns `true` if this output port can be pushed. */ def isAvailable: Boolean = available /** @@ -1548,18 +1496,14 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: */ def isClosed: Boolean = closed - /** - * Push to this output port. - */ + /** Push to this output port. */ def push(elem: T): Unit = { if (!isAvailable) throw new IllegalArgumentException(s"Cannot push port ($this) twice, or before it being pulled") available = false _source.pushSubstream(elem) } - /** - * Complete this output port. - */ + /** Complete this output port. */ def complete(): Unit = { available = false closed = true @@ -1567,9 +1511,7 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: GraphStageLogic.this.completedOrFailed(this) } - /** - * Fail this output port. - */ + /** Fail this output port. */ def fail(ex: Throwable): Unit = { available = false closed = true @@ -1856,9 +1798,7 @@ abstract class GraphStageLogicWithLogging(_shape: Shape) extends GraphStageLogic /** Java API: [[TimerGraphStageLogic]] with [[StageLogging]]. 
*/ abstract class TimerGraphStageLogicWithLogging(_shape: Shape) extends TimerGraphStageLogic(_shape) with StageLogging -/** - * Collection of callbacks for an input port of a [[GraphStage]] - */ +/** Collection of callbacks for an input port of a [[GraphStage]] */ trait InHandler { /** @@ -1868,22 +1808,16 @@ trait InHandler { @throws(classOf[Exception]) def onPush(): Unit - /** - * Called when the input port is finished. After this callback no other callbacks will be called for this port. - */ + /** Called when the input port is finished. After this callback no other callbacks will be called for this port. */ @throws(classOf[Exception]) def onUpstreamFinish(): Unit = GraphInterpreter.currentInterpreter.activeStage.completeStage() - /** - * Called when the input port has failed. After this callback no other callbacks will be called for this port. - */ + /** Called when the input port has failed. After this callback no other callbacks will be called for this port. */ @throws(classOf[Exception]) def onUpstreamFailure(ex: Throwable): Unit = GraphInterpreter.currentInterpreter.activeStage.failStage(ex) } -/** - * Collection of callbacks for an output port of a [[GraphStage]] - */ +/** Collection of callbacks for an output port of a [[GraphStage]] */ trait OutHandler { /** @@ -1898,7 +1832,10 @@ trait OutHandler { * be called for this port. 
*/ @throws(classOf[Exception]) - @deprecatedOverriding("Override `def onDownstreamFinish(cause: Throwable)`, instead.", since = "2.6.0") // warns when overriding + @deprecatedOverriding( + "Override `def onDownstreamFinish(cause: Throwable)`, instead.", + since = "2.6.0" + ) // warns when overriding @deprecated("Call onDownstreamFinish with a cancellation cause.", since = "2.6.0") // warns when calling def onDownstreamFinish(): Unit = { val thisStage = GraphInterpreter.currentInterpreter.activeStage @@ -1919,7 +1856,7 @@ trait OutHandler { require(cause ne null, "Cancellation cause must not be null") require(thisStage.lastCancellationCause eq null, "onDownstreamFinish(cause) must not be called recursively") thisStage.lastCancellationCause = cause - (onDownstreamFinish(): @nowarn("msg=deprecated")) // if not overridden, call old deprecated variant + onDownstreamFinish(): @nowarn("msg=deprecated") // if not overridden, call old deprecated variant } finally thisStage.lastCancellationCause = null } } diff --git a/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala b/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala index f5c1401f961..962540644b6 100644 --- a/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala +++ b/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala @@ -74,10 +74,9 @@ private[testkit] class CallingThreadDispatcherQueues extends Extension { // we have to forget about long-gone threads sometime private def gc(): Unit = { queues = queues - .foldLeft(Map.newBuilder[CallingThreadMailbox, Set[WeakReference[MessageQueue]]]) { - case (m, (k, v)) => - val nv = v.filter(_.get ne null) - if (nv.isEmpty) m else m += (k -> nv) + .foldLeft(Map.newBuilder[CallingThreadMailbox, Set[WeakReference[MessageQueue]]]) { case (m, (k, v)) => + val nv = v.filter(_.get ne null) + if (nv.isEmpty) m else m += (k -> nv) } .result() } @@ -302,20 +301,22 @@ class CallingThreadDispatcher(_configurator: 
MessageDispatcherConfigurator) exte // this actors mailbox at some other level on our call stack if (!mbox.ctdLock.isHeldByCurrentThread) { var intex = interruptedEx - val gotLock = try { - mbox.ctdLock.tryLock(50, TimeUnit.MILLISECONDS) - } catch { - case ie: InterruptedException => - Thread.interrupted() // clear interrupted flag before we continue, exception will be thrown later - intex = ie - false - } - if (gotLock) { - val ie = try { - process(intex) - } finally { - mbox.ctdLock.unlock + val gotLock = + try { + mbox.ctdLock.tryLock(50, TimeUnit.MILLISECONDS) + } catch { + case ie: InterruptedException => + Thread.interrupted() // clear interrupted flag before we continue, exception will be thrown later + intex = ie + false } + if (gotLock) { + val ie = + try { + process(intex) + } finally { + mbox.ctdLock.unlock + } throwInterruptionIfExistsOrSet(ie) } else { // if we didn't get the lock and our mailbox still has messages, then we need to try again diff --git a/akka-testkit/src/main/scala/akka/testkit/ExplicitlyTriggeredScheduler.scala b/akka-testkit/src/main/scala/akka/testkit/ExplicitlyTriggeredScheduler.scala index 231d8173dab..ea9b203772a 100644 --- a/akka-testkit/src/main/scala/akka/testkit/ExplicitlyTriggeredScheduler.scala +++ b/akka-testkit/src/main/scala/akka/testkit/ExplicitlyTriggeredScheduler.scala @@ -39,12 +39,12 @@ class ExplicitlyTriggeredScheduler(@unused config: Config, log: LoggingAdapter, private val currentTime = new AtomicLong() private val scheduled = new ConcurrentHashMap[Item, Long]() - override def schedule(initialDelay: FiniteDuration, interval: FiniteDuration, runnable: Runnable)( - implicit executor: ExecutionContext): Cancellable = + override def schedule(initialDelay: FiniteDuration, interval: FiniteDuration, runnable: Runnable)(implicit + executor: ExecutionContext): Cancellable = schedule(initialDelay, Some(interval), runnable) - override def scheduleOnce(delay: FiniteDuration, runnable: Runnable)( - implicit executor: 
ExecutionContext): Cancellable = + override def scheduleOnce(delay: FiniteDuration, runnable: Runnable)(implicit + executor: ExecutionContext): Cancellable = schedule(delay, None, runnable) /** @@ -124,8 +124,6 @@ class ExplicitlyTriggeredScheduler(@unused config: Config, log: LoggingAdapter, override def maxFrequency: Double = 42 - /** - * The scheduler need to expose its internal time for testing. - */ + /** The scheduler need to expose its internal time for testing. */ def currentTimeMs: Long = currentTime.get() } diff --git a/akka-testkit/src/main/scala/akka/testkit/SocketUtil.scala b/akka-testkit/src/main/scala/akka/testkit/SocketUtil.scala index 500c2676346..ef387f47c31 100644 --- a/akka-testkit/src/main/scala/akka/testkit/SocketUtil.scala +++ b/akka-testkit/src/main/scala/akka/testkit/SocketUtil.scala @@ -12,9 +12,7 @@ import scala.collection.immutable import scala.util.Random import scala.util.control.NonFatal -/** - * Utilities to get free socket address. - */ +/** Utilities to get free socket address. 
*/ object SocketUtil { val RANDOM_LOOPBACK_ADDRESS = "RANDOM_LOOPBACK_ADDRESS" @@ -90,15 +88,17 @@ object SocketUtil { } val addr = new InetSocketAddress(address, 0) - try if (udp) { - val ds = DatagramChannel.open().socket() - ds.bind(addr) - (ds, new InetSocketAddress(address, ds.getLocalPort)) - } else { - val ss = ServerSocketChannel.open().socket() - ss.bind(addr) - (ss, new InetSocketAddress(address, ss.getLocalPort)) - } catch { + try + if (udp) { + val ds = DatagramChannel.open().socket() + ds.bind(addr) + (ds, new InetSocketAddress(address, ds.getLocalPort)) + } else { + val ss = ServerSocketChannel.open().socket() + ss.bind(addr) + (ss, new InetSocketAddress(address, ss.getLocalPort)) + } + catch { case NonFatal(ex) => throw new RuntimeException(s"Binding to $addr failed with ${ex.getMessage}", ex) } diff --git a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala index b0cf1890a59..684f9ce170c 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala @@ -23,45 +23,48 @@ import akka.pattern.ask */ @nowarn // 'early initializers' are deprecated on 2.13 and will be replaced with trait parameters on 2.14. 
https://github.com/akka/akka/issues/26753 class TestActorRef[T <: Actor](_system: ActorSystem, _props: Props, _supervisor: ActorRef, name: String) - extends LocalActorRef({ - val disregard = _supervisor match { - case l: LocalActorRef => l.underlying.reserveChild(name) - case r: RepointableActorRef => - r.underlying match { - case _: UnstartedCell => - throw new IllegalStateException( - "cannot attach a TestActor to an unstarted top-level actor, ensure that it is started by sending a message and observing the reply") - case c: ActorCell => c.reserveChild(name) - case o => - _system.log.error( - "trying to attach child {} to unknown type of supervisor cell {}, this is not going to end well", - name, - o.getClass) - } - case s => - _system.log.error( - "trying to attach child {} to unknown type of supervisor {}, this is not going to end well", - name, - s.getClass) - } + extends LocalActorRef( + { + val disregard = _supervisor match { + case l: LocalActorRef => l.underlying.reserveChild(name) + case r: RepointableActorRef => + r.underlying match { + case _: UnstartedCell => + throw new IllegalStateException( + "cannot attach a TestActor to an unstarted top-level actor, ensure that it is started by sending a message and observing the reply") + case c: ActorCell => c.reserveChild(name) + case o => + _system.log.error( + "trying to attach child {} to unknown type of supervisor cell {}, this is not going to end well", + name, + o.getClass) + } + case s => + _system.log.error( + "trying to attach child {} to unknown type of supervisor {}, this is not going to end well", + name, + s.getClass) + } - _system.asInstanceOf[ActorSystemImpl] - }, { - _props.withDispatcher( - if (_props.deploy.dispatcher == Deploy.NoDispatcherGiven) CallingThreadDispatcher.Id - else _props.dispatcher) - }, { - val props = _props.withDispatcher( - if (_props.deploy.dispatcher == Deploy.NoDispatcherGiven) CallingThreadDispatcher.Id - else _props.dispatcher) - 
_system.dispatchers.lookup(props.dispatcher) - }, { - val props = _props.withDispatcher( - if (_props.deploy.dispatcher == Deploy.NoDispatcherGiven) CallingThreadDispatcher.Id - else _props.dispatcher) - val dispatcher = _system.dispatchers.lookup(props.dispatcher) - _system.mailboxes.getMailboxType(props, dispatcher.configurator.config) - }, _supervisor.asInstanceOf[InternalActorRef], _supervisor.path / name) { + _system.asInstanceOf[ActorSystemImpl] + }, { + _props.withDispatcher( + if (_props.deploy.dispatcher == Deploy.NoDispatcherGiven) CallingThreadDispatcher.Id + else _props.dispatcher) + }, { + val props = _props.withDispatcher( + if (_props.deploy.dispatcher == Deploy.NoDispatcherGiven) CallingThreadDispatcher.Id + else _props.dispatcher) + _system.dispatchers.lookup(props.dispatcher) + }, { + val props = _props.withDispatcher( + if (_props.deploy.dispatcher == Deploy.NoDispatcherGiven) CallingThreadDispatcher.Id + else _props.dispatcher) + val dispatcher = _system.dispatchers.lookup(props.dispatcher) + _system.mailboxes.getMailboxType(props, dispatcher.configurator.config) + }, + _supervisor.asInstanceOf[InternalActorRef], + _supervisor.path / name) { val props = _props.withDispatcher( if (_props.deploy.dispatcher == Deploy.NoDispatcherGiven) CallingThreadDispatcher.Id @@ -172,57 +175,60 @@ object TestActorRef { new TestActorRef(sysImpl, props, supervisor.asInstanceOf[InternalActorRef], randomName) } - def apply[T <: Actor](props: Props, supervisor: ActorRef, name: String)( - implicit system: ActorSystem): TestActorRef[T] = { + def apply[T <: Actor](props: Props, supervisor: ActorRef, name: String)(implicit + system: ActorSystem): TestActorRef[T] = { val sysImpl = system.asInstanceOf[ActorSystemImpl] new TestActorRef(sysImpl, props, supervisor.asInstanceOf[InternalActorRef], name) } def apply[T <: Actor](implicit t: ClassTag[T], system: ActorSystem): TestActorRef[T] = apply[T](randomName) - private def dynamicCreateRecover[U]: PartialFunction[Throwable, 
U] = { - case exception => - throw ActorInitializationException( - null, - "Could not instantiate Actor" + - "\nMake sure Actor is NOT defined inside a class/trait," + - "\nif so put it outside the class/trait, f.e. in a companion object," + - "\nOR try to change: 'actorOf(Props[MyActor]' to 'actorOf(Props(new MyActor)'.", - exception) + private def dynamicCreateRecover[U]: PartialFunction[Throwable, U] = { case exception => + throw ActorInitializationException( + null, + "Could not instantiate Actor" + + "\nMake sure Actor is NOT defined inside a class/trait," + + "\nif so put it outside the class/trait, f.e. in a companion object," + + "\nOR try to change: 'actorOf(Props[MyActor]' to 'actorOf(Props(new MyActor)'.", + exception) } def apply[T <: Actor](name: String)(implicit t: ClassTag[T], system: ActorSystem): TestActorRef[T] = - apply[T](Props({ - system - .asInstanceOf[ExtendedActorSystem] - .dynamicAccess - .createInstanceFor[T](t.runtimeClass, Nil) - .recover(dynamicCreateRecover) - .get - }), name) + apply[T]( + Props { + system + .asInstanceOf[ExtendedActorSystem] + .dynamicAccess + .createInstanceFor[T](t.runtimeClass, Nil) + .recover(dynamicCreateRecover) + .get + }, + name) def apply[T <: Actor](supervisor: ActorRef)(implicit t: ClassTag[T], system: ActorSystem): TestActorRef[T] = - apply[T](Props({ - system - .asInstanceOf[ExtendedActorSystem] - .dynamicAccess - .createInstanceFor[T](t.runtimeClass, Nil) - .recover(dynamicCreateRecover) - .get - }), supervisor) - - def apply[T <: Actor](supervisor: ActorRef, name: String)( - implicit t: ClassTag[T], + apply[T]( + Props { + system + .asInstanceOf[ExtendedActorSystem] + .dynamicAccess + .createInstanceFor[T](t.runtimeClass, Nil) + .recover(dynamicCreateRecover) + .get + }, + supervisor) + + def apply[T <: Actor](supervisor: ActorRef, name: String)(implicit + t: ClassTag[T], system: ActorSystem): TestActorRef[T] = apply[T]( - Props({ + Props { system .asInstanceOf[ExtendedActorSystem] .dynamicAccess 
.createInstanceFor[T](t.runtimeClass, Nil) .recover(dynamicCreateRecover) .get - }), + }, supervisor, name) diff --git a/akka-testkit/src/main/scala/akka/testkit/TestActors.scala b/akka-testkit/src/main/scala/akka/testkit/TestActors.scala index e5167f62b8d..3807bd09f8e 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestActors.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestActors.scala @@ -6,26 +6,19 @@ package akka.testkit import akka.actor.{ Actor, ActorRef, Props } -/** - * A collection of common actor patterns used in tests. - */ +/** A collection of common actor patterns used in tests. */ object TestActors { - /** - * EchoActor sends back received messages (unmodified). - */ + /** EchoActor sends back received messages (unmodified). */ class EchoActor extends Actor { - override def receive = { - case message => sender() ! message + override def receive = { case message => + sender() ! message } } - /** - * BlackholeActor does nothing for incoming messages, its like a blackhole. - */ + /** BlackholeActor does nothing for incoming messages, its like a blackhole. */ class BlackholeActor extends Actor { - override def receive = { - case _ => // ignore... + override def receive = { case _ => // ignore... 
} } @@ -35,8 +28,8 @@ object TestActors { * @param ref target ActorRef to forward messages to */ class ForwardActor(ref: ActorRef) extends Actor { - override def receive = { - case message => ref.forward(message) + override def receive = { case message => + ref.forward(message) } } diff --git a/akka-testkit/src/main/scala/akka/testkit/TestEventListener.scala b/akka-testkit/src/main/scala/akka/testkit/TestEventListener.scala index c8f6feec53f..d0ebee69995 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestEventListener.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestEventListener.scala @@ -47,9 +47,7 @@ object TestEvent { } final case class Mute(filters: immutable.Seq[EventFilter]) extends TestEvent with NoSerializationVerificationNeeded { - /** - * Java API: create a Mute command from a list of filters - */ + /** Java API: create a Mute command from a list of filters */ def this(filters: JIterable[EventFilter]) = this(immutableSeq(filters)) } object UnMute { @@ -59,9 +57,7 @@ object TestEvent { extends TestEvent with NoSerializationVerificationNeeded { - /** - * Java API: create an UnMute command from a list of filters - */ + /** Java API: create an UnMute command from a list of filters */ def this(filters: JIterable[EventFilter]) = this(immutableSeq(filters)) } } @@ -133,9 +129,7 @@ abstract class EventFilter(occurrences: Int) { protected val message: Either[String, Regex] = Left("") protected val complete: Boolean = false - /** - * internal implementation helper, no guaranteed API - */ + /** internal implementation helper, no guaranteed API */ protected def doMatch(src: String, msg: Any) = { val msgstr = if (msg != null) msg.toString else "null" (source.isDefined && source.get == src || source.isEmpty) && @@ -192,9 +186,7 @@ object EventFilter { if (message ne null) Left(message) else Option(pattern).map(new Regex(_)).toRight(start), message ne null)(occurrences) - /** - * Create a filter for Error events. See apply() for more details. 
- */ + /** Create a filter for Error events. See apply() for more details. */ def error( message: String = null, source: String = null, @@ -357,9 +349,7 @@ final case class ErrorFilter( else Left(message), complete)(occurrences) - /** - * Java API: filter only on the given type of exception - */ + /** Java API: filter only on the given type of exception */ def this(throwable: Class[_]) = this(throwable, null, null, false, false, Int.MaxValue) } diff --git a/akka-testkit/src/main/scala/akka/testkit/TestException.scala b/akka-testkit/src/main/scala/akka/testkit/TestException.scala index cf156843535..79facfbf313 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestException.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestException.scala @@ -6,7 +6,5 @@ package akka.testkit import scala.util.control.NoStackTrace -/** - * A predefined exception that can be used in tests. It doesn't include a stack trace. - */ +/** A predefined exception that can be used in tests. It doesn't include a stack trace. */ final case class TestException(message: String) extends RuntimeException(message) with NoStackTrace diff --git a/akka-testkit/src/main/scala/akka/testkit/TestFSMRef.scala b/akka-testkit/src/main/scala/akka/testkit/TestFSMRef.scala index 8991716490d..1013399a547 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestFSMRef.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestFSMRef.scala @@ -33,20 +33,16 @@ import akka.actor._ * * @since 1.2 */ -class TestFSMRef[S, D, T <: Actor](system: ActorSystem, props: Props, supervisor: ActorRef, name: String)( - implicit ev: T <:< FSM[S, D]) +class TestFSMRef[S, D, T <: Actor](system: ActorSystem, props: Props, supervisor: ActorRef, name: String)(implicit + ev: T <:< FSM[S, D]) extends TestActorRef[T](system, props, supervisor, name) { private def fsm: T = underlyingActor - /** - * Get current state name of this FSM. - */ + /** Get current state name of this FSM. 
*/ def stateName: S = fsm.stateName - /** - * Get current state data of this FSM. - */ + /** Get current state data of this FSM. */ def stateData: D = fsm.stateData /** @@ -63,27 +59,19 @@ class TestFSMRef[S, D, T <: Actor](system: ActorSystem, props: Props, supervisor fsm.applyState(FSM.State(stateName, stateData, Option(timeout), stopReason)) } - /** - * Proxy for [[akka.actor.FSM#startTimerWithFixedDelay]]. - */ + /** Proxy for [[akka.actor.FSM#startTimerWithFixedDelay]]. */ def startTimerWithFixedDelay(name: String, msg: Any, delay: FiniteDuration): Unit = fsm.startTimerWithFixedDelay(name, msg, delay) - /** - * Proxy for [[akka.actor.FSM#startTimerAtFixedRate]]. - */ + /** Proxy for [[akka.actor.FSM#startTimerAtFixedRate]]. */ def startTimerAtFixedRate(name: String, msg: Any, interval: FiniteDuration): Unit = fsm.startTimerAtFixedRate(name, msg, interval) - /** - * Proxy for [[akka.actor.FSM#startSingleTimer]]. - */ + /** Proxy for [[akka.actor.FSM#startSingleTimer]]. */ def startSingleTimer(name: String, msg: Any, delay: FiniteDuration): Unit = fsm.startSingleTimer(name, msg, delay) - /** - * Proxy for [[akka.actor.FSM#setTimer]]. - */ + /** Proxy for [[akka.actor.FSM#setTimer]]. */ @deprecated( "Use startTimerWithFixedDelay or startTimerAtFixedRate instead. This has the same semantics as " + "startTimerAtFixedRate, but startTimerWithFixedDelay is often preferred.", @@ -92,19 +80,13 @@ class TestFSMRef[S, D, T <: Actor](system: ActorSystem, props: Props, supervisor fsm.setTimer(name, msg, timeout, repeat) } - /** - * Proxy for [[akka.actor.FSM#cancelTimer]]. - */ + /** Proxy for [[akka.actor.FSM#cancelTimer]]. */ def cancelTimer(name: String): Unit = { fsm.cancelTimer(name) } - /** - * Proxy for [[akka.actor.FSM#isStateTimerActive]]. - */ + /** Proxy for [[akka.actor.FSM#isStateTimerActive]]. */ def isTimerActive(name: String) = fsm.isTimerActive(name) - /** - * Proxy for [[akka.actor.FSM#isStateTimerActive]]. 
- */ + /** Proxy for [[akka.actor.FSM#isStateTimerActive]]. */ def isStateTimerActive = fsm.isStateTimerActive } @@ -116,22 +98,22 @@ object TestFSMRef { new TestFSMRef(impl, Props(factory), impl.guardian.asInstanceOf[InternalActorRef], TestActorRef.randomName) } - def apply[S, D, T <: Actor: ClassTag](factory: => T, name: String)( - implicit ev: T <:< FSM[S, D], + def apply[S, D, T <: Actor: ClassTag](factory: => T, name: String)(implicit + ev: T <:< FSM[S, D], system: ActorSystem): TestFSMRef[S, D, T] = { val impl = system.asInstanceOf[ActorSystemImpl] new TestFSMRef(impl, Props(factory), impl.guardian.asInstanceOf[InternalActorRef], name) } - def apply[S, D, T <: Actor: ClassTag](factory: => T, supervisor: ActorRef, name: String)( - implicit ev: T <:< FSM[S, D], + def apply[S, D, T <: Actor: ClassTag](factory: => T, supervisor: ActorRef, name: String)(implicit + ev: T <:< FSM[S, D], system: ActorSystem): TestFSMRef[S, D, T] = { val impl = system.asInstanceOf[ActorSystemImpl] new TestFSMRef(impl, Props(factory), supervisor.asInstanceOf[InternalActorRef], name) } - def apply[S, D, T <: Actor: ClassTag](factory: => T, supervisor: ActorRef)( - implicit ev: T <:< FSM[S, D], + def apply[S, D, T <: Actor: ClassTag](factory: => T, supervisor: ActorRef)(implicit + ev: T <:< FSM[S, D], system: ActorSystem): TestFSMRef[S, D, T] = { val impl = system.asInstanceOf[ActorSystemImpl] new TestFSMRef(impl, Props(factory), supervisor.asInstanceOf[InternalActorRef], TestActorRef.randomName) diff --git a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala index 6964c508079..f36c2405e54 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala @@ -167,9 +167,7 @@ trait TestKitBase { def lastSender = lastMessage.sender - /** - * Defines the testActor name. - */ + /** Defines the testActor name. 
*/ protected def testActorName: String = "testActor" /** @@ -181,10 +179,13 @@ trait TestKitBase { val ref = impl.systemActorOf( TestActor.props(queue).withDispatcher(CallingThreadDispatcher.Id), "%s-%d".format(testActorName, TestKit.testActorId.incrementAndGet)) - awaitCond(ref match { - case r: RepointableRef => r.isStarted - case _ => true - }, 3.seconds.dilated, 10.millis) + awaitCond( + ref match { + case r: RepointableRef => r.isStarted + case _ => true + }, + 3.seconds.dilated, + 10.millis) ref } @@ -202,22 +203,16 @@ trait TestKitBase { */ def ignoreMsg(f: PartialFunction[Any, Boolean]): Unit = { testActor ! TestActor.SetIgnore(Some(f)) } - /** - * Stop ignoring messages in the test actor. - */ + /** Stop ignoring messages in the test actor. */ def ignoreNoMsg(): Unit = { testActor ! TestActor.SetIgnore(None) } - /** - * Have the testActor watch someone (i.e. `context.watch(...)`). - */ + /** Have the testActor watch someone (i.e. `context.watch(...)`). */ def watch(ref: ActorRef): ActorRef = { testActor ! TestActor.Watch(ref) ref } - /** - * Have the testActor stop watching someone (i.e. `context.unwatch(...)`). - */ + /** Have the testActor stop watching someone (i.e. `context.unwatch(...)`). */ def unwatch(ref: ActorRef): ActorRef = { testActor ! TestActor.UnWatch(ref) ref @@ -230,9 +225,7 @@ trait TestKitBase { */ def setAutoPilot(pilot: TestActor.AutoPilot): Unit = testActor ! TestActor.SetAutoPilot(pilot) - /** - * Obtain current time (`System.nanoTime`) as Duration. - */ + /** Obtain current time (`System.nanoTime`) as Duration. */ def now: FiniteDuration = System.nanoTime.nanos /** @@ -270,9 +263,7 @@ trait TestKitBase { case _ => throw new IllegalArgumentException("max duration cannot be infinite") } - /** - * Query queue status. - */ + /** Query queue status. 
*/ def msgAvailable = !queue.isEmpty /** @@ -414,8 +405,9 @@ trait TestKitBase { val prev_end = end end = start + max_diff - val ret = try f - finally end = prev_end + val ret = + try f + finally end = prev_end val diff = now - start assert(min <= diff, s"block took ${format(min.unit, diff)}, should at least have been $min") @@ -426,14 +418,10 @@ trait TestKitBase { ret } - /** - * Same as calling `within(0 seconds, max)(f)`. - */ + /** Same as calling `within(0 seconds, max)(f)`. */ def within[T](max: FiniteDuration)(f: => T): T = within(0 seconds, max)(f) - /** - * Same as `expectMsg(remainingOrDefault, obj)`, but correctly treating the timeFactor. - */ + /** Same as `expectMsg(remainingOrDefault, obj)`, but correctly treating the timeFactor. */ def expectMsg[T](obj: T): T = expectMsg_internal(remainingOrDefault, obj) /** @@ -490,8 +478,8 @@ trait TestKitBase { * @return the received Terminated message */ def expectTerminated(target: ActorRef, max: Duration = Duration.Undefined): Terminated = - expectMsgPF(max, "Terminated " + target) { - case t @ Terminated(`target`) => t + expectMsgPF(max, "Terminated " + target) { case t @ Terminated(`target`) => + t } /** @@ -535,9 +523,7 @@ trait TestKitBase { recv } - /** - * Same as `expectMsgType[T](remainingOrDefault)`, but correctly treating the timeFactor. - */ + /** Same as `expectMsgType[T](remainingOrDefault)`, but correctly treating the timeFactor. */ def expectMsgType[T](implicit t: ClassTag[T]): T = expectMsgClass_internal(remainingOrDefault, t.runtimeClass.asInstanceOf[Class[T]]) @@ -551,9 +537,7 @@ trait TestKitBase { def expectMsgType[T](max: FiniteDuration)(implicit t: ClassTag[T]): T = expectMsgClass_internal(max.dilated, t.runtimeClass.asInstanceOf[Class[T]]) - /** - * Same as `expectMsgClass(remainingOrDefault, c)`, but correctly treating the timeFactor. - */ + /** Same as `expectMsgClass(remainingOrDefault, c)`, but correctly treating the timeFactor. 
*/ def expectMsgClass[C](c: Class[C]): C = expectMsgClass_internal(remainingOrDefault, c) /** @@ -572,9 +556,7 @@ trait TestKitBase { o.asInstanceOf[C] } - /** - * Same as `expectMsgAnyOf(remainingOrDefault, obj...)`, but correctly treating the timeFactor. - */ + /** Same as `expectMsgAnyOf(remainingOrDefault, obj...)`, but correctly treating the timeFactor. */ def expectMsgAnyOf[T](obj: T*): T = expectMsgAnyOf_internal(remainingOrDefault, obj: _*) /** @@ -593,9 +575,7 @@ trait TestKitBase { o.asInstanceOf[T] } - /** - * Same as `expectMsgAnyClassOf(remainingOrDefault, obj...)`, but correctly treating the timeFactor. - */ + /** Same as `expectMsgAnyClassOf(remainingOrDefault, obj...)`, but correctly treating the timeFactor. */ def expectMsgAnyClassOf[C](obj: Class[_ <: C]*): C = expectMsgAnyClassOf_internal(remainingOrDefault, obj: _*) /** @@ -615,9 +595,7 @@ trait TestKitBase { o.asInstanceOf[C] } - /** - * Same as `expectMsgAllOf(remainingOrDefault, obj...)`, but correctly treating the timeFactor. - */ + /** Same as `expectMsgAllOf(remainingOrDefault, obj...)`, but correctly treating the timeFactor. */ def expectMsgAllOf[T](obj: T*): immutable.Seq[T] = expectMsgAllOf_internal(remainingOrDefault, obj: _*) /** @@ -654,9 +632,7 @@ trait TestKitBase { recv.asInstanceOf[immutable.Seq[T]] } - /** - * Same as `expectMsgAllClassOf(remainingOrDefault, obj...)`, but correctly treating the timeFactor. - */ + /** Same as `expectMsgAllClassOf(remainingOrDefault, obj...)`, but correctly treating the timeFactor. */ def expectMsgAllClassOf[T](obj: Class[_ <: T]*): immutable.Seq[T] = internalExpectMsgAllClassOf(remainingOrDefault, obj: _*) @@ -679,9 +655,7 @@ trait TestKitBase { recv.asInstanceOf[immutable.Seq[T]] } - /** - * Same as `expectMsgAllConformingOf(remainingOrDefault, obj...)`, but correctly treating the timeFactor. - */ + /** Same as `expectMsgAllConformingOf(remainingOrDefault, obj...)`, but correctly treating the timeFactor. 
*/ def expectMsgAllConformingOf[T](obj: Class[_ <: T]*): immutable.Seq[T] = internalExpectMsgAllConformingOf(remainingOrDefault, obj: _*) @@ -732,8 +706,8 @@ trait TestKitBase { var elem: AnyRef = queue.peekFirst() var left = leftNow while (left.toNanos > 0 && elem == null) { - //Use of (left / 2) gives geometric series limited by finish time similar to (1/2)^n limited by 1, - //so it is very precise + // Use of (left / 2) gives geometric series limited by finish time similar to (1/2)^n limited by 1, + // so it is very precise Thread.sleep(pollInterval.toMillis min (left / 2).toMillis) left = leftNow if (left.toNanos > 0) { @@ -810,9 +784,7 @@ trait TestKitBase { */ def receiveN(n: Int): immutable.Seq[AnyRef] = receiveN_internal(n, remainingOrDefault) - /** - * Receive N messages in a row before the given deadline. - */ + /** Receive N messages in a row before the given deadline. */ def receiveN(n: Int, max: FiniteDuration): immutable.Seq[AnyRef] = receiveN_internal(n, max.dilated) private def receiveN_internal(n: Int, max: Duration): immutable.Seq[AnyRef] = { @@ -958,9 +930,7 @@ class TestKit(_system: ActorSystem) extends TestKitBase { object TestKit { - /** - * INTERNAL API - */ + /** INTERNAL API */ @InternalApi private[akka] val testActorId = new AtomicInteger(0) @@ -988,9 +958,7 @@ object TestKit { poll() } - /** - * Obtain current timestamp as Duration for relative measurements (using System.nanoTime). - */ + /** Obtain current timestamp as Duration for relative measurements (using System.nanoTime). */ def now: Duration = System.nanoTime().nanos /** @@ -1023,16 +991,12 @@ object TestKit { } } -/** - * TestKit-based probe which allows sending, reception and reply. - */ +/** TestKit-based probe which allows sending, reception and reply. */ class TestProbe(_application: ActorSystem, name: String) extends TestKit(_application) { def this(_application: ActorSystem) = this(_application, "testProbe") - /** - * Shorthand to get the testActor. 
- */ + /** Shorthand to get the testActor. */ def ref = testActor protected override def testActorName = name @@ -1044,19 +1008,13 @@ class TestProbe(_application: ActorSystem, name: String) extends TestKit(_applic */ def send(actor: ActorRef, msg: Any): Unit = actor.!(msg)(testActor) - /** - * Forward this message as if in the TestActor's receive method with self.forward. - */ + /** Forward this message as if in the TestActor's receive method with self.forward. */ def forward(actor: ActorRef, msg: Any = lastMessage.msg): Unit = actor.!(msg)(lastMessage.sender) - /** - * Get sender of last received message. - */ + /** Get sender of last received message. */ def sender() = lastMessage.sender - /** - * Send message to the sender of the last dequeued message. - */ + /** Send message to the sender of the last dequeued message. */ def reply(msg: Any): Unit = sender().!(msg)(ref) } diff --git a/akka-testkit/src/main/scala/akka/testkit/TestKitUtils.scala b/akka-testkit/src/main/scala/akka/testkit/TestKitUtils.scala index c0f924ffb2b..8fca0d3311d 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestKitUtils.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestKitUtils.scala @@ -10,9 +10,7 @@ import scala.util.matching.Regex import akka.annotation.InternalApi -/** - * INTERNAL API - */ +/** INTERNAL API */ @InternalApi private[akka] object TestKitUtils { diff --git a/akka-testkit/src/main/scala/akka/testkit/javadsl/TestKit.scala b/akka-testkit/src/main/scala/akka/testkit/javadsl/TestKit.scala index b4c8451adb5..0d686273d7d 100644 --- a/akka-testkit/src/main/scala/akka/testkit/javadsl/TestKit.scala +++ b/akka-testkit/src/main/scala/akka/testkit/javadsl/TestKit.scala @@ -5,7 +5,7 @@ package akka.testkit.javadsl import java.util.{ List => JList } -import java.util.function.{ Supplier, Function => JFunction } +import java.util.function.{ Function => JFunction, Supplier } import scala.annotation.nowarn import scala.annotation.varargs @@ -36,15 +36,11 @@ import 
akka.util.ccompat.JavaConverters._ * are scaled using the `dilated` method, which uses the * TestKitExtension.Settings.TestTimeFactor settable via akka.conf entry * "akka.test.timefactor". - * - * */ @nowarn("msg=deprecated") class TestKit(system: ActorSystem) { - /** - * All the Java APIs are delegated to TestProbe - */ + /** All the Java APIs are delegated to TestProbe */ private val tp: TestProbe = new TestProbe(system) /** @@ -53,9 +49,7 @@ class TestKit(system: ActorSystem) { */ def getTestActor: ActorRef = tp.testActor - /** - * Shorthand to get the testActor. - */ + /** Shorthand to get the testActor. */ def getRef: ActorRef = getTestActor def getSystem: ActorSystem = tp.system @@ -68,19 +62,13 @@ class TestKit(system: ActorSystem) { } } - /** - * Java timeouts (durations) during tests with the configured - */ + /** Java timeouts (durations) during tests with the configured */ def dilated(duration: java.time.Duration): java.time.Duration = duration.asScala.dilated(getSystem).asJava - /** - * Query queue status. - */ + /** Query queue status. */ def msgAvailable: Boolean = tp.msgAvailable - /** - * Get the last sender of the TestProbe - */ + /** Get the last sender of the TestProbe */ def getLastSender: ActorRef = tp.lastSender /** @@ -90,24 +78,16 @@ class TestKit(system: ActorSystem) { */ def send(actor: ActorRef, msg: AnyRef): Unit = actor.tell(msg, tp.ref) - /** - * Forward this message as if in the TestActor's receive method with self.forward. - */ + /** Forward this message as if in the TestActor's receive method with self.forward. */ def forward(actor: ActorRef): Unit = actor.tell(tp.lastMessage.msg, tp.lastMessage.sender) - /** - * Send message to the sender of the last dequeued message. - */ + /** Send message to the sender of the last dequeued message. */ def reply(msg: AnyRef): Unit = tp.lastSender.tell(msg, tp.ref) - /** - * Have the testActor watch someone (i.e. `context.watch(...)`). - */ + /** Have the testActor watch someone (i.e. 
`context.watch(...)`). */ def watch(ref: ActorRef): ActorRef = tp.watch(ref) - /** - * Have the testActor stop watching someone (i.e. `context.unwatch(...)`). - */ + /** Have the testActor stop watching someone (i.e. `context.unwatch(...)`). */ def unwatch(ref: ActorRef): ActorRef = tp.unwatch(ref) /** @@ -121,9 +101,7 @@ class TestKit(system: ActorSystem) { }) } - /** - * Stop ignoring messages in the test actor. - */ + /** Stop ignoring messages in the test actor. */ def ignoreNoMsg(): Unit = tp.ignoreNoMsg() /** @@ -284,9 +262,7 @@ class TestKit(system: ActorSystem) { def awaitAssert[A](max: java.time.Duration, interval: java.time.Duration, a: Supplier[A]): A = tp.awaitAssert(a.get, max.asScala, interval.asScala) - /** - * Same as `expectMsg(remainingOrDefault, obj)`, but correctly treating the timeFactor. - */ + /** Same as `expectMsg(remainingOrDefault, obj)`, but correctly treating the timeFactor. */ def expectMsgEquals[T](obj: T): T = tp.expectMsg(obj) /** @@ -298,9 +274,7 @@ class TestKit(system: ActorSystem) { */ def expectMsgEquals[T](max: java.time.Duration, obj: T): T = tp.expectMsg(max.asScala, obj) - /** - * Same as `expectMsg(remainingOrDefault, obj)`, but correctly treating the timeFactor. - */ + /** Same as `expectMsg(remainingOrDefault, obj)`, but correctly treating the timeFactor. */ def expectMsg[T](obj: T): T = tp.expectMsg(obj) /** @@ -359,9 +333,7 @@ class TestKit(system: ActorSystem) { @nowarn("msg=deprecated") def expectMsgPF[T](max: java.time.Duration, hint: String, f: JFunction[Any, T]): T = expectMsgPF(max.asScala, hint, f) - /** - * Same as `expectMsgClass(remainingOrDefault, c)`, but correctly treating the timeFactor. - */ + /** Same as `expectMsgClass(remainingOrDefault, c)`, but correctly treating the timeFactor. 
*/ def expectMsgClass[T](c: Class[T]): T = tp.expectMsgClass(c) /** @@ -371,9 +343,7 @@ class TestKit(system: ActorSystem) { */ def expectMsgClass[T](max: java.time.Duration, c: Class[T]): T = tp.expectMsgClass(max.asScala, c) - /** - * Same as `expectMsgAnyOf(remainingOrDefault, obj...)`, but correctly treating the timeFactor. - */ + /** Same as `expectMsgAnyOf(remainingOrDefault, obj...)`, but correctly treating the timeFactor. */ @varargs def expectMsgAnyOf[T](first: T, objs: T*): T = tp.expectMsgAnyOf((first +: objs): _*) @@ -385,9 +355,7 @@ class TestKit(system: ActorSystem) { @varargs def expectMsgAnyOfWithin[T](max: java.time.Duration, objs: T*): T = tp.expectMsgAnyOf(max.asScala, objs: _*) - /** - * Same as `expectMsgAllOf(remainingOrDefault, obj...)`, but correctly treating the timeFactor. - */ + /** Same as `expectMsgAllOf(remainingOrDefault, obj...)`, but correctly treating the timeFactor. */ @varargs def expectMsgAllOf[T](objs: T*): JList[T] = tp.expectMsgAllOf(objs: _*).asJava @@ -402,9 +370,7 @@ class TestKit(system: ActorSystem) { def expectMsgAllOfWithin[T](max: java.time.Duration, objs: T*): JList[T] = tp.expectMsgAllOf(max.asScala, objs: _*).asJava - /** - * Same as `expectMsgAnyClassOf(remainingOrDefault, obj...)`, but correctly treating the timeFactor. - */ + /** Same as `expectMsgAnyClassOf(remainingOrDefault, obj...)`, but correctly treating the timeFactor. */ @varargs def expectMsgAnyClassOf[T](objs: Class[_]*): T = tp.expectMsgAnyClassOf(objs: _*).asInstanceOf[T] @@ -490,9 +456,7 @@ class TestKit(system: ActorSystem) { def fishForMessage(max: java.time.Duration, hint: String, f: JFunction[Any, Boolean]): Any = fishForMessage(max.asScala, hint, f) - /** - * Same as `fishForMessage`, but gets a different partial function and returns properly typed message. - */ + /** Same as `fishForMessage`, but gets a different partial function and returns properly typed message. 
*/ @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.6.0") def fishForSpecificMessage[T](max: Duration, hint: String, f: JFunction[Any, T]): T = { tp.fishForSpecificMessage(max, hint)(new CachingPartialFunction[Any, T] { @@ -501,9 +465,7 @@ class TestKit(system: ActorSystem) { }) } - /** - * Same as `fishForMessage`, but gets a different partial function and returns properly typed message. - */ + /** Same as `fishForMessage`, but gets a different partial function and returns properly typed message. */ @nowarn("msg=deprecated") def fishForSpecificMessage[T](max: java.time.Duration, hint: String, f: JFunction[Any, T]): T = fishForSpecificMessage(max.asScala, hint, f) @@ -515,9 +477,7 @@ class TestKit(system: ActorSystem) { def receiveN(n: Int): JList[AnyRef] = tp.receiveN(n).asJava - /** - * Receive N messages in a row before the given deadline. - */ + /** Receive N messages in a row before the given deadline. */ def receiveN(n: Int, max: java.time.Duration): JList[AnyRef] = tp.receiveN(n, max.asScala).asJava /** @@ -538,7 +498,6 @@ class TestKit(system: ActorSystem) { * * One possible use of this method is for testing whether messages of * certain characteristics are generated at a certain rate: - * */ def receiveWhile[T]( max: java.time.Duration, @@ -546,40 +505,30 @@ class TestKit(system: ActorSystem) { messages: Int, f: JFunction[AnyRef, T]): JList[T] = { tp.receiveWhile(max.asScala, idle.asScala, messages)(new CachingPartialFunction[AnyRef, T] { - @throws(classOf[Exception]) - override def `match`(x: AnyRef): T = f.apply(x) - }) - .asJava + @throws(classOf[Exception]) + override def `match`(x: AnyRef): T = f.apply(x) + }).asJava } def receiveWhile[T](max: java.time.Duration, f: JFunction[AnyRef, T]): JList[T] = { tp.receiveWhile(max = max.asScala)(new CachingPartialFunction[AnyRef, T] { - @throws(classOf[Exception]) - override def `match`(x: AnyRef): T = f.apply(x) - }) - .asJava + @throws(classOf[Exception]) + override 
def `match`(x: AnyRef): T = f.apply(x) + }).asJava } - /** - * Spawns an actor as a child of this test actor, and returns the child's ActorRef. - */ + /** Spawns an actor as a child of this test actor, and returns the child's ActorRef. */ def childActorOf(props: Props, name: String, supervisorStrategy: SupervisorStrategy) = tp.childActorOf(props, name, supervisorStrategy) - /** - * Spawns an actor as a child of this test actor with an auto-generated name, and returns the child's ActorRef. - */ + /** Spawns an actor as a child of this test actor with an auto-generated name, and returns the child's ActorRef. */ def childActorOf(props: Props, supervisorStrategy: SupervisorStrategy) = tp.childActorOf(props, supervisorStrategy) - /** - * Spawns an actor as a child of this test actor with a stopping supervisor strategy, and returns the child's ActorRef. - */ + /** Spawns an actor as a child of this test actor with a stopping supervisor strategy, and returns the child's ActorRef. */ def childActorOf(props: Props, name: String) = tp.childActorOf(props, name) - /** - * Spawns an actor as a child of this test actor with an auto-generated name and stopping supervisor strategy, returning the child's ActorRef. - */ + /** Spawns an actor as a child of this test actor with an auto-generated name and stopping supervisor strategy, returning the child's ActorRef. 
*/ def childActorOf(props: Props) = tp.childActorOf(props) } diff --git a/akka-testkit/src/test/scala/akka/testkit/AkkaSpecSpec.scala b/akka-testkit/src/test/scala/akka/testkit/AkkaSpecSpec.scala index 4dac0af017d..63a623c5664 100644 --- a/akka-testkit/src/test/scala/akka/testkit/AkkaSpecSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/AkkaSpecSpec.scala @@ -68,12 +68,14 @@ class AkkaSpecSpec extends AnyWordSpec with Matchers { try { var locker = Seq.empty[DeadLetter] implicit val timeout: Timeout = TestKitExtension(system).DefaultTimeout.duration.dilated(system) - val davyJones = otherSystem.actorOf(Props(new Actor { - def receive = { - case m: DeadLetter => locker :+= m - case "Die!" => sender() ! "finally gone"; context.stop(self) - } - }), "davyJones") + val davyJones = otherSystem.actorOf( + Props(new Actor { + def receive = { + case m: DeadLetter => locker :+= m + case "Die!" => sender() ! "finally gone"; context.stop(self) + } + }), + "davyJones") system.eventStream.subscribe(davyJones, classOf[DeadLetter]) @@ -84,8 +86,7 @@ class AkkaSpecSpec extends AnyWordSpec with Matchers { * may happen that the system.stop() suspends the testActor before it had * a chance to put the message into its private queue */ - probe.receiveWhile(1 second) { - case null => + probe.receiveWhile(1 second) { case null => } val latch = new TestLatch(1)(system) diff --git a/akka-testkit/src/test/scala/akka/testkit/Coroner.scala b/akka-testkit/src/test/scala/akka/testkit/Coroner.scala index d0a3f75464e..26db0d6f43c 100644 --- a/akka-testkit/src/test/scala/akka/testkit/Coroner.scala +++ b/akka-testkit/src/test/scala/akka/testkit/Coroner.scala @@ -33,9 +33,7 @@ object Coroner { */ trait WatchHandle extends Awaitable[Boolean] { - /** - * Will try to ensure that the Coroner has finished reporting. - */ + /** Will try to ensure that the Coroner has finished reporting. 
*/ def cancel(): Unit } @@ -124,9 +122,7 @@ object Coroner { watchedHandle } - /** - * Print a report containing diagnostic information. - */ + /** Print a report containing diagnostic information. */ def printReport(reportTitle: String, out: PrintStream): Unit = { import out.println diff --git a/akka-testkit/src/test/scala/akka/testkit/CoronerSpec.scala b/akka-testkit/src/test/scala/akka/testkit/CoronerSpec.scala index c40d4588a72..8ed96f562c5 100644 --- a/akka-testkit/src/test/scala/akka/testkit/CoronerSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/CoronerSpec.scala @@ -70,27 +70,29 @@ class CoronerSpec extends AnyWordSpec with Matchers { def lockingThread(name: String, initialLocks: List[ReentrantLock]): LockingThread = { val ready = new Semaphore(0) val proceed = new Semaphore(0) - val t = new Thread(new Runnable { - def run = - try recursiveLock(initialLocks) - catch { case _: InterruptedException => () } - - def recursiveLock(locks: List[ReentrantLock]): Unit = { - locks match { - case Nil => () - case lock :: rest => { - ready.release() - proceed.acquire() - lock.lockInterruptibly() // Allows us to break deadlock and free threads - try { - recursiveLock(rest) - } finally { - lock.unlock() + val t = new Thread( + new Runnable { + def run = + try recursiveLock(initialLocks) + catch { case _: InterruptedException => () } + + def recursiveLock(locks: List[ReentrantLock]): Unit = { + locks match { + case Nil => () + case lock :: rest => { + ready.release() + proceed.acquire() + lock.lockInterruptibly() // Allows us to break deadlock and free threads + try { + recursiveLock(rest) + } finally { + lock.unlock() + } } } } - } - }, name) + }, + name) t.start() LockingThread(name, t, ready, proceed) } diff --git a/akka-testkit/src/test/scala/akka/testkit/TestActorRefSpec.scala b/akka-testkit/src/test/scala/akka/testkit/TestActorRefSpec.scala index 791ffa2a249..98a1c370ccc 100644 --- a/akka-testkit/src/test/scala/akka/testkit/TestActorRefSpec.scala +++ 
b/akka-testkit/src/test/scala/akka/testkit/TestActorRefSpec.scala @@ -15,9 +15,7 @@ import akka.dispatch.Dispatcher import akka.event.Logging.Warning import akka.pattern.ask -/** - * Test whether TestActorRef behaves as an ActorRef should, besides its own spec. - */ +/** Test whether TestActorRef behaves as an ActorRef should, besides its own spec. */ object TestActorRefSpec { var counter = 4 @@ -86,17 +84,16 @@ object TestActorRefSpec { class Logger extends Actor { var count = 0 var msg: String = _ - def receive = { - case Warning(_, _, m: String) => count += 1; msg = m + def receive = { case Warning(_, _, m: String) => + count += 1; msg = m } } class ReceiveTimeoutActor(target: ActorRef) extends Actor { context.setReceiveTimeout(1.second) - def receive = { - case ReceiveTimeout => - target ! "timeout" - context.stop(self) + def receive = { case ReceiveTimeout => + target ! "timeout" + context.stop(self) } } @@ -125,9 +122,9 @@ class TestActorRefSpec extends AkkaSpec("disp1.type=Dispatcher") with BeforeAndA val nested = TestActorRef(Props(new Actor { def receive = { case _ => } })) def receive = { case _ => sender() ! nested } })) - a should not be (null) + a should not be null val nested = Await.result((a ? "any").mapTo[ActorRef], timeout.duration) - nested should not be (null) + nested should not be null a should not be theSameInstanceAs(nested) } @@ -136,9 +133,9 @@ class TestActorRefSpec extends AkkaSpec("disp1.type=Dispatcher") with BeforeAndA val nested = context.actorOf(Props(new Actor { def receive = { case _ => } })) def receive = { case _ => sender() ! nested } })) - a should not be (null) + a should not be null val nested = Await.result((a ? 
"any").mapTo[ActorRef], timeout.duration) - nested should not be (null) + nested should not be null a should not be theSameInstanceAs(nested) } @@ -180,8 +177,8 @@ class TestActorRefSpec extends AkkaSpec("disp1.type=Dispatcher") with BeforeAndA } })) a.!(PoisonPill)(testActor) - expectMsgPF(5 seconds) { - case WrappedTerminated(Terminated(`a`)) => true + expectMsgPF(5 seconds) { case WrappedTerminated(Terminated(`a`)) => + true } a.isTerminated should ===(true) assertThread() @@ -193,11 +190,14 @@ class TestActorRefSpec extends AkkaSpec("disp1.type=Dispatcher") with BeforeAndA counter = 2 val boss = TestActorRef(Props(new TActor { - val ref = TestActorRef(Props(new TActor { - def receiveT = { case _ => } - override def preRestart(reason: Throwable, msg: Option[Any]): Unit = { counter -= 1 } - override def postRestart(reason: Throwable): Unit = { counter -= 1 } - }), self, "child") + val ref = TestActorRef( + Props(new TActor { + def receiveT = { case _ => } + override def preRestart(reason: Throwable, msg: Option[Any]): Unit = { counter -= 1 } + override def postRestart(reason: Throwable): Unit = { counter -= 1 } + }), + self, + "child") override def supervisorStrategy = OneForOneStrategy(maxNrOfRetries = 5, withinTimeRange = 1 second)(List(classOf[ActorKilledException])) @@ -232,8 +232,8 @@ class TestActorRefSpec extends AkkaSpec("disp1.type=Dispatcher") with BeforeAndA "allow access to internals" in { class TA extends TActor { var s: String = _ - def receiveT = { - case x: String => s = x + def receiveT = { case x: String => + s = x } } val ref = TestActorRef(new TA) @@ -273,12 +273,15 @@ class TestActorRefSpec extends AkkaSpec("disp1.type=Dispatcher") with BeforeAndA "not throw an exception when parent is passed in the apply" in { EventFilter[RuntimeException](occurrences = 1, message = "expected").intercept { val parent = TestProbe() - val child = TestActorRef(Props(new Actor { - def receive: Receive = { - case 1 => throw new RuntimeException("expected") - case 
x => sender() ! x - } - }), parent.ref, "Child") + val child = TestActorRef( + Props(new Actor { + def receive: Receive = { + case 1 => throw new RuntimeException("expected") + case x => sender() ! x + } + }), + parent.ref, + "Child") child ! 1 } @@ -286,12 +289,14 @@ class TestActorRefSpec extends AkkaSpec("disp1.type=Dispatcher") with BeforeAndA "not throw an exception when child is created through childActorOf" in { EventFilter[RuntimeException](occurrences = 1, message = "expected").intercept { val parent = TestProbe() - val child = parent.childActorOf(Props(new Actor { - def receive: Receive = { - case 1 => throw new RuntimeException("expected") - case x => sender() ! x - } - }), "Child") + val child = parent.childActorOf( + Props(new Actor { + def receive: Receive = { + case 1 => throw new RuntimeException("expected") + case x => sender() ! x + } + }), + "Child") child ! 1 } diff --git a/akka-testkit/src/test/scala/akka/testkit/TestFSMRefSpec.scala b/akka-testkit/src/test/scala/akka/testkit/TestFSMRefSpec.scala index fd45ca664cb..842529de5e3 100644 --- a/akka-testkit/src/test/scala/akka/testkit/TestFSMRefSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/TestFSMRefSpec.scala @@ -15,16 +15,18 @@ class TestFSMRefSpec extends AkkaSpec { "A TestFSMRef" must { "allow access to state data" in { - val fsm = TestFSMRef(new Actor with FSM[Int, String] { - startWith(1, "") - when(1) { - case Event("go", _) => goto(2).using("go") - case Event(StateTimeout, _) => goto(2).using("timeout") - } - when(2) { - case Event("back", _) => goto(1).using("back") - } - }, "test-fsm-ref-1") + val fsm = TestFSMRef( + new Actor with FSM[Int, String] { + startWith(1, "") + when(1) { + case Event("go", _) => goto(2).using("go") + case Event(StateTimeout, _) => goto(2).using("timeout") + } + when(2) { case Event("back", _) => + goto(1).using("back") + } + }, + "test-fsm-ref-1") fsm.stateName should ===(1) fsm.stateData should ===("") fsm ! 
"go" @@ -43,12 +45,14 @@ class TestFSMRefSpec extends AkkaSpec { } "allow access to timers" in { - val fsm = TestFSMRef(new Actor with FSM[Int, Null] { - startWith(1, null) - when(1) { - case _ => stay() - } - }, "test-fsm-ref-2") + val fsm = TestFSMRef( + new Actor with FSM[Int, Null] { + startWith(1, null) + when(1) { case _ => + stay() + } + }, + "test-fsm-ref-2") fsm.isTimerActive("test") should ===(false) fsm.startTimerWithFixedDelay("test", 12, 10 millis) fsm.isTimerActive("test") should ===(true) @@ -65,8 +69,8 @@ class TestFSMRefSpec extends AkkaSpec { class TestFSMActor extends Actor with FSM[Int, Null] { startWith(1, null) - when(1) { - case _ => stay() + when(1) { case _ => + stay() } val supervisor = context.parent val name = context.self.path.name diff --git a/akka-testkit/src/test/scala/akka/testkit/TestProbeSpec.scala b/akka-testkit/src/test/scala/akka/testkit/TestProbeSpec.scala index 137577fb9a9..e5241ad5b6a 100644 --- a/akka-testkit/src/test/scala/akka/testkit/TestProbeSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/TestProbeSpec.scala @@ -77,7 +77,7 @@ class TestProbeSpec extends AkkaSpec with DefaultTimeout with Eventually { awaitAssert { child ! "hello" - restarts.get() should be > (1) + restarts.get() should be > 1 } } @@ -115,7 +115,7 @@ class TestProbeSpec extends AkkaSpec with DefaultTimeout with Eventually { } "have an AutoPilot" in { - //#autopilot + // #autopilot val probe = TestProbe() probe.setAutoPilot(new TestActor.AutoPilot { def run(sender: ActorRef, msg: Any): TestActor.AutoPilot = @@ -124,7 +124,7 @@ class TestProbeSpec extends AkkaSpec with DefaultTimeout with Eventually { case x => testActor.tell(x, sender); TestActor.KeepRunning } }) - //#autopilot + // #autopilot probe.ref ! "hallo" probe.ref ! "welt" probe.ref ! 
"stop" diff --git a/akka-testkit/src/test/scala/akka/testkit/WithLogCapturing.scala b/akka-testkit/src/test/scala/akka/testkit/WithLogCapturing.scala index 76659636500..fafbdf3ab51 100644 --- a/akka-testkit/src/test/scala/akka/testkit/WithLogCapturing.scala +++ b/akka-testkit/src/test/scala/akka/testkit/WithLogCapturing.scala @@ -12,9 +12,7 @@ import akka.actor.ActorSystem import akka.event.Logging import akka.event.Logging._ -/** - * Mixin this trait to a test to make log lines appear only when the test failed. - */ +/** Mixin this trait to a test to make log lines appear only when the test failed. */ trait WithLogCapturing extends SuiteMixin { this: TestSuite => implicit def system: ActorSystem diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/FileDescriptorMetricSet.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/FileDescriptorMetricSet.scala index f046040d975..03914afb140 100644 --- a/akka-testkit/src/test/scala/akka/testkit/metrics/FileDescriptorMetricSet.scala +++ b/akka-testkit/src/test/scala/akka/testkit/metrics/FileDescriptorMetricSet.scala @@ -13,18 +13,19 @@ import com.codahale.metrics.jvm.FileDescriptorRatioGauge import akka.util.ccompat.JavaConverters._ -/** - * MetricSet exposing number of open and maximum file descriptors used by the JVM process. - */ +/** MetricSet exposing number of open and maximum file descriptors used by the JVM process. 
*/ private[akka] class FileDescriptorMetricSet(os: OperatingSystemMXBean = ManagementFactory.getOperatingSystemMXBean) extends MetricSet { override def getMetrics: util.Map[String, Metric] = { - Map[String, Metric](name("file-descriptors", "open") -> new Gauge[Long] { - override def getValue: Long = invoke("getOpenFileDescriptorCount") - }, name("file-descriptors", "max") -> new Gauge[Long] { - override def getValue: Long = invoke("getMaxFileDescriptorCount") - }, name("file-descriptors", "ratio") -> new FileDescriptorRatioGauge(os)).asJava + Map[String, Metric]( + name("file-descriptors", "open") -> new Gauge[Long] { + override def getValue: Long = invoke("getOpenFileDescriptorCount") + }, + name("file-descriptors", "max") -> new Gauge[Long] { + override def getValue: Long = invoke("getMaxFileDescriptorCount") + }, + name("file-descriptors", "ratio") -> new FileDescriptorRatioGauge(os)).asJava } private def invoke(name: String): Long = { diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKit.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKit.scala index 57de8685a42..afd3d811614 100644 --- a/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKit.scala +++ b/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKit.scala @@ -66,9 +66,7 @@ private[akka] trait MetricsKit extends MetricsKitOps { configureConsoleReporter() } - /** - * Schedule metric reports execution iterval. Should not be used multiple times - */ + /** Schedule metric reports execution iterval. Should not be used multiple times */ def scheduleMetricReports(every: FiniteDuration): Unit = { reporters.foreach { _.start(every.toMillis, TimeUnit.MILLISECONDS) } } @@ -143,9 +141,7 @@ private[akka] trait MetricsKit extends MetricsKitOps { registry.removeMatching(matching) } - /** - * MUST be called after all tests have finished. - */ + /** MUST be called after all tests have finished. 
*/ def shutdownMetrics(): Unit = { reporters.foreach { _.stop() } } diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKitOps.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKitOps.scala index b31ca675d2f..6b2e09dc523 100644 --- a/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKitOps.scala +++ b/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKitOps.scala @@ -103,6 +103,6 @@ private[metrics] trait MetricsPrefix extends MetricSet { abstract override def getMetrics: util.Map[String, Metric] = { // does not have to be fast, is only called once during registering registry import akka.util.ccompat.JavaConverters._ - (super.getMetrics.asScala.map { case (k, v) => (prefix / k).toString -> v }).asJava + super.getMetrics.asScala.map { case (k, v) => (prefix / k).toString -> v }.asJava } } diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/reporter/AkkaConsoleReporter.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/reporter/AkkaConsoleReporter.scala index d3deef6b6af..682b4caba14 100644 --- a/akka-testkit/src/test/scala/akka/testkit/metrics/reporter/AkkaConsoleReporter.scala +++ b/akka-testkit/src/test/scala/akka/testkit/metrics/reporter/AkkaConsoleReporter.scala @@ -12,9 +12,7 @@ import com.codahale.metrics._ import akka.testkit.metrics._ -/** - * Used to report `akka.testkit.metric.Metric` types that the original `com.codahale.metrics.ConsoleReporter` is unaware of (cannot re-use directly because of private constructor). - */ +/** Used to report `akka.testkit.metric.Metric` types that the original `com.codahale.metrics.ConsoleReporter` is unaware of (cannot re-use directly because of private constructor). 
*/ class AkkaConsoleReporter(registry: AkkaMetricRegistry, verbose: Boolean, output: PrintStream = System.out) extends ScheduledReporter( registry.asInstanceOf[MetricRegistry], diff --git a/project/plugins.sbt b/project/plugins.sbt index 248c72d3ad6..b01785fb67a 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -5,7 +5,7 @@ libraryDependencies += Defaults.sbtPluginExtra( addSbtPlugin("com.lightbend.sbt" % "sbt-java-formatter" % "0.8.0") addSbtPlugin("com.lightbend.sbt" % "sbt-bill-of-materials" % "1.0.2") -addSbtPlugin("org.scalameta" % "sbt-scalafmt" % "2.4.2") +addSbtPlugin("org.scalameta" % "sbt-scalafmt" % "2.4.6") addSbtPlugin("com.typesafe" % "sbt-mima-plugin" % "1.1.3") addSbtPlugin("com.github.sbt" % "sbt-unidoc" % "0.5.0") addSbtPlugin("com.thoughtworks.sbt-api-mappings" % "sbt-api-mappings" % "3.0.2")