Skip to content
Permalink
Browse files

Upgraded to latest sbt-scalafmt release and removed coursier since it is now built into sbt
  • Loading branch information...
mpilquist committed Sep 6, 2019
1 parent 29789ca commit 337bb293e8de8b038f1bcc7611f231e5a0fce46f
Showing with 1,158 additions and 761 deletions.
  1. +1 −0 .scalafmt.conf
  2. +2 −1 benchmark/src/main/scala/fs2/benchmark/QueueBenchmark.scala
  3. +30 −18 core/jvm/src/main/scala/fs2/compress.scala
  4. +1 −1 core/jvm/src/main/scala/fs2/hash.scala
  5. +5 −3 core/jvm/src/main/scala/fs2/internal/ThreadFactories.scala
  6. +4 −2 core/jvm/src/test/scala/fs2/CompressSpec.scala
  7. +2 −1 core/jvm/src/test/scala/fs2/HashSpec.scala
  8. +1 −2 core/jvm/src/test/scala/fs2/MemorySanityChecks.scala
  9. +3 −3 core/shared/src/main/scala-2.12-/fs2/internal/ArrayBackedSeq.scala
  10. +34 −24 core/shared/src/main/scala/fs2/Chunk.scala
  11. +11 −6 core/shared/src/main/scala/fs2/CompositeFailure.scala
  12. +15 −9 core/shared/src/main/scala/fs2/Pull.scala
  13. +4 −2 core/shared/src/main/scala/fs2/RaiseThrowable.scala
  14. +33 −20 core/shared/src/main/scala/fs2/concurrent/PubSub.scala
  15. +43 −23 core/shared/src/main/scala/fs2/concurrent/Queue.scala
  16. +6 −3 core/shared/src/main/scala/fs2/concurrent/Topic.scala
  17. +66 −39 core/shared/src/main/scala/fs2/internal/Algebra.scala
  18. +22 −10 core/shared/src/main/scala/fs2/internal/CompileScope.scala
  19. +6 −3 core/shared/src/main/scala/fs2/internal/FreeC.scala
  20. +37 −25 core/shared/src/main/scala/fs2/text.scala
  21. +24 −14 core/shared/src/test/scala/fs2/ChunkGenerators.scala
  22. +10 −9 core/shared/src/test/scala/fs2/ChunkSpec.scala
  23. +8 −12 core/shared/src/test/scala/fs2/CompilationTest.scala
  24. +1 −1 core/shared/src/test/scala/fs2/Counter.scala
  25. +5 −2 core/shared/src/test/scala/fs2/Fs2Spec.scala
  26. +9 −6 core/shared/src/test/scala/fs2/PullLawsSpec.scala
  27. +4 −4 core/shared/src/test/scala/fs2/StreamGenerators.scala
  28. +18 −10 core/shared/src/test/scala/fs2/StreamLawsSpec.scala
  29. +12 −8 core/shared/src/test/scala/fs2/StreamPerformanceSpec.scala
  30. +183 −121 core/shared/src/test/scala/fs2/StreamSpec.scala
  31. +3 −2 core/shared/src/test/scala/fs2/TextSpec.scala
  32. +131 −109 core/shared/src/test/scala/fs2/ToFuturePropCheckerAsserting.scala
  33. +7 −2 core/shared/src/test/scala/fs2/concurrent/SignalSpec.scala
  34. +1 −1 core/shared/src/test/scala/scalacheck/GeneratorCompat.scala
  35. +12 −7 experimental/src/main/scala/fs2/experimental/concurrent/PubSub.scala
  36. +5 −4 experimental/src/main/scala/fs2/experimental/concurrent/Queue.scala
  37. +12 −7 io/src/main/scala/fs2/io/JavaInputOutputStream.scala
  38. +4 −3 io/src/main/scala/fs2/io/file/FileHandle.scala
  39. +97 −59 io/src/main/scala/fs2/io/file/Watcher.scala
  40. +86 −53 io/src/main/scala/fs2/io/file/file.scala
  41. +29 −16 io/src/main/scala/fs2/io/file/pulls.scala
  42. +20 −13 io/src/main/scala/fs2/io/io.scala
  43. +33 −27 io/src/main/scala/fs2/io/tcp/SocketGroup.scala
  44. +8 −4 io/src/main/scala/fs2/io/tcp/SocketOptionMapping.scala
  45. +53 −37 io/src/main/scala/fs2/io/udp/AsynchronousSocketGroup.scala
  46. +14 −8 io/src/main/scala/fs2/io/udp/SocketGroup.scala
  47. +3 −2 io/src/test/scala/fs2/io/file/BaseFileSpec.scala
  48. +18 −10 io/src/test/scala/fs2/io/file/FileSpec.scala
  49. +4 −2 io/src/test/scala/fs2/io/file/WatcherSpec.scala
  50. +11 −8 io/src/test/scala/fs2/io/udp/UdpSpec.scala
  51. +1 −2 project/plugins.sbt
  52. +6 −3 reactive-streams/src/main/scala/fs2/interop/reactivestreams/StreamSubscription.scala
@@ -1,4 +1,5 @@
version = "2.0.1"

style = default

maxColumn = 100
@@ -31,7 +31,8 @@ class QueueBenchmark {
.unbounded[IO, Chunk[Int]]
.flatMap { q =>
Concurrent[IO].start(
Stream.constant(1, N).take(size).chunks.through(q.enqueue).compile.drain) >>
Stream.constant(1, N).take(size).chunks.through(q.enqueue).compile.drain
) >>
q.dequeue.flatMap(Stream.chunk).take(size).compile.drain
}
.unsafeRunSync()
@@ -20,10 +20,12 @@ object compress {
* compressor. Default size is 32 KB.
* @param strategy compression strategy -- see `java.util.zip.Deflater` for details
*/
def deflate[F[_]](level: Int = Deflater.DEFAULT_COMPRESSION,
nowrap: Boolean = false,
bufferSize: Int = 1024 * 32,
strategy: Int = Deflater.DEFAULT_STRATEGY): Pipe[F, Byte, Byte] = { in =>
def deflate[F[_]](
level: Int = Deflater.DEFAULT_COMPRESSION,
nowrap: Boolean = false,
bufferSize: Int = 1024 * 32,
strategy: Int = Deflater.DEFAULT_STRATEGY
): Pipe[F, Byte, Byte] = { in =>
Pull.suspend {
val deflater = new Deflater(level, nowrap)
deflater.setStrategy(strategy)
@@ -32,8 +34,10 @@ object compress {
}.stream
}

private def _deflate_stream[F[_]](deflater: Deflater,
buffer: Array[Byte]): Stream[F, Byte] => Pull[F, Byte, Unit] =
private def _deflate_stream[F[_]](
deflater: Deflater,
buffer: Array[Byte]
): Stream[F, Byte] => Pull[F, Byte, Unit] =
_.pull.uncons.flatMap {
case Some((hd, tl)) =>
deflater.setInput(hd.toArray)
@@ -50,10 +54,12 @@ object compress {
}

@tailrec
private def _deflate_collect(deflater: Deflater,
buffer: Array[Byte],
acc: ArrayBuffer[Byte],
fin: Boolean): ArrayBuffer[Byte] =
private def _deflate_collect(
deflater: Deflater,
buffer: Array[Byte],
acc: ArrayBuffer[Byte],
fin: Boolean
): ArrayBuffer[Byte] =
if ((fin && deflater.finished) || (!fin && deflater.needsInput)) acc
else {
val count = deflater.deflate(buffer)
@@ -68,7 +74,8 @@ object compress {
* decompressor. Default size is 32 KB.
*/
def inflate[F[_]](nowrap: Boolean = false, bufferSize: Int = 1024 * 32)(
implicit ev: RaiseThrowable[F]): Pipe[F, Byte, Byte] =
implicit ev: RaiseThrowable[F]
): Pipe[F, Byte, Byte] =
_.pull.uncons.flatMap {
case None => Pull.done
case Some((hd, tl)) =>
@@ -81,7 +88,8 @@ object compress {
}.stream

private def _inflate_stream[F[_]](inflater: Inflater, buffer: Array[Byte])(
implicit ev: RaiseThrowable[F]): Stream[F, Byte] => Pull[F, Byte, Unit] =
implicit ev: RaiseThrowable[F]
): Stream[F, Byte] => Pull[F, Byte, Unit] =
_.pull.uncons.flatMap {
case Some((hd, tl)) =>
inflater.setInput(hd.toArray)
@@ -91,13 +99,17 @@ object compress {
case None =>
if (!inflater.finished)
Pull.raiseError[F](new DataFormatException("Insufficient data"))
else { inflater.end(); Pull.done }
else {
inflater.end(); Pull.done
}
}

@tailrec
private def _inflate_collect(inflater: Inflater,
buffer: Array[Byte],
acc: ArrayBuffer[Byte]): ArrayBuffer[Byte] =
private def _inflate_collect(
inflater: Inflater,
buffer: Array[Byte],
acc: ArrayBuffer[Byte]
): ArrayBuffer[Byte] =
if (inflater.finished || inflater.needsInput) acc
else {
val count = inflater.inflate(buffer)
@@ -159,7 +171,7 @@ object compress {
}

body ++ trailer
}
}

/**
* Returns a pipe that incrementally decompresses input according to the GZIP
@@ -251,7 +263,7 @@ object compress {

stepDecompress ++ mainline
}
}
}

final case class NonProgressiveDecompressionException(bufferSize: Int)
extends RuntimeException(s"buffer size $bufferSize is too small; gunzip cannot make progress")
@@ -43,5 +43,5 @@ object hash {
.flatMap { d =>
Stream.chunk(Chunk.bytes(d.digest()))
}
}
}
}
@@ -10,9 +10,11 @@ import scala.util.control.NonFatal
private[fs2] object ThreadFactories {

/** A `ThreadFactory` which names threads according to the pattern ${threadPrefix}-${count}. */
def named(threadPrefix: String,
daemon: Boolean,
exitJvmOnFatalError: Boolean = true): ThreadFactory =
def named(
threadPrefix: String,
daemon: Boolean,
exitJvmOnFatalError: Boolean = true
): ThreadFactory =
new ThreadFactory {
val defaultThreadFactory = Executors.defaultThreadFactory()
val idx = new AtomicInteger(0)
@@ -50,7 +50,8 @@ class CompressSpec extends Fs2Spec {
deflate(
level = level,
nowrap = nowrap
))
)
)
.toVector

actual should equal(expected)
@@ -65,7 +66,8 @@ class CompressSpec extends Fs2Spec {
deflate(
level = level,
nowrap = nowrap
))
)
)
.toVector

def expectEqual(expected: Array[Byte], actual: Array[Byte]) = {
@@ -20,7 +20,8 @@ class HashSpec extends Fs2Spec {
str.getBytes
.grouped(n)
.foldLeft(Stream.empty.covaryOutput[Byte])(
(acc, c) => acc ++ Stream.chunk(Chunk.bytes(c)))
(acc, c) => acc ++ Stream.chunk(Chunk.bytes(c))
)

s.through(h).toList shouldBe digest(algo, str)
}
@@ -67,8 +67,7 @@ object RepeatEvalSanityTest extends App {
s.pull.uncons1.flatMap {
case Some((h, t)) => Pull.output1(h) >> go(t); case None => Pull.done
}
in =>
go(in).stream
in => go(in).stream
}
Stream.repeatEval(IO(1)).through(id[Int]).compile.drain.unsafeRunSync()
}
@@ -3,7 +3,7 @@ package fs2.internal
private[fs2] object ArrayBackedSeq {
def unapply[A](a: collection.Seq[A]): Option[Array[_]] = a match {
case as: collection.mutable.WrappedArray[A] => Some(as.array)
case as: collection.mutable.ArraySeq[A] => Some(as.array)
case _ => None
case as: collection.mutable.ArraySeq[A] => Some(as.array)
case _ => None
}
}
}
@@ -789,10 +789,11 @@ object Chunk {
def apply(values: Array[Byte]): Bytes = Bytes(values, 0, values.length)
}

sealed abstract class Buffer[A <: Buffer[A, B, C], B <: JBuffer, C: ClassTag](buf: B,
val offset: Int,
val size: Int)
extends Chunk[C]
sealed abstract class Buffer[A <: Buffer[A, B, C], B <: JBuffer, C: ClassTag](
buf: B,
val offset: Int,
val size: Int
) extends Chunk[C]
with KnownElementType[C] {

def elementClassTag: ClassTag[C] = implicitly[ClassTag[C]]
@@ -936,10 +937,11 @@ object Chunk {
new DoubleBuffer(buf, buf.position, buf.remaining)
}

final case class DoubleBuffer(buf: JDoubleBuffer,
override val offset: Int,
override val size: Int)
extends Buffer[DoubleBuffer, JDoubleBuffer, Double](buf, offset, size) {
final case class DoubleBuffer(
buf: JDoubleBuffer,
override val offset: Int,
override val size: Int
) extends Buffer[DoubleBuffer, JDoubleBuffer, Double](buf, offset, size) {

def readOnly(b: JDoubleBuffer): JDoubleBuffer =
b.asReadOnlyBuffer()
@@ -949,10 +951,12 @@ object Chunk {

def buffer(b: JDoubleBuffer): DoubleBuffer = DoubleBuffer.view(b)

override def get(b: JDoubleBuffer,
dest: Array[Double],
offset: Int,
length: Int): JDoubleBuffer =
override def get(
b: JDoubleBuffer,
dest: Array[Double],
offset: Int,
length: Int
): JDoubleBuffer =
b.get(dest, offset, length)

def duplicate(b: JDoubleBuffer): JDoubleBuffer = b.duplicate()
@@ -1083,10 +1087,11 @@ object Chunk {
new ByteBuffer(buf, buf.position, buf.remaining)
}

final case class ByteBuffer private (buf: JByteBuffer,
override val offset: Int,
override val size: Int)
extends Buffer[ByteBuffer, JByteBuffer, Byte](buf, offset, size) {
final case class ByteBuffer private (
buf: JByteBuffer,
override val offset: Int,
override val size: Int
) extends Buffer[ByteBuffer, JByteBuffer, Byte](buf, offset, size) {

def readOnly(b: JByteBuffer): JByteBuffer =
b.asReadOnlyBuffer()
@@ -1499,8 +1504,10 @@ object Chunk {
/**
* Creates a chunk consisting of the first `n` elements of `queue` and returns the remainder.
*/
def queueFirstN[A](queue: collection.immutable.Queue[A],
n: Int): (Chunk[A], collection.immutable.Queue[A]) =
def queueFirstN[A](
queue: collection.immutable.Queue[A],
n: Int
): (Chunk[A], collection.immutable.Queue[A]) =
if (n <= 0) (Chunk.empty, queue)
else if (n == 1) {
val (hd, tl) = queue.dequeue
@@ -1535,11 +1542,12 @@ object Chunk {
* }}}
*/
implicit val instance
: Traverse[Chunk] with Monad[Chunk] with Alternative[Chunk] with TraverseFilter[Chunk] =
: Traverse[Chunk] with Monad[Chunk] with Alternative[Chunk] with TraverseFilter[Chunk] =
new Traverse[Chunk] with Monad[Chunk] with Alternative[Chunk] with TraverseFilter[Chunk] {
override def foldLeft[A, B](fa: Chunk[A], b: B)(f: (B, A) => B): B = fa.foldLeft(b)(f)
override def foldRight[A, B](fa: Chunk[A], b: Eval[B])(
f: (A, Eval[B]) => Eval[B]): Eval[B] = {
f: (A, Eval[B]) => Eval[B]
): Eval[B] = {
def go(i: Int): Eval[B] =
if (i < fa.size) f(fa(i), Eval.defer(go(i + 1)))
else b
@@ -1577,13 +1585,15 @@ object Chunk {
override def combineK[A](x: Chunk[A], y: Chunk[A]): Chunk[A] =
Chunk.concat(List(x, y))
override def traverse: Traverse[Chunk] = this
override def traverse[F[_], A, B](fa: Chunk[A])(f: A => F[B])(
implicit F: Applicative[F]): F[Chunk[B]] =
override def traverse[F[_], A, B](
fa: Chunk[A]
)(f: A => F[B])(implicit F: Applicative[F]): F[Chunk[B]] =
foldRight[A, F[Vector[B]]](fa, Eval.always(F.pure(Vector.empty))) { (a, efv) =>
F.map2Eval(f(a), efv)(_ +: _)
}.value.map(Chunk.vector)
override def traverseFilter[F[_], A, B](fa: Chunk[A])(f: A => F[Option[B]])(
implicit F: Applicative[F]): F[Chunk[B]] =
override def traverseFilter[F[_], A, B](
fa: Chunk[A]
)(f: A => F[Option[B]])(implicit F: Applicative[F]): F[Chunk[B]] =
foldRight[A, F[Vector[B]]](fa, Eval.always(F.pure(Vector.empty))) { (a, efv) =>
F.map2Eval(f(a), efv)((oa, efv) => oa.map(_ +: efv).getOrElse(efv))
}.value.map(Chunk.vector)
@@ -8,16 +8,19 @@ final class CompositeFailure(
val tail: NonEmptyList[Throwable]
) extends Throwable(
s"Multiple exceptions were thrown (${1 + tail.size}), first ${head.getClass.getName}: ${head.getMessage}",
head) {
head
) {

/** Gets all causes (guaranteed to have at least 2 elements). */
def all: NonEmptyList[Throwable] = head :: tail
}

object CompositeFailure {
def apply(first: Throwable,
second: Throwable,
rest: List[Throwable] = List.empty): CompositeFailure =
def apply(
first: Throwable,
second: Throwable,
rest: List[Throwable] = List.empty
): CompositeFailure =
new CompositeFailure(first, NonEmptyList(second, rest))

def fromList(errors: List[Throwable]): Option[Throwable] = errors match {
@@ -34,8 +37,10 @@ object CompositeFailure {
* - When both results succeeds then Right(()) is returned
*
*/
def fromResults(first: Either[Throwable, Unit],
second: Either[Throwable, Unit]): Either[Throwable, Unit] =
def fromResults(
first: Either[Throwable, Unit],
second: Either[Throwable, Unit]
): Either[Throwable, Unit] =
first match {
case Right(_) => second
case Left(err) =>

0 comments on commit 337bb29

Please sign in to comment.
You can’t perform that action at this time.