diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..e64dd86 --- /dev/null +++ b/.gitignore @@ -0,0 +1,36 @@ +project/project +project/target +target +.idea +.tmp + +*.iml +/out +.idea_modules +.classpath +.project +/RUNNING_PID +.settings +.sass-cache +scalajvm/upload/* + +# temp files +.~* +*~ +*.orig + +# eclipse +.scala_dependencies +.buildpath +.cache +.target +bin/ +.ensime +.ensime_cache + +# OSX +.DS_Store + +# PGP keys +pubring.gpg +secring.gpg \ No newline at end of file diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..73b41d1 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,9 @@ +language: scala +scala: + - 2.11.7 +jdk: + - oraclejdk8 +script: + - sbt test +after_success: + - bash deploy.sh \ No newline at end of file diff --git a/build.sbt b/build.sbt new file mode 100644 index 0000000..f0ecc98 --- /dev/null +++ b/build.sbt @@ -0,0 +1,51 @@ +lazy val fetch = (project in file(".")) +.settings(publishSettings:_*) +.enablePlugins(ExerciseCompilerPlugin) +.settings( + organization := "org.scala-exercises", + name := "exercises-fetch", + scalaVersion := "2.11.7", + version := "0.1.1", + resolvers ++= Seq( + Resolver.sonatypeRepo("snapshots"), + Resolver.sonatypeRepo("releases") + ), + libraryDependencies ++= Seq( + "com.fortysevendeg" %% "fetch" % "0.3.0-SNAPSHOT", + "com.chuusai" %% "shapeless" % "2.2.5", + "org.scalatest" %% "scalatest" % "2.2.4", + "org.scala-exercises" %% "exercise-compiler" % version.value, + "org.scala-exercises" %% "definitions" % version.value, + "org.scalacheck" %% "scalacheck" % "1.12.5", + "com.github.alexarchambault" %% "scalacheck-shapeless_1.12" % "0.3.1", + compilerPlugin("org.spire-math" %% "kind-projector" % "0.7.1") + ) +) + +// Distribution + +lazy val gpgFolder = sys.env.getOrElse("SE_GPG_FOLDER", ".") + +lazy val publishSettings = Seq( + organizationName := "Scala Exercises", + organizationHomepage := Some(new URL("http://scala-exercises.org")), + startYear := Some(2016), + 
description := "Scala Exercises: The path to enlightenment", + homepage := Some(url("http://scala-exercises.org")), + pgpPassphrase := Some(sys.env.getOrElse("SE_GPG_PASSPHRASE", "").toCharArray), + pgpPublicRing := file(s"$gpgFolder/pubring.gpg"), + pgpSecretRing := file(s"$gpgFolder/secring.gpg"), + credentials += Credentials("Sonatype Nexus Repository Manager", "oss.sonatype.org", sys.env.getOrElse("PUBLISH_USERNAME", ""), sys.env.getOrElse("PUBLISH_PASSWORD", "")), + scmInfo := Some(ScmInfo(url("https://github.com/scala-exercises/exercises-fetch"), "https://github.com/scala-exercises/exercises-fetch.git")), + licenses := Seq("Apache License, Version 2.0" -> url("http://www.apache.org/licenses/LICENSE-2.0.txt")), + publishMavenStyle := true, + publishArtifact in Test := false, + pomIncludeRepository := Function.const(false), + publishTo := { + val nexus = "https://oss.sonatype.org/" + if (isSnapshot.value) + Some("Snapshots" at nexus + "content/repositories/snapshots") + else + Some("Releases" at nexus + "service/local/staging/deploy/maven2") + } +) diff --git a/deploy.sh b/deploy.sh new file mode 100644 index 0000000..04e80fe --- /dev/null +++ b/deploy.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +function decipherKeys { + echo $KEYS_PASSPHRASE | gpg --passphrase-fd 0 keys.tar.gpg + tar xfv keys.tar +} + +function publish { + sbt compile publishSigned +} + +function release { + decipherKeys + publish +} + +if [[ $TRAVIS_BRANCH == 'master' && $TRAVIS_PULL_REQUEST == 'false' ]]; then + echo "Master branch, releasing..." 
+ release +else + echo "Not in master branch, skipping release" +fi diff --git a/keys.tar.gpg b/keys.tar.gpg new file mode 100644 index 0000000..4be048e Binary files /dev/null and b/keys.tar.gpg differ diff --git a/project/build.properties b/project/build.properties new file mode 100644 index 0000000..176a863 --- /dev/null +++ b/project/build.properties @@ -0,0 +1 @@ +sbt.version=0.13.9 \ No newline at end of file diff --git a/project/plugins.sbt b/project/plugins.sbt new file mode 100644 index 0000000..f354b1b --- /dev/null +++ b/project/plugins.sbt @@ -0,0 +1,2 @@ +addSbtPlugin("org.scala-exercises" % "sbt-exercise" % "0.1.1", "0.13", "2.10") +addSbtPlugin("com.jsuereth" % "sbt-pgp" % "1.0.0") diff --git a/src/main/scala/fetchlib/CachingSection.scala b/src/main/scala/fetchlib/CachingSection.scala new file mode 100644 index 0000000..0627d0a --- /dev/null +++ b/src/main/scala/fetchlib/CachingSection.scala @@ -0,0 +1,55 @@ +package fetchlib + +import cats.data.NonEmptyList +import org.scalatest._ +import fetch._ + +import cats._ +import fetch.unsafe.implicits._ +import fetch.syntax._ +import cats.std.list._ +import cats.syntax.cartesian._ +import cats.syntax.traverse._ + +/** + * = Caching = + * + * As we have learned, Fetch caches intermediate results implicitly. You can + * provide a prepopulated cache for running a fetch, replay a fetch with the cache of a previous + * one, and even implement a custom cache. + * + * @param name caching + */ +object CachingSection extends FlatSpec with Matchers with exercise.Section { + + import FetchTutorialHelper._ + + /** + * = Prepopulating a cache = + * + * We'll be using the default in-memory cache, prepopulated with some data. The cache key of an identity + * is calculated with the `DataSource`'s `identity` method. 
+ * {{{ + * val cache = InMemoryCache(UserSource.identity(1) -> User(1, "@dialelo")) + * }}} + * We can pass a cache as argument when running a fetch + */ + def prepopulating(res0: Int) = { + + val env = getUser(1).runE[Id](cache) + env.rounds.size should be(res0) + + } + + /** + * As you can see, when all the data is cached, no query to the data sources is executed since the results are available + * in the cache. + * If only part of the data is cached, the cached data won't be asked for: + * + */ + def cachePartialHits(res0: Int) = { + val env = List(1, 2, 3).traverse(getUser).runE[Id](cache) + env.rounds.size should be(res0) + } + +} diff --git a/src/main/scala/fetchlib/FetchLibrary.scala b/src/main/scala/fetchlib/FetchLibrary.scala new file mode 100644 index 0000000..e990b85 --- /dev/null +++ b/src/main/scala/fetchlib/FetchLibrary.scala @@ -0,0 +1,17 @@ +package fetchlib + +/** Fetch is a library for making access to data both simple & efficient. + * + * @param name fetch + */ +object FetchLibrary extends exercise.Library { + override def owner = "scala-exercises" + override def repository = "exercises-fetch" + + override def color = Some("#2F2859") + + override def sections = List( + UsageSection, + CachingSection + ) +} diff --git a/src/main/scala/fetchlib/FetchTutorialHelper.scala b/src/main/scala/fetchlib/FetchTutorialHelper.scala new file mode 100644 index 0000000..90f1dd9 --- /dev/null +++ b/src/main/scala/fetchlib/FetchTutorialHelper.scala @@ -0,0 +1,90 @@ +package fetchlib + +object FetchTutorialHelper { + + import fetch._ + import cats.std.list._ + + type UserId = Int + case class User(id: UserId, username: String) + + def latency[A](result: A, msg: String) = { + val id = Thread.currentThread.getId + println(s"~~> [$id] $msg") + Thread.sleep(100) + println(s"<~~ [$id] $msg") + result + } + + import cats.data.NonEmptyList + + val userDatabase: Map[UserId, User] = Map( + 1 -> User(1, "@one"), + 2 -> User(2, "@two"), + 3 -> User(3, "@three"), + 4 -> 
User(4, "@four") + ) + + implicit object UserSource extends DataSource[UserId, User] { + override def fetchOne(id: UserId): Query[Option[User]] = { + Query.sync({ + latency(userDatabase.get(id), s"One User $id") + }) + } + override def fetchMany(ids: NonEmptyList[UserId]): Query[Map[UserId, User]] = { + Query.sync({ + latency(userDatabase.filterKeys(ids.unwrap.contains), s"Many Users $ids") + }) + } + } + + def getUser(id: UserId): Fetch[User] = Fetch(id) + + type PostId = Int + case class Post(id: PostId, author: UserId, content: String) + + val postDatabase: Map[PostId, Post] = Map( + 1 -> Post(1, 2, "An article"), + 2 -> Post(2, 3, "Another article"), + 3 -> Post(3, 4, "Yet another article") + ) + + implicit object PostSource extends DataSource[PostId, Post] { + override def fetchOne(id: PostId): Query[Option[Post]] = { + Query.sync({ + latency(postDatabase.get(id), s"One Post $id") + }) + } + override def fetchMany(ids: NonEmptyList[PostId]): Query[Map[PostId, Post]] = { + Query.sync({ + latency(postDatabase.filterKeys(ids.unwrap.contains), s"Many Posts $ids") + }) + } + } + + def getPost(id: PostId): Fetch[Post] = Fetch(id) + + def getAuthor(p: Post): Fetch[User] = Fetch(p.author) + + type PostTopic = String + + implicit object PostTopicSource extends DataSource[Post, PostTopic] { + override def fetchOne(id: Post): Query[Option[PostTopic]] = { + Query.sync({ + val topic = if (id.id % 2 == 0) "monad" else "applicative" + latency(Option(topic), s"One Post Topic $id") + }) + } + override def fetchMany(ids: NonEmptyList[Post]): Query[Map[Post, PostTopic]] = { + Query.sync({ + val result = ids.unwrap.map(id => (id, if (id.id % 2 == 0) "monad" else "applicative")).toMap + latency(result, s"Many Post Topics $ids") + }) + } + } + + def getPostTopic(post: Post): Fetch[PostTopic] = Fetch(post) + + val cache = InMemoryCache(UserSource.identity(1) -> User(1, "@dialelo")) + +} diff --git a/src/main/scala/fetchlib/QuickStart.scala b/src/main/scala/fetchlib/QuickStart.scala 
new file mode 100644 index 0000000..9649da2 --- /dev/null +++ b/src/main/scala/fetchlib/QuickStart.scala @@ -0,0 +1,462 @@ +package fetchlib + +import cats.data.NonEmptyList +import org.scalatest._ +import fetch._ + +import cats._ +import fetch.unsafe.implicits._ +import fetch.syntax._ +import cats.std.list._ +import cats.syntax.cartesian._ +import cats.syntax.traverse._ +/** + * = Introduction = + * + * Fetch is a library that allows your data fetches to be written in a concise, + * composable way while executing efficiently. You don't need to use any explicit + * concurrency construct but existing idioms: applicative for concurrency and + * monad for sequencing. + * + * Oftentimes, our applications read and manipulate data from a variety of + * different sources such as databases, web services or file systems. These data + * sources are subject to latency, and we'd prefer to query them efficiently. + * + * If we are just reading data, we can make a series of optimizations such as: + * + * - batching requests to the same data source + * - requesting independent data from different sources in parallel + * - caching previously seen results + * + * However, if we mix these optimizations with the code that fetches the data + * we may end up trading clarity for performance. Furthermore, we are + * mixing low-level (optimization) and high-level (business logic with the data + * we read) concerns. + * + * = Usage = + * + * In order to tell Fetch how to retrieve data, we must implement the `DataSource` typeclass. 
+ * + * {{{ + * import cats.data.NonEmptyList + * + * trait DataSource[Identity, Result]{ + * def fetchOne(id: Identity): Query[Option[Result]] + * def fetchMany(ids: NonEmptyList[Identity]): Query[Map[Identity, Result]] + * } + * }}} + * + * It takes two type parameters: + * + * - `Identity`: the identity we want to fetch (a `UserId` if we were fetching users) + * - `Result`: the type of the data we retrieve (a `User` if we were fetching users) + * + * There are two methods: `fetchOne` and `fetchMany`. `fetchOne` receives one identity and must return + * a `Query` containing + * an optional result. Returning an `Option` Fetch can detect whether an identity couldn't be fetched or no longer exists. + * + * `fetchMany` method takes a non-empty list of identities and must return a `Query` containing + * a map from identities to results. Accepting a list of identities gives Fetch the ability to batch requests to + * the same data source, and returning a mapping from identities to results, Fetch can detect whenever an identity + * couldn't be fetched or no longer exists. + * + * Returning `Query` makes it possible to run a fetch independently of the target monad. + * + * = Writing your first data source = + * + * Now that we know about the `DataSource` typeclass, let's write our first data source! We'll start by implementing a data + * source for fetching users given their id. The first thing we'll do is define the types for user ids and users. + * + * {{{ + * type UserId = Int + * case class User(id: UserId, username: String) + * }}} + * + * We'll simulate unpredictable latency with this function. + * + * {{{ + * def latency[A](result: A, msg: String) = { + * val id = Thread.currentThread.getId + * println(s"~~> [$id] $msg") + * Thread.sleep(100) + * println(s"<~~ [$id] $msg") + * result + * } + * }}} + * + * And now we're ready to write our user data source; we'll emulate a database with an in-memory map. 
+ * + * {{{ + * import cats.data.NonEmptyList + * import cats.std.list._ + * + * import fetch._ + * + * val userDatabase: Map[UserId, User] = Map( + * 1 -> User(1, "@one"), + * 2 -> User(2, "@two"), + * 3 -> User(3, "@three"), + * 4 -> User(4, "@four") + * ) + * + * implicit object UserSource extends DataSource[UserId, User]{ + * override def fetchOne(id: UserId): Query[Option[User]] = { + * Query.sync({ + * latency(userDatabase.get(id), s"One User $id") + * }) + * } + * override def fetchMany(ids: NonEmptyList[UserId]): Query[Map[UserId, User]] = { + * Query.sync({ + * latency(userDatabase.filterKeys(ids.unwrap.contains), s"Many Users $ids") + * }) + * } + * } + * }}} + * + * Now that we have a data source we can write a function for fetching users + * given an id, we just have to pass a `UserId` as an argument to `Fetch`. + * + * {{{ + * def getUser(id: UserId): Fetch[User] = Fetch(id) // or, more explicitly: Fetch(id)(UserSource) + * }}} + * + * @param name usage + */ +object UsageSection extends FlatSpec with Matchers with exercise.Section { + + import FetchTutorialHelper._ + + /** + * = Creating and running a fetch + * + * We are now ready to create and run fetches. Note the distinction between Fetch creation and execution. + * When we are creating and combining `Fetch` values, we are just constructing a recipe of our data + * dependencies. + * + * {{{ + * val fetchUser: Fetch[User] = getUser(1) + * }}} + * + * A `Fetch` is just a value, and in order to be able to get its value we need to run it to a monad first. The + * target monad `M[_]` must be able to lift a `Query[A]` to `M[A]`, evaluating the query in the monad's context. + * + * We'll run `fetchUser` using `Id` as our target monad, so let's do some imports first. Note that interpreting + * a fetch to a non-concurrency monad like `Id` or `Eval` is only recommended for trying things out in a Scala + * console, that's why for using them you need to import `fetch.unsafe.implicits`. 
+ * + * {{{ + * import cats.Id + * import fetch.unsafe.implicits._ + * import fetch.syntax._ + * }}} + * + * Note that running a fetch to non-concurrency monads like `Id` or `Eval` is not supported in Scala.js. + * In real-life scenarios you'll want to run your fetches to `Future` or a `Task` type provided by a library like + * [Monix](https://monix.io/) or [fs2](https://github.com/functional-streams-for-scala/fs2), both of which are supported + * in Fetch. + * + * We can now run the fetch and see its result: + * + * ```tut:book + * fetchUser.runA[Id] + * ``` + * + */ + def creatingAndRunning(res0: User) = { + val fetchUser: Fetch[User] = getUser(1) + + fetchUser.runA[Id] should be(res0) + } + + /** + * = Sequencing = + * + * When we have two fetches that depend on each other, we can use `flatMap` to combine them. + * The most straightforward way is to use a for comprehension. + * + * When composing fetches with `flatMap` we are telling Fetch that the second one depends on the previous one, + * so it isn't able to make any optimizations. When running the below fetch, we will query the user data source + * in two rounds: one for the user with id 1 and another for the user with id 2. + */ + def sequencing(res0: Tuple2[User, User], res1: Int) = { + val fetchTwoUsers: Fetch[(User, User)] = for { + aUser <- getUser(1) + anotherUser <- getUser(aUser.id + 1) + } yield (aUser, anotherUser) + + val (env, result) = fetchTwoUsers.runF[Id] + + result should be(res0) + env.rounds.size should be(res1) + } + + /** + * = Batching = + * + * If we combine two independent requests to the same data source, Fetch will + * automatically batch them together into a single request. + * Applicative operations like the product of two fetches help us tell + * the library that those fetches are independent, and thus can be batched if they use the same data source: + * + * Both ids (1 and 2) are requested in a single query to the data source when executing the fetch. 
+ * {{{ + * import cats.syntax.cartesian._ + * }}} + */ + def batching(res0: Tuple2[User, User], res1: Int) = { + val fetchProduct: Fetch[(User, User)] = getUser(1).product(getUser(2)) + + val (env, result) = fetchProduct.runF[Id] + + result should be(res0) + env.rounds.size should be(res1) + } + + /** + * = Deduplication = + * + * If two independent requests ask for the same identity, Fetch will detect it and deduplicate the id. + * Note that when running the fetch, the identity 1 is only requested once even when it is needed by both fetches. + * + */ + def deduplication(res0: Tuple2[User, User], res1: Int) = { + val fetchDuped: Fetch[(User, User)] = getUser(1).product(getUser(1)) + + val (env, result) = fetchDuped.runF[Id] + + result should be(res0) + env.rounds.size should be(res1) + } + + /** + * = Caching = + * + * During the execution of a fetch, previously requested results are implicitly cached. This allows us to write + * fetches in a very modular way, asking for all the data they need as if it + * was in memory; furthermore, it also avoids re-fetching an identity that may have changed + * during the course of a fetch execution, which can lead to inconsistencies in the data. + * + * {{{ + * val fetchCached: Fetch[(User, User)] = for { + * aUser <- getUser(1) + * anotherUser <- getUser(1) + * } yield (aUser, anotherUser) + * }}} + * + * As you can see, the `User` with id 1 is fetched only once in a single round-trip. The next + * time it was needed we used the cached versions, thus avoiding another request to the user data + * source. 
+ */ + def caching(res0: Tuple2[User, User], res1: Int) = { + val fetchCached: Fetch[(User, User)] = for { + aUser <- getUser(1) + anotherUser <- getUser(1) + } yield (aUser, anotherUser) + + val (env, result) = fetchCached.runF[Id] + + result should be(res0) + env.rounds.size should be(res1) + } + + /** + * = Queries = + * + * Queries are a way of separating the computation required to read a piece of data from the context in + * which is run. Let's look at the various ways we have of constructing queries. + * + * @param name queries + * = synchronous = + * + * A query can be synchronous, and we may want to evaluate it when `fetchOne` and `fetchMany` + * are called. We can do so with `Query#sync`: + */ + def synchronous(res0: Boolean) = { + val threadSyncSource = new DataSource[Unit, Long] { + override def fetchOne(id: Unit): Query[Option[Long]] = { + Query.sync(Some(Thread.currentThread.getId)) + } + override def fetchMany(ids: NonEmptyList[Unit]): Query[Map[Unit, Long]] = + batchingNotSupported(ids) + } + + val threadId = Fetch(())(threadSyncSource).runA[Id] + + (threadId == Thread.currentThread.getId) should be(res0) + } + + /** + * = asynchronous = + * + * Asynchronous queries are constructed passing a function that accepts a callback (`A => Unit`) and an errback + * (`Throwable => Unit`) and performs the asynchronous computation. Note that you must ensure that either the + * callback or the errback are called. 
+ */ + def asynchronous(res0: Boolean) = { + val threadAsyncSource = new DataSource[Unit, Long] { + override def fetchOne(id: Unit): Query[Option[Long]] = { + Query.async((ok, fail) => ok(Some(Thread.currentThread.getId))) + } + override def fetchMany(ids: NonEmptyList[Unit]): Query[Map[Unit, Long]] = + batchingNotSupported(ids) + } + + val threadId = Fetch(())(threadAsyncSource).runA[Id] + + (threadId == Thread.currentThread.getId) should be(res0) + } + + /** + * = Combining data from multiple sources = + * + * Now that we know about some of the optimizations that Fetch can perform to read data efficiently, + * let's look at how we can combine more than one data source. + * + * + * Imagine that we are rendering a blog and have the following types for posts: + * + * {{{ + * type PostId = Int + * case class Post(id: PostId, author: UserId, content: String) + * }}} + * + * As you can see, every `Post` has an author, but it refers to the author by its id. We'll implement a data source for retrieving a post given a post id. + * + * {{{ + * val postDatabase: Map[PostId, Post] = Map( + * 1 -> Post(1, 2, "An article"), + * 2 -> Post(2, 3, "Another article"), + * 3 -> Post(3, 4, "Yet another article") + * ) + * + * implicit object PostSource extends DataSource[PostId, Post]{ + * override def fetchOne(id: PostId): Query[Option[Post]] = { + * Query.sync({ + * latency(postDatabase.get(id), s"One Post $id") + * }) + * } + * override def fetchMany(ids: NonEmptyList[PostId]): Query[Map[PostId, Post]] = { + * Query.sync({ + * latency(postDatabase.filterKeys(ids.unwrap.contains), s"Many Posts $ids") + * }) + * } + * } + * + * def getPost(id: PostId): Fetch[Post] = Fetch(id) + * }}} + * + * We can also implement a function for fetching a post's author given a post: + * + * {{{ + * def getAuthor(p: Post): Fetch[User] = Fetch(p.author) + * }}} + * + * Apart from posts, we are going to add another data source: one for post topics. 
+ * + * {{{ + * type PostTopic = String + * }}} + * + * We'll implement a data source for retrieving a post topic given a post id. + * + * {{{ + * implicit object PostTopicSource extends DataSource[Post, PostTopic]{ + * override def fetchOne(id: Post): Query[Option[PostTopic]] = { + * Query.sync({ + * val topic = if (id.id % 2 == 0) "monad" else "applicative" + * latency(Option(topic), s"One Post Topic $id") + * }) + * } + * override def fetchMany(ids: NonEmptyList[Post]): Query[Map[Post, PostTopic]] = { + * Query.sync({ + * val result = ids.unwrap.map(id => (id, if (id.id % 2 == 0) "monad" else "applicative")).toMap + * latency(result, s"Many Post Topics $ids") + * }) + * } + * } + * + * def getPostTopic(post: Post): Fetch[PostTopic] = Fetch(post) + * }}} + * + * Now that we have multiple sources let's mix them in the same fetch. + * In the following example, we are fetching a post given its id and then fetching its topic. This + * data could come from entirely different places, but Fetch makes working with heterogeneous sources + * of data very easy. + * + */ + def combiningData(res0: Tuple2[Post, PostTopic]) = { + val fetchMulti: Fetch[(Post, PostTopic)] = for { + post <- getPost(1) + topic <- getPostTopic(post) + } yield (post, topic) + + fetchMulti.runA[Id] should be(res0) + } + + /** + * = Concurrency = + * + * Combining multiple independent requests to the same data source can have two outcomes: + * + * - if the data sources are the same, the request is batched + * - otherwise, both data sources are queried at the same time + * + * In the following example we are fetching from different data sources so both requests will be + * evaluated together. + * The below example combines data from two different sources, and the library knows they are independent. 
+ */ + def concurrency(res0: Tuple2[Post, User], res1: Int) = { + val fetchConcurrent: Fetch[(Post, User)] = getPost(1).product(getUser(2)) + + import scala.concurrent._ + import ExecutionContext.Implicits.global + import scala.concurrent.duration._ + + import fetch.implicits._ + + val (env, result) = Await.result(fetchConcurrent.runF[Future], Duration.Inf) + result should be(res0) + env.rounds.size should be(res1) + + } + + /** + * = Combinators = + * + * Besides `flatMap` for sequencing fetches and `product` for running them concurrently, Fetch provides a number of + * other combinators. + * + * = Sequence = + * + * Whenever we have a list of fetches of the same type and want to run them concurrently, we can use the `sequence` + * combinator. It takes a `List[Fetch[A]]` and gives you back a `Fetch[List[A]]`, batching the fetches to the same + * data source and running fetches to different sources in parallel. + * Note that the `sequence` combinator is more general and works not only on lists but on any type that + * has a [[http://typelevel.org/cats/tut/traverse.html] Traverse] instance. + * + * Since `sequence` uses applicative operations internally, the library is able to perform optimizations + * across all the sequenced fetches. + * {{{ + * import cats.std.list._ + * import cats.syntax.traverse._ + * }}} + */ + def sequence(res0: List[User]) = { + val fetchSequence: Fetch[List[User]] = List(getUser(1), getUser(2), getUser(3)).sequence + fetchSequence.runA[Id] should be(res0) + } + + /** + * = Traverse = + * + * Another interesting combinator is `traverse`, which is the composition of `map` and `sequence`. + * + * All the optimizations made by `sequence` still apply when using `traverse`. + * + */ + def traverse(res0: List[User]) = { + val fetchTraverse: Fetch[List[User]] = List(1, 2, 3).traverse(getUser) + fetchTraverse.runA[Id] should be(res0) + } + +}