Updated to Scala 2.13.1

kiroco12 committed Jan 10, 2020
1 parent 4bed23b commit c3f99378f1203fbab025c72031bec66a69fd7ac4
@@ -1,6 +1,6 @@
import ProjectPlugin.autoImport._

val scalaExercisesV = "0.5.0-SNAPSHOT"
val scalaExercisesV = "0.6.0-SNAPSHOT"

def dep(artifactId: String) = "org.scala-exercises" %% artifactId % scalaExercisesV

@@ -15,7 +15,7 @@ object ProjectPlugin extends AutoPlugin {
object autoImport {

lazy val V = new {
-val scala212: String = "2.12.10"
+val scala213: String = "2.13.1"
val cats: String = "2.0.0"
val shapeless: String = "2.3.3"
val scalatest: String = "3.1.0"
@@ -41,14 +41,14 @@ object ProjectPlugin extends AutoPlugin {
organizationEmail = "hello@47deg.com"
),
orgLicenseSetting := ApacheLicense,
-scalaVersion := V.scala212,
+scalaVersion := V.scala213,
scalaOrganization := "org.scala-lang",
resolvers ++= Seq(
Resolver.mavenLocal,
Resolver.sonatypeRepo("snapshots"),
Resolver.sonatypeRepo("releases")
),
-scalacOptions := scalacCommonOptions ++ scalacLanguageOptions ++ Seq("-Ypartial-unification"),
+scalacOptions := scalacCommonOptions ++ scalacLanguageOptions,
headerLicense := Some(Custom(s"""| scala-exercises - ${name.value}
| Copyright (C) 2015-2019 47 Degrees, LLC. <http://www.47deg.com>
|
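
The dropped `-Ypartial-unification` flag was removed in Scala 2.13, where partial unification is always enabled, so the 2.13 compiler no longer accepts it. If this build ever needs to cross-compile for 2.12 again, the usual sbt pattern is to add the flag conditionally; a minimal sketch (not part of this commit) would be:

scalacOptions ++= (CrossVersion.partialVersion(scalaVersion.value) match {
  case Some((2, 12)) => Seq("-Ypartial-unification") // still needed on 2.12
  case _             => Seq.empty                    // built into 2.13 and later
})
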
@@ -1 +1 @@
-sbt.version=1.2.8
+sbt.version=1.3.5
@@ -2,5 +2,5 @@ resolvers ++= Seq(
Resolver.sonatypeRepo("snapshots")
)

addSbtPlugin("org.scala-exercises" % "sbt-exercise" % "0.5.0-SNAPSHOT")
addSbtPlugin("org.scala-exercises" % "sbt-exercise" % "0.6.0-SNAPSHOT")
addSbtPlugin("com.47deg" % "sbt-org-policies" % "0.12.0-M3")
@@ -19,9 +19,9 @@ import org.scalatest.matchers.should.Matchers
* Fetch is used for reading data from remote sources and the queries we perform can and will fail at some point.
* There are many things that can go wrong:
*
-* - an exception can be thrown by client code of certain data sources
-* - an identity may be missing
-* - the data source may be temporarily unavailable
+* - an exception can be thrown by client code of certain data sources
+* - an identity may be missing
+* - the data source may be temporarily unavailable
*
* Since the error cases are many and can’t be anticipated, Fetch errors are represented by the `FetchException`
* trait, which extends `Throwable`.
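
The paragraph above introduces `FetchException`; a hedged sketch of surfacing such a failure when running a fetch with cats-effect `IO` (assumes fetch 1.2.x; the `report` helper and its implicit parameters are illustrative, not part of this commit):

import cats.effect.{ContextShift, IO, Timer}
import fetch._

// Turns the outcome of running any Fetch value into a readable message.
def report[A](fa: Fetch[IO, A])(implicit cs: ContextShift[IO], timer: Timer[IO]): IO[String] =
  Fetch.run[IO](fa).attempt.map {
    case Right(value)            => s"Fetched $value"
    case Left(e: FetchException) => s"Fetch failed: $e" // missing identity, wrapped client exception, ...
    case Left(other)             => s"Unexpected error: $other"
  }
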
@@ -56,7 +56,8 @@ object FetchTutorialHelper {
latency[F](s"One User $id") >> CF.pure(userDatabase.get(id))

override def batch(ids: NonEmptyList[UserId]): F[Map[UserId, User]] =
latency[F](s"Batch Users $ids") >> CF.pure(userDatabase.filterKeys(ids.toList.toSet))
latency[F](s"Batch Users $ids") >> CF.pure(
userDatabase.view.filterKeys(ids.toList.toSet).toMap)
}
}
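
This change (repeated in the hunks below) is the standard Scala 2.13 collections migration: `Map#filterKeys` is deprecated on 2.13 and its replacement, `filterKeys` on a `MapView`, is lazy, so the result is materialized with `toMap` to keep returning a strict `Map`. A standalone illustration (the sample values are made up):

val userDatabase = Map(1 -> "@one", 2 -> "@two", 3 -> "@three")
val wanted       = Set(1, 3)

// 2.12 style, deprecated on 2.13: userDatabase.filterKeys(wanted)
// 2.13 style: go through a lazy MapView, then materialize it explicitly.
val selected: Map[Int, String] = userDatabase.view.filterKeys(wanted).toMap
// selected == Map(1 -> "@one", 3 -> "@three")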

@@ -89,7 +90,8 @@ object FetchTutorialHelper {
latency[F](s"One Post $id") >> CF.pure(postDatabase.get(id))

override def batch(ids: NonEmptyList[PostId]): F[Map[PostId, Post]] =
latency[F](s"Batch Posts $ids") >> CF.pure(postDatabase.filterKeys(ids.toList.toSet))
latency[F](s"Batch Posts $ids") >> CF.pure(
postDatabase.view.filterKeys(ids.toList.toSet).toMap)
}
}

@@ -147,7 +149,8 @@ object FetchTutorialHelper {
latency[F](s"One User $id") >> CF.pure(userDatabase.get(id))

override def batch(ids: NonEmptyList[UserId]): F[Map[UserId, User]] =
latency[F](s"Batch Users $ids") >> CF.pure(userDatabase.filterKeys(ids.toList.toSet))
latency[F](s"Batch Users $ids") >> CF.pure(
userDatabase.view.filterKeys(ids.toList.toSet).toMap)
}
}

@@ -169,7 +172,8 @@ object FetchTutorialHelper {
latency[F](s"One User $id") >> CF.pure(userDatabase.get(id))

override def batch(ids: NonEmptyList[UserId]): F[Map[UserId, User]] =
latency[F](s"Batch Users $ids") >> CF.pure(userDatabase.filterKeys(ids.toList.toSet))
latency[F](s"Batch Users $ids") >> CF.pure(
userDatabase.view.filterKeys(ids.toList.toSet).toMap)
}
}

@@ -27,24 +27,30 @@ import org.scalatest.matchers.should.Matchers
*
* If we are just reading data, we can make a series of optimizations such as:
*
-* - batching requests to the same data source
-* - requesting independent data from different sources in parallel
-* - caching previously seen results
+* - batching requests to the same data source
+* - requesting independent data from different sources in parallel
+* - caching previously seen results
*
* However, if we mix these optimizations with the code that fetches the data
* we may end up trading clarity for performance. Furthermore, we are
* mixing low-level (optimization) and high-level (business logic with the data
* we read) concerns.
*
* = Installation =
-*To begin, add the following dependency to your SBT build file:
-*{{{
-*"com.47deg" %% "fetch" % "1.2.1"
-*}}}
-*Or, if using Scala.js:
-*{{{
-*"com.47deg" %%% "fetch" % "1.2.1"
-*}}}
-*Now you’ll have Fetch available in both Scala and Scala.js.
-*
+* To begin, add the following dependency to your SBT build file:
+*
+* {{{
+* "com.47deg" %% "fetch" % "1.2.1"
+* }}}
+*
+* Or, if using Scala.js:
+*
+* {{{
+* "com.47deg" %%% "fetch" % "1.2.1"
+* }}}
+*
+* Now you’ll have Fetch available in both Scala and Scala.js.
+*
* = Usage =
*
@@ -68,8 +74,8 @@ import org.scalatest.matchers.should.Matchers
*
* It takes two type parameters:
*
-* - `Identity`: the identity we want to fetch (a `UserId` if we were fetching users)
-* - `Result`: the type of the data we retrieve (a `User` if we were fetching users)
+* - `Identity`: the identity we want to fetch (a `UserId` if we were fetching users)
+* - `Result`: the type of the data we retrieve (a `User` if we were fetching users)
*
* There are two methods: `fetch` and `batch`. `fetch` receives one identity and must return
* a `Concurrent` containing
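
Tying this description to the batch changes earlier in the commit, here is a hedged sketch of a users `DataSource` in the style of the tutorial helpers above (assumes fetch 1.2.x; the simulated `latency` step from `FetchTutorialHelper` is omitted and the sample data is made up):

import cats.data.NonEmptyList
import cats.effect.Concurrent
import fetch._

object UsersExample {
  type UserId = Int
  case class User(id: UserId, username: String)

  val userDatabase: Map[UserId, User] =
    Map(1 -> User(1, "@one"), 2 -> User(2, "@two"), 3 -> User(3, "@three"))

  object Users extends Data[UserId, User] {
    def name = "Users"

    def source[F[_]: Concurrent]: DataSource[F, UserId, User] =
      new DataSource[F, UserId, User] {
        override def data = Users
        override def CF   = Concurrent[F]

        // `fetch`: one identity in, optionally one result out.
        override def fetch(id: UserId): F[Option[User]] =
          CF.pure(userDatabase.get(id))

        // `batch`: several identities in, a map of the ones that were found,
        // built with the 2.13-friendly view.filterKeys(...).toMap seen above.
        override def batch(ids: NonEmptyList[UserId]): F[Map[UserId, User]] =
          CF.pure(userDatabase.view.filterKeys(ids.toList.toSet).toMap)
      }
  }
}

A fetch built from this source, e.g. `Fetch(1, UsersExample.Users.source)`, goes through `fetch` for a single identity and through `batch` when several independent identities are requested together.
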
@@ -1 +1 @@
version in ThisBuild := "0.5.0-SNAPSHOT"
version in ThisBuild := "0.6.0-SNAPSHOT"
