+ );
+}
+export default function DocVersionBanner({className}) {
+ const versionMetadata = useDocsVersion();
+ if (versionMetadata.banner) {
+ return (
+
+ );
+ }
+ return null;
+}
diff --git a/website/versioned_docs/version-1.0.18/about/code_of_conduct.md b/website/versioned_docs/version-1.0.18/about/code_of_conduct.md
new file mode 100644
index 000000000000..66801c9f95b5
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/about/code_of_conduct.md
@@ -0,0 +1,45 @@
+---
+id: about_coc
+title: "ZIO Code of Conduct"
+---
+
+We are committed to providing a friendly, safe and welcoming
+environment for all, regardless of level of experience, gender, gender
+identity and expression, sexual orientation, disability, personal
+appearance, body size, race, ethnicity, age, religion, nationality, or
+other such characteristics.
+
+The ZIO project follows the [Scala Code of Conduct](https://www.scala-lang.org/conduct/), with
+an additional clause regarding moderation that is detailed below. All participants, contributors and
+members are expected to follow the Scala Code of Conduct when discussing the project on the available
+communication channels. If you are being harassed, please contact us immediately so that we can support you.
+
+## Moderation and Steering Committee
+
+The ZIO project is moderated by the Steering Committee team members:
+
+- Itamar Ravid [@iravid](https://github.com/iravid)
+- John De Goes [@jdegoes](https://github.com/jdegoes)
+- Kai [@neko-kai](https://github.com/neko-kai)
+- Paul Shirshov [@pshirshov](https://github.com/pshirshov)
+- Pierre Ricadat [@ghostdogpr](https://github.com/ghostdogpr)
+- Wiem Zine El Abidine [@wi101](https://github.com/wi101)
+
+The ZIO project requires that drastic moderation actions detailed in the code of
+conduct - for example, removing a user from the Gitter channel - be agreed upon
+by a group of over 2/3rds of the steering committee.
+
+For any questions, concerns, or moderation requests please contact any member of
+the project, or the people listed above.
+
+## BDFL
+
+In addition to the above, the ZIO project's BDFL (benevolent dictator for life) is
+John De Goes (@jdegoes), owing to his original founding of the project and continued
+investments in it. While John adheres to the same code of conduct as everyone else,
+he is entitled to override moderation decisions made by the steering committee.
+
+We do not take the BDFL position lightly, especially with regards to moderation. John
+has consistently shown he is level-headed and able to handle conflict responsibly. Feel
+free to reach out to any member of the steering committee, including John himself,
+with any concern you might have.
diff --git a/website/versioned_docs/version-1.0.18/about/coding_guidelines.md b/website/versioned_docs/version-1.0.18/about/coding_guidelines.md
new file mode 100644
index 000000000000..d66b642434e5
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/about/coding_guidelines.md
@@ -0,0 +1,154 @@
+---
+id: about_coding_guidelines
+title: "ZIO Coding Guidelines"
+---
+
+These are coding guidelines strictly for ZIO contributors for ZIO projects and
+not general conventions to be applied by the Scala community at large.
+
+Additionally, bear in mind that, although we try to enforce these rules to the
+best of our ability, both via automated rules (scalafmt) and strict reviewing
+processes, it is still possible to find existing code that does not comply with
+these rules. If that is the case, we would be extremely grateful if you could
+make a contribution, by providing a fix to said issue.
+
+Last, but not least, these rules are continuously evolving and as such,
+refer to them once in a while when in doubt.
+
+### Defining classes and traits
+
+1. Value classes must be final and extend `AnyVal`.
+This is done to avoid allocating runtime objects;
+
+2. Method extension classes must be final and extend `AnyVal`;
+
+3. Avoid overloading standard interfaces. When creating services avoid using the same names as well known standard interfaces.
+Example: Instead of having a service `Random` with methods `nextLong(n)` and `nextInt(n)` consider choosing something like
+`nextLongBounded(n)` and `nextIntBounded(n)`.
+
+4. Sealed traits that are ADTs (Algebraic data types) should extend `Product` and `Serializable`.
+This is done to help the compiler infer types;
+
+5. Regular traits and sealed traits that do not form ADTs should extend `Serializable` but not `Product`;
+
+6. Traits should always extend `Serializable`. (e.g. `ZIO`).
+
+### Final and private modifiers
+
+1. All methods on classes / traits are declared `final`, by default;
+
+2. No methods on objects declared `final`, because they are `final` by default;
+
+3. No methods on final classes declared `final`, because they are `final` by default;
+
+4. All classes inside objects should be defined `final`, because otherwise they could still be extended;
+
+5. In general, classes that are not case classes have their constructors & constructor parameters private.
+ Typically, it is not good practice to expose constructors and constructor parameters but exceptions apply (i.e. `Assertion` and `TestAnnotation`);
+
+6. All `vals` declared `final`, even in objects or `final classes`, if they are constant expressions and without type annotations;
+
+7. Package-private `vals` and methods should be declared `final`.
+
+### Refactoring
+
+1. If a class has all its members `final`, the class should be declared `final` and `final` member annotations should be removed except constant expressions;
+
+2. All type annotations should use the least powerful type alias. This means, that, let us say, a `ZIO` effect that has
+ no dependencies but throws an arbitrary error, should be defined as `IO`.
+3. Use `def` in place of `val` for an abstract data member to avoid `NullPointerException` risk.
+
+### Understanding naming of parameters or values
+
+ZIO code often uses the following naming conventions, and you might be asked to change method parameters to follow these conventions. This guide can help you understand where the names come from.
+Naming expectations can be helpful in understanding the role of certain parameters without even glancing at their type signatures when reading code or class/method signatures.
+
+1. Partial functions have a shortened name `pf`;
+
+2. In ZIO implicit parameters are often used as compiler evidences;
+ These evidences help you, as a developer, prove something to the compiler (at compile time), and they have the ability to add constraints to a method;
+ They are typically called `ev` if there is only one. Or `ev1`, `ev2`... if more than one;
+
+3. Promises are called `p` (unless in its own class methods, in that case it is called `that`, like point 8 defines);
+
+4. Functions are called `fn`, `fn1`, unless they bear specific meaning: `use`, `release`;
+
+5. ZIO effects are called `f`, unless they bear specific meaning like partially providing environment: `r0`;
+
+6. Consider methods ending with `_` having more meaningful names;
+
+7. Iterables are called `in`;
+
+8. When a parameter type equals own (in a method of a trait) call it `that`;
+
+9. Be mindful of using by-name parameters. Mind the `Function[0]` extra allocation and loss of clean syntax when invoking the method.
+ Loss of syntax means that instead of being able to do something like `f.flatMap(ZIO.success)` you require to explicitly do `f.flatMap(ZIO.success(_))`;
+
+10. Fold or fold variants initial values are called `zero`.
+
+### Understanding naming of methods
+
+ZIO goes to great lengths to define method names that are intuitive to the library user. Naming is hard!!!
+This section will attempt to provide some guidelines and examples to document, guide and explain naming of methods in ZIO.
+
+1. Methods that lift pure values to effects are dangerous. Dangerous in the sense that they can potentially have dangerous side-effects.
+ Such methods should have a default lazy variant and an eager variant for advanced users that are aware they absolutely do not have side-effects in their code,
+ having slight gains in performance. The lazy variant should have a normal name (succeed, fail, die, lift) and the eager variant should have a `Now` suffix
+ (succeedNow, failNow, dieNow, liftNow) which makes its eager behaviour clear;
+
+2. Methods that have the form of `List#zip` are called `zip`, and have an alias called `<*>`. The parallel version, if applicable, has the name `zipPar`, with an alias called `<&>`;
+
+3. Methods that are intended to capture side-effects and convert them into functional effects should be prefixed by `effect`. For example, `ZIO.effect`;
+
+4. The dual of zip, which is trying either a left or right side, producing an Either of the result, should be called `orElseEither`, with alias `<+>`.
+ The simplified variant where both left and right have the same type should be called `orElse`, with alias `<>`;
+
+5. Constructors for a data type `X` that are based on another data type `Y` should be placed in the companion object `X` and named `fromY`.
+ For example, `ZIO.fromOption`, `ZStream.fromEffect`;
+
+6. Parallel versions of methods should be named the same, but with a `Par` suffix. Parallel versions with a bound on parallelism should use a `ParN` suffix;
+
+7. `Foreach` should be used as the default traverse operation, with `traverse` retained as an alias for programmers with an FP background. For example, `ZIO.foreach`.
+
+### Type annotations
+
+ZIO goes to great lengths to take advantage of the scala compiler in varied ways. Type variance is one of them.
+The following rules are good to have in mind when adding new `types`, `traits` or `classes` that have either covariant or contravariant types.
+
+1. Generalized ADTs should always have type annotation. (i.e. `final case class Fail[+E](value: E) extends Cause[E]`);
+
+2. Type alias should always have type annotation. Much like in Generalized ADTs defining type aliases should carry the type annotations
+ (i.e. `type IO[+E, +A] = ZIO[Any, E, A]`).
+
+When defining new methods, keep in mind the following rules:
+
+1. Accept the most general type possible. For example, if a method accepts a collection, prefer `Iterable[A]` to `List[A]`.
+2. Return the most specific type possible, e.g., prefer `UIO[Unit]` to `UIO[Any]`.
+
+### Method alphabetization
+
+In general the following rules should be applied regarding method alphabetization.
+To fix forward references of values we recommend the programmer to make them lazy (`lazy val`).
+Operators are any methods that only have non-letter characters (i.e. `<*>` , `<>`, `*>`).
+
+1. Public abstract defs / vals listed first, and alphabetized, with operators appearing before names.
+
+2. Public concrete defs / vals listed second, and alphabetized, with operators appearing before names.
+
+3. Private implementation details listed third, and alphabetized, with operators appearing before names.
+
+### Scala documentation
+
+It is strongly recommended to use scala doc links when referring to other members.
+This both makes it easier for users to navigate the documentation and enforces that the references are accurate.
+Good examples of this are the `ZIO` type aliases that are extremely pervasive in the codebase: `Task`, `RIO`, `URIO` and `UIO`.
+To make it easy for developers to see the implementation, scala doc links are used, for example:
+
+```
+ /**
+ * @see See [[zio.ZIO.absolve]]
+ */
+ def absolve[R, A](v: RIO[R, Either[Throwable, A]]): RIO[R, A] =
+ ZIO.absolve(v)
+```
+
diff --git a/website/versioned_docs/version-1.0.18/about/contributing.md b/website/versioned_docs/version-1.0.18/about/contributing.md
new file mode 100644
index 000000000000..29d5d08ad461
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/about/contributing.md
@@ -0,0 +1,300 @@
+---
+id: about_contributing
+title: "ZIO Contributor Guidelines"
+---
+
+Thank you for your interest in contributing to ZIO, which is a small, zero-dependency library for doing type-safe, composable concurrent and asynchronous programming!
+
+We welcome contributions from all people! You will learn about functional programming, and you will add your own unique touch to the ZIO project. We are happy to help you to get started and to hear your suggestions and answer your questions.
+
+_You too can contribute to ZIO, we believe in you!_
+
+# Contributing
+
+## Getting Started
+
+To begin contributing, please follow these steps:
+
+### Get The Project
+
+If you do not already have one, sign up for a free [GitHub Account](https://github.com/join?source=header-home).
+
+After you [log into](https://github.com/login) GitHub using your account, go to the [ZIO Project Page](https://github.com/zio/zio), and click on [Fork](https://github.com/zio/zio/fork) to fork the ZIO repository into your own account.
+
+You will make _all_ contributions from your own account. No one contributes _directly_ to the main repository. Contributors only ever merge code from other people's forks into the main repository.
+
+Once you have forked the repository, you can now clone your forked repository to your own machine, so you have a complete copy of the project and can begin safely making your modifications (even without an Internet connection).
+
+To clone your forked repository, first make sure you have installed [Git](https://git-scm.com/downloads), the version control system used by GitHub. Then open a Terminal and type the following commands:
+
+```bash
+mkdir zio
+cd zio
+git clone git@github.com:your-user-name/zio.git .
+```
+
+If these steps were successful, then congratulations, you now have a complete copy of the ZIO project!
+
+The next step is to build the project on your machine, to ensure you know how to compile the project and run tests.
+
+### Build the Project
+
+The official way to build the project is with sbt. An sbt build file is included in the project, so if you choose to build the project this way, you won't have to do any additional configuration or setup (others choose to build the project using IntelliJ IDEA, Gradle, Maven, Mill, or Fury).
+
+We use a custom sbt script, which is included in the repository, in order to ensure settings are uniform across all development machines, and the continuous integration service (Circle CI).
+
+The sbt script is in the root of the repository. To launch this script from your Terminal window, simply type:
+
+```bash
+./sbt
+```
+
+Sbt will launch, read the project build file, and download dependencies as required.
+
+You can now compile the production source code with the following sbt command:
+
+```bash
+compile
+```
+
+You can compile the test source code with the following sbt command:
+
+```bash
+Test/compile
+```
+
+[Learn more](https://www.scala-sbt.org) about sbt to understand how you can list projects, switch projects, and otherwise manage an sbt project.
+
+The main project in ZIO is `coreJVM` (the core project on the JVM; there is also `coreJS` for the core project on Scala.js), which you can focus on using sbt by issuing the following command:
+
+```bash
+project coreJVM
+```
+
+### Find an Issue
+
+You may have your own idea about what contributions to make to ZIO, which is great! If you want to make sure the ZIO contributors are open to your idea, you can [open an issue](https://github.com/zio/zio/issues/new) first on the ZIO project site.
+
+Otherwise, if you have no ideas about what to contribute, you can find a large supply of feature requests and bugs on the project's [issue tracker](https://github.com/zio/zio/issues).
+
+Issues are tagged with various labels, such as `good first issue`, which help you find issues that are a fit for you.
+
+If some issue is confusing or you think you might need help, then just post a comment on the issue asking for help. Typically, the author of the issue will provide as much help as you need, and if the issue is critical, leading ZIO contributors will probably step in to mentor you and give you a hand, making sure you understand the issue thoroughly.
+
+Once you've decided on an issue and understand what is necessary to complete the issue, then it's a good idea to post a comment on the issue saying that you intend to work on it. Otherwise, someone else might work on it too!
+
+### Fix an Issue
+
+Once you have an issue, the next step is to fix the bug or implement the feature. Since ZIO is an open source project, there are no deadlines. Take your time!
+
+The only thing you have to worry about is if you take too long, especially for a critical issue, eventually someone else will come along and work on the issue.
+
+If you shoot for 2-3 weeks for most issues, this should give you plenty of time without having to worry about having your issue stolen.
+
+If you get stuck, please consider [opening a pull request](https://github.com/zio/zio/compare) for your incomplete work, and asking for help (just prefix the pull request by _WIP_). In addition, you can comment on the original issue, pointing people to your own fork. Both of these are great ways to get outside help from people more familiar with the project.
+
+### Prepare Your Code
+
+If you've gotten this far, congratulations! You've implemented a new feature or fixed a bug. Now you're in the last mile, and the next step is submitting your code for review, so that other contributors can spot issues and help improve the quality of the code.
+
+To do this, you need to commit your changes locally. A good way to find out what you did locally is to use the `git status` command:
+
+```bash
+git status
+```
+
+If you see new files, you will have to tell `git` to add them to the repository using `git add`:
+
+```bash
+git add core/src/shared/zio/zio/NewFile.scala
+```
+
+Then you can commit all your changes at once with the following command:
+
+```bash
+git commit -am "Fixed #94211 - Optimized race for lists of effects"
+```
+
+At this point, you have saved your work locally, to your machine, but you still need to push your changes to your fork of the repository. To do that, use the `git push` command:
+
+```bash
+git push
+```
+
+Now while you were working on this great improvement, it's quite likely that other ZIO contributors were making their own improvements. You need to pull all those improvements into your own code base to resolve any conflicts and make sure the changes all work well together.
+
+To do that, use the `git pull` command:
+
+```bash
+git pull git@github.com:zio/zio.git master
+```
+
+You may get a warning from Git that some files conflicted. Don't worry! That just means you and another contributor edited the same parts of the same files.
+
+Using a text editor, open up the conflicted files, and try to merge them together, preserving your changes and the other changes (both are important!).
+
+Once you are done, you can commit again:
+
+```bash
+git commit -am "merged upstream changes"
+```
+
+At this point, you should re-run all tests to make sure everything is passing:
+
+```bash
+# If you are already in a SBT session you can type only 'test'
+
+sbt test
+```
+
+If all the tests are passing, then you can format your code to be shipped:
+
+```bash
+# If you are already in a SBT session you can type only 'fmt'
+
+sbt fmt
+```
+
+For simplicity, there is a command that does everything. Formats the code, compiles it and runs tests:
+
+```bash
+# If you are already in a SBT session you can type only 'build'
+
+sbt build
+```
+
+If your changes altered an API, then you may need to rebuild the microsite to make sure none of the (compiled) documentation breaks:
+
+```bash
+# If you are already in a SBT session you can type only 'docs/docusaurusCreateSite'
+
+sbt docs/docusaurusCreateSite
+```
+
+(If you get an error about _Jekyll_, that means all the code examples work and you can ignore the rest.)
+
+Finally, if you are up-to-date with master, all your tests are passing, you have properly formatted your code, and the microsite builds properly, then it's time to submit your work for review!
+
+### Create a Pull Request
+
+To create a pull request, first push all your changes to your fork of the project repository:
+
+```bash
+git push
+```
+
+Next, [open a new pull request](https://github.com/zio/zio/compare) on GitHub, and select _Compare Across Forks_. On the right hand side, choose your own fork of the ZIO repository, in which you've been making your contribution.
+
+Provide a description for the pull request, which details the issue it is fixing, and has other information that may be helpful to developers reviewing the pull request.
+
+Finally, click _Create Pull Request_!
+
+### Get Your Pull Request Merged
+
+Once you have a pull request open, it's still your job to get it merged! To get it merged, you need at least one core ZIO contributor to approve the code.
+
+If you know someone who would be qualified to review your code, you can request that person, either in the comments of the pull request, or on the right-hand side, if you have appropriate permissions.
+
+Code reviews can sometimes take a few days, because open source projects are largely done outside of work, in people's leisure time. Be patient, but don't wait forever. If you haven't gotten a review within a few days, then consider gently reminding people that you need a review.
+
+Once you receive a review, you will probably have to go back and make minor changes that improve your contribution and make it follow existing conventions in the code base. This is normal, even for experienced contributors, and the rigorous reviews help ensure ZIO's code base stays high quality.
+
+If iterating on a feature resulted in several commits, consider squashing them before pushing the final version of the code.
+One of the ways to squash e.g. 5 last commits:
+```bash
+git reset --soft HEAD~5
+git commit -am "add feature X"
+git push --force
+```
+
+After you make changes, you may need to remind reviewers to check out the code again. If they give a final approval, it means your code is ready for merge! Usually this will happen at the same time, though for controversial changes, a contributor may wait for someone more senior to merge.
+
+If you don't get a merge in a day after your review is successful, then please gently remind folks that your code is ready to be merged.
+
+Sit back, relax, and enjoy being a ZIO contributor!
+
+# ZIO Contributor License Agreement
+
+Thank you for your interest in contributing to the ZIO open source project.
+
+This contributor agreement ("Agreement") describes the terms and conditions under which you may Submit a Contribution to Us. By Submitting a Contribution to Us, you accept the terms and conditions in the Agreement. If you do not accept the terms and conditions in the Agreement, you must not Submit any Contribution to Us.
+
+This is a legally binding document, so please read it carefully before accepting the terms and conditions. If you accept this Agreement, the then-current version of this Agreement shall apply each time you Submit a Contribution. The Agreement may cover more than one software project managed by Us.
+
+## 1. Definitions
+
+"We" or "Us" means Ziverge, Inc., and its duly appointed and authorized representatives.
+
+"You" means the individual or entity who Submits a Contribution to Us.
+
+"Contribution" means any work of authorship that is Submitted by You to Us in which You own or assert ownership of the Copyright. You may not Submit a Contribution if you do not own the Copyright in the entire work of authorship.
+
+"Copyright" means all rights protecting works of authorship owned or controlled by You, including copyright, moral and neighboring rights, as appropriate, for the full term of their existence including any extensions by You.
+
+"Material" means the work of authorship which is made available by Us to third parties. When this Agreement covers more than one software project, the Material means the work of authorship to which the Contribution was Submitted. After You Submit the Contribution, it may be included in the Material.
+
+"Submit" means any form of electronic, verbal, or written communication sent to Us or our representatives, including but not limited to electronic mailing lists, electronic mail, source code control systems, pull requests, and issue tracking systems that are managed by, or on behalf of, Us for the purpose of discussing and improving the Material, but excluding communication that is conspicuously marked or otherwise designated in writing by You as "Not a Contribution."
+
+"Submission Date" means the date on which You Submit a Contribution to Us.
+
+"Effective Date" means the earliest date You execute this Agreement by Submitting a Contribution to Us.
+
+## 2. Grant of Rights
+
+### 2.1 Copyright License
+
+2.1.1. You retain ownership of the Copyright in Your Contribution and have the same rights to use or license the Contribution which You would have had without entering into the Agreement.
+
+2.1.2. To the maximum extent permitted by the relevant law, You grant to Us a perpetual, worldwide, non-exclusive, transferable, royalty-free, irrevocable license under the Copyright covering the Contribution, with the right to sublicense such rights through multiple tiers of sublicensees, to reproduce, modify, display, perform and distribute the Contribution as part of the Material; provided that this license is conditioned upon compliance with Section 2.3.
+
+### 2.2 Patent License
+
+For patent claims including, without limitation, method, process, and apparatus claims which You own, control or have the right to grant, now or in the future, You grant to Us a perpetual, worldwide, non-exclusive, transferable, royalty-free, irrevocable patent license, with the right to sublicense these rights to multiple tiers of sublicensees, to make, have made, use, sell, offer for sale, import and otherwise transfer the Contribution and the Contribution in combination with the Material (and portions of such combination). This license is granted only to the extent that the exercise of the licensed rights infringes such patent claims; and provided that this license is conditioned upon compliance with Section 2.3.
+
+### 2.3 Outbound License
+
+Based on the grant of rights in Sections 2.1 and 2.2, if We include Your Contribution in a Material, We may license the Contribution under any license, including copyleft, permissive, commercial, or proprietary licenses. As a condition on the exercise of this right, We agree to also license the Contribution under the terms of the license or licenses which We are using for the Material on the Submission Date.
+
+### 2.4 Moral Rights
+
+If moral rights apply to the Contribution, to the maximum extent permitted by law, You waive and agree not to assert such moral rights against Us or our successors in interest, or any of our licensees, either direct or indirect.
+
+### 2.5 Our Rights
+
+You acknowledge that We are not obligated to use Your Contribution as part of the Material and may decide to include any Contribution We consider appropriate.
+
+### 2.6 Reservation of Rights
+
+Any rights not expressly licensed under this section are expressly reserved by You.
+
+## 3. Agreement
+
+You confirm that:
+
+a. You have the legal authority to enter into this Agreement.
+
+b. You own the Copyright and patent claims covering the Contribution which are required to grant the rights under Section 2.
+
+c. The grant of rights under Section 2 does not violate any grant of rights which You have made to third parties, including Your employer. If You are an employee, You have had Your employer approve this Agreement or sign the Entity version of this document. If You are less than eighteen years old, please have Your parents or guardian sign the Agreement.
+
+d. You have followed the instructions in, if You do not own the Copyright in the entire work of authorship Submitted.
+
+## 4. Disclaimer
+
+EXCEPT FOR THE EXPRESS WARRANTIES IN SECTION 3, THE CONTRIBUTION IS PROVIDED "AS IS". MORE PARTICULARLY, ALL EXPRESS OR IMPLIED WARRANTIES INCLUDING, WITHOUT LIMITATION, ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE EXPRESSLY DISCLAIMED BY YOU TO US. TO THE EXTENT THAT ANY SUCH WARRANTIES CANNOT BE DISCLAIMED, SUCH WARRANTY IS LIMITED IN DURATION TO THE MINIMUM PERIOD PERMITTED BY LAW.
+
+## 5. Consequential Damage Waiver
+
+TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT WILL YOU BE LIABLE FOR ANY LOSS OF PROFITS, LOSS OF ANTICIPATED SAVINGS, LOSS OF DATA, INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL AND EXEMPLARY DAMAGES ARISING OUT OF THIS AGREEMENT REGARDLESS OF THE LEGAL OR EQUITABLE THEORY (CONTRACT, TORT OR OTHERWISE) UPON WHICH THE CLAIM IS BASED.
+
+## 6. Miscellaneous
+
+6.1. This Agreement will be governed by and construed in accordance with the laws of the state of Maryland, in the United States of America, excluding its conflicts of law provisions. Under certain circumstances, the governing law in this section might be superseded by the United Nations Convention on Contracts for the International Sale of Goods ("UN Convention") and the parties intend to avoid the application of the UN Convention to this Agreement and, thus, exclude the application of the UN Convention in its entirety to this Agreement.
+
+6.2. This Agreement sets out the entire agreement between You and Us for Your Contributions to Us and overrides all other agreements or understandings.
+
+6.3. If You or We assign the rights or obligations received through this Agreement to a third party, as a condition of the assignment, that third party must agree in writing to abide by all the rights and obligations in the Agreement.
+
+6.4. The failure of either party to require performance by the other party of any provision of this Agreement in one situation shall not affect the right of a party to require such performance at any time in the future. A waiver of performance under a provision in one situation shall not be considered a waiver of the performance of the provision in the future or a waiver of the provision in its entirety.
+
+6.5. If any provision of this Agreement is found void and unenforceable, such provision will be replaced to the extent possible with a provision that comes closest to the meaning of the original provision and which is enforceable. The terms and conditions set forth in this Agreement shall apply notwithstanding any failure of essential purpose of this Agreement or any limited remedy to the maximum extent possible under law.
diff --git a/website/versioned_docs/version-1.0.18/about/index.md b/website/versioned_docs/version-1.0.18/about/index.md
new file mode 100644
index 000000000000..cb8764b41bd0
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/about/index.md
@@ -0,0 +1,9 @@
+---
+id: about_index
+title: "About ZIO"
+---
+
+Type-safe, composable asynchronous and concurrent programming for Scala
+
+
+
diff --git a/website/versioned_docs/version-1.0.18/canfail.md b/website/versioned_docs/version-1.0.18/canfail.md
new file mode 100644
index 000000000000..6eef6a1f401a
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/canfail.md
@@ -0,0 +1,90 @@
+---
+id: can_fail
+title: "Compile Time Errors for Handling Combinators"
+slug: can_fail
+---
+
+ZIO provides a variety of combinators to handle errors such as `orElse`, `catchAll`, `catchSome`, `option`, `either`, and `retry`. However, these combinators only make sense for effects that can fail (i.e. where the error type is not `Nothing`). To help you identify code that doesn't make sense, error handling combinators require implicit evidence `CanFail[E]`, which is automatically available for all types except `Nothing`. The table below includes a list of combinators that only make sense for effects that can fail along with value preserving rewrites.
+
+## ZIO
+
+Code | Rewrite
+--- | ---
+`uio <> zio` | `uio`
+`uio.catchAll(f)` | `uio`
+`uio.catchSome(pf)` | `uio`
+`uio.either` | `uio`*
+`uio.eventually` | `uio`
+`uio.flatMapError(f)` | `uio`
+`uio.fold(f, g)` | `uio.map(g)`
+`uio.foldM(f, g)` | `uio.flatMap(g)`
+`uio.mapBoth(f, g)` | `uio.map(g)`
+`uio.mapError(f)` | `uio`
+`uio.option` | `uio`*
+`uio.orDie` | `uio`
+`uio.orDieWith(f)` | `uio`
+`uio.orElse(zio)` | `uio`
+`uio.orElseEither(zio)` | `uio`*
+`uio.orElseFail(e)` | `uio`
+`uio.asElseSucceed(a)` | `uio`
+`uio.refineOrDie(pf)` | `uio`
+`uio.refineOrDieWith(pf)(f)` | `uio`
+`uio.refineToOrDie` | `uio`
+`uio.retry(s)` | `uio`
+`uio.retryOrElse(s, f)` | `uio`
+`uio.retryOrElseEither(s, f)` | `uio`*
+`uio.tapBoth(f, g)` | `uio.tap(g)`
+`uio.tapError(f)` | `uio`
+`ZIO.partitionM(in)(f)` | `ZIO.foreach(in)(f)`*
+`ZIO.partitionMPar(in)(f)` | `ZIO.foreachPar(in)(f)`*
+`ZIO.partitionMParN(n)(in)(f)` | `ZIO.foreachParN(n)(in)(f)`*
+`ZIO.validateM(in)(f)` | `ZIO.foreach(in)(f)`*
+`ZIO.validateFirstM(in)(f)` | `ZIO.foreach(in)(f)`*
+
+## ZManaged
+
+Code | Rewrite
+--- | ---
+`umanaged <> zmanaged` | `umanaged`
+`umanaged.catchAll(f)` | `umanaged`
+`umanaged.catchSome(pf)` | `umanaged`
+`umanaged.either` | `umanaged`*
+`umanaged.flatMapError(f)` | `umanaged`
+`umanaged.fold(f, g)` | `umanaged.map(g)`
+`umanaged.foldM(f, g)` | `umanaged.flatMap(g)`
+`umanaged.mapBoth(f, g)` | `umanaged.map(g)`
+`umanaged.mapError(f)` | `umanaged`
+`umanaged.option` | `umanaged`*
+`umanaged.orDie` | `umanaged`
+`umanaged.orDieWith(f)` | `umanaged`
+`umanaged.orElse(zmanaged)` | `umanaged`
+`umanaged.orElseEither(zmanaged)` | `umanaged`
+`umanaged.orElseFail(e)` | `umanaged`
+`umanaged.asElseSucceed(a)` | `umanaged`
+`umanaged.refineOrDie(pf)` | `umanaged`
+`umanaged.refineToOrDie` | `umanaged`
+`umanaged.refineToOrDieWith(pf)(f)` | `umanaged`
+`umanaged.retry(s)` | `umanaged`
+
+## ZStream
+
+Code | Rewrite
+--- | ---
+`ustream.catchAll(f)` | `ustream`
+`ustream.either` | `ustream`*
+`ustream.mapBoth(f, g)` | `ustream.map(g)`
+`ustream.mapError(f)` | `ustream`
+`ustream.orElse(zstream)` | `ustream`
+
+## ZStreamChunk
+
+Code | Rewrite
+--- | ---
+`ustream.either` | `ustream`
+`ustream.orElse(zstream)` | `ustream`
+
+## (*) Notes:
+
+- `either`, `option`, `orElseEither`, and `retryOrElseEither` wrap their results in `Some` or `Right` so after rewriting, code calling these methods can be simplified to accept an `A` rather than an `Option[A]` or `Either[E, A]`.
+
+- `partitionM`, `partitionMPar`, `partitionMParN`, `validateM` and `validateFirstM` have error accumulating semantics on either the error channel or the success channel. After the rewrite, the error type can be simplified to `E` rather than `List[E]`, or the success type to `List[B]` instead of `(List[E], List[B])`.
\ No newline at end of file
diff --git a/website/versioned_docs/version-1.0.18/getting_started.md b/website/versioned_docs/version-1.0.18/getting_started.md
new file mode 100644
index 000000000000..7835f5a6a729
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/getting_started.md
@@ -0,0 +1,96 @@
+---
+id: getting_started
+slug: /
+title: "Getting Started"
+---
+
+Include ZIO in your project by adding the following to your `build.sbt` file:
+
+```
+libraryDependencies += "dev.zio" %% "zio" % "1.0.18"
+```
+
+If you want to use ZIO streams, you should also include the following dependency:
+
+```
+libraryDependencies += "dev.zio" %% "zio-streams" % "1.0.18"
+```
+
+## Main
+
+Your application can extend `App`, which provides a complete runtime system and allows you to write your whole program using ZIO:
+
+```scala
+import zio.console._
+
+object MyApp extends zio.App {
+
+ def run(args: List[String]) =
+ myAppLogic.exitCode
+
+ val myAppLogic =
+ for {
+ _ <- putStrLn("Hello! What is your name?")
+ name <- getStrLn
+ _ <- putStrLn(s"Hello, ${name}, welcome to ZIO!")
+ } yield ()
+}
+```
+The `run` method should return a ZIO value which has all its errors handled,
+which, in ZIO parlance, is an unexceptional ZIO value.
+
+One way to do this is to invoke `fold` over a ZIO value, to get an unexceptional ZIO value.
+That requires two handler functions: `eh: E => B` (the error handler) and `ah: A => B` (the success handler).
+
+If `myAppLogic` fails, `eh` will be used to get from `e: E` to `b: B`;
+if it succeeds, `ah` will be used to get from `a: A` to `b: B`.
+
+Folding `myAppLogic` in this way produces an unexceptional ZIO value, with `B` being `Int`.
+If `myAppLogic` fails, there will be a 1; if it succeeds, there will be a 0.
+
+---
+
+If you are integrating ZIO into an existing application, using dependency injection, or do not control your main function, then you can create a runtime system in order to execute your ZIO programs:
+
+```scala
+import zio._
+
+object IntegrationExample {
+ val runtime = Runtime.default
+
+ runtime.unsafeRun(Task(println("Hello World!")))
+}
+```
+
+Ideally, your application should have a _single_ runtime, because each runtime has its own resources (including thread pool and unhandled error reporter).
+
+## Console
+
+ZIO provides a module for interacting with the console. You can import the functions in this module with the following code snippet:
+
+```scala
+import zio.console._
+```
+
+If you need to print text to the console, you can use `putStr` and `putStrLn`:
+
+```scala
+// Print without trailing line break
+putStr("Hello World")
+// res8: ZIO[Console, java.io.IOException, Unit] = zio.ZIO$Read@18df26a1
+
+// Print string and include trailing line break
+putStrLn("Hello World")
+// res9: ZIO[Console, java.io.IOException, Unit] = zio.ZIO$Read@52b30dcd
+```
+
+If you need to read input from the console, you can use `getStrLn`:
+
+```scala
+val echo = getStrLn.flatMap(line => putStrLn(line))
+// echo: ZIO[Console, java.io.IOException, Unit] = zio.ZIO$FlatMap@49de1505
+```
+
+## Learning More
+
+To learn more about ZIO, see the [Overview](overview/index.md).
diff --git a/website/versioned_docs/version-1.0.18/guides/access-system-information.md b/website/versioned_docs/version-1.0.18/guides/access-system-information.md
new file mode 100644
index 000000000000..8bf99a05236d
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/guides/access-system-information.md
@@ -0,0 +1,39 @@
+---
+id: access-system-information
+title: "How to Access System Information?"
+---
+
+Sometimes, environment variables are relevant information to an application. ZIO provides a `system` package to interface with this functionality.
+
+```scala
+import zio.system
+```
+
+## Environment Variables
+
+With the `env` method, you can safely read an environment variable:
+
+```scala
+// Read an environment variable
+system.env("JAVA_HOME")
+// res0: zio.ZIO[system.package.System, SecurityException, Option[String]] = zio.ZIO$Read@2111d7b9
+```
+
+## Properties
+
+With the `property` method, you can safely access Java properties:
+
+```scala
+// Read a system property
+system.property("java.version")
+// res1: zio.ZIO[system.package.System, Throwable, Option[String]] = zio.ZIO$Read@7a023e34
+```
+
+## Miscellaneous
+
+With the `lineSeparator` method, you can determine the line separator for the underlying platform:
+
+```scala
+system.lineSeparator
+// res2: zio.package.URIO[system.package.System, String] = zio.ZIO$Read@260f05ee
+```
diff --git a/website/versioned_docs/version-1.0.18/guides/handle-errors.md b/website/versioned_docs/version-1.0.18/guides/handle-errors.md
new file mode 100644
index 000000000000..5778e41accd6
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/guides/handle-errors.md
@@ -0,0 +1,40 @@
+---
+id: handle-errors
+title: "How to Handle Errors?"
+---
+
+## Declared Errors vs Unforeseen Defects
+A ZIO value has a type parameter `E` which is the type of declared errors it can fail with. `E` only covers the errors which were specified at the outset. The same ZIO value could still throw exceptions in unforeseen ways. These unforeseen situations are called _defects_ in a ZIO program, and they lie outside E.
+
+Bringing abnormal situations from the domain of defects into that of `E` enables the compiler to help us keep a tab on error conditions throughout the application, at compile time. This helps ensure the handling of domain errors in domain specific ways. Defects, on the other hand, can creep silently to higher levels in our application, and, if they get triggered at all, their handling might eventually be in more general ways.
+
+## Lossless Error Model
+ZIO holds onto errors that would otherwise be lost when using `try finally`. If the `try` block throws an exception, and the `finally` block throws an exception as well, then, if these are caught at a higher level, only the finalizer's exception will be caught normally, not the exception from the try block.
+
+ZIO, in contrast, guarantees that no errors are lost. This guarantee is provided via a hierarchy of supervisors and information made available via datatypes such as `Exit` & `Cause`. All errors will be reported. If there's a bug in the code, ZIO enables us to find out about it.
+
+## Transform `Option` and `Either` values
+
+It's typical that you work with `Option` and `Either` values inside your application. You either fetch a record from the database which might be there or not (`Option`), or parse a file which might return decode errors (`Either`). ZIO already has built-in functions to transform these values into `ZIO` values.
+
+### Either
+
+|from|to|transform|code|
+|--|--|--|--|
+|`Either[B, A]`|`ZIO[Any, E, A]`|`ifLeft: B => E`|`ZIO.fromEither(from).mapError(ifLeft)`
+|`ZIO[R, E, Either[B, A]]`|`ZIO[R, E, A]`|`ifLeft: B => E`|`from.flatMap(ZIO.fromEither(_).mapError(ifLeft))`
+|`ZIO[R, E, Either[E, A]]`|`ZIO[R, E, A]`|-|`from.rightOrFail`
+|`ZIO[R, E, Either[B, A]]`|`ZIO[R, E, A]`|`f: B => E`|`from.rightOrFailWith(f)`
+|`ZIO[R, E, Either[A, E]]`|`ZIO[R, E, A]`|-|`from.leftOrFail`
+|`ZIO[R, E, Either[A, B]]`|`ZIO[R, E, A]`|`f: B => E`|`from.leftOrFailWith(f)`
+|`ZIO[R, Throwable, Either[Throwable, A]]`|`ZIO[R, Throwable, A]`|-|`from.absolve`
+
+### Option
+
+|from|to|transform|code|
+|--|--|--|--|
+|`Option[A]`|`ZIO[Any, E, A]`|`ifEmpty: E`|`ZIO.fromOption(from).orElseFail(ifEmpty)`
+|`ZIO[R, E, Option[A]]`|`ZIO[R, E, A]`|`ifEmpty: E`|`from.someOrFail(ifEmpty)`
+
+
+
diff --git a/website/versioned_docs/version-1.0.18/guides/howto-macros.md b/website/versioned_docs/version-1.0.18/guides/howto-macros.md
new file mode 100644
index 000000000000..f8696b028ebb
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/guides/howto-macros.md
@@ -0,0 +1,139 @@
+---
+id: use-zio-macros
+title: "How to use ZIO Macros?"
+---
+
+## Scrapping the boilerplate with macros
+
+Many libraries come together with usage best practices and repeatable code, ZIO is no different. Fortunately ZIO provides macros
+to perform these repetitive tasks for you. At the moment these are only available for Scala versions `2.x`, however their equivalents
+for Dotty are on our roadmap.
+
+### Prerequisites
+
+To enable macro expansion you need to setup your project:
+
+- for Scala `>= 2.13` add compiler option
+
+```scala
+scalacOptions += "-Ymacro-annotations"
+```
+
+- for Scala `< 2.13` add macro paradise compiler plugin
+
+```scala
+compilerPlugin(("org.scalamacros" % "paradise" % "2.1.1") cross CrossVersion.full)
+```
+
+## Capability accessors
+
+### Installation
+
+```scala
+libraryDependencies += "dev.zio" %% "zio-macros" % ""
+```
+
+### Description
+
+The `@accessible` macro generates _capability accessors_ into annotated module object.
+
+```scala
+import zio.{ Has, ZIO }
+import zio.macros.accessible
+
+@accessible
+object AccountObserver {
+ trait Service {
+ def processEvent(event: AccountEvent): UIO[Unit]
+ }
+
+ // below will be autogenerated
+ def processEvent(event: AccountEvent) =
+ ZIO.accessM[Has[AccountObserver.Service]](_.get[Service].processEvent(event))
+}
+```
+
+For normal values, a `ZIO` with `Nothing` on error channel is generated.
+If the code is throwing exceptions see `@throwing` below.
+
+```scala
+import zio.{ Has, ZIO }
+import zio.macros.accessible
+
+@accessible
+object Module {
+ trait Service {
+ def pureMethod(v: Something): SomethingElse
+ }
+
+ // below will be autogenerated
+ def pureMethod(v: Something): ZIO[Service, Nothing, SomethingElse] =
+ ZIO.access[Has[Module.Service]](_.get[Service].pureMethod(v))
+}
+```
+
+The `@throwing` annotation will mark impure methods.
+Using this annotation will request ZIO to push the error on the error channel.
+
+```scala
+import zio.{ Has, ZIO }
+import zio.macros.accessible
+
+@accessible
+object Module {
+ trait Service {
+ @throwing
+ def impureMethod(v: Something): SomethingElse
+ }
+
+ // below will be autogenerated
+ def impureMethod(v: Something): ZIO[Service, Throwable, SomethingElse] =
+ ZIO.accessM[Has[Module.Service]](s => ZIO(s.get[Service].impureMethod(v)))
+}
+```
+
+
+> **Note:** the macro can only be applied to objects which contain a trait called `Service`.
+
+
+## Capability tags
+
+### Installation
+
+```scala
+libraryDependencies += "dev.zio" %% "zio-test" % ""
+```
+
+### Description
+
+The `@mockable[A]` generates _capability tags_ and _mock layer_ into annotated object.
+
+```scala
+import zio.test.mock.mockable
+
+@mockable[AccountObserver.Service]
+object AccountObserverMock
+```
+
+Will result in:
+
+```scala
+import zio.{ Has, UIO, URLayer, ZLayer }
+import zio.test.mock.{ Mock, Proxy }
+
+object AccountObserverMock extends Mock[Has[AccountObserver.Service]] {
+
+ object ProcessEvent extends Effect[AccountEvent, Nothing, Unit]
+ object RunCommand extends Effect[Unit, Nothing, Unit]
+
+ val compose: URLayer[Has[Proxy], AccountObserver] =
+ ZLayer.fromServiceM { proxy =>
+ withRuntime.map { rts =>
+ new AccountObserver.Service {
+ def processEvent(event: AccountEvent) = proxy(ProcessEvent, event)
+ def runCommand: UIO[Unit] = proxy(RunCommand)
+ }
+ }
+ }
+}
+```
diff --git a/website/versioned_docs/version-1.0.18/guides/index.md b/website/versioned_docs/version-1.0.18/guides/index.md
new file mode 100644
index 000000000000..7cedbf74c68b
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/guides/index.md
@@ -0,0 +1,7 @@
+---
+id: index
+title: "Guides"
+---
+
+Here are a few howto guides for common patterns with ZIO.
+
diff --git a/website/versioned_docs/version-1.0.18/guides/interop/with-cats-effect.md b/website/versioned_docs/version-1.0.18/guides/interop/with-cats-effect.md
new file mode 100644
index 000000000000..2313d92d3813
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/guides/interop/with-cats-effect.md
@@ -0,0 +1,723 @@
+---
+id: with-cats-effect
+title: "How to Interop with Cats Effect?"
+---
+
+
+## Introduction
+
+[`interop-cats`](https://github.com/zio/interop-cats) has instances for the [Cats](https://typelevel.org/cats/), [Cats MTL](https://github.com/typelevel/cats-mtl) and [Cats Effect](https://typelevel.org/cats-effect/) libraries, which allow you to use ZIO with any libraries that rely on these, like [Doobie](https://github.com/tpolecat/doobie), [Http4s](https://github.com/http4s/http4s), [FS2](https://github.com/functional-streams-for-scala/fs2) or [Circe](https://github.com/circe/circe)
+
+Depending on which version of Cats Effect we are using, we should pick the right version of `zio-interop-cats`. In this tutorial, whenever we're working with Cats Effect 2.x, we are using:
+
+```scala
+libraryDependencies += "dev.zio" %% "zio-interop-cats" % "2.5.1.0"
+```
+
+And whenever we are using Cats Effect 3.x instances, we are using:
+
+```scala
+libraryDependencies += "dev.zio" %% "zio-interop-cats" % "3.1.1.0"
+```
+
+Most of the interop functionality resides in the following package:
+
+```scala
+import zio.interop.catz._
+```
+
+## Cats Effect Instances
+
+ZIO integrates with Typelevel libraries by providing instances of Cats Effect type classes. These type classes are used by `fs2`, `doobie`, `http4s`, and a variety of other libraries in the functional Scala ecosystem.
+
+Due to the limitations of Cats Effect, ZIO cannot provide instances for arbitrary error types. Instead, we can obtain instances only for effects whose error type extends `Throwable`.
+
+For convenience, ZIO includes the `Task` and `RIO` type aliases, which fix the error type to `Throwable`, and may be useful for interop with Cats Effect:
+
+
+```scala
+type Task[+A] = ZIO[Any, Throwable, A]
+type RIO[-R, +A] = ZIO[ R, Throwable, A]
+```
+
+### Providing Runtime Manually
+
+To use Cats Effect instances for these types, we should have an implicit `Runtime[R]` in scope for the environment type of our effects. The following code snippet creates an implicit `Runtime` for all the modules built into ZIO:
+
+```scala
+import cats.implicits._
+import zio.interop.catz._
+
+object ZioCatsEffectInterop extends scala.App {
+ def catsEffectApp[F[_]: cats.effect.Sync]: F[Unit] =
+ cats.effect.Sync[F].delay(
+ println("Hello from Cats Effect World!")
+ )
+
+ implicit val runtime: zio.Runtime[zio.ZEnv] = zio.Runtime.default
+
+ val zioApp: zio.Task[Unit] = catsEffectApp[zio.Task]
+ runtime.unsafeRun(zioApp.exitCode)
+}
+```
+
+If we are working with Cats Effect 3.x, the `catsEffectApp[Task]` will be expanded as if we called the following code explicitly:
+
+
+```scala
+object ZioCatsEffectInterop extends scala.App {
+ val runtime: zio.Runtime[zio.ZEnv] = zio.Runtime.default
+
+ val zioApp: zio.Task[Unit] = catsEffectApp[zio.Task](
+ zio.interop.catz.asyncRuntimeInstance(runtime)
+ )
+
+ runtime.unsafeRun(zioApp.exitCode)
+}
+```
+
+And if we are working with Cats Effect 2.x, it will be expanded as if we called following code explicitly:
+
+```scala
+object ZioCatsEffectInterop extends scala.App {
+ val runtime: zio.Runtime[zio.ZEnv] = zio.Runtime.default
+
+ val zioApp = catsEffectApp[zio.Task](zio.interop.catz.taskConcurrentInstance)
+ runtime.unsafeRun(zioApp.exitCode)
+}
+```
+
+If we are using `RIO` for a custom environment `R`, then we will have to create our own `Runtime[R]` and ensure that it is implicitly available wherever we need Cats Effect instances.
+
+### Using `CatsApp` Runtime
+
+As a convenience, our application can extend `CatsApp`, which automatically brings an implicit `Runtime[ZEnv]` into our scope:
+
+```scala
+import zio.interop.catz._
+import cats.implicits._
+
+object ZioCatsEffectInteropWithCatsApp extends CatsApp {
+ def catsEffectApp[F[_]: cats.effect.Sync]: F[Unit] =
+ cats.effect.Sync[F].delay(println("Hello from Cats Effect World!"))
+
+ override def run(args: List[String]): zio.URIO[zio.ZEnv, zio.ExitCode] =
+ catsEffectApp[zio.Task].exitCode
+}
+```
+
+This example works properly in both Cats Effect 2.x and 3.x versions.
+
+## Cats Effect 2.x
+
+### Timer Instance
+
+In order to get a `cats.effect.Timer[zio.Task]` instance, we need an extra import (`zio.interop.catz.implicits._`):
+
+```scala
+import java.util.concurrent.TimeUnit
+
+import cats.implicits._
+import zio.interop.catz._
+import zio.interop.catz.implicits._ // Provides `zio.Task` instance for `cats.effect.Timer` type class
+import zio.{ ExitCode, Task, URIO }
+
+import scala.concurrent.duration.DurationInt
+
+object ZioCatsEffectTimerInterop extends zio.interop.catz.CatsApp {
+ override def run(args: List[String]): zio.URIO[zio.ZEnv, zio.ExitCode] =
+ catsEffectTimerApp[zio.Task].exitCode
+
+ def catsEffectTimerApp[F[_]: cats.effect.Clock: cats.effect.Timer: cats.effect.Sync]: F[Unit] = for {
+ t2 <- cats.effect.Clock[F].monotonic(TimeUnit.SECONDS)
+ _ <- cats.effect.Timer[F].sleep(2.seconds)
+ t1 <- cats.effect.Clock[F].monotonic(TimeUnit.SECONDS)
+ _ <- cats.effect.Sync[F].delay(println(t1 - t2))
+ } yield ()
+}
+```
+
+The reason a `cats.effect.Timer[zio.Task]` instance is not provided by the default _interop_ import is that it makes testing programs that require timing capabilities very difficult. The extra import (wherever needed) makes reasoning about timing-related effects much easier.
+
+If we're using `RIO` for a custom environment then our environment must use the `Clock` service, e.g. `R <: Clock` to get a timer.
+
+### Converting Resource to ZManaged
+
+To convert Cats Effect `Resource` into `ZManaged`, we can call `toManaged` on `Resource`.
+
+For example, assume we have the following `File` API:
+
+```scala
+case class File[F[_]: cats.effect.Sync]() {
+ import cats.syntax.apply._
+ def read: F[String] =
+ cats.effect.Sync[F].delay(println("Reading file.")) *>
+ cats.effect.Sync[F].pure("Hello, World!")
+ def close: F[Unit] =
+ cats.effect.Sync[F].delay(println("Closing file."))
+}
+
+object File {
+ import cats.syntax.apply._
+ def open[F[_]: cats.effect.Sync](name: String): F[File[F]] =
+ cats.effect.Sync[F].delay(println(s"opening $name file")) *>
+ cats.effect.Sync[F].delay(File())
+}
+```
+
+And, also assume we have `fileResource` defined as follows:
+
+```scala
+def fileResource[F[_]: cats.effect.Sync](name: String): cats.effect.Resource[F, File[F]] =
+ cats.effect.Resource.make(File.open[F](name))(_.close)
+```
+
+Let's convert that to `ZManaged`:
+
+```scala
+val resource: zio.ZManaged[zio.ZEnv, Throwable, File[zio.Task]] =
+ fileResource[zio.Task]("log.txt").toManaged[zio.ZEnv]
+```
+
+Here is a complete working example:
+
+```scala
+import zio.interop.catz._
+
+object CatsEffectResourceInterop extends CatsApp {
+ def fileResource[F[_]: cats.effect.Sync](name: String): cats.effect.Resource[F, File[F]] =
+ cats.effect.Resource.make(File.open[F](name))(_.close)
+
+ def myApp: zio.ZIO[zio.ZEnv, Throwable, Unit] = for {
+ c <- fileResource[zio.Task]("log.txt").toManaged[zio.ZEnv].use(_.read)
+ _ <- zio.console.putStr(s"file content: $c")
+ } yield ()
+
+ override def run(args: List[String]): zio.URIO[zio.ZEnv, zio.ExitCode] =
+ myApp.exitCode
+}
+```
+
+### Converting ZManaged to Resource
+
+We have an extension method on `ZManaged` called `ZManaged#toResource` which converts a ZIO managed resource to Cats Effect resource:
+
+```scala
+final class ZManagedSyntax[R, E, A](private val managed: ZManaged[R, E, A]) {
+ def toResource[F[_]](implicit
+ F: Async[F],
+ ev: Effect[ZIO[R, E, *]]
+ ): Resource[F, A] = ???
+}
+```
+
+Let's try an example:
+
+```scala
+import zio.interop.catz._
+
+object ZManagedToResource extends cats.effect.IOApp {
+ implicit val zioRuntime: zio.Runtime[zio.ZEnv] = zio.Runtime.default
+
+ val resource: cats.effect.Resource[cats.effect.IO, java.io.InputStream] =
+ zio.ZManaged
+ .fromAutoCloseable(
+ zio.ZIO.effect(
+ java.nio.file.Files.newInputStream(
+ java.nio.file.Paths.get("file.txt")
+ )
+ )
+ )
+ .toResource[cats.effect.IO]
+
+ val effect: cats.effect.IO[Unit] =
+ resource
+ .use { is =>
+ cats.effect.IO.delay(is.readAllBytes())
+ }
+ .flatMap(bytes =>
+ cats.effect.IO.delay(
+ println(s"file length: ${bytes.length}")
+ )
+ )
+
+ override def run(args: List[String]): cats.effect.IO[cats.effect.ExitCode] =
+ effect.as(cats.effect.ExitCode.Success)
+}
+```
+
+## Cats Effect 3.x
+
+### Type class Instances for ZIO's Task
+
+ZIO integrates with Cats Effect 3.x as well as 2.x. The `interop-cats` module provides `Concurrent`, `Temporal` and `Async` for `zio.Task`.
+
+An example of ZIO interoperability with CE3:
+
+```scala
+import cats.implicits._
+import zio.interop.catz._
+import scala.concurrent.duration.DurationInt
+
+object ZioCatsEffectInterop extends zio.interop.catz.CatsApp {
+
+ def catsEffectTimerApp[F[_]: cats.effect.Async]: F[Unit] = for {
+ t2 <- cats.effect.Clock[F].monotonic
+ _ <- cats.effect.Temporal[F].sleep(2.seconds)
+ t1 <- cats.effect.Clock[F].monotonic
+ _ <- cats.effect.Sync[F].delay(println(t1 - t2))
+ } yield ()
+
+ override def run(args: List[String]): zio.URIO[zio.ZEnv, zio.ExitCode] = {
+ catsEffectTimerApp[zio.Task].exitCode
+ }
+}
+```
+
+### Converting Resource to ZManaged
+
+To convert a Cats Effect `Resource` to `ZManaged` we can use the `cats.effect.Resource#toManaged` extension method by importing the `zio.interop.catz._` package; we should also provide an implicit instance of `Dispatcher`:
+
+```scala
+import zio.interop.catz._
+import scala.concurrent.ExecutionContextExecutor
+
+object ResourceToZManagedExample extends zio.App {
+ implicit val ceRuntime: cats.effect.unsafe.IORuntime =
+ cats.effect.unsafe.IORuntime.global
+ implicit val ec: ExecutionContextExecutor =
+ scala.concurrent.ExecutionContext.global
+
+ implicit val dispatcher: cats.effect.std.Dispatcher[cats.effect.IO] =
+ cats.effect.std
+ .Dispatcher[cats.effect.IO]
+ .allocated
+ .unsafeRunSync()
+ ._1
+
+ def catsResource[F[_]: cats.effect.Sync]
+ : cats.effect.Resource[F, java.io.InputStream] =
+ cats.effect.Resource
+ .fromAutoCloseable(
+ cats.effect
+ .Sync[F]
+ .delay(
+ java.nio.file.Files.newInputStream(
+ java.nio.file.Paths.get("file.txt")
+ )
+ )
+ )
+
+ val myApp: zio.ZIO[zio.console.Console, Throwable, Unit] =
+ catsResource[cats.effect.IO].toManaged
+ .use { is =>
+ zio.console.putStrLn(is.readAllBytes().length.toString)
+ }
+
+ override def run(args: List[String]): zio.URIO[zio.ZEnv, zio.ExitCode] =
+ myApp.exitCode
+}
+```
+
+## Cats Core
+
+There is another package in `interop-cats` module called `zio.interop.catz.core._` which helps us to interop with core data types. This package contains instances of `zio.Chunk` data type for Cats Core module like `cats.Order`, `cats.Hash`, `cats.Traverse`, and so forth.
+
+In the following example, we are going to use `zio.Chunk` in a Cats Effect application:
+
+```scala
+import cats.implicits._
+import zio.interop.catz.core._
+
+object ZioInteropWithCatsCore extends cats.effect.IOApp {
+ val chunk = zio.Chunk("1", "2", "3", "4", "5")
+
+ def parseInt(s: String): Option[Int] =
+ Either.catchOnly[NumberFormatException](s.toInt).toOption
+
+ val parseAll = cats.Traverse[zio.Chunk].traverse(chunk)(parseInt)
+
+ override def run(args: List[String]): cats.effect.IO[cats.effect.ExitCode] =
+ cats.effect.IO.println(parseAll).as(cats.effect.ExitCode.Success)
+}
+```
+
+This package also contains utilities to support `zio.NonEmptyChunk` interoperability with Cats Core module.
+
+## FS2 Streams
+
+The `interop-cats` module contains extension methods to convert _FS2 Stream_ to _ZStream_ and vice versa. These methods support both FS2 series, 2.x and 3.x:
+
+### From FS2 Stream to ZStream
+
+By importing `zio.stream.interop.fs2z._` into our application, the `fs2.Stream#toZStream` extension method converts a `fs2.Stream` to `ZStream`:
+
+```scala
+import zio.stream.ZStream
+import zio.stream.interop.fs2z._
+val zstream: ZStream[Any, Throwable, Int] = fs2.Stream.range(1, 10).toZStream()
+```
+### From ZStream to FS2 Stream
+
+Also, the `ZStream#toFs2Stream` converts a ZIO Stream into FS2 Stream:
+
+```scala
+import zio.stream.ZStream
+import zio.Chunk
+import zio.stream.interop.fs2z._
+val fs2stream = ZStream.fromChunks(Chunk(1, 2, 3, 4)).toFs2Stream
+```
+
+## Using ZQueue with Cats Effect
+The `interop-cats` library has a `zio.interop.Queue` package to lift creation of a `ZQueue` effect from `UIO[Queue[A]]` to `F[Queue[F, A]]`, which enables us to run `ZQueue` under the Cats Effect runtime. It supports all variants of `ZQueue`: `bounded`, `unbounded`, `sliding` and `dropping`.
+
+```scala
+def bounded[F[+_], A](capacity: Int)(implicit R: Runtime[ZEnv], F: LiftIO[F]): F[Queue[F, A]]
+```
+
+### Cats Effect 2.x
+
+In the following example, we are going to lift the `ZQueue` creation effect to Cats `IO` effect. If we are integrating with 2.x Cats Effect library, this snippet works properly:
+
+```scala
+import zio.interop.Queue
+import cats.effect.IO
+
+implicit val runtime = Runtime.default
+def liftedToIO: IO[List[Int]] = for {
+ q <- Queue.bounded[IO, Int](100)
+ _ <- q.offer(1)
+ _ <- q.offer(2)
+ r <- q.takeAll
+} yield (r)
+```
+
+### Cats Effect 3.x
+
+To run `ZQueue` with Cats Effect 3.x we also need to provide an instance of `Dispatcher` to our contextual environment:
+
+```scala
+import zio.interop.Queue
+
+object ZioQueueInteropWithCats extends scala.App {
+
+ implicit val ceRuntime: cats.effect.unsafe.IORuntime =
+ cats.effect.unsafe.IORuntime.global
+
+ implicit val zioRuntime: zio.Runtime[zio.ZEnv] =
+ zio.Runtime.default
+
+ implicit val ec: scala.concurrent.ExecutionContextExecutor =
+ scala.concurrent.ExecutionContext.global
+
+ implicit val dispatcher: cats.effect.std.Dispatcher[cats.effect.IO] =
+ cats.effect.std
+ .Dispatcher[cats.effect.IO]
+ .allocated
+ .unsafeRunSync()
+ ._1
+
+ def liftedToIO: cats.effect.IO[List[Int]] = for {
+ q <- Queue.bounded[cats.effect.IO, Int](100)
+ _ <- q.offer(1)
+ _ <- q.offer(2)
+ r <- q.takeAll
+ } yield (r)
+
+ val catsApp = liftedToIO
+ .flatMap { e =>
+ cats.effect.IO.println(s"List of elements retrieved from Queue: $e")
+ }
+ .as(cats.effect.ExitCode.Success)
+
+ catsApp.unsafeRunSync()
+}
+```
+
+## Using STM with Cats Effect
+
+The `zio.interop.stm` provides a wrapper data type on `STM[Throwable, A]` which enables us to run `STM` with the Cats Effect library.
+
+Currently, `interop-cats` supports the `TRef`, `TPromise`, `TQueue` and `TSemaphore` data types.
+
+Let's try a working example using `STM` and `TRef`:
+
+```scala
+import cats.effect.IO
+import cats.effect.unsafe.IORuntime
+import zio.interop.stm.{STM, TRef}
+
+implicit val zioRuntime: zio.Runtime[zio.ZEnv] = zio.Runtime.default
+implicit val catsRuntime: IORuntime = IORuntime.global
+
+def transferMoney(
+ from: TRef[IO, Long],
+ to: TRef[IO, Long],
+ amount: Long
+): STM[IO, Long] =
+ for {
+ senderBal <- from.get
+ _ <-
+ if (senderBal < amount)
+ STM.fail[IO](new Exception("Not enough money"))
+ else
+ STM.unit[IO]
+ _ <- from.update(existing => existing - amount)
+ _ <- to.update(existing => existing + amount)
+ recvBal <- to.get
+ } yield recvBal
+
+val program: IO[Long] = for {
+ sndAcc <- STM.atomically[cats.effect.IO, TRef[IO, Long]](
+ TRef.make[IO, Long](1000)
+ )
+ rcvAcc <- STM.atomically[cats.effect.IO, TRef[IO, Long]](
+ TRef.make[IO, Long](200)
+ )
+ recvAmt <- STM.atomically(transferMoney(sndAcc, rcvAcc, 500L))
+} yield recvAmt
+
+program
+ .flatMap(amount =>
+ IO.println(s"Balance of second account after transaction: $amount")
+ )
+ .unsafeRunSync()
+```
+
+## Examples
+
+Cats Effect and Typelevel libraries are older than the ZIO ecosystem, so there are very nice libraries like doobie and http4s that a ZIO user would like to use in their application.
+
+We have provided some full working examples of using these important libraries:
+
+### Using ZIO with Doobie
+
+The following example shows how to use ZIO with Doobie (a library for JDBC access) and FS2 (a streaming library), which both rely on Cats Effect instances:
+
+```scala
+// This snippet works with both CE2 and CE3
+import doobie._
+import doobie.implicits._
+import fs2.Stream
+import zio.Task
+import zio.interop.catz._
+
+implicit val zioRuntime: zio.Runtime[zio.ZEnv] = zio.Runtime.default
+
+case class User(id: String, name: String, age: Int)
+
+def xa: Transactor[Task] =
+ Transactor.fromDriverManager[Task](
+ "org.h2.Driver",
+ "jdbc:h2:mem:users;DB_CLOSE_DELAY=-1"
+ )
+
+def createTable: doobie.ConnectionIO[Int] =
+ sql"""|CREATE TABLE IF NOT EXISTS USERS(
+ |id INT SERIAL UNIQUE,
+ |name VARCHAR NOT NULL UNIQUE,
+ |age SMALLINT
+ |)""".stripMargin.update.run
+
+def dropTable: doobie.ConnectionIO[Int] =
+ sql"""DROP TABLE IF EXISTS USERS""".update.run
+
+def insert(name: String, age: Int): doobie.ConnectionIO[Int] =
+ sql"insert into users (name, age) values ($name, $age)".update.run
+
+def loadUsers: Stream[doobie.ConnectionIO, User] =
+ sql"""SELECT * FROM users""".query[User].stream
+
+val doobieApp: Stream[doobie.ConnectionIO, User] = for {
+ _ <- fs2.Stream.eval(dropTable)
+ _ <- fs2.Stream.eval(createTable)
+ _ <- fs2.Stream.eval(insert("Olivia", 21))
+ _ <- fs2.Stream.eval(insert("Oliver", 30))
+ u <- loadUsers
+} yield u
+
+val run: Stream[Task, User] = doobieApp.transact(xa)
+
+val allUsers: List[User] =
+ zioRuntime.unsafeRun(run.compile.toList)
+```
+
+Sounds good, but how can we specify a more specialized transactor than the default one? Creating a customized transactor in CE2 differs from CE3.
+
+Let's try doing that for each of them:
+
+#### Customized Transactor (CE2)
+
+ZIO provides a specific blocking thread pool for blocking operations. The `doobie-hikari` module helps us create a transactor with two separated executors, one for blocking operations, and the other one for non-blocking operations. So we shouldn't run blocking JDBC operations or perform awaiting connections to the database on the main thread pool.
+
+So let's fix this issue in the previous example. In the following snippet we are going to create a `ZManaged` of a Hikari transactor. In this example we are using the `0.13.4` version of doobie, which supports CE2:
+
+```scala
+import zio.ZManaged
+import zio.blocking.Blocking
+import zio.{ Runtime, Task, ZIO, ZManaged }
+import doobie.hikari.HikariTransactor
+import cats.effect.Blocker
+import zio.interop.catz._
+
+def transactor: ZManaged[Blocking, Throwable, HikariTransactor[Task]] =
+ for {
+ be <- zio.blocking.blockingExecutor.toManaged_ // our blocking EC
+ xa <- HikariTransactor
+ .newHikariTransactor[Task](
+ "org.h2.Driver", // driver classname
+ "jdbc:h2:mem:test;DB_CLOSE_DELAY=-1", // connect URL
+ "sa", // username
+ "", // password
+ be.asEC, // await connection here
+ Blocker.liftExecutionContext(be.asEC) // execute JDBC operations here
+ )
+ .toManagedZIO
+ } yield xa
+```
+
+Now we can `transact` our `doobieApp` with this `transactor` and convert that to the `ZIO` effect:
+
+```scala
+val zioApp: ZIO[Blocking, Throwable, List[User]] =
+ transactor.use(xa => doobieApp.transact(xa).compile.toList)
+```
+
+#### Customized Transactor (CE3)
+
+In Cats Effect 3.x, the `cats.effect.Blocker` has been removed, so the transactor constructor doesn't require a blocking executor from us; it happens under the hood using the `Sync[F].blocking` operation.
+
+To create a `Transactor` in CE3, we need to create an instance of `Dispatcher` for `zio.Task`. The following example is based on Doobie's `1.0.0-M5` version which supports CE3:
+
+```scala
+import doobie.hikari.HikariTransactor
+import zio.blocking.Blocking
+import zio.interop.catz._
+import zio.{Task, ZIO, ZManaged}
+
+implicit val zioRuntime: zio.Runtime[zio.ZEnv] =
+ zio.Runtime.default
+
+implicit val dispatcher: cats.effect.std.Dispatcher[zio.Task] =
+ zioRuntime
+ .unsafeRun(
+ cats.effect.std
+ .Dispatcher[zio.Task]
+ .allocated
+ )
+ ._1
+
+def transactor: ZManaged[Blocking, Throwable, HikariTransactor[Task]] =
+ for {
+ rt <- ZIO.runtime[Any].toManaged_
+ xa <-
+ HikariTransactor
+ .newHikariTransactor[Task](
+ "org.h2.Driver", // driver classname
+ "jdbc:h2:mem:test;DB_CLOSE_DELAY=-1", // connect URL
+ "sa", // username
+ "", // password
+ rt.platform.executor.asEC // await connection here
+ )
+ .toManaged
+ } yield xa
+```
+
+Now we can `transact` our `doobieApp` with this `transactor` and convert that to the `ZIO` effect:
+
+```scala
+val zioApp: ZIO[Blocking, Throwable, List[User]] =
+ transactor.use(xa => doobieApp.transact(xa).compile.toList)
+```
+
+### Http4s
+
+Here is the full working example of using http4s in ZIO App:
+
+#### Cats Effect 2.x
+
+The following example is based on http4s's `0.21.24` version which supports CE2:
+
+```scala
+import cats.effect.{ConcurrentEffect, Sync, Timer}
+import cats.implicits._
+import fs2.Stream
+import org.http4s.HttpRoutes
+import org.http4s.client.blaze.BlazeClientBuilder
+import org.http4s.dsl.Http4sDsl
+import org.http4s.server.blaze.BlazeServerBuilder
+import zio.interop.catz._
+import zio.interop.catz.implicits._
+import zio.{Task, URIO}
+
+import scala.concurrent.ExecutionContext.global
+
+object ZioHttp4sInterop extends CatsApp {
+ def run(args: List[String]): URIO[zio.ZEnv, zio.ExitCode] =
+ stream[Task].compile.drain.exitCode
+
+ def stream[F[_]: ConcurrentEffect: Timer]: Stream[F, Nothing] = {
+ import org.http4s.implicits._
+ val httpApp = helloWorldRoute[F].orNotFound
+ for {
+ _ <- BlazeClientBuilder[F](global).stream
+ exitCode <- BlazeServerBuilder[F](global)
+ .bindHttp(8080, "0.0.0.0")
+ .withHttpApp(httpApp)
+ .serve
+ } yield exitCode
+ }.drain
+
+ def helloWorldRoute[F[_]: Sync]: HttpRoutes[F] = {
+ val dsl = new Http4sDsl[F]{}
+ import dsl._
+ HttpRoutes.strict[F] {
+ case GET -> Root =>
+ Ok("Hello, World!")
+ }
+ }
+}
+```
+
+#### Cats Effect 3.x
+
+The following example is based on http4s's `0.23.0-RC1` version which supports CE3:
+
+```scala
+import cats.Applicative
+import cats.effect.Async
+import fs2.Stream
+import org.http4s.HttpRoutes
+import org.http4s.blaze.client.BlazeClientBuilder
+import org.http4s.blaze.server.BlazeServerBuilder
+import org.http4s.dsl.Http4sDsl
+import zio.interop.catz._
+import zio.{Task, URIO}
+
+import scala.concurrent.ExecutionContext.global
+
+object ZioHttp4sInterop extends zio.interop.catz.CatsApp {
+ def stream[F[_]: Async]: Stream[F, Nothing] = {
+ import org.http4s.implicits._
+ val httpApp = helloWorldRoute[F].orNotFound
+ for {
+ _ <- BlazeClientBuilder[F](global).stream
+ exitCode <- BlazeServerBuilder[F](global)
+ .bindHttp(8080, "0.0.0.0")
+ .withHttpApp(httpApp)
+ .serve
+ } yield exitCode
+ }.drain
+
+ def helloWorldRoute[F[_]: Applicative]: HttpRoutes[F] = {
+ val dsl = new Http4sDsl[F] {}
+ import dsl._
+ HttpRoutes.strict[F] { case GET -> Root =>
+ Ok("Hello, World!")
+ }
+ }
+
+ def run(args: List[String]): URIO[zio.ZEnv, zio.ExitCode] =
+ stream[Task].compile.drain.exitCode
+}
+```
\ No newline at end of file
diff --git a/website/versioned_docs/version-1.0.18/guides/interop/with-future.md b/website/versioned_docs/version-1.0.18/guides/interop/with-future.md
new file mode 100644
index 000000000000..de7a679dd912
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/guides/interop/with-future.md
@@ -0,0 +1,53 @@
+---
+id: with-future
+title: "How to Interop with Future?"
+---
+
+## Scala Future
+
+Basic interoperability with Scala's `Future` is now provided by ZIO, and does not require a separate module.
+
+### From Future
+
+Scala's `Future` can be converted into a ZIO effect with `ZIO.fromFuture`:
+
+```scala
+def loggedFuture[A](future: ExecutionContext => Future[A]): UIO[Task[A]] = {
+ ZIO.fromFuture { implicit ec =>
+ future(ec).flatMap { result =>
+ Future(println("Future succeeded with " + result)).map(_ => result)
+ }
+ }
+}
+```
+
+Scala's `Future` can also be converted into a `Fiber` with `Fiber.fromFuture`:
+
+```scala
+def futureToFiber[A](future: => Future[A]): Fiber[Throwable, A] =
+ Fiber.fromFuture(future)
+```
+
+This is a pure operation, given any sensible notion of fiber equality.
+
+### To Future
+
+A ZIO `Task` effect can be converted into a `Future` with `ZIO#toFuture`:
+
+```scala
+def taskToFuture[A](task: Task[A]): UIO[Future[A]] =
+ task.toFuture
+```
+
+Because converting a `Task` into an (eager) `Future` is effectful, the return value of `ZIO#toFuture` is an effect. To actually begin the computation, and access the started `Future`, it is necessary to execute the effect with a runtime.
+
+A ZIO `Fiber` can be converted into a `Future` with `Fiber#toFuture`:
+
+```scala
+def fiberToFuture[A](fiber: Fiber[Throwable, A]): UIO[Future[A]] =
+ fiber.toFuture
+```
+
+## Run to Future
+
+The `Runtime` type has a method `unsafeRunToFuture`, which can execute a ZIO effect asynchronously, and return a `Future` that will be completed when the execution of the effect is complete.
diff --git a/website/versioned_docs/version-1.0.18/guides/interop/with-guava.md b/website/versioned_docs/version-1.0.18/guides/interop/with-guava.md
new file mode 100644
index 000000000000..ca1a36ef002c
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/guides/interop/with-guava.md
@@ -0,0 +1,6 @@
+---
+id: with-guava
+title: "How to Interop with Guava?"
+---
+
+The [`interop-guava`](https://github.com/zio/interop-guava) module provides the capability to convert [Guava's `com.google.common.util.concurrent.ListenableFuture`](https://github.com/google/guava/wiki/ListenableFutureExplained) into a ZIO `Task`.
diff --git a/website/versioned_docs/version-1.0.18/guides/interop/with-java.md b/website/versioned_docs/version-1.0.18/guides/interop/with-java.md
new file mode 100644
index 000000000000..c9b700a68638
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/guides/interop/with-java.md
@@ -0,0 +1,98 @@
+---
+id: with-java
+title: "How to Interop with Java?"
+---
+
+ZIO has full interoperability with foreign Java code. Let me show you how it works and then *BOOM*, tomorrow you can show off your purely functional Java at work.
+
+ZIO has built-in conversion between ZIO data types (like `ZIO` and `Fiber`) and Java concurrent data types like [`CompletionStage`](https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletionStage.html), [`Future`](https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Future.html) and [`CompletionHandler`](https://docs.oracle.com/javase/8/docs/api/java/nio/channels/CompletionHandler.html).
+
+## From Java CompletionStage and back
+
+`CompletionStage` is the interface that comes closest to emulate a functional asynchronous effects API like ZIO's, so we start with it. It's a breeze:
+
+```scala
+def loggedStage[A](stage: => CompletionStage[A]): Task[A] =
+ ZIO.fromCompletionStage(UIO {
+ stage.thenApplyAsync { a =>
+ println("Stage completed with " + a)
+ a
+ }
+ })
+```
+
+By Jove, you can even turn it into fiber!
+
+```scala
+def stageToFiber[A](stage: => CompletionStage[A]): Fiber[Throwable, A] =
+  Fiber.fromCompletionStage(stage)
+```
+
+This API creates a synthetic fiber which doesn't have any notion of identity.
+
+Additionally, you may want to go the other way and convert a ZIO value into a `CompletionStage`. Easy as pie:
+
+```scala
+def taskToStage[A](task: Task[A]): UIO[CompletableFuture[A]] =
+ task.toCompletableFuture
+```
+
+As you can see, it commits to a concrete class implementing the `CompletionStage` interface, i.e. `CompletableFuture`. It is worth pointing out that any `IO[E, A]` can be turned into a completable future provided you can turn a value of type `E` into a `Throwable`:
+
+```scala
+def ioToStage[E, A](io: IO[E, A])(toThrowable: E => Throwable): UIO[CompletableFuture[A]] =
+ io.toCompletableFutureWith(toThrowable)
+```
+
+## Java Future
+
+You can embed any `java.util.concurrent.Future` in a ZIO computation via `ZIO.fromFutureJava`. A toy wrapper around Apache Async HTTP client could look like:
+
+```scala
+def execute(client: HttpAsyncClient, request: HttpUriRequest): RIO[Blocking, HttpResponse] =
+ ZIO.fromFutureJava(UIO {
+ client.execute(request, null)
+ })
+```
+
+That's it. Just a bit of a warning here, mate. As you can see from the requirement on the produced value, ZIO uses the blocking `Future#get` call internally. It is running on the blocking thread pool, of course, but I thought you should know. If possible, use `ZIO.fromCompletionStage` instead, as detailed above.
+
+Should you need it, it is also possible to convert a future into a fiber using `Fiber.fromFutureJava`. Same same, but different:
+
+```scala
+def execute(client: HttpAsyncClient, request: HttpUriRequest): Fiber[Throwable, HttpResponse] =
+ Fiber.fromFutureJava {
+ client.execute(request, null)
+ }
+```
+
+## NIO Completion handler
+
+Java libraries using channels from the NIO API for asynchronous, interruptible I/O can be hooked into by providing completion handlers. As in, reading the contents of a file:
+
+```scala
+def readFile(file: AsynchronousFileChannel): Task[Chunk[Byte]] = for {
+ pos <- Ref.make(0)
+ buf <- ZIO.effectTotal(ByteBuffer.allocate(1024))
+ contents <- Ref.make[Chunk[Byte]](Chunk.empty)
+ def go = pos.get.flatMap { p =>
+ ZIO.effectAsyncWithCompletionHandler[Chunk[Byte]] { handler =>
+ file.read(buf, p, buf, handler)
+ }.flatMap {
+ case -1 => contents.get
+ case n =>
+ ZIO.effectTotal {
+ val arr = Array.ofDim[Byte](n)
+ buf.get(arr, 0, n)
+ buf.clear()
+ Chunk.fromArray(arr)
+ }.flatMap { slice =>
+ contents.update(_ ++ slice)
+ } *> pos.update(_ + n) *> go
+ }
+ }
+ dump <- go
+} yield dump
+```
+
+As you can see, ZIO provides a CPS-style API here which is a bit different from the two sections above, but hey still super elegant.
\ No newline at end of file
diff --git a/website/versioned_docs/version-1.0.18/guides/interop/with-javascript.md b/website/versioned_docs/version-1.0.18/guides/interop/with-javascript.md
new file mode 100644
index 000000000000..fddd118b8c79
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/guides/interop/with-javascript.md
@@ -0,0 +1,52 @@
+---
+id: with-javascript
+title: "How to Interop with JavaScript?"
+---
+
+Include ZIO in your Scala.js project by adding the following to your `build.sbt`:
+
+```
+scalaJSUseMainModuleInitializer := true
+libraryDependencies += "dev.zio" %%% "zio" % "1.0.18"
+```
+
+## Example
+
+Your main function can extend `App` as follows.
+This example uses [scala-js-dom](https://github.com/scala-js/scala-js-dom) to access the DOM; to run the example you
+will need to add that library as a dependency to your `build.sbt`.
+
+```scala
+import org.scalajs.dom.{document, raw}
+import zio._, zio.console._
+import zio.duration._, zio.clock._
+import java.util.concurrent.TimeUnit
+
+object Main extends App {
+
+ def run(args: List[String]) = {
+ for {
+ _ <- putStrLn("Starting progress bar demo.") // Outputs on browser console log.
+ target <- IO.effectTotal(document.createElement("pre"))
+ _ <- update(target).repeat(Schedule.spaced(1.seconds))
+ _ <- IO.effectTotal(node.appendChild(target)) // "node" is provided in this page by mdoc.
+ } yield ExitCode.success
+ }
+
+ def update(target: raw.Element) = {
+ for {
+ time <- currentTime(TimeUnit.SECONDS)
+ output <- UIO.effectTotal(progress((time % 11).toInt, 10))
+ _ <- UIO.effectTotal(target.innerHTML = output)
+ } yield ()
+ }
+
+ def progress(tick: Int, size: Int) = {
+ val bar_length = tick
+ val empty_length = size - tick
+ val bar = "#" * bar_length + " " * empty_length
+ s"$bar $bar_length%"
+ }
+
+}
+```
diff --git a/website/versioned_docs/version-1.0.18/guides/interop/with-monix.md b/website/versioned_docs/version-1.0.18/guides/interop/with-monix.md
new file mode 100644
index 000000000000..a0261778ac29
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/guides/interop/with-monix.md
@@ -0,0 +1,95 @@
+---
+id: with-monix
+title: "How to Interop with Monix?"
+---
+
+Check out the [`interop-monix`](https://github.com/zio/interop-monix) module for interoperation support.
+
+## `Task` conversions
+
+Interop layer provides the following conversions:
+
+- from a ZIO `Task[A]` to a Monix `Task[A]` (wrapped in `UIO`)
+- from a Monix `Task[A]` to a ZIO `Task[A]`
+
+To convert an `IO` value to `Task`, use the following method:
+
+```scala
+def toTask: UIO[eval.Task[A]]
+```
+
+To perform conversion in other direction, use the following extension method
+available on `IO` companion object:
+
+```scala
+def fromTask[A](task: eval.Task[A])(implicit scheduler: Scheduler): Task[A]
+```
+
+Note that in order to convert the `Task` to an `IO`, an appropriate `Scheduler`
+needs to be available.
+
+### Example
+
+```scala
+import monix.eval.Task
+import monix.execution.Scheduler.Implicits.global
+import zio.{ IO, Runtime }
+import zio.interop.monix._
+
+object UnsafeExample extends App {
+
+ val runtime = Runtime.default
+
+ def main(args: Array[String]): Unit = {
+ val io1 = IO.succeed(10)
+ val t1 = runtime.unsafeRun(io1.toTask)
+
+ t1.runToFuture.foreach(r => println(s"IO to task result is $r"))
+
+ val t2 = Task(10)
+ val io2 = IO.fromTask(t2).map(r => s"Task to IO result is $r")
+
+ println(runtime.unsafeRun(io2))
+ }
+}
+```
+
+## `Coeval` conversions
+
+To convert an `IO` value to `Coeval`, use the following method:
+
+```scala
+def toCoeval: UIO[eval.Coeval[A]]
+```
+
+To perform conversion in other direction, use the following extension method
+available on `IO` companion object:
+
+```scala
+def fromCoeval[A](coeval: eval.Coeval[A]): Task[A]
+```
+
+### Example
+
+```scala
+import monix.eval.Coeval
+import zio.{ IO, Runtime }
+import zio.interop.monix._
+
+object UnsafeExample extends App {
+
+ val runtime = Runtime.default
+
+ def main(args: Array[String]): Unit = {
+ val io1 = IO.succeed(10)
+ val c1 = runtime.unsafeRun(io1.toCoeval)
+
+ println(s"IO to coeval result is ${c1.value}")
+
+ val c2 = Coeval(10)
+ val io2 = IO.fromCoeval(c2).map(r => s"Coeval to IO result is $r")
+
+ println(runtime.unsafeRun(io2))
+ }
+}
+```
diff --git a/website/versioned_docs/version-1.0.18/guides/interop/with-reactive-streams.md b/website/versioned_docs/version-1.0.18/guides/interop/with-reactive-streams.md
new file mode 100644
index 000000000000..f1b4ec44c5d0
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/guides/interop/with-reactive-streams.md
@@ -0,0 +1,92 @@
+---
+id: with-reactive-streams
+title: "How to Interop with Reactive Streams?"
+---
+
+Check out the [`interop-reactive-streams`](https://github.com/zio/interop-reactive-streams) module for interoperation support.
+
+## Reactive Streams `Publisher` and `Subscriber`
+
+**ZIO** integrates with [Reactive Streams](http://reactivestreams.org) by providing conversions from `zio.stream.Stream` to `org.reactivestreams.Publisher`
+and from `zio.stream.Sink` to `org.reactivestreams.Subscriber` and vice versa. Simply import `import zio.interop.reactivestreams._` to make the
+conversions available.
+
+## Examples
+
+First, let's get a few imports out of the way.
+
+```scala
+import org.reactivestreams.example.unicast._
+import zio._
+import zio.interop.reactivestreams._
+import zio.stream._
+
+val runtime = Runtime.default
+```
+
+We use the following `Publisher` and `Subscriber` for the examples:
+
+```scala
+val publisher = new RangePublisher(3, 10)
+val subscriber = new SyncSubscriber[Int] {
+ override protected def whenNext(v: Int): Boolean = {
+ print(s"$v, ")
+ true
+ }
+}
+```
+
+### Publisher to Stream
+
+A `Publisher` used as a `Stream` buffers up to `qSize` elements. If possible, `qSize` should be
+a power of two for best performance. The default is 16.
+
+```scala
+val streamFromPublisher = publisher.toStream(qSize = 16)
+runtime.unsafeRun(
+ streamFromPublisher.run(Sink.collectAll[Integer])
+)
+```
+
+### Subscriber to Sink
+
+When running a `Stream` to a `Subscriber`, a side channel is needed for signalling failures.
+For this reason `toSink` returns a tuple of `Promise` and `Sink`. The `Promise` must be failed
+on `Stream` failure. The type parameter on `toSink` is the error type of *the Stream*.
+
+```scala
+val asSink = subscriber.toSink[Throwable]
+val failingStream = Stream.range(3, 13) ++ Stream.fail(new RuntimeException("boom!"))
+runtime.unsafeRun(
+ asSink.flatMap { case (errorP, sink) =>
+ failingStream.run(sink).catchAll(errorP.fail)
+ }
+)
+```
+
+### Stream to Publisher
+
+```scala
+val stream = Stream.range(3, 13)
+runtime.unsafeRun(
+ stream.toPublisher.flatMap { publisher =>
+ UIO(publisher.subscribe(subscriber))
+ }
+)
+```
+
+### Sink to Subscriber
+
+`toSubscriber` returns a `Subscriber` and an `IO` which completes with the result of running the
+`Sink` or the error if the `Publisher` fails.
+A `Sink` used as a `Subscriber` buffers up to `qSize` elements. If possible, `qSize` should be
+a power of two for best performance. The default is 16.
+
+```scala
+val sink = Sink.collectAll[Integer]
+runtime.unsafeRun(
+ sink.toSubscriber(qSize = 16).flatMap { case (subscriber, result) =>
+ UIO(publisher.subscribe(subscriber)) *> result
+ }
+)
+```
diff --git a/website/versioned_docs/version-1.0.18/guides/interop/with-scalaz-7x.md b/website/versioned_docs/version-1.0.18/guides/interop/with-scalaz-7x.md
new file mode 100644
index 000000000000..5a2824e5b89d
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/guides/interop/with-scalaz-7x.md
@@ -0,0 +1,41 @@
+---
+id: with-scalaz-7x
+title: "How to Interop with Scalaz 7.x?"
+---
+
+## `ZIO` Instances
+
+If you are a happy Scalaz 7.2 user, the [`interop-scalaz`](https://github.com/zio/interop-scalaz) module offers `ZIO` instances for several typeclasses.
+
+### Example
+
+```scala
+import scalaz._, Scalaz._
+import zio.interop.scalaz72._
+
+type Database = IList[User]
+
+def findUser(id: UserId): ZIO[Database, UserError, User] = ...
+def findUsers(ids: IList[UserId]): ZIO[Database, UserError, IList[User]] = ids.traverse(findUser(_))
+```
+
+## `ZIO` parallel `Applicative` instance
+
+Due to the `Applicative`-`Monad` coherence law, `ZIO`'s `Applicative` instance has to be implemented in terms of `bind`; hence, when composing multiple effects using `Applicative`, they will be sequenced. To cope with that limitation, `ZIO` tagged with `Parallel` has an `Applicative` instance which is not a `Monad` and operates in parallel.
+
+### Example
+
+```scala
+import scalaz._, Scalaz._
+import zio.interop.scalaz72._
+
+case class Dashboard(details: UserDetails, history: TransactionHistory)
+
+def getDetails(id: UserId): ZIO[Database, UserError, UserDetails] = ...
+def getHistory(id: UserId): ZIO[Database, UserError, TransactionHistory] = ...
+
+def buildDashboard(id: UserId): ZIO[Database, UserError, Dashboard] =
+ Tag.unwrap(^(par(getDetails(id)), par(getHistory(id)))(Dashboard.apply))
+
+def par[R, E, A](io: ZIO[R, E, A]): scalaz72.ParIO[R, E, A] = Tag(io)
+```
diff --git a/website/versioned_docs/version-1.0.18/guides/interop/with-twitter.md b/website/versioned_docs/version-1.0.18/guides/interop/with-twitter.md
new file mode 100644
index 000000000000..706bf0897323
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/guides/interop/with-twitter.md
@@ -0,0 +1,31 @@
+---
+id: with-twitter
+title: "How to Interop with Twitter?"
+---
+
+The [`interop-twitter`](https://github.com/zio/interop-twitter) module provides the capability to convert a [Twitter `Future`](https://twitter.github.io/util/docs/com/twitter/util/Future.html) into a ZIO `Task`.
+
+### Example
+
+```scala
+import com.twitter.util.Future
+import zio.{ App, Task }
+import zio.console._
+import zio.interop.twitter._
+
+object Example extends App {
+ def run(args: List[String]) = {
+ val program =
+ for {
+ _ <- putStrLn("Hello! What is your name?")
+ name <- getStrLn
+ greeting <- Task.fromTwitterFuture(Task(greet(name)))
+ _ <- putStrLn(greeting)
+ } yield ()
+
+ program.exitCode
+ }
+
+ private def greet(name: String): Future[String] = Future.value(s"Hello, $name!")
+}
+```
diff --git a/website/versioned_docs/version-1.0.18/guides/migrate/from-cats-efect.md b/website/versioned_docs/version-1.0.18/guides/migrate/from-cats-efect.md
new file mode 100644
index 000000000000..d6b4beb8ebe2
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/guides/migrate/from-cats-efect.md
@@ -0,0 +1,23 @@
+---
+id: from-cats-efect
+title: "How to Migrate from Cats Effect to ZIO?"
+---
+
+Cats `IO[A]` can be easily replaced with ZIO's `Task[A]` (an alias for `ZIO[Any, Throwable, A]`).
+Translation should be relatively straightforward. Below, you'll find tables showing the ZIO equivalents of
+various `cats.*`'s methods.
+
+### Methods on cats.FlatMap.Ops
+
+| cats | ZIO |
+|-------|-----|
+|`flatMap`|`flatMap`|
+|`flatten`|`flatten`|
+|`productREval`|`zipRight`|
+|`productLEval`|`zipLeft`|
+|`mproduct`|`zipPar`|
+|`flatTap`|`tap`|
+
+### TODO
+
+TODO
\ No newline at end of file
diff --git a/website/versioned_docs/version-1.0.18/guides/migrate/from-monix.md b/website/versioned_docs/version-1.0.18/guides/migrate/from-monix.md
new file mode 100644
index 000000000000..b181dd311f95
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/guides/migrate/from-monix.md
@@ -0,0 +1,98 @@
+---
+id: from-monix
+title: "How to Migrate from Monix to ZIO?"
+---
+
+Monix's `Task[A]` can be easily replaced with ZIO's `Task[A]` (an alias for `ZIO[Any, Throwable, A]`).
+Translation should be relatively straightforward. Below, you'll find tables showing the ZIO equivalents of
+ `monix.eval.Task`'s methods.
+
+Once you've completed the initial translation, you'll find that ZIO is outfitted with many additional
+methods which have no Monix equivalents, so have fun exploring the API and see if you can rewrite some
+of your logic at a higher level of abstraction, with more powerful combinators and fewer lines of code.
+
+If you are using operators from Cats Effect extension methods, see also
+[here](https://zio.dev/docs/guides/migrate/from-cats-effect).
+
+### Methods on Trait
+
+| Monix | ZIO |
+|-------|-----|
+| `attempt` | `either` |
+| `bracketCase` | `bracketExit` |
+| `bracketE` | `bracketExit` |
+| `bracket` | `bracket` |
+| `delayExecution` | `delay` |
+| `dematerialize` | `absolve` |
+| `doOnCancel` | `onInterrupt` |
+| `doOnFinish` | `onExit` |
+| `failed` | `flip` |
+| `flatMap` | `flatMap` |
+| `flatten` | `flatten` |
+| `guaranteeCase` | `ensuringExit` |
+| `guarantee` | `ensuring` |
+| `loopForever` | `forever` |
+| `materialize` | `either` |
+| `memoize` | `memoize` |
+| `onErrorFallbackTo` | `orElse` |
+| `onErrorHandleWith` | `catchAll` |
+| `onErrorRecoverWith` | `catchSome` |
+| `onErrorRestart` | `retryN` |
+| `redeemWith` | `foldM` |
+| `redeem` | `fold` |
+| `restartUntil` | `repeatUntil` |
+| `start` | `fork` |
+| `timed` | `timed` |
+| `timeout` | `timeout` |
+| `uncancelable` | `uninterruptible` |
+
+### Methods on Companion Object
+
+| Monix | ZIO |
+|-------|-----|
+| `apply` | `apply` |
+| `asyncF` | `effectAsyncM` |
+| `async` | `effectAsync` |
+| `cancelable` | `effectAsyncInterrupt` |
+| `deferFuture` | `fromFuture` |
+| `defer` | `effectSuspend` |
+| `delay` | `effect` |
+| `eval` | `effect` |
+| `fromEither` | `fromEither` |
+| `fromFuture` | `fromFuture` |
+| `fromTry` | `fromTry` |
+| `map2` | `mapN` |
+| `mapBoth` | `mapParN` |
+| `never` | `never` |
+| `now` | `succeed` |
+| `parMap2` | `mapParN` |
+| `parSequenceN` | `collectAllParN` |
+| `parSequence` | `collectAllPar` |
+| `parTraverseN` | `foreachParN` |
+| `parTraverse` | `foreachPar` |
+| `parZip2` | `tupledPar` |
+| `pure` | `succeed` |
+| `racePair` | `raceWith` |
+| `race` | `raceFirst` |
+| `raiseError` | `fail` |
+| `sequence` | `collectAll` |
+| `shift` | `yield` |
+| `sleep` | `sleep` |
+| `suspend` | `effectSuspend` |
+| `traverse` | `foreach` |
+| `unit` | `unit` |
+
+### Data Structures
+
+| Monix / Cats Effect | ZIO |
+|-------|-----|
+| `Deferred` | `Promise` |
+| `Fiber` | `Fiber` |
+| `MVar` | `Queue` |
+| `Ref` | `Ref` |
+| `Resource` | `ZManaged` |
+| `Semaphore` | `Semaphore` |
+| `TaskApp` | `App` |
+| `TaskLocal` | `FiberRef` |
+| `Task` | `Task` |
+
diff --git a/website/versioned_docs/version-1.0.18/guides/mock-services.md b/website/versioned_docs/version-1.0.18/guides/mock-services.md
new file mode 100644
index 000000000000..13f3f77afd00
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/guides/mock-services.md
@@ -0,0 +1,455 @@
+---
+id: mock-services
+title: "How to Mock Services?"
+---
+
+## How to test interactions between services?
+
+Whenever possible, we should strive to make our functions pure, which makes testing such functions easy - you just need to assert on the return value.
+However in larger applications there is a need for intermediate layers that delegate the work to specialized services.
+
+For example, in an HTTP server the first layer of indirection is the so-called _routes_, whose job is to match the request and delegate the processing to
+downstream layers. Often below there is a second layer of indirection, so called _controllers_, which consist of several business logic units grouped
+by their domain. In a RESTful API that would be all operations on a certain model. The _controller_ to perform its job might call on further
+specialized services for communicating with the database, sending email, logging, et cetera.
+
+If the job of the _capability_ is to call on another _capability_, how should we test it?
+
+## Hidden outputs
+
+A pure function is such a function which operates only on its inputs and produces only its output. Command-like methods, by definition are impure, as
+their job is to change state of the collaborating object (performing a _side effect_). For example:
+
+
+```scala
+import scala.concurrent.Future
+
+def processEvent(event: Event): Future[Unit] = Future(println(s"Got $event"))
+```
+
+The signature of this method `Event => Future[Unit]` hints that we're dealing with a command. It returns `Unit` (well, wrapped in future, but it does
+not matter here), you can't do anything useful with `Unit` and it does not contain any information. It is the equivalent of returning nothing. It is
+also an unreliable return type, as when Scala expects the return type to be `Unit` it will discard whatever value it had (for details see
+[Section 6.26.1][link-sls-6.26.1] of the Scala Language Specification), which may shadow the fact that the final value produced (and discarded) was
+not the one you expected.
+
+Any side effects may be happening inside the future. It may open a file, print to the console, or connect to databases. We simply don't know. Let's have a look at how this problem would be solved using ZIO's effect system:
+
+
+```scala
+import zio._
+import zio.console.Console
+
+def processEvent(event: Event): URIO[Console, Unit] =
+ console.putStrLn(s"Got $event").orDie
+```
+
+With ZIO, we've regained the ability to reason about the effects called. We know that `processEvent` can only call on _capabilities_ of `Console`, so even though we still have `Unit` as the result, we have narrowed the possible effects space to a few.
+
+> **Note:** this is true assuming the programmer disciplines themselves to only perform effects expressed in the type signature.
+> There is no way (at the moment) to enforce this by the compiler. There is some research done in this space, perhaps future programming languages
+> will enable us to further constrain side effects.
+
+However, the same method could be implemented as:
+
+```scala
+def processEvent2(event: Event): URIO[Console, Unit] =
+ ZIO.unit
+```
+
+How can we test it did exactly what we expected it to do?
+
+## Mocking
+
+In this sort of situation we need mock implementations of our collaborator service. As _Martin Fowler_ puts it in his excellent article
+[Mocks Aren't Stubs][link-test-doubles]:
+
+> **Mocks** are (...) objects pre-programmed with expectations which form a specification of the calls they are expected to receive.
+
+ZIO Test provides a framework for mocking your modules.
+
+## Creating a mock service
+
+We'll be assuming you've read about modules and layers in the [contextual types][doc-contextual-types] guide. In the main sources we define the _service_, a module alias and _capability accessors_. In test sources we're defining the _mock object_ which extends `zio.test.mock.Mock` which holds _capability tags_ and _compose layer_.
+
+```scala
+// main sources
+
+import zio.stream.{ ZSink, ZStream }
+import zio.test.mock._
+
+type Example = Has[Example.Service]
+
+object Example {
+ trait Service {
+ val static : UIO[String]
+ def zeroArgs : UIO[Int]
+ def zeroArgsWithParens() : UIO[Long]
+ def singleArg(arg1: Int) : UIO[String]
+ def multiArgs(arg1: Int, arg2: Long) : UIO[String]
+ def multiParamLists(arg1: Int)(arg2: Long) : UIO[String]
+ def command(arg1: Int) : UIO[Unit]
+ def overloaded(arg1: Int) : UIO[String]
+ def overloaded(arg1: Long) : UIO[String]
+ def function(arg1: Int) : String
+ def sink(a: Int) : ZSink[Any, String, Int, Int, List[Int]]
+ def stream(a: Int) : ZStream[Any, String, Int]
+ }
+}
+```
+
+```scala
+// test sources
+
+object ExampleMock extends Mock[Example] {
+ object Static extends Effect[Unit, Nothing, String]
+ object ZeroArgs extends Effect[Unit, Nothing, Int]
+ object ZeroArgsWithParens extends Effect[Unit, Nothing, Long]
+ object SingleArg extends Effect[Int, Nothing, String]
+ object MultiArgs extends Effect[(Int, Long), Nothing, String]
+ object MultiParamLists extends Effect[(Int, Long), Nothing, String]
+ object Command extends Effect[Int, Nothing, Unit]
+ object Overloaded {
+ object _0 extends Effect[Int, Nothing, String]
+ object _1 extends Effect[Long, Nothing, String]
+ }
+ object Function extends Method[Int, Throwable, String]
+ object Sink extends Sink[Any, String, Int, Int, List[Int]]
+ object Stream extends Stream[Any, String, Int]
+
+ val compose: URLayer[Has[Proxy], Example] = ???
+}
+```
+
+A _capability tag_ is just a value which extends the `zio.test.mock.Capability[R <: Has[_], I, E, A]` type constructor, where:
+- `R` is the type of environment the method belongs to
+- `I` is the type of methods input arguments
+- `E` is the type of error it can fail with
+- `A` is the type of return value it can produce
+
+The `Capability` type is not publicly available, instead you have to extend `Mock` dependent types `Effect`, `Method`, `Sink` or `Stream`.
+
+We model input arguments according to following scheme:
+- for zero arguments the type is `Unit`
+- for one or more arguments, regardless in how many parameter lists, the type is a `TupleN` where `N` is the size of arguments list
+
+> **Note:** we're using tuples to represent multiple-argument methods, which carries a limit of at most 22 arguments, since Scala itself is limited to tuples of 22 elements.
+
+For overloaded methods we nest a list of numbered objects, each representing subsequent overloads.
+
+Finally we need to define a _compose layer_ that can create our environment from a `Proxy`.
+A `Proxy` holds the mock state and serves predefined responses to calls.
+
+
+```scala
+import ExampleMock._
+
+val compose: URLayer[Has[Proxy], Example] =
+ ZLayer.fromServiceM { proxy =>
+ withRuntime.map { rts =>
+ new Example.Service {
+ val static = proxy(Static)
+ def zeroArgs = proxy(ZeroArgs)
+ def zeroArgsWithParens() = proxy(ZeroArgsWithParens)
+ def singleArg(arg1: Int) = proxy(SingleArg, arg1)
+ def multiArgs(arg1: Int, arg2: Long) = proxy(MultiArgs, arg1, arg2)
+ def multiParamLists(arg1: Int)(arg2: Long) = proxy(MultiParamLists, arg1, arg2)
+ def command(arg1: Int) = proxy(Command, arg1)
+ def overloaded(arg1: Int) = proxy(Overloaded._0, arg1)
+ def overloaded(arg1: Long) = proxy(Overloaded._1, arg1)
+ def function(arg1: Int) = rts.unsafeRunTask(proxy(Function, arg1))
+ def sink(a: Int) = rts.unsafeRun(proxy(Sink, a).catchAll(error => UIO(ZSink.fail[String, Int](error))))
+ def stream(a: Int) = rts.unsafeRun(proxy(Stream, a))
+ }
+ }
+ }
+```
+
+> **Note:** The `withRuntime` helper is defined in `Mock`. It accesses the Runtime via `ZIO.runtime` and if you're on JS platform, it will replace the executor to an unyielding one.
+
+A reference to this layer is passed to _capability tags_ so it can be used to automatically build environment for composed expectations on
+multiple services.
+
+> **Note:** for non-effectful capabilities you need to unsafely run the final effect to satisfy the required interface. For `ZSink` you also need to map the error into a failed sink as demonstrated above.
+
+## Complete example
+
+
+```scala
+// main sources
+
+import zio._
+import zio.console.Console
+import zio.test.mock._
+
+type AccountObserver = Has[AccountObserver.Service]
+
+object AccountObserver {
+ trait Service {
+ def processEvent(event: AccountEvent): UIO[Unit]
+ def runCommand(): UIO[Unit]
+ }
+
+ def processEvent(event: AccountEvent) =
+ ZIO.accessM[AccountObserver](_.get.processEvent(event))
+
+ def runCommand() =
+ ZIO.accessM[AccountObserver](_.get.runCommand)
+
+ val live: ZLayer[Console, Nothing, AccountObserver] =
+ ZLayer.fromService[Console.Service, Service] { console =>
+ new Service {
+ def processEvent(event: AccountEvent): UIO[Unit] =
+ for {
+ _ <- console.putStrLn(s"Got $event").orDie
+ line <- console.getStrLn.orDie
+ _ <- console.putStrLn(s"You entered: $line").orDie
+ } yield ()
+
+ def runCommand(): UIO[Unit] =
+ console.putStrLn("Done!").orDie
+ }
+ }
+}
+```
+
+```scala
+// test sources
+
+object AccountObserverMock extends Mock[AccountObserver] {
+
+ object ProcessEvent extends Effect[AccountEvent, Nothing, Unit]
+ object RunCommand extends Effect[Unit, Nothing, Unit]
+
+ val compose: URLayer[Has[Proxy], AccountObserver] =
+ ZLayer.fromService { proxy =>
+ new AccountObserver.Service {
+ def processEvent(event: AccountEvent) = proxy(ProcessEvent, event)
+ def runCommand(): UIO[Unit] = proxy(RunCommand)
+ }
+ }
+}
+```
+
+> **Note:** ZIO provides some useful macros to help you generate repetitive code, see [Scrapping the boilerplate with macros][doc-macros].
+
+## Provided ZIO services
+
+For each built-in ZIO service you will find their mockable counterparts in `zio.test.mock` package:
+- `MockClock` for `zio.clock.Clock`
+- `MockConsole` for `zio.console.Console`
+- `MockRandom` for `zio.random.Random`
+- `MockSystem` for `zio.system.System`
+
+## Setting up expectations
+
+To create expectations we use the previously defined _capability tags_:
+
+
+```scala
+import zio.test.Assertion._
+import zio.test.mock.Expectation._
+import zio.test.mock.MockSystem
+
+val exp01 = ExampleMock.SingleArg( // capability to build an expectation for
+ equalTo(42), // assertion of the expected input argument
+ value("bar") // result, that will be returned
+)
+```
+
+For methods that take input, the first argument will be an assertion on input, and the second the predefined result.
+
+In the most robust example, the result can be either a successful value or a failure. To construct either we must use
+one of following combinators from `zio.test.mock.Expectation` companion object:
+
+- `failure[E](failure: E)` Expectation result failing with `E`
+- `failureF[I, E](f: I => E)` Maps the input arguments `I` to expectation result failing with `E`.
+- `failureM[I, E](f: I => IO[E, Nothing])` Effectfully maps the input arguments `I` to expectation result failing with `E`.
+- `never` Expectation result computing forever.
+- `unit` Expectation result succeeding with `Unit`.
+- `value[A](value: A)` Expectation result succeeding with `A`.
+- `valueF[I, A](f: I => A)` Maps the input arguments `I` to expectation result succeeding with `A`.
+- `valueM[I, A](f: I => IO[Nothing, A])` Effectfully maps the input arguments `I` expectation result succeeding with `A`.
+
+For methods that take no input, we only define the expected output.
+
+```scala
+val exp02 = ExampleMock.ZeroArgs(value(42))
+```
+
+For methods that may return `Unit`, we may skip the predefined result (it will default to successful value) or use `unit` helper.
+
+```scala
+import zio.test.mock.MockConsole
+
+val exp03 = MockConsole.PutStrLn(equalTo("Welcome to ZIO!"))
+val exp04 = MockConsole.PutStrLn(equalTo("Welcome to ZIO!"), unit)
+```
+
+For methods that may return `Unit` and take no input we can skip both:
+
+```scala
+val exp05 = AccountObserverMock.RunCommand()
+```
+
+Finally we're all set and can create ad-hoc mock environments with our services.
+
+```scala
+import zio.test._
+
+val event = new AccountEvent {}
+val app: URIO[AccountObserver, Unit] = AccountObserver.processEvent(event)
+val mockEnv: ULayer[Console] = (
+ MockConsole.PutStrLn(equalTo(s"Got $event"), unit) ++
+ MockConsole.GetStrLn(value("42")) ++
+ MockConsole.PutStrLn(equalTo("You entered: 42"))
+)
+```
+
+We can combine our expectation to build complex scenarios using combinators defined in `zio.test.mock.Expectation`:
+
+- `andThen` (alias `++`) Compose two expectations, producing a new expectation to **satisfy both sequentially**.
+- `and` (alias `&&`) Compose two expectations, producing a new expectation to **satisfy both in any order**.
+- `or` (alias `||`) Compose two expectations, producing a new expectation to **satisfy only one of them**.
+- `repeated` Repeat expectation within given bounds, produces a new expectation to **satisfy itself sequentially given number of times**.
+- `atLeast` Lower-bounded variant of `repeated`, produces a new expectation to satisfy **itself sequentially at least given number of times**.
+- `atMost` Upper-bounded variant of `repeated`, produces a new expectation to satisfy **itself sequentially at most given number of times**.
+- `optional` Alias for `atMost(1)`, produces a new expectation to satisfy **itself at most once**.
+
+## Providing mocked environment
+
+```scala
+object AccountObserverSpec extends DefaultRunnableSpec {
+ def spec = suite("processEvent")(
+ testM("calls putStrLn > getStrLn > putStrLn and returns unit") {
+ val result = app.provideLayer(mockEnv >>> AccountObserver.live)
+ assertM(result)(isUnit)
+ }
+ )
+}
+```
+
+## Mocking unused collaborators
+
+Often the dependency on a collaborator is only in some branches of the code. To test the correct behaviour of branches without dependencies, we still have to provide it to the environment, but we would like to assert it was never called. With the `Mock.empty` method you can obtain a `ZLayer` with an empty service (no calls expected).
+
+```scala
+object MaybeConsoleSpec extends DefaultRunnableSpec {
+ def spec = suite("processEvent")(
+ testM("expect no call") {
+ def maybeConsole(invokeConsole: Boolean) =
+ ZIO.when(invokeConsole)(console.putStrLn("foo"))
+
+ val maybeTest1 = maybeConsole(false).provideLayer(MockConsole.empty)
+ val maybeTest2 = maybeConsole(true).provideLayer(MockConsole.PutStrLn(equalTo("foo")))
+ assertM(maybeTest1)(isUnit) *> assertM(maybeTest2)(isUnit)
+ }
+ )
+}
+```
+
+## Mocking multiple collaborators
+
+In some cases we have more than one collaborating service being called. You can create mocks for rich environments and, as you enrich the environment by using _capability tags_ from another service, the underlying mocked layer will be updated.
+
+```scala
+import zio.console.Console
+import zio.random.Random
+import zio.test.mock.MockRandom
+
+val combinedEnv: ULayer[Console with Random] = (
+ MockConsole.PutStrLn(equalTo("What is your name?")) ++
+ MockConsole.GetStrLn(value("Mike")) ++
+ MockRandom.NextInt(value(42)) ++
+ MockConsole.PutStrLn(equalTo("Mike, your lucky number today is 42!"))
+)
+
+val combinedApp =
+ for {
+ _ <- console.putStrLn("What is your name?")
+ name <- console.getStrLn.orDie
+ num <- random.nextInt
+ _ <- console.putStrLn(s"$name, your lucky number today is $num!")
+ } yield ()
+
+val result = combinedApp.provideLayer(combinedEnv)
+assertM(result)(isUnit)
+```
+
+## Polymorphic capabilities
+
+Mocking polymorphic methods is also supported, but the interface must require `zio.Tag` implicit evidence for each type parameter.
+
+```scala
+// main sources
+type PolyExample = Has[PolyExample.Service]
+
+object PolyExample {
+ trait Service {
+ def polyInput[I: Tag](input: I): Task[String]
+ def polyError[E: Tag](input: Int): IO[E, String]
+ def polyOutput[A: Tag](input: Int): Task[A]
+ def polyAll[I: Tag, E: Tag, A: Tag](input: I): IO[E, A]
+ }
+}
+```
+
+In the test sources we construct partially applied _capability tags_ by extending `Method.Poly` family. The unknown types
+must be provided at call site. To produce a final monomorphic `Method` tag we must use the `of` combinator and pass the
+missing types.
+
+```scala
+// test sources
+object PolyExampleMock extends Mock[PolyExample] {
+
+ object PolyInput extends Poly.Effect.Input[Throwable, String]
+ object PolyError extends Poly.Effect.Error[Int, String]
+ object PolyOutput extends Poly.Effect.Output[Int, Throwable]
+ object PolyAll extends Poly.Effect.InputErrorOutput
+
+ val compose: URLayer[Has[Proxy], PolyExample] =
+ ZLayer.fromServiceM { proxy =>
+ withRuntime.map { rts =>
+ new PolyExample.Service {
+ def polyInput[I: Tag](input: I) = proxy(PolyInput.of[I], input)
+ def polyError[E: Tag](input: Int) = proxy(PolyError.of[E], input)
+ def polyOutput[A: Tag](input: Int) = proxy(PolyOutput.of[A], input)
+ def polyAll[I: Tag, E: Tag, A: Tag](input: I) = proxy(PolyAll.of[I, E, A], input)
+ }
+ }
+ }
+}
+```
+
+Similarly, we use the same `of` combinator to refer to concrete monomorphic call in our test suite when building expectations:
+
+```scala
+import PolyExampleMock._
+
+val exp06 = PolyInput.of[String](equalTo("foo"), value("bar"))
+val exp07 = PolyInput.of[Int](equalTo(42), failure(new Exception))
+val exp08 = PolyInput.of[Long](equalTo(42L), value("baz"))
+
+val exp09 = PolyAll.of[Int, Throwable, String](equalTo(42), value("foo"))
+val exp10 = PolyAll.of[Int, Throwable, String](equalTo(42), failure(new Exception))
+```
+
+## More examples
+
+You can find more examples in the `examples` and `test-tests` subproject:
+
+- [MockExampleSpec][link-gh-mock-example-spec]
+- [EmptyMockSpec][link-gh-empty-mock-spec]
+- [ComposedMockSpec][link-gh-composed-mock-spec]
+- [ComposedEmptyMockSpec][link-gh-composed-empty-mock-spec]
+- [PolyMockSpec][link-gh-poly-mock-spec]
+
+[doc-contextual-types]: ../reference/contextual/index.md
+[doc-macros]: howto-macros.md
+[link-sls-6.26.1]: https://scala-lang.org/files/archive/spec/2.13/06-expressions.html#value-conversions
+[link-test-doubles]: https://martinfowler.com/articles/mocksArentStubs.html
+[link-gh-mock-example-spec]: https://github.com/zio/zio/blob/master/examples/shared/src/test/scala/zio/examples/test/MockExampleSpec.scala
+[link-gh-empty-mock-spec]: https://github.com/zio/zio/blob/master/test-tests/shared/src/test/scala/zio/test/mock/EmptyMockSpec.scala
+[link-gh-composed-mock-spec]: https://github.com/zio/zio/blob/master/test-tests/shared/src/test/scala/zio/test/mock/ComposedMockSpec.scala
+[link-gh-composed-empty-mock-spec]: https://github.com/zio/zio/blob/master/test-tests/shared/src/test/scala/zio/test/mock/ComposedEmptyMockSpec.scala
+[link-gh-poly-mock-spec]: https://github.com/zio/zio/blob/master/test-tests/shared/src/test/scala/zio/test/mock/PolyMockSpec.scala
diff --git a/website/versioned_docs/version-1.0.18/guides/test_effects.md b/website/versioned_docs/version-1.0.18/guides/test_effects.md
new file mode 100644
index 000000000000..3fc360eea6e3
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/guides/test_effects.md
@@ -0,0 +1,828 @@
+---
+id: test-effects
+title: "How to Test effects?"
+---
+
+## How zio-test was designed
+
+`zio-test` is designed around the idea of making tests first-class objects. What it means is that tests (and other accompanying concepts like assertions) become ordinary values that can be passed around, transformed and composed together. This approach allows for greater flexibility compared to some other testing frameworks, where tests and additional logic around tests had to be put into callbacks so that the framework could make use of them. This approach also fits better with other `ZIO` concepts like `ZManaged`, which can only be used within a scoped block of code. This also created a mismatch between `BeforeAll` and `AfterAll` callback-like methods when there were resources that should be opened and closed during test suite execution.
+Another thing worth pointing out is that tests, being values, are also effects. The implications of this design are far-reaching. First of all, the well-known problem of testing asynchronous values is gone. Whereas in other frameworks you have to somehow "run" your effects
+and at best wrap them in `scala.util.Future` because blocking would eliminate running on ScalaJS, `zio-test` expects you to create `ZIO` objects. There is no need for indirect transformations from one wrapping object to another. Second, because our tests are ordinary `ZIO` values, we don't need to turn to a testing framework for things like retries, timeouts and resource management. We can solve all those problems with the full richness of functions that `ZIO` exposes.
+
+## Constructing tests
+
+All below code assumes that you have imported `zio.test._`
+
+The backbone of `zio-test` is the `Spec[L, T]` class. Every spec is labeled with `L` and can be a suite which contains other specs or a test of type `T`.
+
+The most common and easy way to create suites is to use `suite` function. For testing of pure functions there is `test` function and for effectful testing there is `testM`
+
+```scala
+import zio.test._
+import zio.test.environment.Live
+import zio.clock.nanoTime
+import Assertion.isGreaterThan
+
+val clockSuite = suite("clock") (
+ testM("time is non-zero") {
+ assertM(Live.live(nanoTime))(isGreaterThan(0L))
+ }
+)
+// clockSuite: Spec[Live, TestFailure[Nothing], TestSuccess] = Spec(
+// caseValue = LabeledCase(
+// label = "clock",
+// spec = Spec(
+// caseValue = MultipleCase(
+// specs = IndexedSeq(
+// Spec(
+// caseValue = LabeledCase(
+// label = "time is non-zero",
+// spec = Spec(
+// caseValue = TestCase(
+// test = ,
+// annotations = Map(zio.test.TestAnnotation@fa40ba79 -> List(SourceLocation(test_effects.md,21)))
+// )
+// )
+// )
+// )
+// )
+// )
+// )
+// )
+// )
+```
+
+As you can see, the whole suite was assigned to the `clockSuite` val. As was said, suites can contain other suites, so we can aggregate them as much as needed. For example, we can have multiple suites that test external HTTP APIs and one big suite that will aggregate them all.
+
+
+```scala
+import zio.test._
+import Assertion._
+
+val paymentProviderABCSuite = suite("ABC payment provider tests") {test("Your test")(assert("Your value")(Assertion.isNonEmptyString))}
+// paymentProviderABCSuite: Spec[Any, TestFailure[Nothing], TestSuccess] = Spec(
+// caseValue = LabeledCase(
+// label = "ABC payment provider tests",
+// spec = Spec(
+// caseValue = MultipleCase(
+// specs = IndexedSeq(
+// Spec(
+// caseValue = LabeledCase(
+// label = "Your test",
+// spec = Spec(
+// caseValue = TestCase(
+// test = ,
+// annotations = Map(zio.test.TestAnnotation@fa40ba79 -> List(SourceLocation(test_effects.md,36)))
+// )
+// )
+// )
+// )
+// )
+// )
+// )
+// )
+// )
+val paymentProviderXYZSuite = suite("XYZ payment provider tests") {test("Your other test")(assert("Your other value")(Assertion.isNonEmptyString))}
+// paymentProviderXYZSuite: Spec[Any, TestFailure[Nothing], TestSuccess] = Spec(
+// caseValue = LabeledCase(
+// label = "XYZ payment provider tests",
+// spec = Spec(
+// caseValue = MultipleCase(
+// specs = IndexedSeq(
+// Spec(
+// caseValue = LabeledCase(
+// label = "Your other test",
+// spec = Spec(
+// caseValue = TestCase(
+// test = ,
+// annotations = Map(zio.test.TestAnnotation@fa40ba79 -> List(SourceLocation(test_effects.md,39)))
+// )
+// )
+// )
+// )
+// )
+// )
+// )
+// )
+// )
+val allPaymentProvidersTests = suite("All payment providers tests")(paymentProviderABCSuite, paymentProviderXYZSuite)
+// allPaymentProvidersTests: Spec[Any, TestFailure[Nothing], TestSuccess] = Spec(
+// caseValue = LabeledCase(
+// label = "All payment providers tests",
+// spec = Spec(
+// caseValue = MultipleCase(
+// specs = IndexedSeq(
+// Spec(
+// caseValue = LabeledCase(
+// label = "ABC payment provider tests",
+// spec = Spec(
+// caseValue = MultipleCase(
+// specs = IndexedSeq(
+// Spec(
+// caseValue = LabeledCase(
+// label = "Your test",
+// spec = Spec(
+// caseValue = TestCase(
+// test = ,
+// annotations = Map(zio.test.TestAnnotation@fa40ba79 -> List(SourceLocation(test_effects.md,36)))
+// )
+// )
+// )
+// )
+// )
+// )
+// )
+// )
+// ),
+// Spec(
+// caseValue = LabeledCase(
+// label = "XYZ payment provider tests",
+// spec = Spec(
+// caseValue = MultipleCase(
+// specs = IndexedSeq(
+// Spec(
+// caseValue = LabeledCase(
+// label = "Your other test",
+// spec = Spec(
+// caseValue = TestCase(
+// test = ,
+// annotations = Map(zio.test.TestAnnotation@fa40ba79 -> List(SourceLocation(test_effects.md,39)))
+// )
+// )
+// )
+// )
+// )
+// )
+// ...
+```
+
+Real tests that run some logic and return a test result are created mostly with the `testM` function. It expects two arguments: the first is the label of the test,
+which will be used for visual reporting back to the user, and the second is an assertion of type
+`ZIO[R, E, TestResult]`. This means writing test in `zio-test` mostly gets down to creating a `ZIO` object that
+will produce `TestResult`. There is another variant of function for creating test that are pure called simply `test`.
+It expects a thunk of code that will just return a `TestResult` without packing it into `ZIO`.
+
+### Assertions - creating TestResults
+
+As it was already mentioned tests should return `TestResult`. The most common way to produce a `TestResult`
+is to resort to `assert` or its effectful counterpart `assertM`. Both of them accept a value of type `A` (effectful version wrapped in a `ZIO`) and an `Assertion[A]`.
+To create `Assertion[A]` object one can use functions defined under `zio.test.Assertion`. There are already a number
+of useful assertions predefined like `equalTo`, `isFalse`, `isTrue`, `contains`, `throws` and more.
+What is really useful in assertions is that they behave like boolean values and can be composed with operators
+known from operating on boolean values like and (`&&`), or (`||`), negation (`negate`).
+
+```scala
+import zio.test.Assertion
+
+val assertionForString: Assertion[String] = Assertion.containsString("Foo") && Assertion.endsWithString("Bar")
+// assertionForString: Assertion[String] = (containsString(Foo) && endsWithString(Bar))
+```
+
+What's more, assertions also compose with each other allowing for doing rich diffs not only simple value to value comparison.
+
+```scala
+import zio.test.Assertion.{isRight, isSome,equalTo, hasField}
+
+test("Check assertions") {
+ assert(Right(Some(2)))(isRight(isSome(equalTo(2))))
+}
+// res0: ZSpec[Any, Nothing] = Spec(
+// caseValue = LabeledCase(
+// label = "Check assertions",
+// spec = Spec(
+// caseValue = TestCase(
+// test = ,
+// annotations = Map(zio.test.TestAnnotation@fa40ba79 -> List(SourceLocation(test_effects.md,60)))
+// )
+// )
+// )
+// )
+```
+
+Here we're checking deeply nested values inside an `Either` and `Option`. Because `Assertion`s compose this is not a problem
+all layers are peeled off and tested for their condition until the final value is reached.
+Here the expression `Right(Some(2))` is of type `Either[Any, Option[Int]]`and our assertion `isRight(isSome(equalTo(2)))`
+is of type `Assertion[Either[Any, Option[Int]]]`
+
+
+```scala
+import zio.test._
+import zio.test.Assertion.{isRight, isSome,equalTo, isGreaterThanEqualTo, not, hasField}
+
+final case class Address(country:String, city:String)
+final case class User(name:String, age:Int, address: Address)
+
+test("Rich checking") {
+ assert(
+ User("Jonny", 26, Address("Denmark", "Copenhagen"))
+ )(
+ hasField("age", (u:User) => u.age, isGreaterThanEqualTo(18)) &&
+ hasField("country", (u:User) => u.address.country, not(equalTo("USA")))
+ )
+}
+// res2: ZSpec[Any, Nothing] = Spec(
+// caseValue = LabeledCase(
+// label = "Rich checking",
+// spec = Spec(
+// caseValue = TestCase(
+// test = ,
+// annotations = Map(zio.test.TestAnnotation@fa40ba79 -> List(SourceLocation(test_effects.md,83)))
+// )
+// )
+// )
+// )
+```
+There is also an easy way to test an object's data for certain assertions with `hasField`, which accepts, besides a name, a mapping function from the object to its tested property and an `Assertion` object which will validate this property. Here our test checks whether a person is at least 18 years old and is not from the USA. What is nice about these tests is that test reporters will tell you exactly which assertion was broken. Let's say we would change `isGreaterThanEqualTo(18)` to `isGreaterThanEqualTo(40)`, which will fail. The printout
+on console will be a nice detailed text explaining what exactly went wrong:
+
+```bash
+[info] User(Jonny,26,Address(Denmark,Copenhagen)) did not satisfy (hasField("age", _.age, isGreaterThanEqualTo(45)) && hasField("country", _.country, not(equalTo(USA))))
+[info] 26 did not satisfy isGreaterThanEqualTo(45)
+```
+
+
+Having this all in mind probably the most common and also most readable way of structuring tests is to pass
+a for-comprehension to `testM` function and yield a call to `assert` function.
+
+```scala
+import zio._
+import zio.test._
+import Assertion._
+
+testM("Semaphore should expose available number of permits") {
+ for {
+ s <- Semaphore.make(1L)
+ permits <- s.available
+ } yield assert(permits)(equalTo(1L))
+}
+// res3: ZSpec[Any, Nothing] = Spec(
+// caseValue = LabeledCase(
+// label = "Semaphore should expose available number of permits",
+// spec = Spec(
+// caseValue = TestCase(
+// test = ,
+// annotations = Map(zio.test.TestAnnotation@fa40ba79 -> List(SourceLocation(test_effects.md,105)))
+// )
+// )
+// )
+// )
+```
+
+### Running tests
+
+When all of our tests are constructed, we need to have a way to actually execute them. Your first stop is the `zio.test.DefaultRunnableSpec` which accepts a single suite that will be executed. A single suite might seem to be limiting but as it was already said suites can hold any number of other suites. You may structure your tests like this:
+
+
+```scala
+import zio.test._
+import zio.clock.nanoTime
+import Assertion._
+
+val suite1 = suite("suite1") (
+ testM("s1.t1") {assertM(nanoTime)(isGreaterThanEqualTo(0L))},
+ testM("s1.t2") {assertM(nanoTime)(isGreaterThanEqualTo(0L))}
+)
+// suite1: Spec[clock.package.Clock, TestFailure[Nothing], TestSuccess] = Spec(
+// caseValue = LabeledCase(
+// label = "suite1",
+// spec = Spec(
+// caseValue = MultipleCase(
+// specs = IndexedSeq(
+// Spec(
+// caseValue = LabeledCase(
+// label = "s1.t1",
+// spec = Spec(
+// caseValue = TestCase(
+// test = ,
+// annotations = Map(zio.test.TestAnnotation@fa40ba79 -> List(SourceLocation(test_effects.md,126)))
+// )
+// )
+// )
+// ),
+// Spec(
+// caseValue = LabeledCase(
+// label = "s1.t2",
+// spec = Spec(
+// caseValue = TestCase(
+// test = ,
+// annotations = Map(zio.test.TestAnnotation@fa40ba79 -> List(SourceLocation(test_effects.md,127)))
+// )
+// )
+// )
+// )
+// )
+// )
+// )
+// )
+// )
+val suite2 = suite("suite2") (
+ testM("s2.t1") {assertM(nanoTime)(isGreaterThanEqualTo(0L))},
+ testM("s2.t2") {assertM(nanoTime)(isGreaterThanEqualTo(0L))},
+ testM("s2.t3") {assertM(nanoTime)(isGreaterThanEqualTo(0L))}
+)
+// suite2: Spec[clock.package.Clock, TestFailure[Nothing], TestSuccess] = Spec(
+// caseValue = LabeledCase(
+// label = "suite2",
+// spec = Spec(
+// caseValue = MultipleCase(
+// specs = IndexedSeq(
+// Spec(
+// caseValue = LabeledCase(
+// label = "s2.t1",
+// spec = Spec(
+// caseValue = TestCase(
+// test = ,
+// annotations = Map(zio.test.TestAnnotation@fa40ba79 -> List(SourceLocation(test_effects.md,132)))
+// )
+// )
+// )
+// ),
+// Spec(
+// caseValue = LabeledCase(
+// label = "s2.t2",
+// spec = Spec(
+// caseValue = TestCase(
+// test = ,
+// annotations = Map(zio.test.TestAnnotation@fa40ba79 -> List(SourceLocation(test_effects.md,133)))
+// )
+// )
+// )
+// ),
+// Spec(
+// caseValue = LabeledCase(
+// label = "s2.t3",
+// spec = Spec(
+// caseValue = TestCase(
+// test = ,
+// annotations = Map(zio.test.TestAnnotation@fa40ba79 -> List(SourceLocation(test_effects.md,134)))
+// )
+// )
+// )
+// )
+// )
+// )
+// )
+// )
+// )
+val suite3 = suite("suite3") (
+ testM("s3.t1") {assertM(nanoTime)(isGreaterThanEqualTo(0L))}
+)
+// suite3: Spec[clock.package.Clock, TestFailure[Nothing], TestSuccess] = Spec(
+// caseValue = LabeledCase(
+// label = "suite3",
+// spec = Spec(
+// caseValue = MultipleCase(
+// specs = IndexedSeq(
+// Spec(
+// caseValue = LabeledCase(
+// label = "s3.t1",
+// spec = Spec(
+// caseValue = TestCase(
+// test = ,
+// annotations = Map(zio.test.TestAnnotation@fa40ba79 -> List(SourceLocation(test_effects.md,139)))
+// )
+// )
+// )
+// )
+// )
+// )
+// )
+// )
+// )
+
+object AllSuites extends DefaultRunnableSpec {
+ def spec = suite("All tests")(suite1, suite2, suite3)
+}
+```
+
+`DefaultRunnableSpec` is very similar in its logic of operations to `zio.App`. Instead of providing one `ZIO` application
+at the end of the world we provide a suite that can be a tree of other suites and tests. Another resemblance is that `DefaultRunnableSpec` provides an Environment. Here it is an instance of `TestEnvironment` which helps us with controlling our systems infrastructure. More info on using test environment can be found in sections below.
+Just like with `zio.App`, where at the very end an instance of `ZIO[R,E,A]` is expected and `R` can be at most of type `Environment`, in `DefaultRunnableSpec` `R` cannot be more than `TestEnvironment`. So just like in a normal application, if our
+`R` is composed of some other modules we need to provide them first before test can be executed. How can we provide our dependencies?
+Here again the design of `zio-test` shines. Since our tests are ordinary values we can just transform them with a call to `mapTest`.
+It accepts a lambda of type `ZIO[R with TestSystem, TestFailure[Throwable], TestSuccess[Unit] ] => T1`. Without getting into too much detail about the types, we can see that our lambda argument is a test instance (`ZIO`) that expects an environment of type `R with TestSystem`. This is no different from normal usage of ZIO in `zio.App`. We can use the same `provide` and `provideSome` methods to provide modules which `DefaultRunnableSpec` cannot provide itself, as those are user modules. When all dependencies are provided we can run our tests in two ways. If we added `zio-test-sbt` to our dependencies and `zio.test.sbt.TestFramework` to SBT's `testFrameworks`, our tests should be automatically picked up by SBT on invocation of `test`. However, if we're not using SBT or have some other special needs, `DefaultRunnableSpec` has a `main` method which can be invoked directly or with SBT's `test:run`.
+
+```sbt
+libraryDependencies ++= Seq(
+ "dev.zio" %% "zio-test" % zioVersion % "test",
+ "dev.zio" %% "zio-test-sbt" % zioVersion % "test"
+),
+testFrameworks += new TestFramework("zio.test.sbt.ZTestFramework")
+```
+
+## Using Test Environment
+
+What we expect from tests (at least those that we consider unit tests) is to be stable i.e. consecutive runs should yield the same results and take
+more or less the same amount of time. The biggest source of complexity during testing comes from external services which we cannot control, like external
+payment APIs, object storages, http APIs etc. It is normal to hide these kind of services behind an interface and provide test instances to regain
+control and determinism. However, there is another source of complexity that comes from the local infrastructure that is also hard to control without building prior abstractions. Things like stdin/stdout, clocks, random generators, and schedulers can make writing tests hard or even impossible. Fortunately ZIO abstracted most of it in its runtime under the `Environment` type. Thanks to this design `zio-test` could easily provide its own implementation named `TestEnvironment`, which gives you test implementations of the mentioned infrastructure. In most cases, when you're using `ZIO`'s `testM`, test implementations are already created and should be controlled by the functions exposed on the companion object. If for some reason you would like to provide a custom environment, or are using another testing framework but still want to use the test environment, there are `make` functions on the companion objects of the test modules where you can construct your own.
+
+It is easy to accidentally use different test instances at the same time.
+
+```scala
+import zio.test._
+import zio.test.environment.TestClock
+import Assertion._
+import zio.duration._
+
+testM("`acquire` doesn't leak permits upon cancellation") {
+ for {
+ testClock <- TestClock.makeTest(TestClock.DefaultData)
+ s <- Semaphore.make(1L)
+ sf <- s.acquireN(2).timeout(1.millisecond).either.fork
+ _ <- testClock.adjust(1.second)
+ _ <- sf.join
+ _ <- s.release
+ permits <- s.available
+ } yield assert(permits, equalTo(2L))
+}
+```
+
+The above code doesn't work. We created a new `TestClock` instance and are correctly adjusting its time. What might be surprising is that the call to `timeout` will use the `TestClock` provided by the `TestEnvironment`, not our `testClock` instance. It is easy to see why when you look at the signature of `timeout`:
+
+```scala
+import zio.duration.Duration
+import zio.clock.Clock
+
+sealed trait ZIO[-R, +E, +A] extends Serializable { self =>
+ /* All other method declarations in this trait ignored to avoid clutter */
+
+ def timeout(d: Duration): ZIO[R with Clock, E, Option[A]]
+}
+```
+
+The returned type is `ZIO[R with Clock, E, Option[A]]` where our environment is "some R plus a Clock".
+Before running this `Clock` has to be provided and the framework provides the Clock from the `TestEnvironment` not our instance variable as it is not aware that we created it.
+
+If you need to provide real implementations instead of the test instances to some part of your tests there is a `live` method which will transform your `ZIO[R, E, A]` to `ZIO[Live[R], E, A]`. Going from `R` to `Live[R]` instructs the framework that we really want to be provided with live implementations.
+
+### Testing Random
+
+When working with randomness, testing might be hard because the inputs to the tested function change on every invocation, so our code behaves in a nondeterministic way. Precisely for this reason `ZIO` exposes the `TestRandom` module, which allows for fully deterministic testing of code
+that deals with Randomness.
+`TestRandom` can operate in two modes based on the needed use case. In the first mode it is a purely functional pseudo-random number generator. During generation of random values, like when calling `nextInt`, no internal state is being mutated. It is expected to chain such operations with combinators like `flatMap`. To preserve the same values generated between invocations of tests, the `setSeed` method can be used. It is guaranteed to return the same sequence of values for any given seed.
+
+```scala
+import zio.test.assert
+import zio.test.environment.TestRandom
+import zio.test.Assertion.equalTo
+
+testM("Use setSeed to generate stable values") {
+ for {
+ _ <- TestRandom.setSeed(27)
+ r1 <- random.nextLong
+ r2 <- random.nextLong
+ r3 <- random.nextLong
+ } yield
+ assert(List(r1,r2,r3))(equalTo(List[Long](
+ -4947896108136290151L,
+ -5264020926839611059L,
+ -9135922664019402287L
+ )))
+}
+// res4: ZSpec[TestRandom with random.package.Random, Nothing] = Spec(
+// caseValue = LabeledCase(
+// label = "Use setSeed to generate stable values",
+// spec = Spec(
+// caseValue = TestCase(
+// test = ,
+// annotations = Map(zio.test.TestAnnotation@fa40ba79 -> List(SourceLocation(test_effects.md,176)))
+// )
+// )
+// )
+// )
+```
+
+In the second mode `TestRandom` maintains an internal buffer of values that can be "fed" upfront with methods such as `feedInts`. When random values are being generated, values from that buffer are used first.
+
+```scala
+import zio.test.environment.TestRandom
+testM("One can provide its own list of ints") {
+ for {
+ _ <- TestRandom.feedInts(1, 9, 2, 8, 3, 7, 4, 6, 5)
+ r1 <- random.nextInt
+ r2 <- random.nextInt
+ r3 <- random.nextInt
+ r4 <- random.nextInt
+ r5 <- random.nextInt
+ r6 <- random.nextInt
+ r7 <- random.nextInt
+ r8 <- random.nextInt
+ r9 <- random.nextInt
+ } yield assert(
+ List(1, 9, 2, 8, 3, 7, 4, 6, 5)
+ )(equalTo(List(r1, r2, r3, r4, r5, r6, r7, r8, r9)))
+}
+// res5: ZSpec[TestRandom with random.package.Random, Nothing] = Spec(
+// caseValue = LabeledCase(
+// label = "One can provide its own list of ints",
+// spec = Spec(
+// caseValue = TestCase(
+// test = ,
+// annotations = Map(zio.test.TestAnnotation@fa40ba79 -> List(SourceLocation(test_effects.md,197)))
+// )
+// )
+// )
+// )
+```
+
+When the buffer runs out of values, `TestRandom` falls back to the first mode. If we want to, we can also clear the internal buffers by calling a method
+like `clearInts`.
+
+### Testing Clock
+
+In most cases you want unit tests to be as fast as possible. Waiting for real time to pass by is a real killer for this. ZIO exposes a `TestClock` in `TestEnvironment` that can control time so we can deterministically and efficiently test effects involving the passage of time without actually having to wait for the full amount of time to pass. Calls to `sleep` and methods derived from it will semantically block until the clock time is set/adjusted to on or after the time the effect is scheduled to run.
+
+#### Clock Time
+Clock time is just like a clock on the wall, except that in our `TestClock`, the clock is broken. Instead of moving by itself, the clock time only changes when adjusted or set by the user, using the `adjust` and `setTime` methods. The clock time never changes by itself. When the clock is adjusted, any effects scheduled to run on or before the new clock time will automatically be run, in order.
+
+#### Examples
+
+**Example 1**
+
+Thanks to the call to `TestClock.adjust(1.minute)` we moved the time instantly 1 minute.
+
+```scala
+import java.util.concurrent.TimeUnit
+import zio.clock.currentTime
+import zio.duration._
+import zio.test.Assertion.isGreaterThanEqualTo
+import zio.test._
+import zio.test.environment.TestClock
+
+testM("One can move time very fast") {
+ for {
+ startTime <- currentTime(TimeUnit.SECONDS)
+ _ <- TestClock.adjust(1.minute)
+ endTime <- currentTime(TimeUnit.SECONDS)
+ } yield assert(endTime - startTime)(isGreaterThanEqualTo(60L))
+}
+// res6: ZSpec[TestClock with Clock, Nothing] = Spec(
+// caseValue = LabeledCase(
+// label = "One can move time very fast",
+// spec = Spec(
+// caseValue = TestCase(
+// test = ,
+// annotations = Map(zio.test.TestAnnotation@fa40ba79 -> List(SourceLocation(test_effects.md,236)))
+// )
+// )
+// )
+// )
+```
+
+**Example 2**
+
+`TestClock` also affects all code running asynchronously that is scheduled to run after a certain time.
+
+```scala
+import zio.duration._
+import zio.test.Assertion.equalTo
+import zio.test._
+import zio.test.environment.TestClock
+
+testM("One can control time as he see fit") {
+ for {
+ promise <- Promise.make[Unit, Int]
+ _ <- (ZIO.sleep(10.seconds) *> promise.succeed(1)).fork
+ _ <- TestClock.adjust(10.seconds)
+ readRef <- promise.await
+ } yield assert(1)(equalTo(readRef))
+}
+// res7: ZSpec[Clock with TestClock, Unit] = Spec(
+// caseValue = LabeledCase(
+// label = "One can control time as he see fit",
+// spec = Spec(
+// caseValue = TestCase(
+// test = ,
+// annotations = Map(zio.test.TestAnnotation@fa40ba79 -> List(SourceLocation(test_effects.md,260)))
+// )
+// )
+// )
+// )
+```
+
+The above code creates a write-once cell that will be set to "1" after 10 seconds asynchronously from a different thread thanks to the call to `fork`. At the end we wait on the promise until it is set. With the call to `TestClock.adjust(10.seconds)` we simulate the passing of 10 seconds of time. Because of it we don't need to wait for the real 10 seconds to pass, and thus our unit test can run faster. This is a pattern that will very often be used when `sleep` and `TestClock` are used for testing effects that are based on time. The fiber that needs to sleep will be forked, and `TestClock` will be used to adjust the time so that all expected effects are run in the forked fiber.
+
+**Example 3**
+
+A more complex example leveraging layers and multiple services is shown below.
+
+```scala
+import zio.clock.Clock
+import zio.duration._
+import zio.test.Assertion._
+import zio.test._
+import zio.test.environment.{ TestClock, TestEnvironment }
+import zio._
+
+trait SchedulingService {
+ def schedule(promise: Promise[Unit, Int]): ZIO[Any, Exception, Boolean]
+}
+
+trait LoggingService {
+ def log(msg: String): ZIO[Any, Exception, Unit]
+}
+
+val schedulingLayer: ZLayer[Clock with Has[LoggingService], Nothing, Has[SchedulingService]] =
+ ZLayer.fromFunction { env =>
+ new SchedulingService {
+ def schedule(promise: Promise[Unit, Int]): ZIO[Any, Exception, Boolean] =
+ (ZIO.sleep(10.seconds) *> promise.succeed(1))
+ .tap(b => ZIO.service[LoggingService].flatMap(_.log(b.toString)))
+ .provide(env)
+ }
+}
+// schedulingLayer: ZLayer[Clock with Has[LoggingService], Nothing, Has[SchedulingService]] = Managed(
+// self = zio.ZManaged$$anon$2@58d7efdd
+// )
+
+testM("One can control time for failing effects too") {
+ val failingLogger = ZLayer.succeed(new LoggingService {
+ override def log(msg: String): ZIO[Any, Exception, Unit] = ZIO.fail(new Exception("BOOM"))
+ })
+
+ val partialLayer = (ZLayer.identity[Clock] ++ failingLogger) >>> schedulingLayer
+
+ val testCase =
+ for {
+ promise <- Promise.make[Unit, Int]
+ result <- ZIO.service[SchedulingService].flatMap(_.schedule(promise)).run.fork
+ _ <- TestClock.adjust(10.seconds)
+ readRef <- promise.await
+ result <- result.join
+ } yield assert(1)(equalTo(readRef)) && assert(result)(fails(isSubtype[Exception](anything)))
+ testCase.provideSomeLayer[TestEnvironment](partialLayer)
+}
+// res9: ZSpec[TestEnvironment, Unit] = Spec(
+// caseValue = LabeledCase(
+// label = "One can control time for failing effects too",
+// spec = Spec(
+// caseValue = TestCase(
+// test = ,
+// annotations = Map(zio.test.TestAnnotation@fa40ba79 -> List(SourceLocation(test_effects.md,315)))
+// )
+// )
+// )
+// )
+```
+
+In this case we want to test a layered effect that can potentially fail with an error. To do this we need to run the effect
+and use assertions that expect an `Exit` value.
+Because we are providing a layer to the test we need to provide everything expected by our test case and leave the test
+environment behind using `.provideSomeLayer[TestEnvironment]`. Keep in mind we do not provide any implementation of the `Clock`
+because doing so would force `SchedulingService` to use it, while the clock we need here is the `TestClock` provided by
+the test environment.
+
+The pattern with `Promise` and `await` can be generalized when we need to wait for multiple values using a `Queue`. We simply need to put multiple values into the queue and progress the clock multiple times, and there is no need to create multiple promises. Even if you have a non-trivial flow of data from multiple streams that can produce at different intervals, and would like to test snapshots of data at a particular point in time, `Queue` can help with that.
+
+```scala
+import zio.duration._
+import zio.test.Assertion.equalTo
+import zio.test._
+import zio.test.environment.TestClock
+import zio.stream._
+
+testM("zipWithLatest") {
+ val s1 = Stream.iterate(0)(_ + 1).fixed(100.milliseconds)
+ val s2 = Stream.iterate(0)(_ + 1).fixed(70.milliseconds)
+ val s3 = s1.zipWithLatest(s2)((_, _))
+
+ for {
+ q <- Queue.unbounded[(Int, Int)]
+ _ <- s3.foreach(q.offer).fork
+ fiber <- ZIO.collectAll(ZIO.replicate(4)(q.take)).fork
+ _ <- TestClock.adjust(1.second)
+ result <- fiber.join
+ } yield assert(result)(equalTo(List(0 -> 0, 0 -> 1, 1 -> 1, 1 -> 2)))
+}
+// res10: ZSpec[Any with Clock with TestClock, Nothing] = Spec(
+// caseValue = LabeledCase(
+// label = "zipWithLatest",
+// spec = Spec(
+// caseValue = TestCase(
+// test = ,
+// annotations = Map(zio.test.TestAnnotation@fa40ba79 -> List(SourceLocation(test_effects.md,352)))
+// )
+// )
+// )
+// )
+```
+
+### Testing Console
+
+`TestConsole` allows testing of applications that interact with console by modeling working with standard input and output
+as writing and reading to and from internal buffers.
+
+```scala
+import zio.test.environment.TestConsole
+import zio.console
+
+val consoleSuite = suite("ConsoleTest")(
+ testM("One can test output of console") {
+ for {
+ _ <- TestConsole.feedLines("Jimmy", "37")
+ _ <- console.putStrLn("What is your name?")
+ name <- console.getStrLn
+ _ <- console.putStrLn("What is your age?")
+ age <- console.getStrLn.map(_.toInt)
+ questionVector <- TestConsole.output
+ q1 = questionVector(0)
+ q2 = questionVector(1)
+ } yield {
+ assert(name)(equalTo("Jimmy")) &&
+ assert(age)(equalTo(37)) &&
+ assert(q1)(equalTo("What is your name?\n")) &&
+ assert(q2)(equalTo("What is your age?\n"))
+ }
+ }
+)
+// consoleSuite: Spec[console.package.Console with TestConsole, TestFailure[java.io.IOException], TestSuccess] = Spec(
+// caseValue = LabeledCase(
+// label = "ConsoleTest",
+// spec = Spec(
+// caseValue = MultipleCase(
+// specs = IndexedSeq(
+// Spec(
+// caseValue = LabeledCase(
+// label = "One can test output of console",
+// spec = Spec(
+// caseValue = TestCase(
+// test = ,
+// annotations = Map(zio.test.TestAnnotation@fa40ba79 -> List(SourceLocation(test_effects.md,377)))
+// )
+// )
+// )
+// )
+// )
+// )
+// )
+// )
+// )
+```
+
+The above code simulates an application that will ask for the name and age of the user. To test it we prefill the buffers with answers
+with a call to the `TestConsole.feedLines` method. Calls to `console.getStrLn` will get the values from the buffers instead of
+interacting with the user's keyboard. Also, all output that our program produces by calling `console.putStrLn` (and other
+printing methods) is gathered and can be accessed with a call to `TestConsole.output`.
+
+### Testing System
+
+With increased usage of containers and runtimes like Kubernetes more and more applications are being configured by means
+of environment variables. It is important to test this logic just like other parts of the application. For this purpose `zio-test`
+exposes the `TestSystem` module. In addition to setting the environment variables, it also allows for setting JVM system properties
+like in the code below:
+
+```scala
+import zio.system
+import zio.test.environment._
+
+for {
+ _ <- TestSystem.putProperty("java.vm.name", "VM")
+ result <- system.property("java.vm.name")
+} yield assert(result)(equalTo(Some("VM")))
+// res11: ZIO[TestSystem with system.package.System, Throwable, TestResult] = zio.ZIO$FlatMap@500bc260
+```
+
+It is worth noticing that no actual environment variables or properties will be set during testing so there will be
+no impact on other parts of the system.
+
+## Test Aspects
+
+Test aspects are used to modify existing tests or even entire suites that you have already created. Test aspects are
+applied to a test or suite using the `@@` operator. This is an example test suite showing the use of aspects to modify
+test behavior:
+
+```scala
+import zio.duration._
+import zio.test.Assertion._
+import zio.test.TestAspect._
+import zio.test._
+
+object MySpec extends DefaultRunnableSpec {
+ def spec = suite("A Suite")(
+ test("A passing test") {
+ assert(true)(isTrue)
+ },
+ test("A passing test run for JVM only") {
+ assert(true)(isTrue)
+ } @@ jvmOnly, //@@ jvmOnly only runs tests on the JVM
+ test("A passing test run for JS only") {
+ assert(true)(isTrue)
+ } @@ jsOnly, //@@ jsOnly only runs tests on Scala.js
+ test("A passing test with a timeout") {
+ assert(true)(isTrue)
+ } @@ timeout(10.nanos), //@@ timeout will fail a test that doesn't pass within the specified time
+ test("A failing test... that passes") {
+ assert(true)(isFalse)
+ } @@ failing, //@@ failing turns a failing test into a passing test
+ test("A ignored test") {
+ assert(false)(isTrue)
+ } @@ ignore, //@@ ignore marks test as ignored
+ test("A flaky test that only works on the JVM and sometimes fails; let's compose some aspects!") {
+ assert(false)(isTrue)
+ } @@ jvmOnly // only run on the JVM
+ @@ eventually //@@ eventually retries a test indefinitely until it succeeds
+ @@ timeout(20.nanos) //it's a good idea to compose `eventually` with `timeout`, or the test may never end
+ ) @@ timeout(60.seconds) //apply a timeout to the whole suite
+}
+```
diff --git a/website/versioned_docs/version-1.0.18/guides/use-test-assertions.md b/website/versioned_docs/version-1.0.18/guides/use-test-assertions.md
new file mode 100644
index 000000000000..9fce5f5f3228
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/guides/use-test-assertions.md
@@ -0,0 +1,318 @@
+---
+id: use-test-assertions
+title: "How to Use Test Assertions"
+---
+
+Using the `Assertion` type effectively often involves finding the best fitting
+function for the type of assumptions you would like to verify.
+
+This list is intended to break up the available functions into groups based on
+the _Result type_. The types of the functions are included as well, to guide
+intuition.
+
+For instance, if we wanted to assert that the fourth element of a `Vector[Int]`
+was a value equal to the number `5`, we would first look at assertions that
+operate on `Seq[A]`, with the type `Assertion[Seq[A]]`.
+
+For this example, I would select `hasAt`, as it accepts both the position into
+a sequence, as well as an `Assertion[A]` to apply at that position:
+
+```scala
+Assertion.hasAt[A](pos: Int)(assertion: Assertion[A]): Assertion[Seq[A]]
+```
+
+I could start by writing:
+
+
+```scala
+val xs = Vector(0, 1, 2, 3)
+// xs: Vector[Int] = Vector(0, 1, 2, 3)
+
+test("Fourth value is equal to 5") {
+ assert(xs)(hasAt(3)(???))
+}
+// res1: ZSpec[Any, Nothing] = Spec(
+// caseValue = LabeledCase(
+// label = "Fourth value is equal to 5",
+// spec = Spec(
+// caseValue = TestCase(
+// test = ,
+// annotations = Map(zio.test.TestAnnotation@fa40ba79 -> List(SourceLocation(use-test-assertions.md,20)))
+// )
+// )
+// )
+// )
+```
+
+The second parameter to `hasAt` is an `Assertion[A]` that applies to the third
+element of that sequence, so I would look for functions that operate on `A`,
+of the return type `Assertion[A]`.
+
+I could select `equalTo`, as it accepts an `A` as a parameter, allowing me to
+supply `5`:
+
+
+```scala
+val xs = Vector(0, 1, 2, 3)
+// xs: Vector[Int] = Vector(0, 1, 2, 3)
+
+test("Fourth value is equal to 5") {
+ assert(xs)(hasAt(3)(equalTo(5)))
+}
+// res3: ZSpec[Any, Nothing] = Spec(
+// caseValue = LabeledCase(
+// label = "Fourth value is equal to 5",
+// spec = Spec(
+// caseValue = TestCase(
+// test = ,
+// annotations = Map(zio.test.TestAnnotation@fa40ba79 -> List(SourceLocation(use-test-assertions.md,40)))
+// )
+// )
+// )
+// )
+```
+
+Let's say this is too restrictive, and I would prefer to assert that a value is
+_near_ the number five, with a tolerance of two. This requires a little more
+knowledge of the type `A`, so I'll look for an assertion in the `Numeric`
+section. `approximatelyEquals` looks like what we want, as it permits the
+starting value `reference`, as well as a `tolerance`, for any `A` that is
+`Numeric`:
+
+```scala
+Assertion.approximatelyEquals[A: Numeric](reference: A, tolerance: A): Assertion[A]
+```
+
+Changing out `equalTo` with `approximatelyEquals` leaves us with:
+
+
+```scala
+val xs = Vector(0, 1, 2, 3)
+// xs: Vector[Int] = Vector(0, 1, 2, 3)
+
+test("Fourth value is approximately equal to 5") {
+ assert(xs)(hasAt(3)(approximatelyEquals(5, 2)))
+}
+// res5: ZSpec[Any, Nothing] = Spec(
+// caseValue = LabeledCase(
+// label = "Fourth value is approximately equal to 5",
+// spec = Spec(
+// caseValue = TestCase(
+// test = ,
+// annotations = Map(zio.test.TestAnnotation@fa40ba79 -> List(SourceLocation(use-test-assertions.md,60)))
+// )
+// )
+// )
+// )
+```
+
+Values
+======
+
+Assertions that apply to plain values.
+
+Any
+---
+
+Assertions that apply to `Any` value.
+
+| Function | Result type | Description |
+| -------- | ----------- | ----------- |
+| `anything` | `Assertion[Any]` | Makes a new assertion that always succeeds. |
+| `isNull` | `Assertion[Any]` | Makes a new assertion that requires a null value. |
+| `isSubtype[A](assertion: Assertion[A])(implicit C: ClassTag[A])` | `Assertion[Any]` | Makes a new assertion that requires a value have the specified type. |
+| `nothing` | `Assertion[Any]` | Makes a new assertion that always fails. |
+| `throwsA[E: ClassTag]` | `Assertion[Any]` | Makes a new assertion that requires the expression to throw. |
+
+A
+---
+
+Assertions that apply to specific values.
+
+| Function | Result type | Description |
+| -------- | ----------- | ----------- |
+| `equalTo[A](expected: A)` | `Assertion[A]` | Makes a new assertion that requires a value equal the specified value. |
+| `hasField[A, B](name: String, proj: A => B, assertion: Assertion[B])` | `Assertion[A]` | Makes a new assertion that focuses in on a field in a case class. |
+| `isOneOf[A](values: Iterable[A])` | `Assertion[A]` | Makes a new assertion that requires a value to be equal to one of the specified values. |
+| `not[A](assertion: Assertion[A])` | `Assertion[A]` | Makes a new assertion that negates the specified assertion. |
+| `throws[A](assertion: Assertion[Throwable])` | `Assertion[A]` | Makes a new assertion that requires the expression to throw. |
+
+Numeric
+-------
+
+Assertions on `Numeric` types
+
+| Function | Result type | Description |
+| -------- | ----------- | ----------- |
+| `approximatelyEquals[A: Numeric](reference: A, tolerance: A)` | `Assertion[A]` | Makes a new assertion that requires a given numeric value to match a value with some tolerance. |
+| `isNegative[A](implicit num: Numeric[A])` | `Assertion[A]` | Makes a new assertion that requires a numeric value is negative. |
+| `isPositive[A](implicit num: Numeric[A])` | `Assertion[A]` | Makes a new assertion that requires a numeric value is positive. |
+| `isZero[A](implicit num: Numeric[A])` | `Assertion[A]` | Makes a new assertion that requires a numeric value is zero. |
+| `nonNegative[A](implicit num: Numeric[A])` | `Assertion[A]` | Makes a new assertion that requires a numeric value is non negative. |
+| `nonPositive[A](implicit num: Numeric[A])` | `Assertion[A]` | Makes a new assertion that requires a numeric value is non positive. |
+
+Ordering
+--------
+
+Assertions on types that support `Ordering`
+
+| Function | Result type | Description |
+| -------- | ----------- | ----------- |
+| `isGreaterThan[A](reference: A)(implicit ord: Ordering[A])` | `Assertion[A]` | Makes a new assertion that requires the value be greater than the specified reference value. |
+| `isGreaterThanEqualTo[A](reference: A)(implicit ord: Ordering[A])` | `Assertion[A]` | Makes a new assertion that requires the value be greater than or equal to the specified reference value. |
+| `isLessThan[A](reference: A)(implicit ord: Ordering[A])` | `Assertion[A]` | Makes a new assertion that requires the value be less than the specified reference value. |
+| `isLessThanEqualTo[A](reference: A)(implicit ord: Ordering[A])` | `Assertion[A]` | Makes a new assertion that requires the value be less than or equal to the specified reference value. |
+| `isWithin[A](min: A, max: A)(implicit ord: Ordering[A])` | `Assertion[A]` | Makes a new assertion that requires a value to fall within a specified min and max (inclusive). |
+
+Iterable
+========
+
+Assertions on types that extend `Iterable`, like `List`, `Seq`, `Set`, `Map`, and many others.
+
+| Function | Result type | Description |
+| -------- | ----------- | ----------- |
+| `contains[A](element: A)` | `Assertion[Iterable[A]]` | Makes a new assertion that requires an Iterable contain the specified element. See Assertion.exists if you want to require an Iterable to contain an element satisfying an assertion. |
+| `exists[A](assertion: Assertion[A])` | `Assertion[Iterable[A]]` | Makes a new assertion that requires an Iterable contain an element satisfying the given assertion. |
+| `forall[A](assertion: Assertion[A])` | `Assertion[Iterable[A]]` | Makes a new assertion that requires an Iterable contain only elements satisfying the given assertion. |
+| `hasFirst[A](assertion: Assertion[A])` | `Assertion[Iterable[A]]` | Makes a new assertion that requires an Iterable to contain the first element satisfying the given assertion. |
+| `hasIntersection[A](other: Iterable[A])(assertion: Assertion[Iterable[A]])` | `Assertion[Iterable[A]]` | Makes a new assertion that requires the intersection of two Iterables satisfy the given assertion. |
+| `hasLast[A](assertion: Assertion[A])` | `Assertion[Iterable[A]]` | Makes a new assertion that requires an Iterable to contain the last element satisfying the given assertion. |
+| `hasSize[A](assertion: Assertion[Int])` | `Assertion[Iterable[A]]` | Makes a new assertion that requires the size of an Iterable be satisfied by the specified assertion. |
+| `hasAtLeastOneOf[A](other: Iterable[A])` | `Assertion[Iterable[A]]` | Makes a new assertion that requires an Iterable contain at least one of the specified elements. |
+| `hasAtMostOneOf[A](other: Iterable[A])` | `Assertion[Iterable[A]]` | Makes a new assertion that requires an Iterable contain at most one of the specified elements. |
+| `hasNoneOf[A](other: Iterable[A])` | `Assertion[Iterable[A]]` | Makes a new assertion that requires an Iterable contain none of the specified elements. |
+| `hasOneOf[A](other: Iterable[A])` | `Assertion[Iterable[A]]` | Makes a new assertion that requires an Iterable contain exactly one of the specified elements. |
+| `hasSameElements[A](other: Iterable[A])` | `Assertion[Iterable[A]]` | Makes a new assertion that requires an Iterable to have the same elements as the specified Iterable, though not necessarily in the same order. |
+| `hasSameElementsDistinct[A](other: Iterable[A])` | `Assertion[Iterable[A]]` | Makes a new assertion that requires an Iterable to have the same distinct elements as the other Iterable, though not necessarily in the same order. |
+| `hasSubset[A](other: Iterable[A])` | `Assertion[Iterable[A]]` | Makes a new assertion that requires the specified Iterable to be a subset of the other Iterable. |
+| `isDistinct` | `Assertion[Iterable[Any]]` | Makes a new assertion that requires an Iterable is distinct. |
+| `isEmpty` | `Assertion[Iterable[Any]]` | Makes a new assertion that requires an Iterable to be empty. |
+| `isNonEmpty` | `Assertion[Iterable[Any]]` | Makes a new assertion that requires an Iterable to be non empty. |
+
+Ordering
+--------
+
+Assertions that apply to ordered `Iterable`s
+
+| Function | Result type | Description |
+| -------- | ----------- | ----------- |
+| `isSorted[A](implicit ord: Ordering[A])` | `Assertion[Iterable[A]]` | Makes a new assertion that requires an Iterable is sorted. |
+| `isSortedReverse[A](implicit ord: Ordering[A])` | `Assertion[Iterable[A]]` | Makes a new assertion that requires an Iterable is sorted in reverse order. |
+
+Seq
+---
+
+Assertions that operate on sequences (`List`, `Vector`, `Map`, and many others)
+
+| Function | Result type | Description |
+| -------- | ----------- | ----------- |
+| `endsWith[A](suffix: Seq[A])` | `Assertion[Seq[A]]` | Makes a new assertion that requires a given string to end with the specified suffix. |
+| `hasAt[A](pos: Int)(assertion: Assertion[A])` | `Assertion[Seq[A]]` | Makes a new assertion that requires a sequence to contain an element satisfying the given assertion on the given position. |
+| `startsWith[A](prefix: Seq[A])` | `Assertion[Seq[A]]` | Makes a new assertion that requires a given sequence to start with the specified prefix. |
+
+Either
+======
+
+Assertions for `Either` values.
+
+| Function | Result type | Description |
+| -------- | ----------- | ----------- |
+| `isLeft[A](assertion: Assertion[A])` | `Assertion[Either[A, Any]]` | Makes a new assertion that requires a Left value satisfying a specified assertion. |
+| `isLeft` | `Assertion[Either[Any, Any]]` | Makes a new assertion that requires an Either is Left. |
+| `isRight[A](assertion: Assertion[A])` | `Assertion[Either[Any, A]]` | Makes a new assertion that requires a Right value satisfying a specified assertion. |
+| `isRight` | `Assertion[Either[Any, Any]]` | Makes a new assertion that requires an Either is Right. |
+
+Exit/Cause/Throwable
+==========
+
+Assertions for `Exit` or `Cause` results.
+
+| Function | Result type | Description |
+| -------- | ----------- | ----------- |
+| `containsCause[E](cause: Cause[E])` | `Assertion[Cause[E]]` | Makes a new assertion that requires a Cause contain the specified cause. |
+| `dies(assertion: Assertion[Throwable])` | `Assertion[Exit[Any, Any]]` | Makes a new assertion that requires an exit value to die. |
+| `failsCause[E](assertion: Assertion[Cause[E]])` | `Assertion[Exit[E, Any]]` | Makes a new assertion that requires an exit value to fail with a cause that meets the specified assertion. |
+| `fails[E](assertion: Assertion[E])` | `Assertion[Exit[E, Any]]` | Makes a new assertion that requires an exit value to fail. |
+| `isInterrupted` | `Assertion[Exit[Any, Any]]` | Makes a new assertion that requires an exit value to be interrupted. |
+| `succeeds[A](assertion: Assertion[A])` | `Assertion[Exit[Any, A]]` | Makes a new assertion that requires an exit value to succeed. |
+| `hasMessage(message: Assertion[String])` | `Assertion[Throwable]` | Makes a new assertion that requires an exception to have a certain message. |
+| `hasThrowableCause(cause: Assertion[Throwable])` | `Assertion[Throwable]` | Makes a new assertion that requires an exception to have a certain cause. |
+
+Try
+===
+
+| Function | Result type | Description |
+| -------- | ----------- | ----------- |
+| `isFailure(assertion: Assertion[Throwable])` | `Assertion[Try[Any]]` | Makes a new assertion that requires a Failure value satisfying the specified assertion. |
+| `isFailure` | `Assertion[Try[Any]]` | Makes a new assertion that requires a Try value is Failure. |
+| `isSuccess[A](assertion: Assertion[A])` | `Assertion[Try[A]]` | Makes a new assertion that requires a Success value satisfying the specified assertion. |
+| `isSuccess` | `Assertion[Try[Any]]` | Makes a new assertion that requires a Try value is Success. |
+
+Sum type
+========
+
+An assertion that applies to some type, giving a method to transform the source
+type into another type, then assert a property on that projected type.
+
+| Function | Result type | Description |
+| -------- | ----------- | ----------- |
+| `isCase[Sum, Proj]( termName: String, term: Sum => Option[Proj], assertion: Assertion[Proj])` | `Assertion[Sum]` | Makes a new assertion that requires the sum type be a specified term. |
+
+
+Map
+===
+
+Assertions for `Map[K, V]`
+
+| Function | Result type | Description |
+| -------- | ----------- | ----------- |
+| `hasKey[K, V](key: K)` | `Assertion[Map[K, V]]` | Makes a new assertion that requires a Map to have the specified key. |
+| `hasKey[K, V](key: K, assertion: Assertion[V])` | `Assertion[Map[K, V]]` | Makes a new assertion that requires a Map to have the specified key with value satisfying the specified assertion. |
+| `hasKeys[K, V](assertion: Assertion[Iterable[K]])` | `Assertion[Map[K, V]]` | Makes a new assertion that requires a Map to have keys satisfying the specified assertion. |
+| `hasValues[K, V](assertion: Assertion[Iterable[V]])` | `Assertion[Map[K, V]]` | Makes a new assertion that requires a Map to have values satisfying the specified assertion. |
+
+String
+======
+
+Assertions for Strings
+
+| Function | Result type | Description |
+| -------- | ----------- | ----------- |
+| `containsString(element: String)` | `Assertion[String]` | Makes a new assertion that requires a substring to be present. |
+| `endsWithString(suffix: String)` | `Assertion[String]` | Makes a new assertion that requires a given string to end with the specified suffix. |
+| `equalsIgnoreCase(other: String)` | `Assertion[String]` | Makes a new assertion that requires a given string to equal another ignoring case. |
+| `hasSizeString(assertion: Assertion[Int])` | `Assertion[String]` | Makes a new assertion that requires the size of a string be satisfied by the specified assertion. |
+| `isEmptyString` | `Assertion[String]` | Makes a new assertion that requires a given string to be empty. |
+| `isNonEmptyString` | `Assertion[String]` | Makes a new assertion that requires a given string to be non empty. |
+| `matchesRegex(regex: String)` | `Assertion[String]` | Makes a new assertion that requires a given string to match the specified regular expression. |
+| `startsWithString(prefix: String)` | `Assertion[String]` | Makes a new assertion that requires a given string to start with a specified prefix. |
+
+Boolean
+=======
+
+Assertions for Booleans
+
+| Function | Result type | Description |
+| -------- | ----------- | ----------- |
+| `isFalse` | `Assertion[Boolean]` | Makes a new assertion that requires a value be false. |
+| `isTrue` | `Assertion[Boolean]` | Makes a new assertion that requires a value be true. |
+
+Option
+======
+
+Assertions for Optional values
+
+| Function | Result type | Description |
+| -------- | ----------- | ----------- |
+| `isNone` | `Assertion[Option[Any]]` | Makes a new assertion that requires a None value. |
+| `isSome[A](assertion: Assertion[A])` | `Assertion[Option[A]]` | Makes a new assertion that requires a Some value satisfying the specified assertion. |
+| `isSome` | `Assertion[Option[Any]]` | Makes a new assertion that requires an Option is Some. |
+
+Unit
+====
+
+Assertion for Unit
+
+| Function | Result type | Description |
+| -------- | ----------- | ----------- |
+| `isUnit` | `Assertion[Unit]` | Makes a new assertion that requires the value be unit. |
diff --git a/website/versioned_docs/version-1.0.18/overview/background.md b/website/versioned_docs/version-1.0.18/overview/background.md
new file mode 100644
index 000000000000..0dd2363f22c9
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/overview/background.md
@@ -0,0 +1,119 @@
+---
+id: overview_background
+title: "Background"
+---
+
+Procedural Scala programs use _procedural functions_, which are:
+
+ * **Partial** — Procedures do not return values for some inputs (for example, they throw exceptions).
+ * **Non-Deterministic** — Procedures return different outputs for the same input.
+ * **Impure** — Procedures perform side-effects, which mutate data or interact with the external world.
+
+Unlike procedural Scala programs, functional Scala programs only use _pure functions_, which are:
+
+ * **Total** — Functions always return an output for every input.
+ * **Deterministic** — Functions return the same output for the same input.
+ * **Pure** — The only effect of providing a function an input is computing the output.
+
+Pure functions only combine or transform input values into output values in a total, deterministic way. Pure functions are easier to understand, easier to test, easier to refactor, and easier to abstract over.
+
+Functional programs do not interact with the external world directly, because that involves partiality, non-determinism and side-effects. Instead, functional programs construct and return _data structures_, which _describe_ (or _model_) interaction with the real world.
+
+Immutable data structures that model procedural effects are called _functional effects_. The concept of functional effects is critical to deeply understanding how ZIO works, and is introduced in the next section.
+
+## Programs As Values
+
+We can build a data structure to describe a console program with just three instructions:
+
+```scala
+sealed trait Console[+A]
+final case class Return[A](value: () => A) extends Console[A]
+final case class PrintLine[A](line: String, rest: Console[A]) extends Console[A]
+final case class ReadLine[A](rest: String => Console[A]) extends Console[A]
+```
+
+In this model, `Console[A]` is an immutable, type-safe value, which represents a console program that returns a value of type `A`.
+
+The `Console` data structure is an ordered _tree_, and at the very "end" of the program, you will find a `Return` instruction that stores a value of type `A`, which is the return value of the `Console[A]` program.
+
+Although very simple, this data structure is enough to build an interactive program:
+
+```scala
+val example1: Console[Unit] =
+ PrintLine("Hello, what is your name?",
+ ReadLine(name =>
+ PrintLine(s"Good to meet you, ${name}", Return(() => ())))
+)
+```
+
+This immutable value doesn't do anything—it just _describes_ a program that prints out a message, asks for input, and prints out another message that depends on the input.
+
+Although this program is just a model, we can translate the model into procedural effects quite simply using an _interpreter_, which recurses on the data structure, translating every instruction into the side-effect that it describes:
+
+```scala
+def interpret[A](program: Console[A]): A = program match {
+ case Return(value) =>
+ value()
+ case PrintLine(line, next) =>
+ println(line)
+ interpret(next)
+ case ReadLine(next) =>
+ interpret(next(scala.io.StdIn.readLine()))
+}
+```
+
+Interpreting (also called _running_ or _executing_) is not functional, because it may be partial, non-deterministic, and impure. In an ideal application, however, interpretation only needs to happen once: in the application's main function. The rest of the application can be purely functional.
+
+In practice, it's not very convenient to build console programs using constructors directly. Instead, we can define helper functions, which look more like their effectful equivalents:
+
+```scala
+def succeed[A](a: => A): Console[A] = Return(() => a)
+def printLine(line: String): Console[Unit] =
+ PrintLine(line, succeed(()))
+val readLine: Console[String] =
+ ReadLine(line => succeed(line))
+```
+
+Composing these "leaf" instructions into larger programs becomes a lot easier if we define `map` and `flatMap` methods on `Console`:
+
+ - The `map` method lets you transform a console program that returns an `A` into a console program that returns a `B`, by supplying a function `A => B`.
+ - The `flatMap` method lets you sequentially compose a console program that returns an `A` with a callback that returns another console program created from the `A`.
+
+ These two methods are defined as follows:
+
+```scala
+implicit class ConsoleSyntax[+A](self: Console[A]) {
+ def map[B](f: A => B): Console[B] =
+ flatMap(a => succeed(f(a)))
+
+ def flatMap[B](f: A => Console[B]): Console[B] =
+ self match {
+ case Return(value) => f(value())
+ case PrintLine(line, next) =>
+ PrintLine(line, next.flatMap(f))
+ case ReadLine(next) =>
+ ReadLine(line => next(line).flatMap(f))
+ }
+}
+```
+
+With these `map` and `flatMap` methods, we can now take advantage of Scala's `for` comprehensions, and write programs that look like their procedural equivalents:
+
+```scala
+val example2: Console[String] =
+ for {
+ _ <- printLine("What's your name?")
+ name <- readLine
+ _ <- printLine(s"Hello, ${name}, good to meet you!")
+ } yield name
+```
+
+When we wish to execute this program, we can call `interpret` on the `Console` value.
+
+All functional Scala programs are constructed like this: instead of interacting with the real world, they build a _functional effect_, which is nothing more than an immutable, type-safe, tree-like data structure that models procedural effects.
+
+Functional programmers use functional effects to build complex, real world software without giving up the equational reasoning, composability, and type safety afforded by purely functional programming.
+
+## Next Steps
+
+If functional effects are starting to make more sense, then the next step is to learn more about the [core effect type](index.md) in ZIO.
diff --git a/website/versioned_docs/version-1.0.18/overview/basic_concurrency.md b/website/versioned_docs/version-1.0.18/overview/basic_concurrency.md
new file mode 100644
index 000000000000..7ebf16d2f6fc
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/overview/basic_concurrency.md
@@ -0,0 +1,170 @@
+---
+id: overview_basic_concurrency
+title: "Basic Concurrency"
+---
+
+ZIO has low-level support for concurrency using _fibers_. While fibers are very powerful, they are low-level. To improve productivity, ZIO provides high-level operations built on fibers.
+
+When you can, you should always use high-level operations, rather than working with fibers directly. For the sake of completeness, this section introduces both fibers and some of the high-level operations built on them.
+
+## Fibers
+
+ZIO's concurrency is built on _fibers_, which are lightweight "green threads" implemented by the ZIO runtime system.
+
+Unlike operating system threads, fibers consume almost no memory, have growable and shrinkable stacks, don't waste resources blocking, and will be garbage collected automatically if they are suspended and unreachable.
+
+Fibers are scheduled by the ZIO runtime and will cooperatively yield to each other, which enables multitasking, even when operating in a single-threaded environment (like JavaScript, or even the JVM when configured with one thread).
+
+All effects in ZIO are executed by _some_ fiber. If you did not create the fiber, then the fiber was created by some operation you are using (if the operation is concurrent or parallel), or by the ZIO runtime system.
+
+Even if you only write "single-threaded" code, with no parallel or concurrent operations, then there will be at least one fiber: the "main" fiber that executes your effect.
+
+### The Fiber Data Type
+
+Every ZIO fiber is responsible for executing some effect, and the `Fiber` data type in ZIO represents a "handle" on that running computation. The `Fiber` data type is most similar to Scala's `Future` data type.
+
+The `Fiber[E, A]` data type in ZIO has two type parameters:
+
+ - **`E` Failure Type**. The fiber may fail with a value of this type.
+ - **`A` Success Type**. The fiber may succeed with a value of this type.
+
+Fibers do not have an `R` type parameter, because they model effects that are already running, and which already had their required environment provided to them.
+
+
+### Forking Effects
+
+The most fundamental way of creating a fiber is to take an existing effect and _fork_ it. Conceptually, _forking_ an effect begins executing the effect on a new fiber, giving you a reference to the newly-created `Fiber`.
+
+The following code creates a single fiber, which executes `fib(100)`:
+
+```scala
+def fib(n: Long): UIO[Long] = UIO {
+ if (n <= 1) UIO.succeed(n)
+ else fib(n - 1).zipWith(fib(n - 2))(_ + _)
+}.flatten
+
+val fib100Fiber: UIO[Fiber[Nothing, Long]] =
+ for {
+ fiber <- fib(100).fork
+ } yield fiber
+```
+
+### Joining Fibers
+
+One of the methods on `Fiber` is `Fiber#join`, which returns an effect. The effect returned by `Fiber#join` will succeed or fail as per the fiber:
+
+```scala
+for {
+ fiber <- IO.succeed("Hi!").fork
+ message <- fiber.join
+} yield message
+```
+
+### Awaiting Fibers
+
+Another method on `Fiber` is `Fiber#await`, which returns an effect containing an `Exit` value, which provides full information on how the fiber completed.
+
+```scala
+for {
+ fiber <- IO.succeed("Hi!").fork
+ exit <- fiber.await
+} yield exit
+```
+
+### Interrupting Fibers
+
+A fiber whose result is no longer needed may be _interrupted_, which immediately terminates the fiber, safely releasing all resources and running all finalizers.
+
+Like `await`, `Fiber#interrupt` returns an `Exit` describing how the fiber completed.
+
+```scala
+for {
+ fiber <- IO.succeed("Hi!").forever.fork
+ exit <- fiber.interrupt
+} yield exit
+```
+
+By design, the effect returned by `Fiber#interrupt` does not resume until the fiber has completed. If this behavior is not desired, you can `fork` the interruption itself:
+
+```scala
+for {
+ fiber <- IO.succeed("Hi!").forever.fork
+ _ <- fiber.interrupt.fork // I don't care!
+} yield ()
+```
+
+### Composing Fibers
+
+ZIO lets you compose fibers with `Fiber#zip` or `Fiber#zipWith`.
+
+These methods combine two fibers into a single fiber that produces the results of both. If either fiber fails, then the composed fiber will fail.
+
+```scala
+for {
+ fiber1 <- IO.succeed("Hi!").fork
+ fiber2 <- IO.succeed("Bye!").fork
+ fiber = fiber1.zip(fiber2)
+ tuple <- fiber.join
+} yield tuple
+```
+
+Another way fibers compose is with `Fiber#orElse`. If the first fiber succeeds, the composed fiber will succeed with its result; otherwise, the composed fiber will complete with the exit value of the second fiber (whether success or failure).
+
+```scala
+for {
+ fiber1 <- IO.fail("Uh oh!").fork
+ fiber2 <- IO.succeed("Hurray!").fork
+ fiber = fiber1.orElse(fiber2)
+ message <- fiber.join
+} yield message
+```
+
+## Parallelism
+
+ZIO provides many operations for performing effects in parallel. These methods are all named with a `Par` suffix that helps you identify opportunities to parallelize your code.
+
+For example, the ordinary `ZIO#zip` method zips two effects together, sequentially. But there is also a `ZIO#zipPar` method, which zips two effects together in parallel.
+
+The following table summarizes some of the sequential operations and their corresponding parallel versions:
+
+| **Description** | **Sequential** | **Parallel** |
+| -----------------------------: | :---------------: | :------------------: |
+| Zips two effects into one | `ZIO#zip` | `ZIO#zipPar` |
+| Zips two effects into one | `ZIO#zipWith` | `ZIO#zipWithPar` |
+| Zips multiple effects into one | `ZIO#tupled` | `ZIO#tupledPar` |
+| Collects from many effects | `ZIO.collectAll` | `ZIO.collectAllPar` |
+| Effectfully loop over values | `ZIO.foreach` | `ZIO.foreachPar` |
+| Reduces many values | `ZIO.reduceAll` | `ZIO.reduceAllPar` |
+| Merges many values | `ZIO.mergeAll` | `ZIO.mergeAllPar` |
+
+For all the parallel operations, if one effect fails, then others will be interrupted, to minimize unnecessary computation.
+
+If the fail-fast behavior is not desired, potentially failing effects can be first converted into infallible effects using the `ZIO#either` or `ZIO#option` methods.
+
+## Racing
+
+ZIO lets you race multiple effects in parallel, returning the first successful result:
+
+```scala
+for {
+ winner <- IO.succeed("Hello").race(IO.succeed("Goodbye"))
+} yield winner
+```
+
+If you want the first success or failure, rather than the first success, then you can use `left.either race right.either`, for any effects `left` and `right`.
+
+## Timeout
+
+ZIO lets you timeout any effect using the `ZIO#timeout` method, which returns a new effect that succeeds with an `Option`. A value of `None` indicates the timeout elapsed before the effect completed.
+
+```scala
+import zio.duration._
+
+IO.succeed("Hello").timeout(10.seconds)
+```
+
+If an effect times out, then instead of continuing to execute in the background, it will be interrupted so no resources will be wasted.
+
+## Next Steps
+
+If you are comfortable with basic concurrency, then the next step is to learn about [testing effects](testing_effects.md).
diff --git a/website/versioned_docs/version-1.0.18/overview/basic_operations.md b/website/versioned_docs/version-1.0.18/overview/basic_operations.md
new file mode 100644
index 000000000000..37f60e50316d
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/overview/basic_operations.md
@@ -0,0 +1,84 @@
+---
+id: overview_basic_operations
+title: "Basic Operations"
+---
+
+
+## Mapping
+
+You can map over the success channel of an effect by calling the `ZIO#map` method. This lets you transform the success values of effects.
+
+```scala
+import zio._
+
+val succeeded: UIO[Int] = IO.succeed(21).map(_ * 2)
+```
+
+You can map over the error channel of an effect by calling the `ZIO#mapError` method. This lets you transform the failure values of effects.
+
+```scala
+val failed: IO[Exception, Unit] =
+ IO.fail("No no!").mapError(msg => new Exception(msg))
+```
+
+Note that mapping over an effect's success or error channel does not change the success or failure of the effect, in the same way that mapping over an `Either` does not change whether the `Either` is `Left` or `Right`.
+
+## Chaining
+
+You can execute two effects in sequence with the `flatMap` method, which requires that you pass a callback, which will receive the value of the first effect, and can return a second effect that depends on this value:
+
+```scala
+val sequenced =
+ getStrLn.flatMap(input => putStrLn(s"You entered: $input"))
+```
+
+If the first effect fails, the callback passed to `flatMap` will never be invoked, and the composed effect returned by `flatMap` will also fail.
+
+In _any_ chain of effects, the first failure will short-circuit the whole chain, just like throwing an exception will prematurely exit a sequence of statements.
+
+## For Comprehensions
+
+Because the `ZIO` data type supports both `flatMap` and `map`, you can use Scala's _for comprehensions_ to build sequential effects:
+
+```scala
+val program =
+ for {
+ _ <- putStrLn("Hello! What is your name?")
+ name <- getStrLn
+ _ <- putStrLn(s"Hello, ${name}, welcome to ZIO!")
+ } yield ()
+```
+
+_For comprehensions_ provide a more procedural syntax for composing chains of effects.
+
+## Zipping
+
+You can combine two effects into a single effect with the `ZIO#zip` method. The resulting effect succeeds with a tuple that contains the success values of both effects:
+
+```scala
+val zipped: UIO[(String, Int)] =
+ ZIO.succeed("4").zip(ZIO.succeed(2))
+```
+
+Note that `zip` operates sequentially: the effect on the left side is executed before the effect on the right side.
+
+In any `zip` operation, if either the left or right hand sides fail, then the composed effect will fail, because _both_ values are required to construct the tuple.
+
+Sometimes, when the success value of an effect is not useful (for example, it is `Unit`), it can be more convenient to use the `ZIO#zipLeft` or `ZIO#zipRight` functions, which first perform a `zip`, and then map over the tuple to discard one side or the other:
+
+```scala
+val zipRight1 =
+ putStrLn("What is your name?").zipRight(getStrLn)
+```
+
+The `zipRight` and `zipLeft` functions have symbolic aliases, known as `*>` and `<*`, respectively. Some developers find these operators easier to read:
+
+```scala
+val zipRight2 =
+ putStrLn("What is your name?") *>
+ getStrLn
+```
+
+## Next Step
+
+If you are comfortable with the basic operations on ZIO effects, then the next step is to learn about [error handling](handling_errors.md).
diff --git a/website/versioned_docs/version-1.0.18/overview/creating_effects.md b/website/versioned_docs/version-1.0.18/overview/creating_effects.md
new file mode 100644
index 000000000000..d469791e1150
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/overview/creating_effects.md
@@ -0,0 +1,240 @@
+---
+id: overview_creating_effects
+title: "Creating Effects"
+---
+
+This section explores some of the common ways to create ZIO effects from values, from common Scala types, and from both synchronous and asynchronous side-effects.
+
+
+## From Success Values
+
+Using the `ZIO.succeed` method, you can create an effect that succeeds with the specified value:
+
+```scala
+val s1 = ZIO.succeed(42)
+```
+
+You can also use methods in the companion objects of the `ZIO` type aliases:
+
+```scala
+val s2: Task[Int] = Task.succeed(42)
+```
+
+The `succeed` method takes a by-name parameter to make sure that any accidental side effects from constructing the value can be properly managed by the ZIO Runtime. However, `succeed` is intended for values which do not have any side effects. If you know that your value does have side effects consider using `ZIO.effectTotal` for clarity.
+
+```scala
+val now = ZIO.effectTotal(System.currentTimeMillis())
+```
+
+The value inside a successful effect constructed with `ZIO.effectTotal` will only be constructed if absolutely required.
+
+## From Failure Values
+
+Using the `ZIO.fail` method, you can create an effect that models failure:
+
+```scala
+val f1 = ZIO.fail("Uh oh!")
+```
+
+For the `ZIO` data type, there is no restriction on the error type. You may use strings, exceptions, or custom data types appropriate for your application.
+
+Many applications will model failures with classes that extend `Throwable` or `Exception`:
+
+```scala
+val f2 = Task.fail(new Exception("Uh oh!"))
+```
+
+Note that unlike the other effect companion objects, the `UIO` companion object does not have `UIO.fail`, because `UIO` values cannot fail.
+
+## From Scala Values
+
+Scala's standard library contains a number of data types that can be converted into ZIO effects.
+
+### Option
+
+An `Option` can be converted into a ZIO effect using `ZIO.fromOption`:
+
+```scala
+val zoption: IO[Option[Nothing], Int] = ZIO.fromOption(Some(2))
+```
+
+The error type of the resulting effect is `Option[Nothing]`, which provides no information on why the value is not there. You can change the `Option[Nothing]` into a more specific error type using `ZIO#mapError`:
+
+```scala
+val zoption2: IO[String, Int] = zoption.mapError(_ => "It wasn't there!")
+```
+
+You can also readily compose it with other operators while preserving the optional nature of the result (similar to an `OptionT`)
+
+
+```scala
+val maybeId: IO[Option[Nothing], String] = ZIO.fromOption(Some("abc123"))
+def getUser(userId: String): IO[Throwable, Option[User]] = ???
+def getTeam(teamId: String): IO[Throwable, Team] = ???
+
+
+val result: IO[Throwable, Option[(User, Team)]] = (for {
+ id <- maybeId
+ user <- getUser(id).some
+ team <- getTeam(user.teamId).asSomeError
+} yield (user, team)).optional
+```
+
+### Either
+
+An `Either` can be converted into a ZIO effect using `ZIO.fromEither`:
+
+```scala
+val zeither = ZIO.fromEither(Right("Success!"))
+```
+
+The error type of the resulting effect will be whatever type the `Left` case has, while the success type will be whatever type the `Right` case has.
+
+### Try
+
+A `Try` value can be converted into a ZIO effect using `ZIO.fromTry`:
+
+```scala
+import scala.util.Try
+
+val ztry = ZIO.fromTry(Try(42 / 0))
+```
+
+The error type of the resulting effect will always be `Throwable`, because `Try` can only fail with values of type `Throwable`.
+
+### Function
+
+A function `A => B` can be converted into a ZIO effect with `ZIO.fromFunction`:
+
+```scala
+val zfun: URIO[Int, Int] =
+ ZIO.fromFunction((i: Int) => i * i)
+```
+
+The environment type of the effect is `A` (the input type of the function), because in order to run the effect, it must be supplied with a value of this type.
+
+### Future
+
+A `Future` can be converted into a ZIO effect using `ZIO.fromFuture`:
+
+```scala
+import scala.concurrent.Future
+
+lazy val future = Future.successful("Hello!")
+
+val zfuture: Task[String] =
+ ZIO.fromFuture { implicit ec =>
+ future.map(_ => "Goodbye!")
+ }
+```
+
+The function passed to `fromFuture` is passed an `ExecutionContext`, which allows ZIO to manage where the `Future` runs (of course, you can ignore this `ExecutionContext`).
+
+The error type of the resulting effect will always be `Throwable`, because `Future` can only fail with values of type `Throwable`.
+
+## From Side-Effects
+
+ZIO can convert both synchronous and asynchronous side-effects into ZIO effects (pure values).
+
+These functions can be used to wrap procedural code, allowing you to seamlessly use all features of ZIO with legacy Scala and Java code, as well as third-party libraries.
+
+### Synchronous Side-Effects
+
+A synchronous side-effect can be converted into a ZIO effect using `ZIO.effect`:
+
+```scala
+import scala.io.StdIn
+
+val getStrLn: Task[String] =
+ ZIO.effect(StdIn.readLine())
+```
+
+The error type of the resulting effect will always be `Throwable`, because side-effects may throw exceptions with any value of type `Throwable`.
+
+If a given side-effect is known to not throw any exceptions, then the side-effect can be converted into a ZIO effect using `ZIO.effectTotal`:
+
+```scala
+def putStrLn(line: String): UIO[Unit] =
+ ZIO.effectTotal(println(line))
+```
+
+You should be careful when using `ZIO.effectTotal`—when in doubt about whether or not a side-effect is total, prefer `ZIO.effect` to convert the effect.
+
+If you wish to refine the error type of an effect (by treating other errors as fatal), then you can use the `ZIO#refineToOrDie` method:
+
+```scala
+import java.io.IOException
+
+val getStrLn2: IO[IOException, String] =
+ ZIO.effect(StdIn.readLine()).refineToOrDie[IOException]
+```
+
+### Asynchronous Side-Effects
+
+An asynchronous side-effect with a callback-based API can be converted into a ZIO effect using `ZIO.effectAsync`:
+
+
+```scala
+object legacy {
+ def login(
+ onSuccess: User => Unit,
+ onFailure: AuthError => Unit): Unit = ???
+}
+
+val login: IO[AuthError, User] =
+ IO.effectAsync[AuthError, User] { callback =>
+ legacy.login(
+ user => callback(IO.succeed(user)),
+ err => callback(IO.fail(err))
+ )
+ }
+```
+
+Asynchronous ZIO effects are much easier to use than callback-based APIs, and they benefit from ZIO features like interruption, resource-safety, and superior error handling.
+
+## Blocking Synchronous Side-Effects
+
+Some side-effects use blocking IO or otherwise put a thread into a waiting state. If not carefully managed, these side-effects can deplete threads from your application's main thread pool, resulting in work starvation.
+
+ZIO provides the `zio.blocking` package, which can be used to safely convert such blocking side-effects into ZIO effects.
+
+A blocking side-effect can be converted directly into a blocking ZIO effect with the `effectBlocking` method:
+
+```scala
+import zio.blocking._
+
+val sleeping =
+ effectBlocking(Thread.sleep(Long.MaxValue))
+```
+
+The resulting effect will be executed on a separate thread pool designed specifically for blocking effects.
+
+Blocking side-effects can be interrupted by invoking `Thread.interrupt` using the `effectBlockingInterrupt` method.
+
+Some blocking side-effects can only be interrupted by invoking a cancellation effect. You can convert these side-effects using the `effectBlockingCancelable` method:
+
+```scala
+import java.net.ServerSocket
+import zio.UIO
+
+def accept(l: ServerSocket) =
+ effectBlockingCancelable(l.accept())(UIO.effectTotal(l.close()))
+```
+
+If a side-effect has already been converted into a ZIO effect, then instead of `effectBlocking`, the `blocking` method can be used to ensure the effect will be executed on the blocking thread pool:
+
+```scala
+import scala.io.{ Codec, Source }
+
+def download(url: String) =
+ Task.effect {
+ Source.fromURL(url)(Codec.UTF8).mkString
+ }
+
+def safeDownload(url: String) =
+ blocking(download(url))
+```
+
+## Next Steps
+
+If you are comfortable creating effects from values, Scala data types, and side-effects, the next step is learning [basic operations](basic_operations.md) on effects.
diff --git a/website/versioned_docs/version-1.0.18/overview/handling_errors.md b/website/versioned_docs/version-1.0.18/overview/handling_errors.md
new file mode 100644
index 000000000000..82bd194cdca4
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/overview/handling_errors.md
@@ -0,0 +1,129 @@
+---
+id: overview_handling_errors
+title: "Handling Errors"
+---
+
+This section looks at some of the common ways to detect and respond to failure.
+
+
+## Either
+
+You can surface failures with `ZIO#either`, which takes a `ZIO[R, E, A]` and produces a `ZIO[R, Nothing, Either[E, A]]`.
+
+```scala
+val zeither: UIO[Either[String, Int]] =
+ IO.fail("Uh oh!").either
+```
+
+You can submerge failures with `ZIO.absolve`, which is the opposite of `either` and turns a `ZIO[R, Nothing, Either[E, A]]` into a `ZIO[R, E, A]`:
+
+```scala
+def sqrt(io: UIO[Double]): IO[String, Double] =
+ ZIO.absolve(
+ io.map(value =>
+ if (value < 0.0) Left("Value must be >= 0.0")
+ else Right(Math.sqrt(value))
+ )
+ )
+```
+
+## Catching All Errors
+
+If you want to catch and recover from all types of errors and effectfully attempt recovery, you can use the `catchAll` method:
+
+
+```scala
+val z: IO[IOException, Array[Byte]] =
+ openFile("primary.json").catchAll(_ =>
+ openFile("backup.json"))
+```
+
+In the callback passed to `catchAll`, you may return an effect with a different error type (or perhaps `Nothing`), which will be reflected in the type of effect returned by `catchAll`.
+
+## Catching Some Errors
+
+If you want to catch and recover from only some types of exceptions and effectfully attempt recovery, you can use the `catchSome` method:
+
+```scala
+val data: IO[IOException, Array[Byte]] =
+ openFile("primary.data").catchSome {
+ case _ : FileNotFoundException =>
+ openFile("backup.data")
+ }
+```
+
+Unlike `catchAll`, `catchSome` cannot reduce or eliminate the error type, although it can widen the error type to a broader class of errors.
+
+## Fallback
+
+You can try one effect, or, if it fails, try another effect, with the `orElse` combinator:
+
+```scala
+val primaryOrBackupData: IO[IOException, Array[Byte]] =
+ openFile("primary.data").orElse(openFile("backup.data"))
+```
+
+## Folding
+
+Scala's `Option` and `Either` data types have `fold`, which let you handle both failure and success at the same time. In a similar fashion, `ZIO` effects also have several methods that allow you to handle both failure and success.
+
+The first fold method, `fold`, lets you non-effectfully handle both failure and success, by supplying a non-effectful handler for each case:
+
+```scala
+lazy val DefaultData: Array[Byte] = Array(0, 0)
+
+val primaryOrDefaultData: UIO[Array[Byte]] =
+ openFile("primary.data").fold(
+ _ => DefaultData,
+ data => data)
+```
+
+The second fold method, `foldM`, lets you effectfully handle both failure and success, by supplying an effectful (but still pure) handler for each case:
+
+```scala
+val primaryOrSecondaryData: IO[IOException, Array[Byte]] =
+ openFile("primary.data").foldM(
+ _ => openFile("secondary.data"),
+ data => ZIO.succeed(data))
+```
+
+Nearly all error handling methods are defined in terms of `foldM`, because it is both powerful and fast.
+
+In the following example, `foldM` is used to handle both failure and success of the `readUrls` method:
+
+```scala
+val urls: UIO[Content] =
+ readUrls("urls.json").foldM(
+ error => IO.succeed(NoContent(error)),
+ success => fetchContent(success)
+ )
+```
+
+## Retrying
+
+There are a number of useful methods on the ZIO data type for retrying failed effects.
+
+The most basic of these is `ZIO#retry`, which takes a `Schedule` and returns a new effect that will retry the first effect if it fails, according to the specified policy:
+
+```scala
+import zio.clock._
+
+val retriedOpenFile: ZIO[Clock, IOException, Array[Byte]] =
+ openFile("primary.data").retry(Schedule.recurs(5))
+```
+
+The next most powerful function is `ZIO#retryOrElse`, which allows specification of a fallback to use, if the effect does not succeed with the specified policy:
+
+```scala
+ openFile("primary.data").retryOrElse(
+ Schedule.recurs(5),
+ (_, _) => ZIO.succeed(DefaultData))
+```
+
+The final method, `ZIO#retryOrElseEither`, allows returning a different type for the fallback.
+
+For more information on how to build schedules, see the documentation on [Schedule](../reference/misc/schedule.md).
+
+## Next Steps
+
+If you are comfortable with basic error handling, then the next step is to learn about safe [resource handling](handling_resources.md).
diff --git a/website/versioned_docs/version-1.0.18/overview/handling_resources.md b/website/versioned_docs/version-1.0.18/overview/handling_resources.md
new file mode 100644
index 000000000000..38476973ff0f
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/overview/handling_resources.md
@@ -0,0 +1,64 @@
+---
+id: overview_handling_resources
+title: "Handling Resources"
+---
+
+This section looks at some of the common ways to safely handle resources using ZIO.
+
+ZIO's resource management features work across synchronous, asynchronous, concurrent, and other effect types, and provide strong guarantees even in the presence of failure, interruption, or defects in the application.
+
+
+## Finalizing
+
+ZIO provides similar functionality to `try` / `finally` with the `ZIO#ensuring` method.
+
+Like `try` / `finally`, the `ensuring` operation guarantees that if an effect begins executing and then terminates (for whatever reason), then the finalizer will begin executing.
+
+```scala
+val finalizer =
+ UIO.effectTotal(println("Finalizing!"))
+// finalizer: UIO[Unit] = zio.ZIO$EffectTotal@65ccae4c
+
+val finalized: IO[String, Unit] =
+ IO.fail("Failed!").ensuring(finalizer)
+// finalized: IO[String, Unit] = zio.ZIO$CheckInterrupt@5071b0bd
+```
+
+The finalizer is not allowed to fail, which means that it must handle any errors internally.
+
+Like `try` / `finally`, finalizers can be nested, and the failure of any inner finalizer will not affect outer finalizers. Nested finalizers will be executed in reverse order, and linearly (not in parallel).
+
+Unlike `try` / `finally`, `ensuring` works across all types of effects, including asynchronous and concurrent effects.
+
+## Bracket
+
+A common use for `try` / `finally` is safely acquiring and releasing resources, such as new socket connections or opened files:
+
+```scala
+val handle = openFile(name)
+
+try {
+ processFile(handle)
+} finally closeFile(handle)
+```
+
+ZIO encapsulates this common pattern with `ZIO#bracket`, which allows you to specify an _acquire_ effect, which acquires a resource; a _release_ effect, which releases it; and a _use_ effect, which uses the resource.
+
+The release effect is guaranteed to be executed by the runtime system, even in the presence of errors or interruption.
+
+
+```scala
+val groupedFileData: IO[IOException, Unit] =
+ openFile("data.json").bracket(closeFile(_)) { file =>
+ for {
+ data <- decodeData(file)
+ grouped <- groupData(data)
+ } yield grouped
+ }
+```
+
+Like `ensuring`, brackets have compositional semantics, so if one bracket is nested inside another bracket, and the outer bracket acquires a resource, then the outer bracket's release will always be called, even if, for example, the inner bracket's release fails.
+
+## Next Steps
+
+If you are comfortable with resource handling, then the next step is to learn about [basic concurrency](basic_concurrency.md).
diff --git a/website/versioned_docs/version-1.0.18/overview/index.md b/website/versioned_docs/version-1.0.18/overview/index.md
new file mode 100644
index 000000000000..347f589b8283
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/overview/index.md
@@ -0,0 +1,52 @@
+---
+id: overview_index
+title: "Summary"
+---
+
+ZIO is a library for asynchronous and concurrent programming that is based on pure functional programming.
+
+> For background on how pure functional programming deals with effects like input and output, see the [Background](background.md) section.
+
+At the core of ZIO is `ZIO`, a powerful effect type inspired by Haskell's `IO` monad. This data type lets you solve complex problems with simple, type-safe, testable, and composable code.
+
+## ZIO
+
+The `ZIO[R, E, A]` data type has three type parameters:
+
+ - **`R` - Environment Type**. The effect requires an environment of type `R`. If this type parameter is `Any`, it means the effect has no requirements, because you can run the effect with any value (for example, the unit value `()`).
+ - **`E` - Failure Type**. The effect may fail with a value of type `E`. Some applications will use `Throwable`. If this type parameter is `Nothing`, it means the effect cannot fail, because there are no values of type `Nothing`.
+ - **`A` - Success Type**. The effect may succeed with a value of type `A`. If this type parameter is `Unit`, it means the effect produces no useful information, while if it is `Nothing`, it means the effect runs forever (or until failure).
+
+For example, an effect of type `ZIO[Any, IOException, Byte]` has no requirements, may fail with a value of type `IOException`, or may succeed with a value of type `Byte`.
+
+A value of type `ZIO[R, E, A]` is like an effectful version of the following function type:
+
+```scala
+R => Either[E, A]
+```
+
+This function, which requires an `R`, might produce either an `E`, representing failure, or an `A`, representing success. ZIO effects are not actually functions, of course, because they model complex effects, like asynchronous and concurrent effects.
+
+## Type Aliases
+
+The `ZIO` data type is the only effect type in ZIO. However, there are a family of type aliases and companion objects that simplify common cases:
+
+ - `UIO[A]` — This is a type alias for `ZIO[Any, Nothing, A]`, which represents an effect that has no requirements, and cannot fail, but can succeed with an `A`.
+ - `URIO[R, A]` — This is a type alias for `ZIO[R, Nothing, A]`, which represents an effect that requires an `R`, and cannot fail, but can succeed with an `A`.
+ - `Task[A]` — This is a type alias for `ZIO[Any, Throwable, A]`, which represents an effect that has no requirements, and may fail with a `Throwable` value, or succeed with an `A`.
+ - `RIO[R, A]` — This is a type alias for `ZIO[R, Throwable, A]`, which represents an effect that requires an `R`, and may fail with a `Throwable` value, or succeed with an `A`.
+ - `IO[E, A]` — This is a type alias for `ZIO[Any, E, A]`, which represents an effect that has no requirements, and may fail with an `E`, or succeed with an `A`.
+
+These type aliases all have companion objects, and these companion objects have methods that can be used to construct values of the appropriate type.
+
+If you are new to functional effects, we recommend starting with the `Task` type, which has a single type parameter, and corresponds most closely to the `Future` data type built into Scala's standard library.
+
+If you are using _Cats Effect_ libraries, you may find the `RIO` type useful, since it allows you to thread environments through third-party libraries and your application.
+
+No matter what type alias you use in your application, `UIO` can be useful for describing infallible effects, including those resulting from handling all errors.
+
+Finally, if you are an experienced functional programmer, then direct use of the `ZIO` data type is recommended, although you may find it useful to create your own family of type aliases in different parts of your application.
+
+## Next Steps
+
+If you are comfortable with the ZIO data type, and its family of type aliases, the next step is learning how to [create effects](creating_effects.md).
diff --git a/website/versioned_docs/version-1.0.18/overview/performance.md b/website/versioned_docs/version-1.0.18/overview/performance.md
new file mode 100644
index 000000000000..19c9d5c4af5f
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/overview/performance.md
@@ -0,0 +1,10 @@
+---
+id: overview_performance
+title: "Performance"
+---
+
+`zio` has excellent performance, featuring a hand-optimized, low-level interpreter that achieves zero allocations for right-associated binds, and minimal allocations for left-associated binds.
+
+The `benchmarks` project may be used to compare `IO` with other effect monads, including `Future` (which is not an effect monad but is included for reference), Monix `Task`, and Cats `IO`.
+
+As of the time of this writing, `IO` is significantly faster than or at least comparable to all other purely functional solutions.
diff --git a/website/versioned_docs/version-1.0.18/overview/platforms.md b/website/versioned_docs/version-1.0.18/overview/platforms.md
new file mode 100644
index 000000000000..0aa5b07bd9ea
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/overview/platforms.md
@@ -0,0 +1,34 @@
+---
+id: overview_platforms
+title: "Platforms"
+---
+
+ZIO provides a consistent interface across platforms to the maximum extent possible, allowing developers to write code once and deploy it everywhere. However, there are some unavoidable differences between platforms to be aware of.
+
+## JVM
+
+ZIO supports Java versions 8 and above and Scala versions 2.11, 2.12, 2.13, and Dotty.
+
+On the JVM, the `Blocking` service is available to lock effects on the blocking thread pool and is included as part of the `ZEnv`. See the documentation on [Creating Effects](../overview/creating_effects.md) for further discussion on blocking synchronous side-effects.
+
+## Scala.js
+
+ZIO supports Scala.js 1.0.
+
+While ZIO is a zero dependency library, some basic capabilities of the platform are assumed. In particular, due to the absence of implementations for certain `java.time` methods in Scala.js, users must bring their own `java.time` dependency. The one used by ZIO in its own internal test suites is [scala-java-time](https://github.com/cquiroz/scala-java-time). It can be added as a dependency like so:
+
+```scala
+libraryDependencies ++= Seq(
+ "io.github.cquiroz" %%% "scala-java-time" % "2.2.0",
+ "io.github.cquiroz" %%% "scala-java-time-tzdb" % "2.2.0"
+)
+```
+
+Because of its single threaded execution model, blocking operations are not supported on Scala.js. As such, the `Blocking` service is not available and is not included in the `ZEnv`. In addition, several other methods are not supported or are unsafe on Scala.js:
+
+* The `readLine` method in the `Console` service is not supported because reading a line from the console blocks until input is received and the underlying method from the Scala standard library is not implemented on Scala.js.
+* The `unsafeRun`, `unsafeRunTask`, and `unsafeRunSync` methods on `Runtime` are not safe. All of these methods return a value synchronously and may require blocking if the effect includes asynchronous steps, including yield points introduced by the runtime to guarantee fairness. Users should use the `unsafeRunAsync`, `unsafeRunAsync_`, or `unsafeRunToFuture` methods instead.
+
+## Scala Native
+
+Support for Scala Native is currently experimental. More details will be added regarding support for the Scala Native platform when they are available.
diff --git a/website/versioned_docs/version-1.0.18/overview/running_effects.md b/website/versioned_docs/version-1.0.18/overview/running_effects.md
new file mode 100644
index 000000000000..b116c65d941b
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/overview/running_effects.md
@@ -0,0 +1,85 @@
+---
+id: overview_running_effects
+title: "Running Effects"
+---
+
+ZIO provides several different ways of running effects in your application.
+
+## App
+
+If you construct a single effect for your whole program, then the most natural way to run the effect is to extend `zio.App`.
+
+This class provides Scala with a main function, so it can be called from IDEs and launched from the command-line. All you have to do is implement the `run` method, which will be passed all command-line arguments in a `List`:
+
+```scala
+import zio._
+import zio.console._
+
+object MyApp extends zio.App {
+
+ def run(args: List[String]) =
+ myAppLogic.exitCode
+
+ val myAppLogic =
+ for {
+ _ <- putStrLn("Hello! What is your name?")
+ name <- getStrLn
+ _ <- putStrLn(s"Hello, ${name}, welcome to ZIO!")
+ } yield ()
+}
+```
+
+If you are using a custom environment for your application, you will have to supply your environment to the effect (using `ZIO#provide`) before you return it from `run`, because `App` does not know how to supply custom environments.
+
+## Default Runtime
+
+Most applications are not greenfield, and must integrate with legacy code, and procedural libraries and frameworks.
+
+In these cases, a better solution for running effects is to create a `Runtime`, which can be passed around and used to run effects wherever required.
+
+ZIO contains a default runtime called `Runtime.default`. This `Runtime` bundles together production implementations of all ZIO modules (including `Console`, `System`, `Clock`, `Random`, `Scheduler`, and on the JVM, `Blocking`), and it can run effects that require any combination of these modules.
+
+To access it, merely use
+
+```scala
+val runtime = Runtime.default
+```
+
+Once you have a runtime, you can use it to execute effects:
+
+```scala
+runtime.unsafeRun(ZIO(println("Hello World!")))
+```
+
+In addition to the `unsafeRun` method, there are other methods that allow executing effects asynchronously or into `Future` values.
+
+## Custom Runtime
+
+If you are using a custom environment for your application, then you may find it useful to create a `Runtime` specifically tailored for that environment.
+
+A custom `Runtime[R]` can be created with two values:
+
+ - **`R` Environment**. This is the environment that will be provided to effects when they are executed.
+ - **`Platform`**. This is a platform that is required by ZIO in order to bootstrap the runtime system.
+
+For example, the following creates a `Runtime` that can provide an `Int` to effects, using the default `Platform` provided by ZIO:
+
+```scala
+import zio.internal.Platform
+
+val myRuntime: Runtime[Int] = Runtime(42, Platform.default)
+```
+
+## Error Reporting
+
+In the `Platform` that is a part of every runtime, there is an error reporter that will be called by ZIO to report every unhandled error. It is a good idea to supply your own error reporter, which can log unhandled errors to a file.
+
+The default unhandled error reporter merely logs the error to standard error.
+
+## Next Steps
+
+If you are comfortable with running effects, then congratulations!
+
+You are now ready to dive into other sections on the ZIO website, covering data types, use cases, and interop with other systems.
+
+Refer to the Scaladoc for detailed documentation on all the core ZIO types and methods.
\ No newline at end of file
diff --git a/website/versioned_docs/version-1.0.18/overview/testing_effects.md b/website/versioned_docs/version-1.0.18/overview/testing_effects.md
new file mode 100644
index 000000000000..9f69c73a2b90
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/overview/testing_effects.md
@@ -0,0 +1,211 @@
+---
+id: overview_testing_effects
+title: "Testing Effects"
+---
+
+There are many approaches to testing functional effects, including using free monads, using tagless-final, and using environmental effects. Although all of these approaches are compatible with ZIO, the simplest and most ergonomic is _environmental effects_.
+
+This section introduces environmental effects and shows you how to write testable functional code using them.
+
+
+## Environments
+
+The ZIO data type has an `R` type parameter, which is used to describe the type of _environment_ required by the effect.
+
+ZIO effects can access the environment using `ZIO.environment`, which provides direct access to the environment, as a value of type `R`.
+
+```scala
+for {
+ env <- ZIO.environment[Int]
+ _ <- putStrLn(s"The value of the environment is: $env")
+} yield env
+```
+
+The environment does not have to be a primitive value like an integer. It can be much more complex, like a `trait` or `case class`.
+
+When the environment is a type with fields, then the `ZIO.access` method can be used to access a given part of the environment in a single method call:
+
+```scala
+final case class Config(server: String, port: Int)
+
+val configString: URIO[Config, String] =
+ for {
+ server <- ZIO.access[Config](_.server)
+ port <- ZIO.access[Config](_.port)
+ } yield s"Server: $server, port: $port"
+```
+
+Even effects themselves can be stored in the environment! In this case, to access and execute an effect, the `ZIO.accessM` method can be used.
+
+```scala
+trait DatabaseOps {
+ def getTableNames: Task[List[String]]
+ def getColumnNames(table: String): Task[List[String]]
+}
+
+val tablesAndColumns: ZIO[DatabaseOps, Throwable, (List[String], List[String])] =
+ for {
+ tables <- ZIO.accessM[DatabaseOps](_.getTableNames)
+ columns <- ZIO.accessM[DatabaseOps](_.getColumnNames("user_table"))
+ } yield (tables, columns)
+```
+
+When an effect is accessed from the environment, as in the preceding example, the effect is called an _environmental effect_.
+
+Later, we'll see how environmental effects provide an easy way to test ZIO applications.
+
+### Providing Environments
+
+Effects that require an environment cannot be run without first _providing_ their environment to them.
+
+The simplest way to provide an effect the environment that it requires is to use the `ZIO#provide` method:
+
+```scala
+val square: URIO[Int, Int] =
+ for {
+ env <- ZIO.environment[Int]
+ } yield env * env
+
+val result: UIO[Int] = square.provide(42)
+```
+
+Once you provide an effect with the environment it requires, then you get back an effect whose environment type is `Any`, indicating its requirements have been fully satisfied.
+
+The combination of `ZIO.accessM` and `ZIO#provide` are all that is necessary to fully use environmental effects for easy testability.
+
+## Environmental Effects
+
+The fundamental idea behind environmental effects is to _program to an interface, not an implementation_. In the case of functional Scala, interfaces do not contain any methods that perform side-effects, although they may contain methods that return _functional effects_.
+
+Rather than passing interfaces throughout our code base manually, injecting them using dependency injection, or threading them using incoherent implicits, we use _ZIO Environment_ to do the heavy lifting, which results in elegant, inferrable, and painless code.
+
+In this section, we'll explore how to use environmental effects by developing a testable database service.
+
+### Define the Service
+
+We will define the database service with the help of a module, which is an interface that contains only a single field, which provides access to the service.
+
+
+```scala
+object Database {
+ trait Service {
+ def lookup(id: UserID): Task[UserProfile]
+ def update(id: UserID, profile: UserProfile): Task[Unit]
+ }
+}
+trait Database {
+ def database: Database.Service
+}
+```
+
+In this example, `Database` is the _module_, which contains the `Database.Service` _service_. The _service_ is just an ordinary interface, placed inside the companion object of the module, which contains functions that provide the _capabilities_ of the service.
+
+### Provide Helpers
+
+In order to make it easier to access the database service as an environmental effect, we will define helper functions that use `ZIO.accessM`.
+
+```scala
+object db {
+ def lookup(id: UserID): RIO[Database, UserProfile] =
+ ZIO.accessM(_.database.lookup(id))
+
+ def update(id: UserID, profile: UserProfile): RIO[Database, Unit] =
+ ZIO.accessM(_.database.update(id, profile))
+}
+```
+
+While these helpers are not required, because we can access the database module directly through `ZIO.accessM`, the helpers are easy to write and make use-site code simpler.
+
+### Use the Service
+
+Now that we have defined a module and helper functions, we are now ready to build an example that uses the database service:
+
+```scala
+val lookedupProfile: RIO[Database, UserProfile] =
+ for {
+ profile <- db.lookup(userId)
+ } yield profile
+```
+
+The effect in this example interacts with the database solely through the environment, which in this case, is a module that provides access to the database service.
+
+To actually run such an effect, we need to provide an implementation of the database module.
+
+### Implement Live Service
+
+Now we will implement a live database module, which will actually interact with our production database:
+
+```scala
+trait DatabaseLive extends Database {
+ def database: Database.Service =
+ new Database.Service {
+ def lookup(id: UserID): Task[UserProfile] = ???
+ def update(id: UserID, profile: UserProfile): Task[Unit] = ???
+ }
+}
+object DatabaseLive extends DatabaseLive
+```
+
+In the preceding snippet, the implementation of the two database methods is not provided, because that would require details beyond the scope of this tutorial.
+
+### Run the Database Effect
+
+We now have a database module, helpers to interact with the database module, and a live implementation of the database module.
+
+We can now provide the live database module to our application, using `ZIO#provide`:
+
+```scala
+def main: RIO[Database, Unit] = ???
+
+def main2: Task[Unit] =
+ main.provide(DatabaseLive)
+```
+
+The resulting effect has no requirements, so it can now be executed with a ZIO runtime.
+
+### Implement Test Service
+
+To test code that interacts with the database, we don't want to interact with a real database, because our tests would be slow and brittle, and fail randomly even when our application logic is correct.
+
+Although you can use mocking libraries to create test modules, in this section, we will simply create a test module directly, to show that no magic is involved:
+
+```scala
+class TestService extends Database.Service {
+ private var map: Map[UserID, UserProfile] = Map()
+
+ def setTestData(map0: Map[UserID, UserProfile]): Task[Unit] =
+ Task { map = map0 }
+
+ def getTestData: Task[Map[UserID, UserProfile]] =
+ Task(map)
+
+ def lookup(id: UserID): Task[UserProfile] =
+ Task(map(id))
+
+ def update(id: UserID, profile: UserProfile): Task[Unit] =
+ Task.effect { map = map + (id -> profile) }
+}
+trait TestDatabase extends Database {
+ val database: TestService = new TestService
+}
+object TestDatabase extends TestDatabase
+```
+
+Because this module will only be used in tests, it simulates interaction with a database by extracting and updating data in a hard-coded map. To make this module fiber-safe, you could instead use a `Ref` and not a `var` to hold the map.
+
+### Test Database Code
+
+To test code that requires the database, we need only provide it with our test database module:
+
+```scala
+def code: RIO[Database, Unit] = ???
+
+def code2: Task[Unit] =
+ code.provide(TestDatabase)
+```
+
+Our application code can work with either our production database module, or the test database module.
+
+## Next Steps
+
+If you are comfortable with testing effects, then the next step is to learn about [running effects](running_effects.md).
diff --git a/website/versioned_docs/version-1.0.18/reference/concurrency/hub.md b/website/versioned_docs/version-1.0.18/reference/concurrency/hub.md
new file mode 100644
index 000000000000..cd911b704857
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/concurrency/hub.md
@@ -0,0 +1,417 @@
+---
+id: hub
+title: "Hub"
+---
+
+A `Hub[A]` is an asynchronous message hub. Publishers can publish messages of type `A` to the hub and subscribers can subscribe to receive messages of type `A` from the hub.
+
+Unlike a `Queue`, where each value offered to the queue can be taken by _one_ taker, each value published to a hub can be received by _all_ subscribers. Whereas a `Queue` represents the optimal solution to the problem of how to _distribute_ values, a `Hub` represents the optimal solution to the problem of how to _broadcast_ them.
+
+The fundamental operators on a `Hub` are `publish` and `subscribe`:
+
+```scala
+import zio._
+
+trait Hub[A] {
+ def publish(a: A): UIO[Boolean]
+ def subscribe: ZManaged[Any, Nothing, Dequeue[A]]
+}
+```
+
+The `publish` operator returns a `ZIO` effect that publishes a message of type `A` to the hub and succeeds with a value describing whether the message was successfully published to the hub.
+
+The `subscribe` operator returns a `ZManaged` effect where the `acquire` action of the `ZManaged` subscribes to the hub and the `release` action unsubscribes from the hub. Within the context of the `ZManaged` we have access to a `Dequeue`, which is a `Queue` that can only be dequeued from, that allows us to take messages published to the hub.
+
+For example, we can use a hub to broadcast a message to multiple subscribers like this:
+
+```scala
+Hub.bounded[String](2).flatMap { hub =>
+ hub.subscribe.zip(hub.subscribe).use { case (left, right) =>
+ for {
+ _ <- hub.publish("Hello from a hub!")
+ _ <- left.take.flatMap(console.putStrLn(_))
+ _ <- right.take.flatMap(console.putStrLn(_))
+ } yield ()
+ }
+}
+```
+
+A subscriber will only receive messages that are published to the hub while it is subscribed. So if we want to make sure that a particular message is received by a subscriber we must take care that the subscription has completed before publishing the message to the hub.
+
+We can do this by publishing a message to the hub within the scope of the subscription as in the example above or by using other coordination mechanisms such as completing a `Promise` when the `acquire` action of the `ZManaged` has completed.
+
+Of course, in many cases such as subscribing to receive real time data we may not care about this because we are happy to just pick up with the most recent messages after we have subscribed. But for testing and simple applications this can be an important point to keep in mind.
+
+## Constructing Hubs
+
+The most common way to create a hub is with the `bounded` constructor, which returns an effect that creates a new hub with the specified requested capacity.
+
+```scala
+def bounded[A](requestedCapacity: Int): UIO[Hub[A]] =
+ ???
+```
+
+For maximum efficiency you should create hubs with capacities that are powers of two.
+
+Just like a bounded queue, a bounded hub applies back pressure to publishers when it is at capacity, so publishers will semantically block on calls to `publish` if the hub is full.
+
+The advantage of the back pressure strategy is that it guarantees that all subscribers will receive all messages published to the hub while they are subscribed. However, it does create the risk that a slow subscriber will slow down the rate at which messages are published and received by other subscribers.
+
+If you do not want this you can create a hub with the `dropping` constructor.
+
+```scala
+def dropping[A](requestedCapacity: Int): UIO[Hub[A]] =
+ ???
+```
+
+A dropping hub will simply drop values published to it if the hub is at capacity, returning `false` on calls to `publish` if the hub is full to signal that the value was not successfully published.
+
+The advantage of the dropping strategy is that publishers can continue to publish new values so when there is space in the hub the newest values can be published to the hub. However, subscribers are no longer guaranteed to receive all values published to the hub and a slow subscriber can still prevent messages from being published to the hub and received by other subscribers.
+
+You can also create a hub with the `sliding` constructor.
+
+```scala
+def sliding[A](requestedCapacity: Int): UIO[Hub[A]] =
+ ???
+```
+
+A sliding hub will drop the oldest value if a new value is published to it and the hub is at capacity, so publishing will always succeed immediately.
+
+The advantage of the sliding strategy is that a slow subscriber cannot slow down the rate at which messages are published to the hub or received by other subscribers. However, it creates the risk that slow subscribers may not receive all messages published to the hub.
+
+Finally, you can create a hub with the `unbounded` constructor.
+
+```scala
+def unbounded[A]: UIO[Hub[A]] =
+ ???
+```
+
+An unbounded hub is never at capacity so publishing to an unbounded hub always immediately succeeds.
+
+The advantage of an unbounded hub is that it combines the guarantees that all subscribers will receive all messages published to the hub and that a slow subscriber will not slow down the rate at which messages are published and received by other subscribers. However, it does this at the cost of potentially growing without bound if messages are published to the hub more quickly than they are taken by the slowest subscriber.
+
+In general you should prefer bounded, dropping, or sliding hubs for this reason. However, unbounded hubs can be useful in certain situations where you do not know exactly how many values will be published to the hub but are confident that it will not exceed a reasonable size or want to handle that concern at a higher level of your application.
+
+## Operators On Hubs
+
+In addition to `publish` and `subscribe`, many of the same operators that are available on queues are available on hubs.
+
+We can publish multiple values to the hub using the `publishAll` operator.
+
+```scala
+trait Hub[A] {
+ def publishAll(as: Iterable[A]): UIO[Boolean]
+}
+```
+
+We can check the capacity of the hub as well as the number of messages currently in the hub using the `size` and `capacity` operators.
+
+```scala
+trait Hub[A] {
+ def capacity: Int
+ def size: UIO[Int]
+}
+```
+
+Note that `capacity` returns an `Int` because the capacity is set at hub creation and never changes. In contrast, `size` returns a `ZIO` effect that determines the current size of the hub since the number of messages in the hub can change over time.
+
+We can also shut down the hub, check whether it has been shut down, or await its shut down. Shutting down a hub will shut down all the queues associated with subscriptions to the hub, properly propagating the shut down signal.
+
+```scala
+trait Hub[A] {
+ def awaitShutdown: UIO[Unit]
+ def isShutdown: UIO[Boolean]
+ def shutdown: UIO[Unit]
+}
+```
+
+As you can see, the operators on `Hub` are identical to the ones on `Queue` with the exception of `publish` and `subscribe` replacing `offer` and `take`. So if you know how to use a `Queue` you already know how to use a `Hub`.
+
+In fact, a `Hub` can be viewed as a `Queue` that can only be written to.
+
+```scala
+trait Hub[A] {
+ def toQueue: Enqueue[A]
+}
+```
+
+Here the `Enqueue` type represents a queue that can only be enqueued. Enqueueing to the queue publishes a value to the hub, shutting down the queue shuts down the hub, and so on.
+
+This can be extremely useful because it allows us to use a `Hub` anywhere we are currently using a `Queue` that we only write to.
+
+For example, say we are using the `into` operator on `ZStream` to send all elements of a stream of financial transactions to a `Queue` for processing by a downstream consumer.
+
+```scala
+import zio.stream._
+
+trait ZStream[-R, +E, +O] {
+ def into[R1 <: R, E1 >: E](
+ queue: ZEnqueue[R1, Nothing, Take[E1, O]]
+ ): ZIO[R1, E1, Unit]
+}
+```
+
+We would now like to have multiple downstream consumers process each of these transactions, for example to persist them and log them in addition to applying our business logic to them. With `Hub` this is easy because we can just use the `toQueue` operator to view any `Hub` as a `Queue` that can only be written to.
+
+
+```scala
+type Transaction = ???
+
+val transactionStream: ZStream[Any, Nothing, Transaction] =
+ ???
+
+val hub: Hub[Transaction] =
+ ???
+
+transactionStream.into(hub.toQueue)
+```
+
+All of the elements from the transaction stream will now be published to the hub. We can now have multiple downstream consumers process elements from the financial transactions stream with the guarantee that all downstream consumers will see all transactions in the stream, changing the topology of our data flow from one-to-one to one-to-many with a single line change.
+
+## Polymorphic Hubs
+
+Like many of the other data structures in ZIO, a `Hub` is actually a type alias for a more polymorphic data structure called a `ZHub`.
+
+```scala
+trait ZHub[-RA, -RB, +EA, +EB, -A, +B] {
+ def publish(a: A): ZIO[RA, EA, Boolean]
+ def subscribe: ZManaged[Any, Nothing, ZDequeue[RB, EB, B]]
+}
+
+type Hub[A] = ZHub[Any, Any, Nothing, Nothing, A, A]
+```
+
+A `ZHub` allows publishers to publish messages of type `A` to the hub and subscribers to subscribe to receive messages of type `B` from the hub. Publishing messages to the hub can require an environment of type `RA` and fail with an error of type `EA` and taking messages from the hub can require an environment of type `RB` and fail with an error of type `EB`.
+
+Defining hubs polymorphically like this allows us to describe hubs that potentially transform their inputs or outputs in some way.
+
+To create a polymorphic hub we begin with a normal hub as described above and then add logic to it for transforming its inputs or outputs.
+
+We can transform the type of messages received from the hub using the `map` and `mapM` operators.
+
+```scala
+trait ZHub[-RA, -RB, +EA, +EB, -A, +B] {
+ def map[C](f: B => C): ZHub[RA, RB, EA, EB, A, C]
+ def mapM[RC <: RB, EC >: EB, C](f: B => ZIO[RC, EC, C]): ZHub[RA, RC, EA, EC, A, C]
+}
+```
+
+The `map` operator allows us to transform the type of messages received from the hub with the specified function. Conceptually, every time a message is taken from the hub by a subscriber it will first be transformed with the function `f` before being received by the subscriber.
+
+The `mapM` operator works the same way except it allows us to perform an effect each time a value is taken from the hub. We could use this for example to log each time a message is taken from the hub.
+
+
+```scala
+import zio.clock._
+
+val hub: Hub[Int] = ???
+
+val hubWithLogging: ZHub[Any, Clock with Console, Nothing, Nothing, Int, Int] =
+ hub.mapM { n =>
+ clock.currentDateTime.orDie.flatMap { currentDateTime =>
+ console.putStrLn(s"Took message $n from the hub at $currentDateTime").orDie
+ }.as(n)
+ }
+```
+
+Note that the specified function in `map` or `mapM` will be applied each time a message is taken from the hub by a subscriber. Thus, if there are `n` subscribers to the hub the function will be evaluated `n` times for each message published to the hub.
+
+This can be useful if we want to, for example, observe the different times that different subscribers are taking messages from the hub as in the example above. However, it is less efficient if we want to apply a transformation once for each value published to the hub.
+
+For this we can use the `contramap` and `contramapM` operators defined on `ZHub`.
+
+```scala
+trait ZHub[-RA, -RB, +EA, +EB, -A, +B] {
+ def contramap[C](
+ f: C => A
+ ): ZHub[RA, RB, EA, EB, C, B]
+ def contramapM[RC <: RA, EC >: EA, C](
+ f: C => ZIO[RC, EC, A]
+ ): ZHub[RC, RB, EC, EB, C, B]
+}
+```
+
+The `contramap` operator allows us to transform each value published to the hub by applying the specified function. Conceptually it returns a new hub where every time we publish a value we first transform it with the specified function before publishing it to the original hub.
+
+The `contramapM` operator works the same way except it allows us to perform an effect each time a message is published to the hub.
+
+Using these operators, we could describe a hub that validates its inputs, allowing publishers to publish raw data and subscribers to receive validated data while signaling to publishers when data they attempt to publish is not valid.
+
+
+```scala
+import zio.clock._
+
+val hub: Hub[Int] = ???
+
+val hubWithValidation: ZHub[Any, Any, String, Nothing, String, Int] =
+ hub.contramapM { (s: String) =>
+ ZIO.effect(s.toInt).orElseFail(s"$s is not a valid message")
+ }
+```
+
+We can also transform inputs and outputs at the same time using the `dimap` or `dimapM` operators.
+
+```scala
+trait ZHub[-RA, -RB, +EA, +EB, -A, +B] {
+ def dimap[C, D](
+ f: C => A,
+ g: B => D
+ ): ZHub[RA, RB, EA, EB, C, D]
+ def dimapM[RC <: RA, RD <: RB, EC >: EA, ED >: EB, C, D](
+ f: C => ZIO[RC, EC, A],
+ g: B => ZIO[RD, ED, D]
+ ): ZHub[RC, RD, EC, ED, C, D]
+}
+```
+
+These correspond to transforming the inputs and outputs of a hub at the same time using the specified functions. This is the same as transforming the outputs with `map` or `mapM` and the inputs with `contramap` or `contramapM`.
+
+In addition to just transforming the inputs and outputs of a hub we can also filter the inputs or outputs of a hub.
+
+```scala
+trait ZHub[-RA, -RB, +EA, +EB, -A, +B] {
+ def filterInput[A1 <: A](
+ f: A1 => Boolean
+ ): ZHub[RA, RB, EA, EB, A1, B]
+ def filterInputM[RA1 <: RA, EA1 >: EA, A1 <: A](
+ f: A1 => ZIO[RA1, EA1, Boolean]
+ ): ZHub[RA1, RB, EA1, EB, A1, B]
+ def filterOutput(
+ f: B => Boolean
+ ): ZHub[RA, RB, EA, EB, A, B]
+ def filterOutputM[RB1 <: RB, EB1 >: EB](
+ f: B => ZIO[RB1, EB1, Boolean]
+ ): ZHub[RA, RB1, EA, EB1, A, B]
+}
+```
+
+Filtering the inputs to a hub conceptually "throws away" messages that do not meet the filter predicate before they are published to the hub. The `publish` operator will return `false` to signal that such a message was not successfully published to the hub.
+
+Similarly, filtering the outputs from a hub causes subscribers to ignore messages that do not meet the filter predicate, continuing to take messages from the hub until they find one that does meet the filter predicate.
+
+We could, for example, create a hub that only handles tweets containing a particular term.
+
+```scala
+final case class Tweet(text: String)
+
+val hub: Hub[Tweet] = ???
+
+val zioHub: Hub[Tweet] =
+ hub.filterInput(_.text.contains("zio"))
+```
+
+In most cases the hubs we work with in practice will be monomorphic hubs and we will use the hub purely to broadcast values, performing any necessary effects before publishing values to the hub or after taking values from the hub. But it is nice to know that we have this kind of power if we need it.
+
+## Hubs And Streams
+
+Hubs play extremely well with streams.
+
+We can create a `ZStream` from a subscription to a hub using the `fromHub` operator.
+
+```scala
+import zio.stream._
+
+object ZStream {
+ def fromHub[R, E, O](hub: ZHub[Nothing, R, Any, E, Nothing, O]): ZStream[R, E, O] =
+ ???
+}
+```
+
+This will return a stream that subscribes to receive values from a hub and then emits every value published to the hub while the subscription is active. When the stream ends the subscriber will automatically be unsubscribed from the hub.
+
+There is also a `fromHubManaged` operator that returns the stream in the context of a managed effect.
+
+```scala
+object ZStream {
+ def fromHubManaged[R, E, O](
+ hub: ZHub[Nothing, R, Any, E, Nothing, O]
+ ): ZManaged[Any, Nothing, ZStream[R, E, O]] =
+ ???
+}
+```
+
+The managed effect here describes subscribing to receive messages from the hub while the stream describes taking messages from the hub. This can be useful when we need to ensure that a consumer has subscribed before a producer begins publishing values.
+
+Here is an example of using it:
+
+
+```scala
+for {
+ promise <- Promise.make[Nothing, Unit]
+ hub <- Hub.bounded[String](2)
+ managed = ZStream.fromHubManaged(hub).tapM(_ => promise.succeed(()))
+ stream = ZStream.unwrapManaged(managed)
+ fiber <- stream.take(2).runCollect.fork
+ _ <- promise.await
+ _ <- hub.publish("Hello")
+ _ <- hub.publish("World")
+ _ <- fiber.join
+} yield ()
+```
+
+Notice that in this case we used a `Promise` to ensure that the subscription had completed before publishing to the hub. The `ZManaged` in the return type of `fromHubManaged` made it easy for us to signal when the subscription had occurred by using `tapM` and completing the `Promise`.
+
+Of course in many real applications we don't need this kind of sequencing and just want to subscribe to receive new messages. In this case we can use the `fromHub` operator to return a `ZStream` that will automatically handle subscribing and unsubscribing for us.
+
+There is also a `fromHubWithShutdown` variant that shuts down the hub itself when the stream ends. This is useful when the stream represents your main application logic and you want to shut down other subscriptions to the hub when the stream ends.
+
+Each of these constructors also has `Chunk` variants, `fromChunkHub` and `fromChunkHubWithShutdown`, that allow you to preserve the chunked structure of data when working with hubs and streams.
+
+In addition to being able to create streams from subscriptions to hubs, there are a variety of ways to send values emitted by streams to hubs to build more complex data flow graphs.
+
+The simplest of these is the `toHub` operator, which constructs a new hub and publishes each element emitted by the stream to that hub.
+
+```scala
+trait ZStream[-R, +E, +O] {
+ def toHub(
+ capacity: Int
+ ): ZManaged[R, Nothing, ZHub[Nothing, Any, Any, Nothing, Nothing, Take[E, O]]]
+}
+```
+
+The hub will be constructed with the `bounded` constructor using the specified capacity.
+
+If you want to send values emitted by a stream to an existing hub or a hub created using one of the other hub constructors you can use the `intoHub` operator.
+
+```scala
+trait ZStream[-R, +E, +O] {
+ def intoHub[R1 <: R, E1 >: E](
+ hub: ZHub[R1, Nothing, Nothing, Any, Take[E1, O], Any]
+ ): ZIO[R1, E1, Unit]
+}
+```
+
+There is an `intoHubManaged` variant of this if you want to send values to the hub in the context of a `ZManaged` instead of a `ZIO` effect.
+
+You can also create a sink that sends values to a hub.
+
+```scala
+object ZSink {
+ def fromHub[R, E, I](
+ hub: ZHub[R, Nothing, E, Any, I, Any]
+ ): ZSink[R, E, I, Nothing, Unit] =
+ ???
+}
+```
+
+The sink will publish each value sent to the sink to the specified hub. Again there is a `fromHubWithShutdown` variant that will shut down the hub when the stream ends.
+
+Finally, `ZHub` is used internally to provide a highly efficient implementation of the `broadcast` family of operators, including `broadcast` and `broadcastDynamic`.
+
+```scala
+trait ZStream[-R, +E, +O] {
+ def broadcast(
+ n: Int,
+ maximumLag: Int
+ ): ZManaged[R, Nothing, List[ZStream[Any, E, O]]]
+ def broadcastDynamic(
+ maximumLag: Int
+ ): ZManaged[R, Nothing, ZManaged[Any, Nothing, ZStream[Any, E, O]]]
+}
+```
+
+The `broadcast` operator generates the specified number of new streams and broadcasts each value from the original stream to each of the new streams. The `broadcastDynamic` operator returns a new `ZManaged` value that you can use to dynamically subscribe and unsubscribe to receive values broadcast from the original stream.
+
+You don't have to do anything with `ZHub` to take advantage of these operators other than enjoy their optimized implementation in terms of `ZHub`.
+
+With `broadcast` and other `ZStream` operators that model distributing values to different streams and combining values from different streams it is straightforward to build complex data flow graphs, all while being as performant as possible.
diff --git a/website/versioned_docs/version-1.0.18/reference/concurrency/index.md b/website/versioned_docs/version-1.0.18/reference/concurrency/index.md
new file mode 100644
index 000000000000..26abfb211362
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/concurrency/index.md
@@ -0,0 +1,96 @@
+---
+id: index
+title: "Introduction"
+---
+
+## Overview
+
+Most of the time, in concurrent programming we have a single state that we need to read and update concurrently. When we have multiple fibers reading or writing to the same memory location we encounter race conditions. The main goal in every concurrent program is to have a consistent view of state among all threads.
+
+There are two major concurrency models which try to solve this problem:
+
+1. **Shared State** — In this model, all threads communicate with each other by sharing the same memory location.
+
+2. **Message Passing (Distributed State)** — This model provides primitives for sending and receiving messages, and the state is distributed. Each thread of execution has its own state.
+
+The _Shared Memory_ model has two main solutions:
+
+1. **Lock-Based** — In the locking model, the general primitives for synchronization are _locks_, which control access to critical sections. When a thread wants to modify the critical section, it acquires the lock and says "I'm the only thread that is allowed to modify the state right now"; after its work is finished it unlocks the critical section and says "I'm done, all other threads can modify this memory section."
+
+2. **Non-blocking** — Non-blocking algorithms usually use hardware-intrinsic atomic operations like `compare-and-swap` (CAS), without using any locks. This method follows an optimistic design with a transactional memory mechanism to roll back in conflict situations.
+
+## Implication of Locking Mechanism
+
+There are lots of drawbacks with lock-based concurrency:
+
+1. Incorrect use of locks can lead to deadlock. We need to care about the locking orders. If we don't place the locks in the right order, we may encounter a deadlock situation.
+
+2. Identifying the critical section of a code that is vulnerable to race conditions is overwhelming. We should always care about them and remember to lock everywhere it's required.
+
+3. It makes our software design very sophisticated to become scalable and reliable. It doesn't scale with program size and complexity.
+
+4. To prevent missing the releasing of the acquired locks, we should always care about exceptions and error handling inside locking sections.
+
+5. The locking mechanism violates the encapsulation property of our pieces of programs. So systems that are built with a locking mechanism are difficult to compose without knowing about their internals.
+
+## Lock-free Concurrency Model
+
+As the lock-oriented programming does not compose and has lots of drawbacks, ZIO uses a _lock-free concurrency model_ which is a variation of non-blocking algorithms. The magic behind all of ZIO concurrency primitives is that they use CAS (_compare-and-set_) operation.
+
+Let's see how the `modify` function of `Ref` is implemented without any locking mechanism:
+
+
+```scala
+ final case class Ref[A](value: AtomicReference[A]) { self =>
+ def modify[B](f: A => (B, A)): UIO[B] = UIO.effectTotal {
+ var loop = true
+ var b: B = null.asInstanceOf[B]
+ while (loop) {
+ val current = value.get
+ val tuple = f(current)
+ b = tuple._1
+ loop = !value.compareAndSet(current, tuple._2)
+ }
+ b
+ }
+ }
+```
+
+The idea behind the `modify` is that a variable is only updated if it still has the same value as the time we had read the value from the original memory location. If the value has changed, it retries in the while loop until it succeeds.
+
+## Advantage of Using ZIO Concurrency
+
+Here are some reasons why the ZIO concurrency model helps us to do our job well:
+
+1. **Composable** — Due to the use of the lock-free concurrency model, ZIO brings us a composable concurrent primitive and lots of great combinators in a declarative style.
+
+> **Note:** `Ref` and `Promise` and subsequently all other ZIO concurrent primitives that are on top of these two basic primitives **are not transactionally composable**.
+>
+> We cannot do transactional changes across two or more such concurrent primitives. They are susceptible to race conditions and deadlocks. **So don't use them if you need to perform an atomic operation on top of a composed sequence of multiple state-changing operations. Use [`STM`](../stm/index.md) instead**.
+
+2. **Non-blocking** — All of the ZIO primitives are a hundred percent asynchronous and nonblocking.
+
+3. **Resource Safety** — ZIO concurrency model comes with strong guarantees of resource safety. If any interruption occurs in between concurrent operations, it won't leak any resource. So it allows us to write compositional operators like timeout and racing without worrying about any leaks.
+
+## Concurrent Primitives
+
+Let's take a quick look at ZIO concurrent primitives, what are they and why they exist.
+
+### Basic Operations
+
+`Ref` and `Promise` are the two simple concurrency primitives which provide an orthogonal basis for building concurrency structures. They are the assembly language of other concurrent data structures:
+
+- **[Ref](ref.md)** — `Ref` and all its variant like [`ZRef`](zref.md), [`ZRefM`](zrefm.md) and [`RefM`](refm.md) are building blocks for writing concurrent stateful applications. Anytime we need to share information between multiple fibers, and those fibers have to update the same information, they need to communicate through something that provides the guarantee of atomicity. So all of these `Ref` primitives are atomic and thread-safe. They provide us a reliable foundation for synchronizing concurrent programs.
+
+- **[Promise](promise.md)** — A `Promise` is a model of a variable that may be set a single time, and awaited on by many fibers. This primitive is very useful when we need some point of synchronization between two or multiple fibers.
+
+By using these two simple primitives, we can build lots of other asynchronous concurrent data structures like `Semaphore`, `Queue` and `Hub`.
+
+### Others
+
+- **[Semaphore](semaphore.md)** — A `Semaphore` is an asynchronous (non-blocking) semaphore that plays well with ZIO's interruption. `Semaphore` is a generalization of a mutex. It has a certain number of permits, which can be held and released concurrently by different parties. Attempts to acquire more permits than available result in the acquiring fiber being suspended until the specified number of permits become available.
+
+- **[Queue](queue.md)** — A `Queue` is an asynchronous queue that never blocks, which is safe for multiple concurrent producers and consumers.
+
+- **[Hub](hub.md)** - A `Hub` is an asynchronous message hub that allows publishers to efficiently broadcast values to many subscribers.
+
\ No newline at end of file
diff --git a/website/versioned_docs/version-1.0.18/reference/concurrency/promise.md b/website/versioned_docs/version-1.0.18/reference/concurrency/promise.md
new file mode 100644
index 000000000000..3bcf769ce855
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/concurrency/promise.md
@@ -0,0 +1,116 @@
+---
+id: promise
+title: "Promise"
+---
+
+A `Promise[E, A]` is a variable of `IO[E, A]` type that can be set exactly once.
+
+Promise is a **purely functional synchronization primitive** which represents a single value that may not yet be available. When we create a Promise, it always starts with an empty value, then it can be completed exactly once at some point, and after that it will never become empty or be modified again.
+
+Promise is a synchronization primitive. So, it is useful whenever we want to wait for something to happen. Whenever we need to synchronize a fiber with another fiber, we can use Promise. It allows us to have fibers waiting for other fibers to do things. Any time we want to hand off work from one fiber to another fiber, or any time we want to suspend a fiber until some other fiber does a certain amount of work, we need to be using a Promise. Also, we can use `Promise` with `Ref` to build other concurrent primitives, like Queue and Semaphore.
+
+By calling `await` on the Promise, the current fiber blocks until that event happens. As we know, blocked fibers in ZIO don't actually block kernel threads. They are semantically blocked, so when a fiber is blocked the underlying thread is free to run all other fibers.
+
+Promise is the equivalent of Scala's Promise. It's almost the same, except it has two type parameters, instead of one. Also instead of calling `future`, we need to call `await` on ZIO Promise to wait for the Promise to be completed.
+
+Promises can be failed with a value of type `E` or completed successfully with a value of type `A`. So there are two ways we can complete a Promise, with failure or success, and then whoever is waiting on the Promise will get back that failure or success.
+
+
+## Operations
+
+### Creation
+
+Promises can be created using `Promise.make[E, A]`, which returns `UIO[Promise[E, A]]`. This is a description of creating a promise, but not the actual promise. Promises cannot be created outside of IO, because creating them involves allocating mutable memory, which is an effect and must be safely captured in IO.
+
+### Completing
+
+You can complete a `Promise[E, A]` in a few different ways:
+* successfully with a value of type `A` using `succeed`
+* with `Exit[E, A]` using `done` - each `await` will get this exit propagated
+* with the result of effect `IO[E, A]` using `complete` - the effect will be executed once and the result will be propagated to all waiting fibers
+* with effect `IO[E, A]` using `completeWith` - the first fiber that calls `completeWith` wins and sets the effect that **will be executed by each `await`ing fiber**, so be careful when using `p.completeWith(someEffect)` and rather use `p.complete(someEffect)` unless executing `someEffect` by each `await`ing fiber is the intent
+* simply fail with `E` using `fail`
+* simply defect with `Throwable` using `die`
+* fail or defect with `Cause[E]` using `halt`
+* interrupt it with `interrupt`
+
+Following example shows usage of all of them:
+```scala
+import zio._
+
+val race: IO[String, Int] = for {
+ p <- Promise.make[String, Int]
+ _ <- p.succeed(1).fork
+ _ <- p.complete(ZIO.succeed(2)).fork
+ _ <- p.completeWith(ZIO.succeed(3)).fork
+ _ <- p.done(Exit.succeed(4)).fork
+ _ <- p.fail("5")
+ _ <- p.halt(Cause.die(new Error("6")))
+ _ <- p.die(new Error("7"))
+ _ <- p.interrupt.fork
+ value <- p.await
+ } yield value
+```
+
+The act of completing a Promise results in an `UIO[Boolean]`, where the `Boolean` represents whether the promise value has been set (`true`) or whether it was already set (`false`). This is demonstrated below:
+
+```scala
+val ioPromise1: UIO[Promise[Exception, String]] = Promise.make[Exception, String]
+val ioBooleanSucceeded: UIO[Boolean] = ioPromise1.flatMap(promise => promise.succeed("I'm done"))
+```
+
+Another example with `fail(...)`:
+
+```scala
+val ioPromise2: UIO[Promise[Exception, Nothing]] = Promise.make[Exception, Nothing]
+val ioBooleanFailed: UIO[Boolean] = ioPromise2.flatMap(promise => promise.fail(new Exception("boom")))
+```
+
+To re-iterate, the `Boolean` tells us whether or not the operation took place successfully (`true`) i.e. the Promise
+was set with the value or the error.
+
+### Awaiting
+We can get a value from a Promise using `await`; the calling fiber will suspend until the Promise is completed.
+
+```scala
+val ioPromise3: UIO[Promise[Exception, String]] = Promise.make[Exception, String]
+val ioGet: IO[Exception, String] = ioPromise3.flatMap(promise => promise.await)
+```
+
+### Polling
+The computation will suspend (in a non-blocking fashion) until the Promise is completed with a value or an error.
+
+If we don't want to suspend, and we only want to query the state of whether or not the Promise has been completed, we can use `poll`:
+
+```scala
+val ioPromise4: UIO[Promise[Exception, String]] = Promise.make[Exception, String]
+val ioIsItDone: UIO[Option[IO[Exception, String]]] = ioPromise4.flatMap(p => p.poll)
+val ioIsItDone2: IO[Option[Nothing], IO[Exception, String]] = ioPromise4.flatMap(p => p.poll.get)
+```
+
+If the Promise was not completed when we called `poll` then the IO will fail with `None`; otherwise, we obtain an `IO[E, A]`, where `E` represents if the Promise completed with an error and `A` indicates that the Promise successfully completed with an `A` value.
+
+`isDone` returns `UIO[Boolean]` that evaluates to `true` if promise is already completed.
+
+## Example Usage
+Here is a scenario where we use a `Promise` to hand-off a value between two `Fiber`s
+
+```scala
+import java.io.IOException
+import zio.console._
+import zio.duration._
+import zio.clock._
+
+val program: ZIO[Console with Clock, IOException, Unit] =
+ for {
+ promise <- Promise.make[Nothing, String]
+ sendHelloWorld = (IO.succeed("hello world") <* sleep(1.second)).flatMap(promise.succeed)
+ getAndPrint = promise.await.flatMap(putStrLn(_))
+ fiberA <- sendHelloWorld.fork
+ fiberB <- getAndPrint.fork
+ _ <- (fiberA zip fiberB).join
+ } yield ()
+```
+
+In the example above, we create a Promise and have a Fiber (`fiberA`) complete that promise after 1 second and a second Fiber (`fiberB`) will call `await` on that Promise to obtain a `String` and then print it to screen. The example prints `hello world` to the screen after 1 second. Remember, this is just a description of the program and not the execution
+itself.
diff --git a/website/versioned_docs/version-1.0.18/reference/concurrency/queue.md b/website/versioned_docs/version-1.0.18/reference/concurrency/queue.md
new file mode 100644
index 000000000000..aee595499c50
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/concurrency/queue.md
@@ -0,0 +1,266 @@
+---
+id: queue
+title: "Queue"
+---
+
+`Queue` is a lightweight in-memory queue built on ZIO with composable and transparent back-pressure. It is fully asynchronous (no locks or blocking), purely-functional and type-safe.
+
+A `Queue[A]` contains values of type `A` and has two basic operations: `offer`, which places an `A` in the `Queue`, and `take` which removes and returns the oldest value in the `Queue`.
+
+```scala
+import zio._
+
+val res: UIO[Int] = for {
+ queue <- Queue.bounded[Int](100)
+ _ <- queue.offer(1)
+ v1 <- queue.take
+} yield v1
+```
+
+## Creating a queue
+
+A `Queue` can be bounded (with a limited capacity) or unbounded.
+
+There are several strategies to process new values when the queue is full:
+
+- The default `bounded` queue is back-pressured: when full, any offering fiber will be suspended until the queue is able to add the item;
+- A `dropping` queue will drop new items when the queue is full;
+- A `sliding` queue will drop old items when the queue is full.
+
+To create a back-pressured bounded queue:
+```scala
+val boundedQueue: UIO[Queue[Int]] = Queue.bounded[Int](100)
+```
+
+To create a dropping queue:
+```scala
+val droppingQueue: UIO[Queue[Int]] = Queue.dropping[Int](100)
+```
+
+To create a sliding queue:
+```scala
+val slidingQueue: UIO[Queue[Int]] = Queue.sliding[Int](100)
+```
+
+To create an unbounded queue:
+```scala
+val unboundedQueue: UIO[Queue[Int]] = Queue.unbounded[Int]
+```
+
+## Adding items to a queue
+
+The simplest way to add a value to the queue is `offer`:
+
+```scala
+val res1: UIO[Unit] = for {
+ queue <- Queue.bounded[Int](100)
+ _ <- queue.offer(1)
+} yield ()
+```
+
+When using a back-pressured queue, offer might suspend if the queue is full: you can use `fork` to wait in a different fiber.
+
+```scala
+val res2: UIO[Unit] = for {
+ queue <- Queue.bounded[Int](1)
+ _ <- queue.offer(1)
+ f <- queue.offer(1).fork // will be suspended because the queue is full
+ _ <- queue.take
+ _ <- f.join
+} yield ()
+```
+
+It is also possible to add multiple values at once with `offerAll`:
+
+```scala
+val res3: UIO[Unit] = for {
+ queue <- Queue.bounded[Int](100)
+ items = Range.inclusive(1, 10).toList
+ _ <- queue.offerAll(items)
+} yield ()
+```
+
+## Consuming Items from a Queue
+
+The `take` operation removes the oldest item from the queue and returns it. If the queue is empty, this will suspend, and resume only when an item has been added to the queue. As with `offer`, you can use `fork` to wait for the value in a different fiber.
+
+```scala
+val oldestItem: UIO[String] = for {
+ queue <- Queue.bounded[String](100)
+ f <- queue.take.fork // will be suspended because the queue is empty
+ _ <- queue.offer("something")
+ v <- f.join
+} yield v
+```
+
+You can consume the first item with `poll`. If the queue is empty you will get `None`, otherwise the top item will be returned wrapped in `Some`.
+
+```scala
+val polled: UIO[Option[Int]] = for {
+ queue <- Queue.bounded[Int](100)
+ _ <- queue.offer(10)
+ _ <- queue.offer(20)
+ head <- queue.poll
+} yield head
+```
+
+You can consume multiple items at once with `takeUpTo`. If the queue doesn't have enough items to return, it will return all the items without waiting for more offers.
+
+```scala
+val taken: UIO[List[Int]] = for {
+ queue <- Queue.bounded[Int](100)
+ _ <- queue.offer(10)
+ _ <- queue.offer(20)
+ list <- queue.takeUpTo(5)
+} yield list
+```
+
+Similarly, you can get all items at once with `takeAll`. It also returns without waiting (an empty list if the queue is empty).
+
+```scala
+val all: UIO[List[Int]] = for {
+ queue <- Queue.bounded[Int](100)
+ _ <- queue.offer(10)
+ _ <- queue.offer(20)
+ list <- queue.takeAll
+} yield list
+```
+
+## Shutting Down a Queue
+
+It is possible with `shutdown` to interrupt all the fibers that are suspended on `offer*` or `take*`. It will also empty the queue and make all future calls to `offer*` and `take*` terminate immediately.
+
+```scala
+val takeFromShutdownQueue: UIO[Unit] = for {
+ queue <- Queue.bounded[Int](3)
+ f <- queue.take.fork
+ _ <- queue.shutdown // will interrupt f
+ _ <- f.join // Will terminate
+} yield ()
+```
+
+You can use `awaitShutdown` to execute an effect when the queue is shut down. This will wait until the queue is shut down. If the queue is already shutdown, it will resume right away.
+
+```scala
+val awaitShutdown: UIO[Unit] = for {
+ queue <- Queue.bounded[Int](3)
+ p <- Promise.make[Nothing, Boolean]
+ f <- queue.awaitShutdown.fork
+ _ <- queue.shutdown
+ _ <- f.join
+} yield ()
+```
+
+## Transforming queues
+
+A `Queue[A]` is in fact a type alias for `ZQueue[Any, Any, Nothing, Nothing, A, A]`.
+The signature for the expanded version is:
+```scala
+trait ZQueue[RA, RB, EA, EB, A, B]
+```
+
+Which is to say:
+- The queue may be offered values of type `A`. The enqueueing operations require an environment of type `RA` and may fail with errors of type `EA`;
+- The queue will yield values of type `B`. The dequeueing operations require an environment of type `RB` and may fail with errors of type `EB`.
+
+Note how the basic `Queue[A]` cannot fail or require any environment for any of its operations.
+
+With separate type parameters for input and output, there are rich composition opportunities for queues:
+
+### ZQueue#map
+
+The output of the queue may be mapped:
+
+```scala
+val mapped: UIO[String] =
+ for {
+ queue <- Queue.bounded[Int](3)
+ mapped = queue.map(_.toString)
+ _ <- mapped.offer(1)
+ s <- mapped.take
+ } yield s
+```
+
+### ZQueue#mapM
+
+We may also use an effectful function to map the output. For example,
+we could annotate each element with the timestamp at which it was dequeued:
+
+```scala
+import java.util.concurrent.TimeUnit
+import zio.clock._
+
+val currentTimeMillis = currentTime(TimeUnit.MILLISECONDS)
+
+val annotatedOut: UIO[ZQueue[Any, Clock, Nothing, Nothing, String, (Long, String)]] =
+ for {
+ queue <- Queue.bounded[String](3)
+ mapped = queue.mapM { el =>
+ currentTimeMillis.map((_, el))
+ }
+ } yield mapped
+```
+
+### ZQueue#contramapM
+
+Similarly to `mapM`, we can also apply an effectful function to
+elements as they are enqueued. This queue will annotate the elements
+with their enqueue timestamp:
+
+```scala
+val annotatedIn: UIO[ZQueue[Clock, Any, Nothing, Nothing, String, (Long, String)]] =
+ for {
+ queue <- Queue.bounded[(Long, String)](3)
+ mapped = queue.contramapM { el: String =>
+ currentTimeMillis.map((_, el))
+ }
+ } yield mapped
+```
+
+This queue has the same type as the previous one, but the timestamp is
+attached to the elements when they are enqueued. This is reflected in
+the type of the environment required by the queue for enqueueing.
+
+To complete this example, we could combine this queue with `mapM` to
+compute the time that the elements stayed in the queue:
+
+```scala
+import zio.duration._
+
+val timeQueued: UIO[ZQueue[Clock, Clock, Nothing, Nothing, String, (Duration, String)]] =
+ for {
+ queue <- Queue.bounded[(Long, String)](3)
+ enqueueTimestamps = queue.contramapM { el: String =>
+ currentTimeMillis.map((_, el))
+ }
+ durations = enqueueTimestamps.mapM { case (enqueueTs, el) =>
+ currentTimeMillis
+ .map(dequeueTs => ((dequeueTs - enqueueTs).millis, el))
+ }
+ } yield durations
+```
+
+### ZQueue#bothWith
+
+We may also compose two queues together into a single queue that
+broadcasts offers and takes from both of the queues:
+
+```scala
+val fromComposedQueues: UIO[(Int, String)] =
+ for {
+ q1 <- Queue.bounded[Int](3)
+ q2 <- Queue.bounded[Int](3)
+ q2Mapped = q2.map(_.toString)
+ both = q1.bothWith(q2Mapped)((_, _))
+ _ <- both.offer(1)
+ iAndS <- both.take
+ (i, s) = iAndS
+ } yield (i, s)
+```
+
+## Additional Resources
+
+- [ZIO Queue Talk by John De Goes @ ScalaWave 2018](https://www.slideshare.net/jdegoes/zio-queue)
+- [ZIO Queue Talk by Wiem Zine El Abidine @ PSUG 2018](https://www.slideshare.net/wiemzin/psug-zio-queue)
+- [Elevator Control System using ZIO](https://medium.com/@wiemzin/elevator-control-system-using-zio-c718ae423c58)
+- [Scalaz 8 IO vs Akka (typed) actors vs Monix](https://blog.softwaremill.com/scalaz-8-io-vs-akka-typed-actors-vs-monix-part-1-5672657169e1)
diff --git a/website/versioned_docs/version-1.0.18/reference/concurrency/ref.md b/website/versioned_docs/version-1.0.18/reference/concurrency/ref.md
new file mode 100644
index 000000000000..35b649ff2a4e
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/concurrency/ref.md
@@ -0,0 +1,357 @@
+---
+id: ref
+title: "Ref"
+---
+
+`Ref[A]` models a **mutable reference** to a value of type `A` in which we can store **immutable** data. The two basic operations are `set`, which fills the `Ref` with a new value, and `get`, which retrieves its current content. All operations on a `Ref` are atomic and thread-safe, providing a reliable foundation for synchronizing concurrent programs.
+
+`Ref` is ZIO's analog to something like a State Monad in more Haskell-Oriented FP. We don't need State Monad in ZIO, because we have `Ref`s. `Ref`s allow us to get and set state, or update it.
+
+When we write stateful applications, we need some mechanism to manage our state. We need a way to update the in-memory state in a functional way. So this is why we need `Ref`s.
+
+`Ref`s are:
+- Purely Functional and Referential Transparent
+- Concurrent-Safe and Lock-free
+- Update and Modify atomically
+
+## Concurrent Stateful Application
+**`Ref`s are building blocks for writing concurrent stateful applications**. Without `Ref` or something equivalent, we can't do that. Anytime we need to share information between multiple fibers, and those fibers have to update the same information, they need to communicate through something that provides the guarantee of atomicity. So `Ref`s can update the state in an atomic way, consistent and isolated from all other concurrent updates.
+
+**`Ref`s are concurrent-safe**. We can share the same `Ref` among many fibers. All of them can update the `Ref` concurrently. We don't have to worry about race conditions. Even if we have ten thousand fibers all updating the same `Ref`, as long as they are using atomic update and modify functions, we will have zero race conditions.
+
+
+## Operations
+The `Ref` has lots of operations. Here we are going to introduce the most important and common ones. Also, note that `Ref` is a type alias for `ZRef`. `ZRef` has many type parameters. Basically, all of these type parameters on `ZRef` are useful for the more advanced operators. So as a not advanced user, don't worry about them.
+
+### make
+`Ref` is never empty and it always contains something. We can create `Ref` by providing the initial value to the `make`, which is a constructor of the `Ref` data type. We should pass an **immutable value** of type `A` to the constructor, and it returns an `UIO[Ref[A]]` value:
+
+
+```scala
+def make[A](a: A): UIO[Ref[A]]
+```
+
+As we can see, the output is wrapped in `UIO`, which means creating a `Ref` is effectful. Whenever we `make`, `update`, or `modify` the `Ref`, we are doing an effectful operation; this is why their output is wrapped in `UIO`. It helps the API remain referentially transparent.
+
+Let's create some `Ref`s from immutable values:
+
+```scala
+val counterRef = Ref.make(0)
+// counterRef: UIO[Ref[Int]] = zio.ZIO$EffectTotal@34507957
+val stringRef = Ref.make("initial")
+// stringRef: UIO[Ref[String]] = zio.ZIO$EffectTotal@3564252
+
+sealed trait State
+case object Active extends State
+case object Changed extends State
+case object Closed extends State
+
+val stateRef = Ref.make(Active)
+// stateRef: UIO[Ref[Active.type]] = zio.ZIO$EffectTotal@1c10a424
+```
+
+> _**Warning**_:
+>
+> The big mistake when creating a `Ref` is trying to store mutable data inside it. It doesn't work. The only way to use a `Ref` is to store **immutable data** inside it; otherwise, it does not provide us atomic guarantees, and we can have collisions and race conditions.
+
+As we mentioned above, we shouldn't create `Ref` from a mutable variable. The following snippet compiles, but it leads us to race conditions due to improper use of `make`:
+
+```scala
+// Compiles but doesn't work properly
+var init = 0
+// init: Int = 0
+val counterRef = Ref.make(init)
+// counterRef: UIO[Ref[Int]] = zio.ZIO$EffectTotal@7a7980cf
+```
+
+So we should change the `init` to be immutable:
+
+```scala
+val init = 0
+// init: Int = 0
+val counterRef = Ref.make(init)
+// counterRef: UIO[Ref[Int]] = zio.ZIO$EffectTotal@4dd4c416
+```
+
+### get
+The `get` method returns the current value of the reference. Its return type is `IO[EB, B]`, where `B` is the value type of the returned effect and, in the failure case, `EB` is the error type of that effect.
+
+```scala
+def get: IO[EB, B]
+```
+
+As the `make` and `get` methods of `Ref` are effectful, we can chain them together with flatMap. In the following example, we create a `Ref` with `initial` value, and then we acquire the current state with the `get` method:
+
+```scala
+Ref.make("initial")
+ .flatMap(_.get)
+ .flatMap(current => putStrLn(s"current value of ref: $current"))
+```
+
+We can use syntactic sugar representation of flatMap series with for-comprehension:
+
+```scala
+for {
+ ref <- Ref.make("initial")
+ value <- ref.get
+} yield assert(value == "initial")
+```
+
+Note that, there is no way to access the shared state outside the monadic operations.
+
+### set
+The `set` method atomically writes a new value to the `Ref`.
+
+```scala
+for {
+ ref <- Ref.make("initial")
+ _ <- ref.set("update")
+ value <- ref.get
+} yield assert(value == "update")
+```
+
+### update
+With `update`, we can atomically update the state of `Ref` with a given **pure** function. A function that we pass to the `update` needs to be a pure function, it needs to be deterministic and free of side effects.
+
+```scala
+def update(f: A => A): IO[E, Unit]
+```
+
+Assume we have a counter, we can increase its value with the `update` method:
+
+```scala
+val counterInitial = 0
+for {
+ counterRef <- Ref.make(counterInitial)
+ _ <- counterRef.update(_ + 1)
+ value <- counterRef.get
+} yield assert(value == 1)
+```
+
+> _**Note**_:
+>
+> The `update` is not the composition of `get` and `set`, this composition is not concurrently safe. So whenever we need to update our state, we should not compose `get` and `set` to manage our state in a concurrent environment. Instead, we should use the `update` operation which modifies its `Ref` atomically.
+
+The following snippet is not concurrent safe:
+
+```scala
+// Unsafe State Management
+object UnsafeCountRequests extends zio.App {
+ import zio.console._
+
+ def request(counter: Ref[Int]) = for {
+ current <- counter.get
+ _ <- counter.set(current + 1)
+ } yield ()
+
+ private val initial = 0
+ private val program =
+ for {
+ ref <- Ref.make(initial)
+ _ <- request(ref) zipPar request(ref)
+ rn <- ref.get
+ _ <- putStrLn(s"total requests performed: $rn")
+ } yield ()
+
+ override def run(args: List[String]) = program.exitCode
+}
+```
+
+The above snippet doesn't behave deterministically. This program sometimes prints 2 and sometimes prints 1. So let's fix that issue by using `update`, which behaves atomically:
+
+```scala
+// Safe State Management
+object CountRequests extends zio.App {
+ import zio.console._
+
+ def request(counter: Ref[Int]): ZIO[Console, Nothing, Unit] = {
+ for {
+ _ <- counter.update(_ + 1)
+ reqNumber <- counter.get
+ _ <- putStrLn(s"request number: $reqNumber").orDie
+ } yield ()
+ }
+
+ private val initial = 0
+ private val program =
+ for {
+ ref <- Ref.make(initial)
+ _ <- request(ref) zipPar request(ref)
+ rn <- ref.get
+ _ <- putStrLn(s"total requests performed: $rn").orDie
+ } yield ()
+
+ override def run(args: List[String]) = program.exitCode
+}
+```
+
+Here is another use case of `update` to write `repeat` combinator:
+
+```scala
+def repeat[E, A](n: Int)(io: IO[E, A]): IO[E, Unit] =
+ Ref.make(0).flatMap { iRef =>
+ def loop: IO[E, Unit] = iRef.get.flatMap { i =>
+ if (i < n)
+ io *> iRef.update(_ + 1) *> loop
+ else
+ IO.unit
+ }
+ loop
+ }
+```
+
+### modify
+`modify` is a more powerful version of the `update`. It atomically modifies its `Ref` with the given function and also computes a return value. The function that we pass to the `modify` needs to be a pure function; it needs to be deterministic and free of side effects.
+
+```scala
+def modify[B](f: A => (B, A)): IO[E, B]
+```
+
+Remember the `CountRequests` example. What if we want to log the number of each request inside the `request` function? Let's see what happens if we write that function with the composition of `update` and `get` methods:
+
+```scala
+// Unsafe in Concurrent Environment
+def request(counter: Ref[Int]) = {
+ for {
+ _ <- counter.update(_ + 1)
+ rn <- counter.get
+ _ <- putStrLn(s"request number received: $rn")
+ } yield ()
+}
+```
+What happens if, between running the update and the get, another update in another fiber is performed? This function doesn't perform in a deterministic fashion in concurrent environments. So we need a way to perform **update and get** atomically. This is why we need the `modify` method. Let's fix the `request` function to do that atomically:
+
+```scala
+// Safe in Concurrent Environment
+def request(counter: Ref[Int]) = {
+ for {
+ rn <- counter.modify(c => (c + 1, c + 1))
+ _ <- putStrLn(s"request number received: $rn")
+ } yield ()
+}
+```
+
+## AtomicReference in Java
+For Java programmers, we can think of `Ref` as an AtomicReference. Java has a `java.util.concurrent.atomic` package and that package contains `AtomicReference`, `AtomicLong`, `AtomicBoolean` and so forth. We can think of `Ref` as being an `AtomicReference`. It has roughly the same power, the same guarantees, and the same limitations. It packages it up in a higher-level context and of course, makes it ZIO friendly.
+
+## Ref vs. State Monad
+Basically `Ref` allows us to have all the power of State Monad inside ZIO. State Monad lacks two important features that we use in real-life application development:
+
+1. Concurrency Support
+2. Error Handling
+
+### Concurrency
+State Monad is an effect system that only includes state. It allows us to do pure stateful computations. We can only perform get, set, and update computations related to managing the state. State Monad updates its state with a series of stateful computations sequentially, but **we can't use the State Monad to do async or concurrent computations**. But `Ref`s have great support for concurrent and async programming.
+
+### Error Handling
+In real-life applications, we need error handling. Most real-life stateful applications will involve some database IO and API calls, or some concurrent and async stuff that can fail in different ways along the path of execution. So besides the state management, we need a way to do error handling. The State Monad doesn't have the ability to model error management.
+
+We can combine State Monad and Either Monad with the StateT monad transformer, but it imposes massive performance overhead. It doesn't buy us anything that we can't do with a `Ref`, so it is an anti-pattern. In the ZIO model, errors are encoded in effects and `Ref` utilizes that. So besides state management, we have the ability to do error handling without any further work.
+
+## State Transformers
+
+Those who live on the dark side of mutation sometimes have it easy; they can add state everywhere like it's Christmas. Behold:
+
+```scala
+var idCounter = 0
+def freshVar: String = {
+ idCounter += 1
+ s"var${idCounter}"
+}
+val v1 = freshVar
+val v2 = freshVar
+val v3 = freshVar
+```
+
+As functional programmers, we know better and have captured state mutation in the form of functions of type `S => (A, S)`. `Ref` provides such an encoding, with `S` being the type of the value, and `modify` embodying the state mutation function.
+
+```scala
+Ref.make(0).flatMap { idCounter =>
+ def freshVar: UIO[String] =
+ idCounter.modify(cpt => (s"var${cpt + 1}", cpt + 1))
+
+ for {
+ v1 <- freshVar
+ v2 <- freshVar
+ v3 <- freshVar
+ } yield ()
+}
+```
+
+## Building more sophisticated concurrency primitives
+
+`Ref` is low-level enough that it can serve as the foundation for other concurrency data types.
+
+Semaphores are a classic abstract data type for controlling access to shared resources. They are defined as a triple S = (v, P, V) where v is the number of units of the resource that are currently available, and P and V are operations that respectively decrement and increment v; P will only complete when v is non-negative and must wait if it isn't.
+
+Well, with `Ref`s, that's easy to do! The only difficulty is in `P`, where we must fail and retry when either `v` is negative or its value has changed between the moment we read it and the moment we try to update it. A naive implementation could look like:
+
+```scala
+sealed trait S {
+ def P: UIO[Unit]
+ def V: UIO[Unit]
+}
+
+object S {
+ def apply(v: Long): UIO[S] =
+ Ref.make(v).map { vref =>
+ new S {
+ def V = vref.update(_ + 1).unit
+
+ def P = (vref.get.flatMap { v =>
+ if (v < 0)
+ IO.fail(())
+ else
+ vref.modify(v0 => if (v0 == v) (true, v - 1) else (false, v)).flatMap {
+ case false => IO.fail(())
+ case true => IO.unit
+ }
+ } <> P).unit
+ }
+ }
+}
+```
+
+Let's rock these crocodile boots we found the other day at the market and test our semaphore at the night club, yee-haw:
+
+```scala
+import zio.duration.Duration
+import zio.clock._
+import zio.console._
+import zio.random._
+
+val party = for {
+ dancefloor <- S(10)
+ dancers <- ZIO.foreachPar(1 to 100) { i =>
+ dancefloor.P *> nextDouble.map(d => Duration.fromNanos((d * 1000000).round)).flatMap { d =>
+ putStrLn(s"${i} checking my boots") *> sleep(d) *> putStrLn(s"${i} dancing like it's 99")
+ } *> dancefloor.V
+ }
+} yield ()
+```
+
+It goes without saying you should take a look at ZIO's own `Semaphore`, it does all this and more without wasting all those CPU cycles while waiting.
+
+## Polymorphic `Ref`s
+
+`Ref[A]` is actually a type alias for `ZRef[Nothing, Nothing, A, A]`. The type signature of `ZRef` is:
+
+```scala
+trait ZRef[+EA, +EB, -A, +B]
+```
+
+A `ZRef` is a polymorphic, purely functional description of a mutable reference. The fundamental operations of a `ZRef` are `set` and `get`. `set` takes a value of type `A` and sets the reference to a new value, potentially failing with an error of type `EA`. `get` gets the current value of the reference and returns a value of type `B`, potentially failing with an error of type `EB`.
+
+When the error and value types of the `ZRef` are unified, that is, it is a `ZRef[E, E, A, A]`, the `ZRef` also supports atomic `modify` and `update` operations as discussed above.
+
+A simple use case is passing out read-only or write-only views of a reference:
+
+```scala
+for {
+ ref <- Ref.make(false)
+ readOnly = ref.readOnly
+ writeOnly = ref.writeOnly
+ _ <- writeOnly.set(true)
+ value <- readOnly.get
+} yield value
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/concurrency/refm.md b/website/versioned_docs/version-1.0.18/reference/concurrency/refm.md
new file mode 100644
index 000000000000..9daec63f7f95
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/concurrency/refm.md
@@ -0,0 +1,43 @@
+---
+id: refm
+title: "RefM"
+---
+`RefM[A]` models a **mutable reference** to a value of type `A` in which we can store **immutable** data, and update it atomically **and** effectfully.
+
+> _**Note:**_
+>
+> Almost all of `RefM` operations are the same as `Ref`. We suggest reading [`Ref`](ref.md) at first if you are not familiar with `Ref`.
+
+Let's explain how we can update a shared state effectfully with `RefM`. The `update` method and all other related methods take an effectful operation, and then they run these effects to change the shared state. This is the main difference between `RefM` and `Ref`.
+
+In the following example, we pass an `updateEffect` to it, which is the description of an update operation. So `RefM` is going to update the `refM` by running the `updateEffect`:
+
+```scala
+import zio._
+for {
+ refM <- RefM.make("current")
+ updateEffect = IO.effectTotal("update")
+ _ <- refM.update(_ => updateEffect)
+ value <- refM.get
+} yield assert(value == "update")
+```
+
+In real-world applications, there are cases where we want to run an effect, e.g. query a database, and then update the shared state. This is where `RefM` can help us to update the shared state in a more actor-model fashion. We have a shared mutable state, but for every different command or message, we want to execute our effect and update the state.
+
+We can pass an effectful program into every single update. All of them will be done in parallel, but the result will be sequenced in such a fashion that they only touch the state at different times, and we end up with a consistent state at the end.
+
+In the following example, we are going to send a `getAge` request to the users API for each user and update the state respectively:
+
+
+```scala
+val meanAge =
+ for {
+ ref <- RefM.make(0)
+ _ <- IO.foreachPar(users) { user =>
+ ref.update(sumOfAges =>
+ api.getAge(user).map(_ + sumOfAges)
+ )
+ }
+ v <- ref.get
+ } yield (v / users.length)
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/concurrency/semaphore.md b/website/versioned_docs/version-1.0.18/reference/concurrency/semaphore.md
new file mode 100644
index 000000000000..f93c0c732677
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/concurrency/semaphore.md
@@ -0,0 +1,54 @@
+---
+id: semaphore
+title: "Semaphore"
+---
+
+`Semaphore` is a datatype which allows synchronization between fibers with the `withPermit` operation, which safely acquires and releases a permit.
+`Semaphore` is based on the `Ref[A]` datatype.
+
+## Operations
+
+For example, synchronization of asynchronous tasks can
+be done by acquiring and releasing a semaphore with a given number of permits it can spend.
+When the acquire operation cannot be performed, due to an insufficient `permits` value in the semaphore, such a task
+is placed in an internal queue of suspended fibers and will be awakened when the `permits` value is sufficient:
+
+```scala
+import java.util.concurrent.TimeUnit
+import zio._
+import zio.console._
+import zio.duration.Duration
+
+val task = for {
+ _ <- putStrLn("start")
+ _ <- ZIO.sleep(Duration(2, TimeUnit.SECONDS))
+ _ <- putStrLn("end")
+} yield ()
+
+val semTask = (sem: Semaphore) => for {
+ _ <- sem.withPermit(task)
+} yield ()
+
+val semTaskSeq = (sem: Semaphore) => (1 to 3).map(_ => semTask(sem))
+
+val program = for {
+
+ sem <- Semaphore.make(permits = 1)
+
+ seq <- ZIO.effectTotal(semTaskSeq(sem))
+
+ _ <- ZIO.collectAllPar(seq)
+
+} yield ()
+```
+
+As the binary semaphore is a special case of counting semaphore
+we can acquire and release any value, regarding semaphore's permits:
+
+```scala
+val semTaskN = (sem: Semaphore) => for {
+ _ <- sem.withPermits(5)(task)
+} yield ()
+```
+
+The guarantee of `withPermit` (and its corresponding counting version `withPermits`) is that acquisition will be followed by equivalent release, regardless of whether the task succeeds, fails, or is interrupted.
diff --git a/website/versioned_docs/version-1.0.18/reference/concurrency/zref.md b/website/versioned_docs/version-1.0.18/reference/concurrency/zref.md
new file mode 100644
index 000000000000..a08d0f94fe7e
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/concurrency/zref.md
@@ -0,0 +1,17 @@
+---
+id: zref
+title: "ZRef"
+---
+
+A `ZRef[EA, EB, A, B]` is a polymorphic, purely functional description of a mutable reference. The fundamental operations of a `ZRef` are `set` and `get`.
+
+- **`set`** takes a value of type `A` and sets the reference to a new value, potentially failing with an error of type `EA`.
+- **`get`** gets the current value of the reference and returns a value of type `B`, potentially
+failing with an error of type `EB`.
+
+When the error and value types of the `ZRef` are unified, that is, it is a `ZRef[E, E, A, A]`, the `ZRef` also supports atomic `modify` and `update` operations. All operations are guaranteed to be safe for concurrent access.
+
+
+> _**Note:**_
+>
+>While `ZRef` provides the functional equivalent of a mutable reference, the value inside the `ZRef` should be immutable. For performance reasons `ZRef` is implemented in terms of compare and swap operations rather than synchronization. **These operations are not safe for mutable values that do not support concurrent access**.
diff --git a/website/versioned_docs/version-1.0.18/reference/concurrency/zrefm.md b/website/versioned_docs/version-1.0.18/reference/concurrency/zrefm.md
new file mode 100644
index 000000000000..541b606d8232
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/concurrency/zrefm.md
@@ -0,0 +1,18 @@
+---
+id: zrefm
+title: "ZRefM"
+---
+
+A `ZRefM[RA, RB, EA, EB, A, B]` is a polymorphic, purely functional description of a mutable reference.
+
+The fundamental operations of a `ZRefM`are `set` and `get`.
+- **`set`** takes a value of type `A` and sets the reference to a new value, requiring an environment of type `RA` and potentially failing with an error of type `EA`.
+- **`get`** gets the current value of the reference and returns a value of type `B`, requiring an environment of type
+`RB` and potentially failing with an error of type `EB`.
+
+When the error and value types of the `ZRefM` are unified, that is, it is a `ZRefM[E, E, A, A]`, the `ZRefM` also supports atomic `modify` and `update` operations.
+
+
+> _**Note:**_
+>
+> Unlike `ZRef`, `ZRefM` allows performing effects within update operations, at some cost to performance. Writes will semantically block other writers, while multiple readers can read simultaneously.
diff --git a/website/versioned_docs/version-1.0.18/reference/contextual/has.md b/website/versioned_docs/version-1.0.18/reference/contextual/has.md
new file mode 100644
index 000000000000..b28e3d538523
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/contextual/has.md
@@ -0,0 +1,226 @@
+---
+id: has
+title: "Has"
+---
+
+The trait `Has[A]` is used with the ZIO environment to express an effect's dependency on a service of type `A`.
+
+For example, `RIO[Has[Console.Service], Unit]` is an effect that requires a `Console.Service` service.
+
+## Overview
+ZIO wraps services with the `Has` data type to:
+
+1. **Combine** multiple services together.
+2. **Bind** services into their implementations.
+
+### Combining Services
+Two or more `Has[_]` elements can be combined _horizontally_ using their `++` operator:
+
+
+```scala
+val logger: Has[Logging] = Has(new Logging{})
+val random: Has[RandomInt] = Has(new RandomInt{})
+
+// Note the use of the infix `++` operator on `Has` to combine two `Has` elements:
+val combined: Has[Logging] with Has[RandomInt] = logger ++ random
+```
+
+### Binding Services
+
+The extra power that is given by `Has` is that the resulting data structure is backed by a _heterogeneous map_ from service type to service implementation. `Has` can be thought of as a `Map[K, V]` whose keys are _service types_ and whose values are _service implementations_. It collects each instance that is mixed in so that the instances can be accessed, extracted, and modified individually, all while still guaranteeing supreme type safety.
+
+ZIO internally can ask `combined` using `get` method to determine binding configurations:
+
+```scala
+// get back the Logging and RandomInt services from the combined values:
+val logger: Logging = combined.get[Logging]
+val random: RandomInt = combined.get[RandomInt]
+```
+
+These are implementation details. Usually, we don't create a `Has` directly. Instead, we create a `Has` using `ZLayer`.
+
+## Motivation
+Some components in an application might depend upon more than one service, so we might need to combine multiple services and feed them to the ZIO Environment. Services cannot be combined directly; they can only be combined if they are first wrapped in the `Has` data type.
+
+Let's get into this problem and how the `Has` data type, solves this problem:
+
+### Problem
+The ZIO environment has a `ZIO#provide` method which takes an `R` and returns a `ZIO` effect which doesn't require `R` and is ready to be run by the `unsafeRun` operation of `Runtime`.
+
+Assume we have two `Logging` and `RandomInt` services:
+
+
+```scala
+trait Logging {
+ def log(line: String): UIO[Unit]
+}
+
+trait RandomInt {
+ def random: UIO[Int]
+}
+```
+
+We also provided their accessors to their companion object. We just used `ZIO.accessM` to access environment of each service:
+
+```scala
+object Logging {
+ def log(line: String): ZIO[Logging, Nothing, Unit] = ZIO.accessM[Logging](_.log(line))
+}
+
+object RandomInt {
+ val random: ZIO[RandomInt, Nothing, Int] = ZIO.accessM[RandomInt](_.random)
+}
+```
+
+Now, we are ready to write our application using these interfaces. We are going to write a simple program which generates a new random number and feed it into the logger:
+
+```scala
+val myApp: ZIO[Logging with RandomInt, Nothing, Unit] =
+ for {
+ _ <- Logging.log("Application Started!")
+ nextInt <- RandomInt.random
+    _       <- Logging.log(s"Random number generated: ${nextInt.toString}")
+ } yield ()
+```
+
+To run this program, we need to implement a live version of `Logging` and `RandomInt` services. So let's implement each of them:
+
+```scala
+val LoggingLive: Logging = new Logging {
+ override def log(line: String): UIO[Unit] =
+ ZIO.effectTotal(println(line))
+}
+
+val RandomIntLive: RandomInt = new RandomInt {
+ override def random: UIO[Int] =
+ ZIO.effectTotal(scala.util.Random.nextInt())
+}
+```
+
+Great! Now, we are ready to inject these two dependencies into our application `myApp` through `ZIO.provide` function.
+
+```scala
+lazy val mainApp = myApp.provide(???) //What to provide?
+```
+
+As the type of `myApp` effect is `ZIO[Logging with RandomInt, Nothing, Unit]`, we should provide an object with a type of `Logging with RandomInt`. Oh! How can we combine `LoggingLive` and `RandomIntLive` objects together? Unfortunately, we don't have a way to combine these two objects to create a required service (`Logging with RandomInt`).
+
+But, there is a workaround, we can throw away these implementations and write a new implementation for an intersection of these two services:
+
+```scala
+val LoggingWithRandomIntLive = new Logging with RandomInt {
+ override def log(line: String): UIO[Unit] =
+ ZIO.effectTotal(println(line))
+
+ override def random: UIO[Int] =
+ ZIO.effectTotal(scala.util.Random.nextInt())
+}
+```
+
+Now, we can provide this implementation into our application:
+
+```scala
+val mainApp: IO[Nothing, Unit] = myApp.provide(LoggingWithRandomIntLive)
+```
+
+The `mainApp` doesn't need any environmental services and can be run by using the ZIO Runtime system:
+
+```scala
+Runtime.default.unsafeRun(mainApp)
+```
+
+But this workaround is not perfect, because every time we are writing an application, we need to provide a specific implementation for its requirement. This is overwhelming.
+
+We need to implement each service separately and, at the end of the day, combine them and provide them to our application. This is where the `Has[_]` wrapper data type comes into play.
+
+### Solution
+
+`Has[_]` data type enables us to combine different services and provide them to the ZIO Environment. Let's solve the previous problem by using the `Has` wrapper.
+
+First, we should change the accessor methods to return us an effect which requires services wrapped into the `Has` data type:
+
+```scala
+object Logging {
+ def log(line: String): ZIO[Has[Logging], Nothing, Unit] =
+ ZIO.serviceWith[Logging](_.log(line))
+}
+
+object RandomInt {
+ val random: ZIO[Has[RandomInt], Nothing, Int] =
+ ZIO.serviceWith[RandomInt](_.random)
+}
+```
+
+`ZIO.serviceWith` is an accessor method like `ZIO.accessM`; it accesses the specified service in the environment of the effect, but it returns a ZIO effect which requires a service wrapped in the `Has[_]` data type.
+
+We should refactor our application to represent the correct types.
+
+```scala
+val myApp: ZIO[Has[Logging] with Has[RandomInt], Nothing, Unit] =
+ for {
+ _ <- Logging.log("Application Started!")
+ nextInt <- RandomInt.random
+    _       <- Logging.log(s"Random number generated: ${nextInt.toString}")
+ } yield ()
+```
+
+Now, our application is a ZIO effect which requires `Has[Logging] with Has[RandomInt]` services. Let's combine implementation of these two services using Has data type:
+
+```scala
+val combined: Has[Logging] with Has[RandomInt] = Has(LoggingLive) ++ Has(RandomIntLive)
+```
+
+Let's feed the combined services into our application:
+
+```scala
+val effect: IO[Nothing, Unit] = myApp.provide(combined)
+zio.Runtime.default.unsafeRun(effect)
+```
+
+That is how the `Has` data type helps us to combine services. The previous example was just for demonstrating purposes, and we rarely create `Has` data type directly. Instead, we create a `Has` via `ZLayer`.
+
+Whenever we lift a service value into `ZLayer` with the `ZLayer.succeed` constructor or `toLayer`, ZIO will wrap our service with `Has` data type.
+
+Let's implement `Logging` and `RandomInt` services:
+
+```scala
+case class LoggingLive() extends Logging {
+ override def log(line: String): UIO[Unit] =
+ ZIO.effectTotal(println(line))
+}
+
+case class RandomIntLive() extends RandomInt {
+ override def random: UIO[Int] =
+ ZIO.effectTotal(scala.util.Random.nextInt())
+}
+```
+
+Now, we can lift these two implementations into the `ZLayer`. The `ZLayer` will wrap our services into the `Has[_]` data type:
+
+
+```scala
+object LoggingLive {
+ val layer: URLayer[Any, Has[Logging]] =
+ (LoggingLive.apply _).toLayer
+}
+
+object RandomIntLive {
+ val layer: URLayer[Any, Has[RandomInt]] =
+ (RandomIntLive.apply _).toLayer
+}
+```
+
+Now, when we combine multiple layers together, these services will be combined via the `with` intersection type:
+
+```scala
+val myLayer: ZLayer[Any, Nothing, Has[Logging] with Has[RandomInt]] =
+ LoggingLive.layer ++ RandomIntLive.layer
+```
+
+Finally, when we provide our layer into the ZIO effect, ZIO can access the binding configuration and extract each service. ZIO does internally these pieces of wiring machinery, we don't care about the implementation detail:
+
+```scala
+val mainApp: ZIO[Any, Nothing, Unit] = myApp.provideLayer(myLayer)
+// mainApp: ZIO[Any, Nothing, Unit] = zio.ZIO$CheckInterrupt@4682e5d9
+```
+
diff --git a/website/versioned_docs/version-1.0.18/reference/contextual/index.md b/website/versioned_docs/version-1.0.18/reference/contextual/index.md
new file mode 100644
index 000000000000..73e82226e01a
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/contextual/index.md
@@ -0,0 +1,508 @@
+---
+id: index
+title: "Introduction"
+---
+
+## ZIO Environment
+
+The `ZIO[-R, +E, +A]` data type describes an effect that requires an input type of `R`, as an environment, may fail with an error of type `E` or succeed and produces a value of type `A`.
+
+The input type is also known as the _environment type_. This type parameter indicates that to run an effect we need one or more services as an environment of that effect. In other words, `R` represents the _requirement_ for the effect to run, meaning we need to fulfill the requirement in order to make the effect _runnable_.
+
+`R` represents dependencies; whatever services, config, or wiring a part of a ZIO program depends upon to work. We will explore what we can do with `R`, as it plays a crucial role in `ZIO`.
+
+For example, when we have `ZIO[Console, Nothing, Unit]`, this shows that to run this effect we need to provide an implementation of the `Console` service:
+
+```scala
+val effect: ZIO[Console, Nothing, Unit] = putStrLn("Hello, World!").orDie
+```
+
+So finally when we provide a live version of `Console` service to our `effect`, it will be converted to an effect that doesn't require any environmental service:
+
+```scala
+val mainApp: ZIO[Any, Nothing, Unit] = effect.provideLayer(Console.live)
+```
+
+Finally, to run our application we can put our `mainApp` inside the `run` method:
+
+```scala
+import zio.{ExitCode, ZEnv, ZIO}
+import zio.console._
+
+object MainApp extends zio.App {
+ val effect: ZIO[Console, Nothing, Unit] = putStrLn("Hello, World!").orDie
+ val mainApp: ZIO[Any, Nothing, Unit] = effect.provideLayer(Console.live)
+
+ override def run(args: List[String]): ZIO[ZEnv, Nothing, ExitCode] =
+ mainApp.exitCode
+}
+```
+
+Sometimes an effect needs more than one environmental service, it doesn't matter, in these cases, we compose all dependencies by `++` operator:
+
+```scala
+import zio.console._
+import zio.random._
+
+val effect: ZIO[Console with Random, Nothing, Unit] = for {
+ r <- nextInt
+ _ <- putStrLn(s"random number: $r").orDie
+} yield ()
+
+val mainApp: ZIO[Any, Nothing, Unit] = effect.provideLayer(Console.live ++ Random.live)
+```
+
+We don't need to provide live layers for built-in services (don't worry, we will discuss layers later in this page). ZIO has a `ZEnv` type alias for the composition of all ZIO built-in services (Clock, Console, System, Random, and Blocking). So we can run the above `effect` as follows:
+
+```scala
+import zio.console._
+import zio.random._
+import zio.{ExitCode, ZEnv, ZIO}
+
+object MainApp extends zio.App {
+ val effect: ZIO[Console with Random, Nothing, Unit] = for {
+ r <- nextInt
+ _ <- putStrLn(s"random number: $r").orDie
+ } yield ()
+
+ override def run(args: List[String]): ZIO[ZEnv, Nothing, ExitCode] =
+ effect.exitCode
+}
+```
+
+ZIO environment facility enables us to:
+
+1. **Code to Interface** — like the object-oriented paradigm, in ZIO we are encouraged to code to interfaces and defer the implementation. It is the best practice, but ZIO does not enforce us to do that.
+
+2. **Write a Testable Code** — By coding to an interface, whenever we want to test our effects, we can easily mock any external services, by providing a _test_ version of those instead of the `live` version.
+
+## Contextual Data Types
+
+Defining a service in ZIO is not very different from the object-oriented style; it has the same principle: coding to an interface, not an implementation. ZIO encourages us to implement this principle by using the _Module Pattern_, which doesn't differ much from the object-oriented style.
+
+ZIO has two data types that play a key role in writing ZIO services using the _Module Pattern_:
+1. Has
+2. ZLayer
+
+So, before diving into the _Module Pattern_, We need to learn more about ZIO Contextual Data Types. Let's review each of them:
+
+### Has
+
+`Has[A]` represents a dependency on a service of type `A`, e.g. Has[Logging]. Some components in an application might depend upon more than one service.
+
+ZIO wrap services with `Has` data type to:
+
+1. **Wire/bind** services into their implementations. This data type has an internal map to maintain this binding.
+
+2. **Combine** multiple services together. Two or more `Has[_]` elements can be combined _horizontally_ using their `++` operator.
+
+
+### ZLayer
+
+`ZLayer[-RIn, +E, +ROut]` is a recipe to build an environment of type `ROut`, starting from a value `RIn`, and possibly producing an error `E` during creation.
+
+We can compose `layerA` and `layerB` _horizontally_ to build a layer that has the requirements of both layers, to provide the capabilities of both layers, through `layerA ++ layerB`
+
+We can also compose layers _vertically_, meaning the output of one layer is used as input for the subsequent layer to build the next layer, resulting in one layer with the requirement of the first, and the output of the second layer: `layerA >>> layerB`. When doing this, the first layer must output all the services required by the second layer, but we can defer creating some of these services and require them as part of the input of the final layer using `ZLayer.identity`.
+
+## Defining Services in OOP
+
+Before diving into writing services in ZIO style, let's review how we define them in object-oriented fashion:
+
+1. **Service Definition** — In object-oriented programming, we define services with traits. A service is a bundle of related functionality which are defined in a trait:
+
+```scala
+trait FooService {
+
+}
+```
+
+2. **Service Implementation** — We implement these services by using classes:
+
+```scala
+class FooServiceImpl extends FooService {
+
+}
+```
+
+3. **Defining Dependencies** — If the creation of a service depends on other services, we can define these dependencies by using constructors:
+
+```scala
+trait ServiceA {
+
+}
+
+trait ServiceB {
+
+}
+
+class FooServiceImpl(a: ServiceA, b: ServiceB) {
+
+}
+```
+
+In object-oriented programming, the best practice is to _program to an interface, not an implementation_. So in the previous example, `ServiceA` and `ServiceB` are interfaces, not concrete classes.
+
+4. **Injecting Dependencies** — Now, the client of `FooServiceImpl` service can provide its own implementation of `ServiceA` and `ServiceB`, and inject them to the `FooServiceImpl` constructor:
+
+```scala
+class ServiceAImpl extends ServiceA
+class ServiceBImpl extends ServiceB
+val fooService = new FooServiceImpl(new ServiceAImpl, new ServiceBImpl)
+```
+
+Sometimes, as the number of dependent services grows and the dependency graph of our application becomes complicated, we need an automatic way of wiring and providing dependencies into the services of our application. In these situations, we might use a dependency injection framework to do all its magic machinery for us.
+
+## Defining Services in ZIO
+
+A service is a group of functions that deals with only one concern. Keeping the scope of each service limited to a single responsibility improves our ability to understand code, in that we need to focus only on one topic at a time without juggling too many concepts together in our head.
+
+`ZIO` itself provides the basic capabilities through modules, e.g. see how `ZEnv` is defined.
+
+In the functional Scala as well as in object-oriented programming the best practice is to _Program to an Interface, Not an Implementation_. This is the most important design principle in software development and helps us to write maintainable code by:
+
+* Allowing the client to hold an interface as a contract and don't worry about the implementation. The interface signature determines all operations that should be done.
+
+* Enabling a developer to write more testable programs. When we write a test for our business logic we don't have to run and interact with real services like databases which makes our test run very slow. If our code is correct our test code should always pass, there should be no hidden variables or depend on outside sources. We can't know that the database is always running correctly. We don't want to fail our tests because of the failure of external service.
+
+* Providing the ability to write more modular applications. So we can plug in different implementations for different purposes without a major modification.
+
+It is not mandatory but ZIO encourages us to follow this principle by bundling related functionality as an interface by using _Module Pattern_.
+
+The core idea is that a layer depends upon the interfaces exposed by the layers immediately below itself, but is completely unaware of its dependencies' internal implementations.
+
+In object-oriented programming:
+
+- **Service Definition** is done by using _interfaces_ (Scala trait or Java Interface).
+- **Service Implementation** is done by implementing interfaces using _classes_ or creating _new object_ of the interface.
+- **Defining Dependencies** is done by using _constructors_. They allow us to build classes, give their dependencies. This is called constructor-based dependency injection.
+
+We have a similar analogy in Module Pattern, except instead of using _constructors_ we use **`ZLayer`** to define dependencies. So in ZIO fashion, we can think of `ZLayer` as a service constructor.
+
+ZIO has two patterns to write services. The first version of _Module Pattern_ has some boilerplate, but the second version is very concise and straightforward. ZIO doesn't mandate any of them, you can use whichever you like.
+
+### Module Pattern 1.0
+
+Let's start learning this pattern by writing a `Logging` service:
+
+1. **Bundling** — Define an object that gives the name to the module, this can be (not necessarily) a package object. We create a `logging` object, all the definitions and implementations will be included in this object.
+
+2. **Wrapping Service Type Definition with `Has[_]` Data Type** — At the first step, we create a package object of `logging`, and inside that we define the `Logging` module as a type alias for `Has[Logging.Service]`.
+
+3. **Service Definition** — Then we create the `Logging` companion object. Inside the companion object, we define the service definition with a trait named `Service`. Traits are how we define services. A service could be all the stuff that is related to one concept with singular responsibility.
+
+4. **Service Implementation** — After that, we implement our service by creating a new Service and then lifting that entire implementation into the `ZLayer` data type by using the `ZLayer.succeed` constructor.
+
+5. **Defining Dependencies** — If our service has a dependency on other services, we should use constructors like `ZLayer.fromService` and `ZLayer.fromServices`.
+
+6. **Accessor Methods** — Finally, to create the API more ergonomic, it's better to write accessor methods for all of our service methods.
+
+Accessor methods allow us to utilize all the features inside the service through the ZIO Environment. That means, if we call `log`, we don't need to pull out the `log` function from the ZIO Environment. The `accessM` method helps us to access the environment of effect and reduce the redundant operation, every time.
+
+
+
+```scala
+object logging {
+ type Logging = Has[Logging.Service]
+
+ // Companion object exists to hold service definition and also the live implementation.
+ object Logging {
+ trait Service {
+ def log(line: String): UIO[Unit]
+ }
+
+ val live: ULayer[Logging] = ZLayer.succeed {
+ new Service {
+ override def log(line: String): UIO[Unit] =
+ ZIO.effectTotal(println(line))
+ }
+ }
+ }
+
+ // Accessor Methods
+ def log(line: => String): URIO[Logging, Unit] =
+ ZIO.accessM(_.get.log(line))
+}
+```
+
+We might need `Console` and `Clock` services to implement the `Logging` service. In this case, we use `ZLayer.fromServices` constructor:
+
+
+```scala
+object logging {
+ type Logging = Has[Logging.Service]
+
+ // Companion object exists to hold service definition and also the live implementation.
+ object Logging {
+ trait Service {
+ def log(line: String): UIO[Unit]
+ }
+
+ val live: URLayer[Clock with Console, Logging] =
+ ZLayer.fromServices[Clock.Service, Console.Service, Logging.Service] {
+ (clock: Clock.Service, console: Console.Service) =>
+ new Service {
+ override def log(line: String): UIO[Unit] =
+ for {
+ current <- clock.currentDateTime.orDie
+ _ <- console.putStrLn(current.toString + "--" + line).orDie
+ } yield ()
+ }
+ }
+ }
+
+ // Accessor Methods
+ def log(line: => String): URIO[Logging, Unit] =
+ ZIO.accessM(_.get.log(line))
+}
+```
+
+
+This is how ZIO services are created. Let's use the `Logging` service in our application:
+
+```scala
+object LoggingExample extends zio.App {
+ import zio.RIO
+ import logging._
+
+ private val app: RIO[Logging, Unit] = log("Hello, World!")
+
+ override def run(args: List[String]) =
+ app.provideLayer(Logging.live).exitCode
+}
+```
+
+During writing an application we don't care which implementation version of the `Logging` service will be injected into our `app`, later at the end of the day, it will be provided by methods like `provideLayer`.
+
+### Module Pattern 2.0
+
+Writing services with _Module Pattern 2.0_ is much easier than the previous one. It removes some level of indirection from the previous version, and much more similar to the object-oriented approach in writing services.
+
+_Module Pattern 2.0_ has more similarity with object-oriented way of defining services. We use classes to implement services, and we use constructors to define service dependencies; At the end of the day, we lift class constructor into the `ZLayer`.
+
+1. **Service Definition** — Defining service in this version has changed slightly compared to the previous version. We would take the service definition and pull it out into the top-level:
+
+
+```scala
+trait Logging {
+ def log(line: String): UIO[Unit]
+}
+```
+
+2. **Service Implementation** — It is the same as what we did in object-oriented fashion. We implement the service with Scala class. By convention, we name the live version of its implementation as `LoggingLive`:
+
+```scala
+case class LoggingLive() extends Logging {
+ override def log(line: String): UIO[Unit] =
+ ZIO.effectTotal(print(line))
+}
+```
+
+3. **Define Service Dependencies** — We might need `Console` and `Clock` services to implement the `Logging` service. In this case, we put its dependencies into its constructor. All the dependencies are just interfaces, not implementation. Just like what we did in object-oriented style:
+
+
+```scala
+import zio.console.Console
+import zio.clock.Clock
+case class LoggingLive(console: Console.Service, clock: Clock.Service) extends Logging {
+ override def log(line: String): UIO[Unit] =
+ for {
+ current <- clock.currentDateTime.orDie
+ _ <- console.putStrLn(current.toString + "--" + line).orDie
+ } yield ()
+}
+```
+
+4. **Defining ZLayer** — Now, we create a companion object for `LoggingLive` data type and lift the service implementation into the `ZLayer`:
+
+```scala
+object LoggingLive {
+ val layer: URLayer[Has[Console.Service] with Has[Clock.Service], Has[Logging]] =
+ (LoggingLive(_, _)).toLayer
+}
+```
+
+5. **Accessor Methods** — Finally, to create the API more ergonomic, it's better to write accessor methods for all of our service methods. Just like what we did in Module Pattern 1.0, but with a slight change, in this case, instead of using `ZIO.accessM` we use `ZIO.serviceWith` method to define accessors inside the service companion object:
+
+```scala
+object Logging {
+ def log(line: String): URIO[Has[Logging], Unit] = ZIO.serviceWith[Logging](_.log(line))
+}
+```
+
+That's it! Very simple! ZIO encourages us to follow some of the best practices in object-oriented programming. So it doesn't require us to throw away all our object-oriented knowledge.
+
+> **Note:**
+>
+> In _Module Pattern 2.0_ we don't use type aliases for Has wrappers, like `type Logging = Has[Logging.Service]`. So unlike the previous pattern, we encourage using explicitly the `Has` wrappers whenever we want to specify the dependency on a service.
+>
+> So instead of writing `ZLayer[Console with Clock, Nothing, Logging]`, we write `ZLayer[Has[Console] with Has[Clock], Nothing, Has[Logging]]`.
+
+Finally, we provide required layers to our `app` effect:
+
+```scala
+ import zio._
+ val app = Logging.log("Application Started")
+
+ zio.Runtime.default.unsafeRun(
+ app.provideLayer(LoggingLive.layer)
+ )
+```
+
+## Dependency Injection in ZIO
+
+ZLayers combined with the ZIO environment, allow us to use ZIO for dependency injection. There are two parts for dependency injection:
+1. **Building Dependency Graph**
+2. **Dependency Propagation**
+
+ZIO has a full solution to the dependency injection problem. It solves the first problem by using compositional properties of `ZLayer`, and solves the second by using ZIO Environment facilities like `ZIO#provide`.
+
+The way ZIO manages dependencies between application components gives us extreme power in terms of compositionality and offering the capability to easily change different implementations. This is particularly useful during _testing_ and _mocking_.
+
+By using ZLayer and ZIO Environment we can solve the propagation and wire-up problems in dependency injection. But it doesn't necessary to use it, we can still use things like [Guice](https://github.com/google/guice) with ZIO, or we might like to use [izumi distage](https://izumi.7mind.io/distage/index.html) solution for dependency injection.
+
+### Building Dependency Graph
+
+Assume we have several services with their dependencies, and we need a way to compose and wire up these dependencies to create the dependency graph of our application. `ZLayer` is the ZIO solution for this problem; it allows us to build up the whole application dependency graph by composing layers horizontally and vertically. More information about how to compose layers is on the [ZLayer](zlayer.md) page.
+
+### Dependency Propagation
+
+When we write an application, our application has a lot of dependencies. We need a way to provide implementations and feeding and propagating all dependencies throughout the whole application. We can solve the propagation problem by using _ZIO environment_.
+
+During the development of an application, we don't care about implementations. Incrementally, when we use various effects with different requirements on their environment, all part of our application composed together, and at the end of the day we have a ZIO effect which requires some services as an environment. Before running this effect by `unsafeRun` we should provide an implementation of these services into the ZIO Environment of that effect.
+
+ZIO has some facilities for doing this. `ZIO#provide` is the core function that allows us to _feed_ an `R` to an effect that requires an `R`.
+
+Notice that the act of `provide`ing an effect with its environment, eliminates the environment dependency in the resulting effect type, represented by type `Any` of the resulting environment.
+
+#### Using `provide` Method
+
+The `ZIO#provide` takes an `R` environment and provides it to the `ZIO` effect which eliminates its dependency on `R`:
+
+```scala
+trait ZIO[-R, +E, +A] {
+ def provide(r: R)(implicit ev: NeedsEnv[R]): IO[E, A]
+}
+```
+
+This is similar to dependency injection, and the `provide` function can be thought of as `inject`.
+
+
+Assume we have the following services:
+
+```scala
+trait Logging {
+ def log(str: String): UIO[Unit]
+}
+
+object Logging {
+ def log(line: String) = ZIO.serviceWith[Logging](_.log(line))
+}
+```
+
+Let's write a simple program using `Logging` service:
+
+```scala
+val app: ZIO[Has[Logging], Nothing, Unit] = Logging.log("Application Started!")
+```
+
+We can `provide` implementation of `Logging` service into the `app` effect:
+
+```scala
+val loggingImpl = Has(new Logging {
+ override def log(line: String): UIO[Unit] =
+ UIO.effectTotal(println(line))
+})
+
+val effect = app.provide(loggingImpl)
+```
+
+Most of the time, we don't use `Has` directly to implement our services, instead; we use `ZLayer` to construct the dependency graph of our application, then we use methods like `ZIO#provideLayer` to propagate dependencies into the environment of our ZIO effect.
+
+#### Using `provideLayer` Method
+
+Unlike `ZIO#provide`, which takes an `R`, the `ZIO#provideLayer` method takes a `ZLayer` and provides it to the ZIO effect, eliminating its dependency on the layer's output services.
+
+Assume we have written this piece of program that requires `Random`, `Console` and `Clock` services:
+
+```scala
+import zio.clock._
+import zio.console._
+import zio.random._
+
+val myApp: ZIO[Random with Console with Clock, Nothing, Unit] = for {
+ random <- nextInt
+ _ <- putStrLn(s"A random number: ${random.toString}").orDie
+ current <- currentDateTime.orDie
+  _       <- putStrLn(s"Current Date Time: ${current.toString}").orDie
+} yield ()
+```
+
+We can compose the live implementation of `Random`, `Console` and `Clock` services horizontally and then provide them to the `myApp` effect by using `ZIO#provideLayer` method:
+
+```scala
+val mainEffect: ZIO[Any, Nothing, Unit] =
+ myApp.provideLayer(Random.live ++ Console.live ++ Clock.live)
+```
+
+As we see, the type of our effect was converted from `ZIO[Random with Console with Clock, Nothing, Unit]`, which requires three services, to a `ZIO[Any, Nothing, Unit]` effect which doesn't require any services.
+
+#### Using `provideSomeLayer` Method
+
+Sometimes we have written a program, and we don't want to provide all its requirements. In these cases, we can use `ZIO#provideSomeLayer` to partially apply some layers to the `ZIO` effect.
+
+In the previous example, if we just want to provide the `Console`, we should use `ZIO#provideSomeLayer`:
+
+```scala
+val mainEffect: ZIO[Random with Clock, Nothing, Unit] =
+ myApp.provideSomeLayer[Random with Clock](Console.live)
+```
+
+> **Note:**
+>
+> When using `ZIO#provideSomeLayer[R0 <: Has[_]]`, we should provide the remaining type as `R0` type parameter. This workaround helps the compiler to infer the proper types.
+
+#### Using `provideCustomLayer` Method
+
+`ZEnv` is a convenient type alias that provides several built-in ZIO layers that are useful in most applications.
+
+Sometimes we have written a program that contains ZIO built-in services and some other services that are not part of `ZEnv`.
+
+ As `ZEnv` provides us the implementation of built-in services, we just need to provide layers for those services that are not part of the `ZEnv`.
+
+`ZIO#provideCustomLayer` helps us to do so and returns an effect that only depends on `ZEnv`.
+
+Let's write an effect that has some built-in services and also has a `Logging` service:
+
+
+```scala
+trait Logging {
+ def log(str: String): UIO[Unit]
+}
+
+object Logging {
+ def log(line: String) = ZIO.serviceWith[Logging](_.log(line))
+}
+
+object LoggingLive {
+ val layer: ULayer[Has[Logging]] = ZLayer.succeed {
+ new Logging {
+ override def log(str: String): UIO[Unit] = ???
+ }
+ }
+}
+
+val myApp: ZIO[Has[Logging] with Console with Clock, Nothing, Unit] = for {
+ _ <- Logging.log("Application Started!")
+ current <- currentDateTime.orDie
+  _       <- putStrLn(s"Current Date Time: ${current.toString}").orDie
+} yield ()
+```
+
+This program uses two ZIO built-in services, `Console` and `Clock`. We don't need to provide `Console` and `Clock` manually, to reduce some boilerplate, we use `ZEnv` to satisfy some common base requirements.
+
+By using `ZIO#provideCustomLayer` we only provide the `Logging` layer, and it returns a `ZIO` effect which only requires `ZEnv`:
+
+```scala
+val mainEffect: ZIO[ZEnv, Nothing, Unit] = myApp.provideCustomLayer(LoggingLive.layer)
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/contextual/layer.md b/website/versioned_docs/version-1.0.18/reference/contextual/layer.md
new file mode 100644
index 000000000000..a92b8b2ca510
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/contextual/layer.md
@@ -0,0 +1,10 @@
+---
+id: layer
+title: "Layer"
+---
+
+`Layer[+E, +ROut]` is a type alias for `ZLayer[Any, E, ROut]`, which represents a layer that doesn't require any services; it may fail with an error of type `E`, and returns `ROut` as its output.
+
+```scala
+type Layer[+E, +ROut] = ZLayer[Any, E, ROut]
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/contextual/rlayer.md b/website/versioned_docs/version-1.0.18/reference/contextual/rlayer.md
new file mode 100644
index 000000000000..a97431975c53
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/contextual/rlayer.md
@@ -0,0 +1,10 @@
+---
+id: rlayer
+title: "RLayer"
+---
+
+`RLayer[-RIn, +ROut]` is a type alias for `ZLayer[RIn, Throwable, ROut]`, which represents a layer that requires `RIn` as its input, may fail with a `Throwable` value, and returns `ROut` as its output.
+
+```scala
+type RLayer[-RIn, +ROut] = ZLayer[RIn, Throwable, ROut]
+```
\ No newline at end of file
diff --git a/website/versioned_docs/version-1.0.18/reference/contextual/task-layer.md b/website/versioned_docs/version-1.0.18/reference/contextual/task-layer.md
new file mode 100644
index 000000000000..a8655e4762a2
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/contextual/task-layer.md
@@ -0,0 +1,10 @@
+---
+id: tasklayer
+title: "TaskLayer"
+---
+
+`TaskLayer[+ROut]` is a type alias for `ZLayer[Any, Throwable, ROut]`, which represents a layer that doesn't require any services as its input, may fail with a `Throwable` value, and returns `ROut` as its output.
+
+```scala
+type TaskLayer[+ROut] = ZLayer[Any, Throwable, ROut]
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/contextual/ulayer.md b/website/versioned_docs/version-1.0.18/reference/contextual/ulayer.md
new file mode 100644
index 000000000000..ad0e6e79a1ca
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/contextual/ulayer.md
@@ -0,0 +1,10 @@
+---
+id: ulayer
+title: "ULayer"
+---
+
+`ULayer[+ROut]` is a type alias for `ZLayer[Any, Nothing, ROut]`, which represents a layer that doesn't require any services as its input, it can't fail, and returns `ROut` as its output.
+
+```scala
+type ULayer[+ROut] = ZLayer[Any, Nothing, ROut]
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/contextual/urlayer.md b/website/versioned_docs/version-1.0.18/reference/contextual/urlayer.md
new file mode 100644
index 000000000000..9705223beacf
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/contextual/urlayer.md
@@ -0,0 +1,10 @@
+---
+id: urlayer
+title: "URLayer"
+---
+
+`URLayer[-RIn, +ROut]` is a type alias for `ZLayer[RIn, Nothing, ROut]`, which represents a layer that requires `RIn` as its input, it can't fail, and returns `ROut` as its output.
+
+```scala
+type URLayer[-RIn, +ROut] = ZLayer[RIn, Nothing, ROut]
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/contextual/zlayer.md b/website/versioned_docs/version-1.0.18/reference/contextual/zlayer.md
new file mode 100644
index 000000000000..b2a4531a44f2
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/contextual/zlayer.md
@@ -0,0 +1,576 @@
+---
+id: zlayer
+title: "ZLayer"
+---
+
+A `ZLayer[-RIn, +E, +ROut]` describes a layer of an application: every layer in an application requires some services as input `RIn` and produces some services as the output `ROut`.
+
+ZLayers are:
+
+1. **Recipes for Creating Services** — They describe how a given set of dependencies produces another set of services. For example, `ZLayer[Logging with Database, Throwable, UserRepo]` is a recipe for building a service that requires the `Logging` and `Database` services, and it produces a `UserRepo` service.
+
+2. **An Alternative to Constructors** — We can think of `ZLayer` as a more powerful version of a constructor, it is an alternative way to represent a constructor. Like a constructor, it allows us to build the `ROut` service in terms of its dependencies (`RIn`).
+
+3. **Composable** — Because of their excellent **composition properties**, layers are the idiomatic way in ZIO to create services that depend on other services. We can define layers that are relying on each other.
+
+4. **Effectful and Resourceful** — The construction of ZIO layers can be effectful and resourceful, they can be acquired and safely released when the services are done being utilized.
+
+5. **Asynchronous** — Unlike class constructors which are blocking, ZLayer is fully asynchronous and non-blocking.
+
+For example, a `ZLayer[Blocking with Logging, Throwable, Database]` can be thought of as a function that maps the `Blocking` and `Logging` services into a `Database` service:
+
+```scala
+(Blocking, Logging) => Database
+```
+
+So we can say that the `Database` service has two dependencies: `Blocking` and `Logging` services.
+
+Let's see how we can create a layer:
+
+## Creation
+
+`ZLayer` is an **alternative to a class constructor**, a recipe to create a service. This recipe may contain the following information:
+
+1. **Dependencies** — To create a service, we need to indicate what other service we are depending on. For example, a `Database` service might need `Socket` and `Blocking` services to perform its operations.
+
+2. **Acquisition/Release Action** — It may contain how to initialize a service. For example, if we are creating a recipe for a `Database` service, we should provide how the `Database` will be initialized, via acquisition action. Also, it may contain how to release a service. For example, how the `Database` releases its connection pools.
+
+In some cases, a `ZLayer` may not have any dependencies or requirements from the environment. In this case, we can specify `Any` for the `RIn` type parameter. The `Layer` type alias provided by ZIO is a convenient way to define a layer without requirements.
+
+There are many ways to create a ZLayer. Here's an incomplete list:
+ - `ZLayer.succeed` to create a layer from an existing service
+ - `ZLayer.succeedMany` to create a layer from a value that's one or more services
+ - `ZLayer.fromFunction` to create a layer from a function from the requirement to the service
+ - `ZLayer.fromEffect` to lift a `ZIO` effect to a layer requiring the effect environment
+ - `ZLayer.fromAcquireRelease` for a layer based on resource acquisition/release. The idea is the same as `ZManaged`.
+ - `ZLayer.fromService` to build a layer from a service
+ - `ZLayer.fromServices` to build a layer from a number of required services
+ - `ZLayer.identity` to express the requirement for a layer
+ - `ZIO#toLayer` or `ZManaged#toLayer` to construct a layer from an effect
+
+Where it makes sense, these methods also have variants to build a service effectfully (suffixed by `M`), resourcefully (suffixed by `Managed`), or to create a combination of services (suffixed by `Many`).
+
+Let's review some of the `ZLayer`'s most useful constructors:
+
+### From Simple Values
+
+With `ZLayer.succeed` we can construct a `ZLayer` from a value. It returns a `ULayer[Has[A]]` value, which represents a layer of application that _has_ a service of type `A`:
+
+```scala
+def succeed[A: Tag](a: A): ULayer[Has[A]]
+```
+
+In the following example, we are going to create a `nameLayer` that provides us the name of `Adam`.
+
+
+```scala
+val nameLayer: ULayer[Has[String]] = ZLayer.succeed("Adam")
+```
+
+In most cases, we use `ZLayer.succeed` to provide a layer of service of type `A`.
+
+For example, assume we have written the following service:
+
+```scala
+object terminal {
+ type Terminal = Has[Terminal.Service]
+
+ object Terminal {
+ trait Service {
+ def putStrLn(line: String): UIO[Unit]
+ }
+
+ object Service {
+ val live: Service = new Service {
+ override def putStrLn(line: String): UIO[Unit] =
+ ZIO.effectTotal(println(line))
+ }
+ }
+ }
+}
+```
+
+Now we can create a `ZLayer` from the `live` version of this service:
+
+```scala
+import terminal._
+val live: ZLayer[Any, Nothing, Terminal] = ZLayer.succeed(Terminal.Service.live)
+```
+
+### From Managed Resources
+
+Some components of our applications need to be managed, meaning they undergo a resource acquisition phase before usage, and a resource release phase after usage (e.g. when the application shuts down).
+
+Fortunately, the construction of ZIO layers can be effectful and resourceful, this means they can be acquired and safely released when the services are done being utilized.
+
+`ZLayer` relies on the powerful `ZManaged` data type and this makes this process extremely simple.
+
+We can lift any `ZManaged` to `ZLayer` by providing a managed resource to the `ZLayer.fromManaged` constructor:
+
+
+```scala
+val managedFile = ZManaged.fromAutoCloseable(
+ ZIO.effect(scala.io.Source.fromFile("file.txt"))
+)
+val fileLayer: ZLayer[Any, Throwable, Has[BufferedSource]] =
+ ZLayer.fromManaged(managedFile)
+```
+
+Also, every `ZManaged` can be converted to `ZLayer` by calling `ZLayer#toLayer`:
+
+```scala
+val fileLayer: ZLayer[Any, Throwable, Has[BufferedSource]] = managedFile.toLayer
+```
+
+Let's see another real-world example of creating a layer from managed resources. Assume we have written a managed `UserRepository`:
+
+
+```scala
+def userRepository: ZManaged[Blocking with Console, Throwable, UserRepository] = for {
+ cfg <- dbConfig.toManaged_
+ _ <- initializeDb(cfg).toManaged_
+ xa <- makeTransactor(cfg)
+} yield new UserRepository(xa)
+```
+
+We can convert that to `ZLayer` with `ZLayer.fromManaged` or `ZManaged#toLayer`:
+
+```scala
+val usersLayer = userRepository.toLayer
+// usersLayer: ZLayer[Blocking with Console, Throwable, Has[UserRepository]] = Managed(
+// self = zio.ZManaged$$anon$2@7b8f02a3
+// )
+val usersLayer_ = ZLayer.fromManaged(userRepository)
+// usersLayer_: ZLayer[Blocking with Console, Throwable, Has[UserRepository]] = Managed(
+// self = zio.ZManaged$$anon$2@1ea831da
+// )
+```
+
+Also, we can create a `ZLayer` directly from `acquire` and `release` actions of a managed resource:
+
+```scala
+def acquire = ZIO.effect(new FileInputStream("file.txt"))
+def release(resource: Closeable) = ZIO.effectTotal(resource.close())
+
+val inputStreamLayer = ZLayer.fromAcquireRelease(acquire)(release)
+// inputStreamLayer: ZLayer[Any, Throwable, Has[FileInputStream]] = Managed(
+// self = zio.ZManaged$$anon$2@69e80340
+// )
+```
+
+### From ZIO Effects
+
+We can create `ZLayer` from any `ZIO` effect by using `ZLayer.fromEffect` constructor, or calling `ZIO#toLayer` method:
+
+```scala
+val layer = ZLayer.fromEffect(ZIO.succeed("Hello, World!"))
+// layer: ZLayer[Any, Nothing, Has[String]] = Managed(
+// self = zio.ZManaged$$anon$2@684ca4c9
+// )
+val layer_ = ZIO.succeed("Hello, World!").toLayer
+// layer_: ZLayer[Any, Nothing, Has[String]] = Managed(
+// self = zio.ZManaged$$anon$2@6bd53979
+// )
+```
+
+Assume we have a `ZIO` effect that reads the application config from a file; we can create a layer from that:
+
+
+```scala
+def loadConfig: Task[AppConfig] = Task.effect(???)
+val configLayer = ZLayer.fromEffect(loadConfig)
+// configLayer: ZLayer[Any, Throwable, Has[AppConfig]] = Managed(
+// self = zio.ZManaged$$anon$2@34ace338
+// )
+```
+
+### From another Service
+
+Every `ZLayer` describes an application that requires some services as input and produces some services as output. Sometimes when we are writing a new layer, we may need to access and depend on one or several services.
+
+The `ZLayer.fromService` constructor builds a layer that purely depends on the specified service:
+
+```scala
+def fromService[A: Tag, B: Tag](f: A => B): ZLayer[Has[A], Nothing, Has[B]]
+```
+
+Assume we want to write a `live` version of the following logging service:
+
+```scala
+object logging {
+ type Logging = Has[Logging.Service]
+
+ object Logging {
+ trait Service {
+ def log(msg: String): UIO[Unit]
+ }
+ }
+}
+```
+
+We can create that by using `ZLayer.fromService` constructor, which depends on `Console` service:
+
+
+```scala
+val live: ZLayer[Console, Nothing, Logging] = ZLayer.fromService(console =>
+ new Service {
+ override def log(msg: String): UIO[Unit] = console.putStrLn(msg).orDie
+ }
+)
+```
+
+## Vertical and Horizontal Composition
+
+We said that we can think of the `ZLayer` as a more powerful _constructor_. Constructors are not composable, because they are not values. While a constructor is not composable, `ZLayer` has a nice facility to compose with other `ZLayer`s. So we can say that a `ZLayer` turns a constructor into a value.
+
+`ZLayer`s can be composed together horizontally or vertically:
+
+1. **Horizontal Composition** — They can be composed together horizontally with the `++` operator. When we compose two layers horizontally, the new layer requires all the services that both of them require, and produces all the services that both of them produce. Horizontal composition is a way of composing two layers side-by-side. It is useful when we combine two layers that don't have any relationship with each other.
+
+2. **Vertical Composition** — If we have a layer that requires `A` and produces `B`, we can compose this layer with another layer that requires `B` and produces `C`; this composition produces a layer that requires `A` and produces `C`. The feed operator, `>>>`, stacks them on top of each other by using vertical composition. This sort of composition is like _function composition_, feeding an output of one layer to an input of another.
+
+Let's get into an example, assume we have these services with their implementations:
+
+
+```scala
+trait Logging { }
+trait Database { }
+trait BlobStorage { }
+trait UserRepo { }
+trait DocRepo { }
+
+case class LoggerImpl(console: Console.Service) extends Logging { }
+case class DatabaseImp(blocking: Blocking.Service) extends Database { }
+case class UserRepoImpl(logging: Logging, database: Database) extends UserRepo { }
+case class BlobStorageImpl(logging: Logging) extends BlobStorage { }
+case class DocRepoImpl(logging: Logging, database: Database, blobStorage: BlobStorage) extends DocRepo { }
+```
+
+We can't compose these services together, because their constructors are not values. `ZLayer` can convert these services into values, and then we can compose them together.
+
+Let's assume we have lifted these services into `ZLayer`s:
+
+```scala
+val logging: URLayer[Has[Console.Service], Has[Logging]] =
+ (LoggerImpl.apply _).toLayer
+val database: URLayer[Has[Blocking.Service], Has[Database]] =
+ (DatabaseImp(_)).toLayer
+val userRepo: URLayer[Has[Logging] with Has[Database], Has[UserRepo]] =
+ (UserRepoImpl(_, _)).toLayer
+val blobStorage: URLayer[Has[Logging], Has[BlobStorage]] =
+ (BlobStorageImpl(_)).toLayer
+val docRepo: URLayer[Has[Logging] with Has[Database] with Has[BlobStorage], Has[DocRepo]] =
+ (DocRepoImpl(_, _, _)).toLayer
+```
+
+Now, we can compose logging and database horizontally:
+
+```scala
+val newLayer: ZLayer[Has[Console.Service] with Has[Blocking.Service], Throwable, Has[Logging] with Has[Database]] = logging ++ database
+```
+
+And then we can compose the `newLayer` with `userRepo` vertically:
+
+```scala
+val myLayer: ZLayer[Has[Console.Service] with Has[Blocking.Service], Throwable, Has[UserRepo]] = newLayer >>> userRepo
+```
+
+## Layer Memoization
+
+One important feature of `ZIO` layers is that **they are shared by default**, meaning that if the same layer is used twice, the layer will only be allocated a single time.
+
+For every layer in our dependency graph, there is only one instance of it that is shared between all the layers that depend on it.
+
+If we don't want to share a module, we should create a fresh, non-shared version of it through `ZLayer#fresh`.
+
+## Updating Local Dependencies
+
+
+Given a layer, it is possible to update one or more components it provides. We update a dependency in two ways:
+
+1. **Using the `update` Method** — This method allows us to replace one requirement with a different implementation:
+
+```scala
+val withPostgresService = horizontal.update[UserRepo.Service]{ oldRepo => new UserRepo.Service {
+ override def getUser(userId: UserId): IO[DBError, Option[User]] = UIO(???)
+ override def createUser(user: User): IO[DBError, Unit] = UIO(???)
+ }
+ }
+```
+
+2. **Using Horizontal Composition** — Another way to update a requirement is to horizontally compose in a layer that provides the updated service. The resulting composition will replace the old layer with the new one:
+
+```scala
+val dbLayer: Layer[Nothing, UserRepo] = ZLayer.succeed(new UserRepo.Service {
+ override def getUser(userId: UserId): IO[DBError, Option[User]] = ???
+ override def createUser(user: User): IO[DBError, Unit] = ???
+ })
+
+val updatedHorizontal2 = horizontal ++ dbLayer
+```
+
+## Hidden Versus Passed Through Dependencies
+
+One design decision regarding building dependency graphs is whether to hide or pass through the upstream dependencies of a service. `ZLayer` defaults to hidden dependencies but makes it easy to pass through dependencies as well.
+
+To illustrate this, consider the Postgres-based repository discussed above:
+
+```scala
+val connection: ZLayer[Any, Nothing, Has[Connection]] = connectionLayer
+val userRepo: ZLayer[Has[Connection], Nothing, UserRepo] = postgresLayer
+val layer: ZLayer[Any, Nothing, UserRepo] = connection >>> userRepo
+```
+
+Notice that in `layer`, the dependency `UserRepo` has on `Connection` has been "hidden", and is no longer expressed in the type signature. From the perspective of a caller, `layer` just outputs a `UserRepo` and requires no inputs. The caller does not need to be concerned with the internal implementation details of how the `UserRepo` is constructed.
+
+To provide only some inputs, we need to explicitly define what inputs still need to be provided:
+
+```scala
+trait Configuration
+
+val userRepoWithConfig: ZLayer[Has[Configuration] with Has[Connection], Nothing, UserRepo] =
+ ZLayer.succeed(new Configuration{}) ++ postgresLayer
+val partialLayer: ZLayer[Has[Configuration], Nothing, UserRepo] =
+ (ZLayer.identity[Has[Configuration]] ++ connection) >>> userRepoWithConfig
+```
+
+In this example the requirement for a `Connection` has been satisfied, but `Configuration` is still required by `partialLayer`.
+
+This achieves an encapsulation of services and can make it easier to refactor code. For example, say we want to refactor our application to use an in-memory database:
+
+```scala
+val updatedLayer: ZLayer[Any, Nothing, UserRepo] = dbLayer
+```
+
+No other code will need to be changed, because the previous implementation's dependency upon a `Connection` was hidden from users, and so they were not able to rely on it.
+
+However, if an upstream dependency is used by many other services, it can be convenient to "pass through" that dependency, and include it in the output of a layer. This can be done with the `>+>` operator, which provides the output of one layer to another layer, returning a new layer that outputs the services of _both_ layers.
+
+
+```scala
+val layer: ZLayer[Any, Nothing, Has[Connection] with UserRepo] = connection >+> userRepo
+```
+
+Here, the `Connection` dependency has been passed through, and is available to all downstream services. This allows a style of composition where the `>+>` operator is used to build a progressively larger set of services, with each new service able to depend on all the services before it.
+
+
+```scala
+lazy val baker: ZLayer[Any, Nothing, Baker] = ???
+lazy val ingredients: ZLayer[Any, Nothing, Ingredients] = ???
+lazy val oven: ZLayer[Any, Nothing, Oven] = ???
+lazy val dough: ZLayer[Baker with Ingredients, Nothing, Dough] = ???
+lazy val cake: ZLayer[Baker with Oven with Dough, Nothing, Cake] = ???
+
+lazy val all: ZLayer[Any, Nothing, Baker with Ingredients with Oven with Dough with Cake] =
+ baker >+> // Baker
+ ingredients >+> // Baker with Ingredients
+ oven >+> // Baker with Ingredients with Oven
+ dough >+> // Baker with Ingredients with Oven with Dough
+ cake // Baker with Ingredients with Oven with Dough with Cake
+```
+
+`ZLayer` makes it easy to mix and match these styles. If you pass through dependencies and later want to hide them you can do so through a simple type ascription:
+
+```scala
+lazy val hidden: ZLayer[Any, Nothing, Cake] = all
+```
+
+And if you do build your dependency graph more explicitly, you can be confident that layers used in multiple parts of the dependency graph will only be created once due to memoization and sharing.
+
+## Cyclic Dependencies
+
+The `ZLayer` mechanism makes it impossible to build cyclic dependencies, making the initialization process very linear, by construction.
+
+## Asynchronous Service Construction
+
+Another important note about `ZLayer` is that, unlike constructors which are synchronous, `ZLayer` is _asynchronous_. Constructors in classes are always synchronous. This is a drawback for non-blocking applications, because sometimes we might want to do something blocking inside a constructor.
+
+For example, when we are constructing some sort of Kafka streaming service, we might want to connect to the Kafka cluster in the constructor of our service, which takes some time. So it wouldn't be a good idea to block inside a constructor. There are some workarounds for fixing this issue, but they are not as elegant as the ZIO solution.
+
+With ZIO's ZLayer, our constructors can be asynchronous, and they can also safely block. That is because `ZLayer` has the full power of ZIO. As a result, we have strictly more power over our constructors with ZLayer.
+
+We can acquire resources asynchronously or in a blocking fashion, and spend some time doing that, and we don't need to worry about it. That is not an anti-pattern. This is the best practice with ZIO.
+
+## Examples
+
+### The simplest ZLayer application
+
+This application demonstrates a ZIO program with a single dependency on a simple string value:
+
+```scala
+import zio._
+
+object Example extends zio.App {
+
+ // Define our simple ZIO program
+ val zio: ZIO[Has[String], Nothing, Unit] = for {
+ name <- ZIO.access[Has[String]](_.get)
+ _ <- UIO(println(s"Hello, $name!"))
+ } yield ()
+
+ // Create a ZLayer that produces a string and can be used to satisfy a string
+ // dependency that the program has
+ val nameLayer: ULayer[Has[String]] = ZLayer.succeed("Adam")
+
+ // Run the program, providing the `nameLayer`
+ def run(args: List[String]): URIO[ZEnv, ExitCode] =
+ zio.provideLayer(nameLayer).as(ExitCode.success)
+}
+
+```
+
+### ZLayer application with dependencies
+
+In the following example, our ZIO application has several dependencies:
+ - `zio.clock.Clock`
+ - `zio.console.Console`
+ - `ModuleB`
+
+`ModuleB` in turn depends upon `ModuleA`:
+
+```scala
+import zio._
+import zio.clock._
+import zio.console._
+import zio.duration.Duration._
+import java.io.IOException
+
+object moduleA {
+ type ModuleA = Has[ModuleA.Service]
+
+ object ModuleA {
+ trait Service {
+ def letsGoA(v: Int): UIO[String]
+ }
+
+ val any: ZLayer[ModuleA, Nothing, ModuleA] =
+ ZLayer.requires[ModuleA]
+
+ val live: Layer[Nothing, Has[Service]] = ZLayer.succeed {
+ new Service {
+ def letsGoA(v: Int): UIO[String] = UIO(s"done: v = $v ")
+ }
+ }
+ }
+
+ def letsGoA(v: Int): URIO[ModuleA, String] =
+ ZIO.accessM(_.get.letsGoA(v))
+}
+
+import moduleA._
+
+object moduleB {
+ type ModuleB = Has[ModuleB.Service]
+
+ object ModuleB {
+ trait Service {
+ def letsGoB(v: Int): UIO[String]
+ }
+
+ val any: ZLayer[ModuleB, Nothing, ModuleB] =
+ ZLayer.requires[ModuleB]
+
+ val live: ZLayer[ModuleA, Nothing, ModuleB] = ZLayer.fromService { (moduleA: ModuleA.Service) =>
+ new Service {
+ def letsGoB(v: Int): UIO[String] =
+ moduleA.letsGoA(v)
+ }
+ }
+ }
+
+ def letsGoB(v: Int): URIO[ModuleB, String] =
+ ZIO.accessM(_.get.letsGoB(v))
+}
+
+object ZLayerApp0 extends zio.App {
+
+ import moduleB._
+
+ val env = Console.live ++ Clock.live ++ (ModuleA.live >>> ModuleB.live)
+ val program: ZIO[Console with Clock with ModuleB, IOException, Unit] =
+ for {
+ _ <- putStrLn(s"Welcome to ZIO!")
+ _ <- sleep(Finite(1000))
+ r <- letsGoB(10)
+ _ <- putStrLn(r)
+ } yield ()
+
+ def run(args: List[String]) =
+ program.provideLayer(env).exitCode
+
+}
+
+// output:
+// [info] running ZLayersApp
+// Welcome to ZIO!
+// done: v = 10
+```
+
+### ZLayer example with complex dependencies
+
+In this example, we can see that `ModuleC` depends upon `ModuleA`, `ModuleB`, and `Clock`. The layer provided to the runnable application shows how dependency layers can be combined using `++` into a single combined layer. The combined layer will then be able to produce both of the outputs of the original layers as a single layer:
+
+```scala
+import zio._
+import zio.clock._
+
+object ZLayerApp1 extends scala.App {
+ val rt = Runtime.default
+
+ type ModuleA = Has[ModuleA.Service]
+
+ object ModuleA {
+
+ trait Service {}
+
+ val any: ZLayer[ModuleA, Nothing, ModuleA] =
+ ZLayer.requires[ModuleA]
+
+ val live: ZLayer[Any, Nothing, ModuleA] =
+ ZLayer.succeed(new Service {})
+ }
+
+ type ModuleB = Has[ModuleB.Service]
+
+ object ModuleB {
+
+ trait Service {}
+
+ val any: ZLayer[ModuleB, Nothing, ModuleB] =
+ ZLayer.requires[ModuleB]
+
+ val live: ZLayer[Any, Nothing, ModuleB] =
+ ZLayer.succeed(new Service {})
+ }
+
+ type ModuleC = Has[ModuleC.Service]
+
+ object ModuleC {
+
+ trait Service {
+ def foo: UIO[Int]
+ }
+
+ val any: ZLayer[ModuleC, Nothing, ModuleC] =
+ ZLayer.requires[ModuleC]
+
+ val live: ZLayer[ModuleA with ModuleB with Clock, Nothing, ModuleC] =
+ ZLayer.succeed {
+ new Service {
+ val foo: UIO[Int] = UIO.succeed(42)
+ }
+ }
+
+ val foo: URIO[ModuleC, Int] =
+ ZIO.accessM(_.get.foo)
+ }
+
+ val env = (ModuleA.live ++ ModuleB.live ++ ZLayer.identity[Clock]) >>> ModuleC.live
+
+ val res = ModuleC.foo.provideCustomLayer(env)
+
+ val out = rt.unsafeRun(res)
+ println(out)
+ // 42
+}
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/core/cause.md b/website/versioned_docs/version-1.0.18/reference/core/cause.md
new file mode 100644
index 000000000000..0b9ef00449fb
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/core/cause.md
@@ -0,0 +1,45 @@
+---
+id: cause
+title: "Cause"
+---
+
+`Cause[E]` is a description of a full story of failure, which is included in an [Exit.Failure](exit.md). Many times in ZIO something can fail for a value of type `E`, but there are other ways things can fail too.
+
+`IO[E, A]` is polymorphic in values of type `E` and we can work with any error type that we want, but there is a lot of information that is not inside an arbitrary `E` value. So as a result ZIO needs somewhere to store things like **unexpected exceptions or defects**, **stack and execution traces**, **cause of fiber interruptions**, and so forth.
+
+## Cause Variations
+`Cause` has several variations which encode all the cases:
+
+1. `Fail[+E](value: E)` contains the cause of expected failure of type `E`.
+
+2. `Die(value: Throwable)` contains the cause of a defect or in other words, an unexpected failure of type `Throwable`. If we have a bug in our code and something throws an unexpected exception, that information would be described inside a Die.
+
+3. `Interrupt(fiberId)` contains information of the fiber id that causes fiber interruption.
+
+4. `Traced(cause, trace)` stores stack traces and execution traces.
+
+5. `Meta(cause, data)`
+
+6. `Both(left, right)` & `Then(left, right)` store the composition of parallel and sequential causes. Sometimes fibers can fail for more than one reason. If we are doing two things at once and both of them fail, then we actually have two errors. Examples:
+ + If we perform ZIO's analog of try-finally (e.g. `ZIO#ensuring`), and both the `try` and `finally` blocks fail, their causes are encoded with `Then`.
+ + If we run two parallel fibers with `zipPar` and both of them fail, their causes will be encoded with `Both`.
+
+Let's try to create some of these causes:
+
+```scala
+import zio._
+import zio.duration._
+for {
+ failExit <- ZIO.fail("Oh! Error!").run
+ dieExit <- ZIO.effectTotal(5 / 0).run
+ thenExit <- ZIO.fail("first").ensuring(ZIO.die(throw new Exception("second"))).run
+ bothExit <- ZIO.fail("first").zipPar(ZIO.die(throw new Exception("second"))).run
+ fiber <- ZIO.sleep(1.second).fork
+ _ <- fiber.interrupt
+ interruptionExit <- fiber.join.run
+} yield ()
+```
+
+## Lossless Error Model
+ZIO is very aggressive about preserving the full information related to a failure. ZIO captures all types of errors into the `Cause` data type, so its error model is lossless: it doesn't throw away any information related to the failure result. As a result, we can figure out exactly what happened during the operation of our effects.
+
diff --git a/website/versioned_docs/version-1.0.18/reference/core/exit.md b/website/versioned_docs/version-1.0.18/reference/core/exit.md
new file mode 100644
index 000000000000..81313fdd4e2d
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/core/exit.md
@@ -0,0 +1,24 @@
+---
+id: exit
+title: "Exit"
+---
+
+An `Exit[E, A]` value describes how fibers end life. It has two possible values:
+- `Exit.Success` contains a success value of type `A`.
+- `Exit.Failure` contains a failure [Cause](cause.md) of type `E`.
+
+We can call `run` on our effect to determine the Success or Failure of our fiber:
+
+```scala
+import zio._
+import zio.console._
+for {
+ successExit <- ZIO.succeed(1).run
+ _ <- successExit match {
+ case Exit.Success(value) =>
+ putStrLn(s"exited with success value: ${value}")
+ case Exit.Failure(cause) =>
+ putStrLn(s"exited with failure state: $cause")
+ }
+} yield ()
+```
\ No newline at end of file
diff --git a/website/versioned_docs/version-1.0.18/reference/core/index.md b/website/versioned_docs/version-1.0.18/reference/core/index.md
new file mode 100644
index 000000000000..a7252d4c94c9
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/core/index.md
@@ -0,0 +1,14 @@
+---
+id: index
+title: "Summary"
+---
+
+ - **[ZIO](zio.md)** — A `ZIO` is a value that models an effectful program, which might fail or succeed.
+ + **[UIO](uio.md)** — An `UIO[A]` is a type alias for `ZIO[Any, Nothing, A]`.
+ + **[URIO](urio.md)** — An `URIO[R, A]` is a type alias for `ZIO[R, Nothing, A]`.
+ + **[Task](task.md)** — A `Task[A]` is a type alias for `ZIO[Any, Throwable, A]`.
+ + **[RIO](rio.md)** — A `RIO[R, A]` is a type alias for `ZIO[R, Throwable, A]`.
+ + **[IO](io.md)** — An `IO[E, A]` is a type alias for `ZIO[Any, E, A]`.
+- **[Exit](exit.md)** — An `Exit[E, A]` describes the result of executing an `IO` value.
+- **[Cause](cause.md)** - `Cause[E]` is a description of a full story of a fiber failure.
+- **[Runtime](runtime.md)** — A `Runtime[R]` is capable of executing tasks within an environment `R`.
diff --git a/website/versioned_docs/version-1.0.18/reference/core/io.md b/website/versioned_docs/version-1.0.18/reference/core/io.md
new file mode 100644
index 000000000000..c1c7e05e767c
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/core/io.md
@@ -0,0 +1,29 @@
+---
+id: io
+title: "IO"
+---
+
+`IO[E, A]` is a type alias for `ZIO[Any, E, A]`, which represents an effect that has no requirements, and may fail with an `E`, or succeed with an `A`.
+
+> **Note:**
+>
+> In Scala, the _type alias_ is a way to give a name to another type, to avoid having to repeat the original type again and again. It doesn't affect the type-checking process. It just helps us to have an expressive API design.
+
+Let's see how the `IO` type alias is defined:
+
+
+```scala
+type IO[+E, +A] = ZIO[Any, E, A]
+```
+
+So `IO` is just equal to a `ZIO` which doesn't need any requirement.
+
+`ZIO` values of type `IO[E, Nothing]` (where the value type is `Nothing`) are considered _unproductive_, because the `Nothing` type is _uninhabitable_, i.e. there can be no actual values of type `Nothing`. Values of this type may fail with an `E`, but will never produce a value.
+
+> **Note:** _Principle of The Least Power_
+>
+> The `ZIO` data type is the most powerful effect in the ZIO library. It helps us to model various types of workflows. On the other hand, the type aliases are a way of subtyping and specializing the `ZIO` type, specific to a less powerful workflow.
+>
+> A lot of the time, we don't need such a powerful piece of machinery. So as a rule of thumb, whenever we require a less powerful effect, it's better to use the proper specialized type alias.
+>
+> So there is no need to convert type aliases to the `ZIO` data type; whenever the `ZIO` data type is required, we can use the most precise type alias to fit our workflow requirement.
diff --git a/website/versioned_docs/version-1.0.18/reference/core/rio.md b/website/versioned_docs/version-1.0.18/reference/core/rio.md
new file mode 100644
index 000000000000..5fb30cceb332
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/core/rio.md
@@ -0,0 +1,27 @@
+---
+id: rio
+title: "RIO"
+---
+
+`RIO[R, A]` is a type alias for `ZIO[R, Throwable, A]`, which represents an effect that requires an `R`, and may fail with a `Throwable` value, or succeed with an `A`.
+
+> **_Note:_**
+>
+> In Scala, the _type alias_ is a way to give a name to another type, to avoid having to repeat the original type again and again. It doesn't affect the type-checking process. It just helps us to have an expressive API design.
+
+Let's see how `RIO` is defined:
+
+```scala
+type RIO[-R, +A] = ZIO[R, Throwable, A]
+```
+
+So `RIO` is just equal to a `ZIO` whose error channel is `Throwable`.
+
+
+> **Note:** _Principle of The Least Power_
+>
+> The `ZIO` data type is the most powerful effect in the ZIO library. It helps us to model various types of workflows. On the other hand, the type aliases are a way of subtyping and specializing the `ZIO` type, specific to a less powerful workflow.
+>
+> A lot of the time, we don't need such a powerful piece of machinery. So as a rule of thumb, whenever we require a less powerful effect, it's better to use the proper specialized type alias.
+>
+> So there is no need to convert type aliases to the `ZIO` data type, whenever the `ZIO` data type is required, we can use the most precise type alias to fit our workflow requirement.
diff --git a/website/versioned_docs/version-1.0.18/reference/core/runtime.md b/website/versioned_docs/version-1.0.18/reference/core/runtime.md
new file mode 100644
index 000000000000..b81d804d407a
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/core/runtime.md
@@ -0,0 +1,292 @@
+---
+id: runtime
+title: "Runtime"
+---
+
+A `Runtime[R]` is capable of executing tasks within an environment `R`.
+
+To run an effect, we need a `Runtime`, which is capable of executing effects. Runtimes bundle a thread pool together with the environment that effects need.
+
+## What is a Runtime System?
+
+Whenever we write a ZIO program, we create a ZIO effect from ZIO constructors plus using its combinators. We are building a blueprint. ZIO effect is just a data structure that describes the execution of a concurrent program. So we end up with a tree data structure that contains lots of different data structures combined together to describe what the ZIO effect should do. This data structure doesn't do anything, it is just a description of a concurrent program.
+
+So the most important thing we should keep in mind when we are working with a functional effect system like ZIO is that when we are writing code — printing a string onto the console, reading a file, querying a database, and so forth — we are just writing a workflow or blueprint of an application. We are just building a data structure.
+
+So how does ZIO run these workflows? This is where the ZIO Runtime System comes into play. Whenever we call an `unsafeRun` function, the Runtime System is responsible for stepping through all the instructions described by the ZIO effect and executing them.
+
+To simplify everything, we can think of a Runtime System like a black box that takes both the ZIO effect (`ZIO[R, E, A]`) and its environment (`R`), it will run this effect and then will return its result as an `Either[E, A]` value.
+
+
+![ZIO Runtime System](/img/zio-runtime-system.svg)
+
+## Responsibilities of the Runtime System
+
+Runtime Systems have a lot of responsibilities:
+
+1. **Execute every step of the blueprint** — They have to execute every step of the blueprint in a while loop until it's done.
+
+2. **Handle unexpected errors** — They have to handle unexpected errors, not just the expected ones but also the unexpected ones.
+
+3. **Spawn concurrent fiber** — They are actually responsible for the concurrency that effect systems have. They have to spawn a fiber every time we call `fork` on an effect to spawn off a new fiber.
+
+4. **Cooperatively yield to other fibers** — They have to cooperatively yield to other fibers so that fibers that are sort of hogging the spotlight, don't get to monopolize all the CPU resources. They have to make sure that the fibers split the CPU cores among all the fibers that are working.
+
+5. **Capture execution and stack traces** — They have to keep track of where we are in the progress of our own user-land code so the nice detailed execution traces can be captured.
+
+6. **Ensure finalizers are run appropriately** — They have to ensure finalizers are run appropriately at the right point in all circumstances to make sure that resources are closed and that clean-up logic is executed. This is the feature that powers `ZManaged` and all the other resource-safe constructs in ZIO.
+
+7. **Handle asynchronous callback** — They have to handle this messy job of dealing with asynchronous callbacks. So we don't have to deal with async code. When we are doing ZIO, everything is just async out of the box.
+
+## Running a ZIO Effect
+
+There are two ways to run a ZIO effect:
+1. **Using `zio.App` entry point**
+2. **Using `unsafeRun` method directly**
+
+### Using zio.App
+
+In most cases we use this method to run our ZIO effect. `zio.App` has a `run` function which is the main entry point for running a ZIO application on the JVM:
+
+```scala
+package zio
+trait App {
+ def run(args: List[String]): URIO[ZEnv, ExitCode]
+}
+```
+
+Assume we have written an effect using ZIO:
+
+```scala
+import zio.console._
+
+def myAppLogic =
+ for {
+ _ <- putStrLn("Hello! What is your name?")
+ n <- getStrLn
+ _ <- putStrLn("Hello, " + n + ", good to meet you!")
+ } yield ()
+```
+
+Now we can run that effect using `run` entry point:
+
+```scala
+object MyApp extends zio.App {
+ final def run(args: List[String]) =
+ myAppLogic.exitCode
+}
+```
+
+### Using unsafeRun
+
+Another way to execute a ZIO effect is to feed it to the `unsafeRun` method of the Runtime system:
+
+```scala
+object RunZIOEffectUsingUnsafeRun extends scala.App {
+ zio.Runtime.default.unsafeRun(
+ myAppLogic
+ )
+}
+```
+
+We don't usually use this method to run our effects. One of the use cases of this method is when we are integrating the legacy (non-effectful code) with the ZIO effect. It also helps us to refactor a large legacy code base into a ZIO effect gradually; Assume we have decided to refactor a component in the middle of a legacy code and rewrite that with ZIO. We can start rewriting that component with the ZIO effect and then integrate that component with the existing code base, using the `unsafeRun` function.
+
+## Default Runtime
+
+ZIO contains a default runtime called `Runtime.default`, configured with the `ZEnv` (the default ZIO environment) and a default `Platform` designed to work well for mainstream usage. It is already implemented as below:
+
+```scala
+object Runtime {
+ lazy val default: Runtime[ZEnv] = Runtime(ZEnv.Services.live, Platform.default)
+}
+```
+
+The default runtime includes a default `Platform` which contains minimum capabilities to bootstrap execution of ZIO tasks and live (production) versions of all ZIO built-in services. The default ZIO environment (`ZEnv`) for the `JS` platform includes `Clock`, `Console`, `System`, `Random`; and the `JVM` platform also has a `Blocking` service:
+
+```scala
+// Default JS environment
+type ZEnv = Clock with Console with System with Random
+
+// Default JVM environment
+type ZEnv = Clock with Console with System with Random with Blocking
+```
+
+We can easily access the default `Runtime` to run an effect:
+
+```scala
+object MainApp extends scala.App {
+ val runtime = Runtime.default
+ runtime.unsafeRun(myAppLogic)
+}
+```
+
+## Custom Runtime
+
+Sometimes we need to create a custom `Runtime` with a user-defined environment and user-specified `Platform`. Many real applications should not use `Runtime.default`. Instead, they should make their own `Runtime` which configures the `Platform` and environment accordingly.
+
+Some use-cases of custom Runtimes:
+
+### Providing Environment to Runtime System
+
+The custom runtime can be used to run many different effects that all require the same environment, so we don't have to call `ZIO#provide` on all of them before we run them.
+
+For example, assume we want to create a `Runtime` for services that are for testing purposes, and they don't interact with real external APIs. So we can create a runtime, especially for testing.
+
+Let's say we have defined two `Logging` and `Email` services:
+
+```scala
+trait Logging {
+ def log(line: String): UIO[Unit]
+}
+
+object Logging {
+ def log(line: String): URIO[Has[Logging], Unit] =
+ ZIO.serviceWith[Logging](_.log(line))
+}
+
+trait Email {
+ def send(user: String, content: String): Task[Unit]
+}
+
+object Email {
+ def send(user: String, content: String): ZIO[Has[Email], Throwable, Unit] =
+ ZIO.serviceWith[Email](_.send(user, content))
+}
+```
+
+We are going to implement a live version of `Logging` service and also a mock version of `Email` service for testing:
+
+```scala
+case class LoggingLive() extends Logging {
+ override def log(line: String): UIO[Unit] =
+ ZIO.effectTotal(print(line))
+}
+
+case class EmailMock() extends Email {
+ override def send(user: String, content: String): Task[Unit] =
+ ZIO.effect(println(s"sending email to $user"))
+}
+```
+
+Let's create a custom runtime that contains these two service implementations in its environment:
+
+```scala
+val testableRuntime = Runtime(
+ Has.allOf[Logging, Email](LoggingLive(), EmailMock()),
+ Platform.default
+)
+```
+
+Also, we can map the default runtime to the new runtime, so we can append new services to the default ZIO environment:
+
+```scala
+val testableRuntime: Runtime[zio.ZEnv with Has[Logging] with Has[Email]] =
+ Runtime.default
+ .map((zenv: zio.ZEnv) =>
+ zenv ++ Has.allOf[Logging, Email](LoggingLive(), EmailMock())
+ )
+```
+
+Now we can run our effects using this custom `Runtime`:
+
+```scala
+testableRuntime.unsafeRun(
+ for {
+ _ <- Logging.log("sending newsletter")
+ _ <- Email.send("David", "Hi! Here is today's newsletter.")
+ } yield ()
+)
+```
+
+### Application Monitoring
+
+Sometimes to diagnose runtime issues and understand what is going on in our application we need to add some sort of monitoring task to the Runtime System. It helps us to track fibers and their status.
+
+By adding a `Supervisor` to the current platform of the Runtime System, we can track the activity of fibers in a program. So every time a fiber gets started, forked, or every time a fiber ends its life, all these contextual pieces of information get reported to that `Supervisor`.
+
+For example, the [ZIO ZMX](https://zio.github.io/zio-zmx/) enables us to monitor our ZIO application. To include that in our project we must add the following line to our `build.sbt`:
+
+```scala
+libraryDependencies += "dev.zio" %% "zio-zmx" % "0.0.6"
+```
+
+ZIO ZMX has a specialized `Supervisor` called `ZMXSupervisor` that can be added to our existing `Runtime`:
+
+```scala
+import zio._
+import zio.console._
+import zio.zmx._
+import zio.zmx.diagnostics._
+
+val program: ZIO[Console, Throwable, Unit] =
+ for {
+ _ <- putStrLn("Waiting for input")
+ a <- getStrLn
+ _ <- putStrLn("Thank you for " + a)
+ } yield ()
+
+val diagnosticsLayer: ZLayer[ZEnv, Throwable, Has[Diagnostics]] =
+ Diagnostics.make("localhost", 1111)
+
+val runtime: Runtime[ZEnv] =
+ Runtime.default.mapPlatform(_.withSupervisor(ZMXSupervisor))
+
+runtime.unsafeRun(program.provideCustomLayer(diagnosticsLayer))
+```
+
+### Application Tracing
+
+We can enable or disable execution tracing or configure its settings. Execution tracing produces a lot of junk. There are lots of allocations that all need to be garbage collected afterward. So it has a tremendous impact on the complexity of the application runtime.
+
+Users often turn off tracing in critical areas of their application. Also, when we are doing a benchmark operation, it is better to create a `Runtime` without tracing capability:
+
+```scala
+import zio.internal.Tracing
+import zio.internal.tracing.TracingConfig
+
+val rt1 = Runtime.default.mapPlatform(_.withTracing(Tracing.disabled))
+val rt2 = Runtime.default.mapPlatform(_.withTracing(Tracing.enabledWith(TracingConfig.stackOnly)))
+
+val config = TracingConfig(
+ traceExecution = true,
+ traceEffectOpsInExecution = true,
+ traceStack = true,
+ executionTraceLength = 100,
+ stackTraceLength = 100,
+ ancestryLength = 10,
+ ancestorExecutionTraceLength = 10,
+ ancestorStackTraceLength = 10
+)
+val rt3 = Runtime.default.mapPlatform(_.withTracingConfig(config))
+```
+
+### User-defined Executor
+
+An executor is responsible for executing effects. The way how each effect will be run including detail of threading, scheduling, and so forth, is separated from the caller. So, if we need to have a specialized executor according to our requirements, we can provide that to the ZIO `Runtime`:
+
+```scala
+import zio.internal.Executor
+import java.util.concurrent.{ThreadPoolExecutor, TimeUnit, LinkedBlockingQueue}
+
+val runtime = Runtime.default.mapPlatform(
+ _.withExecutor(
+ Executor.fromThreadPoolExecutor(_ => 1024)(
+ new ThreadPoolExecutor(
+ 5,
+ 10,
+ 5000,
+ TimeUnit.MILLISECONDS,
+ new LinkedBlockingQueue[Runnable]()
+ )
+ )
+ )
+)
+```
+
+### Benchmarking
+
+To do a benchmark operation, we need a `Runtime` with settings suitable for that. It would be better to disable tracing and auto-yielding. ZIO has a built-in `Platform` proper for benchmark operations, called `Platform.benchmark`, to which we can map the default `Platform`:
+
+```scala
+val benchmarkRuntime = Runtime.default.mapPlatform(_ => Platform.benchmark)
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/core/task.md b/website/versioned_docs/version-1.0.18/reference/core/task.md
new file mode 100644
index 000000000000..4344e6d982c1
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/core/task.md
@@ -0,0 +1,31 @@
+---
+id: task
+title: "Task"
+---
+
+`Task[A]` is a type alias for `ZIO[Any, Throwable, A]`, which represents an effect that has no requirements, and may fail with a `Throwable` value, or succeed with an `A`.
+
+Let's see how the `Task` type alias is defined:
+
+
+```scala
+type Task[+A] = ZIO[Any, Throwable, A]
+```
+
+So `Task` is just equal to a `ZIO` which doesn't require any dependency. Its error channel is `Throwable`, so it may fail with a `Throwable` and may succeed with an `A` value.
+
+> **Note:**
+>
+> In Scala, the _type alias_ is a way to give a name to another type, to avoid having to repeat the original type again and again. It doesn't affect the type-checking process. It just helps us to have an expressive API design.
+
+Sometimes we know that our effect may fail, but we don't care about the type of that exception; this is where we can use `Task`. The type signature of this type alias is similar to `Future[T]` and Cats `IO`.
+
+If we want to be less precise and want to eliminate the need to think about requirements and error types, we can use `Task`. This type alias is a good starting point for anyone who wants to refactor a current code base which is written in Cats `IO` or Monix `Task`.
+
+> **Note:** _Principle of The Least Power_
+>
+> The `ZIO` data type is the most powerful effect in the ZIO library. It helps us to model various types of workflows. On the other hand, the type aliases are a way of subtyping and specializing the `ZIO` type, specific to a less powerful workflow.
+>
+> A lot of the time, we don't need such a powerful piece of machinery. So as a rule of thumb, whenever we require a less powerful effect, it's better to use the proper specialized type alias.
+>
+> So there is no need to convert type aliases to the `ZIO` data type, whenever the `ZIO` data type is required, we can use the most precise type alias to fit our workflow requirement.
diff --git a/website/versioned_docs/version-1.0.18/reference/core/uio.md b/website/versioned_docs/version-1.0.18/reference/core/uio.md
new file mode 100644
index 000000000000..cb2c2b56b08f
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/core/uio.md
@@ -0,0 +1,49 @@
+---
+id: uio
+title: "UIO"
+---
+
+`UIO[A]` is a type alias for `ZIO[Any, Nothing, A]`, which represents an **Unexceptional** effect that doesn't require any specific environment, and cannot fail, but can succeed with an `A`.
+
+> **_Note:_**
+>
+> In Scala, the _type alias_ is a way to give a name to another type, to avoid having to repeat the original type again and again. It doesn't affect the type-checking process. It just helps us to have an expressive API design.
+
+Let's see how the `UIO` type alias is defined:
+
+
+```scala
+type UIO[+A] = ZIO[Any, Nothing, A]
+```
+
+So `UIO` is just equal to a `ZIO` which doesn't need any requirement and cannot fail, because in Scala the `Nothing` type has no inhabitant: we can't create an instance of type `Nothing`.
+
+`ZIO` values of type `UIO[A]` (where the error type is `Nothing`) are considered _infallible_,
+because the `Nothing` type is _uninhabitable_, i.e. there can be no actual values of type `Nothing`. Values of this type may produce an `A`, but will never fail with an `E`.
+
+Let's write a fibonacci function. As we don't expect any failure, it is an unexceptional effect:
+
+In the following example, `fib` doesn't have any requirement; as it is an unexceptional effect, we don't expect any failure, and it succeeds with a value of type `Int`:
+
+```scala
+import zio.UIO
+def fib(n: Int): UIO[Int] =
+ if (n <= 1) {
+ UIO.succeed(1)
+ } else {
+ for {
+ fiber1 <- fib(n - 2).fork
+ fiber2 <- fib(n - 1).fork
+ v2 <- fiber2.join
+ v1 <- fiber1.join
+ } yield v1 + v2
+ }
+```
+
+> **Note:** _Principle of The Least Power_
+>
+> The `ZIO` data type is the most powerful effect in the ZIO library. It helps us to model various types of workflows. On the other hand, the type aliases are a way of subtyping and specializing the `ZIO` type, specific to a less powerful workflow.
+>
+> A lot of the time, we don't need such a powerful piece of machinery. So as a rule of thumb, whenever we require a less powerful effect, it's better to use the proper specialized type alias.
+>
+> So there is no need to convert type aliases to the `ZIO` data type, whenever the `ZIO` data type is required, we can use the most precise type alias to fit our workflow requirement.
diff --git a/website/versioned_docs/version-1.0.18/reference/core/urio.md b/website/versioned_docs/version-1.0.18/reference/core/urio.md
new file mode 100644
index 000000000000..1482ac62c70e
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/core/urio.md
@@ -0,0 +1,35 @@
+---
+id: urio
+title: "URIO"
+---
+
+`URIO[R, A]` is a type alias for `ZIO[R, Nothing, A]`, which represents an effect that requires an `R`, and cannot fail, but can succeed with an `A`.
+
+> **_Note:_**
+>
+> In Scala, the _type alias_ is a way to give a name to another type, to avoid having to repeat the original type again and again. It doesn't affect the type-checking process. It just helps us to have an expressive API design.
+
+Let's see how the `URIO` type alias is defined:
+
+
+```scala
+type URIO[-R, +A] = ZIO[R, Nothing, A]
+```
+
+So `URIO` is just equal to a `ZIO` which requires an `R` and cannot fail, because in Scala the `Nothing` type has no inhabitant: we can't create an instance of type `Nothing`. It succeeds with an `A`.
+
+In the following example, the type of `putStrLn` is `ZIO[Console, IOException, Unit]` which means it requires the `Console` service as an environment, may fail with an `IOException`, and succeeds with the `Unit` value:
+
+
+```scala
+def putStrLn(line: => String): ZIO[Console, IOException, Unit] =
+ ZIO.accessM(_.get putStrLn line)
+```
+
+> **Note:** _Principle of The Least Power_
+>
+> The `ZIO` data type is the most powerful effect in the ZIO library. It helps us to model various types of workflows. On the other hand, the type aliases are a way of subtyping and specializing the `ZIO` type, specific to a less powerful workflow.
+>
+> A lot of the time, we don't need such a powerful piece of machinery. So as a rule of thumb, whenever we require a less powerful effect, it's better to use the proper specialized type alias.
+>
+> So there is no need to convert type aliases to the `ZIO` data type, whenever the `ZIO` data type is required, we can use the most precise type alias to fit our workflow requirement.
diff --git a/website/versioned_docs/version-1.0.18/reference/core/zio.md b/website/versioned_docs/version-1.0.18/reference/core/zio.md
new file mode 100644
index 000000000000..3f93f30dd546
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/core/zio.md
@@ -0,0 +1,895 @@
+---
+id: zio
+title: "ZIO"
+---
+
+A `ZIO[R, E, A]` value is an immutable value that lazily describes a workflow or job. The workflow requires some environment `R`, and may fail with an error of type `E`, or succeed with a value of type `A`.
+
+A value of type `ZIO[R, E, A]` is like an effectful version of the following function type:
+
+```scala
+R => Either[E, A]
+```
+
+This function, which requires an `R`, might produce either an `E`, representing failure, or an `A`, representing success. ZIO effects are not actually functions, of course, because they model complex effects, like asynchronous and concurrent effects.
+
+ZIO effects model resourceful interaction with the outside world, including synchronous, asynchronous, concurrent, and parallel interaction.
+
+ZIO effects use a fiber-based concurrency model, with built-in support for
+scheduling, fine-grained interruption, structured concurrency, and high scalability.
+
+The `ZIO[R, E, A]` data type has three type parameters:
+
+ - **`R` - Environment Type**. The effect requires an environment of type `R`. If this type parameter is `Any`, it means the effect has no requirements, because we can run the effect with any value (for example, the unit value `()`).
+ - **`E` - Failure Type**. The effect may fail with a value of type `E`. Some applications will use `Throwable`. If this type parameter is `Nothing`, it means the effect cannot fail, because there are no values of type `Nothing`.
+ - **`A` - Success Type**. The effect may succeed with a value of type `A`. If this type parameter is `Unit`, it means the effect produces no useful information, while if it is `Nothing`, it means the effect runs forever (or until failure).
+
+In the following example, the `getStrLn` function requires the `Console` service, it may fail with value of type `IOException`, or may succeed with a value of type `String`:
+
+
+```scala
+val getStrLn: ZIO[Console, IOException, String] =
+ ZIO.accessM(_.get.getStrLn)
+```
+
+`ZIO` values are immutable, and all `ZIO` functions produce new `ZIO` values, enabling `ZIO` to be reasoned about and used like any ordinary Scala immutable data structure.
+
+`ZIO` values do not actually _do_ anything; they are just values that _model_ or _describe_ effectful interactions.
+
+`ZIO` can be _interpreted_ by the ZIO runtime system into effectful interactions with the external world. Ideally, this occurs at a single time, in our application's `main` function. The `App` class provides this functionality automatically.
+
+## Table of Contents
+
+- [Creation](#creation)
+ * [Success Values](#success-values)
+ * [Failure Values](#failure-values)
+ * [From Values](#from-values)
+ + [Option](#option)
+ + [Either](#either)
+ + [Try](#try)
+ + [Function](#function)
+ + [Future](#future)
+ + [Promise](#promise)
+ + [Fiber](#fiber)
+ * [From Side-Effects](#from-side-effects)
+ + [Synchronous](#synchronous)
+ - [Blocking Synchronous Side-Effects](#blocking-synchronous-side-effects)
+ + [Asynchronous](#asynchronous)
+ * [Creating Suspended Effects](#creating-suspended-effects)
+- [Mapping](#mapping)
+ * [map](#map)
+ * [mapError](#maperror)
+ * [mapEffect](#mapeffect)
+- [Zipping](#zipping)
+ * [zipLeft and zipRight](#zipleft-and-zipright)
+- [Chaining](#chaining)
+- [Parallelism](#parallelism)
+ * [Racing](#racing)
+- [Timeout](#timeout)
+- [Resource Management](#resource-management)
+ * [Finalizing](#finalizing)
+ + [Asynchronous Try / Finally](#asynchronous-try--finally)
+ + [Unstoppable Finalizers](#unstoppable-finalizers)
+ * [Brackets](#brackets)
+- [Unswallowed Exceptions](#unswallowed-exceptions)
+
+
+## Creation
+
+In this section we explore some of the common ways to create ZIO effects from values, from common Scala types, and from both synchronous and asynchronous side-effects. Here is the summary list of them:
+
+
+### Success Values
+
+| Function | Input Type | Output Type |
+|-----------|------------|-------------|
+| `succeed` | `A` | `UIO[A]` |
+
+Using the `ZIO.succeed` method, we can create an effect that succeeds with the specified value:
+
+```scala
+val s1 = ZIO.succeed(42)
+```
+
+We can also use methods in the companion objects of the `ZIO` type aliases:
+
+```scala
+val s2: Task[Int] = Task.succeed(42)
+```
+
+> _**Note:**_ `succeed` vs. `effectTotal`
+>
+> The `succeed` method is no different from `effectTotal`; they are the same, but are provided as distinct names for clarity of intent. The `succeed` method takes a by-name parameter to make sure that any accidental side effects from constructing the value can be properly managed by the ZIO Runtime. However, `succeed` is intended for values which do not have any side effects. If we know that our value does have side effects, we should consider using `ZIO.effectTotal` for clarity.
+
+```scala
+val now = ZIO.effectTotal(System.currentTimeMillis())
+```
+
+The value inside a successful effect constructed with `ZIO.effectTotal` will only be constructed if absolutely required.
+
+### Failure Values
+
+| Function | Input Type | Output Type |
+|----------|------------|------------------|
+| `fail` | `E` | `IO[E, Nothing]` |
+
+Using the `ZIO.fail` method, we can create an effect that models failure:
+
+```scala
+val f1 = ZIO.fail("Uh oh!")
+```
+
+For the `ZIO` data type, there is no restriction on the error type. We may use strings, exceptions, or custom data types appropriate for our application.
+
+Many applications will model failures with classes that extend `Throwable` or `Exception`:
+
+```scala
+val f2 = Task.fail(new Exception("Uh oh!"))
+```
+
+Note that unlike the other effect companion objects, the `UIO` companion object does not have `UIO.fail`, because `UIO` values cannot fail.
+
+### From Values
+ZIO contains several constructors which help us to convert various data types into the `ZIO` effect.
+
+#### Option
+
+| Function | Input Type | Output Type |
+|-----------------|--------------------------|--------------------------|
+| `fromOption` | `Option[A]` | `IO[Option[Nothing], A]` |
+| `some` | `A` | `UIO[Option[A]]` |
+| `none` | | `UIO[Option[Nothing]]` |
+| `getOrFail` | `Option[A]` | `Task[A]` |
+| `getOrFailUnit` | `Option[A]` | `IO[Unit, A]` |
+| `getOrFailWith` | `e:=> E, v:=> Option[A]` | `IO[E, A]` |
+
+An `Option` can be converted into a ZIO effect using `ZIO.fromOption`:
+
+```scala
+val zoption: IO[Option[Nothing], Int] = ZIO.fromOption(Some(2))
+```
+
+The error type of the resulting effect is `Option[Nothing]`, which provides no information on why the value is not there. We can change the `Option[Nothing]` into a more specific error type using `ZIO#mapError`:
+
+```scala
+val zoption2: IO[String, Int] = zoption.mapError(_ => "It wasn't there!")
+```
+
+We can also readily compose it with other operators while preserving the optional nature of the result (similar to an `OptionT`)
+
+
+```scala
+val maybeId: IO[Option[Nothing], String] = ZIO.fromOption(Some("abc123"))
+def getUser(userId: String): IO[Throwable, Option[User]] = ???
+def getTeam(teamId: String): IO[Throwable, Team] = ???
+
+
+val result: IO[Throwable, Option[(User, Team)]] = (for {
+ id <- maybeId
+ user <- getUser(id).some
+ team <- getTeam(user.teamId).asSomeError
+} yield (user, team)).optional
+```
+
+#### Either
+
+| Function | Input Type | Output Type |
+|--------------|----------------|---------------------------|
+| `fromEither` | `Either[E, A]` | `IO[E, A]` |
+| `left` | `A` | `UIO[Either[A, Nothing]]` |
+| `right` | `A` | `UIO[Either[Nothing, B]]` |
+
+An `Either` can be converted into a ZIO effect using `ZIO.fromEither`:
+
+```scala
+val zeither = ZIO.fromEither(Right("Success!"))
+```
+
+The error type of the resulting effect will be whatever type the `Left` case has, while the success type will be whatever type the `Right` case has.
+
+#### Try
+
+| Function | Input Type | Output Type |
+|-----------|---------------------|-------------|
+| `fromTry` | `scala.util.Try[A]` | `Task[A]` |
+
+A `Try` value can be converted into a ZIO effect using `ZIO.fromTry`:
+
+```scala
+import scala.util.Try
+
+val ztry = ZIO.fromTry(Try(42 / 0))
+```
+
+The error type of the resulting effect will always be `Throwable`, because `Try` can only fail with values of type `Throwable`.
+
+#### Function
+
+| Function | Input Type | Output Type |
+|-----------------|-----------------|----------------|
+| `fromFunction` | `R => A` | `URIO[R, A]` |
+| `fromFunctionM` | `R => IO[E, A]` | `ZIO[R, E, A]` |
+
+A function `A => B` can be converted into a ZIO effect with `ZIO.fromFunction`:
+
+```scala
+val zfun: URIO[Int, Int] =
+ ZIO.fromFunction((i: Int) => i * i)
+```
+
+The environment type of the effect is `A` (the input type of the function), because in order to run the effect, it must be supplied with a value of this type.
+
+#### Future
+
+| Function | Input Type | Output Type |
+|-----------------------|--------------------------------------------------|--------------------|
+| `fromFuture` | `ExecutionContext => scala.concurrent.Future[A]` | `Task[A]` |
+| `fromFutureJava` | `java.util.concurrent.Future[A]` | `RIO[Blocking, A]` |
+| `fromFunctionFuture` | `R => scala.concurrent.Future[A]` | `RIO[R, A]` |
+| `fromFutureInterrupt` | `ExecutionContext => scala.concurrent.Future[A]` | `Task[A]` |
+
+A `Future` can be converted into a ZIO effect using `ZIO.fromFuture`:
+
+```scala
+import scala.concurrent.Future
+
+lazy val future = Future.successful("Hello!")
+
+val zfuture: Task[String] =
+ ZIO.fromFuture { implicit ec =>
+ future.map(_ => "Goodbye!")
+ }
+```
+
+The function passed to `fromFuture` is passed an `ExecutionContext`, which allows ZIO to manage where the `Future` runs (of course, we can ignore this `ExecutionContext`).
+
+The error type of the resulting effect will always be `Throwable`, because `Future` can only fail with values of type `Throwable`.
+
+#### Promise
+| Function | Input Type | Output Type |
+|--------------------|-------------------------------|-------------|
+| `fromPromiseScala` | `scala.concurrent.Promise[A]` | `Task[A]` |
+
+A `Promise` can be converted into a ZIO effect using `ZIO.fromPromiseScala`:
+
+
+```scala
+val func: String => String = s => s.toUpperCase
+for {
+ promise <- ZIO.succeed(scala.concurrent.Promise[String]())
+ _ <- ZIO.effect {
+ Try(func("hello world from future")) match {
+ case Success(value) => promise.success(value)
+ case Failure(exception) => promise.failure(exception)
+ }
+ }.fork
+ value <- ZIO.fromPromiseScala(promise)
+ _ <- putStrLn(s"Hello World in UpperCase: $value")
+} yield ()
+```
+
+#### Fiber
+
+| Function | Input Type | Output Type |
+|--------------|----------------------|-------------|
+| `fromFiber` | `Fiber[E, A]` | `IO[E, A]` |
+| `fromFiberM` | `IO[E, Fiber[E, A]]` | `IO[E, A]` |
+
+A `Fiber` can be converted into a ZIO effect using `ZIO.fromFiber`:
+
+```scala
+val io: IO[Nothing, String] = ZIO.fromFiber(Fiber.succeed("Hello From Fiber!"))
+```
+
+### From Side-Effects
+
+ZIO can convert both synchronous and asynchronous side-effects into ZIO effects (pure values).
+
+These functions can be used to wrap procedural code, allowing us to seamlessly use all features of ZIO with legacy Scala and Java code, as well as third-party libraries.
+
+#### Synchronous
+
+| Function | Input Type | Output Type | Note |
+|---------------|------------|-------------|---------------------------------------------|
+| `effectTotal` | `A` | `UIO[A]` | Imports a total synchronous effect |
+| `effect`      | `A`        | `Task[A]`   | Imports a (partial) synchronous side-effect |
+
+A synchronous side-effect can be converted into a ZIO effect using `ZIO.effect`:
+
+```scala
+import scala.io.StdIn
+
+val getStrLine: Task[String] =
+ ZIO.effect(StdIn.readLine())
+```
+
+The error type of the resulting effect will always be `Throwable`, because side-effects may throw exceptions with any value of type `Throwable`.
+
+If a given side-effect is known to not throw any exceptions, then the side-effect can be converted into a ZIO effect using `ZIO.effectTotal`:
+
+```scala
+def putStrLine(line: String): UIO[Unit] =
+ ZIO.effectTotal(println(line))
+
+val effectTotalTask: UIO[Long] =
+ ZIO.effectTotal(System.nanoTime())
+```
+
+We should be careful when using `ZIO.effectTotal`—when in doubt about whether or not a side-effect is total, prefer `ZIO.effect` to convert the effect.
+
+If this is too broad, the `refineToOrDie` method of `ZIO` may be used to retain only certain types of exceptions, and to die on any other types of exceptions:
+
+```scala
+import java.io.IOException
+
+val getStrLn2: IO[IOException, String] =
+ ZIO.effect(StdIn.readLine()).refineToOrDie[IOException]
+```
+
+##### Blocking Synchronous Side-Effects
+
+| Function | Input Type | Output Type |
+|----------------------------|-------------------------------------|---------------------------------|
+| `blocking` | `ZIO[R, E, A]` | `ZIO[R, E, A]` |
+| `effectBlocking` | `A` | `RIO[Blocking, A]` |
+| `effectBlockingCancelable` | `effect: => A`, `cancel: UIO[Unit]` | `RIO[Blocking, A]` |
+| `effectBlockingInterrupt` | `A` | `RIO[Blocking, A]` |
+| `effectBlockingIO` | `A` | `ZIO[Blocking, IOException, A]` |
+
+Some side-effects use blocking IO or otherwise put a thread into a waiting state. If not carefully managed, these side-effects can deplete threads from our application's main thread pool, resulting in work starvation.
+
+ZIO provides the `zio.blocking` package, which can be used to safely convert such blocking side-effects into ZIO effects.
+
+A blocking side-effect can be converted directly into a ZIO effect with the `effectBlocking` method:
+
+```scala
+import zio.blocking._
+
+val sleeping =
+ effectBlocking(Thread.sleep(Long.MaxValue))
+```
+
+The resulting effect will be executed on a separate thread pool designed specifically for blocking effects.
+
+Blocking side-effects can be interrupted by invoking `Thread.interrupt` using the `effectBlockingInterrupt` method.
+
+Some blocking side-effects can only be interrupted by invoking a cancellation effect. We can convert these side-effects using the `effectBlockingCancelable` method:
+
+```scala
+import java.net.ServerSocket
+import zio.UIO
+
+def accept(l: ServerSocket) =
+ effectBlockingCancelable(l.accept())(UIO.effectTotal(l.close()))
+```
+
+If a side-effect has already been converted into a ZIO effect, then instead of `effectBlocking`, the `blocking` method can be used to ensure the effect will be executed on the blocking thread pool:
+
+```scala
+import scala.io.{ Codec, Source }
+
+def download(url: String) =
+ Task.effect {
+ Source.fromURL(url)(Codec.UTF8).mkString
+ }
+
+def safeDownload(url: String) =
+ blocking(download(url))
+```
+
+#### Asynchronous
+
+| Function | Input Type | Output Type |
+|------------------------|---------------------------------------------------------------|----------------|
+| `effectAsync` | `(ZIO[R, E, A] => Unit) => Any` | `ZIO[R, E, A]` |
+| `effectAsyncM` | `(ZIO[R, E, A] => Unit) => ZIO[R, E, Any]` | `ZIO[R, E, A]` |
+| `effectAsyncMaybe` | `(ZIO[R, E, A] => Unit) => Option[ZIO[R, E, A]]` | `ZIO[R, E, A]` |
+| `effectAsyncInterrupt` | `(ZIO[R, E, A] => Unit) => Either[Canceler[R], ZIO[R, E, A]]` | `ZIO[R, E, A]` |
+
+An asynchronous side-effect with a callback-based API can be converted into a ZIO effect using `ZIO.effectAsync`:
+
+
+```scala
+object legacy {
+ def login(
+ onSuccess: User => Unit,
+ onFailure: AuthError => Unit): Unit = ???
+}
+
+val login: IO[AuthError, User] =
+ IO.effectAsync[AuthError, User] { callback =>
+ legacy.login(
+ user => callback(IO.succeed(user)),
+ err => callback(IO.fail(err))
+ )
+ }
+```
+
+Asynchronous ZIO effects are much easier to use than callback-based APIs, and they benefit from ZIO features like interruption, resource-safety, and superior error handling.
+
+### Creating Suspended Effects
+
+| Function | Input Type | Output Type |
+|--------------------------|----------------------------------------|----------------|
+| `effectSuspend` | `RIO[R, A]` | `RIO[R, A]` |
+| `effectSuspendTotal` | `ZIO[R, E, A]` | `ZIO[R, E, A]` |
+| `effectSuspendTotalWith` | `(Platform, Fiber.Id) => ZIO[R, E, A]` | `ZIO[R, E, A]` |
+| `effectSuspendWith` | `(Platform, Fiber.Id) => RIO[R, A]` | `RIO[R, A]` |
+
+A `RIO[R, A]` effect can be suspended using `effectSuspend` function:
+
+```scala
+val suspendedEffect: RIO[Any, ZIO[Console, IOException, Unit]] =
+ ZIO.effectSuspend(ZIO.effect(putStrLn("Suspended Hello World!")))
+```
+
+## Mapping
+
+### map
+We can change an `IO[E, A]` to an `IO[E, B]` by calling the `map` method with a function `A => B`. This lets us transform values produced by actions into other values.
+
+```scala
+import zio.{ UIO, IO }
+
+val mappedValue: UIO[Int] = IO.succeed(21).map(_ * 2)
+```
+
+### mapError
+We can transform an `IO[E, A]` into an `IO[E2, A]` by calling the `mapError` method with a function `E => E2`. This lets us transform the failure values of effects:
+
+```scala
+val mappedError: IO[Exception, String] =
+ IO.fail("No no!").mapError(msg => new Exception(msg))
+```
+
+> _**Note:**_
+>
+> Note that mapping over an effect's success or error channel does not change the success or failure of the effect, in the same way that mapping over an `Either` does not change whether the `Either` is `Left` or `Right`.
+
+### mapEffect
+`mapEffect` returns an effect whose success is mapped by the specified side-effecting `f` function, translating any thrown exceptions into typed failed effects.
+
+Converting the literal `"hello"` string to `Int` by calling `toInt` is side-effecting because it will throw a `NumberFormatException`:
+
+```scala
+val task: RIO[Any, Int] = ZIO.succeed("hello").mapEffect(_.toInt)
+```
+
+`mapEffect` converts an unchecked exception to a checked one by returning the `RIO` effect.
+
+## Chaining
+
+We can execute two actions in sequence with the `flatMap` method. The second action may depend on the value produced by the first action.
+
+```scala
+val chainedActionsValue: UIO[List[Int]] = IO.succeed(List(1, 2, 3)).flatMap { list =>
+ IO.succeed(list.map(_ + 1))
+}
+```
+
+If the first effect fails, the callback passed to `flatMap` will never be invoked, and the composed effect returned by `flatMap` will also fail.
+
+In _any_ chain of effects, the first failure will short-circuit the whole chain, just like throwing an exception will prematurely exit a sequence of statements.
+
+Because the `ZIO` data type supports both `flatMap` and `map`, we can use Scala's _for comprehensions_ to build sequential effects:
+
+```scala
+val program =
+ for {
+ _ <- putStrLn("Hello! What is your name?")
+ name <- getStrLn
+ _ <- putStrLn(s"Hello, ${name}, welcome to ZIO!")
+ } yield ()
+```
+
+_For comprehensions_ provide a more procedural syntax for composing chains of effects.
+
+## Zipping
+
+We can combine two effects into a single effect with the `zip` method. The resulting effect succeeds with a tuple that contains the success values of both effects:
+
+```scala
+val zipped: UIO[(String, Int)] =
+ ZIO.succeed("4").zip(ZIO.succeed(2))
+```
+
+Note that `zip` operates sequentially: the effect on the left side is executed before the effect on the right side.
+
+In any `zip` operation, if either the left or right-hand sides fail, then the composed effect will fail, because _both_ values are required to construct the tuple.
+
+### zipLeft and zipRight
+
+Sometimes, when the success value of an effect is not useful (for example, it is `Unit`), it can be more convenient to use the `zipLeft` or `zipRight` functions, which first perform a `zip`, and then map over the tuple to discard one side or the other:
+
+```scala
+val zipRight1 =
+ putStrLn("What is your name?").zipRight(getStrLn)
+```
+
+The `zipRight` and `zipLeft` functions have symbolic aliases, known as `*>` and `<*`, respectively. Some developers find these operators easier to read:
+
+```scala
+val zipRight2 =
+ putStrLn("What is your name?") *>
+ getStrLn
+```
+
+## Parallelism
+
+ZIO provides many operations for performing effects in parallel. These methods are all named with a `Par` suffix that helps us identify opportunities to parallelize our code.
+
+For example, the ordinary `ZIO#zip` method zips two effects together, sequentially. But there is also a `ZIO#zipPar` method, which zips two effects together in parallel.
+
+The following table summarizes some of the sequential operations and their corresponding parallel versions:
+
+| **Description** | **Sequential** | **Parallel** |
+| ---------------------------: | :---------------: | :------------------: |
+| Zips two effects into one | `ZIO#zip` | `ZIO#zipPar` |
+| Zips two effects into one | `ZIO#zipWith` | `ZIO#zipWithPar` |
+| Collects from many effects | `ZIO.collectAll` | `ZIO.collectAllPar` |
+| Effectfully loop over values | `ZIO.foreach` | `ZIO.foreachPar` |
+| Reduces many values | `ZIO.reduceAll` | `ZIO.reduceAllPar` |
+| Merges many values | `ZIO.mergeAll` | `ZIO.mergeAllPar` |
+
+For all the parallel operations, if one effect fails, then others will be interrupted, to minimize unnecessary computation.
+
+If the fail-fast behavior is not desired, potentially failing effects can be first converted into infallible effects using the `ZIO#either` or `ZIO#option` methods.
+
+### Racing
+
+ZIO lets us race multiple effects in parallel, returning the first successful result:
+
+```scala
+for {
+ winner <- IO.succeed("Hello").race(IO.succeed("Goodbye"))
+} yield winner
+```
+
+If we want the first success or failure, rather than the first success, then we can use `left.either race right.either`, for any effects `left` and `right`.
+
+## Timeout
+
+ZIO lets us timeout any effect using the `ZIO#timeout` method, which returns a new effect that succeeds with an `Option`. A value of `None` indicates the timeout elapsed before the effect completed.
+
+```scala
+import zio.duration._
+
+IO.succeed("Hello").timeout(10.seconds)
+```
+
+If an effect times out, then instead of continuing to execute in the background, it will be interrupted so no resources will be wasted.
+
+## Error Management
+
+### Either
+
+| Function | Input Type | Output Type |
+|---------------|---------------------------|-------------------------|
+| `ZIO#either` | | `URIO[R, Either[E, A]]` |
+| `ZIO.absolve` | `ZIO[R, E, Either[E, A]]` | `ZIO[R, E, A]` |
+
+We can surface failures with `ZIO#either`, which takes an `ZIO[R, E, A]` and produces an `ZIO[R, Nothing, Either[E, A]]`.
+
+```scala
+val zeither: UIO[Either[String, Int]] =
+ IO.fail("Uh oh!").either
+```
+
+We can submerge failures with `ZIO.absolve`, which is the opposite of `either` and turns an `ZIO[R, Nothing, Either[E, A]]` into a `ZIO[R, E, A]`:
+
+```scala
+def sqrt(io: UIO[Double]): IO[String, Double] =
+ ZIO.absolve(
+ io.map(value =>
+ if (value < 0.0) Left("Value must be >= 0.0")
+ else Right(Math.sqrt(value))
+ )
+ )
+```
+
+### Catching
+
+| Function | Input Type | Output Type |
+|-----------------------|---------------------------------------------------------|-------------------|
+| `ZIO#catchAll` | `E => ZIO[R1, E2, A1]` | `ZIO[R1, E2, A1]` |
+| `ZIO#catchAllCause` | `Cause[E] => ZIO[R1, E2, A1]` | `ZIO[R1, E2, A1]` |
+| `ZIO#catchAllDefect` | `Throwable => ZIO[R1, E1, A1]` | `ZIO[R1, E1, A1]` |
+| `ZIO#catchAllTrace` | `((E, Option[ZTrace])) => ZIO[R1, E2, A1]` | `ZIO[R1, E2, A1]` |
+| `ZIO#catchSome` | `PartialFunction[E, ZIO[R1, E1, A1]]` | `ZIO[R1, E1, A1]` |
+| `ZIO#catchSomeCause` | `PartialFunction[Cause[E], ZIO[R1, E1, A1]]` | `ZIO[R1, E1, A1]` |
+| `ZIO#catchSomeDefect` | `PartialFunction[Throwable, ZIO[R1, E1, A1]]` | `ZIO[R1, E1, A1]` |
+| `ZIO#catchSomeTrace` | `PartialFunction[(E, Option[ZTrace]), ZIO[R1, E1, A1]]` | `ZIO[R1, E1, A1]` |
+
+#### Catching All Errors
+If we want to catch and recover from all types of errors and effectfully attempt recovery, we can use the `catchAll` method:
+
+
+```scala
+val z: IO[IOException, Array[Byte]] =
+ readFile("primary.json").catchAll(_ =>
+ readFile("backup.json"))
+```
+
+In the callback passed to `catchAll`, we may return an effect with a different error type (or perhaps `Nothing`), which will be reflected in the type of effect returned by `catchAll`.
+#### Catching Some Errors
+
+If we want to catch and recover from only some types of exceptions and effectfully attempt recovery, we can use the `catchSome` method:
+
+```scala
+val data: IO[IOException, Array[Byte]] =
+ readFile("primary.data").catchSome {
+ case _ : FileNotFoundException =>
+ readFile("backup.data")
+ }
+```
+
+Unlike `catchAll`, `catchSome` cannot reduce or eliminate the error type, although it can widen the error type to a broader class of errors.
+
+### Fallback
+
+| Function | Input Type | Output Type |
+|------------------|---------------------------|-----------------------------|
+| `orElse` | `ZIO[R1, E2, A1]` | `ZIO[R1, E2, A1]` |
+| `orElseEither` | `ZIO[R1, E2, B]` | `ZIO[R1, E2, Either[A, B]]` |
+| `orElseFail` | `E1` | `ZIO[R, E1, A]` |
+| `orElseOptional` | `ZIO[R1, Option[E1], A1]` | `ZIO[R1, Option[E1], A1]` |
+| `orElseSucceed` | `A1` | `URIO[R, A1]` |
+
+We can try one effect, or, if it fails, try another effect, with the `orElse` combinator:
+
+```scala
+val primaryOrBackupData: IO[IOException, Array[Byte]] =
+ readFile("primary.data").orElse(readFile("backup.data"))
+```
+
+### Folding
+
+| Function | Input Type | Output Type |
+|--------------|----------------------------------------------------------------------------------|------------------|
+| `fold` | `failure: E => B, success: A => B` | `URIO[R, B]` |
+| `foldCause` | `failure: Cause[E] => B, success: A => B` | `URIO[R, B]` |
+| `foldM` | `failure: E => ZIO[R1, E2, B], success: A => ZIO[R1, E2, B]` | `ZIO[R1, E2, B]` |
+| `foldCauseM` | `failure: Cause[E] => ZIO[R1, E2, B], success: A => ZIO[R1, E2, B]` | `ZIO[R1, E2, B]` |
+| `foldTraceM` | `failure: ((E, Option[ZTrace])) => ZIO[R1, E2, B], success: A => ZIO[R1, E2, B]` | `ZIO[R1, E2, B]` |
+
+Scala's `Option` and `Either` data types have `fold`, which let us handle both failure and success at the same time. In a similar fashion, `ZIO` effects also have several methods that allow us to handle both failure and success.
+
+The first fold method, `fold`, lets us non-effectfully handle both failure and success, by supplying a non-effectful handler for each case:
+
+```scala
+lazy val DefaultData: Array[Byte] = Array(0, 0)
+
+val primaryOrDefaultData: UIO[Array[Byte]] =
+ readFile("primary.data").fold(
+ _ => DefaultData,
+ data => data)
+```
+
+The second fold method, `foldM`, lets us effectfully handle both failure and success, by supplying an effectful (but still pure) handler for each case:
+
+```scala
+val primaryOrSecondaryData: IO[IOException, Array[Byte]] =
+ readFile("primary.data").foldM(
+ _ => readFile("secondary.data"),
+ data => ZIO.succeed(data))
+```
+
+Nearly all error handling methods are defined in terms of `foldM`, because it is both powerful and fast.
+
+In the following example, `foldM` is used to handle both failure and success of the `readUrls` method:
+
+```scala
+val urls: UIO[Content] =
+ readUrls("urls.json").foldM(
+ error => IO.succeed(NoContent(error)),
+ success => fetchContent(success)
+ )
+```
+
+### Retrying
+
+| Function | Input Type | Output Type |
+|---------------------|----------------------------------------------------------------------|----------------------------------------|
+| `retry` | `Schedule[R1, E, S]` | `ZIO[R1 with Clock, E, A]` |
+| `retryN` | `n: Int` | `ZIO[R, E, A]` |
+| `retryOrElse` | `policy: Schedule[R1, E, S], orElse: (E, S) => ZIO[R1, E1, A1]` | `ZIO[R1 with Clock, E1, A1]` |
+| `retryOrElseEither` | `schedule: Schedule[R1, E, Out], orElse: (E, Out) => ZIO[R1, E1, B]` | `ZIO[R1 with Clock, E1, Either[B, A]]` |
+| `retryUntil` | `E => Boolean` | `ZIO[R, E, A]` |
+| `retryUntilEquals` | `E1` | `ZIO[R, E1, A]` |
+| `retryUntilM` | `E => URIO[R1, Boolean]` | `ZIO[R1, E, A]` |
+| `retryWhile` | `E => Boolean` | `ZIO[R, E, A]` |
+| `retryWhileEquals` | `E1` | `ZIO[R, E1, A]` |
+| `retryWhileM` | `E => URIO[R1, Boolean]` | `ZIO[R1, E, A]` |
+
+When we are building applications we want to be resilient in the face of a transient failure. This is where we need to retry to overcome these failures.
+
+There are a number of useful methods on the ZIO data type for retrying failed effects.
+
+The most basic of these is `ZIO#retry`, which takes a `Schedule` and returns a new effect that will retry the first effect if it fails, according to the specified policy:
+
+```scala
+import zio.clock._
+
+val retriedOpenFile: ZIO[Clock, IOException, Array[Byte]] =
+ readFile("primary.data").retry(Schedule.recurs(5))
+```
+
+The next most powerful function is `ZIO#retryOrElse`, which allows specification of a fallback to use, if the effect does not succeed with the specified policy:
+
+```scala
+readFile("primary.data").retryOrElse(
+ Schedule.recurs(5),
+ (_, _:Long) => ZIO.succeed(DefaultData)
+)
+```
+
+The final method, `ZIO#retryOrElseEither`, allows returning a different type for the fallback.
+
+## Resource Management
+
+ZIO's resource management features work across synchronous, asynchronous, concurrent, and other effect types, and provide strong guarantees even in the presence of failure, interruption, or defects in the application.
+
+### Finalizing
+
+Scala has a `try` / `finally` construct which helps us to make sure we don't leak resources because no matter what happens in the try, the `finally` block will be executed. So we can open files in the try block, and then we can close them in the `finally` block, and that gives us the guarantee that we will not leak resources.
+
+#### Asynchronous Try / Finally
+The problem with the `try` / `finally` construct is that it only applies to synchronous code; it doesn't work for asynchronous code. ZIO gives us a method called `ensuring` that works with either synchronous or asynchronous actions. So we have a functional try/finally that works across the async regions of our code, and our finalizer can itself have async regions.
+
+Like `try` / `finally`, the `ensuring` operation guarantees that if an effect begins executing and then terminates (for whatever reason), then the finalizer will begin executing:
+
+```scala
+val finalizer =
+ UIO.effectTotal(println("Finalizing!"))
+// finalizer: UIO[Unit] = zio.ZIO$EffectTotal@438627be
+
+val finalized: IO[String, Unit] =
+ IO.fail("Failed!").ensuring(finalizer)
+// finalized: IO[String, Unit] = zio.ZIO$CheckInterrupt@54924fe9
+```
+
+The finalizer is not allowed to fail, which means that it must handle any errors internally.
+
+Like `try` / `finally`, finalizers can be nested, and the failure of any inner finalizer will not affect outer finalizers. Nested finalizers will be executed in reverse order, and linearly (not in parallel).
+
+Unlike `try` / `finally`, `ensuring` works across all types of effects, including asynchronous and concurrent effects.
+
+Here is another example of ensuring that our clean-up action is called after our effect is done:
+
+```scala
+import zio.Task
+var i: Int = 0
+val action: Task[String] =
+ Task.effectTotal(i += 1) *>
+ Task.fail(new Throwable("Boom!"))
+val cleanupAction: UIO[Unit] = UIO.effectTotal(i -= 1)
+val composite = action.ensuring(cleanupAction)
+```
+
+> _**Note:**_
+> Finalizers offer very powerful guarantees, but they are low-level, and should generally not be used for releasing resources. For higher-level logic built on `ensuring`, see `ZIO#bracket` on the bracket section.
+
+#### Unstoppable Finalizers
+
+In Scala when we nest `try` / `finally` finalizers, they cannot be stopped. If we have nested finalizers and one of them fails for some sort of catastrophic reason the ones on the outside will still be run and in the correct order.
+
+```scala
+try {
+ try {
+ try {
+ ...
+ } finally f1
+ } finally f2
+} finally f3
+```
+
+Also in ZIO, like `try` / `finally`, the finalizers are unstoppable. This means that if we have a buggy finalizer that is going to leak some resources—which unfortunately happens—we will leak the minimum amount of resources, because all the other finalizers will still be run in the correct order.
+
+```scala
+val io = ???
+io.ensuring(f1)
+ .ensuring(f2)
+ .ensuring(f3)
+```
+
+### Brackets
+
+In Scala the `try` / `finally` is often used to manage resources. A common use for `try` / `finally` is safely acquiring and releasing resources, such as new socket connections or opened files:
+
+```scala
+val handle = openFile(name)
+
+try {
+ processFile(handle)
+} finally closeFile(handle)
+```
+
+ZIO encapsulates this common pattern with `ZIO#bracket`, which allows us to specify an _acquire_ effect, which acquires a resource; a _release_ effect, which releases it; and a _use_ effect, which uses the resource. Bracket lets us open a file and guarantees the file gets closed, no matter what happens while we are using that resource.
+
+The release action is guaranteed to be executed by the runtime system, even if the utilize action throws an exception or the executing fiber is interrupted.
+
+Brackets are a built-in primitive that let us safely acquire and release resources. They are used for a similar purpose as `try/catch/finally`, only brackets work with synchronous and asynchronous actions, work seamlessly with fiber interruption, and are built on a different error model that ensures no errors are ever swallowed.
+
+Brackets consist of an *acquire* action, a *utilize* action (which uses the acquired resource), and a *release* action.
+
+```scala
+import zio.{ UIO, IO }
+```
+
+
+```scala
+val groupedFileData: IO[IOException, Unit] = openFile("data.json").bracket(closeFile(_)) { file =>
+ for {
+ data <- decodeData(file)
+ grouped <- groupData(data)
+ } yield grouped
+}
+```
+
+Brackets have compositional semantics, so if a bracket is nested inside another bracket, and the outer bracket acquires a resource, then the outer bracket's release will always be called, even if, for example, the inner bracket's release fails.
+
+Let's look at a full working example on using brackets:
+
+```scala
+import zio.{ ExitCode, Task, UIO }
+import java.io.{ File, FileInputStream }
+import java.nio.charset.StandardCharsets
+
+object Main extends App {
+
+ // run my bracket
+ def run(args: List[String]) =
+ mybracket.orDie.as(ExitCode.success)
+
+ def closeStream(is: FileInputStream) =
+ UIO(is.close())
+
+ // helper method to work around in Java 8
+ def readAll(fis: FileInputStream, len: Long): Array[Byte] = {
+ val content: Array[Byte] = Array.ofDim(len.toInt)
+ fis.read(content)
+ content
+ }
+
+ def convertBytes(is: FileInputStream, len: Long) =
+ Task.effect(println(new String(readAll(is, len), StandardCharsets.UTF_8))) // Java 8
+ //Task.effect(println(new String(is.readAllBytes(), StandardCharsets.UTF_8))) // Java 11+
+
+ // mybracket is just a value. Won't execute anything here until interpreted
+ val mybracket: Task[Unit] = for {
+ file <- Task(new File("/tmp/hello"))
+ len = file.length
+ string <- Task(new FileInputStream(file)).bracket(closeStream)(convertBytes(_, len))
+ } yield string
+}
+```
+
+## Unswallowed Exceptions
+
+The Java and Scala error models are broken. Because if we have the right combinations of `try`/`finally`/`catch`es we can actually throw many exceptions, and then we are only able to catch one of them. All the other ones are lost. They are swallowed into a black hole, and also the one that we catch is the wrong one. It is not the primary cause of the failure.
+
+In the following example, we are going to show this behavior:
+
+```scala
+ try {
+ try throw new Error("e1")
+ finally throw new Error("e2")
+ } catch {
+ case e: Error => println(e)
+ }
+```
+
+The above program just prints `e2`, which is lossy, and `e2` is also not the primary cause of the failure.
+
+But in the ZIO version, all the errors will still be reported. So even though we are only able to catch one error, the other ones will still be reported, and we have full control over them. They don't get lost.
+
+Let's write a ZIO version:
+
+```scala
+IO.fail("e1")
+ .ensuring(IO.effectTotal(throw new Exception("e2")))
+ .catchAll {
+ case "e1" => putStrLn("e1")
+ case "e2" => putStrLn("e2")
+ }
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/fiber/fiber.md b/website/versioned_docs/version-1.0.18/reference/fiber/fiber.md
new file mode 100644
index 000000000000..2d6f6e1b3dec
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/fiber/fiber.md
@@ -0,0 +1,368 @@
+---
+id: fiber
+title: "Fiber"
+---
+
+To perform an effect without blocking the current process, we can use fibers, which are a lightweight concurrency mechanism.
+
+We can `fork` any `IO[E, A]` to immediately yield an `UIO[Fiber[E, A]]`. The provided `Fiber` can be used to `join` the fiber, which will resume on production of the fiber's value, or to `interrupt` the fiber, which immediately terminates the fiber and safely releases all resources acquired by the fiber.
+
+
+```scala
+val analyzed =
+ for {
+ fiber1 <- analyzeData(data).fork // IO[E, Analysis]
+ fiber2 <- validateData(data).fork // IO[E, Boolean]
+ // Do other stuff
+ valid <- fiber2.join
+ _ <- if (!valid) fiber1.interrupt
+ else IO.unit
+ analyzed <- fiber1.join
+ } yield analyzed
+```
+
+## Operations
+
+### fork and join
+Whenever we need to start a fiber, we have to `fork` an effect, it gives us a fiber. It is similar to the `start` method on Java thread or submitting a new thread to the thread pool in Java, it is the same idea. Also, joining is a way of waiting for that fiber to compute its value. We are going to wait until it's done.
+
+In the following example, we are going to run sleep and printing on a separate fiber and at the end, waiting for that fiber to compute its value:
+
+```scala
+import zio._
+import zio.console._
+import zio.clock._
+import zio.duration._
+for {
+ fiber <- (sleep(3.seconds) *>
+ putStrLn("Hello, after 3 second") *>
+ ZIO.succeed(10)).fork
+ _ <- putStrLn(s"Hello, World!")
+ res <- fiber.join
+ _ <- putStrLn(s"Our fiber succeeded with $res")
+} yield ()
+```
+
+### fork0
+A more powerful variant of `fork`, called `fork0`, allows specification of supervisor that will be passed any non-recoverable errors from the forked fiber, including all such errors that occur in finalizers. If this supervisor is not specified, then the supervisor of the parent fiber will be used, recursively, up to the root handler, which can be specified in `Runtime` (the default supervisor merely prints the stack trace).
+
+### forkDaemon
+
+The `ZIO#forkDaemon` forks the effect into a new fiber **attached to the global scope**. Because the new fiber is attached to the global scope, when the fiber executing the returned effect terminates, the forked fiber will continue running.
+
+In the following example, we have three effects: `inner`, `outer`, and `mainApp`. The outer effect is forking the `inner` effect using `ZIO#forkDaemon`. The `mainApp` effect is forking the `outer` fiber using `ZIO#fork` method and interrupt it after 3 seconds. Since the `inner` effect is forked in global scope, it will not be interrupted and continue its job:
+
+```scala
+val inner = putStrLn("Inner job is running.")
+ .delay(1.seconds)
+ .forever
+ .onInterrupt(putStrLn("Inner job interrupted.").orDie)
+
+val outer = (
+ for {
+ f <- inner.forkDaemon
+ _ <- putStrLn("Outer job is running.").delay(1.seconds).forever
+ _ <- f.join
+ } yield ()
+).onInterrupt(putStrLn("Outer job interrupted.").orDie)
+
+val mainApp = for {
+ fiber <- outer.fork
+ _ <- fiber.interrupt.delay(3.seconds)
+ _ <- ZIO.never
+} yield ()
+```
+
+### interrupt
+Whenever we want to get rid of our fiber, we can simply call interrupt on that. The interrupt operation does not resume until the fiber has completed or has been interrupted and all its finalizers have been run. These precise semantics allow construction of programs that do not leak resources.
+
+### await
+To inspect whether our fiber succeeded or failed, we can call `await` on the fiber. If we call `await`, it will wait for that fiber to terminate, and it will give us back the fiber's value as an `Exit`. That exit value could be a failure or a success.
+
+```scala
+import zio.console._
+import zio.random._
+for {
+ b <- nextBoolean
+ fiber <- (if (b) ZIO.succeed(10) else ZIO.fail("The boolean was not true")).fork
+ exitValue <- fiber.await
+ _ <- exitValue match {
+ case Exit.Success(value) => putStrLn(s"Fiber succeeded with $value")
+ case Exit.Failure(cause) => putStrLn(s"Fiber failed")
+ }
+} yield ()
+```
+
+The `await` method is similar to `join`, but it is lower level than `join`. When we call `join`, if the underlying fiber failed, then our attempt to join it will also fail with the same error.
+
+### Parallelism
+To execute actions in parallel, the `zipPar` method can be used:
+
+
+```scala
+def bigCompute(m1: Matrix, m2: Matrix, v: Matrix): UIO[Matrix] =
+ for {
+ t <- computeInverse(m1).zipPar(computeInverse(m2))
+ (i1, i2) = t
+ r <- applyMatrices(i1, i2, v)
+ } yield r
+```
+
+The `zipPar` combinator has resource-safe semantics. If one computation fails, the other computation will be interrupted, to prevent wasting resources.
+
+### Racing
+
+Two `IO` actions can be *raced*, which means they will be executed in parallel, and the value of the first action that completes successfully will be returned.
+
+```scala
+fib(100) race fib(200)
+```
+
+The `race` combinator is resource-safe, which means that if one of the two actions returns a value, the other one will be interrupted, to prevent wasting resources.
+
+The `race` and even `zipPar` combinators are a specialization of a much-more powerful combinator called `raceWith`, which allows executing user-defined logic when the first of two actions succeeds.
+
+On the JVM, fibers will use threads, but will not consume *unlimited* threads. Instead, fibers yield cooperatively during periods of high-contention.
+
+```scala
+def fib(n: Int): UIO[Int] =
+ if (n <= 1) {
+ IO.succeed(1)
+ } else {
+ for {
+ fiber1 <- fib(n - 2).fork
+ fiber2 <- fib(n - 1).fork
+ v2 <- fiber2.join
+ v1 <- fiber1.join
+ } yield v1 + v2
+ }
+```
+
+## Error Model
+The `IO` error model is simple, consistent, permits both typed errors and termination, and does not violate any laws in the `Functor` hierarchy.
+
+An `IO[E, A]` value may only raise errors of type `E`. These errors are recoverable by using the `either` method. The resulting effect cannot fail, because the failure case has been exposed as part of the `Either` success case.
+
+```scala
+val error: Task[String] = IO.fail(new RuntimeException("Some Error"))
+val errorEither: ZIO[Any, Nothing, Either[Throwable, String]] = error.either
+```
+
+Separately from errors of type `E`, a fiber may be terminated for the following reasons:
+
+* **The fiber self-terminated or was interrupted by another fiber**. The "main" fiber cannot be interrupted because it was not forked from any other fiber.
+
+* **The fiber failed to handle some error of type `E`**. This can happen only when an `IO.fail` is not handled. For values of type `UIO[A]`, this type of failure is impossible.
+
+* **The fiber has a defect that leads to a non-recoverable error**. There are only two ways this can happen:
+
+  1. A partial function is passed to a higher-order function such as `map` or `flatMap`. For example, `io.map(_ => throw e)`, or `io.flatMap(a => throw e)`. The solution to this problem is not to pass impure functions to purely functional libraries like ZIO, because doing so leads to violations of laws and destruction of equational reasoning.
+
+ 2. Error-throwing code was embedded into some value via `IO.effectTotal`, etc. For importing partial effects into `IO`, the proper solution is to use a method such as `IO.effect`, which safely translates exceptions into values.
+
+When a fiber is terminated, the reason for the termination, expressed as a `Throwable`, is passed to the fiber's supervisor, which may choose to log, print the stack trace, restart the fiber, or perform some other action appropriate to the context.
+
+A fiber cannot stop its own interruption. However, all finalizers will be run during termination, even when some finalizers throw non-recoverable errors. Errors thrown by finalizers are passed to the fiber's supervisor.
+
+There are no circumstances in which any errors will be "lost", which makes the `IO` error model more diagnostic-friendly than the `try`/`catch`/`finally` construct that is baked into both Scala and Java, which can easily lose errors.
+
+## Fiber Interruption
+
+In Java, a thread can be interrupted via `Thread#interrupt` by another thread, but it may not respect the interruption request. Unlike Java, in ZIO when a fiber interrupts another fiber, we know that the interruption occurs, and it always works.
+
+When working with ZIO fibers, we should consider these notes about fiber interruptions:
+
+### Interruptible/Uninterruptible Regions
+
+All fibers are interruptible by default. To make an effect uninterruptible we can use `Fiber#uninterruptible`, `ZIO#uninterruptible` or `ZIO.uninterruptible`. We also have interruptible versions of these methods, to make an uninterruptible effect interruptible again.
+
+```scala
+for {
+ fiber <- clock.currentDateTime
+ .flatMap(time => putStrLn(time.toString))
+ .schedule(Schedule.fixed(1.seconds))
+ .uninterruptible
+ .fork
+ _ <- fiber.interrupt // Runtime stuck here and does not go further
+} yield ()
+```
+
+Note that there is no way to stop interruption. We can only delay it, by making an effect uninterruptible.
+
+### Fiber Finalization on Interruption
+
+When a fiber has completed its work, or even when it is interrupted, the finalizer of that fiber is guaranteed to be executed:
+
+```scala
+for {
+ fiber <- putStrLn("Working on the first job")
+ .schedule(Schedule.fixed(1.seconds))
+ .ensuring {
+ (putStrLn(
+ "Finalizing or releasing a resource that is time-consuming"
+ ) *> ZIO.sleep(7.seconds)).orDie
+ }
+ .fork
+ _ <- fiber.interrupt.delay(4.seconds)
+ _ <- putStrLn(
+ "Starting another task when the interruption of the previous task finished"
+ )
+} yield ()
+```
+
+The `release` action may take some time freeing up resources. So it may slow down the fiber's interruption.
+
+### Fast Interruption
+
+As we saw in the previous section, the ZIO runtime gets stuck on the interruption task until the fiber's finalizer finishes its job. We can prevent this behavior by using `ZIO#disconnect` or `Fiber#interruptFork`, which performs the fiber's interruption in the background, in a separate daemon fiber:
+
+Let's try the `Fiber#interruptFork`:
+
+```scala
+for {
+ fiber <- putStrLn("Working on the first job")
+ .schedule(Schedule.fixed(1.seconds))
+ .ensuring {
+ (putStrLn(
+ "Finalizing or releasing a resource that is time-consuming"
+ ) *> ZIO.sleep(7.seconds)).orDie
+ }
+ .fork
+ _ <- fiber.interruptFork.delay(4.seconds) // fast interruption
+ _ <- putStrLn(
+ "Starting another task while interruption of the previous fiber happening in the background"
+ )
+} yield ()
+```
+
+### Interrupting Blocking Operations
+
+The `zio.blocking.effectBlocking` is interruptible by default, but its interruption will not translate to the JVM thread interruption. Instead, we can use the `zio.blocking.effectBlockingInterruptible` method. By using the `effectBlockingInterruptible` method, if that effect is interrupted, it will translate the ZIO interruption to the JVM thread interruption. ZIO has a comprehensive guide about blocking operations on the [blocking service](../services/blocking.md) page.
+
+### Automatic Interruption
+
+If we never _cancel_ a running effect explicitly, ZIO performs **automatic interruption** for several reasons:
+
+1. **Structured Concurrency** — If a parent fiber terminates, then by default, all child fibers are interrupted, and they cannot outlive their parent. We can prevent this behavior by using `ZIO#forkDaemon` or `ZIO#forkIn` instead of `ZIO#fork`.
+
+2. **Parallelism** — If one effect fails during the execution of many effects in parallel, the others will be canceled. Examples include `foreachPar`, `zipPar`, and all other parallel operators.
+
+3. **Timeouts** — If a running effect being timed out has not been completed in the specified amount of time, then the execution is canceled.
+
+4. **Racing** — The loser of a race, if still running, is canceled.
+
+### Joining an Interrupted Fiber
+
+We can join an interrupted fiber, which will cause our fiber to become interrupted. This process does not inhibit finalization. So, **ZIO's concurrency model respects brackets even when we are going to _join_ an interrupted fiber**:
+
+```scala
+val myApp =
+ (
+ for {
+ fiber <- putStrLn("Running a job").delay(1.seconds).forever.fork
+ _ <- fiber.interrupt.delay(3.seconds)
+ _ <- fiber.join // Joining an interrupted fiber
+ } yield ()
+ ).ensuring(
+ putStrLn(
+ "This finalizer will be executed without occurring any deadlock"
+ ).orDie
+ )
+```
+
+A fiber that is interrupted because of joining another interrupted fiber will properly finalize; this is a distinction between ZIO and the other effect systems, which _deadlock_ the joining fiber.
+
+## Thread Shifting - JVM
+By default, fibers make no guarantees as to which thread they execute on. They may shift between threads, especially as they execute for long periods of time.
+
+Fibers only ever shift onto the thread pool of the runtime system, which means that by default, fibers running for a sufficiently long time will always return to the runtime system's thread pool, even when their (asynchronous) resumptions were initiated from other threads.
+
+For performance reasons, fibers will attempt to execute on the same thread for a (configurable) minimum period, before yielding to other fibers. Fibers that resume from asynchronous callbacks will resume on the initiating thread, and continue for some time before yielding and resuming on the runtime thread pool.
+
+These defaults help guarantee stack safety and cooperative multitasking. They can be changed in `Runtime` if automatic thread shifting is not desired.
+
+## Type of Workloads
+Let's discuss the type of workloads that a fiber can handle. There are three types of workloads that a fiber can handle:
+1. **CPU Work/Pure CPU Bound** is a computation that uses the computational power of a CPU intensively and purely, without exceeding the computation boundary. By intensive, we mean a huge chunk of work which takes a significant amount of time for the CPU to compute, e.g. a complex numerical computation.
+2. **Blocking I/O** is a computation, which exceeds the computational boundary by doing communication in a blocking fashion. For example, waiting for a certain amount of time to elapse or waiting for an external event to happen are blocking I/O operations.
+3. **Asynchronous I/O** is a computation, which exceeds the computation boundary by doing communication asynchronously, e.g. registering a callback for a specific event.
+
+### CPU Work
+What we refer to as CPU Work is pure computational firepower without involving any interaction and communication with the outside world. It doesn't involve any I/O operation. It's a pure computation. By I/O, we mean anything that involves reading from and writing to an external resource such as a file or a socket or web API, or anything that would be characterized as I/O.
+
+Fibers are designed to be **cooperative**, which means that **they will yield to each other as required to preserve some level of fairness**. If we have a fiber that is doing CPU Work which passes through one or more ZIO operations such as `flatMap` or `map`, as long as there exists a touchpoint where the ZIO runtime system can keep a tab on that ongoing CPU Work, that fiber will yield to other fibers. These touchpoints allow many fibers that are doing CPU Work to end up sharing the same thread.
+
+What if though, we have a CPU Work operation that takes a really long time to run? Let's say 30 seconds or something it does pure CPU Work very computationally intensive? What happens if we take that single gigantic function and put that into a `ZIO.effect`? So there is no way for the ZIO Runtime that can force that fiber to yield to other fibers. In this situation, the ZIO Runtime cannot preserve some level of fairness, and that single big CPU operation monopolizes the underlying thread. It is not a good practice to monopolize the underlying thread.
+
+ZIO has a special thread pool that can be used to do these computations. That's the **blocking thread pool**. There is a `ZIO.blocking._` package that contains an operator called `blocking`, which can be used to run a big CPU Work on a dedicated thread. So, it doesn't interfere with all the other work that is going on simultaneously in the ZIO Runtime system.
+
+If a CPU Work doesn't yield quickly, then that is going to monopolize a thread. So how can we determine that our CPU Work can yield quickly or not?
+- If we wrote that CPU Work by composing many ZIO operations, then even if that CPU Work is very CPU intensive, due to the composition of ZIO operations it has a chance to yield quickly to other fibers and not monopolize a thread.
+- If that CPU Work doesn't compose any ZIO operations, or we lifted it from a legacy library, the ZIO Runtime doesn't have any chance of yielding quickly to other fibers. So this fiber is going to monopolize the underlying thread.
+
+The best practice is to run those huge CPU Work on a dedicated thread pool, by lifting them with the `blocking` operator in the `ZIO.blocking` package.
+
+> _**Note**:_
+>
+> So as a rule of thumb, when we have a huge CPU Work that is not chunked with built-in ZIO operations and going to monopolize the underlying thread, we should run that on a dedicated thread pool that is designed to perform CPU-driven tasks.
+
+### Blocking I/O
+Inside Java, there are many methods that we can call that will put our thread to sleep. For example, if we call `read` on a socket and there is nothing to read right now, because not enough bytes have been received from the other side over the TCP/IP protocol, then that will put our thread to sleep.
+
+Most of the I/O operations, and certainly all the classic I/O operations like `InputStream` and `OutputStream`, utilize a locking mechanism that parks a thread. When we `write` to the `OutputStream`, before the data has been written to that file, that method will just `block`, and it will `park` the thread. The thread will `wait` until the data can actually be written, and only after it is actually written can the method return. It is the same way for `read` and similar blocking operations. Anytime we use a `lock`, anything in `java.util.concurrent.locks`, all those locks use this mechanism. All these operations are called blocking because they `park` the thread that is doing the work, and the thread that's doing the work goes to sleep.
+
+> _**Note:**_
+>
+> What we refer to as blocking I/O is not necessarily just an I/O operation. Remember every time we use a `lock` we are also `park`ing a thread. It goes to `sleep`, and it has to be woken up again. We refer to this entire class of operations as **blocking I/O**.
+
+There are multiple types of overhead associated with parking a thread:
+
+1. When we `park` a thread then that thread is still consuming resources, it's still obviously consuming stack resources, the heap, and all metadata associated with the underlying thread in the JVM.
+
+2. Then even further down deeper, in the operating system because every JVM thread corresponds to an operating system level thread, there's a large amount of overhead. Every thread has a pre-allocated stack size so that memory is reserved even if that thread's not doing any work. That memory is sort of reserved for the thread, and we cannot touch it.
+
+3. Besides, the actual process of putting a thread to sleep and then later waking it up again is computationally intensive. It slows down our computations.
+
+This is why it has become a sort of best practice and part of the architectural pattern of reactive applications to design what is called **non-blocking application**. Non-blocking is synonymous with asynchronous. Non-blocking and asynchronous and to some degree even reactive, they're all trying to get at something which is we want scalable applications.
+
+Scalable applications cannot afford to have thousands of threads just sitting around doing nothing and just consuming work and taking a long time to wake up again. We cannot do so-called blocking I/O, in scalable applications. It is considered an anti-pattern because it is not efficient. That is not a way to build scalable applications and nonetheless, we have to support that use case.
+
+Today, we have lots of common Java libraries that use blocking constructs, like `InputStream` and `OutputStream`, and `Reader` and `Writer`. Also, the JDBC is entirely blocking. The only way of doing database I/O in Java is blocking. So obviously, we have to do blocking I/O. We can do blocking I/O from a fiber. So is it best practice? No, it should be avoided whenever possible, but the reality is we have to do blocking I/O.
+
+Whenever we lift a blocking I/O operation into ZIO, the ZIO Runtime is executing a fiber that is doing blocking I/O. The underlying thread will be parked, and it has to be woken up later. ZIO doesn't have any ability to stop that from happening. It can't stop an underlying thread from being parked; that's just the way these APIs are designed. So, we have to block. There's no way around that. That fiber will be monopolizing the underlying thread, and therefore that thread is not available for performing all the work of the other fibers in the system. So that can be a bottleneck point in our application.
+
+And again, the solution to this problem is the same as the solution to the first class of problems, the CPU Work. The solution is to run this **using the blocking thread pool** in ZIO which will ensure that this blocking code executes on its dedicated thread pool. **So it doesn't have to interfere or compete with the threads that are used for doing the bulk of work in your application**. So basically ZIO's philosophy is if we have to do CPU Work or if we have to do synchronous that's ok we can do that. Just we need to do it in the right place. So it doesn't interfere with our primary thread pool.
+
+ZIO has one primary built-in fixed thread pool. This sort of workhorse thread pool is designed to be used for the majority of our application requirements. It has a certain number of threads in it and that stays constant over the lifetime of our application.
+
+Why is that the case? Well because for the majority of workloads in our applications, it does not actually help things to create more threads than the number of CPU cores. If we have eight cores, it does not accelerate any sort of processing to create more than eight threads. Because at the end of the day our hardware is only capable of running eight things at the same time.
+
+If we create a thousand threads on a system that can only run eight of them in parallel at a time, then what does the operating system have to do? As we do not have as many CPU cores as needed, the operating system starts giving a little slice of the eight cores to all these threads by switching between them over and over again.
+
+The context switching overhead is significant. The CPU has to load in new registers, refill all its caches, it has to go through all these crazy complex processes that interfere with its main job to get stuff done. There's significant overhead associated with that. As a result, it's not going to be very efficient. We are going to waste a lot of our time and resources just switching back and forth between all these threads, that would kill our application.
+
+So for that reason, ZIO's default thread pool is fixed with a number of threads equal to the number of CPU cores. That is the best practice. That means that no matter how much work we create if we create a hundred thousand fibers, they will still run on a fixed number of threads.
+
+Let's say we do blocking I/O on the main ZIO thread pool, so we have got eight threads all sitting and parked on a socket read. What happens to all the other 100000 fibers in our system? They line up in a queue waiting for their chance to run. That's not ideal. That's why we should take these effects that either do blocking I/O, or they do big CPU Work that's not chunked and run them using ZIO's blocking thread pool which will give us a dedicated thread.
+
+That dedicated thread is not efficient but again, sometimes we have to interact with legacy code and legacy code is full of all blocking codes. We just need to be able to handle that gracefully and ZIO does that using the blocking thread pool.
+
+### Asynchronous I/O
+The third category is asynchronous I/O, and we refer to it as Async Work. Async Work is a code that whenever it runs into something that it needs to wait on, instead of blocking and parking the thread, it registers a callback, and returns immediately.
+
+It allows us to register a callback, and when the result is available, our callback will be invoked. Callbacks are the fundamental way by which all async code on the JVM works. There is no mechanism in the JVM right now to support async code natively; that will probably happen with Project Loom in the future, which will simplify a lot of things.
+
+But for now, in all currently published JVM versions, there is no such thing. The only way we can get non-blocking async code is to have this callback-registering mechanism.
+
+Callbacks have the pro that they don't wait for CPU. Instead of waiting to read the next chunk from a socket or instead of waiting to acquire a lock, all we have to do is call it and give it a callback. It doesn't need to do anything else it can return control to the thread pool and then later on when that data has been read from the socket or when that lock has been acquired or when that amount of time has elapsed if we're sleeping for a certain amount of time then our callback can be invoked.
+
+It has the potential to be extraordinarily efficient. The drawback of callbacks is they are not so pretty and fun to work with. They don't compose well with try-finally constructs. Error handling is really terrible, we have to do error propagation on our own. So that is what gave rise to data types like `Future` which eliminates the need for callbacks. By using `Future`s we can wrap callback-based APIs and get the benefits of async but without the callbacks. Also, it has a for-comprehension that we can structure our code as a nice linear sequence.
+
+Similarly in ZIO we never see a callback with ZIO, but fundamentally everything boils down to asynchronous on the JVM in a callback fashion. Callback base code is obscenely efficient but it is extraordinarily painful to deal with directly. Data types like `Future` and `ZIO` allow us to avoid even seeing a callback in our code.
+
+With ZIO, we do not have to think about a callback nonetheless sometimes when we need to integrate with legacy code. ZIO has an appropriate constructor to turn that ugly callback-based API into a ZIO effect. It is the `async` constructor.
+
+Every time we do one of ZIO's blocking operations, it doesn't actually block the underlying thread; instead, it is semantic blocking, managed by ZIO. For example, every time we see something like `ZIO.sleep`, or when we take something from a queue (`queue.take`), or offer something to a queue (`queue.offer`), or if we acquire a permit from a semaphore (`semaphore.withPermit`), or if we acquire a lock (`ZIO.lock`) and so forth, we are blocking semantically. If we use the same stuff in Java, like `Thread.sleep` or any of its `lock` machinery, then those things are going to block a thread. So this is why we say ZIO blocks 100% semantically, while the underlying Java thread is not blocked.
+
+All of the pieces of machinery ZIO gives us are 100% asynchronous and non-blocking. As they don't block and monopolize the thread, all of the async work is executed on the primary thread pool in ZIO.
+
diff --git a/website/versioned_docs/version-1.0.18/reference/fiber/fiberid.md b/website/versioned_docs/version-1.0.18/reference/fiber/fiberid.md
new file mode 100644
index 000000000000..3e8e235ab3f7
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/fiber/fiberid.md
@@ -0,0 +1,7 @@
+---
+id: fiberid
+title: "Fiber.Id"
+---
+
+The identity of a [Fiber](fiber.md), described by the time it began life (`startTimeMillis`), and a monotonically increasing sequence number generated from an atomic counter (`seqNumber`).
+
diff --git a/website/versioned_docs/version-1.0.18/reference/fiber/fiberref.md b/website/versioned_docs/version-1.0.18/reference/fiber/fiberref.md
new file mode 100644
index 000000000000..87a88e645d3a
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/fiber/fiberref.md
@@ -0,0 +1,157 @@
+---
+id: fiberref
+title: "FiberRef"
+---
+
+`FiberRef[A]` models a mutable reference to a value of type `A`. The two basic operations are `set`, which sets the reference to a new value, and `get`, which retrieves the current value of the reference.
+
+We can think of `FiberRef` as Java's `ThreadLocal` on steroids. So, just like we have `ThreadLocal` in Java, we have `FiberRef` in ZIO. So as different threads have different `ThreadLocal`s, we can say different fibers have different `FiberRef`s. They don't intersect or overlap in any way. `FiberRef` is the fiber version of `ThreadLocal` with significant improvements in its semantics. A `ThreadLocal` only has a mutable state in which each thread accesses its own copy, but threads don't propagate their state to their children.
+
+As opposed to `Ref[A]`, the value of a `FiberRef[A]` is bound to an executing fiber. Different fibers who hold the same `FiberRef[A]` can independently set and retrieve values of the reference, without collisions.
+
+```scala
+import zio._
+
+for {
+ fiberRef <- FiberRef.make[Int](0)
+ _ <- fiberRef.set(10)
+ v <- fiberRef.get
+} yield v == 10
+```
+
+## Operations
+
+`FiberRef[A]` has an API almost identical to `Ref[A]`. It includes well-known methods such as:
+
+- `FiberRef#get`. Returns the current value of the reference.
+- `FiberRef#set`. Sets the current value of the reference.
+- `FiberRef#update` / `FiberRef#updateSome` updates the value with the specified function.
+- `FiberRef#modify`/ `FiberRef#modifySome` modifies the value with the specified function, computing a return value for the operation.
+
+You can also use `locally` to scope `FiberRef` value only for a given effect:
+
+```scala
+for {
+ correlationId <- FiberRef.make[String]("")
+ v1 <- correlationId.locally("my-correlation-id")(correlationId.get)
+ v2 <- correlationId.get
+} yield v1 == "my-correlation-id" && v2 == ""
+```
+
+## Propagation
+
+Let's go back to the `FiberRef`s analog called `ThreadLocal` and see how it works. If we have thread `A` with its `ThreadLocal` and thread `A` creates a new thread, let's call it thread `B`. When thread `A` sends thread `B` the same `ThreadLocal` then what value does thread `B` see inside the `ThreadLocal`? Well, it sees the default value of the `ThreadLocal`. It does not see `A`s value of the `ThreadLocal`. So in other words, `ThreadLocal`s do not propagate their values across the sort of graph of threads so when one thread creates another, the `ThreadLocal` value is not propagated from parent to child.
+
+`FiberRef`s improve on that model quite dramatically. Basically, whenever a child fiber is created from its parent, the `FiberRef` value of the parent fiber is propagated to its child fiber.
+
+### Copy-on-Fork
+`FiberRef[A]` has *copy-on-fork* semantics for `ZIO#fork`. This essentially means that a child `Fiber` starts with the `FiberRef` values of its parent. When the child sets a new value of the `FiberRef`, the change is visible only to the child itself. The parent fiber still has its own value.
+
+So if we create a `FiberRef` and, we set its value to `5`, and we pass this `FiberRef` to a child fiber, it sees the value `5`. If the child fiber modifies the value `5` to `6`, the parent fiber can't see that change. So the child fiber gets its own copy of the `FiberRef`, and it can modify it locally. Those changes will not affect the parent fiber:
+
+```scala
+for {
+ fiberRef <- FiberRef.make(5)
+ promise <- Promise.make[Nothing, Int]
+ _ <- fiberRef
+ .updateAndGet(_ => 6)
+ .flatMap(promise.succeed).fork
+ childValue <- promise.await
+ parentValue <- fiberRef.get
+} yield assert(parentValue == 5 && childValue == 6)
+```
+
+### join Semantic
+If we `join` a fiber then its `FiberRef` is merged back into the parent fiber:
+
+```scala
+for {
+ fiberRef <- FiberRef.make(5)
+ child <- fiberRef.set(6).fork
+ _ <- child.join
+ parentValue <- fiberRef.get
+} yield assert(parentValue == 6)
+```
+
+So if we `fork` a fiber and that child fiber modifies a bunch of `FiberRef`s and then later we join it, we get those modifications merged back into the parent fiber. So that's the semantic model of ZIO on `join`.
+
+Each fiber has its own `FiberRef` and modifies it locally. So when they finish their job and `join` their parent, how do they get merged? By default, the last child fiber wins; the last fiber which `join`s will override the parent's `FiberRef` value.
+
+As we can see, `child1` is the last fiber, so its value which is `6`, gets merged back into its parent:
+
+```scala
+for {
+ fiberRef <- FiberRef.make(5)
+ child1 <- fiberRef.set(6).fork
+ child2 <- fiberRef.set(7).fork
+ _ <- child2.join
+ _ <- child1.join
+ parentValue <- fiberRef.get
+} yield assert(parentValue == 6)
+```
+
+### Custom Merge
+Furthermore, we can customize how, if at all, the value will be updated when a fiber is forked, and how values will be combined when a fiber is merged. To do this you specify the desired behavior during `FiberRef#make`:
+```scala
+for {
+ fiberRef <- FiberRef.make(initial = 0, join = math.max)
+ child <- fiberRef.update(_ + 1).fork
+ _ <- fiberRef.update(_ + 2)
+ _ <- child.join
+ value <- fiberRef.get
+} yield assert(value == 2)
+```
+
+### await semantic
+It is important to note that `await` has no such properties, so `await` waits for the child fiber to finish and gives us its value as an `Exit`:
+
+```scala
+for {
+ fiberRef <- FiberRef.make(5)
+ child <- fiberRef.set(6).fork
+ _ <- child.await
+ parentValue <- fiberRef.get
+} yield assert(parentValue == 5)
+```
+
+`Join` has higher-level semantics than `await` because it will fail if the child fiber failed, and it will also merge back its value to its parent.
+
+### inheritRefs
+We can inherit the values from all `FiberRef`s from an existing `Fiber` using the `Fiber#inheritRefs` method:
+
+```scala
+for {
+ fiberRef <- FiberRef.make[Int](0)
+ latch <- Promise.make[Nothing, Unit]
+ fiber <- (fiberRef.set(10) *> latch.succeed(())).fork
+ _ <- latch.await
+ _ <- fiber.inheritRefs
+ v <- fiberRef.get
+} yield v == 10
+```
+
+Note that `inheritRefs` is automatically called on `join`. This effectively means that both of the following effects behave identically:
+
+```scala
+val withJoin =
+ for {
+ fiberRef <- FiberRef.make[Int](0)
+ fiber <- fiberRef.set(10).fork
+ _ <- fiber.join
+ v <- fiberRef.get
+ } yield assert(v == 10)
+```
+
+```scala
+val withoutJoin =
+ for {
+ fiberRef <- FiberRef.make[Int](0)
+ fiber <- fiberRef.set(10).fork
+ _ <- fiber.inheritRefs
+ v <- fiberRef.get
+ } yield assert(v == 10)
+```
+
+## Memory Safety
+
+The value of a `FiberRef` is automatically garbage collected once the `Fiber` owning it is finished. A `FiberRef` that is no longer reachable (has no reference to it in user-code) will cause all fiber-specific values of the reference to be garbage collected, even if they were once used in a `Fiber` that is currently executing.
diff --git a/website/versioned_docs/version-1.0.18/reference/fiber/fiberstatus.md b/website/versioned_docs/version-1.0.18/reference/fiber/fiberstatus.md
new file mode 100644
index 000000000000..e6c893595ac5
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/fiber/fiberstatus.md
@@ -0,0 +1,28 @@
+---
+id: fiberstatus
+title: "Fiber.Status"
+---
+
+`Fiber.Status` describes the current status of a [Fiber](fiber.md).
+
+Each fiber can be in one of the following statuses:
+- Done
+- Finishing
+- Running
+- Suspended
+
+In the following example, we are going to `await` on a never-ending fiber and determine the id of that fiber, which we are blocking on:
+
+```scala
+import zio._
+import zio.console._
+for {
+ f1 <- ZIO.never.fork
+ f2 <- f1.await.fork
+ blockingOn <- f2.status
+ .collect(()) { case Fiber.Status.Suspended(_, _, _, blockingOn, _) =>
+ blockingOn
+ }
+ .eventually
+} yield (assert(blockingOn == List(f1.id)))
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/fiber/index.md b/website/versioned_docs/version-1.0.18/reference/fiber/index.md
new file mode 100644
index 000000000000..3cc26142793b
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/fiber/index.md
@@ -0,0 +1,91 @@
+---
+id: index
+title: "Introduction"
+---
+
+## Introduction
+
+A Fiber can be thought of as a virtual thread. A Fiber is the analog of a Java thread (`java.lang.Thread`), but it performs much better. Fibers are implemented in such a fashion that a single JVM thread will end up executing many fibers. We can think of fibers as unbounded JVM threads.
+
+> _**Warning**, if you are not an advanced programmer:_
+>
+> You should avoid fibers. If you can avoid fibers, then do it. ZIO gives you many concurrent primitives like `raceWith`, `zipPar`, `foreachPar`, and so forth, which allow you to avoid using fibers directly.
+>
+> Fibers just like threads are low-level constructs. It's not generally recommended for an average programmer to manually use fibers. It is very easy to make lots of mistakes or to introduce performance problems by manually using them.
+
+## Why Fibers?
+
+There are some limitations with JVM threads:
+
+1. **They are scarce** — Threads on the JVM map to the operating system level threads which imposes an upper bound on the number of threads that we can have inside our application.
+
+2. **Expensive on creation** — Their creation is expensive in terms of time and memory complexity.
+
+3. **Much Overhead on Context Switching** — Switching between the execution of one thread to another thread is not cheap, and it takes a lot of time.
+
+4. **Lack of Composability** — Threads are not typed. They don't have a meaningful return type, due to this limitation, we cannot compose threads. Also, it has no type parameters for error and it is assumed if that thread started it might throw some exception of throwable type. In Java when we create a thread, we should provide a `run` function that returns void. It's a void returning method. So threads cannot finish with any specific value.
+
+5. **Synchronous**
+
+In the following sections, we are going to discuss the key features of fibers, and how fibers overcame Java thread drawbacks.
+
+### Unbounded Size
+
+So whereas the mapping from JVM threads to operating system threads is one-to-one, **the mapping of fibers to threads is many-to-one**. That is to say, any JVM thread will end up executing anywhere from hundreds to thousands, even tens of thousands, of fibers concurrently, by hopping back and forth between them as necessary. Fibers give us virtual threads that have the benefits of threads but scalability far beyond threads. In other words, fibers provide us massive concurrency with **lightweight green threading** on the JVM.
+
+As a rough rule of thumb, we can have an application with a thousand real threads. No problem, modern servers can support applications with a thousand threads. However, we cannot have an application with a hundred thousand threads; that application will die. It just won't make any progress. Neither the JVM nor our operating system can physically support a hundred thousand threads. However, it is no problem to have a Scala application with a hundred thousand fibers; that application can perform in a very high-performance fashion, and the miracle that enables that to happen is the fiber.
+
+### Lightweight
+
+**JVM threads are expensive to create in terms of time and memory complexity.** It also takes a lot of time to switch between one thread of execution and another. Fibers are virtual, and as they use **green threading**, they are considered to be **lightweight cooperative threads**; this means that fibers always _yield_ their executions to each other without the overhead of preemptive scheduling.
+
+### Asynchronous
+
+Fibers are asynchronous, whereas threads are always synchronous. That is why fibers have higher scalability: because they are asynchronous. Threads are not, and that is why they don't scale as well.
+
+### Typed and Composable
+
+**Fibers have typed error and success values**. So actually fiber has two type parameters `E` and `A`:
+
+- The `E` corresponds to the error channel. It indicates the error type with which the fiber can fail.
+
+- The `A` corresponds to the success value of the computation. That is the type with which the fiber can succeed. Whereas fibers can finish with the value of type `A`.
+
+The fact, that fibers are typed allows us to write more type-safe programs. Also, it increases the compositional properties of our programs. Because we can say, we are going to wait on that fiber to finish and when it's done, we are going to get its value of type `A`.
+
+### Interrupt Safe
+
+With threads in Java, it is not a safe operation to terminate them, by using the stop method. The stop operation has been [deprecated](https://docs.oracle.com/javase/1.5.0/docs/guide/misc/threadPrimitiveDeprecation.html). So this is not a safe operation to force kill a thread. Instead, we should try to request an interruption to the thread, but in this case, **the thread may not respond to our request, and it may just go forever**.
+
+**Fiber has a safe version of this functionality that works very well**. Just like we can interrupt a thread, we can interrupt a fiber too, but interruption of fibers is much more reliable. It will always work, and **it probably works very fast**. We don't need to wait around, we can just try to interrupt them, and they will be gone very soon.
+
+### Structured Concurrency
+
+Until now, we have found that ZIO fibers solve a lot of the drawbacks of using Java threads. With fibers, we can have hundreds of thousands of fibers started and working together, reaching massive concurrency. Now how can we manage them? Some of them are top-level fibers and others are forked and become children of their parents. How can we manage their scopes, keep track of all fibers, and prevent them from leaking? What happens if, during the execution of a child fiber, the parent's execution is interrupted? The child fibers should be scoped to their parent fibers. We need a way to manage these scopes automatically. This is where structured concurrency shines.
+
+> _**Important**:_
+>
+> It's worth mentioning that in the ZIO model, all code runs on fibers. There is no such thing as code that executes outside of a fiber. When we create a main function in ZIO that returns an effect, even if we don't explicitly fork a fiber, when we execute that effect it will execute on what is called the main fiber. It's a top-level fiber.
+>
+>It's just like if we have a main function in Java then that main function will execute on the main thread. There is no code in Java that does not execute on a thread. All code executes on a thread even if you didn't create a thread.
+
+ZIO has support for structured concurrency. The way ZIO structured concurrency works is that **the child fibers are scoped to their parent fibers** which means **when the parent effect is done running then its child's effects will be automatically interrupted**. So when we fork, and we get back a fiber, the fiber's lifetime is bound to the parent fiber, that forked it. It is very difficult to leak fibers because child fibers are guaranteed to complete before their parents.
+
+The structure concurrency gives us a way to reason about fiber lifespans. We can statically reason about the lifetimes of children fibers just by looking at our code. We don't need to insert complicated logic to keep track of all the child fibers and manually shut them down.
+
+#### Global Lifetime
+
+Sometimes we want a child fiber to outlive the scope of the parent. What do we do in that case? Well, we have another operator called `forkDaemon`. The `forkDaemon` forks the fiber as a daemon fiber. Daemon fibers can outlive their parents. They can live forever. They run in the background doing their work until they end with failure or success. This gives us a way to spawn background jobs that should just keep on going regardless of what happens to the parent.
+
+#### Fine-grained Scope
+
+If we need very flexible, fine-grained control over the lifetime of a fiber, there is another operator called `forkIn`. We can fork a fiber inside a specific scope; when that scope is closed, the fiber will be terminated.
+
+## Fiber Data Types
+
+ZIO fiber contains a few data types that can help us solve complex problems:
+
+- **[Fiber](fiber.md)** — A fiber value models an `IO` value that has started running, and is the moral equivalent of a green thread.
+- **[FiberRef](fiberref.md)** — `FiberRef[A]` models a mutable reference to a value of type `A`. As opposed to `Ref[A]`, a value is bound to an executing `Fiber` only. You can think of it as Java's `ThreadLocal` on steroids.
+- **[Fiber.Status](fiberstatus.md)** — `Fiber.Status` describes the current status of a Fiber.
+- **[Fiber.Id](fiberid.md)** — `Fiber.Id` describes the unique identity of a Fiber.
\ No newline at end of file
diff --git a/website/versioned_docs/version-1.0.18/reference/index.md b/website/versioned_docs/version-1.0.18/reference/index.md
new file mode 100644
index 000000000000..285ad4f6ff1b
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/index.md
@@ -0,0 +1,92 @@
+---
+id: index
+title: "Introduction"
+---
+
+ZIO contains a few data types that can help you solve complex problems in asynchronous and concurrent programming. ZIO data types are categorized into the following sections:
+
+1. [Core Data Types](#core-data-types)
+2. [Contextual Data Types](#contextual-data-types)
+3. [Concurrency](#concurrency)
+ - [Fiber Primitives](#fiber-primitives)
+ - [Concurrency Primitives](#concurrency-primitives)
+ - [Synchronization Aids](#synchronization-aids)
+ - [STM](#stm)
+4. [Resource Management](#resource-management)
+5. [Streaming](#streaming)
+6. [Miscellaneous](#miscellaneous)
+
+## Core Data Types
+ - **[ZIO](core/zio.md)** — A `ZIO` is a value that models an effectful program, which might fail or succeed.
+ + **[UIO](core/uio.md)** — An `UIO[A]` is a type alias for `ZIO[Any, Nothing, A]`.
+ + **[URIO](core/urio.md)** — An `URIO[R, A]` is a type alias for `ZIO[R, Nothing, A]`.
+ + **[Task](core/task.md)** — A `Task[A]` is a type alias for `ZIO[Any, Throwable, A]`.
+ + **[RIO](core/rio.md)** — A `RIO[R, A]` is a type alias for `ZIO[R, Throwable, A]`.
+ + **[IO](core/io.md)** — An `IO[E, A]` is a type alias for `ZIO[Any, E, A]`.
+ - **[Exit](core/exit.md)** — An `Exit[E, A]` describes the result of executing an `IO` value.
+ - **[Cause](core/cause.md)** - `Cause[E]` is a description of a full story of a fiber failure.
+ - **[Runtime](core/runtime.md)** — A `Runtime[R]` is capable of executing tasks within an environment `R`.
+
+## Contextual Data Types
+- **[Has](contextual/has.md)** — The trait `Has[A]` is used with the [ZIO environment](contextual/index.md#zio-environment) to express an effect's dependency on a service of type `A`.
+- **[ZLayer](contextual/zlayer.md)** — The `ZIO[-R, +E, +A]` data type describes an effect that requires an input type of `R`, as an environment, may fail with an error of type `E` or succeed and produces a value of type `A`.
+ + **[RLayer](contextual/rlayer.md)** — `RLayer[-RIn, +ROut]` is a type alias for `ZLayer[RIn, Throwable, ROut]`, which represents a layer that requires `RIn` as its input, it may fail with `Throwable` value, or returns `ROut` as its output.
+ + **[ULayer](contextual/ulayer.md)** — ULayer[+ROut] is a type alias for ZLayer[Any, Nothing, ROut], which represents a layer that doesn't require any services as its input, it can't fail, and returns ROut as its output.
+ + **[Layer](contextual/layer.md)** — Layer[+E, +ROut] is a type alias for ZLayer[Any, E, ROut], which represents a layer that doesn't require any services, it may fail with an error type of E, and returns ROut as its output.
+ + **[URLayer](contextual/urlayer.md)** — URLayer[-RIn, +ROut] is a type alias for ZLayer[RIn, Nothing, ROut], which represents a layer that requires RIn as its input, it can't fail, and returns ROut as its output.
+ + **[TaskLayer](contextual/task-layer.md)** — TaskLayer[+ROut] is a type alias for ZLayer[Any, Throwable, ROut], which represents a layer that doesn't require any services as its input, it may fail with Throwable value, and returns ROut as its output.
+
+## Concurrency
+
+### Fiber Primitives
+ - **[Fiber](fiber/fiber.md)** — A fiber value models an `IO` value that has started running, and is the moral equivalent of a green thread.
+ - **[FiberRef](fiber/fiberref.md)** — `FiberRef[A]` models a mutable reference to a value of type `A`. As opposed to `Ref[A]`, a value is bound to an executing `Fiber` only. You can think of it as Java's `ThreadLocal` on steroids.
+ - **[Fiber.Status](fiber/fiberstatus.md)** — `Fiber.Status` describes the current status of a Fiber.
+ - **[Fiber.Id](fiber/fiberid.md)** — `Fiber.Id` describes the unique identity of a Fiber.
+
+### Concurrency Primitives
+ - **[Hub](concurrency/hub.md)** - A `Hub` is an asynchronous message hub that allows publishers to efficiently broadcast values to many subscribers.
+ - **[Promise](concurrency/promise.md)** — A `Promise` is a model of a variable that may be set a single time, and awaited on by many fibers.
+ - **[Semaphore](concurrency/semaphore.md)** — A `Semaphore` is an asynchronous (non-blocking) semaphore that plays well with ZIO's interruption.
+- **[ZRef](concurrency/zref.md)** — A `ZRef[EA, EB, A, B]` is a polymorphic, purely functional description of a mutable reference. The fundamental operations of a `ZRef` are `set` and `get`.
+ + **[Ref](concurrency/ref.md)** — `Ref[A]` models a mutable reference to a value of type `A`. The two basic operations are `set`, which fills the `Ref` with a new value, and `get`, which retrieves its current content. All operations on a `Ref` are atomic and thread-safe, providing a reliable foundation for synchronizing concurrent programs.
+- **[ZRefM](concurrency/zrefm.md)** — A `ZRefM[RA, RB, EA, EB, A, B]` is a polymorphic, purely functional description of a mutable reference.
+ + **[RefM](concurrency/refm.md)** — `RefM[A]` models a **mutable reference** to a value of type `A` in which we can store **immutable** data, and update it atomically **and** effectfully.
+ - **[Queue](concurrency/queue.md)** — A `Queue` is an asynchronous queue that never blocks, which is safe for multiple concurrent producers and consumers.
+
+### Synchronization aids
+
+- **[ConcurrentMap](sync/concurrentmap.md)** — A Map wrapper over `java.util.concurrent.ConcurrentHashMap`
+- **[ConcurrentSet](sync/concurrentset.md)** — A Set implementation over `java.util.concurrent.ConcurrentHashMap`
+- **[CountdownLatch](sync/countdownlatch.md)** — A synchronization aid that allows one or more fibers to wait until a
+ set of operations being performed in other fibers completes.
+- **[CyclicBarrier](sync/cyclicbarrier.md)** — A synchronization aid that allows a set of fibers to all wait for each
+ other to reach a common barrier point.
+
+### STM
+
+- **[STM](stm/stm.md)** - An `STM` represents an effect that can be performed transactionally resulting in a failure or success.
+- **[TArray](stm/tarray.md)** - A `TArray` is an array of mutable references that can participate in transactions.
+- **[TSet](stm/tset.md)** - A `TSet` is a mutable set that can participate in transactions.
+- **[TMap](stm/tmap.md)** - A `TMap` is a mutable map that can participate in transactions.
+- **[TRef](stm/tref.md)** - A `TRef` is a mutable reference to an immutable value that can participate in transactions.
+- **[TPriorityQueue](stm/tpriorityqueue.md)** - A `TPriorityQueue` is a mutable priority queue that can participate in transactions.
+- **[TPromise](stm/tpromise.md)** - A `TPromise` is a mutable reference that can be set exactly once and can participate in transactions.
+- **[TQueue](stm/tqueue.md)** - A `TQueue` is a mutable queue that can participate in transactions.
+- **[TReentrantLock](stm/treentrantlock.md)** - A `TReentrantLock` is a reentrant read / write lock that can be composed.
+- **[TSemaphore](stm/tsemaphore.md)** - A `TSemaphore` is a semaphore that can participate in transactions.
+
+## Resource Management
+
+ - **[Managed](resource/managed.md)** — A `Managed` is a value that describes a perishable resource that may be consumed only once inside a given scope.
+
+## Streaming
+The following datatypes can be found in ZIO streams library:
+ - **[ZStream](stream/zstream.md)** — A `ZStream` is a lazy, concurrent, asynchronous source of values.
+ - **[ZSink](stream/zsink.md)** — A `ZSink` is a consumer of values from a `ZStream`, which may produce a value when it has consumed enough.
+
+## Miscellaneous
+ - **[Chunk](misc/chunk.md)** — ZIO `Chunk`: Fast, Pure Alternative to Arrays
+ - **[Schedule](misc/schedule.md)** — A `Schedule` is a model of a recurring schedule, which can be used for repeating successful `IO` values, or retrying failed `IO` values.
+
+To learn more about these data types, please explore the pages above, or check out the Scaladoc documentation.
diff --git a/website/versioned_docs/version-1.0.18/reference/misc/chunk.md b/website/versioned_docs/version-1.0.18/reference/misc/chunk.md
new file mode 100644
index 000000000000..63e219127bd9
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/misc/chunk.md
@@ -0,0 +1,150 @@
+---
+id: chunk
+title: "Chunk"
+---
+A `Chunk[A]` represents a chunk of values of type `A`. Chunks are usually backed by arrays, but expose a purely functional, safe interface to the underlying elements, and they become lazy on operations that would be costly with arrays, such as repeated concatenation.
+
+
+## Why Chunk?
+Arrays are fast and don’t box primitive values. ZIO Chunk is a wrapper on Java array. So also Chunks have zero boxing for primitives, but due to ClassTag requirements and mutability, they are painful to use and don’t integrate well into functional code.
+
+Let's get into more detail about why Chunk was invented:
+
+### Immutability
+In Scala, there is no immutable data type that can efficiently represent primitive data types. There is Array, but Array is a mutable interface. The Array data type can efficiently represent primitives without boxing but only by exposing some unsafe mutable methods like `update`.
+
+### Ergonomic Design
+Every time, when we create an array of generic types in Scala, we need a [ClassTag](https://www.scala-lang.org/api/current/scala/reflect/ClassTag.html) to provide runtime information about that generic type, which is very inconvenient and isn't ergonomic. It leads us to a very cumbersome API.
+
+Chunk does not have the inconvenience of Array in Scala. **Chunk dispenses with the need to have ClassTags**. It utilizes a different approach to solve that problem.
+
+### High Performance
+In addition to being an immutable array and zero boxing of Chunks that leads us to a high performant data type, Chunk has specialized operations for things like appending a single element or concatenating two Chunks together which have significantly higher performance than doing these same operations on the Array. Many Chunk methods have been handwritten to achieve better performance than their corresponding Array implementations in the Scala standard library.
+
+Although Chunk is a common data type in ZIO, it exists primarily to support streaming use cases.
+
+When we are doing data streaming, a lot of times the source stream is a stream of bytes. Hence, internally we use a Chunk of bytes to represent that, so we don't have to box the bytes. Of course, it can be utilized for Chunks of Ints and many other types. Using Chunk is especially common when we are encoding and decoding at the level of streams. It is a very efficient, high-performance data type.
+
+## Operations
+
+### Creating a Chunk
+
+Creating empty `Chunk`:
+```scala
+val emptyChunk = Chunk.empty
+```
+
+Creating a `Chunk` with specified values:
+```scala
+val specifiedValuesChunk = Chunk(1,2,3)
+// specifiedValuesChunk: Chunk[Int] = IndexedSeq(1, 2, 3)
+```
+
+Alternatively, we can create a `Chunk` by providing a collection of values:
+```scala
+val fromIterableChunk: Chunk[Int] = Chunk.fromIterable(List(1, 2, 3))
+// fromIterableChunk: Chunk[Int] = IndexedSeq(1, 2, 3)
+val fromArrayChunk: Chunk[Int] = Chunk.fromArray(Array(1, 2, 3))
+// fromArrayChunk: Chunk[Int] = IndexedSeq(1, 2, 3)
+```
+
+Creating a `Chunk` by filling it with n copies of the same element:
+```scala
+val chunk: Chunk[Int] = Chunk.fill(3)(0)
+// chunk: Chunk[Int] = IndexedSeq(0, 0, 0)
+```
+
+Creating a `Chunk` using unfold method by repeatedly applying the given function, as long as it returns Some:
+```scala
+val unfolded = Chunk.unfold(0)(n => if (n < 8) Some((n*2, n+2)) else None)
+// unfolded: Chunk[Int] = IndexedSeq(0, 4, 8, 12)
+```
+
+### Concatenating chunk
+
+`++` Returns the concatenation of this chunk with the specified chunk. For example:
+
+```scala
+Chunk(1,2,3) ++ Chunk(4,5,6)
+// res0: Chunk[Int] = IndexedSeq(1, 2, 3, 4, 5, 6)
+```
+
+### Collecting chunk
+
+`collect` Returns a filtered, mapped subset of the elements of this chunk.
+How to use `collect` function to cherry-pick all strings from Chunk[A]:
+
+```scala
+val collectChunk = Chunk("Hello ZIO", 1.5, "Hello ZIO NIO", 2.0, "Some string", 2.5)
+// collectChunk: Chunk[Any] = IndexedSeq(
+// "Hello ZIO",
+// 1.5,
+// "Hello ZIO NIO",
+// 2.0,
+// "Some string",
+// 2.5
+// )
+
+collectChunk.collect { case string: String => string }
+// res1: Chunk[String] = IndexedSeq(
+// "Hello ZIO",
+// "Hello ZIO NIO",
+// "Some string"
+// )
+```
+How to use `collect` function to cherry-pick all the digits from Chunk[A]:
+
+```scala
+collectChunk.collect { case digit: Double => digit }
+// res2: Chunk[Double] = IndexedSeq(1.5, 2.0, 2.5)
+```
+
+`collectWhile` collects the elements (from left to right) until the predicate returns "false" for the first time:
+
+```scala
+Chunk("Sarah", "Bob", "Jane").collectWhile { case element if element != "Bob" => true }
+// res3: Chunk[Boolean] = IndexedSeq(true)
+```
+or another example:
+
+```scala
+Chunk(9, 2, 5, 1, 6).collectWhile { case element if element >= 2 => true }
+// res4: Chunk[Boolean] = IndexedSeq(true, true, true)
+```
+### Dropping chunk
+
+`drop` drops the first `n` elements of the chunk:
+
+```scala
+Chunk("Sarah", "Bob", "Jane").drop(1)
+// res5: Chunk[String] = IndexedSeq("Bob", "Jane")
+```
+
+`dropWhile` drops all elements so long as the predicate returns true:
+
+```scala
+Chunk(9, 2, 5, 1, 6).dropWhile(_ >= 2)
+// res6: Chunk[Int] = IndexedSeq(1, 6)
+```
+
+### Comparing chunks
+
+```scala
+Chunk("A","B") == Chunk("A", "C")
+// res7: Boolean = false
+```
+
+### Converting chunks
+
+`toArray` converts the chunk into an Array.
+
+```scala
+Chunk(1,2,3).toArray
+```
+
+`toSeq` converts the chunk into a `Seq`.
+
+```scala
+Chunk(1,2,3).toSeq
+```
+
diff --git a/website/versioned_docs/version-1.0.18/reference/misc/index.md b/website/versioned_docs/version-1.0.18/reference/misc/index.md
new file mode 100644
index 000000000000..b97875eb88e7
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/misc/index.md
@@ -0,0 +1,8 @@
+---
+id: index
+title: "Summary"
+---
+
+- **[Chunk](chunk.md)** — ZIO `Chunk`: Fast, Pure Alternative to Arrays
+- **[Schedule](schedule.md)** — A `Schedule` is a model of a recurring schedule, which can be used for repeating successful `IO` values, or retrying failed `IO` values.
+- **[Supervisor](supervisor.md)** — Supervising the launching and termination of fibers.
diff --git a/website/versioned_docs/version-1.0.18/reference/misc/schedule.md b/website/versioned_docs/version-1.0.18/reference/misc/schedule.md
new file mode 100644
index 000000000000..809fdd35cecf
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/misc/schedule.md
@@ -0,0 +1,333 @@
+---
+id: schedule
+title: "Schedule"
+---
+
+
+A `Schedule[Env, In, Out]` is an **immutable value** that **describes** a recurring effectful schedule, which runs in some environment `Env`, after consuming values of type `In` (errors in the case of `retry`, or values in the case of `repeat`) produces values of type `Out`, and in every step based on input values and the internal state decides to halt or continue after some delay **d**.
+
+Schedules are defined as a possibly infinite set of intervals spread out over time. Each interval defines a window in which recurrence is possible.
+
+When schedules are used to repeat or retry effects, the starting boundary of each interval produced by a schedule is used as the moment when the effect will be executed again.
+
+A variety of other operators exist for transforming and combining schedules, and the companion object for `Schedule` contains all common types of schedules, both for performing retrying, as well as performing repetition.
+
+## Repeat and Retry
+Schedules allow us to define and compose flexible recurrence schedules, which can be used to **repeat** actions, or **retry** actions in the event of errors.
+
+Repetition and retrying are two similar concepts in the domain of scheduling. It is the same concept and idea, only one of them looks for successes and the other one looks for failures.
+
+### Repeat
+In the case of repetition, ZIO has a `ZIO#repeat` function, which takes a schedule as a repetition policy and returns another effect that describes an effect with repetition strategy according to that policy.
+
+Repeat policies are used in the following functions:
+
+* `ZIO#repeat` — Repeats an effect until the schedule is done.
+* `ZIO#repeatOrElse` — Repeats an effect until the schedule is done, with a fallback for errors.
+
+> _**Note:**_
+>
+> Scheduled recurrences are in addition to the first execution, so that `io.repeat(Schedule.once)` yields an effect that executes `io`, and then if that succeeds, executes `io` an additional time.
+
+Let's see how we can create a repeated effect by using `ZIO#repeat` function:
+
+```scala
+val action: ZIO[R, E, A] = ???
+val policy: Schedule[R1, A, B] = ???
+
+val repeated = action repeat policy
+```
+
+There is another version of `repeat` that helps us to have a fallback strategy in case of errors. If something goes wrong we can handle that by using the `ZIO#repeatOrElse` function, which helps us to add an `orElse` callback that will run in case of repetition failure:
+
+```scala
+val action: ZIO[R, E, A] = ???
+val policy: Schedule[R1, A, B] = ???
+
+val orElse: (E, Option[B]) => ZIO[R1, E2, B] = ???
+
+val repeated = action repeatOrElse (policy, orElse)
+```
+
+### Retry
+In the case of retrying, ZIO has a `ZIO#retry` function, which takes a schedule as a repetition policy and returns another effect that describes an effect with repetition strategy which will retry following the failure of the original effect.
+
+Repeat policies are used in the following functions:
+
+* `ZIO#retry` – Retries an effect until it succeeds.
+* `ZIO#retryOrElse` — Retries an effect until it succeeds, with a fallback for errors.
+
+Let's see how we can create a repeated effect by using `ZIO#retry` function:
+
+```scala
+val action: ZIO[R, E, A] = ???
+val policy: Schedule[R1, E, S] = ???
+
+val repeated = action retry policy
+
+```
+
+There is another version of `retry` that helps us to have a fallback strategy in case of errors. If something goes wrong we can handle that by using the `ZIO#retryOrElse` function, which helps us to add an `orElse` callback that will run in case of retry failure:
+
+
+```scala
+val action: ZIO[R, E, A] = ???
+val policy: Schedule[R1, A, B] = ???
+
+val orElse: (E, S) => ZIO[R1, E1, A1] = ???
+
+val repeated = action retryOrElse (policy, orElse)
+```
+
+## Base Schedules
+### stop
+A schedule that does not recur, just stops and returns one `Unit` element:
+
+```scala
+val stop = Schedule.stop
+```
+
+### once
+A schedule that recurs one time and returns one `Unit` element:
+
+```scala
+val once = Schedule.once
+```
+
+### forever
+A schedule that always recurs and produces number of recurrence at each run:
+
+```scala
+val forever = Schedule.forever
+```
+
+### recurs
+A schedule that only recurs the specified number of times:
+
+```scala
+val recurs = Schedule.recurs(5)
+```
+
+### spaced
+A schedule that recurs continuously, each repetition spaced the specified duration from the last run:
+
+```scala
+val spaced = Schedule.spaced(10.milliseconds)
+```
+
+### fixed
+A schedule that recurs on a fixed interval. Returns the number of repetitions of the schedule so far:
+
+```scala
+val fixed = Schedule.fixed(10.seconds)
+```
+
+### exponential
+A schedule that recurs using exponential backoff:
+
+```scala
+val exponential = Schedule.exponential(10.milliseconds)
+```
+
+### fibonacci
+A schedule that always recurs, increasing delays by summing the preceding two delays (similar to the fibonacci sequence). Returns the current duration between recurrences:
+
+```scala
+val fibonacci = Schedule.fibonacci(10.milliseconds)
+```
+### identity
+A schedule that always decides to continue. It recurs forever, without any delay. `identity` schedule consumes input, and emit the same as output (`Schedule[Any, A, A]`):
+
+```scala
+val identity = Schedule.identity[Int]
+```
+
+### unfold
+A schedule that repeats one time from the specified state and iterator:
+
+```scala
+val unfold = Schedule.unfold(0)(_ + 1)
+```
+
+### succeed
+Returns a schedule that repeats one time, producing the specified constant value:
+
+```scala
+val constant = Schedule.succeed(5)
+```
+
+### fromFunction
+A schedule that always recurs, mapping input values through the specified function:
+
+```scala
+val inc = Schedule.fromFunction[Int, Int](_ + 1)
+```
+
+## Schedule Combinators
+Schedules define stateful, possibly effectful, recurring schedules of events, and compose in a variety of ways. Combinators allow us to take schedules and combine them together to get other schedules and if we have combinators with just the right properties. Then in theory we should be able to solve an infinite number of problems, with only a few combinators and few base schedules.
+
+### Composition
+Schedules compose in the following primary ways:
+
+ * **Union**. This performs the union of the intervals of two schedules.
+ * **Intersection**. This performs the intersection of the intervals of two schedules.
+ * **Sequence**. This concatenates the intervals of one schedule onto another.
+
+#### Union
+Combines two schedules through union, by recurring if either schedule wants to
+recur, using the minimum of the two delays between recurrences.
+
+| | `s1` | `s2` | `s1 \|\| s2` |
+|----------------------|---------------------|---------------------|--------------------------|
+| Type | `Schedule[R, A, B]` | `Schedule[R, A, C]` | `Schedule[R, A, (B, C)]` |
+| Continue: `Boolean` | `b1` | `b2` | `b1 \|\| b2` |
+| Delay: `Duration` | `d1` | `d2` | `d1.min(d2)` |
+| Emit: `(A, B)` | `a` | `b` | `(a, b)` |
+
+We can combine two schedules through union with the `||` operator:
+
+```scala
+val expCapped = Schedule.exponential(100.milliseconds) || Schedule.spaced(1.second)
+```
+
+#### Intersection
+Combines two schedules through the intersection, by recurring only if both schedules want to recur, using the maximum of the two delays between recurrences.
+
+| | `s1` | `s2` | `s1 && s2` |
+|----------------------|---------------------|---------------------|--------------------------|
+| Type | `Schedule[R, A, B]` | `Schedule[R, A, C]` | `Schedule[R, A, (B, C)]` |
+| Continue: `Boolean` | `b1` | `b2` | `b1 && b2` |
+| Delay: `Duration` | `d1` | `d2` | `d1.max(d2)` |
+| Emit: `(A, B)` | `a` | `b` | `(a, b)` |
+
+
+We can intersect two schedules with the `&&` operator:
+
+```scala
+val expUpTo10 = Schedule.exponential(1.second) && Schedule.recurs(10)
+```
+
+#### Sequence
+Combines two schedules sequentially, by following the first policy until it ends, and then following the second policy.
+
+| | `s1` | `s2` | `s1 andThen s2` |
+|-------------------|---------------------|---------------------|---------------------|
+| Type | `Schedule[R, A, B]` | `Schedule[R, A, C]` | `Schedule[R, A, C]` |
+| Delay: `Duration` | `d1` | `d2` | `d1 + d2` |
+| Emit: `B` | `a` | `b` | `b` |
+
+
+We can sequence two schedules by using `andThen`:
+
+```scala
+val sequential = Schedule.recurs(10) andThen Schedule.spaced(1.second)
+```
+
+### Piping
+Combine two schedules by piping the output of the first schedule to the input of the other. Effects described by the first schedule will always be executed before the effects described by the second schedule.
+
+| | `s1` | `s2` | `s1 >>> s2` |
+|-------------------|---------------------|---------------------|---------------------|
+| Type | `Schedule[R, A, B]` | `Schedule[R, B, C]` | `Schedule[R, A, C]` |
+| Delay: `Duration` | `d1` | `d2` | `d1 + d2` |
+| Emit: `B` | `a` | `b` | `b` |
+
+We can pipe two schedules by using the `>>>` operator:
+
+```scala
+val totalElapsed = Schedule.spaced(1.second) <* Schedule.recurs(5) >>> Schedule.elapsed
+```
+
+### Jittering
+A `jittered` is a combinator that takes one schedule and returns another schedule of the same type except for the delay which is applied randomly:
+
+| Function | Input Type | Output Type |
+|------------|----------------------------|--------------------------------------|
+| `jittered` | | `Schedule[Env with Random, In, Out]` |
+| `jittered` | `min: Double, max: Double` | `Schedule[Env with Random, In, Out]` |
+
+We can jitter any schedule by calling `jittered` on it:
+
+```scala
+val jitteredExp = Schedule.exponential(10.milliseconds).jittered
+```
+
+When a resource is out of service due to overload or contention, retrying and backing off doesn't help us. If all failed API calls are backed off to the same point of time, they cause another overload or contention. Jitter adds some amount of randomness to the delay of the schedule. This helps us to avoid ending up accidentally synchronizing and taking the service down by accident.
+
+### Collecting
+A `collectAll` is a combinator that, when called on a schedule, produces a new schedule that collects the outputs of the first schedule into a chunk.
+
+| Function | Input Type | Output Type |
+|--------------|--------------------------|---------------------------------|
+| `collectAll` | `Schedule[Env, In, Out]` | `Schedule[Env, In, Chunk[Out]]` |
+
+In the following example, we are collecting all recurrences of the schedule into a `Chunk`, so at the end, it would contain `Chunk(0, 1, 2, 3, 4)`:
+
+```scala
+val collect = Schedule.recurs(5).collectAll
+```
+
+### Filtering
+We can filter inputs or outputs of a schedule with `whileInput` and `whileOutput`. Also, ZIO schedule has effectful versions of these two functions, `whileInputM` and `whileOutputM`.
+
+| Function | Input Type | Output Type |
+|----------------|------------------------------|----------------------------|
+| `whileInput` | `In1 => Boolean` | `Schedule[Env, In1, Out]` |
+| `whileOutput` | `Out => Boolean` | `Schedule[Env, In, Out]` |
+| `whileInputM` | `In1 => URIO[Env1, Boolean]` | `Schedule[Env1, In1, Out]` |
+| `whileOutputM` | `Out => URIO[Env1, Boolean]` | `Schedule[Env1, In, Out]` |
+
+In the following example we collect all emitted outputs before the output reaches 5, so it would return `Chunk(0, 1, 2, 3, 4)`:
+
+```scala
+val res = Schedule.unfold(0)(_ + 1).whileOutput(_ < 5).collectAll
+```
+
+### Mapping
+There are two versions for mapping schedules, `map` and its effectful version `mapM`.
+
+| Function | Input Type | Output Type |
+|----------|------------------------------|----------------------------|
+| `map` | `f: Out => Out2` | `Schedule[Env, In, Out2]` |
+| `mapM` | `f: Out => URIO[Env1, Out2]` | `Schedule[Env1, In, Out2]` |
+
+### Left/Right Ap
+Sometimes when we intersect two schedules with the `&&` operator, we just need to ignore the left or the right output.
+- `*>` ignores the left output
+- `<*` ignores the right output
+
+### Modifying
+Modifies the delay of a schedule:
+
+```scala
+val boosted = Schedule.spaced(1.second).delayed(_ => 100.milliseconds)
+```
+
+### Tapping
+Whenever we need to effectfully process each schedule input/output, we can use `tapInput` and `tapOutput`.
+
+We can use these two functions for logging purposes:
+
+```scala
+val tappedSchedule = Schedule.count.whileOutput(_ < 5).tapOutput(o => putStrLn(s"retrying $o").orDie)
+```
+
+
+## Examples
+
+Stops retrying after a specified amount of time has elapsed:
+
+```scala
+val expMaxElapsed = (Schedule.exponential(10.milliseconds) >>> Schedule.elapsed).whileOutput(_ < 30.seconds)
+```
+
+Retry only when a specific exception occurs:
+
+```scala
+import scala.concurrent.TimeoutException
+
+val whileTimeout = Schedule.exponential(10.milliseconds) && Schedule.recurWhile[Throwable] {
+ case _: TimeoutException => true
+ case _ => false
+}
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/misc/supervisor.md b/website/versioned_docs/version-1.0.18/reference/misc/supervisor.md
new file mode 100644
index 000000000000..f08e2b885ff1
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/misc/supervisor.md
@@ -0,0 +1,86 @@
+---
+id: supervisor
+title: "Supervisor"
+---
+
+A `Supervisor[A]` is allowed to supervise the launching and termination of fibers, producing some visible value of type `A` from the supervision.
+
+## Creation
+
+### track
+The `track` creates a new supervisor that tracks children in a set. It takes a boolean `weak` parameter as input, which indicates whether to track children in a `WeakSet` or not.
+
+
+
+```scala
+val supervisor = Supervisor.track(true)
+// supervisor: zio.package.UIO[Supervisor[zio.Chunk[zio.Fiber.Runtime[Any, Any]]]] = zio.ZIO$EffectTotal@1fab7211
+```
+
+We can periodically report the status of the fibers of our program with the help of the Supervisor.
+
+### fibersIn
+The `fibersIn` creates a new supervisor with an initial sorted set of fibers.
+
+In the following example we are creating a new supervisor from an initial set of fibers:
+
+
+```scala
+def fiberListSupervisor = for {
+ ref <- Ref.make(SortedSet.from(fibers))
+ s <- Supervisor.fibersIn(ref)
+} yield (s)
+```
+
+## Supervising
+
+Whenever we need to supervise a ZIO effect, we can call the `ZIO#supervised` function; `supervised` takes a supervisor and returns another effect. The behavior of children fibers is reported to the provided supervisor:
+
+
+```scala
+val supervised = supervisor.flatMap(s => fib(20).supervised(s))
+```
+
+Now we can access all information of children fibers through the supervisor.
+
+## Example
+In the following example we are going to periodically monitor the number of fibers throughout our application life cycle:
+
+```scala
+object SupervisorExample extends zio.App {
+ import zio.duration._
+
+ val program = for {
+ supervisor <- Supervisor.track(true)
+ fiber <- fib(20).supervised(supervisor).fork
+ policy = Schedule
+ .spaced(500.milliseconds)
+ .whileInputM[Any, Unit](_ => fiber.status.map(x => !x.isDone))
+ logger <- monitorFibers(supervisor)
+ .repeat(policy).fork
+ _ <- logger.join
+ result <- fiber.join
+ _ <- putStrLn(s"fibonacci result: $result")
+ } yield ()
+
+ def monitorFibers(supervisor: Supervisor[Chunk[Fiber.Runtime[Any, Any]]]) = for {
+ length <- supervisor.value.map(_.length)
+ _ <- putStrLn(s"number of fibers: $length")
+ } yield ()
+
+ def fib(n: Int): ZIO[Clock, Nothing, Int] =
+ if (n <= 1) {
+ ZIO.succeed(1)
+ } else {
+ for {
+ _ <- sleep(500.milliseconds)
+ fiber1 <- fib(n - 2).fork
+ fiber2 <- fib(n - 1).fork
+ v2 <- fiber2.join
+ v1 <- fiber1.join
+ } yield v1 + v2
+ }
+
+ override def run(args: List[String]) = program.exitCode
+}
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/resource/index.md b/website/versioned_docs/version-1.0.18/reference/resource/index.md
new file mode 100644
index 000000000000..de852635dcf5
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/resource/index.md
@@ -0,0 +1,195 @@
+---
+id: index
+title: "Introduction"
+---
+
+When we are writing a long-lived application, resource management is very important. Proper resource management is vital to any large-scale application. We need to make sure that our application is resource-safe, and it doesn't leak any resource.
+
+Leaking socket connections, database connections or file descriptors is not acceptable in a web application. ZIO provides some good constructs to address this concern.
+
+To write a resource-safe application, we need to make sure whenever we are opening a resource, we have a mechanism to close that resource whether we use that resource completely or not, for example, an exception occurred during resource usage.
+
+## Try / Finally
+Before we dive into the ZIO solution, it's better to review the `try` / `finally` which is the standard approach in the Scala language to manage resources.
+
+Scala has a `try` / `finally` construct which helps us to make sure we don't leak resources because no matter what happens in the try, the `finally` block will be executed. So we can open files in the try block, and then we can close them in the `finally` block, and that gives us the guarantee that we will not leak resources.
+
+Assume we want to read a file and return the number of its lines:
+
+
+```scala
+def lines(file: String): Task[Long] = Task.effect {
+ def countLines(br: BufferedReader): Long = br.lines().count()
+ val bufferedReader = new BufferedReader(
+ new InputStreamReader(new FileInputStream("file.txt")),
+ 2048
+ )
+ val count = countLines(bufferedReader)
+ bufferedReader.close()
+ count
+}
+```
+
+What happens if, after opening the file and before closing it, an exception occurs? The `bufferedReader.close()` line doesn't have a chance to close the resource. This creates a resource leak. The Scala language has the `try...finally` construct, which helps us to prevent these situations.
+
+Let's rewrite the above example with `try..finally`:
+
+```scala
+def lines(file: String): Task[Long] = Task.effect {
+ def countLines(br: BufferedReader): Long = br.lines().count()
+ val bufferedReader = new BufferedReader(
+ new InputStreamReader(new FileInputStream("file.txt")),
+ 2048
+ )
+ try countLines(bufferedReader)
+ finally bufferedReader.close()
+}
+```
+
+Now, we are sure that if our program is interrupted during the process of a file, the `finally` block will be executed.
+
+The `try` / `finally` construct solves simple problems, but it has some drawbacks:
+
+1. It's not composable; We can't compose multiple resources together.
+
+2. When we have multiple resources, we end up with messy and ugly code, hard to reason about, and refactoring.
+3. We don't have any control over the order of resource clean-up
+4. It only helps us to handle resources sequentially. It can't compose multiple resources, concurrently.
+5. It doesn't support asynchronous workflows.
+6. It's a manual way of resource management, not automatic. To have a resource-safe application we need to manually check that all resources are managed correctly. This way of resource management is error-prone in case of forgetting to manage resources, correctly.
+
+## ZIO Solution
+
+ZIO's resource management features work across synchronous, asynchronous, concurrent, and other effect types, and provide strong guarantees even in the presence of failure, interruption, or defects in the application.
+
+ZIO has two major mechanisms to manage resources.
+
+### bracket
+
+ZIO generalized the pattern of `try` / `finally` and encoded it in `ZIO.bracket` or `ZIO#bracket` operations.
+
+Every bracket requires three actions:
+1. **Acquiring Resource**— An effect describing the acquisition of resource. For example, opening a file.
+2. **Using Resource**— An effect describing the actual process to produce a result. For example, counting the number of lines in a file.
+3. **Releasing Resource**— An effect describing the final step of releasing or cleaning up the resource. For example, closing a file.
+
+
+```scala
+def use(resource: Resource): Task[Any] = Task.effect(???)
+def release(resource: Resource): UIO[Unit] = Task.effectTotal(???)
+def acquire: Task[Resource] = Task.effect(???)
+
+val result1: Task[Any] = acquire.bracket(release, use)
+val result2: Task[Any] = acquire.bracket(release)(use) // More ergonomic API
+
+val result3: Task[Any] = Task.bracket(acquire, release, use)
+val result4: Task[Any] = Task.bracket(acquire)(release)(use) // More ergonomic API
+```
+
+The bracket guarantees us that the `acquiring` and `releasing` of a resource will not be interrupted. These two guarantees ensure us that the resource will always be released.
+
+Let's try a real example. We are going to write a function which counts the number of lines in a given file. As we are working with a file resource, we should separate our logic into three parts. In the first part, we create a `BufferedReader`. In the second, we count the file lines with the given `BufferedReader` resource, and at the end we close that resource:
+
+```scala
+def lines(file: String): Task[Long] = {
+ def countLines(reader: BufferedReader): Task[Long] = Task.effect(reader.lines().count())
+ def releaseReader(reader: BufferedReader): UIO[Unit] = Task.effectTotal(reader.close())
+ def acquireReader(file: String): Task[BufferedReader] = Task.effect(new BufferedReader(new FileReader(file), 2048))
+
+ Task.bracket(acquireReader(file), releaseReader, countLines)
+}
+```
+
+Let's write another function which copies a file from a source file to a destination file. We can do that by nesting two brackets, one for the `FileInputStream` and the other for the `FileOutputStream`:
+
+```scala
+def is(file: String): Task[FileInputStream] = Task.effect(???)
+def os(file: String): Task[FileOutputStream] = Task.effect(???)
+
+def close(resource: Closeable): UIO[Unit] = Task.effectTotal(???)
+def copy(from: FileInputStream, to: FileOutputStream): Task[Unit] = ???
+
+def transfer(src: String, dst: String): ZIO[Any, Throwable, Unit] = {
+ Task.bracket(is(src))(close) { in =>
+ Task.bracket(os(dst))(close) { out =>
+ copy(in, out)
+ }
+ }
+}
+```
+
+As there isn't any dependency between our two resources (`is` and `os`), it doesn't make sense to use nested brackets, so let's `zip` these two acquisitions into one effect, and then use one bracket on them:
+
+```scala
+def transfer(src: String, dst: String): ZIO[Any, Throwable, Unit] = {
+ is(src)
+ .zipPar(os(dst))
+ .bracket { case (in, out) =>
+ Task
+ .effectTotal(in.close())
+ .zipPar(Task.effectTotal(out.close()))
+ } { case (in, out) =>
+ copy(in, out)
+ }
+}
+```
+
+While using bracket is a nice and simple way of managing resources, there are some cases where a bracket is not the best choice:
+
+1. Bracket is not composable— When we have multiple resources, composing them with a bracket is not straightforward.
+
+2. Messy nested brackets— In the case of multiple resources, nested brackets remind us of callback hell awkwardness. The bracket is designed with nested resource acquisition. In the case of multiple resources, we encounter inefficient nested bracket calls, and it causes refactoring a complicated process.
+
+Using brackets is simple and straightforward, but in the case of multiple resources, it isn't a good player. This is where we need another abstraction to cover these issues.
+
+### ZManaged
+
+`ZManaged` is a composable data type for resource management, which wraps the acquisition and release actions of a resource. We can think of `ZManaged` as a handle with built-in acquisition and release logic.
+
+To create a managed resource, we need to provide `acquire` and `release` action of that resource to the `make` constructor:
+
+```scala
+val managed = ZManaged.make(acquire)(release)
+```
+
+We can use managed resources by calling `use` on that. A managed resource is meant to be used only inside of the `use` block. So that resource is not available outside of the `use` block.
+
+The `ZManaged` is a separate world like `ZIO`; in this world, we have a lot of combinators to combine `ZManaged` values and create another `ZManaged`. At the end of the day, when our composed `ZManaged` is prepared, we can run any effect on this resource and convert it back into the `ZIO` world.
+
+Let's try to rewrite a `transfer` example with `ZManaged`:
+
+```scala
+def transfer(from: String, to: String): ZIO[Any, Throwable, Unit] = {
+ val resource = for {
+ from <- ZManaged.make(is(from))(close)
+ to <- ZManaged.make(os(to))(close)
+ } yield (from, to)
+
+ resource.use { case (in, out) =>
+ copy(in, out)
+ }
+}
+```
+
+Also, we can get rid of this ceremony and treat the `Managed` like a `ZIO` effect:
+
+```scala
+def transfer(from: String, to: String): ZIO[Any, Throwable, Unit] = {
+ val resource: ZManaged[Any, Throwable, Unit] = for {
+ from <- ZManaged.make(is(from))(close)
+ to <- ZManaged.make(os(to))(close)
+ _ <- copy(from, to).toManaged_
+ } yield ()
+ resource.useNow
+}
+```
+
+This is where the `ZManaged` provides us a composable and flexible way of allocating resources. They can be composed with any `ZIO` effect by converting them using the `ZIO#toManaged_` operator.
+
+`ZManaged` has several type aliases, each of which is useful for a specific workflow:
+
+- **[Managed](managed.md)**— `Managed[E, A]` is a type alias for `ZManaged[Any, E, A]`.
+- **[TaskManaged](task-managed.md)**— `TaskManaged[A]` is a type alias for `ZManaged[Any, Throwable, A]`.
+- **[RManaged](rmanaged.md)**— `RManaged[R, A]` is a type alias for `ZManaged[R, Throwable, A]`.
+- **[UManaged](umanaged.md)**— `UManaged[A]` is a type alias for `ZManaged[Any, Nothing, A]`.
+- **[URManaged](urmanaged.md)**— `URManaged[R, A]` is a type alias for `ZManaged[R, Nothing, A]`.
diff --git a/website/versioned_docs/version-1.0.18/reference/resource/managed.md b/website/versioned_docs/version-1.0.18/reference/resource/managed.md
new file mode 100644
index 000000000000..1021b4a98e93
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/resource/managed.md
@@ -0,0 +1,79 @@
+---
+id: managed
+title: "Managed"
+---
+
+`Managed[E, A]` is a type alias for `ZManaged[Any, E, A]`, which represents a managed resource that has no requirements, and may fail with an `E`, or succeed with an `A`.
+
+
+The `Managed` type alias is defined as follows:
+
+```scala
+type Managed[+E, +A] = ZManaged[Any, E, A]
+```
+
+`Managed` is a data structure that encapsulates the acquisition and the release of a resource, which may be used by invoking the `use` method of the resource. The resource will be automatically acquired before the resource is used, and automatically released after the resource is used.
+
+Resources do not survive the scope of `use`, meaning that if you attempt to capture the resource, leak it from `use`, and then use it after the resource has been consumed, the resource will not be valid anymore and may fail with some checked error, as per the type of the functions provided by the resource.
+
+```scala
+import zio._
+def doSomething(queue: Queue[Int]): UIO[Unit] = IO.unit
+
+val managedResource = Managed.make(Queue.unbounded[Int])(_.shutdown)
+val usedResource: UIO[Unit] = managedResource.use { queue => doSomething(queue) }
+```
+
+In this example, the queue will be created when `use` is called, and `shutdown` will be called when `doSomething` completes.
+
+## Creating a Managed
+
+As shown in the previous example, a `Managed` can be created by passing an `acquire` function and a `release` function.
+
+It can also be created from an effect. In this case the release function will do nothing.
+```scala
+import zio._
+def acquire: IO[Throwable, Int] = IO.effect(???)
+
+val managedFromEffect: Managed[Throwable, Int] = Managed.fromEffect(acquire)
+```
+
+You can create a `Managed` from a pure value as well.
+```scala
+import zio._
+val managedFromValue: Managed[Nothing, Int] = Managed.succeed(3)
+```
+
+## Managed with ZIO environment
+
+`Managed[E, A]` is actually an alias for `ZManaged[Any, E, A]`. If you'd like your `acquire`, `release` or `use` functions to require an environment R, just use `ZManaged` instead of `Managed`.
+
+```scala
+import zio._
+import zio.console._
+
+val zManagedResource: ZManaged[Console, Nothing, Unit] = ZManaged.make(console.putStrLn("acquiring").orDie)(_ => console.putStrLn("releasing").orDie)
+val zUsedResource: URIO[Console, Unit] = zManagedResource.use { _ => console.putStrLn("running").orDie }
+```
+
+## Combining Managed
+
+It is possible to combine multiple `Managed` using `flatMap` to obtain a single `Managed` that will acquire and release all the resources.
+
+```scala
+import zio._
+```
+
+
+```scala
+val managedQueue: Managed[Nothing, Queue[Int]] = Managed.make(Queue.unbounded[Int])(_.shutdown)
+val managedFile: Managed[IOException, File] = Managed.make(openFile("data.json"))(closeFile)
+
+val combined: Managed[IOException, (Queue[Int], File)] = for {
+ queue <- managedQueue
+ file <- managedFile
+} yield (queue, file)
+
+val usedCombinedRes: IO[IOException, Unit] = combined.use { case (queue, file) => doSomething(queue, file) }
+
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/resource/rmanaged.md b/website/versioned_docs/version-1.0.18/reference/resource/rmanaged.md
new file mode 100644
index 000000000000..902658e15fb5
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/resource/rmanaged.md
@@ -0,0 +1,13 @@
+---
+id: rmanaged
+title: "RManaged"
+---
+
+`RManaged[R, A]` is a type alias for `ZManaged[R, Throwable, A]`, which represents a managed resource that requires an `R`, and may fail with a `Throwable` value, or succeed with an `A`.
+
+
+The `RManaged` type alias is defined as follows:
+
+```scala
+type RManaged[-R, +A] = ZManaged[R, Throwable, A]
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/resource/task-managed.md b/website/versioned_docs/version-1.0.18/reference/resource/task-managed.md
new file mode 100644
index 000000000000..76dc243dd2ed
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/resource/task-managed.md
@@ -0,0 +1,13 @@
+---
+id: task-managed
+title: "TaskManaged"
+---
+
+`TaskManaged[A]` is a type alias for `ZManaged[Any, Throwable, A]`, which represents a managed resource that has no requirements, and may fail with a `Throwable` value, or succeed with an `A`.
+
+
+The `TaskManaged` type alias is defined as follows:
+
+```scala
+type TaskManaged[+A] = ZManaged[Any, Throwable, A]
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/resource/umanaged.md b/website/versioned_docs/version-1.0.18/reference/resource/umanaged.md
new file mode 100644
index 000000000000..e5323a19c0b0
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/resource/umanaged.md
@@ -0,0 +1,13 @@
+---
+id: umanaged
+title: "UManaged"
+---
+
+`UManaged[A]` is a type alias for `ZManaged[Any, Nothing, A]`, which represents an **unexceptional** managed resource that doesn't require any specific environment, and cannot fail, but can succeed with an `A`.
+
+
+The `UManaged` type alias is defined as follows:
+
+```scala
+type UManaged[+A] = ZManaged[Any, Nothing, A]
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/resource/urmanaged.md b/website/versioned_docs/version-1.0.18/reference/resource/urmanaged.md
new file mode 100644
index 000000000000..e3ee4de1be40
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/resource/urmanaged.md
@@ -0,0 +1,13 @@
+---
+id: urmanaged
+title: "URManaged"
+---
+
+`URManaged[R, A]` is a type alias for `ZManaged[R, Nothing, A]`, which represents a managed resource that requires an `R`, and cannot fail, but can succeed with an `A`.
+
+
+The `URManaged` type alias is defined as follows:
+
+```scala
+type URManaged[-R, +A] = ZManaged[R, Nothing, A]
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/resource/zmanaged.md b/website/versioned_docs/version-1.0.18/reference/resource/zmanaged.md
new file mode 100644
index 000000000000..a6b111e7cb0f
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/resource/zmanaged.md
@@ -0,0 +1,209 @@
+---
+id: zmanaged
+title: "ZManaged"
+---
+
+A `ZManaged[R, E, A]` is a managed resource, that requires an `R`, and may fail with an `E` value, or succeed with an `A`.
+
+ `ZManaged` is a data structure that encapsulates the acquisition and the release of a resource, which may be used by invoking the `use` method of the resource. The resource will be automatically acquired before the resource is used and automatically released after the resource is used.
+
+Resources do not survive the scope of `use`, meaning that if we attempt to capture the resource, leak it from `use`, and then use it after the resource has been consumed, the resource will not be valid anymore and may fail with some checked error, as per the type of the functions provided by the resource.
+
+## Creation
+
+In this section, we explore some common ways to create managed resources.
+
+### Making
+
+`ZManaged` has a `make` constructor which requires `acquire` and `release` actions:
+
+
+```scala
+val managed = ZManaged.make(acquire)(release)
+```
+
+In the following example, we have a managed resource which requires `Console` as an environment to print the first line of a given file. The `BufferedReader` will be acquired before printing the first line and automatically will be released after using `BufferedReader`:
+
+```scala
+import zio.console._
+def printFirstLine(file: String): ZIO[Console, Throwable, Unit] = {
+ def acquire(file: String) = ZIO.effect(new BufferedReader(new FileReader(file)))
+ def release(reader: BufferedReader) = ZIO.effectTotal(reader.close())
+
+ ZManaged.make(acquire(file))(release).use { reader =>
+ putStrLn(reader.readLine())
+ }
+}
+```
+
+If we need to have different logic in `release` action based on exit status of `acquire` action, we can use `ZManaged.makeExit` constructor:
+
+```scala
+trait ZManaged[-R, +E, +A] {
+ def makeExit[R, R1 <: R, E, A](
+ acquire: ZIO[R, E, A]
+ )(release: (A, Exit[Any, Any]) => ZIO[R1, Nothing, Any]): ZManaged[R1, E, A]
+}
+```
+
+Note that like `ZManaged.make`, both the `acquire` and `release` actions are uninterruptible in `ZManaged.makeExit`.
+
+### Lifting a pure value
+
+We can lift pure values to `ZManaged` with the `ZManaged.succeed` method:
+
+```scala
+val managedString = ZManaged.succeed("Hello, World!")
+val managedBoolean = ZManaged.succeed(true)
+```
+
+### Lifting a ZIO effect
+
+Every `ZIO` effect can be lifted to `ZManaged` with `ZManaged.fromEffect` or `ZIO#toManaged_` operations:
+
+```scala
+val managedHello = ZManaged.fromEffect(putStrLn("Hello, World!"))
+val managedHello_ = putStrLn("Hello, World!").toManaged_
+```
+
+This is useful when we want to combine `ZManaged` effects with `ZIO` effects. Assume during creation of managed resource, we need to log some information, we can lift a `ZIO` effect to `ZManaged` world:
+
+
+```scala
+def userRepository: ZManaged[Blocking with Console, Throwable, UserRepository] = for {
+ cfg <- dbConfig.toManaged_
+ _ <- putStrLn("Read database config").toManaged_
+ _ <- initializeDb(cfg).toManaged_
+ _ <- putStrLn("Database initialized").toManaged_
+ xa <- makeTransactor(cfg)
+ _ <- putStrLn("Created new blocking transactor").toManaged_
+} yield new UserRepository(xa)
+```
+
+### Making from AutoCloseable Resources
+
+If the resource implements the `AutoCloseable` interface, we can easily make a `ZManaged` from it by using the `ZManaged.fromAutoCloseable` constructor:
+
+```scala
+ZManaged.fromAutoCloseable(ZIO.effect(new FileInputStream("file.txt")))
+// res1: ZManaged[Any, Throwable, FileInputStream] = zio.ZManaged$$anon$2@19dd8c22
+ZManaged.fromAutoCloseable(ZIO.effect(fromResource("file.txt")))
+// res2: ZManaged[Any, Throwable, scala.io.BufferedSource] = zio.ZManaged$$anon$2@60c09e2e
+ZManaged.fromAutoCloseable(ZIO.effect(fromFile("file.txt")))
+// res3: ZManaged[Any, Throwable, scala.io.BufferedSource] = zio.ZManaged$$anon$2@210bd8dd
+```
+
+### Making Interruptible Acquires
+
+By default, when we create a `ZManaged` via `ZManaged.make` constructor, the `acquire` and `release` actions are _uninterruptible_. But what if we want to make the `acquire` action interruptible? The `makeInterruptible` constructor does that for us:
+
+```scala
+trait ZManaged[-R, +E, +A] {
+ def makeInterruptible[R, E, A](
+ acquire: ZIO[R, E, A]
+ )(release: A => URIO[R, Any]): ZManaged[R, E, A]
+}
+```
+
+Making `ZManaged` via this constructor makes the `acquire` action interruptible, the release action remains uninterruptible.
+
+If we want to decide what to do in the `release` action based on how the `acquire` action is completed, whether by success, failure, or interruption; we can use the `makeReserve` constructor. The type of `release` action is `Exit[Any, Any] => URIO[R, Any]` which provides us the `Exit` status of the `acquire` action, so we can decide what to do based on the exit status of `acquire` action:
+
+```scala
+trait ZManaged[-R, +E, +A] {
+ def makeReserve[R, E, A](reservation: ZIO[R, E, Reservation[R, E, A]]): ZManaged[R, E, A]
+}
+```
+
+`Reservation` data type is defined as follows:
+
+
+```scala
+final case class Reservation[-R, +E, +A](acquire: ZIO[R, E, A], release: Exit[Any, Any] => URIO[R, Any])
+```
+
+## Usage
+
+### use
+
+Inside the `use` block, we can use the managed resource and return a new value. The `use` method converts a managed resource from `ZManaged` world to `ZIO` world:
+
+```scala
+def firstLine(file: String): ZIO[Console, Throwable, Unit] =
+ ZManaged.fromAutoCloseable(ZIO.effect(fromFile(file))).use { reader =>
+ putStrLn(reader.bufferedReader().readLine())
+ }
+```
+
+### useNow
+
+If our managed resource could be valid after releasing resources, we can convert that `ZManaged` to `ZIO` effect by calling `ZManaged#useNow`.
+
+```scala
+val hello: UIO[String] = ZManaged.succeed("Hello, World!").useNow
+```
+
+This is useful when we have composed some `ZManaged` with some `ZIO` effects, and the result can outlive the `use` block:
+
+```scala
+def is(file: String): Task[FileInputStream] = Task.effect(???)
+def os(file: String): Task[FileOutputStream] = Task.effect(???)
+
+def close(resource: Closeable): UIO[Unit] = Task.effectTotal(???)
+def copy(from: FileInputStream, to: FileOutputStream): Task[Unit] = ???
+
+def transfer(from: String, to: String): ZIO[Any, Throwable, Unit] = {
+ val resource: ZManaged[Any, Throwable, Unit] = for {
+ from <- ZManaged.make(is(from))(close)
+ to <- ZManaged.make(os(to))(close)
+ _ <- copy(from, to).toManaged_
+ } yield ()
+ resource.useNow
+}
+```
+
+> **Note:**
+>
+> Be careful, don’t call the `useNow` method on a managed resource that isn’t valid after its release actions. For example, running `useNow` on `ZManaged.fromAutoCloseable(ZIO.effect(fromFile("file.txt")))` doesn’t make sense, because after releasing a file, we haven’t any handle to that file.
+
+### useForever
+
+Assume we are going to make a managed resource long-lived. The `ZManaged#useForever` does that for us, it converts a `ZManaged` effect to a `ZIO` effect which will remain forever running.
+
+## Combinators
+
+`ZManaged` like the `ZIO` effect has almost all combinators that we introduced on the [ZIO](../core/zio.md) page. We can use them to create more complicated `ZManaged` ones.
+
+There are also some combinators which are specific to `ZManaged`:
+
+1. **ensuringFirst**— This combinator adds a `ZIO` effect as a finalizer to an existing `ZManaged` effect. This finalizer will be executed before the existing finalizers:
+
+```scala
+trait ZManaged[-R, +E, +A] {
+ def ensuringFirst[R1 <: R](f: ZIO[R1, Nothing, Any]): ZManaged[R1, E, A]
+}
+```
+
+2. **onExitFirst**— Like `ensuringFirst`, but it has access to the `ZManaged`’s result:
+
+```scala
+trait ZManaged[-R, +E, +A] {
+ def onExitFirst[R1 <: R](cleanup: Exit[E, A] => ZIO[R1, Nothing, Any]): ZManaged[R1, E, A]
+}
+```
+
+3. **withEarlyRelease**— It will produce another `ZManaged` which provides a canceler that can be used to eagerly execute the finalizer of this `ZManaged`:
+
+```scala
+trait ZManaged[-R, +E, +A] {
+ def withEarlyRelease: ZManaged[R, E, (UIO[Any], A)]
+}
+```
+
+4. **withEarlyReleaseExit**— Like `withEarlyRelease`, but allows us to specify an exit value in the event of early release:
+
+```scala
+trait ZManaged[-R, +E, +A] {
+ def withEarlyReleaseExit(e: Exit[Any, Any]): ZManaged[R, E, (UIO[Any], A)]
+}
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/services/blocking.md b/website/versioned_docs/version-1.0.18/reference/services/blocking.md
new file mode 100644
index 000000000000..bd2ad9a7482c
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/services/blocking.md
@@ -0,0 +1,157 @@
+---
+id: blocking
+title: "Blocking"
+---
+
+
+## Introduction
+
+The **Blocking** service provides access to a thread pool that can be used for performing
+blocking operations, such as thread sleeps, synchronous socket/file reads, and so forth.
+
+By default, ZIO is asynchronous and all effects will be executed on a default primary thread pool which is optimized for asynchronous operations. As ZIO uses a fiber-based concurrency model, if we run **Blocking I/O** or **CPU Work** workloads on a primary thread pool, they are going to monopolize all threads of **primary thread pool**.
+
+In the following example, we create 100 blocking tasks to run in parallel on the primary async thread pool. Assume we have a machine with 8 CPU cores, so ZIO creates a thread pool of size 16 (2 * 8). If we run this program, all of our threads get stuck, and the remaining 84 blocking tasks (100 - 16) won't have any chance to run on our thread pool:
+
+```scala
+import zio.{ZIO, URIO}
+import zio.console._
+def blockingTask(n: Int): URIO[Console, Unit] =
+ putStrLn(s"running blocking task number $n").orDie *>
+ ZIO.effectTotal(Thread.sleep(3000)) *>
+ blockingTask(n)
+
+val program = ZIO.foreachPar((1 to 100).toArray)(blockingTask)
+```
+
+## Creating Blocking Effects
+
+ZIO has a separate **blocking thread pool** specially designed for **Blocking I/O** and, also **CPU Work** workloads. We should run blocking workloads on this thread pool to prevent interfering with the primary thread pool.
+
+The contract is that the thread pool will accept unlimited tasks (up to the available memory)
+and continuously create new threads as necessary.
+
+The `blocking` operator takes a ZIO effect and returns another effect that is going to run on a blocking thread pool:
+
+
+Also, we can directly import a synchronous effect that does blocking operation into ZIO effect by using `effectBlocking`:
+
+```scala
+import zio.blocking._
+def blockingTask(n: Int) = effectBlocking {
+ do {
+ println(s"Running blocking task number $n on dedicated blocking thread pool")
+ Thread.sleep(3000)
+ } while (true)
+}
+```
+
+## Interruption of Blocking Operations
+
+By default, when we convert a blocking operation into the ZIO effects using `effectBlocking`, there is no guarantee that if that effect is interrupted the underlying effect will be interrupted.
+
+Let's create a blocking effect from an endless loop:
+
+```scala
+for {
+ _ <- putStrLn("Starting a blocking operation")
+ fiber <- effectBlocking {
+ while (true) {
+ Thread.sleep(1000)
+ println("Doing some blocking operation")
+ }
+ }.ensuring(
+ putStrLn("End of a blocking operation").orDie
+ ).fork
+ _ <- fiber.interrupt.schedule(
+ Schedule.delayed(
+ Schedule.duration(1.seconds)
+ )
+ )
+} yield ()
+```
+
+When we interrupt this loop after one second, it will not be interrupted. It will only stop when the entire JVM stops. So `effectBlocking` doesn't translate ZIO interruption into thread interruption (`Thread.interrupt`).
+
+Instead, we should use `effectBlockingInterrupt` to create interruptible blocking effects:
+
+```scala
+for {
+ _ <- putStrLn("Starting a blocking operation")
+ fiber <- effectBlockingInterrupt {
+ while(true) {
+ Thread.sleep(1000)
+ println("Doing some blocking operation")
+ }
+ }.ensuring(
+ putStrLn("End of the blocking operation").orDie
+ ).fork
+ _ <- fiber.interrupt.schedule(
+ Schedule.delayed(
+ Schedule.duration(3.seconds)
+ )
+ )
+} yield ()
+```
+
+Notes:
+
+1. If we are converting a blocking I/O to the ZIO effect, it would be better to use `effectBlockingIO` which refines the error type to the `java.io.IOException`.
+
+2. The `effectBlockingInterrupt` method adds significant overhead. So for performance-sensitive applications, it is better to handle interruptions manually using `effectBlockingCancel`.
+
+## Cancellation of Blocking Operation
+
+Some blocking operations do not respect `Thread#interrupt` by swallowing `InterruptedException`. So, they will not be interrupted via `effectBlockingInterrupt`. Instead, they may provide us an API to signal them to _cancel_ their operation.
+
+The following `BlockingService` will not be interrupted in case of `Thread#interrupt` call, but it checks the `released` flag constantly. If this flag becomes true, the blocking service will finish its job:
+
+```scala
+import java.util.concurrent.atomic.AtomicReference
+final case class BlockingService() {
+ private val released = new AtomicReference(false)
+
+ def start(): Unit = {
+ while (!released.get()) {
+ println("Doing some blocking operation")
+ try Thread.sleep(1000)
+ catch {
+ case _: InterruptedException => () // Swallowing InterruptedException
+ }
+ }
+ println("Blocking operation closed.")
+ }
+
+ def close(): Unit = {
+ println("Releasing resources and ready to be closed.")
+ released.getAndSet(true)
+ }
+}
+```
+
+So, to translate ZIO interruption into cancellation of these types of blocking operations we should use `effectBlockingCancelable`. This method takes a `cancel` effect which is responsible for signaling the blocking code to close itself when ZIO interruption occurs:
+
+```scala
+val myApp =
+ for {
+ service <- ZIO.effect(BlockingService())
+ fiber <- effectBlockingCancelable(
+ effect = service.start()
+ )(
+ cancel = UIO.effectTotal(service.close())
+ ).fork
+ _ <- fiber.interrupt.schedule(
+ Schedule.delayed(
+ Schedule.duration(3.seconds)
+ )
+ )
+ } yield ()
+```
+
+Here is another example of the cancelation of a blocking operation. When we `accept` a server socket, this blocking operation will never be interrupted until we close it using the `ServerSocket#close` method:
+
+```scala
+import java.net.{Socket, ServerSocket}
+def accept(ss: ServerSocket): RIO[Blocking, Socket] =
+ effectBlockingCancelable(ss.accept())(UIO.effectTotal(ss.close()))
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/services/clock.md b/website/versioned_docs/version-1.0.18/reference/services/clock.md
new file mode 100644
index 000000000000..fe964cc578aa
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/services/clock.md
@@ -0,0 +1,28 @@
+---
+id: clock
+title: "Clock"
+---
+
+Clock service contains some functionality related to time and scheduling.
+
+To get the current time in a specific time unit, the `currentTime` function takes a unit as `TimeUnit` and returns `UIO[Long]`:
+
+
+```scala
+val inMiliseconds: URIO[Clock, Long] = currentTime(TimeUnit.MILLISECONDS)
+val inDays: URIO[Clock, Long] = currentTime(TimeUnit.DAYS)
+```
+
+To get the current date and time in the current timezone, the `currentDateTime` function returns a ZIO effect containing `OffsetDateTime`.
+
+Also, the Clock service has a very useful functionality for sleeping and creating a delay between jobs. The `sleep` takes a `Duration` and sleep for the specified duration. It is analogous to `java.lang.Thread.sleep` function, but it doesn't block any underlying thread. It's completely non-blocking.
+
+In the following example, we are going to print the current time periodically by placing a one-second `sleep` between each print call:
+
+```scala
+def printTimeForever: ZIO[Console with Clock, Throwable, Nothing] =
+ currentDateTime.flatMap(time => putStrLn(time.toString)) *>
+ sleep(1.seconds) *> printTimeForever
+```
+
+For scheduling purposes like retry and repeats, ZIO has a great data type called [Schedule](../misc/schedule.md).
diff --git a/website/versioned_docs/version-1.0.18/reference/services/console.md b/website/versioned_docs/version-1.0.18/reference/services/console.md
new file mode 100644
index 000000000000..af42925479fa
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/services/console.md
@@ -0,0 +1,37 @@
+---
+id: console
+title: "Console"
+---
+
+Console service contains simple I/O operations for reading/writing strings from/to the standard input, output, and error console.
+
+| Function | Input Type | Output Type |
+|---------------|-------------------|-------------------------------------|
+| `putStr` | `line: => String` | `URIO[Console, Unit]` |
+| `putStrErr` | `line: => String` | `URIO[Console, Unit]` |
+| `putStrLn` | `line: => String` | `URIO[Console, Unit]` |
+| `putStrLnErr` | `line: => String` | `URIO[Console, Unit]` |
+| `getStrLn` | | `ZIO[Console, IOException, String]` |
+
+All functions of console service are effectful, this means they are just descriptions of reading/writing from/to the console.
+
+As ZIO data type support monadic operations, we can compose these functions with for-comprehension which helps us to write our program pretty much like an imperative program:
+
+```scala
+import java.io.IOException
+
+import zio.ZIO
+import zio.console._
+
+object MyHelloApp extends zio.App {
+ val program: ZIO[Console, IOException, Unit] = for {
+ _ <- putStrLn("Hello, what is your name?")
+ name <- getStrLn
+ _ <- putStrLn(s"Hello $name, welcome to ZIO!")
+ } yield ()
+
+ override def run(args: List[String]) = program.exitCode
+}
+```
+
+Note again, every line of our `program` is a description, not a statement. As we can see, the type of our `program` is `ZIO[Console, IOException, Unit]`, which means that to run `program` we need the `Console` service as an environment; it may fail due to failure of `getStrLn`, and it will produce a `Unit` value.
diff --git a/website/versioned_docs/version-1.0.18/reference/services/index.md b/website/versioned_docs/version-1.0.18/reference/services/index.md
new file mode 100644
index 000000000000..99ce0f6f82be
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/services/index.md
@@ -0,0 +1,12 @@
+---
+id: index
+title: "Introduction"
+---
+
+ZIO already provides 5 built-in services. When we use these services, we don't need to provide their corresponding environment explicitly. The `ZEnv` environment is a type alias for all of these services and will be provided by ZIO to our effects:
+
+- **[Console](console.md)** — Operations for reading/writing strings from/to the standard input, output, and error console.
+- **[Clock](clock.md)** — Contains some functionality related to time and scheduling.
+- **[Random](random.md)** — Provides utilities to generate random numbers.
+- **[Blocking](blocking.md)** — Provides access to a thread pool that can be used for performing blocking operations.
+- **[System](system.md)** — Contains several useful functions related to system environments and properties.
diff --git a/website/versioned_docs/version-1.0.18/reference/services/random.md b/website/versioned_docs/version-1.0.18/reference/services/random.md
new file mode 100644
index 000000000000..92c2f1ade98e
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/services/random.md
@@ -0,0 +1,37 @@
+---
+id: random
+title: "Random"
+---
+
+The Random service provides utilities to generate random numbers. It's a functional wrapper of `scala.util.Random`. This service contains various different pseudo-random generators like `nextInt`, `nextBoolean` and `nextDouble`. Each random number generator function returns a `URIO[Random, T]` value.
+
+```scala
+import zio.random._
+import zio.console._
+for {
+ randomInt <- nextInt
+ _ <- putStrLn(s"A random Int: $randomInt")
+ randomChar <- nextPrintableChar
+ _ <- putStrLn(s"A random Char: $randomChar")
+ randomDouble <- nextDoubleBetween(1.0, 5.0)
+ _ <- putStrLn(s"A random double between 1.0 and 5.0: $randomDouble")
+} yield ()
+```
+
+The Random service has a `setSeed` function which helps us to alter the state of the random generator. It is useful when writing the test version of the Random service, when we need to generate the same sequence of numbers.
+
+```scala
+for {
+ _ <- setSeed(0)
+ nextInts <- (nextInt zip nextInt)
+} yield assert(nextInts == (-1155484576,-723955400))
+```
+
+Also, it has utilities to shuffle a list or generate random samples from a Gaussian distribution:
+
+* **shuffle** - Takes a list as an input and shuffles it.
+* **nextGaussian** — Returns the next pseudorandom, Gaussian ("normally") distributed double value with mean 0.0 and standard deviation 1.0.
+
+> _**Note**:_
+>
+> Random numbers that are generated via Random service are not cryptographically strong. Therefore it's not safe to use the ZIO Random service for security domains where a high level of security and randomness is required, such as password generation.
diff --git a/website/versioned_docs/version-1.0.18/reference/services/system.md b/website/versioned_docs/version-1.0.18/reference/services/system.md
new file mode 100644
index 000000000000..f36870b2ae50
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/services/system.md
@@ -0,0 +1,36 @@
+---
+id: system
+title: "System"
+---
+
+The System service contains several useful functions related to system environments and properties. Both **system environments** and **system properties** are key/value pairs. They are used to pass user-defined information to our application.
+
+Environment variables are global operating system level variables available to all applications running on the same machine while the properties are application-level variables provided to our application.
+
+## System Environment
+The `env` function retrieves the value of an environment variable:
+
+```scala
+import zio.console._
+import zio.system._
+for {
+ user <- env("USER")
+ _ <- user match {
+ case Some(value) => putStr(s"The USER env is: $value")
+ case None => putStr("Oops! The USER env is not set")
+ }
+} yield ()
+```
+
+## System Property
+Also, the System service has a `property` function to retrieve the value of a system property:
+
+```scala
+for {
+ user <- property("LOG_LEVEL")
+ _ <- user match {
+ case Some(value) => putStr(s"The LOG_LEVEL property is: $value")
+ case None => putStr("Oops! The LOG_LEVEL property is not set")
+ }
+} yield ()
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/stm/index.md b/website/versioned_docs/version-1.0.18/reference/stm/index.md
new file mode 100644
index 000000000000..fee528d15214
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/stm/index.md
@@ -0,0 +1,211 @@
+---
+id: index
+title: "Introduction"
+---
+
+## Overview
+
+ZIO supports Software Transactional Memory (STM) which is a modular composable concurrency data structure. It allows us to combine and compose a group of memory operations and perform all of them in one single atomic operation.
+
+Software Transactional Memory is an abstraction for concurrent communications. The main benefits of STM are composability and modularity. We can write concurrent abstractions that can be composed with any other abstraction built using STM, without exposing the details of how our abstraction ensures safety. This is typically not the case with the locking mechanism.
+
+The idea of transactional operations is not new; they have been fundamental to distributed systems, and to those databases that guarantee ACID properties. Software transactional memory is all about memory operations: all operations are performed on memory. It is not related to a remote system or a database. It is very similar to the database concept of ACID properties, but the _durability_ is missing, which doesn't make sense for in-memory operations.
+
+In transactional memory we get these aspects of ACID properties:
+
+- **Atomicity** — On write operations, we want _atomic update_, which means the update operation either should run at once or not at all.
+
+- **Consistency** — On read operations, we want _consistent view_ of the state of the program that ensures us all reference to the state, gets the same value whenever they get the state.
+
+- **Isolated** — If we have multiple updates, we need to perform these updates in isolated transactions. So each transaction doesn't affect other concurrent transactions. No matter how many fibers are running any number of transactions. None of them have to worry about what is happening in the other transactions.
+
+The ZIO STM API is inspired by Haskell's [STM library](http://hackage.haskell.org/package/stm-2.5.0.0/docs/Control-Concurrent-STM.html) although the implementation in ZIO is completely different.
+
+## The Problem
+
+Let's start from a simple `inc` function, which takes a mutable reference of `Int` and increase it by `amount`:
+
+
+```scala
+def inc(counter: Ref[Int], amount: Int) = for {
+ c <- counter.get
+ _ <- counter.set(c + amount)
+} yield c
+```
+
+If there is only one fiber in the world, it is not a problem. This function sounds correct. But what happens if in between reading the value of the counter and setting a new value, another fiber comes and mutates the value of the counter? Another fiber is just updating the counter just after we read the counter. So this function is subject to a race condition, we can test that with the following program:
+
+```scala
+for {
+ counter <- Ref.make(0)
+ _ <- ZIO.collectAllPar(ZIO.replicate(10)(inc(counter, 1)))
+ value <- counter.get
+} yield (value)
+```
+
+The above program runs 10 concurrent fibers to increase the counter value. However, we cannot expect this program to always return 10 as a result.
+
+To fix this issue, we need to perform the `get` and `set` operations atomically. The `Ref` data type has some other APIs like `update`, `updateAndGet`, and `modify` which perform the reading and writing atomically:
+
+```scala
+def inc(counter: Ref[Int], amount: Int) = counter.updateAndGet(_ + amount)
+```
+
+The most important note about the `modify` operation is that it doesn't use pessimistic locking. It doesn't use any locking primitives for the critical section. It has an optimistic assumption on occurring collisions.
+
+The `modify` function takes these three steps:
+
+1. It assumes that other fibers don't change the shared state and don't interfere in most cases. So it reads the shared state without using any locking primitives.
+
+2. It should prepare itself for the worst case: if another fiber was accessing the state at the same time, what would happen? So when it comes to writing a new value, it should check everything. It should make sure that it saw a consistent state of the universe, and if it did, then it can change that value.
+
+3. If it encounters an inconsistent value, it shouldn't continue. So it aborts updating the shared state with invalidated assumption. It should retry the `modify` operation with an updated state.
+
+Let's see how the `modify` function of `Ref` is implemented without any locking mechanism:
+
+
+```scala
+ final case class Ref[A](value: AtomicReference[A]) { self =>
+ def modify[B](f: A => (B, A)): UIO[B] = UIO.effectTotal {
+ var loop = true
+ var b: B = null.asInstanceOf[B]
+ while (loop) {
+ val current = value.get
+ val tuple = f(current)
+ b = tuple._1
+ loop = !value.compareAndSet(current, tuple._2)
+ }
+ b
+ }
+ }
+```
+
+As we see, the `modify` operation is implemented in terms of the `compare-and-swap` operation which helps us to perform read and update atomically.
+
+Let's rename the `inc` function to the `deposit` as follows to try the classic problem of transferring money from one account to another:
+
+
+```scala
+def deposit(accountBalance: Ref[Int], amount: Int) = accountBalance.update(_ + amount)
+```
+
+And the `withdraw` function:
+
+```scala
+def withdraw(accountBalance: Ref[Int], amount: Int) = accountBalance.update(_ - amount)
+```
+
+It seems pretty good, but we also need to check that there is sufficient balance in the account to withdraw. So let's add an invariant to check that:
+
+```scala
+def withdraw(accountBalance: Ref[Int], amount: Int) = for {
+ balance <- accountBalance.get
+ _ <- if (balance < amount) ZIO.fail("Insufficient funds in your account") else
+ accountBalance.update(_ - amount)
+} yield ()
+```
+
+What if in between checking and updating the balance, another fiber comes and withdraws all money in the account? This solution has a bug. It has the potential to reach a negative balance.
+
+Suppose we finally reached a solution to perform the withdrawal atomically — the problem remains. We need a way to compose `withdraw` with `deposit` atomically to create a `transfer` function:
+
+```scala
+def transfer(from: Ref[Int], to: Ref[Int], amount: Int) = for {
+ _ <- withdraw(from, amount)
+ _ <- deposit(to, amount)
+} yield ()
+```
+
+In the above example, even if we assume that `withdraw` and `deposit` are atomic, we can't compose these two transactions. They produce bugs in a concurrent environment. This code doesn't guarantee that both `withdraw` and `deposit` are performed in one single atomic operation. Other fibers executing this `transfer` method can override the shared state and introduce a race condition.
+
+We need a solution to **atomically compose transactions**. This is where software transactional memory comes into play.
+
+## Composable Concurrency
+
+Software transactional memory provides us a way to compose multiple transactions and perform them in one single transaction.
+
+Let's continue our last effort to convert our `withdraw` method to be one atomic operation. To solve the problem using STM, we replace `Ref` with `TRef`. `TRef` stands for _Transactional Reference_; it is a mutable reference contained in the `STM` world. `STM` is a monadic data structure that represents an effect that can be performed transactionally:
+
+```scala
+def withdraw(accountBalance: TRef[Int], amount: Int): STM[String, Unit] =
+ for {
+ balance <- accountBalance.get
+ _ <- if (balance < amount)
+ STM.fail("Insufficient funds in your account")
+ else
+ accountBalance.update(_ - amount)
+ } yield ()
+```
+
+Although the `deposit` operation is atomic, to be able to compose it with `withdraw` we need to refactor it to take a `TRef` and return an `STM`:
+
+```scala
+def deposit(accountBalance: TRef[Int], amount: Int): STM[Nothing, Unit] =
+ accountBalance.update(_ + amount)
+```
+
+In the `STM` world we can compose all operations and at the end of the world, we perform all of them in one single operation atomically. To be able to compose `withdraw` with `deposit` we need to stay in the `STM` world. Therefore, we didn't perform `STM.atomically` or `STM#commit` methods on each of them.
+
+Now we can define the `transfer` method by composing these two function in the `STM` world and converting them into the `IO` atomically:
+
+```scala
+def transfer(from: TRef[Int], to: TRef[Int], amount: Int): IO[String, Unit] =
+ STM.atomically {
+ for {
+ _ <- withdraw(from, amount)
+ _ <- deposit(to, amount)
+ } yield ()
+ }
+```
+
+Assume we are in the middle of transferring money from one account to the other. If we have withdrawn from the first account but haven't deposited into the second account, that kind of intermediate state is not visible to any external fibers. The transaction completes successfully if there aren't any conflicting changes. And if there are any conflicting changes, then the whole transaction — the entire STM — will be retried.
+
+## How Does it Work?
+
+The `STM` uses the same idea of the `Ref#modify` function, but with a composability feature. The main goal of `STM` is to provide a mechanism to compose multiple transactions and perform them in one single atomic operation.
+
+The mechanism behind the compositional part is obvious. The `STM` has its own world. It has lots of useful combinators like `flatMap` and `orElse` to compose multiple `STM` and create more elegant ones. After we perform a transaction with `STM#commit` or `STM.atomically` the runtime system does the following steps. These steps are not exactly accurate, but they draw an outline of what happens during the transaction:
+
+1. **Starting a Transaction** — When we start a transaction, the runtime system creates a virtual space to keep track of the transaction log, which is built up by recording the reads and tentative writes that the transaction will perform during the transaction steps.
+
+2. **Virtual Execution** — The runtime starts speculating the execution of transactions on every read and write operation. It has two internal logs; the read and the write log. On the read log, it saves the version of all variables it reads during the intermediate steps, and on the write log, it saves the intermediate result of the transaction. It doesn't change the shared state on the main memory. Anything that is inside an atomic block is not executed immediately, it's executed in the virtual world, just by putting stuff in the internal log, not in the main memory. In this particular model, we guarantee that all computations are isolated from one another.
+
+3. **Commit Phase (Real Execution)** — When it comes to the end of the transaction, the runtime system should check everything it has read. It should make sure that it saw a consistent state of the universe, and if it did, then it atomically commits. As the STM is optimistic, it assumes that in the middle of a transaction the chance of interference with the shared state by other fibers is very rare. But it must ready itself for the worst case; it should validate its assumption in the final stage. It checks whether the transactional variables involved were modified by any other threads or not. If its assumption got invalidated in the middle of the transaction, it should abandon the transaction and retry it: it jumps back to the start of the transaction with the original, default values and tries again until it succeeds. This is necessary to resolve conflicts. Otherwise, if there was no conflict, it commits the final value atomically to the memory and succeeds. From the point of view of other fibers, all values in memory change in one blink of an eye. It's all atomic.
+
+Everything done within a transaction to other transactions looks like it happens at once or not at all. So no matter how many pieces of memory it touches during the transaction. From the other transaction perspective, all of these changes happen at once.
+
+
+## STM Data Types
+There are a variety of transactional data structures that can take part in an STM transaction:
+
+- **[TArray](tarray.md)** - A `TArray[A]` is an array of mutable references that can participate in transactions.
+- **[TSet](tset.md)** - A `TSet` is a mutable set that can participate in transactions.
+- **[TMap](tmap.md)** - A `TMap[A]` is a mutable map that can participate in transactions.
+- **[TRef](tref.md)** - A `TRef` is a mutable reference to an immutable value that can participate in transactions.
+- **[TPriorityQueue](tpriorityqueue.md)** - A `TPriorityQueue[A]` is a mutable priority queue that can participate in transactions.
+- **[TPromise](tpromise.md)** - A `TPromise` is a mutable reference that can be set exactly once and can participate in transactions.
+- **[TQueue](tqueue.md)** - A `TQueue` is a mutable queue that can participate in transactions.
+- **[TReentrantLock](treentrantlock.md)** - A `TReentrantLock` is a reentrant read / write lock that can be composed.
+- **[TSemaphore](tsemaphore.md)** - A `TSemaphore` is a semaphore that can participate in transactions.
+
+Since STM places a great emphasis on compositionality, we can build upon these data structures and define our very own concurrent data structures. For example, we can build a transactional priority queue using `TRef`, `TMap` and `TQueue`.
+
+## Advantage of Using STM
+
+1. **Composable Transaction** — Combining atomic operations using locking-oriented programming is almost impossible. ZIO provides the `STM` data type, which has lots of combinators to compose transactions.
+
+2. **Declarative** — ZIO STM is completely declarative. It doesn't require us to think about low-level primitives. It doesn't force us to think about the ordering of locks. Reasoning concurrent program in a declarative fashion is very simple. We can just focus on the logic of our program and run it in a concurrent environment deterministically. The user code is much simpler of course because it doesn't have to deal with the concurrency at all.
+
+3. **Optimistic Concurrency** — In most cases, we are allowed to be optimistic, unless there is tremendous contention. So if we haven't tremendous contention it really pays to be optimistic. It allows a higher volume of concurrent transactions.
+
+4. **Lock-Free** — All operations are non-blocking using lock-free algorithms.
+
+5. **Fine-Grained Locking**— Coarse-grained locking is very simple to implement, but it has a negative impact on performance, while fine-grained locking significantly has better performance, but it is very cumbersome, sophisticated, and error-prone even for experienced programmers. We would like to have the ease of use of coarse-grain locking, but at the same time, we would like to have the efficiency of fine-grain locking. ZIO provides several data types which are a very coarse way of using concurrency, but they are implemented as if every single word were lockable. So the granularity of concurrency is fine-grained. It increases the performance and concurrency. For example, if we have two fibers accessing the same `TArray`, one of them read and write on the first index of our array, and another one is read and write to the second index of that array, they will not conflict. It is just like as if we were locking the indices, not the whole array.
+
+## Implication of Using STM
+
+1. **Running I/O Inside STM**— There is a strict boundary between the `STM` world and the `ZIO` world. This boundary propagates even deeper because we are not allowed to execute arbitrary effects in the `STM` universe. Performing side effects and I/O operations inside a transaction is problematic. In the `STM` the only effect that exists is the `STM` itself. We cannot print something or launch a missile inside a transaction, as it would nondeterministically be executed on every retry of that transaction.
+
+2. **Large Allocations** — We should be very careful in choosing the best data structure to use for STM operations. For example, suppose we use a single data structure with `TRef` and that data structure occupies a big chunk of memory. Every time we update this data structure during the transaction, the runtime system needs a fresh copy of this chunk of memory.
+
+3. **Running Expensive Operations**— The beautiful feature of the `retry` combinator is when we decide to retry the transaction, the `retry` avoids the busy loop. It waits until any of the underlying transactional variables have changed. However, we should be careful about running expensive operations multiple times.
diff --git a/website/versioned_docs/version-1.0.18/reference/stm/stm.md b/website/versioned_docs/version-1.0.18/reference/stm/stm.md
new file mode 100644
index 000000000000..a33c4444544a
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/stm/stm.md
@@ -0,0 +1,79 @@
+---
+id: stm
+title: "STM"
+---
+
+An `STM[E, A]` represents an effect that can be performed transactionally resulting in a failure `E` or a success `A`. There is a more powerful variant `ZSTM[R, E, A]` which supports an environment type `R` like `ZIO[R, E, A]`.
+
+The `STM` (and `ZSTM` variant) data-type is _not_ as powerful as the `ZIO[R, E, A]` datatype as it does not allow you to perform arbitrary effects. This is because STM actions can be executed an arbitrary number of times (and rolled back as well). Only STM actions and pure computation may be performed inside a memory transaction.
+
+No STM actions can be performed outside a transaction, so you cannot accidentally read or write a transactional data structure outside the protection of `STM.atomically` (or without explicitly `commit`ting the transaction). For example:
+
+```scala
+import zio._
+import zio.stm._
+
+def transferMoney(from: TRef[Long], to: TRef[Long], amount: Long): STM[String, Long] =
+ for {
+ senderBal <- from.get
+ _ <- if (senderBal < amount) STM.fail("Not enough money")
+ else STM.unit
+ _ <- from.update(existing => existing - amount)
+ _ <- to.update(existing => existing + amount)
+ recvBal <- to.get
+ } yield recvBal
+
+val program: IO[String, Long] = for {
+ sndAcc <- STM.atomically(TRef.make(1000L))
+ rcvAcc <- STM.atomically(TRef.make(0L))
+ recvAmt <- STM.atomically(transferMoney(sndAcc, rcvAcc, 500L))
+} yield recvAmt
+```
+
+`transferMoney` describes an atomic transfer process between a sender and a receiver. The transaction will fail if the sender does not have enough money in their account. This means that individual accounts will be debited and credited atomically. If the transaction fails in the middle, the entire process will be rolled back, and it will appear that nothing has happened.
+
+Here, we see that `STM` effects compose using a for-comprehension and that wrapping an `STM` effect with `STM.atomically` (or calling `commit` on any STM effect) turns the `STM` effect into a `ZIO` effect which can be executed.
+
+STM transactions compose sequentially. By using `STM.atomically` (or `commit`), the programmer identifies an atomic transaction in the sense that the entire set of operations within `STM.atomically` appears to take place indivisibly.
+
+## Errors
+
+`STM` supports errors just like `ZIO` via the error channel. In `transferMoney`, we saw an example of an error (`STM.fail`).
+
+Errors in `STM` have abort semantics: if an atomic transaction encounters an error, the transaction is rolled back with no effect.
+
+## `retry`
+
+`STM.retry` is central to making transactions composable when they may block. For example, if we wanted to ensure that the money transfer took place when the sender had enough money (instead of failing right away), we can use `STM.retry` instead:
+
+```scala
+def transferMoneyNoMatterWhat(from: TRef[Long], to: TRef[Long], amount: Long): STM[String, Long] =
+ for {
+ senderBal <- from.get
+ _ <- if (senderBal < amount) STM.retry else STM.unit
+ _ <- from.update(existing => existing - amount)
+ _ <- to.update(existing => existing + amount)
+ recvBal <- to.get
+ } yield recvBal
+```
+
+`STM.retry` will abort and retry the entire transaction until it succeeds (instead of failing like the previous example).
+
+Note that the transaction will only be retried when one of the underlying transactional data structures has been changed.
+
+There are many other variants of the `STM.retry` combinator like `STM.check` so rather than writing `if (senderBal < amount) STM.retry else STM.unit`, you can replace it with `STM.check(senderBal < amount)`.
+
+## Composing alternatives
+
+STM transactions compose sequentially so that both STM effects are executed. However, STM transactions can also compose transactions as alternatives so that only one STM effect is executed by making use of `orTry` on STM effects.
+
+Provided we have two STM effects `sA` and `sB`, you can express that you would like to compose the two using `sA orTry sB`. The transaction would first attempt to run `sA` and if it retries then `sA` is abandoned with no effect and then `sB` runs. Now if `sB` also retries then the entire call retries. However, it waits for the transactional data structures to change that are involved in either `sA` or `sB`.
+
+Using `orTry` is an elegant technique that can be used to determine whether or not an STM transaction needs to block. For example, we can take `transferMoneyNoMatterWhat` and turn it into an STM transaction that will fail immediately if the sender does not have enough money instead of retrying by doing:
+
+```scala
+def transferMoneyFailFast(from: TRef[Long], to: TRef[Long], amount: Long): STM[String, Long] =
+ transferMoneyNoMatterWhat(from, to, amount) orTry STM.fail("Sender does not have enough of money")
+```
+
+This will cause the transfer to fail immediately if the sender does not have enough money, because of the semantics of `orTry`.
diff --git a/website/versioned_docs/version-1.0.18/reference/stm/tarray.md b/website/versioned_docs/version-1.0.18/reference/stm/tarray.md
new file mode 100644
index 000000000000..6366f2d7be8d
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/stm/tarray.md
@@ -0,0 +1,143 @@
+---
+id: tarray
+title: "TArray"
+---
+
+`TArray` is an array of mutable references that can participate in transactions in STM.
+
+## Create a TArray
+
+Creating an empty `TArray`:
+
+```scala
+import zio._
+import zio.stm._
+
+val emptyTArray: STM[Nothing, TArray[Int]] = TArray.empty[Int]
+```
+
+Or creating a `TArray` with specified values:
+
+```scala
+import zio._
+import zio.stm._
+
+val specifiedValuesTArray: STM[Nothing, TArray[Int]] = TArray.make(1, 2, 3)
+```
+
+Alternatively, you can create a `TArray` by providing a collection of values:
+
+```scala
+import zio._
+import zio.stm._
+
+val iterableTArray: STM[Nothing, TArray[Int]] = TArray.fromIterable(List(1, 2, 3))
+```
+
+## Retrieve the value from a TArray
+
+The n-th element of the array can be obtained as follows:
+
+```scala
+import zio._
+import zio.stm._
+
+val tArrayGetElem: UIO[Int] = (for {
+ tArray <- TArray.make(1, 2, 3, 4)
+ elem <- tArray(2)
+} yield elem).commit
+```
+
+Accessing a non-existent index aborts the transaction with `ArrayIndexOutOfBoundsException`.
+
+## Update the value of a TArray
+
+Updating the n-th element of an array can be done as follows:
+
+```scala
+import zio._
+import zio.stm._
+
+val tArrayUpdateElem: UIO[TArray[Int]] = (for {
+ tArray <- TArray.make(1, 2, 3, 4)
+ _ <- tArray.update(2, el => el + 10)
+} yield tArray).commit
+```
+
+Updating the n-th element of an array can be done effectfully via `updateM`:
+
+```scala
+import zio._
+import zio.stm._
+
+val tArrayUpdateMElem: UIO[TArray[Int]] = (for {
+ tArray <- TArray.make(1, 2, 3, 4)
+ _ <- tArray.updateM(2, el => STM.succeed(el + 10))
+} yield tArray).commit
+```
+
+Updating a non-existent index aborts the transaction with `ArrayIndexOutOfBoundsException`.
+
+## Transform elements of a TArray
+
+The transform function `A => A` allows computing a new value for every element in the array:
+
+```scala
+import zio._
+import zio.stm._
+
+val transformTArray: UIO[TArray[Int]] = (for {
+ tArray <- TArray.make(1, 2, 3, 4)
+ _ <- tArray.transform(a => a * a)
+} yield tArray).commit
+```
+
+The elements can be mapped effectfully via `transformM`:
+
+```scala
+import zio._
+import zio.stm._
+
+val transformMTArray: UIO[TArray[Int]] = (for {
+ tArray <- TArray.make(1, 2, 3, 4)
+ _ <- tArray.transformM(a => STM.succeed(a * a))
+} yield tArray).commit
+```
+
+Folds the elements of a `TArray` using the specified associative binary operator:
+
+```scala
+import zio._
+import zio.stm._
+
+val foldTArray: UIO[Int] = (for {
+ tArray <- TArray.make(1, 2, 3, 4)
+ sum <- tArray.fold(0)(_ + _)
+} yield sum).commit
+```
+
+The elements can be folded effectfully via `foldM`:
+
+```scala
+import zio._
+import zio.stm._
+
+val foldMTArray: UIO[Int] = (for {
+ tArray <- TArray.make(1, 2, 3, 4)
+ sum <- tArray.foldM(0)((acc, el) => STM.succeed(acc + el))
+} yield sum).commit
+```
+
+## Perform side-effect for TArray elements
+
+`foreach` is used for performing side-effect for each element in the array:
+
+```scala
+import zio._
+import zio.stm._
+
+val foreachTArray = (for {
+ tArray <- TArray.make(1, 2, 3, 4)
+ _ <- tArray.foreach(a => STM.succeed(println(a)))
+} yield tArray).commit
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/stm/tmap.md b/website/versioned_docs/version-1.0.18/reference/stm/tmap.md
new file mode 100644
index 000000000000..9313b7190cc6
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/stm/tmap.md
@@ -0,0 +1,283 @@
+---
+id: tmap
+title: "TMap"
+---
+
+A `TMap[K, V]` is a mutable map that can participate in transactions in STM.
+
+## Create a TMap
+
+Creating an empty `TMap`:
+
+```scala
+import zio._
+import zio.stm._
+
+val emptyTMap: STM[Nothing, TMap[String, Int]] = TMap.empty[String, Int]
+```
+
+Or creating a `TMap` with specified values:
+
+```scala
+import zio._
+import zio.stm._
+
+val specifiedValuesTMap: STM[Nothing, TMap[String, Int]] = TMap.make(("a", 1), ("b", 2), ("c", 3))
+```
+
+Alternatively, you can create a `TMap` by providing a collection of tuple values:
+
+```scala
+import zio._
+import zio.stm._
+
+val iterableTMap: STM[Nothing, TMap[String, Int]] = TMap.fromIterable(List(("a", 1), ("b", 2), ("c", 3)))
+```
+
+## Put a key-value pair to a TMap
+
+A new key-value pair can be added to the map in the following way:
+
+```scala
+import zio._
+import zio.stm._
+
+val putElem: UIO[TMap[String, Int]] = (for {
+ tMap <- TMap.make(("a", 1), ("b", 2))
+ _ <- tMap.put("c", 3)
+} yield tMap).commit
+```
+
+Another way of adding an entry in the map is by using `merge`:
+
+```scala
+import zio._
+import zio.stm._
+
+val mergeElem: UIO[TMap[String, Int]] = (for {
+ tMap <- TMap.make(("a", 1), ("b", 2), ("c", 3))
+ _ <- tMap.merge("c", 4)((x, y) => x * y)
+} yield tMap).commit
+```
+
+If the key is not present in the map, it behaves as a simple `put`. Otherwise, it merges the existing value with the new one using the provided function.
+
+## Remove an element from a TMap
+
+The simplest way to remove a key-value pair from `TMap` is using `delete` method that takes key:
+
+```scala
+import zio._
+import zio.stm._
+
+val deleteElem: UIO[TMap[String, Int]] = (for {
+ tMap <- TMap.make(("a", 1), ("b", 2), ("c", 3))
+ _ <- tMap.delete("b")
+} yield tMap).commit
+```
+
+Also, it is possible to remove all key-value pairs that satisfy the provided predicate:
+
+```scala
+import zio._
+import zio.stm._
+
+val removedEvenValues: UIO[TMap[String, Int]] = (for {
+ tMap <- TMap.make(("a", 1), ("b", 2), ("c", 3), ("d", 4))
+ _ <- tMap.removeIf((_, v) => v % 2 == 0)
+} yield tMap).commit
+```
+
+Or you can keep all key-value pairs that match predicate function:
+
+```scala
+import zio._
+import zio.stm._
+
+val retainedEvenValues: UIO[TMap[String, Int]] = (for {
+ tMap <- TMap.make(("a", 1), ("b", 2), ("c", 3), ("d", 4))
+ _ <- tMap.retainIf((_, v) => v % 2 == 0)
+} yield tMap).commit
+```
+
+Note that `retainIf` and `removeIf` serve the same purpose as `filter` and `filterNot`. The reason for naming them differently was to emphasize a distinction in their nature. Namely, both `retainIf` and `removeIf` are destructive - calling them can modify the collection.
+
+## Retrieve the value from a TMap
+
+Value associated with the key can be obtained as follows:
+
+```scala
+import zio._
+import zio.stm._
+
+val elemGet: UIO[Option[Int]] = (for {
+ tMap <- TMap.make(("a", 1), ("b", 2), ("c", 3))
+ elem <- tMap.get("c")
+} yield elem).commit
+```
+
+Alternatively, you can provide a default value if entry by key is not present in the map:
+
+```scala
+import zio._
+import zio.stm._
+
+val elemGetOrElse: UIO[Int] = (for {
+ tMap <- TMap.make(("a", 1), ("b", 2), ("c", 3))
+ elem <- tMap.getOrElse("d", 4)
+} yield elem).commit
+```
+
+## Transform entries of a TMap
+
+The transform function `(K, V) => (K, V)` allows computing a new value for every entry in the map:
+
+```scala
+import zio._
+import zio.stm._
+
+val transformTMap: UIO[TMap[String, Int]] = (for {
+ tMap <- TMap.make(("a", 1), ("b", 2), ("c", 3))
+ _ <- tMap.transform((k, v) => k -> v * v)
+} yield tMap).commit
+```
+
+Note that it is possible to shrink a `TMap`:
+
+```scala
+import zio._
+import zio.stm._
+
+val shrinkTMap: UIO[TMap[String, Int]] = (for {
+ tMap <- TMap.make(("a", 1), ("b", 2), ("c", 3))
+ _ <- tMap.transform((_, v) => "d" -> v)
+} yield tMap).commit
+```
+
+The entries can be mapped effectfully via `transformM`:
+
+```scala
+import zio._
+import zio.stm._
+
+val transformMTMap: UIO[TMap[String, Int]] = (for {
+ tMap <- TMap.make(("a", 1), ("b", 2), ("c", 3))
+ _ <- tMap.transformM((k, v) => STM.succeed(k -> v * v))
+} yield tMap).commit
+```
+
+The `transformValues` function `V => V` allows computing a new value for every value in the map:
+
+```scala
+import zio._
+import zio.stm._
+
+val transformValuesTMap: UIO[TMap[String, Int]] = (for {
+ tMap <- TMap.make(("a", 1), ("b", 2), ("c", 3))
+ _ <- tMap.transformValues(v => v * v)
+} yield tMap).commit
+```
+
+The values can be mapped effectfully via `transformValuesM`:
+
+```scala
+import zio._
+import zio.stm._
+
+val transformValuesMTMap: UIO[TMap[String, Int]] = (for {
+ tMap <- TMap.make(("a", 1), ("b", 2), ("c", 3))
+ _ <- tMap.transformValuesM(v => STM.succeed(v * v))
+} yield tMap).commit
+```
+
+Note that both `transform` and `transformValues` serve the same purpose as `map` and `mapValues`. The reason for naming them differently was to emphasize a distinction in their nature. Namely, both `transform` and `transformValues` are destructive - calling them can modify the collection.
+
+Folds the elements of a `TMap` using the specified associative binary operator:
+
+```scala
+import zio._
+import zio.stm._
+
+val foldTMap: UIO[Int] = (for {
+ tMap <- TMap.make(("a", 1), ("b", 2), ("c", 3))
+ sum <- tMap.fold(0) { case (acc, (_, v)) => acc + v }
+} yield sum).commit
+```
+
+The elements can be folded effectfully via `foldM`:
+
+```scala
+import zio._
+import zio.stm._
+
+val foldMTMap: UIO[Int] = (for {
+ tMap <- TMap.make(("a", 1), ("b", 2), ("c", 3))
+ sum <- tMap.foldM(0) { case (acc, (_, v)) => STM.succeed(acc + v) }
+} yield sum).commit
+```
+
+## Perform side-effect for TMap key-value pairs
+
+`foreach` is used for performing side-effect for each key-value pair in the map:
+
+```scala
+import zio._
+import zio.stm._
+
+val foreachTMap = (for {
+ tMap <- TMap.make(("a", 1), ("b", 2), ("c", 3))
+ _ <- tMap.foreach((k, v) => STM.succeed(println(s"$k -> $v")))
+} yield tMap).commit
+```
+
+## Check TMap membership
+
+Checking whether key-value pair is present in a `TMap`:
+
+```scala
+import zio._
+import zio.stm._
+
+val tMapContainsValue: UIO[Boolean] = (for {
+ tMap <- TMap.make(("a", 1), ("b", 2), ("c", 3))
+ res <- tMap.contains("a")
+} yield res).commit
+```
+
+## Convert TMap to a List
+
+List of tuples can be obtained as follows:
+
+```scala
+import zio._
+import zio.stm._
+
+val tMapTuplesList: UIO[List[(String, Int)]] = (for {
+ tMap <- TMap.make(("a", 1), ("b", 2), ("c", 3))
+ list <- tMap.toList
+} yield list).commit
+```
+
+List of keys can be obtained as follows:
+
+```scala
+import zio._
+import zio.stm._
+
+val tMapKeysList: UIO[List[String]] = (for {
+ tMap <- TMap.make(("a", 1), ("b", 2), ("c", 3))
+ list <- tMap.keys
+} yield list).commit
+```
+
+List of values can be obtained as follows:
+
+```scala
+import zio._
+import zio.stm._
+
+val tMapValuesList: UIO[List[Int]] = (for {
+ tMap <- TMap.make(("a", 1), ("b", 2), ("c", 3))
+ list <- tMap.values
+} yield list).commit
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/stm/tpriorityqueue.md b/website/versioned_docs/version-1.0.18/reference/stm/tpriorityqueue.md
new file mode 100644
index 000000000000..99344945204d
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/stm/tpriorityqueue.md
@@ -0,0 +1,70 @@
+---
+id: tpriorityqueue
+title: "TPriorityQueue"
+---
+
+A `TPriorityQueue[A]` is a mutable queue that can participate in STM transactions. A `TPriorityQueue` contains values of type `A` for which an `Ordering` is defined. Unlike a `TQueue`, `take` returns the highest priority value (the value that is first in the specified ordering) as opposed to the first value offered to the queue. The ordering of elements sharing the same priority when taken from the queue is not guaranteed.
+
+## Creating a TPriorityQueue
+
+You can create an empty `TPriorityQueue` using the `empty` constructor:
+
+```scala
+import zio._
+import zio.stm._
+
+val minQueue: STM[Nothing, TPriorityQueue[Int]] =
+ TPriorityQueue.empty
+```
+
+Notice that a `TPriorityQueue` is created with an implicit `Ordering`. By default, `take` will return the value that is first in the specified ordering. For example, in a queue of events ordered by time the earliest event would be taken first. If you want a different behavior you can use a custom `Ordering`.
+
+```scala
+val maxQueue: STM[Nothing, TPriorityQueue[Int]] =
+ TPriorityQueue.empty(Ordering[Int].reverse)
+```
+
+You can also create a `TPriorityQueue` initialized with specified elements using the `fromIterable` or `make` constructors. The `fromIterable` constructor takes an `Iterable` while the `make` constructor takes a variable arguments sequence of elements.
+
+## Offering elements to a TPriorityQueue
+
+You can offer elements to a `TPriorityQueue` using the `offer` or `offerAll` methods. The `offerAll` method is more efficient if you want to offer more than one element to the queue at the same time.
+
+```scala
+val queue: STM[Nothing, TPriorityQueue[Int]] =
+ for {
+ queue <- TPriorityQueue.empty[Int]
+ _ <- queue.offerAll(List(2, 4, 6, 3, 5, 6))
+ } yield queue
+```
+
+## Taking elements from a TPriorityQueue
+
+Take an element from a `TPriorityQueue` using the `take` method. `take` will semantically block until there is at least one value in the queue to take. You can also use `takeAll` to immediately take all values that are currently in the queue, or `takeUpTo` to immediately take up to the specified number of elements from the queue.
+
+```scala
+val sorted: STM[Nothing, Chunk[Int]] =
+ for {
+ queue <- TPriorityQueue.empty[Int]
+ _ <- queue.offerAll(List(2, 4, 6, 3, 5, 6))
+ sorted <- queue.takeAll
+ } yield sorted
+```
+
+You can also use `takeOption` method to take the first value from the queue if it exists without suspending or the `peek` method to observe the first element of the queue if it exists without removing it from the queue.
+
+Sometimes you want to take a snapshot of the current state of the queue without modifying it. For this the `toChunk` combinator or its variants `toList` or `toVector` are extremely helpful. These will return an immutable collection that consists of all of the elements currently in the queue, leaving the state of the queue unchanged.
+
+## Size of a TPriorityQueue
+
+You can check the size of the `TPriorityQueue` using the `size` method:
+
+```scala
+
+val size: STM[Nothing, Int] =
+ for {
+ queue <- TPriorityQueue.empty[Int]
+ _ <- queue.offerAll(List(2, 4, 6, 3, 5, 6))
+ size <- queue.size
+ } yield size
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/stm/tpromise.md b/website/versioned_docs/version-1.0.18/reference/stm/tpromise.md
new file mode 100644
index 000000000000..955cab30785b
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/stm/tpromise.md
@@ -0,0 +1,90 @@
+---
+id: tpromise
+title: "TPromise"
+---
+
+`TPromise` is a mutable reference that can be set exactly once and can participate in transactions in STM.
+
+## Create a TPromise
+
+Creating a `TPromise`:
+
+```scala
+import zio._
+import zio.stm._
+
+val tPromise: STM[Nothing, TPromise[String, Int]] = TPromise.make[String, Int]
+```
+
+## Complete a TPromise
+
+In order to successfully complete a `TPromise`:
+
+```scala
+import zio._
+import zio.stm._
+
+val tPromiseSucceed: UIO[TPromise[String, Int]] = for {
+ tPromise <- TPromise.make[String, Int].commit
+ _ <- tPromise.succeed(0).commit
+} yield tPromise
+```
+
+In order to fail a `TPromise` use:
+
+```scala
+import zio._
+import zio.stm._
+
+val tPromiseFail: UIO[TPromise[String, Int]] = for {
+ tPromise <- TPromise.make[String, Int].commit
+ _ <- tPromise.fail("failed").commit
+} yield tPromise
+```
+
+Alternatively, you can use `done` combinator and complete the promise by passing it `Either[E, A]`:
+
+```scala
+import zio._
+import zio.stm._
+
+val tPromiseDoneSucceed: UIO[TPromise[String, Int]] = for {
+ tPromise <- TPromise.make[String, Int].commit
+ _ <- tPromise.done(Right(0)).commit
+} yield tPromise
+
+val tPromiseDoneFail: UIO[TPromise[String, Int]] = for {
+ tPromise <- TPromise.make[String, Int].commit
+ _ <- tPromise.done(Left("failed")).commit
+} yield tPromise
+```
+
+Once the value is set, any following attempts to set it will result in `false`.
+
+## Retrieve the value of a TPromise
+
+Returns the result if the promise has already been completed or a `None` otherwise:
+
+```scala
+import zio._
+import zio.stm._
+
+val tPromiseOptionValue: UIO[Option[Either[String, Int]]] = for {
+ tPromise <- TPromise.make[String, Int].commit
+ _ <- tPromise.succeed(0).commit
+ res <- tPromise.poll.commit
+} yield res
+```
+
+Alternatively, you can wait for the promise to be completed and return the value:
+
+```scala
+import zio._
+import zio.stm._
+
+val tPromiseValue: IO[String, Int] = for {
+ tPromise <- TPromise.make[String, Int].commit
+ _ <- tPromise.succeed(0).commit
+ res <- tPromise.await.commit
+} yield res
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/stm/tqueue.md b/website/versioned_docs/version-1.0.18/reference/stm/tqueue.md
new file mode 100644
index 000000000000..1a41d6297e47
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/stm/tqueue.md
@@ -0,0 +1,125 @@
+---
+id: tqueue
+title: "TQueue"
+---
+
+A `TQueue[A]` is a mutable queue that can participate in transactions in STM.
+
+## Create a TQueue
+
+Creating an empty bounded `TQueue` with specified capacity:
+
+```scala
+import zio._
+import zio.stm._
+
+val tQueueBounded: STM[Nothing, TQueue[Int]] = TQueue.bounded[Int](5)
+```
+
+Creating an empty unbounded `TQueue`:
+
+```scala
+import zio._
+import zio.stm._
+
+val tQueueUnbounded: STM[Nothing, TQueue[Int]] = TQueue.unbounded[Int]
+```
+
+## Put element(s) in a TQueue
+
+In order to put an element to a `TQueue`:
+
+```scala
+import zio._
+import zio.stm._
+
+val tQueueOffer: UIO[TQueue[Int]] = (for {
+ tQueue <- TQueue.bounded[Int](3)
+ _ <- tQueue.offer(1)
+} yield tQueue).commit
+```
+
+The specified element will be successfully added to a queue if the queue is not full.
+It will wait for an empty slot in the queue otherwise.
+
+Alternatively, you can provide a list of elements:
+
+```scala
+import zio._
+import zio.stm._
+
+val tQueueOfferAll: UIO[TQueue[Int]] = (for {
+ tQueue <- TQueue.bounded[Int](3)
+ _ <- tQueue.offerAll(List(1, 2))
+} yield tQueue).commit
+```
+
+## Retrieve element(s) from a TQueue
+
+The first element of the queue can be obtained as follows:
+
+```scala
+import zio._
+import zio.stm._
+
+val tQueueTake: UIO[Int] = (for {
+ tQueue <- TQueue.bounded[Int](3)
+ _ <- tQueue.offerAll(List(1, 2))
+ res <- tQueue.take
+} yield res).commit
+```
+
+In case the queue is empty it will block execution waiting for the element you're asking for.
+
+This behavior can be avoided by using `poll` method that will return an element if exists or `None` otherwise:
+
+```scala
+import zio._
+import zio.stm._
+
+val tQueuePoll: UIO[Option[Int]] = (for {
+ tQueue <- TQueue.bounded[Int](3)
+ res <- tQueue.poll
+} yield res).commit
+```
+
+Retrieving first `n` elements of the queue:
+
+```scala
+import zio._
+import zio.stm._
+
+val tQueueTakeUpTo: UIO[List[Int]] = (for {
+ tQueue <- TQueue.bounded[Int](4)
+ _ <- tQueue.offerAll(List(1, 2))
+ res <- tQueue.takeUpTo(3)
+} yield res).commit
+```
+
+All elements of the queue can be obtained as follows:
+
+```scala
+import zio._
+import zio.stm._
+
+val tQueueTakeAll: UIO[List[Int]] = (for {
+ tQueue <- TQueue.bounded[Int](4)
+ _ <- tQueue.offerAll(List(1, 2))
+ res <- tQueue.takeAll
+} yield res).commit
+```
+
+## Size of a TQueue
+
+The number of elements in the queue can be obtained as follows:
+
+```scala
+import zio._
+import zio.stm._
+
+val tQueueSize: UIO[Int] = (for {
+ tQueue <- TQueue.bounded[Int](3)
+ _ <- tQueue.offerAll(List(1, 2))
+ size <- tQueue.size
+} yield size).commit
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/stm/treentrantlock.md b/website/versioned_docs/version-1.0.18/reference/stm/treentrantlock.md
new file mode 100644
index 000000000000..cea76e57f010
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/stm/treentrantlock.md
@@ -0,0 +1,150 @@
+---
+id: treentrantlock
+title: "TReentrantLock"
+---
+
+A TReentrantLock allows safe concurrent access to some mutable state efficiently, allowing multiple fibers to read the
+state (because that is safe to do) but only one fiber to modify the state (to prevent data corruption). Also, even though
+the TReentrantLock is implemented using STM, reads and writes can be committed, allowing this to be used as a building
+block for solutions that expose purely ZIO effects and internally allow locking on more than one piece of state in a
+simple and composable way (thanks to STM).
+
+A `TReentrantLock` is a _reentrant_ read/write lock. A reentrant lock is one where a fiber can claim the lock multiple
+times without blocking on itself. It's useful in situations where it's not easy to keep track of whether you have already
+grabbed a lock. If a lock is non re-entrant you could grab the lock, then block when you go to grab it again, effectively
+causing a deadlock.
+
+## Semantics
+
+This lock allows both readers and writers to reacquire read or write locks with reentrancy guarantees. Readers are not
+allowed until all write locks held by the writing fiber have been released. Writers are not allowed unless there are no
+other locks or the fiber wanting to hold a write lock already has a read lock and there are no other fibers holding a
+read lock.
+
+This lock also allows upgrading from a read lock to a write lock (automatically) and downgrading
+from a write lock to a read lock (automatically provided that you upgraded from a read lock to a write lock).
+
+## Creating a reentrant lock
+
+```scala
+import zio.stm._
+
+val reentrantLock = TReentrantLock.make
+```
+
+## Acquiring a read lock
+
+```scala
+import zio.stm._
+
+val program =
+ (for {
+ lock <- TReentrantLock.make
+ _ <- lock.acquireRead
+ rst <- lock.readLocked // lock is read-locked once transaction completes
+ wst <- lock.writeLocked // lock is not write-locked
+ } yield rst && !wst).commit
+```
+
+## Acquiring a write lock
+
+```scala
+import zio._
+import zio.stm._
+
+val writeLockProgram: UIO[Boolean] =
+ (for {
+ lock <- TReentrantLock.make
+ _ <- lock.acquireWrite
+ wst <- lock.writeLocked // lock is write-locked once transaction completes
+ rst <- lock.readLocked // lock is not read-locked
+ } yield !rst && wst).commit
+```
+
+## Multiple fibers can hold read locks
+
+```scala
+import zio._
+import zio.stm._
+
+val multipleReadLocksProgram: UIO[(Int, Int)] = for {
+ lock <- TReentrantLock.make.commit
+ fiber0 <- lock.acquireRead.commit.fork // fiber0 acquires a read-lock
+ currentState1 <- fiber0.join // 1 read lock held
+ fiber1 <- lock.acquireRead.commit.fork // fiber1 acquires a read-lock
+ currentState2 <- fiber1.join // 2 read locks held
+} yield (currentState1, currentState2)
+```
+
+## Upgrading and downgrading locks
+
+If your fiber already has a read lock then it is possible to upgrade the lock to a write lock provided that no other
+reader (other than your fiber) holds a lock.
+```scala
+import zio._
+import zio.stm._
+
+val upgradeDowngradeProgram: UIO[(Boolean, Boolean, Boolean, Boolean)] = for {
+ lock <- TReentrantLock.make.commit
+ _ <- lock.acquireRead.commit
+ _ <- lock.acquireWrite.commit // upgrade
+ isWriteLocked <- lock.writeLocked.commit // now write-locked
+ isReadLocked <- lock.readLocked.commit // and read-locked
+ _ <- lock.releaseWrite.commit // downgrade
+ isWriteLockedAfter <- lock.writeLocked.commit // no longer write-locked
+ isReadLockedAfter <- lock.readLocked.commit // still read-locked
+} yield (isWriteLocked, isReadLocked, isWriteLockedAfter, isReadLockedAfter)
+```
+
+## Acquiring a write lock in a contentious scenario
+
+A write lock can be acquired immediately only if one of the following conditions are satisfied:
+1. There are no other holders of the lock
+2. The current fiber is already holding a read lock and there are no other parties holding a read lock
+
+If either of the above scenarios are untrue then attempting to acquire a write lock will semantically block the fiber.
+Here is an example which demonstrates that a write lock can only be obtained by the fiber once all other readers (except
+the fiber attempting to acquire the write lock) have released their hold on the (read or write) lock.
+
+```scala
+import zio._
+import zio.clock._
+import zio.console._
+import zio.stm._
+import zio.duration._
+
+val writeLockDemoProgram: URIO[Console with Clock, Unit] = for {
+ l <- TReentrantLock.make.commit
+ _ <- putStrLn("Beginning test").orDie
+ f1 <- (l.acquireRead.commit *> ZIO.sleep(5.seconds) *> l.releaseRead.commit).fork
+ f2 <- (l.acquireRead.commit *> putStrLn("read-lock").orDie *> l.acquireWrite.commit *> putStrLn("I have upgraded!").orDie).fork
+ _ <- (f1 zip f2).join
+} yield ()
+```
+
+Here fiber `f1` acquires a read lock and sleeps for 5 seconds before releasing it. Fiber `f2` also acquires a read
+lock and immediately tries to acquire a write lock. However, `f2` will have to semantically block for approximately 5
+seconds to obtain a write lock because `f1` will release its hold on the lock and only then can `f2` acquire a hold for
+the write lock.
+
+## Safer methods (`readLock` and `writeLock`)
+
+Using `acquireRead`, `acquireWrite`, `releaseRead` and `releaseWrite` should be avoided for simple use cases relying on
+methods like `readLock` and `writeLock` instead. `readLock` and `writeLock` automatically acquire and release the lock
+thanks to the `Managed` construct. The program described below is a safer version of the program above and ensures we
+don't hold onto any resources once we are done using the reentrant lock.
+
+```scala
+import zio._
+import zio.clock._
+import zio.console._
+import zio.stm._
+import zio.duration._
+
+val saferProgram: URIO[Console with Clock, Unit] = for {
+ lock <- TReentrantLock.make.commit
+ f1 <- lock.readLock.use_(ZIO.sleep(5.seconds) *> putStrLn("Powering down").orDie).fork
+ f2 <- lock.readLock.use_(lock.writeLock.use_(putStrLn("Huzzah, writes are mine").orDie)).fork
+ _ <- (f1 zip f2).join
+} yield ()
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/stm/tref.md b/website/versioned_docs/version-1.0.18/reference/stm/tref.md
new file mode 100644
index 000000000000..0f8a4ed94a74
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/stm/tref.md
@@ -0,0 +1,178 @@
+---
+id: tref
+title: "TRef"
+---
+
+A `TRef[A]` is a mutable reference to an immutable value, which can participate in transactions in STM. The mutable reference can be retrieved and set from within transactions, with strong guarantees for atomicity, consistency, and isolation from other transactions.
+
+`TRef` provides the low-level machinery to create transactions from modifications of STM memory.
+
+## Create a TRef
+
+Creating a `TRef` inside a transaction:
+
+```scala
+import zio._
+import zio.stm._
+
+val createTRef: STM[Nothing, TRef[Int]] = TRef.make(10)
+```
+
+Or creating a `TRef` inside a transaction, and immediately committing the transaction, which allows you to store and pass along the reference.
+
+```scala
+import zio._
+import zio.stm._
+
+val commitTRef: UIO[TRef[Int]] = TRef.makeCommit(10)
+```
+
+## Retrieve the value out of a TRef
+
+Retrieving the value in a single transaction:
+
+```scala
+import zio._
+import zio.stm._
+
+val retrieveSingle: UIO[Int] = (for {
+ tRef <- TRef.make(10)
+ value <- tRef.get
+} yield value).commit
+```
+
+Or on multiple transactional statements:
+
+```scala
+import zio._
+import zio.stm._
+
+val retrieveMultiple: UIO[Int] = for {
+ tRef <- TRef.makeCommit(10)
+ value <- tRef.get.commit
+} yield value
+```
+
+## Set a value to a TRef
+
+Setting the value overwrites the existing content of a reference.
+
+Setting the value in a single transaction:
+
+```scala
+import zio._
+import zio.stm._
+
+val setSingle: UIO[Int] = (for {
+ tRef <- TRef.make(10)
+ _ <- tRef.set(20)
+ nValue <- tRef.get
+} yield nValue).commit
+```
+
+Or on multiple transactions:
+
+```scala
+import zio._
+import zio.stm._
+
+val setMultiple: UIO[Int] = for {
+ tRef <- TRef.makeCommit(10)
+ nValue <- tRef.set(20).flatMap(_ => tRef.get).commit
+} yield nValue
+```
+
+## Update the value of the TRef
+
+The update function `A => A` allows computing a new value for the `TRef` using the old value.
+
+Updating the value in a single transaction:
+
+```scala
+import zio._
+import zio.stm._
+
+val updateSingle: UIO[Int] = (for {
+ tRef <- TRef.make(10)
+ nValue <- tRef.updateAndGet(_ + 20)
+} yield nValue).commit
+```
+
+Or on multiple transactions:
+
+```scala
+import zio._
+import zio.stm._
+
+val updateMultiple: UIO[Int] = for {
+ tRef <- TRef.makeCommit(10)
+ nValue <- tRef.updateAndGet(_ + 20).commit
+} yield nValue
+```
+
+## Modify the value of the TRef
+
+The modify function `A => (B, A): B` works similarly to `update`, but allows extracting some information (the `B`) out of the update operation.
+
+Modify the value in a single transaction:
+
+```scala
+import zio._
+import zio.stm._
+
+val modifySingle: UIO[(String, Int)] = (for {
+ tRef <- TRef.make(10)
+ mValue <- tRef.modify(v => ("Zee-Oh", v + 10))
+ nValue <- tRef.get
+} yield (mValue, nValue)).commit
+```
+
+Or on multiple transactions:
+
+```scala
+import zio._
+import zio.stm._
+
+val modifyMultiple: UIO[(String, Int)] = for {
+ tRef <- TRef.makeCommit(10)
+ tuple2 <- tRef.modify(v => ("Zee-Oh", v + 10)).zip(tRef.get).commit
+} yield tuple2
+```
+
+## Example usage
+
+Here is a scenario where we use a `TRef` to hand-off a value between two `Fiber`s
+
+```scala
+import zio._
+import zio.stm._
+
+def transfer(tSender: TRef[Int],
+ tReceiver: TRef[Int],
+ amount: Int): UIO[Int] = {
+ STM.atomically {
+ for {
+ _ <- tSender.get.retryUntil(_ >= amount)
+ _ <- tSender.update(_ - amount)
+ nAmount <- tReceiver.updateAndGet(_ + amount)
+ } yield nAmount
+ }
+}
+
+val transferredMoney: UIO[String] = for {
+ tSender <- TRef.makeCommit(50)
+ tReceiver <- TRef.makeCommit(100)
+ _ <- transfer(tSender, tReceiver, 50).fork
+ _ <- tSender.get.retryUntil(_ == 0).commit
+ tuple2 <- tSender.get.zip(tReceiver.get).commit
+ (senderBalance, receiverBalance) = tuple2
+} yield s"sender: $senderBalance & receiver: $receiverBalance"
+```
+
+In this example, we create and commit two transactional references for the sender and receiver to be able to extract their values.
+In the following step, we create an atomic transaction that updates both accounts only when there is sufficient balance available in the sender account. In the end, we fork to run it asynchronously.
+On the running fiber, we suspend until the sender balance changes — in this case, until it reaches `zero`. Finally, we extract the new values out of the accounts and combine them into one result.
+
+## ZTRef
+
+Like `Ref[A]`, `TRef[A]` is actually a type alias for `ZTRef[+EA, +EB, -A, +B]`, a polymorphic, transactional reference and supports all the transformations that `ZRef` does. For more discussion regarding polymorphic references see the documentation on [`ZRef`](../concurrency/ref.md).
\ No newline at end of file
diff --git a/website/versioned_docs/version-1.0.18/reference/stm/tsemaphore.md b/website/versioned_docs/version-1.0.18/reference/stm/tsemaphore.md
new file mode 100644
index 000000000000..91ce128f049e
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/stm/tsemaphore.md
@@ -0,0 +1,115 @@
+---
+id: tsemaphore
+title: "TSemaphore"
+---
+
+`TSemaphore` is a semaphore with transactional semantics that can be used to control access to a common resource. It
+holds a certain number of permits, and permits may be acquired or released.
+
+## Create a TSemaphore
+Creating a `TSemaphore` with 10 permits:
+```scala
+import zio._
+import zio.stm._
+
+val tSemaphoreCreate: STM[Nothing, TSemaphore] = TSemaphore.make(10L)
+```
+
+## Acquire a permit
+Acquiring a permit reduces the number of remaining permits that the `TSemaphore` contains. Acquiring a permit is done
+when a user wants to access a common shared resource:
+```scala
+import zio._
+import zio.stm._
+
+val tSemaphoreAcq: STM[Nothing, TSemaphore] = for {
+ tSem <- TSemaphore.make(2L)
+ _ <- tSem.acquire
+} yield tSem
+
+tSemaphoreAcq.commit
+```
+Note that if you try to acquire a permit when there are no more remaining permits in the semaphore then execution will be blocked semantically until a permit is ready to be acquired. Note that semantic blocking does not block threads and the STM transaction will only be retried when a permit is released.
+
+## Release a permit
+Once you have finished accessing the shared resource, you must release your permit so other parties can access the
+shared resource:
+```scala
+import zio._
+import zio.stm._
+
+val tSemaphoreRelease: STM[Nothing, TSemaphore] = for {
+ tSem <- TSemaphore.make(1L)
+ _ <- tSem.acquire
+ _ <- tSem.release
+} yield tSem
+
+tSemaphoreRelease.commit
+```
+
+## Retrieve available permits
+You can query for the remaining amount of permits in the TSemaphore by using `available`:
+```scala
+import zio._
+import zio.stm._
+
+val tSemaphoreAvailable: STM[Nothing, Long] = for {
+ tSem <- TSemaphore.make(2L)
+ _ <- tSem.acquire
+ cap <- tSem.available
+} yield cap
+
+tSemaphoreAvailable.commit
+```
+The above code creates a TSemaphore with two permits and acquires one permit without releasing it. Here, `available`
+will report that there is a single permit left.
+
+## Execute an arbitrary STM action with automatic acquire and release
+You can choose to execute any arbitrary STM action that requires acquiring and releasing a permit on the `TSemaphore` as part
+of the same transaction. Rather than doing:
+```scala
+import zio._
+import zio.stm._
+
+def yourSTMAction: STM[Nothing, Unit] = STM.unit
+
+val tSemaphoreWithoutPermit: STM[Nothing, Unit] =
+ for {
+ sem <- TSemaphore.make(1L)
+ _ <- sem.acquire
+ a <- yourSTMAction
+ _ <- sem.release
+ } yield a
+
+tSemaphoreWithoutPermit.commit
+```
+You can simply use `withPermit` instead:
+```scala
+import zio._
+import zio.stm._
+
+val tSemaphoreWithPermit: STM[Nothing, Unit] = for {
+ sem <- TSemaphore.make(1L)
+ a <- sem.withPermit(yourSTMAction)
+} yield a
+
+tSemaphoreWithPermit.commit
+```
+
+It is considered best practice to use `withPermit` over using an `acquire` and a `release` directly unless dealing with more complicated use cases that involve multiple STM actions where `acquire` is not at the start and `release` is not at the end of the STM transaction.
+
+## Acquire and release multiple permits
+It is possible to acquire and release multiple permits at a time using `acquireN` and `releaseN`:
+```scala
+import zio._
+import zio.stm._
+
+val tSemaphoreAcquireNReleaseN: STM[Nothing, Boolean] = for {
+ sem <- TSemaphore.make(3L)
+ _ <- sem.acquireN(3L)
+ cap <- sem.available
+ _ <- sem.releaseN(3L)
+} yield cap == 0
+
+tSemaphoreAcquireNReleaseN.commit
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/stm/tset.md b/website/versioned_docs/version-1.0.18/reference/stm/tset.md
new file mode 100644
index 000000000000..ca670d90207d
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/stm/tset.md
@@ -0,0 +1,265 @@
+---
+id: tset
+title: "TSet"
+---
+
+A `TSet[A]` is a mutable set that can participate in transactions in STM.
+
+## Create a TSet
+
+Creating an empty `TSet`:
+
+```scala
+import zio._
+import zio.stm._
+
+val emptyTSet: STM[Nothing, TSet[Int]] = TSet.empty[Int]
+```
+
+Or creating a `TSet` with specified values:
+
+```scala
+import zio._
+import zio.stm._
+
+val specifiedValuesTSet: STM[Nothing, TSet[Int]] = TSet.make(1, 2, 3)
+```
+
+Alternatively, you can create a `TSet` by providing a collection of values:
+
+```scala
+import zio._
+import zio.stm._
+
+val iterableTSet: STM[Nothing, TSet[Int]] = TSet.fromIterable(List(1, 2, 3))
+```
+
+In case there are duplicates provided, the last one is taken.
+
+## Put an element to a TSet
+
+The new element can be added to the set in the following way:
+
+```scala
+import zio._
+import zio.stm._
+
+val putElem: UIO[TSet[Int]] = (for {
+ tSet <- TSet.make(1, 2)
+ _ <- tSet.put(3)
+} yield tSet).commit
+```
+
+In case the set already contains the element, no modification will happen.
+
+## Remove an element from a TSet
+
+The simplest way to remove an element from `TSet` is using `delete` method:
+
+```scala
+import zio._
+import zio.stm._
+
+val deleteElem: UIO[TSet[Int]] = (for {
+ tSet <- TSet.make(1, 2, 3)
+ _ <- tSet.delete(1)
+} yield tSet).commit
+```
+
+Also, it is possible to remove every element that satisfies provided predicate:
+
+```scala
+import zio._
+import zio.stm._
+
+val removedEvenElems: UIO[TSet[Int]] = (for {
+ tSet <- TSet.make(1, 2, 3, 4)
+ _ <- tSet.removeIf(_ % 2 == 0)
+} yield tSet).commit
+```
+
+Or you can keep all the elements that match predicate function:
+
+```scala
+import zio._
+import zio.stm._
+
+val retainedEvenElems: UIO[TSet[Int]] = (for {
+ tSet <- TSet.make(1, 2, 3, 4)
+ _ <- tSet.retainIf(_ % 2 == 0)
+} yield tSet).commit
+```
+
+Note that `retainIf` and `removeIf` serve the same purpose as `filter` and `filterNot`. The reason for naming them differently was to emphasize a distinction in their nature. Namely, both `retainIf` and `removeIf` are destructive - calling them can modify the collection.
+
+## Union of a TSet
+
+Union of the sets A and B represents the set of elements belonging to set A or set B, or both.
+Using `A union B` method modifies set `A`.
+
+```scala
+import zio._
+import zio.stm._
+
+// unionTSet = {1, 2, 3, 4, 5, 6}
+val unionTSet: UIO[TSet[Int]] = (for {
+ tSetA <- TSet.make(1, 2, 3, 4)
+ tSetB <- TSet.make(3, 4, 5, 6)
+ _ <- tSetA.union(tSetB)
+} yield tSetA).commit
+```
+
+## Intersection of a TSet
+
+The intersection of the sets A and B is the set of elements belonging to both A and B.
+Using `A intersect B` method modifies set `A`.
+
+```scala
+import zio._
+import zio.stm._
+
+// intersectionTSet = {3, 4}
+val intersectionTSet: UIO[TSet[Int]] = (for {
+ tSetA <- TSet.make(1, 2, 3, 4)
+ tSetB <- TSet.make(3, 4, 5, 6)
+ _ <- tSetA.intersect(tSetB)
+} yield tSetA).commit
+```
+
+## Difference of a TSet
+
+The difference between sets A and B is the set containing elements of set A but not in B.
+Using `A diff B` method modifies set `A`.
+
+```scala
+import zio._
+import zio.stm._
+
+// diffTSet = {1, 2}
+val diffTSet: UIO[TSet[Int]] = (for {
+ tSetA <- TSet.make(1, 2, 3, 4)
+ tSetB <- TSet.make(3, 4, 5, 6)
+ _ <- tSetA.diff(tSetB)
+} yield tSetA).commit
+```
+
+## Transform elements of a TSet
+
+The transform function `A => A` allows computing a new value for every element in the set:
+
+```scala
+import zio._
+import zio.stm._
+
+val transformTSet: UIO[TSet[Int]] = (for {
+ tSet <- TSet.make(1, 2, 3, 4)
+ _ <- tSet.transform(a => a * a)
+} yield tSet).commit
+```
+
+Note that it is possible to shrink a `TSet`:
+
+```scala
+import zio._
+import zio.stm._
+
+val shrinkTSet: UIO[TSet[Int]] = (for {
+ tSet <- TSet.make(1, 2, 3, 4)
+ _ <- tSet.transform(_ => 1)
+} yield tSet).commit
+```
+The resulting set in the example above has only one element.
+
+Note that `transform` serves the same purpose as `map`. The reason for naming it differently was to emphasize a distinction in its nature. Namely, `transform` is destructive - calling it can modify the collection.
+
+The elements can be mapped effectfully via `transformM`:
+
+```scala
+import zio._
+import zio.stm._
+
+val transformMTSet: UIO[TSet[Int]] = (for {
+ tSet <- TSet.make(1, 2, 3, 4)
+ _ <- tSet.transformM(a => STM.succeed(a * a))
+} yield tSet).commit
+```
+
+Folds the elements of a `TSet` using the specified associative binary operator:
+
+```scala
+import zio._
+import zio.stm._
+
+val foldTSet: UIO[Int] = (for {
+ tSet <- TSet.make(1, 2, 3, 4)
+ sum <- tSet.fold(0)(_ + _)
+} yield sum).commit
+```
+
+The elements can be folded effectfully via `foldM`:
+
+```scala
+import zio._
+import zio.stm._
+
+val foldMTSet: UIO[Int] = (for {
+ tSet <- TSet.make(1, 2, 3, 4)
+ sum <- tSet.foldM(0)((acc, el) => STM.succeed(acc + el))
+} yield sum).commit
+```
+
+## Perform side-effect for TSet elements
+
+`foreach` is used for performing a side-effect for each element in the set:
+
+```scala
+import zio._
+import zio.stm._
+
+val foreachTSet = (for {
+ tSet <- TSet.make(1, 2, 3, 4)
+ _ <- tSet.foreach(a => STM.succeed(println(a)))
+} yield tSet).commit
+```
+
+## Check TSet membership
+
+Checking whether the element is present in a `TSet`:
+
+```scala
+import zio._
+import zio.stm._
+
+val tSetContainsElem: UIO[Boolean] = (for {
+ tSet <- TSet.make(1, 2, 3, 4)
+ res <- tSet.contains(3)
+} yield res).commit
+```
+
+## Convert TSet to a List
+
+List of set elements can be obtained as follows:
+
+```scala
+import zio._
+import zio.stm._
+
+val tSetToList: UIO[List[Int]] = (for {
+ tSet <- TSet.make(1, 2, 3, 4)
+ list <- tSet.toList
+} yield list).commit
+```
+
+## Size of a TSet
+
+Set's size can be obtained as follows:
+
+```scala
+import zio._
+import zio.stm._
+
+val tSetSize: UIO[Int] = (for {
+ tSet <- TSet.make(1, 2, 3, 4)
+ size <- tSet.size
+} yield size).commit
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/stream/index.md b/website/versioned_docs/version-1.0.18/reference/stream/index.md
new file mode 100644
index 000000000000..75f56e7bec11
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/stream/index.md
@@ -0,0 +1,238 @@
+---
+id: index
+title: "Introduction"
+---
+
+
+## Introduction
+
+The primary goal of a streaming library is to introduce **a high-level API that abstracts the mechanism of reading and writing operations using data sources and destinations**.
+
+A streaming library helps us to concentrate on the business logic and separates us from low-level implementation details.
+
+There are lots of examples of streaming that people might not recognize, this is a common problem especially for beginners. A beginner might say "I don't need a streaming library. Why should I use that?". It's because they don't see streams. Once we use a streaming library, we start to see streams everywhere but until then we don't understand where they are.
+
+Before diving into ZIO Streams, let's list some use cases of a streaming solution and see why we would want to program in a streaming fashion:
+
+- **Files** — An old-school API for interacting with a file typically has very low-level operators like "Open a file, get me an InputStream, and a method to read the next chunk from that InputStream, and also another method to close the file". Although that is a very low-level imperative API, there is a way to see files as streams of bytes.
+
+- **Sockets** — Instead of working with low-level APIs, we can use streams to provide a stream-based implementation of server socket that hides the low-level implementation details of sockets. We could model socket communication as a function from a stream of bytes to a stream of bytes. We can view the input of that socket as being a stream, and its output as being another stream.
+
+- **Event-Sourcing** — In this day and age, it is common to want to build event-sourced applications which work on events or messages in a queuing system like Kafka or AMQP systems and so forth. The foundation of this architecture is streaming. Also, they are useful when we want to do a lot of data analytics and so forth.
+
+- **UI Applications** — Streams are the foundation of almost every single modern UI application. Every time we click on something, under the hood that is an event. We can use low-level APIs like subscribing callbacks to the user events but also we can view those as streams of events. So we can model subscriptions as streams of events in UI applications.
+
+- **HTTP Server** — An HTTP Server can be viewed as a stream. We have a stream of requests that are being transformed to a stream of responses; a function from a stream of bytes that go to a stream of bytes.
+
+So streams are everywhere. We can see all of these different things as being streams. Everywhere we look we can find streams. Basically, all data-driven applications, almost all data-driven applications can benefit from streams.
+
+## Motivation
+
+Assume, we would like to take a list of numbers and grab all the prime numbers and then do some more hard work on each of these prime numbers. We can do it using `ZIO.foreachParN` and `ZIO.filterPar` operators like this:
+
+```scala
+def isPrime(number: Int): Task[Boolean] = Task.succeed(???)
+def moreHardWork(i: Int): Task[Boolean] = Task.succeed(???)
+
+val numbers = 1 to 1000
+
+for {
+ primes <- ZIO.filterPar(numbers)(isPrime)
+ _ <- ZIO.foreachParN(20)(primes)(moreHardWork)
+} yield ()
+```
+
+This processes the list in parallel and filters all the prime numbers, then takes all the prime numbers and does some more hard work on them.
+
+There are two problems with this example:
+
+- **High Latency** — We are not getting any pipelining, we are doing batch processing. We need to wait for the entire list to be processed in the first step before we can continue to the next step. This can lead to a pretty severe loss of performance.
+
+- **Limited Memory** — We need to keep the entire list in memory as we process it and this doesn't work if we are working with an infinite data stream.
+
+With ZIO stream we can change this program to the following code:
+
+```scala
+def prime(number: Int): Task[(Boolean, Int)] = Task.succeed(???)
+
+ZStream.fromIterable(numbers)
+ .mapMParUnordered(20)(prime(_))
+ .filter(_._1).map(_._2)
+ .mapMParUnordered(20)(moreHardWork(_))
+```
+
+We converted the list of numbers into a `ZStream` using `ZStream.fromIterable`, then we mapped it in parallel, twenty items at a time, and then we performed the hard work problem, twenty items at a time. This is a pipeline, and this easily works for an infinite list.
+
+One might ask, "Okay, I can get the pipelining by using fibers and queues. So why should I use ZIO streams?". It is extremely tempting to write the pipeline by hand like this: we can create a bunch of queues and fibers, then we have fibers that copy information between the queues and perform the processing concurrently. It ends up something like this:
+
+```scala
+def writeToInput(q: Queue[Int]): Task[Unit] = Task.succeed(???)
+def processBetweenQueues(from: Queue[Int], to: Queue[Int]): Task[Unit] = Task.succeed(???)
+def printElements(q: Queue[Int]): Task[Unit] = Task.succeed(???)
+
+for {
+ input <- Queue.bounded[Int](16)
+ middle <- Queue.bounded[Int](16)
+ output <- Queue.bounded[Int](16)
+ _ <- writeToInput(input).fork
+ _ <- processBetweenQueues(input, middle).fork
+ _ <- processBetweenQueues(middle, output).fork
+ _ <- printElements(output).fork
+} yield ()
+```
+
+We created a bunch of queues for buffering source, destination elements, and intermediate results.
+
+There are some problems with this solution. As fibers are low-level concurrency tools, using them to create a data pipeline is not straightforward. We need to handle interruptions properly. We should care about resources and prevent them from leaking. We need to shut down the pipeline in the right way by waiting for queues to be drained.
+
+Although fibers are very efficient and more performant than threads, they are advanced concurrency tools. So it is better to avoid using them to do manual pipelining. Instead, we can use ZIO streams:
+
+```scala
+def generateElement: Task[Int] = Task.succeed(???)
+def process(i: Int): Task[Int] = Task.succeed(???)
+def printElem(i: Int): Task[Unit] = Task.succeed(???)
+
+ZStream
+ .repeatEffect(generateElement)
+ .buffer(16)
+ .mapM(process(_))
+ .buffer(16)
+ .mapM(process(_))
+ .buffer(16)
+ .tap(printElem(_))
+```
+
+We have a buffer in between each step. We performed our computations in between. This is everything we need to get that pipelining in the same fashion that it looked before.
+
+## Why Streams?
+
+ZIO stream has super compelling advantages of using high-level streams. ZIO solution to streaming solves a lot of common streaming pain points. It shines in the following topics:
+
+### 1. High-level and Declarative
+
+This means in a very short snippet of a fluent code we can solve very outrageously complicated problems with just a few simple lines.
+
+### 2. Asynchronous and Non-blocking
+
+They're reactive streams, they don't block threads. They're super-efficient and very scalable. We can minimize our application latency and increase its performance. We can avoid wasting precious thread resources by using non-blocking and asynchronous ZIO streams.
+
+### 3. Concurrency and Parallelism
+
+Streams are concurrent. They have a lot of concurrent operators. All the operations on them are safe to use in presence of concurrency. And also just like ZIO gives us parallel operators with everything, there are lots of parallel operators. We can use the parallel version of operators, like `mapMPar`, `flatMapPar`.
+
+Parallel operators allow us to fully saturate and utilize all CPU cores of our machine. If we need to do bulk processing on a lot of data and use all the cores on our machine, so we can speed up the process by using these parallel operators.
+
+### 4. Resource Safety
+
+Resource safety is not a simple thing to guarantee. Assume when we have several streams, some of them are sockets and files, some of them are API calls and database queries. When we have all these streams, and we are transforming and combining them, and we are timing some out, and also some of them are doing concurrent merges; what happens when things go wrong in one part of that stream graph? ZIO streams provides the guarantee that it will never leak resources.
+
+So when streams have to be terminated for error or timeout or interruption reasons or whatever, ZIO will always safely shutdown and release the resources associated with that stream usage.
+
+We don't have to worry about resource management anymore. We can work at high-level and just declaratively describe our stream graph and then ZIO will handle the tricky job of executing that and taking care to make sure that no resources are leaked in an event of something bad happens or even just a timeout, or interruption, or just we are done with a result. So resources are always safely released without any leaks.
+
+### 5. High Performance and Efficiency
+
+When we are doing an I/O job, the granularity of data is not at the level of a single byte. For example, we never read or write a single element from/to a file descriptor. We always use multiple elements. So when we are doing an I/O operation it is a poor practice to read/write element by element and this decreases the performance of our program.
+
+In order to achieve high efficiency, ZIO stream implicitly chunks everything, but it still presents us with a nice API that is at the level of every single element. So we can always deal with streams of individual elements even though behind-the-scenes ZIO is doing some chunking to make that performant. This is one of the tricks that enables ZIO streams to have such great performance.
+
+ZIO Streams are working at the level of chunks. Every time we are working with ZIO streams, we are also working with chunks implicitly. So there are no streams with individual elements. Streams always use chunks. Every time we pull an element out of a ZIO stream, we end up pulling a chunk of elements under the hood.
+
+### 6. Seamless Integration with ZIO
+
+ZIO stream has a powerful seamless integrated support for ZIO. It uses `ZManaged`, `Schedule`, and any other powerful data types in ZIO. So we can stay within the same ecosystem and get all its significant benefits.
+
+### 7. Back-Pressure
+
+We get back-pressuring for free. With ZIO streams it is actually not a back-pressuring, but it is equivalent. In push-based streams like Akka Streams, streams are push-based; when an element comes in, it is pushed downward in the pipeline. That is what leads to the need for back-pressuring. Back-pressuring makes the push-based stream much more complicated than it needs to be.
+
+Push-based streams are good at splitting streams because we have one element, and we can push it to two different places. That is nice and elegant, but they're terrible at merging streams and that is because you end up needing to use queues, and then we run into a problem. In the case of using queues, we need back-pressuring, which leads to a complicated architecture.
+
+In ZIO when we merge streams, ZIO uses pull-based streams. They need minimal computation because we pull elements at the end of our data pipeline when needed. When the sink asks for one element, then that ripples all the way back through the very edges of the system.
+
+So when we pull one element at the end, no additional computation takes place until we pull the next element or decide that we are done pulling, and we close the stream. It causes the minimum amount of computation necessary to produce the result.
+
+Using the pull-based mechanism we have no producers, and it prevents producing more events than necessary. So ZIO streams does not need back-pressure even though it provides a form of that because it is lazy and on-demand and uses pull-based streams.
+
+So ZIO stream gives us the benefits of back-pressuring, but in a cleaner conceptual model that is very efficient and does not require all these levels of buffering.
+
+### 8. Infinite Data using Finite Memory
+
+Streams let us work on infinite data in a finite amount of memory. When we are writing streaming logic, we don't have to worry about how much data we are ultimately going to process.
+
+That is because we are just building a workflow, a description of the processing. We are not manually loading up everything into memory, into a list, and then doing our processing on a list. That doesn't work very well because we can only fit a finite amount of memory into our computer at one time.
+
+ZIO streams enable us to concentrate just on our business problem, and not on how much memory this program is going to consume. So we can write these computations that work over streams that are totally infinite but in a finite amount of memory, and ZIO handles that for us.
+
+Assume we have the following code. This is a snippet of a code that reads a file into a string and splits the string into new lines, then iterates over lines and prints them out. It is pretty simple and easy to read and also it is simple to understand:
+
+```scala
+for (line <- FileUtils.readFileToString(new File("file.txt")).split('\n'))
+ println(line)
+```
+
+The only problem here is that if we run this code with a file that is very large which is bigger than our memory, that is not going to work. Instead, we can reach the same functionality, by using the stream API:
+
+```scala
+ZStream.fromFile(Paths.get("file.txt"))
+ .transduce(ZTransducer.utf8Decode >>> ZTransducer.splitLines)
+ .foreach(putStrLn(_))
+```
+
+By using ZIO streams, we do not care how big a file is; we just concentrate on the logic of our application.
+
+## Core Abstractions
+
+To define a stream workflow there are three core abstractions in ZIO stream: _Streams_, _Sinks_, and _Transducers_:
+
+1. **[ZStream](zstream.md)** — Streams act as _sources_ of values. We get elements from them. They produce values.
+
+2. **[ZSink](zsink.md)** — Sinks act as _receptacles_ or _sinks_ for values. They consume values.
+
+3. **[Transducer](ztransducer.md)** — Transducers act as _transformers_ of values. They take individual values, and they transform or decode them.
+
+### Stream
+
+The `ZStream` data type, similar to the `ZIO` effect, has `R`, `E`, and `A` type parameters. It has environment, error, and element types.
+
+The difference between the `ZIO` and `ZStream` is that:
+
+- A `ZIO` effect will always succeed or fail. If it succeeds, it will succeed with a single element.
+
+- A `ZStream` can succeed with zero or more elements. So we can have an _empty stream_. A `ZStream[R, E, A]` doesn't necessarily produce any `A`s, it produces zero or more `A`s.
+
+So, that is a big difference. There is no such thing as a `ZStream` that is guaranteed to be non-empty. Every `ZStream` may be empty: it can produce any number of `A`s, which could even be an infinite number of `A`s.
+
+There is no way to check to see if a stream is empty or not, because that computation hasn't started. Streams are super lazy, so there is no way to say "Oh! does this stream contain anything?" No! We can't figure that out. We have to use it and try to do something with it, and then we are going to figure out whether it had something.
+
+### Sink
+
+The basic idea behind the `Sink` is that **it consumes values of some type, and then it ends up when it is done. When the sink is done, it produces the value of a different type**.
+
+Sinks are a bit like **parsers**; they consume some input, and when they're done, they produce a value. Also, they are like **databases**; they read enough from the input, and when they don't want any more, they can produce some value or return unit.
+
+Some sinks will produce nothing as their return type parameter is `Nothing`, which means that the sink is always going to accept more and more input; it is never ever going to be done.
+
+Just like Streams, sinks are super compositional. Sink's operators allow us to combine two sinks together or transform them. That allows us to generate a vast variety of sinks.
+
+Streams and Sinks are duals in category theory. One produces value, and the other one consumes them. They are mere images of each other. They both have to exist. A streaming library cannot be complete unless it has streams and sinks. That is why ZIO has a sort of better design than FS2 because FS2 has a stream, but it doesn't have a sink. Its Sink is just faked. It doesn't actually have a real sink. ZIO has a real sink, and we can compose them to generate new sinks.
+
+### Transducer
+
+With `Transducer`s, we can transform streams from one type to another, in a **stateful fashion**, which is sometimes necessary when we are doing encoding and decoding.
+
+Transducer is a transformer of element types. Transducer accepts some element of type `A` and produces some element of type `B`, and it may fail along the way or use the environment. It just transforms elements from one type to another type in a stateful way.
+
+For example, we can write counter with transducers. We take strings and then split them into lines, and then we take the lines, and we split them into words, and then we count them.
+
+Another common use case of transducers is **writing codecs**. We can use them to decode the bytes into strings. We have a bunch of bytes, and we want to end up with a JSON and then once we are in JSON land we want to go from JSON to our user-defined data type. So, by writing a transducer we can convert that JSON to our user-defined data type.
+
+**Transducers are very efficient**. They only exist for efficiency reasons because we can do everything we need actually with Sinks. Transducers exist only to make transformations faster. Sinks are not super fast to change from one sink to another. So transducers were invented to make it possible to transform element types in a compositional way without any of the performance overhead associated with changing over a Sink.
+
+Transducers can be thought of as **element transformers**. They transform elements of a stream:
+
+1. We can take a transducer, and we can stack it onto a stream to change the element type. For example, we have a Stream of `A`s, and a transducer that goes from `A` to `B`, so we can take that transducer from `A` to `B` and stack it on the stream to get back a stream of `B`s.
+
+2. Also, we can stack a transducer onto the front of a sink to change the input element type. If some sink consumes `B`s, and we have a transducer from `A` to `B` we can take that transducer stack it onto the front of the sink and get back a new sink that consumes `A`s.
+
+Assume we are building the data pipeline, the elements come from the far left, and they end up on the far right. Events come from the stream, they end up on the sink, along the way they're transformed by transducers. **Transducers are the middle section of the pipe that keep on transforming those elements in a stateful way**.
diff --git a/website/versioned_docs/version-1.0.18/reference/stream/sink.md b/website/versioned_docs/version-1.0.18/reference/stream/sink.md
new file mode 100644
index 000000000000..e1dae360e7e1
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/stream/sink.md
@@ -0,0 +1,10 @@
+---
+id: sink
+title: "Sink"
+---
+
+`Sink[E, A, L, B]` is a type alias for `ZSink[Any, E, A, L, B]`. We can think of a `Sink` as a function that does not require any services and will consume a variable amount of `A` elements (could be 0, 1, or many!), might fail with an error of type `E`, and will eventually yield a value of type `B`. The `L` is the type of elements in the leftover.
+
+```scala
+type Sink[+E, A, +L, +B] = ZSink[Any, E, A, L, B]
+```
\ No newline at end of file
diff --git a/website/versioned_docs/version-1.0.18/reference/stream/stream.md b/website/versioned_docs/version-1.0.18/reference/stream/stream.md
new file mode 100644
index 000000000000..df833849d145
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/stream/stream.md
@@ -0,0 +1,10 @@
+---
+id: stream
+title: "Stream"
+---
+
+`Stream[E, A]` is a type alias for `ZStream[Any, E, A]`, which represents a ZIO stream that does not require any services, and may fail with an `E`, or produce elements with an `A`.
+
+```scala
+type Stream[+E, +A] = ZStream[Any, E, A]
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/stream/subscriptionref.md b/website/versioned_docs/version-1.0.18/reference/stream/subscriptionref.md
new file mode 100644
index 000000000000..86bd06e3af31
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/stream/subscriptionref.md
@@ -0,0 +1,69 @@
+---
+id: subscription-ref
+title: "SubscriptionRef"
+---
+
+A `SubscriptionRef[A]` contains a current value of type `A` and a stream that can be consumed to observe all changes to that value.
+
+```scala
+import zio._
+import zio.stream._
+
+trait SubscriptionRef[A] {
+ def changes: ZStream[Any, Nothing, A]
+ def ref: RefM[A]
+}
+```
+
+The `ref` allows us to access a `RefM` containing the current value. We can use all the normal methods on `RefM` to `get`, `set`, or `modify` the current value.
+
+The `changes` stream can be consumed to observe the current value as well as all changes to that value. Since `changes` is just a description of a stream, each time we run the stream we will observe the current value as of that point in time as well as all changes after that.
+
+To create a `SubscriptionRef` you can use the `make` constructor, which makes a new `SubscriptionRef` with the specified initial value.
+
+```scala
+object SubscriptionRef {
+ def make[A](a: A): UIO[SubscriptionRef[A]] =
+ ???
+}
+```
+
+A `SubscriptionRef` can be extremely useful to model some shared state where one or more observers must perform some action for all changes in that shared state. For example, in a functional reactive programming context the value of the `SubscriptionRef` might represent one part of the application state and each observer would need to update various user interface elements based on changes in that state.
+
+To see how this works, let's create a simple example where a "server" repeatedly updates a value that is observed by multiple "clients".
+
+
+```scala
+def server(ref: RefM[Long]): UIO[Nothing] =
+ ref.update(n => ZIO.succeed(n + 1)).forever
+```
+
+Notice that `server` just takes a `RefM` and does not need to know anything about `SubscriptionRef`. From its perspective it is just updating a value.
+
+```scala
+import zio.random._
+
+def client(changes: ZStream[Any, Nothing, Long]): URIO[Random, Chunk[Long]] =
+ for {
+ n <- random.nextLongBetween(1, 200)
+ chunk <- changes.take(n).runCollect
+ } yield chunk
+```
+
+Similarly `client` just takes a `ZStream` of values and does not have to know anything about the source of these values. In this case we will simply observe a fixed number of values.
+
+To wire everything together, we start the server, then start multiple instances of the client in parallel, and finally shut down the server when we are done. We also actually create the `SubscriptionRef` here.
+
+```scala
+for {
+ subscriptionRef <- SubscriptionRef.make(0L)
+ server <- server(subscriptionRef.ref).fork
+ chunks <- ZIO.collectAllPar(List.fill(100)(client(subscriptionRef.changes)))
+ _ <- server.interrupt
+ _ <- ZIO.foreach(chunks)(chunk => console.putStrLn(chunk.toString))
+} yield ()
+```
+
+This will ensure that each client observes the current value when it starts and all changes to the value after that.
+
+Since the changes are just streams it is also easy to build much more complex programs using all the stream operators we are accustomed to. For example, we can transform these streams, filter them, or merge them with other streams.
diff --git a/website/versioned_docs/version-1.0.18/reference/stream/transducer.md b/website/versioned_docs/version-1.0.18/reference/stream/transducer.md
new file mode 100644
index 000000000000..a37128f8196f
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/stream/transducer.md
@@ -0,0 +1,10 @@
+---
+id: transducer
+title: "Transducer"
+---
+
+`Transducer[E, A, B]` is a type alias for `ZTransducer[Any, E, A, B]`. It is a stream transducer that doesn't require any services, so apart from the `R` type parameter, everything else is the same.
+
+```scala
+type Transducer[+E, -A, +B] = ZTransducer[Any, E, A, B]
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/stream/ustream.md b/website/versioned_docs/version-1.0.18/reference/stream/ustream.md
new file mode 100644
index 000000000000..6006bd132c85
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/stream/ustream.md
@@ -0,0 +1,10 @@
+---
+id: ustream
+title: "UStream"
+---
+
+`UStream[A]` is a type alias for `ZStream[Any, Nothing, A]`, which represents a ZIO stream that does not require any services, it cannot fail, and after evaluation, it may emit zero or more values of type `A`.
+
+```scala
+type UStream[+A] = ZStream[Any, Nothing, A]
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/stream/zsink.md b/website/versioned_docs/version-1.0.18/reference/stream/zsink.md
new file mode 100644
index 000000000000..3185504b7297
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/stream/zsink.md
@@ -0,0 +1,426 @@
+---
+id: zsink
+title: "ZSink"
+---
+
+## Introduction
+
+A `ZSink[R, E, I, L, Z]` is used to consume elements produced by a `ZStream`. You can think of a sink as a function that will consume a variable amount of `I` elements (could be 0, 1, or many!), might fail with an error of type `E`, and will eventually yield a value of type `Z` together with a remainder of type `L` as leftover.
+
+To consume a stream using `ZSink` we can pass `ZSink` to the `ZStream#run` function:
+
+```scala
+import zio._
+import zio.stream._
+
+val stream = ZStream.fromIterable(1 to 1000)
+val sink = ZSink.sum[Int]
+val sum = stream.run(sink)
+```
+
+## Creating sinks
+
+The `zio.stream` provides numerous kinds of sinks to use.
+
+### Common Constructors
+
+**ZSink.head** — It creates a sink containing the first element, returns `None` for empty streams:
+
+```scala
+val sink: ZSink[Any, Nothing, Int, Int, Option[Int]] = ZSink.head[Int]
+val head: ZIO[Any, Nothing, Option[Int]] = ZStream(1, 2, 3, 4).run(sink)
+// Result: Some(1)
+```
+
+**ZSink.last** — It consumes all elements of a stream and returns the last element of the stream:
+
+```scala
+val sink: ZSink[Any, Nothing, Int, Nothing, Option[Int]] = ZSink.last[Int]
+val last: ZIO[Any, Nothing, Option[Int]] = ZStream(1, 2, 3, 4).run(sink)
+// Result: Some(4)
+```
+
+**ZSink.count** — A sink that consumes all elements of the stream and counts the number of elements fed to it:
+
+```scala
+val sink : ZSink[Any, Nothing, Any, Nothing, Long] = ZSink.count
+val count: ZIO[Any, Nothing, Long] = ZStream(1, 2, 3, 4, 5).run(sink)
+// Result: 5
+```
+
+**ZSink.sum** — A sink that consumes all elements of the stream and sums incoming numeric values:
+
+```scala
+val sink : ZSink[Any, Nothing, Int, Nothing, Int] = ZSink.sum[Int]
+val sum: ZIO[Any, Nothing, Int] = ZStream(1, 2, 3, 4, 5).run(sink)
+// Result: 15
+```
+
+**ZSink.take** — A sink that takes the specified number of values and results in a `Chunk` data type:
+
+```scala
+val sink : ZSink[Any, Nothing, Int, Int, Chunk[Int]] = ZSink.take[Int](3)
+val stream: ZIO[Any, Nothing, Chunk[Int]] = ZStream(1, 2, 3, 4, 5).run(sink)
+// Result: Chunk(1, 2, 3)
+```
+
+**ZSink.drain** — A sink that ignores its inputs:
+
+```scala
+val drain: ZSink[Any, Nothing, Any, Nothing, Unit] = ZSink.drain
+```
+
+**ZSink.timed** — A sink that executes the stream and times its execution:
+
+```scala
+val timed: ZSink[Clock, Nothing, Any, Nothing, Duration] = ZSink.timed
+val stream: ZIO[Clock, Nothing, Long] =
+ ZStream(1, 2, 3, 4, 5).fixed(2.seconds).run(timed).map(_.getSeconds)
+// Result: 10
+```
+
+**ZSink.foreach** — A sink that executes the provided effectful function for every element fed to it:
+
+```scala
+val printer: ZSink[Console, IOException, Int, Int, Unit] =
+ ZSink.foreach((i: Int) => zio.console.putStrLn(i.toString))
+val stream : ZIO[Console, IOException, Unit] =
+ ZStream(1, 2, 3, 4, 5).run(printer)
+```
+
+### From Success and Failure
+
+Similar to the `ZStream` data type, we can create a `ZSink` using `fail` and `succeed` methods.
+
+A sink that doesn't consume any element of type `String` from its upstream and succeeds with a value of `Int` type:
+
+```scala
+val succeed: ZSink[Any, Nothing, String, String, Int] = ZSink.succeed[String, Int](5)
+```
+
+A sink that doesn't consume any element of type `Int` from its upstream and intentionally fails with a message of `String` type:
+
+```scala
+val failed : ZSink[Any, String, Int, Int, Nothing] = ZSink.fail[String, Int]("fail!")
+```
+
+### Collecting
+
+To create a sink that collects all elements of a stream into a `Chunk[A]`, we can use `ZSink.collectAll`:
+
+```scala
+val stream : UStream[Int] = UStream(1, 2, 3, 4, 5)
+val collection: UIO[Chunk[Int]] = stream.run(ZSink.collectAll[Int])
+// Output: Chunk(1, 2, 3, 4, 5)
+```
+
+We can collect all elements into a `Set`:
+
+```scala
+val collectAllToSet: ZSink[Any, Nothing, Int, Nothing, Set[Int]] = ZSink.collectAllToSet[Int]
+val stream: ZIO[Any, Nothing, Set[Int]] = ZStream(1, 3, 2, 3, 1, 5, 1).run(collectAllToSet)
+// Output: Set(1, 3, 2, 5)
+```
+
+Or we can collect and merge them into a `Map[K, A]` using a merge function. In the following example, we use `(_:Int) % 3` to determine map keys and, we provide `_ + _` function to merge multiple elements with the same key:
+
+```scala
+val collectAllToMap: ZSink[Any, Nothing, Int, Nothing, Map[Int, Int]] = ZSink.collectAllToMap((_: Int) % 3)(_ + _)
+val stream: ZIO[Any, Nothing, Map[Int, Int]] = ZStream(1, 3, 2, 3, 1, 5, 1).run(collectAllToMap)
+// Output: Map(1 -> 3, 0 -> 6, 2 -> 7)
+```
+
+### Folding
+
+Basic fold accumulation of received elements:
+
+```scala
+ZSink.foldLeft[Int, Int](0)(_ + _)
+```
+
+A fold with short-circuiting has a termination predicate that determines the end of the folding process:
+
+```scala
+ZStream.iterate(0)(_ + 1).run(
+ ZSink.fold(0)(sum => sum <= 10)((acc, n: Int) => acc + n)
+)
+// Output: 15
+```
+
+### From Effect
+
+The `ZSink.fromEffect` creates a single-value sink produced from an effect:
+
+```scala
+val sink = ZSink.fromEffect(ZIO.succeed(1))
+```
+
+### From File
+
+The `ZSink.fromFile` creates a file sink that consumes byte chunks and writes them to the specified file:
+
+```scala
+def fileSink(path: Path): ZSink[Blocking, Throwable, String, Byte, Long] =
+ ZSink
+ .fromFile(path)
+ .contramapChunks[String](_.flatMap(_.getBytes))
+
+val result = ZStream("Hello", "ZIO", "World!")
+ .intersperse("\n")
+ .run(fileSink(Paths.get("file.txt")))
+```
+
+### From OutputStream
+
+The `ZSink.fromOutputStream` creates a sink that consumes byte chunks and writes them to the `OutputStream`:
+
+```scala
+ZStream("Application", "Error", "Logs")
+ .intersperse("\n")
+ .run(
+ ZSink
+ .fromOutputStream(System.err)
+ .contramapChunks[String](_.flatMap(_.getBytes))
+ )
+```
+
+### From Queue
+
+Queues have a finite or infinite buffer size, so they are useful in situations where we need to consume streams as fast as we can, and then do some batching operations on consumed messages. By using `ZSink.fromQueue` we can create a sink that is backed by a queue; it enqueues each element into the specified queue:
+
+```scala
+val myApp: ZIO[Console with Clock, IOException, Unit] =
+ for {
+ queue <- ZQueue.bounded[Int](32)
+ producer <- ZStream
+ .iterate(1)(_ + 1)
+ .fixed(200.millis)
+ .run(ZSink.fromQueue(queue))
+ .fork
+ consumer <- queue.take.flatMap(x => putStrLn(x.toString)).forever.fork
+ _ <- producer.zip(consumer).join
+ } yield ()
+```
+
+### From Hub
+
+`Hub` is an asynchronous data type in which publishers can publish their messages and subscribers can subscribe to take messages from the `Hub`. The `ZSink.fromHub` takes a `ZHub` and returns a `ZSink` which publishes each element to that `ZHub`.
+
+In the following example, the `sink` consumes elements of the `producer` stream and publishes them to the `hub`. We have two consumers that are subscribed to that hub and they are taking its elements forever:
+
+```scala
+val myApp: ZIO[Console with Clock, IOException, Unit] =
+ for {
+ promise <- Promise.make[Nothing, Unit]
+ hub <- ZHub.bounded[Int](1)
+ sink <- ZIO.succeed(ZSink.fromHub(hub))
+ producer <- ZStream.iterate(0)(_ + 1).fixed(1.seconds).run(sink).fork
+ consumers <- hub.subscribe.zip(hub.subscribe).use { case (left, right) =>
+ for {
+ _ <- promise.succeed(())
+ f1 <- left.take.flatMap(e => putStrLn(s"Left Queue: $e")).forever.fork
+ f2 <- right.take.flatMap(e => putStrLn(s"Right Queue: $e")).forever.fork
+ _ <- f1.zip(f2).join
+ } yield ()
+ }.fork
+ _ <- promise.await
+ _ <- producer.zip(consumers).join
+ } yield ()
+```
+
+### From Push
+
+Before diving into creating a `ZSink` using the `Push` data-type, we need to learn more about the implementation details of `ZSink`. Note that this topic is for advanced users; most of the time, we do not need to use the `Push` data-type to create ZIO sinks.
+
+#### ZSink's Encoding
+
+`ZSink` is a wrapper data-type around _managed_ `Push`:
+
+```scala
+abstract class ZSink[-R, +E, -I, +L, +Z] private (
+ val push: ZManaged[R, Nothing, ZSink.Push[R, E, I, L, Z]]
+)
+
+object ZSink {
+ type Push[-R, +E, -I, +L, +Z] =
+ Option[Chunk[I]] => ZIO[R, (Either[E, Z], Chunk[L]), Unit]
+}
+```
+
+`Push` is a function from `Option[Chunk[I]]` to `ZIO[R, (Either[E, Z], Chunk[L]), Unit]`. We can create four different data-types using its smart constructors:
+
+1. **Push.more** — Using this constructor we create a `Push` data-type that requires more values to consume (`Option[Chunk[I]] => UIO[Unit]`):
+
+```scala
+object Push {
+ val more: ZIO[Any, Nothing, Unit] = UIO.unit
+}
+```
+
+2. **Push.emit** — By providing `z` (as an _end_ value) and `leftover` arguments to this constructor we can create a `Push` data-type describing a sink that ends with `z` value and emits its leftovers (`Option[Chunk[I]] => IO[(Right[Nothing, Z], Chunk[I]), Nothing]`):
+
+```scala
+object Push {
+def emit[I, Z](
+ z: Z,
+ leftover: Chunk[I]
+): IO[(Right[Nothing, Z], Chunk[I]), Nothing] =
+ IO.fail((Right(z), leftover))
+}
+```
+
+3. **Push.fail** — By providing an error message and leftover to this constructor, we can create a `Push` data-type describing a sink that fails with `e` and emits the leftover (`Option[Chunk[I]] => IO[(Left[E, Nothing], Chunk[I]), Nothing]`):
+
+```scala
+def fail[I, E](
+ e: E,
+ leftover: Chunk[I]
+): IO[(Left[E, Nothing], Chunk[I]), Nothing] =
+ IO.fail((Left(e), leftover))
+```
+
+4. **Push.halt** — By providing a `Cause` we can create a `Push` data-type describing a sink that halts the process of consuming elements (`Option[Chunk[I]] => ZIO[Any, (Left[E, Nothing], Chunk[Nothing]), Nothing]`):
+
+```scala
+def halt[E](
+ c: Cause[E]
+): ZIO[Any, (Left[E, Nothing], Chunk[Nothing]), Nothing] =
+ IO.halt(c).mapError(e => (Left(e), Chunk.empty))
+```
+
+Now, we are ready to see how the existing `ZSink.head` sink is implemented using `Push` data-type:
+
+```scala
+def head[I]: ZSink[Any, Nothing, I, I, Option[I]] =
+ ZSink[Any, Nothing, I, I, Option[I]](ZManaged.succeed({
+ case Some(ch) =>
+ if (ch.isEmpty) { // If the chunk is empty, we require more elements
+ Push.more
+ } else {
+ Push.emit(Some(ch.head), ch.drop(1))
+ }
+ case None => Push.emit(None, Chunk.empty)
+ }))
+```
+
+#### Creating ZSink using Push
+
+To create a ZSink using `Push` data-type, we should use `ZSink.fromPush` constructor. This constructor is implemented as below:
+
+```scala
+object ZSink {
+ def fromPush[R, E, I, L, Z](sink: Push[R, E, I, L, Z]): ZSink[R, E, I, L, Z] =
+ ZSink(Managed.succeed(sink))
+}
+```
+
+So nothing special: it just creates a new `ZSink` containing a managed push.
+
+Let's rewrite `ZSink.succeed` and `ZSink.fail` — the two existing ZIO sinks — using `fromPush`:
+
+```scala
+def succeed[I, Z](z: => Z): ZSink[Any, Nothing, I, I, Z] =
+ ZSink.fromPush[Any, Nothing, I, I, Z] { c =>
+ val leftover = c.fold[Chunk[I]](Chunk.empty)(identity)
+ Push.emit(z, leftover)
+ }
+
+def fail[E, I](e: => E): ZSink[Any, E, I, I, Nothing] =
+ ZSink.fromPush[Any, E, I, I, Nothing] { c =>
+ val leftover = c.fold[Chunk[I]](Chunk.empty)(identity)
+ Push.fail(e, leftover)
+ }
+```
+
+## Operations
+
+Having created the sink, we can transform it with provided operations.
+
+### contramap
+
+Contramap is a simple combinator to change the domain of an existing function. While _map_ changes the co-domain of a function, the _contramap_ changes the domain of a function. So the _contramap_ takes a function and maps over its input.
+
+This is useful when we have a fixed output, and our existing function cannot consume those outputs. So we can use _contramap_ to create a new function that can consume that fixed output. Assume we have a `ZSink.sum` that sums incoming numeric values, but we have a `ZStream` of `String` values. We can convert the `ZSink.sum` to a sink that can consume `String` values:
+
+```scala
+val numericSum: ZSink[Any, Nothing, Int, Nothing, Int] =
+ ZSink.sum[Int]
+val stringSum : ZSink[Any, Nothing, String, Nothing, Int] =
+ numericSum.contramap((x: String) => x.toInt)
+
+val sum: ZIO[Any, Nothing, Int] =
+ ZStream("1", "2", "3", "4", "5").run(stringSum)
+// Output: 15
+```
+
+### dimap
+
+A `dimap` is an extended `contramap` that additionally transforms sink's output:
+
+```scala
+// Convert its input to integers, do the computation and then convert them back to a string
+val sumSink: ZSink[Any, Nothing, String, Nothing, String] =
+ numericSum.dimap[String, String](_.toInt, _.toString)
+
+val sum: ZIO[Any, Nothing, String] =
+ ZStream("1", "2", "3", "4", "5").run(sumSink)
+// Output: 15
+```
+
+## Concurrency and Parallelism
+
+### Parallel Zipping
+
+Like `ZStream`, two `ZSink` can be zipped together. Both of them will be run in parallel, and their results will be combined in a tuple:
+
+
+```scala
+val kafkaSink: ZSink[Any, Throwable, Record, Record, Unit] =
+ ZSink.foreach[Any, Throwable, Record](record => ZIO.effect(???))
+
+val pulsarSink: ZSink[Any, Throwable, Record, Record, Unit] =
+ ZSink.foreach[Any, Throwable, Record](record => ZIO.effect(???))
+
+val stream: ZSink[Any, Throwable, Record, Record, (Unit, Unit)] =
+ kafkaSink zipPar pulsarSink
+```
+
+### Racing
+
+We are able to `race` multiple sinks, they will run in parallel, and the one that wins will provide the result of our program:
+
+```scala
+val stream: ZSink[Any, Throwable, Record, Record, Unit] =
+ kafkaSink race pulsarSink
+```
+
+To determine which one succeeded, we should use the `ZSink#raceBoth` combinator, it returns an `Either` result.
+
+## Leftovers
+
+### Exposing Leftovers
+
+A sink consumes a variable amount of `I` elements (zero or more) from the upstream. If the upstream is finite, we can expose leftover values by calling `ZSink#exposeLeftover`. It returns a tuple that contains the result of the previous sink and its leftovers:
+
+```scala
+val s1: ZIO[Any, Nothing, (Chunk[Int], Chunk[Int])] =
+ ZStream(1, 2, 3, 4, 5).run(
+ ZSink.take(3).exposeLeftover
+ )
+// Output: (Chunk(1, 2, 3), Chunk(4, 5))
+
+
+val s2: ZIO[Any, Nothing, (Option[Int], Chunk[Int])] =
+ ZStream(1, 2, 3, 4, 5).run(
+ ZSink.head[Int].exposeLeftover
+ )
+// Output: (Some(1), Chunk(2, 3, 4, 5))
+```
+
+### Dropping Leftovers
+
+If we don't need leftovers, we can drop them by using `ZSink#dropLeftover`:
+
+```scala
+ZSink.take[Int](3).dropLeftover
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/stream/zstream.md b/website/versioned_docs/version-1.0.18/reference/stream/zstream.md
new file mode 100644
index 000000000000..21e6ba00c069
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/stream/zstream.md
@@ -0,0 +1,1858 @@
+---
+id: zstream
+title: "ZStream"
+---
+
+## Introduction
+
+A `ZStream[R, E, O]` is a description of a program that, when evaluated, may emit zero or more values of type `O`, may fail with errors of type `E`, and uses an environment of type `R`.
+
+One way to think of `ZStream` is as a `ZIO` program that could emit multiple values. As we know, a `ZIO[R, E, A]` data type, is a functional effect which is a description of a program that needs an environment of type `R`, it may end with an error of type `E`, and in case of success, it returns a value of type `A`. The important note about `ZIO` effects is that in the case of success they always end with exactly one value. There is no optionality here, no multiple infinite values, we always get exactly one value:
+
+
+```scala
+val failedEffect: ZIO[Any, String, Nothing] = ZIO.fail("fail!")
+val oneIntValue : ZIO[Any, Nothing, Int] = ZIO.succeed(3)
+val oneListValue: ZIO[Any, Nothing, List[Int]] = ZIO.succeed(List(1, 2, 3))
+val oneOption : ZIO[Any, Nothing , Option[Int]] = ZIO.succeed(None)
+```
+
+A functional stream is pretty similar, it is a description of a program that requires an environment of type `R` and it may signal with errors of type `E` and it yields `O`, but the difference is that it will yield zero or more values.
+
+So a `ZStream` represents one of the following cases in terms of its elements:
+- **An Empty Stream** — It might end up empty; which represent an empty stream, e.g. `ZStream.empty`.
+- **One Element Stream** — It can represent a stream with just one value, e.g. `ZStream.succeed(3)`.
+- **Multiple Finite Element Stream** — It can represent a stream of finite values, e.g. `ZStream.range(1, 10)`
+- **Multiple Infinite Element Stream** — It can even represent a stream that _never ends_ as an infinite stream, e.g. `ZStream.iterate(1)(_ + 1)`.
+
+```scala
+import zio.stream.ZStream
+val emptyStream : ZStream[Any, Nothing, Nothing] = ZStream.empty
+val oneIntValueStream : ZStream[Any, Nothing, Int] = ZStream.succeed(4)
+val oneListValueStream : ZStream[Any, Nothing, List[Int]] = ZStream.succeed(List(1, 2, 3))
+val finiteIntStream : ZStream[Any, Nothing, Int] = ZStream.range(1, 10)
+val infiniteIntStream : ZStream[Any, Nothing, Int] = ZStream.iterate(1)(_ + 1)
+```
+
+Another example of a stream is when we're pulling a Kafka topic or reading from a socket. There is no inherent definition of an end there. Stream elements arrive at some point, or even they might never arrive at any point.
+
+## Stream Types
+Based on type parameters of `ZStream`, there are 4 types of streams:
+
+1. `ZStream[Any, Nothing, O]` — A stream that emits `O` values and cannot fail.
+2. `ZStream[Any, Throwable, O]` — A stream that emits `O` values and can fail with `Throwable`.
+3. `ZStream[Any, Nothing, Nothing]` — A stream that emits no elements.
+4. `ZStream[R, E, O]` — A stream that requires access to the `R` service, can fail with error of type `E` and emits `O` values.
+
+## Chunking
+
+Every time we are working with streams, we are always working with chunks. There are no streams with individual elements; these streams always use chunks in their underlying implementation. So every time we evaluate a stream, when we pull an element out of a stream, we are actually pulling out a chunk of elements.
+
+So why are streams designed this way? This is because of **efficiency and performance** concerns. Every I/O operation in the programming world works with batches. We never work with a single element. For example, whenever we are reading or writing from/to a file descriptor, or a socket we are reading or writing multiple elements at a time. This is also true when we are working with an HTTP server or even JDBC drivers. We always read and write multiple bytes to be more performant.
+
+So let's talk a bit about Chunk. Chunk is ZIO's immutable array-backed collection. It was initially written for ZIO Stream, but it has since evolved into a very attractive general-purpose collection type which is also useful for other purposes. Most importantly, it tries to keep primitives unboxed. This is super important for the efficient processing of files and sockets. Chunks are also very useful and efficient for encoding and decoding and writing transducers. To learn more about this data type, see the [Chunk](../misc/chunk.md) section.
+
+## Creating a Stream
+
+There are several ways to create ZIO Stream. In this section, we are going to enumerate some of the important ways of creating `ZStream`.
+
+### Common Constructors
+
+**ZStream.apply** — Creates a pure stream from a variable list of values:
+
+```scala
+val stream: ZStream[Any, Nothing, Int] = ZStream(1, 2, 3)
+```
+
+**ZStream.unit** — A stream that contains a single `Unit` value:
+
+```scala
+val unit: ZStream[Any, Nothing, Unit] = ZStream.unit
+```
+
+**ZStream.never** — A stream that produces no value or fails with an error:
+
+```scala
+val never: ZStream[Any, Nothing, Nothing] = ZStream.never
+```
+
+**ZStream.iterate** — Takes an initial value and applies the given function to the initial value iteratively. The initial value is the first value produced by the stream, followed by f(init), f(f(init)), ...
+
+```scala
+val nats: ZStream[Any, Nothing, Int] =
+ ZStream.iterate(1)(_ + 1) // 1, 2, 3, ...
+```
+
+**ZStream.range** — A stream from a range of integers `[min, max)`:
+
+```scala
+val range: ZStream[Any, Nothing, Int] = ZStream.range(1, 5) // 1, 2, 3, 4
+```
+
+**ZStream.environment[R]** — Creates a stream that extracts the requested service from the environment:
+
+```scala
+val clockStream: ZStream[Clock, Nothing, Clock] = ZStream.environment[Clock]
+```
+
+**ZStream.managed** — Creates a single-valued stream from a managed resource:
+
+```scala
+val managedStream: ZStream[Blocking, Throwable, BufferedReader] =
+ ZStream.managed(
+ ZManaged.fromAutoCloseable(
+ zio.blocking.effectBlocking(
+ Files.newBufferedReader(java.nio.file.Paths.get("file.txt"))
+ )
+ )
+ )
+```
+
+### From Success and Failure
+
+Similar to `ZIO` data type, we can create a `ZStream` using `fail` and `succeed` methods:
+
+```scala
+val s1: ZStream[Any, String, Nothing] = ZStream.fail("Uh oh!")
+val s2: ZStream[Any, Nothing, Int] = ZStream.succeed(5)
+```
+
+### From Chunks
+
+We can create a stream from a `Chunk`:
+
+```scala
+val s1 = ZStream.fromChunk(Chunk(1, 2, 3))
+// s1: ZStream[Any, Nothing, Int] = zio.stream.ZStream$$anon$1@29f143ce
+```
+
+Or from multiple `Chunks`:
+
+```scala
+val s2 = ZStream.fromChunks(Chunk(1, 2, 3), Chunk(4, 5, 6))
+// s2: ZStream[Any, Nothing, Int] = zio.stream.ZStream$$anon$1@57458a29
+```
+
+### From Effect
+
+**ZStream.fromEffect** — We can create a stream from an effect by using `ZStream.fromEffect` constructor. For example, the following stream is a stream that reads a line from a user:
+
+```scala
+val readline: ZStream[Console, IOException, String] =
+ ZStream.fromEffect(zio.console.getStrLn)
+```
+
+A stream that produces one random number:
+
+```scala
+val randomInt: ZStream[Random, Nothing, Int] =
+ ZStream.fromEffect(zio.random.nextInt)
+```
+
+**ZStream.fromEffectOption** — In some cases, depending on the result of the effect, we should decide to emit an element or return an empty stream. In these cases, we can use `fromEffectOption` constructor:
+
+```scala
+object ZStream {
+ def fromEffectOption[R, E, A](fa: ZIO[R, Option[E], A]): ZStream[R, E, A] = ???
+}
+```
+
+Let's see an example of using this constructor. In this example, we read a string from user input, and then decide to emit that or not; If the user enters an `EOF` string, we emit an empty stream, otherwise we emit the user input:
+
+```scala
+val userInput: ZStream[Console, IOException, String] =
+ ZStream.fromEffectOption(
+ zio.console.getStrLn.mapError(Option(_)).flatMap {
+ case "EOF" => ZIO.fail[Option[IOException]](None)
+ case o => ZIO.succeed(o)
+ }
+ )
+```
+
+### From Asynchronous Callback
+
+Assume we have an asynchronous function that is based on callbacks. We would like to register a callback on that function and get back a stream of the results emitted by those callbacks. We have `ZStream.effectAsync` which can adapt functions that call their callbacks multiple times and emit the results over a stream:
+
+```scala
+// Asynchronous Callback-based API
+def registerCallback(
+ name: String,
+ onEvent: Int => Unit,
+ onError: Throwable => Unit
+): Unit = ???
+
+// Lifting an Asynchronous API to ZStream
+val stream = ZStream.effectAsync[Any, Throwable, Int] { cb =>
+ registerCallback(
+ "foo",
+ event => cb(ZIO.succeed(Chunk(event))),
+ error => cb(ZIO.fail(error).mapError(Some(_)))
+ )
+}
+```
+
+The error type of the `register` function is optional, so by setting the error to the `None` we can use it to signal the end of the stream.
+
+### From Iterators
+
+Iterators are data structures that allow us to iterate over a sequence of elements. Similarly, we can think of ZIO Streams as effectual Iterators; every `ZStream` represents an effectful collection of zero or more values.
+
+**ZStream.fromIteratorTotal** — We can convert an iterator that does not throw exceptions to `ZStream` by using `ZStream.fromIteratorTotal`:
+
+```scala
+val s1: ZStream[Any, Nothing, Int] = ZStream.fromIteratorTotal(Iterator(1, 2, 3))
+val s2: ZStream[Any, Nothing, Int] = ZStream.fromIteratorTotal(Iterator.range(1, 4))
+val s3: ZStream[Any, Nothing, Int] = ZStream.fromIteratorTotal(Iterator.continually(0))
+```
+
+Also, there is another constructor called **`ZStream.fromIterator`** that creates a stream from an iterator which may throw an exception.
+
+**ZStream.fromIteratorEffect** — If we have an effectful Iterator that may throw Exception, we can use `fromIteratorEffect` to convert that to the ZIO Stream:
+
+```scala
+import scala.io.Source
+val lines: ZStream[Any, Throwable, String] =
+ ZStream.fromIteratorEffect(Task(Source.fromFile("file.txt").getLines()))
+```
+
+Using this method is not suitable for effects that acquire resources, like the one above, so it's better to rewrite it using the `ZStream.fromIteratorManaged` function.
+
+**ZStream.fromIteratorManaged** — Using this constructor we can convert a managed iterator to ZIO Stream:
+
+```scala
+val lines: ZStream[Any, Throwable, String] =
+ ZStream.fromIteratorManaged(
+ ZManaged.fromAutoCloseable(
+ Task(scala.io.Source.fromFile("file.txt"))
+ ).map(_.getLines())
+ )
+```
+
+**ZStream.fromJavaIterator** — It is the Java version of these constructors, which creates a stream from a Java iterator that may throw an exception. We can convert any Java collection to an iterator and then lift it to the ZIO Stream.
+
+For example, to convert the Java Stream to the ZIO Stream, `ZStream` has a `fromJavaStream` constructor which converts the Java Stream to a Java Iterator and then converts that to the ZIO Stream using the `ZStream.fromJavaIterator` constructor:
+
+```scala
+def fromJavaStream[A](stream: => java.util.stream.Stream[A]): ZStream[Any, Throwable, A] =
+ ZStream.fromJavaIterator(stream.iterator())
+```
+
+Similarly, `ZStream` has `ZStream.fromJavaIteratorTotal`, `ZStream.fromJavaIteratorEffect` and `ZStream.fromJavaIteratorManaged` constructors.
+
+### From Iterables
+
+**ZStream.fromIterable** — We can create a stream from `Iterable` collection of values:
+
+```scala
+val list = ZStream.fromIterable(List(1, 2, 3))
+```
+
+**ZStream.fromIterableM** — If we have an effect producing a value of type `Iterable` we can use `fromIterableM` constructor to create a stream of that effect.
+
+Assume we have a database that returns a list of users using `Task`:
+
+
+```scala
+trait Database {
+ def getUsers: Task[List[User]]
+}
+
+object Database {
+ def getUsers: ZIO[Has[Database], Throwable, List[User]] =
+ ZIO.serviceWith[Database](_.getUsers)
+}
+```
+
+As this operation is effectful, we can use `ZStream.fromIterableM` to convert the result to the `ZStream`:
+
+```scala
+val users: ZStream[Has[Database], Throwable, User] =
+ ZStream.fromIterableM(Database.getUsers)
+```
+
+### From Repetition
+
+**ZStream.repeat** — Repeats the provided value infinitely:
+
+```scala
+val repeatZero: ZStream[Any, Nothing, Int] = ZStream.repeat(0)
+```
+
+**ZStream.repeatWith** — This is another variant of `repeat`, which repeats according to the provided schedule. For example, the following stream produces a zero value every second:
+
+```scala
+import zio.clock._
+import zio.duration._
+import zio.random._
+import zio.Schedule
+val repeatZeroEverySecond: ZStream[Clock, Nothing, Int] =
+ ZStream.repeatWith(0, Schedule.spaced(1.seconds))
+```
+
+**ZStream.repeatEffect** — Assume we have an effectful API, and we need to call that API and create a stream from the result of that. We can create a stream from that effect that repeats forever.
+
+Let's see an example of creating a stream of random numbers:
+
+```scala
+val randomInts: ZStream[Random, Nothing, Int] =
+ ZStream.repeatEffect(zio.random.nextInt)
+```
+
+**ZStream.repeatEffectOption** — We can repeatedly evaluate the given effect and terminate the stream based on some conditions.
+
+Let's create a stream repeatedly from user inputs until user enter "EOF" string:
+
+```scala
+val userInputs: ZStream[Console, IOException, String] =
+ ZStream.repeatEffectOption(
+ zio.console.getStrLn.mapError(Option(_)).flatMap {
+ case "EOF" => ZIO.fail[Option[IOException]](None)
+ case o => ZIO.succeed(o)
+ }
+ )
+```
+
+Here is another interesting example of using `repeatEffectOption`; in this example, we are draining an `Iterator` to create a stream of that iterator:
+
+```scala
+def drainIterator[A](it: Iterator[A]): ZStream[Any, Throwable, A] =
+ ZStream.repeatEffectOption {
+ ZIO(it.hasNext).mapError(Some(_)).flatMap { hasNext =>
+ if (hasNext) ZIO(it.next()).mapError(Some(_))
+ else ZIO.fail(None)
+ }
+ }
+```
+
+**ZStream.tick** — A stream that emits Unit values spaced by the specified duration:
+
+```scala
+val stream: ZStream[Clock, Nothing, Unit] =
+ ZStream.tick(1.seconds)
+```
+
+There are some other variants of the repetition API, such as `repeatEffectWith`, `repeatEffectOption`, `repeatEffectChunk` and `repeatEffectChunkOption`.
+
+### From Unfolding/Pagination
+
+In functional programming, `unfold` is dual to `fold`.
+
+With `fold` we can process a data structure and build a return value. For example, we can process a `List[Int]` and return the sum of all its elements.
+
+The `unfold` represents an operation that takes an initial value and generates a recursive data structure, one-piece element at a time by using a given state function. For example, we can create a natural number by using `one` as the initial element and the `inc` function as the state function.
+
+#### Unfold
+
+**ZStream.unfold** — `ZStream` has `unfold` function, which is defined as follows:
+
+```scala
+object ZStream {
+ def unfold[S, A](s: S)(f: S => Option[(A, S)]): ZStream[Any, Nothing, A] = ???
+}
+```
+
+- **s** — An initial state value
+- **f** — A state function `f` that will be applied to the initial state `s`. If the result of this application is `None` the stream will end, otherwise the result is `Some`, so the next element in the stream would be `A` and the current state of transformation changed to the new `S`, this new state is the basis of the next unfold process.
+
+For example, we can create a stream of natural numbers using `ZStream.unfold`:
+
+```scala
+val nats: ZStream[Any, Nothing, Int] = ZStream.unfold(1)(n => Some((n, n + 1)))
+```
+
+We can write `countdown` function using `unfold`:
+
+```scala
+def countdown(n: Int) = ZStream.unfold(n) {
+ case 0 => None
+ case s => Some((s, s - 1))
+}
+```
+
+Running this function with an input value of 3 returns a `ZStream` which contains 3, 2, 1 values.
+
+**ZStream.unfoldM** — `unfoldM` is an effectful version of `unfold`. It helps us to perform _effectful state transformation_ when doing unfold operation.
+
+Let's write a stream of lines of input from a user until the user enters the `exit` command:
+
+```scala
+val inputs: ZStream[Console, IOException, String] = ZStream.unfoldM(()) { _ =>
+ zio.console.getStrLn.map {
+ case "exit" => None
+ case i => Some((i, ()))
+ }
+}
+```
+
+`ZStream.unfoldChunk`, and `ZStream.unfoldChunkM` are other variants of `unfold` operations but for `Chunk` data type.
+
+#### Pagination
+
+**ZStream.paginate** — This is similar to `unfold`, but allows the emission of values to end one step further. For example the following stream emits `0, 1, 2, 3` elements:
+
+```scala
+val stream = ZStream.paginate(0) { s =>
+ s -> (if (s < 3) Some(s + 1) else None)
+}
+```
+
+Similar to `unfold` API, `ZStream` has various other forms as well as `ZStream.paginateM`, `ZStream.paginateChunk` and `ZStream.paginateChunkM`.
+
+#### Unfolding vs. Pagination
+
+One might ask what the difference between the `unfold` and `paginate` combinators is, and when we should prefer one over the other. Let's find the answer to this question by working through another example.
+
+Assume we have a paginated API that returns an enormous amount of data in a paginated fashion. When we call that API, it returns a data type `PageResult` which contains the first-page result and, it also contains a flag indicating whether that result is the last one, or we have more data on the next page:
+
+
+```scala
+case class PageResult(results: Chunk[RowData], isLast: Boolean)
+
+def listPaginated(pageNumber: Int): ZIO[Console, Throwable, PageResult] = ???
+```
+
+We want to convert this API to a stream of `RowData` events. For the first attempt, we might think we can do it by using `unfold` operation as below:
+
+```scala
+val firstAttempt: ZStream[Console, Throwable, RowData] =
+ ZStream.unfoldChunkM(0) { pageNumber =>
+ for {
+ page <- listPaginated(pageNumber)
+ } yield
+ if (page.isLast) None
+ else Some((page.results, pageNumber + 1))
+ }
+```
+
+But it doesn't work properly; it doesn't include the last page result. So let's do a trick and perform another API call to include the last page results:
+
+```scala
+val secondAttempt: ZStream[Console, Throwable, RowData] =
+ ZStream.unfoldChunkM(Option[Int](0)) {
+ case None => ZIO.none // We already hit the last page
+ case Some(pageNumber) => // We did not hit the last page yet
+ for {
+ page <- listPaginated(pageNumber)
+ } yield Some(page.results, if (page.isLast) None else Some(pageNumber + 1))
+ }
+```
+
+This works and contains all the results of the returned pages. It works, but as we saw, `unfold` is not friendly for retrieving data from paginated APIs.
+
+We need to do some hacks and extra works to include results from the last page. This is where `ZStream.paginate` operation comes to play, it helps us to convert a paginated API to ZIO stream in a more ergonomic way. Let's rewrite this solution by using `paginate`:
+
+```scala
+val finalAttempt: ZStream[Console, Throwable, RowData] =
+ ZStream.paginateChunkM(0) { pageNumber =>
+ for {
+ page <- listPaginated(pageNumber)
+ } yield page.results -> (if (!page.isLast) Some(pageNumber + 1) else None)
+ }
+```
+
+### From Wrapped Streams
+
+Sometimes we have an effect that contains a `ZStream`, we can unwrap the embedded stream and produce a stream from those effects. If the stream is wrapped with the `ZIO` effect, we use `unwrap`, and if it is wrapped with `ZManaged` we use `unwrapManaged`:
+
+```scala
+val wrappedWithZIO: UIO[ZStream[Any, Nothing, Int]] =
+ ZIO.succeed(ZStream(1, 2, 3))
+val s1: ZStream[Any, Nothing, Int] =
+ ZStream.unwrap(wrappedWithZIO)
+
+val wrappedWithZManaged = ZManaged.succeed(ZStream(1, 2, 3))
+val s2: ZStream[Any, Nothing, Int] =
+ ZStream.unwrapManaged(wrappedWithZManaged)
+```
+
+### From Java IO
+
+**ZStream.fromFile** — Create ZIO Stream from a file:
+
+```scala
+import java.nio.file.Paths
+val file: ZStream[Blocking, Throwable, Byte] =
+ ZStream.fromFile(Paths.get("file.txt"))
+```
+
+**ZStream.fromInputStream** — Creates a stream from a `java.io.InputStream`:
+
+```scala
+val stream: ZStream[Blocking, IOException, Byte] =
+ ZStream.fromInputStream(new FileInputStream("file.txt"))
+```
+
+Note that the InputStream will not be explicitly closed after it is exhausted. Use `ZStream.fromInputStreamEffect`, or `ZStream.fromInputStreamManaged` instead.
+
+**ZStream.fromInputStreamEffect** — Creates a stream from a `java.io.InputStream`. Ensures that the InputStream is closed after it is exhausted:
+
+```scala
+val stream: ZStream[Blocking, IOException, Byte] =
+ ZStream.fromInputStreamEffect(
+ ZIO.effect(new FileInputStream("file.txt"))
+ .refineToOrDie[IOException]
+ )
+```
+
+**ZStream.fromInputStreamManaged** — Creates a stream from a managed `java.io.InputStream` value:
+
+```scala
+val managed: ZManaged[Any, IOException, FileInputStream] =
+ ZManaged.fromAutoCloseable(
+ ZIO.effect(new FileInputStream("file.txt"))
+ ).refineToOrDie[IOException]
+
+val stream: ZStream[Blocking, IOException, Byte] =
+ ZStream.fromInputStreamManaged(managed)
+```
+
+**ZStream.fromResource** — Create a stream from resource file:
+```scala
+val stream: ZStream[Blocking, IOException, Byte] =
+ ZStream.fromResource("file.txt")
+```
+
+**ZStream.fromReader** — Creates a stream from a `java.io.Reader`:
+
+```scala
+val stream: ZStream[Blocking, IOException, Char] =
+ ZStream.fromReader(new FileReader("file.txt"))
+```
+
+ZIO Stream also has `ZStream.fromReaderEffect` and `ZStream.fromReaderManaged` variants.
+
+### From Java Stream
+
+We can use `ZStream.fromJavaStream` to convert a Java Stream that may throw an exception to a ZIO Stream:
+
+```scala
+val stream: ZStream[Any, Throwable, Int] =
+ ZStream.fromJavaStream(java.util.stream.Stream.of(1, 2, 3))
+```
+
+ZIO Stream also has `ZStream.fromJavaStreamTotal`, `ZStream.fromJavaStreamEffect` and `ZStream.fromJavaStreamManaged` variants.
+
+### From Queue and Hub
+
+`Queue` and `Hub` are two asynchronous messaging data types in ZIO that can be converted into the ZIO Stream:
+
+```scala
+object ZStream {
+ def fromQueue[R, E, O](
+ queue: ZQueue[Nothing, R, Any, E, Nothing, O],
+ maxChunkSize: Int = DefaultChunkSize
+ ): ZStream[R, E, O] = ???
+
+ def fromHub[R, E, A](
+ hub: ZHub[Nothing, R, Any, E, Nothing, A]
+ ): ZStream[R, E, A] = ???
+}
+```
+
+If they contain `Chunk` of elements, we can use `ZStream.fromChunk...` constructors to create a stream from those elements (e.g. `ZStream.fromChunkQueue`):
+
+```scala
+for {
+ promise <- Promise.make[Nothing, Unit]
+ hub <- ZHub.unbounded[Chunk[Int]]
+ managed = ZStream.fromChunkHubManaged(hub).tapM(_ => promise.succeed(()))
+ stream = ZStream.unwrapManaged(managed)
+ fiber <- stream.foreach(i => putStrLn(i.toString)).fork
+ _ <- promise.await
+ _ <- hub.publish(Chunk(1, 2, 3))
+ _ <- fiber.join
+} yield ()
+```
+
+Also, if we need to shut down a `Queue` or `Hub` once the stream is closed, we should use the `ZStream.from..Shutdown` constructors (e.g. `ZStream.fromQueueWithShutdown`).
+
+Also, we can lift a `TQueue` to the ZIO Stream:
+
+```scala
+for {
+ q <- STM.atomically(TQueue.unbounded[Int])
+ stream = ZStream.fromTQueue(q)
+ fiber <- stream.foreach(i => putStrLn(i.toString)).fork
+ _ <- STM.atomically(q.offer(1))
+ _ <- STM.atomically(q.offer(2))
+ _ <- fiber.join
+} yield ()
+```
+
+### From Schedule
+
+We can create a stream from a `Schedule` that does not require any further input. The stream will emit an element for each value output from the schedule, continuing for as long as the schedule continues:
+
+```scala
+val stream: ZStream[Clock, Nothing, Long] =
+ ZStream.fromSchedule(Schedule.spaced(1.second) >>> Schedule.recurs(10))
+```
+
+### Resourceful Streams
+
+Most of the constructors of `ZStream` have a special variant to lift a Managed resource to a Stream (e.g. `ZStream.fromReaderManaged`). By using these constructors, we are creating streams that are resource-safe. Before creating the stream, they acquire the resource, and after usage, they release it.
+
+ZIO Stream also has `bracket` and `finalizer` constructors which are similar to `ZManaged`. They allow us to do some cleanup or finalization before the stream ends:
+
+#### Bracket
+
+We can provide `acquire` and `release` actions to `ZStream.bracket` to create a resourceful stream:
+
+```scala
+object ZStream {
+ def bracket[R, E, A](
+ acquire: ZIO[R, E, A]
+ )(
+ release: A => URIO[R, Any]
+ ): ZStream[R, E, A] = ???
+```
+
+Let's see an example of using a bracket when reading a file. In this example, by providing `acquire` and `release` actions to `ZStream.bracket`, it gives us a managed stream of `BufferedSource`. As this stream is managed, we can convert that `BufferedSource` to a stream of its lines and then run it, without worrying about resource leakage:
+
+```scala
+import zio.console._
+val lines: ZStream[Console, Throwable, String] =
+ ZStream
+ .bracket(
+ ZIO.effect(Source.fromFile("file.txt")) <* putStrLn("The file was opened.")
+ )(x => URIO.effectTotal(x.close()) <* putStrLn("The file was closed.").orDie)
+ .flatMap { is =>
+ ZStream.fromIterator(is.getLines())
+ }
+```
+
+#### Finalization
+
+We can also create a stream that never fails and define a finalizer for it, so that finalizer will be executed before that stream ends.
+
+```scala
+object ZStream {
+ def finalizer[R](
+ finalizer: URIO[R, Any]
+ ): ZStream[R, Nothing, Any] = ???
+}
+```
+
+It is useful when we need to add a finalizer to an existing stream. Assume we need to clean up the temporary directory after our streaming application ends:
+
+```scala
+import zio.console._
+def application: ZStream[Console, IOException, Unit] = ZStream.fromEffect(putStrLn("Application Logic."))
+def deleteDir(dir: Path): ZIO[Console, IOException, Unit] = putStrLn("Deleting file.")
+
+val myApp: ZStream[Console, IOException, Any] =
+ application ++ ZStream.finalizer(
+ (deleteDir(Paths.get("tmp")) *>
+ putStrLn("Temporary directory was deleted.")).orDie
+ )
+```
+
+#### Ensuring
+
+We might want to run some code before or after the execution of the stream's finalization. To do so, we can use `ZStream#ensuringFirst` and `ZStream#ensuring` operators:
+
+```scala
+ZStream
+ .finalizer(zio.console.putStrLn("Finalizing the stream").orDie)
+ .ensuringFirst(
+ putStrLn("Doing some works before stream's finalization").orDie
+ )
+ .ensuring(
+ putStrLn("Doing some other works after stream's finalization").orDie
+ )
+
+// Output:
+// Doing some works before stream's finalization
+// Finalizing the stream
+// Doing some other works after stream's finalization
+```
+
+## Operations
+
+### Tapping
+
+Tapping is an operation of running an effect on each emission of the ZIO Stream. We can think of `ZStream#tap` as an operation that allows us to observe each element of the stream, do some effectful operation and discard the result of this observation. The `tap` operation does not change elements of the stream, it does not affect the return type of the stream.
+
+For example, we can print each element of a stream by using the `tap` operation:
+
+```scala
+val stream: ZStream[Console, IOException, Int] =
+ ZStream(1, 2, 3)
+ .tap(x => putStrLn(s"before mapping: $x"))
+ .map(_ * 2)
+ .tap(x => putStrLn(s"after mapping: $x"))
+```
+
+### Taking Elements
+
+We can take a certain number of elements from a stream:
+
+```scala
+val stream = ZStream.iterate(0)(_ + 1)
+val s1 = stream.take(5)
+// Output: 0, 1, 2, 3, 4
+
+val s2 = stream.takeWhile(_ < 5)
+// Output: 0, 1, 2, 3, 4
+
+val s3 = stream.takeUntil(_ == 5)
+// Output: 0, 1, 2, 3, 4, 5
+
+val s4 = s3.takeRight(3)
+// Output: 3, 4, 5
+```
+
+### Mapping
+
+**map** — Applies a given function to all elements of this stream to produce another stream:
+```scala
+import zio.stream._
+
+val intStream: UStream[Int] = Stream.fromIterable(0 to 100)
+val stringStream: UStream[String] = intStream.map(_.toString)
+```
+
+If our transformation is effectful, we can use `ZStream#mapM` instead.
+
+**mapMPar** — It is similar to `mapM`, but will evaluate effects in parallel. It will emit the results downstream in the original order. The `n` argument specifies the number of concurrent running effects.
+
+Let's write a simple page downloader, which downloads URLs concurrently:
+
+```scala
+def fetchUrl(url: URL): Task[String] = Task.succeed(???)
+def getUrls: Task[List[URL]] = Task.succeed(???)
+
+val pages = ZStream.fromIterableM(getUrls).mapMPar(8)(fetchUrl)
+```
+
+**mapChunk** — Each stream is backed by some `Chunk`s. By using `mapChunk` we can batch the underlying stream and map every `Chunk` at once:
+
+```scala
+val chunked =
+ ZStream
+ .fromChunks(Chunk(1, 2, 3), Chunk(4, 5), Chunk(6, 7, 8, 9))
+
+val stream = chunked.mapChunks(x => x.tail)
+
+// Input: 1, 2, 3, 4, 5, 6, 7, 8, 9
+// Output: 2, 3, 5, 7, 8, 9
+```
+
+If our transformation is effectful we can use `mapChunkM` combinator.
+
+**mapAccum** — It is similar to a `map`, but it **transforms elements statefully**. `mapAccum` allows us to _map_ and _accumulate_ in the same operation.
+
+```scala
+abstract class ZStream[-R, +E, +O] {
+ def mapAccum[S, O1](s: S)(f: (S, O) => (S, O1)): ZStream[R, E, O1]
+}
+```
+
+Let's write a transformation, which calculates the _running total_ of the input stream:
+
+```scala
+def runningTotal(stream: UStream[Int]): UStream[Int] =
+ stream.mapAccum(0)((acc, next) => (acc + next, acc + next))
+
+// input: 0, 1, 2, 3, 4, 5
+// output: 0, 1, 3, 6, 10, 15
+```
+
+**mapConcat** — It is similar to `map`, but maps each element to zero or more elements with the type of `Iterable` and then flattens the whole stream:
+
+```scala
+val numbers: UStream[Int] =
+ ZStream("1-2-3", "4-5", "6")
+ .mapConcat(_.split("-"))
+ .map(_.toInt)
+
+// Input: "1-2-3", "4-5", "6"
+// Output: 1, 2, 3, 4, 5, 6
+```
+
+The effectful version of `mapConcat` is `mapConcatM`.
+
+`ZStream` also has chunked versions of that which are `mapConcatChunk` and `mapConcatChunkM`.
+
+**as** — The `ZStream#as` method maps the success values of this stream to the specified constant value.
+
+For example, we can map all elements to the unit value:
+
+```scala
+val unitStream: ZStream[Any, Nothing, Unit] =
+ ZStream.range(1, 5).as(())
+```
+
+### Filtering
+
+The `ZStream#filter` allows us to filter emitted elements:
+
+```scala
+val s1 = ZStream.range(1, 11).filter(_ % 2 == 0)
+// Output: 2, 4, 6, 8, 10
+
+// The `ZStream#withFilter` operator enables us to write filter in for-comprehension style
+val s2 = for {
+ i <- ZStream.range(1, 11).take(10)
+ if i % 2 == 0
+} yield i
+// Output: 2, 4, 6, 8, 10
+
+val s3 = ZStream.range(1, 11).filterNot(_ % 2 == 0)
+// Output: 1, 3, 5, 7, 9
+```
+
+### Scanning
+
+Scans are like folds, but with a history. Like folds, they take a binary operator with an initial value. Unlike a fold, which combines all the elements of a stream into a single final result, a scan emits every intermediate result as an output of the stream:
+
+```scala
+val scan = ZStream(1, 2, 3, 4, 5).scan(0)(_ + _)
+// Output: 0, 1, 3, 6, 10, 15
+// Iterations:
+// => 0 (initial value)
+// 0 + 1 => 1
+// 1 + 2 => 3
+// 3 + 3 => 6
+// 6 + 4 => 10
+// 10 + 5 => 15
+
+val fold = ZStream(1, 2, 3, 4, 5).fold(0)(_ + _)
+// Output: 15 (ZIO effect containing 15)
+```
+
+### Draining
+
+Assume we have an effectful stream, which contains a sequence of effects; sometimes we might want to execute its effect without emitting any element, in these situations to discard the results we should use the `ZStream#drain` method. It removes all output values from the stream:
+
+```scala
+val s1: ZStream[Any, Nothing, Nothing] = ZStream(1, 2, 3, 4, 5).drain
+// Emitted Elements:
+
+val s2: ZStream[Console with Random, IOException, Int] =
+ ZStream
+ .repeatEffect {
+ for {
+ nextInt <- zio.random.nextInt
+ number = Math.abs(nextInt % 10)
+ _ <- zio.console.putStrLn(s"random number: $number")
+ } yield (number)
+ }
+ .take(3)
+// Emitted Elements: 1, 4, 7
+// Result of Stream Effect on the Console:
+// random number: 1
+// random number: 4
+// random number: 7
+
+val s3: ZStream[Console with Random, IOException, Nothing] = s2.drain
+// Emitted Elements:
+// Result of Stream Effect on the Console:
+// random number: 4
+// random number: 8
+// random number: 2
+```
+
+The `ZStream#drain` often used with `ZStream#merge` to run one side of the merge for its effect while getting outputs from the opposite side of the merge:
+
+```scala
+val logging = ZStream.fromEffect(
+ putStrLn("Starting to merge with the next stream")
+)
+val stream = ZStream(1, 2, 3) ++ logging.drain ++ ZStream(4, 5, 6)
+
+// Emitted Elements: 1, 2, 3, 4, 5, 6
+// Result of Stream Effect on the Console:
+// Starting to merge with the next stream
+```
+
+Note that if we do not drain the `logging` stream, the emitted elements would contain a unit value:
+
+```scala
+val stream = ZStream(1, 2, 3) ++ logging ++ ZStream(4, 5, 6)
+
+// Emitted Elements: 1, 2, 3, (), 4, 5, 6
+// Result of Stream Effect on the Console:
+// Starting to merge with the next stream
+```
+
+### Changes
+
+The `ZStream#changes` emits elements that are not equal to the previous element:
+
+```scala
+val changes = ZStream(1, 1, 1, 2, 2, 3, 4).changes
+// Output: 1, 2, 3, 4
+```
+
+The `ZStream#changes` operator, uses natural equality to determine whether two elements are equal. If we prefer the specialized equality checking, we can provide a function of type `(O, O) => Boolean` to the `ZStream#changesWith` operator.
+
+Assume we have a stream of events with a composite key of _partition_ and _offset_ attributes, and we know that the offset is monotonic in each partition. So, we can use the `changesWith` operator to create a stream of unique elements:
+
+```scala
+case class Event(partition: Long, offset: Long, metadata: String)
+val events: ZStream[Any, Nothing, Event] = ZStream.fromIterable(???)
+
+val uniques = events.changesWith((e1, e2) => (e1.partition == e2.partition && e1.offset == e2.offset))
+```
+
+### Collecting
+
+We can perform `filter` and `map` operations in a single step using the `ZStream#collect` operation:
+
+```scala
+val source1 = ZStream(1, 2, 3, 4, 0, 5, 6, 7, 8)
+
+val s1 = source1.collect { case x if x < 6 => x * 2 }
+// Output: 2, 4, 6, 8, 0, 10
+
+val s2 = source1.collectWhile { case x if x != 0 => x * 2 }
+// Output: 2, 4, 6, 8
+
+val source2 = ZStream(Left(1), Right(2), Right(3), Left(4), Right(5))
+
+val s3 = source2.collectLeft
+// Output: 1, 4
+
+val s4 = source2.collectWhileLeft
+// Output: 1
+
+val s5 = source2.collectRight
+// Output: 2, 3, 5
+
+val s6 = source2.drop(1).collectWhileRight
+// Output: 2, 3
+
+val s7 = source2.map(_.toOption).collectSome
+// Output: 2, 3, 5
+
+val s8 = source2.map(_.toOption).collectWhileSome
+// Output: empty stream
+```
+
+We can also do effectful collect using `ZStream#collectM` and `ZStream#collectWhileM`.
+
+ZIO stream has `ZStream#collectSuccess` which helps us to perform effectful operations and just collect the success values:
+
+```scala
+val urls = ZStream(
+ "dotty.epfl.ch",
+ "zio.dev",
+ "zio.github.io/zio-json",
+ "zio.github.io/zio-nio/"
+)
+
+def fetch(url: String): ZIO[Blocking, Throwable, String] =
+ zio.blocking.effectBlocking(???)
+
+val pages = urls
+ .mapM(url => fetch(url).run)
+ .collectSuccess
+```
+
+### Zipping
+
+We can zip two streams by using the `ZStream.zipN` or `ZStream#zipWith` operator:
+
+```scala
+val s1: UStream[(Int, String)] =
+ ZStream.zipN(
+ ZStream(1, 2, 3, 4, 5, 6),
+ ZStream("a", "b", "c")
+ )((a, b) => (a, b))
+
+val s2: UStream[(Int, String)] =
+ ZStream(1, 2, 3, 4, 5, 6).zipWith(ZStream("a", "b", "c"))((a, b) => (a, b))
+
+val s3: UStream[(Int, String)] =
+ ZStream(1, 2, 3, 4, 5, 6).zip(ZStream("a", "b", "c"))
+
+// Output: (1, "a"), (2, "b"), (3, "c")
+```
+
+The new stream will end when one of the streams ends.
+
+In case of ending one stream before another, we might need to zip with default values; the `ZStream#zipAll` or `ZStream#zipAllWith` takes default values of both sides to perform such mechanism for us:
+
+```scala
+val s1 = ZStream(1, 2, 3)
+ .zipAll(ZStream("a", "b", "c", "d", "e"))(0, "x")
+val s2 = ZStream(1, 2, 3).zipAllWith(
+ ZStream("a", "b", "c", "d", "e")
+)(_ => 0, _ => "x")((a, b) => (a, b))
+
+// Output: (1, a), (2, b), (3, c), (0, d), (0, e)
+```
+
+ZIO Stream also has a `ZStream#zipAllWithExec` function, which takes `ExecutionStrategy` as an argument. The execution strategy will be used to determine whether to pull from the streams sequentially or in parallel:
+
+```scala
+def zipAllWithExec[R1 <: R, E1 >: E, O2, O3](
+ that: ZStream[R1, E1, O2]
+)(exec: ExecutionStrategy)(
+ left: O => O3, right: O2 => O3
+)(both: (O, O2) => O3): ZStream[R1, E1, O3] = ???
+```
+
+Sometimes we want to zip streams, but we do not want to pair elements strictly one by one. For example, when two streams run at different speeds, we may not want to wait for the slower one when zipping elements; instead, we want to combine each element with the latest element from the slower stream. The `ZStream#zipWithLatest` operator does this for us. It zips two streams so that when a value is emitted by either of the two streams, it is combined with the latest value from the other stream to produce a result:
+
+```scala
+val s1 = ZStream(1, 2, 3)
+ .schedule(Schedule.spaced(1.second))
+
+val s2 = ZStream("a", "b", "c", "d")
+ .schedule(Schedule.spaced(500.milliseconds))
+ .chunkN(3)
+
+s1.zipWithLatest(s2)((a, b) => (a, b))
+
+// Output: (1, a), (1, b), (1, c), (1, d), (2, d), (3, d)
+```
+
+ZIO Stream also has three useful operators for zipping element of a stream with their previous/next elements and also both of them:
+
+```scala
+val stream: UStream[Int] = ZStream.fromIterable(1 to 5)
+
+val s1: UStream[(Option[Int], Int)] = stream.zipWithPrevious
+val s2: UStream[(Int, Option[Int])] = stream.zipWithNext
+val s3: UStream[(Option[Int], Int, Option[Int])] = stream.zipWithPreviousAndNext
+```
+
+By using `ZStream#zipWithIndex` we can index elements of a stream:
+
+```scala
+val indexedStream: ZStream[Any, Nothing, (String, Long)] =
+ ZStream("Mary", "James", "Robert", "Patricia").zipWithIndex
+
+// Output: ("Mary", 0L), ("James", 1L), ("Robert", 2L), ("Patricia", 3L)
+```
+
+### Cross Product
+
+ZIO stream has `ZStream#cross` and its variants to compute the _Cartesian Product_ of two streams:
+
+```scala
+val first = ZStream(1, 2, 3)
+val second = ZStream("a", "b")
+
+val s1 = first cross second
+val s2 = first <*> second
+val s3 = first.crossWith(second)((a, b) => (a, b))
+// Output: (1,a), (1,b), (2,a), (2,b), (3,a), (3,b)
+
+val s4 = first crossLeft second
+val s5 = first <* second
+// Keep only elements from the left stream
+// Output: 1, 1, 2, 2, 3, 3
+
+val s6 = first crossRight second
+val s7 = first *> second
+// Keep only elements from the right stream
+// Output: a, b, a, b, a, b
+```
+
+Note that the right-hand side stream would be run multiple times, for every element in the left stream.
+
+ZIO stream also has `ZStream.crossN`, which takes up to four streams.
+
+### Partitioning
+
+#### partition
+The `ZStream#partition` function splits the stream into a tuple of streams based on the given predicate. The first stream contains all elements for which the predicate evaluates to true, and the second one contains all elements for which it evaluates to false.
+
+The faster stream may advance by up to `buffer` elements further than the slower one. Two streams are wrapped by `ZManaged` type.
+
+In the example below, left stream consists of even numbers only:
+
+```scala
+val partitionResult: ZManaged[Any, Nothing, (ZStream[Any, Nothing, Int], ZStream[Any, Nothing, Int])] =
+ Stream
+ .fromIterable(0 to 100)
+ .partition(_ % 2 == 0, buffer = 50)
+```
+
+#### partitionEither
+If we need to partition a stream using an effectful predicate we can use `ZStream.partitionEither`.
+
+```scala
+abstract class ZStream[-R, +E, +O] {
+ final def partitionEither[R1 <: R, E1 >: E, O2, O3](
+ p: O => ZIO[R1, E1, Either[O2, O3]],
+ buffer: Int = 16
+ ): ZManaged[R1, E1, (ZStream[Any, E1, O2], ZStream[Any, E1, O3])]
+}
+```
+
+Here is a simple example of using this function:
+
+```scala
+val partitioned: ZManaged[Any, Nothing, (ZStream[Any, Nothing, Int], ZStream[Any, Nothing, Int])] =
+ ZStream
+ .fromIterable(1 to 10)
+ .partitionEither(x => ZIO.succeed(if (x < 5) Left(x) else Right(x)))
+```
+
+### GroupBy
+
+#### groupByKey
+
+To partition the stream by function result we can use `groupByKey` by providing a function of type `O => K` which determines by which keys the stream should be partitioned.
+
+```scala
+abstract class ZStream[-R, +E, +O] {
+ final def groupByKey[K](
+ f: O => K,
+ buffer: Int = 16
+ ): ZStream.GroupBy[R, E, K, O]
+}
+```
+
+In the example below, exam results are grouped into buckets and counted:
+
+```scala
+import zio._
+import zio.stream._
+
+ case class Exam(person: String, score: Int)
+
+ val examResults = Seq(
+ Exam("Alex", 64),
+ Exam("Michael", 97),
+ Exam("Bill", 77),
+ Exam("John", 78),
+ Exam("Bobby", 71)
+ )
+
+ val groupByKeyResult: ZStream[Any, Nothing, (Int, Int)] =
+ Stream
+ .fromIterable(examResults)
+ .groupByKey(exam => exam.score / 10 * 10) {
+ case (k, s) => ZStream.fromEffect(s.runCollect.map(l => k -> l.size))
+ }
+```
+
+> **Note**:
+>
+> `groupByKey` partitions the stream by a simple function of type `O => K`; it is not an effectful function. In some cases we need to partition the stream by using an _effectful function_ of type `O => ZIO[R1, E1, (K, V)]`; in those cases we can use `groupBy`, which is the more powerful version of the `groupByKey` function.
+
+#### groupBy
+It takes an effectful function of type `O => ZIO[R1, E1, (K, V)]`; ZIO Stream uses this function to partition the stream and gives us a new data type called `ZStream.GroupBy` which represent a grouped stream. `GroupBy` has an `apply` method, that takes a function of type `(K, ZStream[Any, E, V]) => ZStream[R1, E1, A]`; ZIO Runtime runs this function across all groups and then merges them in a non-deterministic fashion as a result.
+
+```scala
+abstract class ZStream[-R, +E, +O] {
+ final def groupBy[R1 <: R, E1 >: E, K, V](
+ f: O => ZIO[R1, E1, (K, V)],
+ buffer: Int = 16
+ ): ZStream.GroupBy[R1, E1, K, V]
+}
+```
+
+In the example below, we use `groupBy` to group the given names by their first character and then count the number of names in each group:
+
+```scala
+val counted: UStream[(Char, Long)] =
+ ZStream("Mary", "James", "Robert", "Patricia", "John", "Jennifer", "Rebecca", "Peter")
+ .groupBy(x => ZIO.succeed((x.head, x))) { case (char, stream) =>
+ ZStream.fromEffect(stream.runCount.map(count => char -> count))
+ }
+// Input: Mary, James, Robert, Patricia, John, Jennifer, Rebecca, Peter
+// Output: (P, 2), (R, 2), (M, 1), (J, 3)
+```
+
+Let's change the above example a bit into an example of classifying students. The teacher assigns the student to a specific class based on the student's talent. Note that the partitioning operation is an effectful:
+
+```scala
+val classifyStudents: ZStream[Console, IOException, (String, Seq[String])] =
+ ZStream.fromEffect(
+ putStrLn("Please assign each student to one of the A, B, or C classrooms.")
+ ) *> ZStream("Mary", "James", "Robert", "Patricia", "John", "Jennifer", "Rebecca", "Peter")
+ .groupBy(student =>
+ putStr(s"What is the classroom of $student? ") *>
+ getStrLn.map(classroom => (classroom, student))
+ ) { case (classroom, students) =>
+ ZStream.fromEffect(
+ students
+ .fold(Seq.empty[String])((s, e) => s :+ e)
+ .map(students => classroom -> students)
+ )
+ }
+
+// Input:
+// Please assign each student to one of the A, B, or C classrooms.
+// What is the classroom of Mary? A
+// What is the classroom of James? B
+// What is the classroom of Robert? A
+// What is the classroom of Patricia? C
+// What is the classroom of John? B
+// What is the classroom of Jennifer? A
+// What is the classroom of Rebecca? C
+// What is the classroom of Peter? A
+//
+// Output:
+// (B,List(James, John))
+// (A,List(Mary, Robert, Jennifer, Peter))
+// (C,List(Patricia, Rebecca))
+```
+
+### Grouping
+
+#### grouped
+To partition the stream results with the specified chunk size, we can use the `grouped` function.
+
+```scala
+val groupedResult: ZStream[Any, Nothing, Chunk[Int]] =
+ Stream.fromIterable(0 to 8).grouped(3)
+
+// Input: 0, 1, 2, 3, 4, 5, 6, 7, 8
+// Output: Chunk(0, 1, 2), Chunk(3, 4, 5), Chunk(6, 7, 8)
+```
+
+#### groupedWithin
+It allows grouping events by time or chunk size, whichever is satisfied first. In the example below every chunk consists of 30 elements and is produced every 3 seconds.
+
+```scala
+import zio._
+import zio.stream._
+import zio.duration._
+import zio.clock.Clock
+
+val groupedWithinResult: ZStream[Any with Clock, Nothing, Chunk[Int]] =
+ Stream.fromIterable(0 to 10)
+ .repeat(Schedule.spaced(1.seconds))
+ .groupedWithin(30, 10.seconds)
+```
+
+### Concatenation
+
+We can concatenate two streams by using `ZStream#++` or `ZStream#concat` operator which returns a stream that emits the elements from the left-hand stream and then emits the elements from the right stream:
+
+```scala silent:nest
+val a = ZStream(1, 2, 3)
+val b = ZStream(4, 5)
+val c1 = a ++ b
+val c2 = a concat b
+```
+
+Also, we can use `ZStream.concatAll` constructor to concatenate given streams together:
+
+
+```scala
+val c3 = ZStream.concatAll(Chunk(a, b))
+```
+
+There is also the `ZStream#flatMap` combinator which creates a stream whose elements are generated by applying a function of type `O => ZStream[R1, E1, O2]` to each output of the source stream and concatenating all of the results:
+
+```scala
+val stream = ZStream(1, 2, 3).flatMap(x => ZStream.repeat(x).take(4))
+// Input: 1, 2, 3
+// Output: 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3
+```
+
+Assume we have an API that takes an author name and returns all its books:
+
+
+```scala
+def getAuthorBooks(author: String): ZStream[Any, Throwable, Book] = ZStream(???)
+```
+
+If we have a stream of author's names, we can use `ZStream#flatMap` to concatenate the results of all API calls:
+
+```scala
+val authors: ZStream[Any, Throwable, String] =
+ ZStream("Mary", "James", "Robert", "Patricia", "John")
+val allBooks: ZStream[Any, Throwable, Book] =
+ authors.flatMap(getAuthorBooks _)
+```
+
+If we need to do the `flatMap` concurrently, we can use `ZStream#flatMapPar`, and also if the order of concatenation is not important for us, we can use the `ZStream#flatMapParSwitch` operator.
+
+### Merging
+
+Sometimes we need to interleave the emission of two streams and create another stream. In these cases, we can't use the `ZStream.concat` operation because the `concat` operation waits for the first stream to finish and then consumes the second stream. So we need a non-deterministic way of picking elements from different sources. ZIO Stream's `merge` operations do this for us. Let's discuss some variants of this operation:
+
+#### merge
+
+The `ZStream#merge` picks elements randomly from the specified streams:
+
+```scala
+val s1 = ZStream(1, 2, 3).chunkN(1)
+val s2 = ZStream(4, 5, 6).chunkN(1)
+
+val merged = s1 merge s2
+// As the merge operation is not deterministic, it may output the following stream of numbers:
+// Output: 4, 1, 2, 5, 6, 3
+```
+
+The merge operation always tries to pull one chunk from each stream. So if, in the last example, we chunk our streams with a chunk size of 3 or more elements, we get a new stream containing either the `1, 2, 3, 4, 5, 6` or the `4, 5, 6, 1, 2, 3` sequence of elements.
+
+#### Termination Strategy
+
+When we merge two streams, we should think about the _termination strategy_ of this operation. Each stream has a specific lifetime. One stream may emit all its elements and finish its job, another stream may end after one hour of emission, one another may have a long-running lifetime and never end. So when we merge two streams with different lifetimes, what is the termination strategy of the resulting stream?
+
+By default, when we merge two streams using `ZStream#merge` operation, the newly produced stream will terminate when both specified streams terminate. We can also define the _termination strategy_ corresponding to our requirement. ZIO Stream supports four different termination strategies:
+
+- **Left** — The resulting stream will terminate when the left-hand side stream terminates.
+- **Right** — The resulting stream will terminate when the right-hand side stream finishes.
+- **Both** — The resulting stream will terminate when both streams finish.
+- **Either** — The resulting stream will terminate when one of the streams finishes.
+
+Here is an example of specifying termination strategy when merging two streams:
+
+```scala
+import zio.stream.ZStream.TerminationStrategy
+val s1 = ZStream.iterate(1)(_+1).take(5).chunkN(1)
+val s2 = ZStream.repeat(0).chunkN(1)
+
+val merged = s1.merge(s2, TerminationStrategy.Left)
+```
+
+We can also use `ZStream#mergeTerminateLeft`, `ZStream#mergeTerminateRight` or `ZStream#mergeTerminateEither` operations instead of specifying manually the termination strategy.
+
+#### mergeAll
+
+Usually, micro-services or long-running applications are composed of multiple components that need to run infinitely in the background and if something happens to them, or they terminate abruptly we should crash the entire application.
+
+So our main fiber should perform these three things:
+
+* **Launch and wait** — It should launch all of those background components and wait infinitely. It should not exit prematurely, because then our application won't be running.
+* **Interrupt everything** — It should interrupt all those components whenever we receive a termination signal from the operating system.
+* **Watch all fibers** — It should watch all those fibers (background components), and quickly exit if something goes wrong.
+
+So how should we do that with our main fiber? Let's try to create a long-running application:
+
+```scala
+val main =
+ kafkaConsumer.runDrain.fork *>
+ httpServer.fork *>
+ scheduledJobRunner.fork *>
+ ZIO.never
+```
+
+We can launch the Kafka consumer, the HTTP server, and our job runner and fork them, and then wait using `ZIO.never`. This will indeed wait, but if something happens to any of them and if they crash, nothing happens. So our application just hangs and remains up without anything working in the background. So this approach does not work properly.
+
+So another idea is to watch background components. The `ZIO#forkManaged` enables us to race all forked fibers in a `ZManaged` context. By using `ZIO.raceAll` as soon as one of those fibers terminates with either success or failure, it will interrupt all the rest components as the part of the release action of `ZManaged`:
+
+
+```scala
+val managedApp = for {
+ kafka <- kafkaConsumer.runDrain.forkManaged
+ http <- httpServer.forkManaged
+ jobs <- scheduledJobRunner.forkManaged
+} yield ZIO.raceAll(kafka.await, List(http.await, jobs.await))
+
+val mainApp = managedApp.use(identity).exitCode
+```
+
+This solution is very nice and elegant, but we can do it in a more declarative fashion with ZIO streams:
+
+```scala
+val managedApp =
+ for {
+ //_ <- other resources
+ _ <- ZStream
+ .mergeAllUnbounded(16)(
+ kafkaConsumer.drain,
+ ZStream.fromEffect(httpServer),
+ ZStream.fromEffect(scheduledJobRunner)
+ )
+ .runDrain
+ .toManaged_
+ } yield ()
+
+val myApp = managedApp.use_(ZIO.unit).exitCode
+```
+
+Using `ZStream.mergeAll` we can combine all these streaming components concurrently into one application.
+
+#### mergeWith
+
+Sometimes we need to merge two streams and after that, unify them and convert them to new element types. We can do this by using the `ZStream#mergeWith` operation:
+
+```scala
+val s1 = ZStream("1", "2", "3")
+val s2 = ZStream(4.1, 5.3, 6.2)
+
+val merged = s1.mergeWith(s2)(_.toInt, _.toInt)
+```
+
+### Interleaving
+
+When we `merge` two streams, the ZIO Stream picks elements from the two streams randomly. But how can we merge two streams deterministically? The answer is the `ZStream#interleave` operation.
+
+The `ZStream#interleave` operator pulls an element from each stream, one by one, and then returns an interleaved stream. When one stream is exhausted, all remaining values in the other stream will be pulled:
+
+```scala
+val s1 = ZStream(1, 2, 3)
+val s2 = ZStream(4, 5, 6, 7, 8)
+
+val interleaved = s1 interleave s2
+
+// Output: 1, 4, 2, 5, 3, 6, 7, 8
+```
+
+ZIO Stream also has the `interleaveWith` operator, which is a more powerful version of `interleave`. By using `ZStream#interleaveWith`, we can specify the logic of interleaving:
+
+```scala
+val s1 = ZStream(1, 3, 5, 7, 9)
+val s2 = ZStream(2, 4, 6, 8, 10)
+
+val interleaved = s1.interleaveWith(s2)(ZStream(true, false, false).forever)
+// Output: 1, 2, 4, 3, 6, 8, 5, 10, 7, 9
+```
+
+`ZStream#interleaveWith` uses a stream of booleans to decide which stream to choose. If it reaches a true value, it will pick a value from the left-hand side stream, otherwise, it will pick from the right-hand side.
+
+### Interspersing
+
+We can intersperse any stream by using `ZStream#intersperse` operator:
+
+```scala
+val s1 = ZStream(1, 2, 3, 4, 5).intersperse(0)
+// Output: 1, 0, 2, 0, 3, 0, 4, 0, 5
+
+val s2 = ZStream("a", "b", "c", "d").intersperse("[", "-", "]")
+// Output: [, -, a, -, b, -, c, -, d]
+```
+
+### Broadcasting
+
+We can broadcast a stream by using `ZStream#broadcast`; it returns a managed list of streams that have the same elements as the source stream. The `broadcast` operation emits each element to the inputs of the returned streams. The upstream stream can run ahead by up to `maximumLag` elements, after which it is slowed down to the pace of the slowest downstream stream.
+
+In the following example, we are broadcasting stream of random numbers to the two downstream streams. One of them is responsible to compute the maximum number, and the other one does some logging job with additional delay. The upstream stream decreases its speed by the logging stream:
+
+```scala
+val stream: ZIO[Console with Random with Clock, IOException, Unit] =
+ ZStream
+ .fromIterable(1 to 20)
+ .mapM(_ => zio.random.nextInt)
+ .map(Math.abs)
+ .map(_ % 100)
+ .tap(e => putStrLn(s"Emit $e element before broadcasting"))
+ .broadcast(2, 5)
+ .use {
+ case s1 :: s2 :: Nil =>
+ for {
+ out1 <- s1.fold(0)((acc, e) => Math.max(acc, e))
+ .flatMap(x => putStrLn(s"Maximum: $x"))
+ .fork
+ out2 <- s2.schedule(Schedule.spaced(1.second))
+ .foreach(x => putStrLn(s"Logging to the Console: $x"))
+ .fork
+ _ <- out1.join.zipPar(out2.join)
+ } yield ()
+
+ case _ => ZIO.dieMessage("unhandled case")
+ }
+```
+### Distribution
+
+The `ZStream#distributedWith` operator is a more powerful version of `ZStream#broadcast`. It takes a `decide` function, and based on that decide how to distribute incoming elements into the downstream streams:
+
+```scala
+abstract class ZStream[-R, +E, +O] {
+ final def distributedWith[E1 >: E](
+ n: Int,
+ maximumLag: Int,
+ decide: O => UIO[Int => Boolean]
+ ): ZManaged[R, Nothing, List[Dequeue[Exit[Option[E1], O]]]] = ???
+}
+```
+
+In the example below, we are partitioning incoming elements into three streams using `ZStream#distributedWith` operator:
+
+```scala
+val partitioned: ZManaged[Clock, Nothing, (UStream[Int], UStream[Int], UStream[Int])] =
+ ZStream
+ .iterate(1)(_ + 1)
+ .fixed(1.seconds)
+ .distributedWith(3, 10, x => ZIO.succeed(q => x % 3 == q))
+ .flatMap { case q1 :: q2 :: q3 :: Nil =>
+ ZManaged.succeed(
+ ZStream.fromQueue(q1).flattenExitOption,
+ ZStream.fromQueue(q2).flattenExitOption,
+ ZStream.fromQueue(q3).flattenExitOption
+ )
+ }
+```
+
+### Buffering
+
+Since the ZIO streams are pull-based, it means the consumers do not need to message the upstream to slow down. Whenever a downstream stream pulls a new element, the upstream produces a new element. So, the upstream stream is as fast as the slowest downstream stream. Sometimes we need to run producer and consumer independently, in such a situation we can use an asynchronous non-blocking queue for communication between faster producer and slower consumer; the queue can buffer elements between two streams. ZIO stream also has a built-in `ZStream#buffer` operator which does the same thing for us.
+
+The `ZStream#buffer` allows a faster producer to progress independently of a slower consumer by buffering up to `capacity` chunks in a queue.
+
+In the following example, we are going to buffer a stream. We print each element to the console as they are emitting before and after the buffering:
+
+```scala
+ZStream
+ .fromIterable(1 to 10)
+ .chunkN(1)
+ .tap(x => zio.console.putStrLn(s"before buffering: $x"))
+ .buffer(4)
+ .tap(x => zio.console.putStrLn(s"after buffering: $x"))
+ .schedule(Schedule.spaced(5.second))
+```
+
+We spaced 5 seconds between each emission to show the lag between producing and consuming messages.
+
+Based on the type of the underlying queue, we can use one of the buffering operators:
+- **Bounded Queue** — `ZStream#buffer(capacity: Int)`
+- **Unbounded Queue** — `ZStream#bufferUnbounded`
+- **Sliding Queue** — `ZStream#bufferSliding(capacity: Int)`
+- **Dropping Queue** — `ZStream#bufferDropping(capacity: Int)`
+
+### Debouncing
+
+The `ZStream#debounce` method debounces the stream with a minimum period of `d` between each element:
+
+```scala
+val stream = (
+ ZStream(1, 2, 3) ++
+ ZStream.fromEffect(ZIO.sleep(500.millis)) ++ ZStream(4, 5) ++
+ ZStream.fromEffect(ZIO.sleep(10.millis)) ++
+ ZStream(6)
+).debounce(100.millis) // emit only after a pause of at least 100 ms
+// Output: 3, 6
+```
+
+### Aggregation
+
+Aggregation is the process of converting one or more elements of type `A` into elements of type `B`. This operation takes a transducer as an aggregation unit and returns another stream that is aggregated. We have two types of aggregation:
+
+#### Synchronous Aggregation
+
+They are synchronous because the upstream emits an element when the _transducer_ emits one. To apply a synchronous aggregation to the stream we can use `ZStream#aggregate` or `ZStream#transduce` operations.
+
+Let's see an example of synchronous aggregation:
+
+```scala
+val stream = ZStream(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
+val s1 = stream.transduce(ZTransducer.collectAllN(3))
+// Output Chunk(1,2,3), Chunk(4,5,6), Chunk(7,8,9), Chunk(10)
+
+val s2 = stream.aggregate(ZTransducer.collectAllN(3))
+// Output Chunk(1,2,3), Chunk(4,5,6), Chunk(7,8,9), Chunk(10)
+```
+
+Sometimes stream processing element by element is not efficient, especially when we are working with files or doing I/O work; so we might need to aggregate them and process them in a batch way:
+
+```scala
+val source =
+ ZStream
+ .iterate(1)(_ + 1)
+ .take(200)
+ .tap(x =>
+ putStrLn(s"Producing Element $x")
+ .schedule(Schedule.duration(1.second).jittered)
+ )
+
+val sink =
+ ZSink.foreach((e: Chunk[Int]) =>
+ putStrLn(s"Processing batch of events: $e")
+ .schedule(Schedule.duration(3.seconds).jittered)
+ )
+
+val myApp =
+ source.aggregate(ZTransducer.collectAllN[Int](5)).run(sink)
+```
+
+Let's see one output of running this program:
+
+```
+Producing element 1
+Producing element 2
+Producing element 3
+Producing element 4
+Producing element 5
+Processing batch of events: Chunk(1,2,3,4,5)
+Producing element 6
+Producing element 7
+Producing element 8
+Producing element 9
+Producing element 10
+Processing batch of events: Chunk(6,7,8,9,10)
+Producing element 11
+Producing element 12
+Processing batch of events: Chunk(11,12)
+```
+
+Elements are grouped into Chunks of 5 elements and then processed in a batch way.
+
+#### Asynchronous Aggregation
+
+Asynchronous aggregations, aggregate elements of upstream as long as the downstream operators are busy. To apply an asynchronous aggregation to the stream, we can use `ZStream#aggregateAsync`, `ZStream#aggregateAsyncWithin`, and `ZStream#aggregateAsyncWithinEither` operations.
+
+
+For example, consider `source.aggregateAsync(ZTransducer.collectAllN(5)).mapM(processChunks)`. Whenever the downstream (`mapM(processChunks)`) is ready for consumption and pulls the upstream, the transducer `(ZTransducer.collectAllN(5))` will flush out its buffer, regardless of whether the `collectAllN` buffered all its 5 elements or not. So the `ZStream#aggregateAsync` will emit when downstream pulls:
+
+```scala
+val myApp =
+ source.aggregateAsync(ZTransducer.collectAllN[Int](5)).run(sink)
+```
+
+Let's see one output of running this program:
+
+```
+Producing element 1
+Producing element 2
+Producing element 3
+Producing element 4
+Processing batch of events: Chunk(1,2)
+Processing batch of events: Chunk(3,4)
+Producing element 5
+Processing batch of events: Chunk(5)
+Producing element 6
+Processing batch of events: Chunk(6)
+Producing element 7
+Producing element 8
+Producing element 9
+Processing batch of events: Chunk(7)
+Producing element 10
+Producing element 11
+Processing batch of events: Chunk(8,9)
+Producing element 12
+Processing batch of events: Chunk(10,11)
+Processing batch of events: Chunk(12)
+```
+
+The `ZStream#aggregateAsyncWithin` is another aggregator which takes a schedule. This schedule will consume all events produced by the given transducer. So the `aggregateAsyncWithin` will emit when the transducer emits or when the schedule expires:
+
+```scala
+abstract class ZStream[-R, +E, +O] {
+ def aggregateAsyncWithin[R1 <: R, E1 >: E, P](
+ transducer: ZTransducer[R1, E1, O, P],
+ schedule: Schedule[R1, Chunk[P], Any]
+ ): ZStream[R1 with Clock, E1, P] = ???
+}
+```
+
+When we are doing I/O, batching is very important. With ZIO streams, we can create user-defined batches. It is pretty easy to do that with the `ZStream#aggregateAsyncWithin` operator. Let's see the below snippet code:
+
+
+```scala
+dataStream.aggregateAsyncWithin(
+ ZTransducer.collectAllN(2000),
+ Schedule.fixed(30.seconds)
+ )
+```
+
+So it will collect elements into a chunk up to 2000 elements and if we have got less than 2000 elements and 30 seconds have passed, it will pass currently collected elements down the stream whether it has collected zero, one, or 2000 elements. So this is a sort of timeout for aggregation operation. This approach aggressively favors **throughput** over **latency**. It will introduce a fixed amount of latency into a stream. We will always wait for up to 30 seconds if we haven't reached this sort of boundary value.
+
+Instead, thanks to `Schedule` we can create a much smarter **adaptive batching algorithm** that can balance between **throughput** and **latency**. So what we are doing here is creating a schedule that operates on chunks of records. The `Schedule` starts off with 30-second timeouts for as long as its input has a size that is lower than 1000; once we see an input whose size is higher than 1000, we switch to a second schedule with some jitter, and we remain with this schedule for as long as the batch size stays over 1000:
+
+```scala
+val schedule: Schedule[Clock with Random, Chunk[Chunk[Record]], Long] =
+ // Start off with 30-second timeouts as long as the batch size is < 1000
+ Schedule.fixed(30.seconds).whileInput[Chunk[Chunk[Record]]](_.flatten.length < 1000) andThen
+ // and then, switch to a shorter jittered schedule for as long as batches remain over 1000
+ Schedule.fixed(5.seconds).jittered.whileInput[Chunk[Chunk[Record]]](_.flatten.length >= 1000)
+
+dataStream
+ .aggregateAsyncWithin(ZTransducer.collectAllN(2000), schedule)
+```
+
+## Scheduling
+
+To schedule the output of a stream we use `ZStream#schedule` combinator.
+
+Let's space between each emission of the given stream:
+
+```scala
+val stream = Stream(1, 2, 3, 4, 5).schedule(Schedule.spaced(1.second))
+```
+
+## Consuming a Stream
+
+```scala
+import zio._
+import zio.console._
+import zio.stream._
+
+val result: RIO[Console, Unit] = Stream.fromIterable(0 to 100).foreach(i => putStrLn(i.toString))
+```
+
+### Using a Sink
+
+To consume a stream using `ZSink` we can pass `ZSink` to the `ZStream#run` function:
+
+```scala
+val sum: UIO[Int] = ZStream(1,2,3).run(Sink.sum)
+```
+
+### Using fold
+
+The `ZStream#fold` method executes the fold operation over the stream of values and returns a `ZIO` effect containing the result:
+
+```scala
+val s1: ZIO[Any, Nothing, Int] = ZStream(1, 2, 3, 4, 5).fold(0)(_ + _)
+val s2: ZIO[Any, Nothing, Int] = ZStream.iterate(1)(_ + 1).foldWhile(0)(_ <= 5)(_ + _)
+```
+
+### Using foreach
+
+Using `ZStream#foreach` is another way of consuming elements of a stream. It takes a callback of type `O => ZIO[R1, E1, Any]` which passes each element of a stream to this callback:
+
+```scala
+ZStream(1, 2, 3).foreach(x => putStrLn(x.toString))
+```
+
+## Error Handling
+
+### Recovering from Failure
+
+If we have a stream that may fail, we might need to recover from the failure and run another stream, the `ZStream#orElse` takes another stream, so when the failure occurs it will switch over to the provided stream:
+
+```scala
+val s1 = ZStream(1, 2, 3) ++ ZStream.fail("Oh! Error!") ++ ZStream(4, 5)
+val s2 = ZStream(7, 8, 9)
+
+val stream = s1.orElse(s2)
+// Output: 1, 2, 3, 7, 8, 9
+```
+
+Another variant of `orElse` is `ZStream#orElseEither`, which distinguishes elements of the two streams using the `Either` data type. Using this operator, the result of the previous example would be `Left(1), Left(2), Left(3), Right(7), Right(8), Right(9)`.
+
+ZIO stream has `ZStream#catchAll` which is a more powerful version of `ZStream#orElse`. By using `catchAll` we can decide what to do based on the type and value of the failure:
+
+```scala
+val first =
+ ZStream(1, 2, 3) ++
+ ZStream.fail("Uh Oh!") ++
+ ZStream(4, 5) ++
+ ZStream.fail("Ouch")
+
+val second = ZStream(6, 7, 8)
+val third = ZStream(9, 10, 11)
+
+val stream = first.catchAll {
+ case "Uh Oh!" => second
+ case "Ouch" => third
+}
+// Output: 1, 2, 3, 6, 7, 8
+```
+
+### Recovering from Defects
+
+If we need to recover from all causes of failures including defects we should use the `ZStream#catchAllCause` method:
+
+```scala
+val s1 = ZStream(1, 2, 3) ++ ZStream.dieMessage("Oh! Boom!") ++ ZStream(4, 5)
+val s2 = ZStream(7, 8, 9)
+
+val stream = s1.catchAllCause(_ => s2)
+// Output: 1, 2, 3, 7, 8, 9
+```
+
+### Recovery from Some Errors
+
+If we need to recover from specific failure we should use `ZStream#catchSome`:
+
+```scala
+val s1 = ZStream(1, 2, 3) ++ ZStream.fail("Oh! Error!") ++ ZStream(4, 5)
+val s2 = ZStream(7, 8, 9)
+val stream = s1.catchSome {
+ case "Oh! Error!" => s2
+}
+// Output: 1, 2, 3, 7, 8, 9
+```
+
+And, to recover from a specific cause, we should use `ZStream#catchSomeCause` method:
+
+```scala
+val s1 = ZStream(1, 2, 3) ++ ZStream.dieMessage("Oh! Boom!") ++ ZStream(4, 5)
+val s2 = ZStream(7, 8, 9)
+val stream = s1.catchSomeCause { case Die(value) => s2 }
+```
+
+### Recovering to ZIO Effect
+
+If our stream encounters an error, we can provide some cleanup task as ZIO effect to our stream by using the `ZStream#onError` method:
+
+```scala
+val stream =
+ (ZStream(1, 2, 3) ++ ZStream.dieMessage("Oh! Boom!") ++ ZStream(4, 5))
+ .onError(_ => putStrLn("Stream application closed! We are doing some cleanup jobs.").orDie)
+```
+
+### Retry a Failing Stream
+
+When a stream fails, it can be retried according to the given schedule to the `ZStream#retry` operator:
+
+```scala
+val numbers = ZStream(1, 2, 3) ++
+ ZStream
+ .fromEffect(
+ zio.console.putStr("Enter a number: ") *> zio.console.getStrLn
+ .flatMap(x =>
+ x.toIntOption match {
+ case Some(value) => ZIO.succeed(value)
+ case None => ZIO.fail("NaN")
+ }
+ )
+ )
+ .retry(Schedule.exponential(1.second))
+```
+
+### From/To Either
+
+Sometimes, we might be working with legacy API which does error handling with the `Either` data type. We can _absolve_ their error types into the ZStream effect using `ZStream.absolve`:
+
+```scala
+def legacyFetchUrlAPI(url: URL): Either[Throwable, String] = ???
+
+def fetchUrl(
+ url: URL
+): ZStream[Blocking, Throwable, String] =
+ ZStream.fromEffect(
+ zio.blocking.effectBlocking(legacyFetchUrlAPI(url))
+ ).absolve
+```
+
+The type of this stream before absolving is `ZStream[Blocking, Throwable, Either[Throwable, String]]`, this operation let us submerge the error case of an `Either` into the `ZStream` error type.
+
+We can do the opposite by exposing an error of type `ZStream[R, E, A]` as a part of the `Either` by using `ZStream#either`:
+
+```scala
+val inputs: ZStream[Console, Nothing, Either[IOException, String]] =
+ ZStream.fromEffect(zio.console.getStrLn).either
+```
+
+When we are working with streams of `Either` values, we might want to fail the stream as soon as the emission of the first `Left` value:
+
+```scala
+// Stream of Either values that cannot fail
+val eitherStream: ZStream[Any, Nothing, Either[String, Int]] =
+ ZStream(Right(1), Right(2), Left("failed to parse"), Right(4))
+
+// Fails with the first emission of the left value
+val stream: ZStream[Any, String, Int] = eitherStream.rightOrFail("fail")
+```
+
+
+### Refining Errors
+
+We can keep one or some errors and terminate the fiber with the rest by using `ZStream#refineOrDie`:
+
+```scala
+val stream: ZStream[Any, Throwable, Int] =
+ ZStream.fail(new Throwable)
+
+val res: ZStream[Any, IllegalArgumentException, Int] =
+ stream.refineOrDie { case e: IllegalArgumentException => e }
+```
+
+### Timing Out
+
+We can timeout a stream if it does not produce a value after some duration using `ZStream#timeout`, `ZStream#timeoutError` and `timeoutErrorCause` operators:
+
+```scala
+stream.timeoutError(new TimeoutException)(10.seconds)
+```
+
+Or we can switch to another stream if the first stream does not produce a value after some duration:
+
+```scala
+val alternative = ZStream.fromEffect(ZIO.effect(???))
+stream.timeoutTo(10.seconds)(alternative)
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/stream/ztransducer.md b/website/versioned_docs/version-1.0.18/reference/stream/ztransducer.md
new file mode 100644
index 000000000000..9b1edba1805d
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/stream/ztransducer.md
@@ -0,0 +1,456 @@
+---
+id: ztransducer
+title: "ZTransducer"
+---
+
+
+## Introduction
+
+A `ZTransducer[R, E, I, O]` is a stream transformer. Transducers accept a stream as input, and return the transformed stream as output.
+
+ZTransducers can be thought of as a recipe for calling a bunch of methods on a source stream, to yield a new (transformed) stream. A nice mental model is the following type alias:
+
+```scala
+type ZTransducer[Env, Err, In, Out] = ZStream[Env, Err, In] => ZStream[Env, Err, Out]
+```
+
+There is no fundamental requirement for transducers to exist, because everything transducers do can be done directly on a stream. However, because transducers separate the stream transformation from the source stream itself, it becomes possible to abstract over stream transformations at the level of values, creating, storing, and passing around reusable transformation pipelines that can be applied to many different streams.
+
+## Creation
+
+### From Effect
+
+The `ZTransducer.fromEffect` creates a transducer that always evaluates the specified effect. Let's write a transducer that fails with a message:
+
+```scala
+val error: ZTransducer[Any, String, Any, Nothing] = ZTransducer.fromEffect(IO.fail("Ouch"))
+```
+
+### From Function
+
+By using `ZTransducer.fromFunction` we convert a function into a transducer. Let's create a transducer which converts a stream of strings into a stream of characters:
+
+```scala
+val chars: ZTransducer[Any, Nothing, String, Char] =
+ ZTransducer
+ .fromFunction[String, Chunk[Char]](s => Chunk.fromArray(s.toArray))
+ .mapChunks(_.flatten)
+```
+
+There is also a `ZTransducer.fromFunctionM` which is an effectful version of this constructor.
+
+## Built-in Transducers
+
+### Identity
+
+The identity transducer passes elements through without any modification:
+
+```scala
+ZStream(1,2,3).transduce(ZTransducer.identity)
+// Output: 1, 2, 3
+```
+
+### head and last
+
+The `ZTransducer.head` and `ZTransducer.last` are two transducers that return the _first_ and _last_ element of a stream:
+
+```scala
+val stream: UStream[Int] = ZStream(1, 2, 3, 4)
+val head: UStream[Option[Int]] = stream.transduce(ZTransducer.head)
+val last: UStream[Option[Int]] = stream.transduce(ZTransducer.last)
+```
+
+### Splitting
+
+**ZTransducer.splitOn** — A transducer that splits strings on a delimiter:
+
+```scala
+ZStream("1-2-3", "4-5", "6", "7-8-9-10")
+ .transduce(ZTransducer.splitOn("-"))
+ .map(_.toInt)
+// Output: 1, 2, 3, 4, 5, 6, 7, 8, 9, 10
+```
+
+**ZTransducer.splitLines** — A transducer that splits strings on newlines. Handles both Windows newlines (`\r\n`) and UNIX newlines (`\n`):
+
+```scala
+ZStream("This is the first line.\nSecond line.\nAnd the last line.")
+ .transduce(ZTransducer.splitLines)
+// Output: "This is the first line.", "Second line.", "And the last line."
+```
+
+**ZTransducer.splitOnChunk** — A transducer that splits elements on a delimiter and transforms the splits into desired output:
+
+```scala
+ZStream(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
+ .transduce(ZTransducer.splitOnChunk(Chunk(4, 5, 6)))
+// Output: Chunk(1, 2, 3), Chunk(7, 8, 9, 10)
+```
+
+### Dropping
+
+**ZTransducer.dropWhile** — Creates a transducer that starts consuming values as soon as one fails the given predicate:
+
+```scala
+ZStream(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
+ .transduce(ZTransducer.dropWhile(_ <= 5))
+// Output: 6, 7, 8, 9, 10
+```
+
+The `ZTransducer` also has `dropWhileM` which takes an effectful predicate `p: I => ZIO[R, E, Boolean]`.
+
+### Folding
+
+**ZTransducer.fold** — Using `ZTransducer.fold` we can fold incoming elements until we reach the false predicate, then the transducer emits the computed value and restarts the folding process:
+
+```scala
+ZStream
+ .range(0, 8)
+ .transduce(
+ ZTransducer.fold[Int, Chunk[Int]](Chunk.empty)(_.length < 3)((s, i) =>
+ s ++ Chunk(i)
+ )
+ )
+// Output: Chunk(0, 1, 2), Chunk(3, 4, 5), Chunk(6, 7)
+```
+
+Note that the `ZTransducer.foldM` is like `fold`, but it folds effectfully.
+
+**ZTransducer.foldWeighted** — Creates a transducer that folds incoming elements until reaches the `max` worth of elements determined by the `costFn`, then the transducer emits the computed value and restarts the folding process:
+
+```scala
+object ZTransducer {
+ def foldWeighted[I, O](z: O)(costFn: (O, I) => Long, max: Long)(
+ f: (O, I) => O
+ ): ZTransducer[Any, Nothing, I, O] = ???
+}
+```
+
+In the following example, each time we consume a new element we return one as the weight of that element using the cost function. After three elements, the sum of the weights reaches the `max` number, and the folding process restarts. So we expect this transducer to group each three elements into one `Chunk`:
+
+```scala
+ZStream(3, 2, 4, 1, 5, 6, 2, 1, 3, 5, 6)
+ .aggregate(
+ ZTransducer
+ .foldWeighted(Chunk[Int]())(
+ (_, _: Int) => 1,
+ 3
+ ) { (acc, el) =>
+ acc ++ Chunk(el)
+ }
+ )
+// Output: Chunk(3,2,4),Chunk(1,5,6),Chunk(2,1,3),Chunk(5,6)
+```
+
+Another example is when we want to group elements whose sum is equal to or less than a specific number:
+
+```scala
+ZStream(1, 2, 2, 4, 2, 1, 1, 1, 0, 2, 1, 2)
+ .aggregate(
+ ZTransducer
+ .foldWeighted(Chunk[Int]())(
+ (_, i: Int) => i.toLong,
+ 5
+ ) { (acc, el) =>
+ acc ++ Chunk(el)
+ }
+ )
+// Output: Chunk(1,2,2),Chunk(4),Chunk(2,1,1,1,0),Chunk(2,1,2)
+```
+
+> _**Note**_
+>
+> The `ZTransducer.foldWeighted` cannot decompose elements whose weight is more than the `max` number. So elements that have an individual cost larger than `max` will force the transducer to cross the `max` cost. In the last example, if the source stream was `ZStream(1, 2, 2, 4, 2, 1, 6, 1, 0, 2, 1, 2)` the output would be `Chunk(1,2,2),Chunk(4),Chunk(2,1),Chunk(6),Chunk(1,0,2,1),Chunk(2)`. As we see, the `6` element crossed the `max` cost.
+>
+> To decompose these elements, we should use `ZTransducer.foldWeightedDecompose` function.
+
+**ZTransducer.foldWeightedDecompose** — As we saw in the previous section, we need a way to decompose elements — which cause the output aggregate to cross the `max` — into smaller elements. This version of fold takes a `decompose` function and enables us to do that:
+
+```scala
+object ZTransducer {
+ def foldWeightedDecompose[I, O](
+ z: O
+ )(costFn: (O, I) => Long, max: Long, decompose: I => Chunk[I])(
+ f: (O, I) => O
+ ): ZTransducer[Any, Nothing, I, O] = ???
+}
+```
+
+In the following example, we break down elements that are bigger than 5, using the `decompose` function:
+
+```scala
+ZStream(1, 2, 2, 2, 1, 6, 1, 7, 2, 1, 2)
+ .aggregate(
+ ZTransducer
+ .foldWeightedDecompose(Chunk[Int]())(
+ (_, i: Int) => i.toLong,
+ 5,
+ (i: Int) =>
+ if (i > 5) Chunk(i - 1, 1) else Chunk(i)
+ )((acc, el) => acc ++ Chunk.succeed(el))
+ )
+// Output: Chunk(1,2,2),Chunk(2,1),Chunk(5),Chunk(1,1),Chunk(5),Chunk(1,1,2,1),Chunk(2)
+```
+
+**ZTransducer.foldUntil** — Creates a transducer that folds incoming elements until a specific number (`max`) of elements have been folded:
+
+```scala
+ZStream(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
+ .transduce(ZTransducer.foldUntil(0, 3)(_ + _))
+// Output: 6, 15, 24, 10
+```
+
+**ZTransducer.foldLeft** — This transducer will fold the inputs until the stream ends, resulting in a stream with one element:
+
+```scala
+val stream: ZStream[Any, Nothing, Int] =
+ ZStream(1, 2, 3, 4).transduce(ZTransducer.foldLeft(0)(_ + _))
+// Output: 10
+```
+
+### Prepending
+
+The `ZTransducer.prepend` creates a transducer that emits the provided chunks before emitting any other values:
+
+```scala
+ZStream(2, 3, 4).transduce(
+ ZTransducer.prepend(Chunk(0, 1))
+)
+// Output: 0, 1, 2, 3, 4
+```
+
+### Branching/Switching
+
+The `ZTransducer.branchAfter` takes `n` as an input and creates a transducer that reads the first `n` values from the stream and uses them to choose the transducer that will be used for the rest of the stream.
+
+In the following example, we are prompting the user to enter a series of numbers. If the sum of the first three elements is less than 5, we continue to emit the remaining elements by using `ZTransducer.identity`, otherwise, we retry prompting the user to enter another series of numbers:
+
+```scala
+ZStream
+ .fromEffect(
+ putStr("Enter numbers separated by comma: ") *> getStrLn
+ )
+ .mapConcat(_.split(","))
+ .map(_.trim.toInt)
+ .transduce(
+ ZTransducer.branchAfter(3) { elements =>
+ if (elements.sum < 5)
+ ZTransducer.identity
+ else
+ ZTransducer.fromEffect(
+ putStrLn(s"received elements are not applicable: $elements")
+ ) >>> ZTransducer.fail("boom")
+ }
+ )
+ .retry(Schedule.forever)
+```
+
+### Collecting
+
+**ZTransducer.collectAllN** — Collects incoming values into chunks of maximum size `n`:
+
+```scala
+ZStream(1, 2, 3, 4, 5).transduce(
+ ZTransducer.collectAllN(3)
+)
+// Output: Chunk(1,2,3), Chunk(4,5)
+```
+
+**ZTransducer.collectAllWhile** — Accumulates incoming elements into a chunk as long as they verify the given predicate:
+
+```scala
+ZStream(1, 2, 0, 4, 0, 6, 7).transduce(
+ ZTransducer.collectAllWhile(_ != 0)
+)
+// Output: Chunk(1,2), Chunk(4), Chunk(6,7)
+```
+
+**ZTransducer.collectAllToMapN** — Creates a transducer accumulating incoming values into maps of up to `n` keys. Elements are mapped to keys using the function `key`; elements mapped to the same key will be merged with the function `f`:
+
+```scala
+object ZTransducer {
+ def collectAllToMapN[K, I](n: Long)(key: I => K)(
+ f: (I, I) => I
+ ): ZTransducer[Any, Nothing, I, Map[K, I]] = ???
+}
+```
+
+Let's do an example:
+
+```scala
+ZStream(1, 2, 0, 4, 5).transduce(
+ ZTransducer.collectAllToMapN[Int, Int](10)(_ % 3)(_ + _)
+)
+// Output: Map(1 -> 5, 2 -> 7, 0 -> 0)
+```
+
+**ZTransducer.collectAllToSetN** — Creates a transducer accumulating incoming values into sets of maximum size `n`:
+
+```scala
+ZStream(1, 2, 1, 2, 1, 3, 0, 5, 0, 2).transduce(
+ ZTransducer.collectAllToSetN(3)
+)
+// Output: Set(1,2,3), Set(0,5,2), Set(1)
+```
+
+### Compression
+
+**ZTransducer.deflate** — The `deflate` transducer compresses a stream of bytes as specified by [RFC 1951](https://tools.ietf.org/html/rfc1951).
+
+```scala
+import zio.stream.ZStream
+import zio.stream.Transducer.deflate
+import zio.stream.compression.{CompressionLevel, CompressionStrategy, FlushMode}
+
+def compressWithDeflate(clearText: ZStream[Any, Nothing, Byte]): ZStream[Any, Nothing, Byte] = {
+ val bufferSize: Int = 64 * 1024 // Internal buffer size. Few times bigger than upstream chunks should work well.
+ val noWrap: Boolean = false // For HTTP Content-Encoding should be false.
+ val level: CompressionLevel = CompressionLevel.DefaultCompression
+ val strategy: CompressionStrategy = CompressionStrategy.DefaultStrategy
+ val flushMode: FlushMode = FlushMode.NoFlush
+ clearText.transduce(deflate(bufferSize, noWrap, level, strategy, flushMode))
+}
+
+def deflateWithDefaultParameters(clearText: ZStream[Any, Nothing, Byte]): ZStream[Any, Nothing, Byte] =
+ clearText.transduce(deflate())
+```
+
+**ZTransducer.gzip** — The `gzip` transducer compresses a stream of bytes using the _gzip_ method:
+
+```scala
+import zio.stream.compression._
+
+ZStream
+ .fromFile(Paths.get("file.txt"))
+ .transduce(
+ ZTransducer.gzip(
+ bufferSize = 64 * 1024,
+ level = CompressionLevel.DefaultCompression,
+ strategy = CompressionStrategy.DefaultStrategy,
+ flushMode = FlushMode.NoFlush
+ )
+ )
+ .run(
+ ZSink.fromFile(Paths.get("file.gz"))
+ )
+```
+
+### Decompression
+
+If we are reading `Content-Encoding: deflate`, `Content-Encoding: gzip` streams, or other such streams of compressed data, the following transducers can be helpful. Both decompression methods will fail with `CompressionException` when input wasn't properly compressed:
+
+**ZTransducer.inflate** — This transducer allows decompressing stream of _deflated_ inputs, according to [RFC 1951](https://tools.ietf.org/html/rfc1951).
+
+```scala
+import zio.stream.ZStream
+import zio.stream.Transducer.{ gunzip, inflate }
+import zio.stream.compression.CompressionException
+
+def decompressDeflated(deflated: ZStream[Any, Nothing, Byte]): ZStream[Any, CompressionException, Byte] = {
+ val bufferSize: Int = 64 * 1024 // Internal buffer size. Few times bigger than upstream chunks should work well.
+ val noWrap: Boolean = false // For HTTP Content-Encoding should be false.
+ deflated.transduce(inflate(bufferSize, noWrap))
+}
+```
+
+**ZTransducer.gunzip** — This transducer can be used to decompress stream of _gzipped_ inputs, according to [RFC 1952](https://tools.ietf.org/html/rfc1952):
+
+```scala
+import zio.stream.ZStream
+import zio.stream.Transducer.{ gunzip, inflate }
+import zio.stream.compression.CompressionException
+
+def decompressGzipped(gzipped: ZStream[Any, Nothing, Byte]): ZStream[Any, CompressionException, Byte] = {
+ val bufferSize: Int = 64 * 1024 // Internal buffer size. Few times bigger than upstream chunks should work well.
+ gzipped.transduce(gunzip(bufferSize))
+}
+```
+
+### Decoders
+
+ZIO stream has a wide variety of transducers to decode chunks of bytes into strings:
+
+| Decoder | Input | Output |
+|-----------------------------|----------------|--------|
+| `ZTransducer.utfDecode` | Unicode bytes | String |
+| `ZTransducer.utf8Decode` | UTF-8 bytes | String |
+| `ZTransducer.utf16Decode` | UTF-16 | String |
+| `ZTransducer.utf16BEDecode` | UTF-16BE bytes | String |
+| `ZTransducer.utf16LEDecode` | UTF-16LE bytes | String |
+| `ZTransducer.utf32Decode` | UTF-32 bytes | String |
+| `ZTransducer.utf32BEDecode` | UTF-32BE bytes | String |
+| `ZTransducer.utf32LEDecode` | UTF-32LE bytes | String |
+| `ZTransducer.usASCIIDecode` | US-ASCII bytes | String |
+
+## Operations
+
+### Filtering
+
+Transducers have two types of filtering operations: the `ZTransducer#filter`, which is used for filtering outgoing elements, and the `ZTransducer#filterInput`, which is used for filtering incoming elements:
+
+```scala
+ZStream(1, -2, 0, 1, 3, -3, 4, 2, 0, 1, -3, 1, 1, 6)
+ .transduce(
+ ZTransducer
+ .collectAllN[Int](3)
+ .filterInput[Int](_ > 0)
+ .filter(_.sum > 5)
+ )
+// Output: Chunk(4,2,1), Chunk(1,1,6)
+```
+
+### Output Transformation (Mapping)
+
+To transform the _outputs_ of the transducer, we can use the `ZTransducer#map` combinator for the success channel, and the `ZTransducer#mapError` combinator for the failure channel. Also, the `ZTransducer.mapChunks` takes a function of type `Chunk[O] => Chunk[O2]` and transforms chunks emitted by the transducer.
+
+### Input Transformation (Contramap)
+
+To transform the _inputs_ of the transducer, we can use the `ZTransducer#contramap` combinator. It takes a map function of type `J => I` and converts a `ZTransducer[R, E, I, O]` to `ZTransducer[R, E, J, O]`:
+
+```scala
+class ZTransducer[-R, +E, -I, +O] {
+ final def contramap[J](f: J => I): ZTransducer[R, E, J, O] = ???
+}
+```
+
+Let's create an integer parser transducer using `ZTransducer.contramap`:
+
+```scala
+val numbers: ZStream[Any, Nothing, Int] =
+ ZStream("1-2-3-4-5")
+ .mapConcat(_.split("-"))
+ .transduce(
+ ZTransducer.identity[Int].contramap[String](_.toInt)
+ )
+```
+
+### Composing
+
+We can compose transducers in two ways:
+
+1. **Composing Two Transducers** — One transducer can be composed with another transducer, resulting in a composite transducer:
+
+```scala
+val lines: ZStream[Blocking, Throwable, String] =
+ ZStream
+ .fromFile(Paths.get("file.txt"))
+ .transduce(
+ ZTransducer.utf8Decode >>> ZTransducer.splitLines
+ )
+```
+
+2. **Composing ZTransducer with ZSink** — One transducer can be composed with a sink, resulting in a sink that processes elements by piping them through the transducer and piping the results into the sink:
+
+```scala
+val refine: ZIO[Blocking, Throwable, Long] =
+ ZStream
+ .fromFile(Paths.get("file.txt"))
+ .run(
+ ZTransducer.utf8Decode >>> ZTransducer.splitLines.filter(_.contains('₿')) >>>
+ ZSink
+ .fromFile(Paths.get("file.refined.txt"))
+ .contramapChunks[String](
+ _.flatMap(line => (line + System.lineSeparator()).getBytes())
+ )
+ )
+```
diff --git a/website/versioned_docs/version-1.0.18/reference/sync/concurrentmap.md b/website/versioned_docs/version-1.0.18/reference/sync/concurrentmap.md
new file mode 100644
index 000000000000..632bcdae1ec2
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/sync/concurrentmap.md
@@ -0,0 +1,79 @@
+---
+id: concurrentmap
+title: "ConcurrentMap"
+---
+
+A `ConcurrentMap` is a wrapper over `java.util.concurrent.ConcurrentHashMap`.
+
+## Operations
+
+### Creation
+
+| Method | Definition |
+|-----------------------------------------------------------------------|-----------------------------------------------------------------------------------------|
+|`empty[K, V]: UIO[ConcurrentMap[K, V]]` | Makes an empty `ConcurrentMap` |
+|`fromIterable[K, V](pairs: Iterable[(K, V)]): UIO[ConcurrentMap[K, V]]`| Makes a new `ConcurrentMap` initialized with the provided collection of key-value pairs |
+|`make[K, V](pairs: (K, V)*): UIO[ConcurrentMap[K, V]]` | Makes a new `ConcurrentMap` initialized with the provided key-value pairs |
+
+### Use
+
+| Method | Definition |
+|-------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------|
+| `collectFirst[B](pf: PartialFunction[(K, V), B]): UIO[Option[B]]` | Finds the first element of a map for which the partial function is defined and applies the function to it. |
+| `compute(key: K, remap: (K, V) => V): UIO[Option[V]]` | Attempts to compute a mapping for the given key and its current mapped value. |
+| `def computeIfAbsent(key: K, map: K => V): UIO[V]` | Computes a value of a non-existing key. |
+| `computeIfPresent(key: K, remap: (K, V) => V): UIO[Option[V]]` | Attempts to compute a new mapping of an existing key. |
+| `exists(p: (K, V) => Boolean): UIO[Boolean]` | Tests whether a given predicate holds true for at least one element in a map. |
+| `fold[S](zero: S)(f: (S, (K, V)) => S): UIO[S]` | Folds the elements of a map using the given binary operator. |
+| `forall(p: (K, V) => Boolean): UIO[Boolean]` | Tests whether a predicate is satisfied by all elements of a map. |
+| `get(key: K): UIO[Option[V]]` | Retrieves the value associated with the given key. |
+| `put(key: K, value: V): UIO[Option[V]]` | Adds a new key-value pair and optionally returns previously bound value. |
+| `putAll(keyValues: (K, V)*): UIO[Unit]`                           | Adds all new key-value pairs.                                                                                |
+| `putIfAbsent(key: K, value: V): UIO[Option[V]]` | Adds a new key-value pair, unless the key is already bound to some other value. |
+| `remove(key: K): UIO[Option[V]]` | Removes the entry for the given key, optionally returning value associated with it. |
+| `remove(key: K, value: V): UIO[Boolean]` | Removes the entry for the given key if it is mapped to a given value. |
+| `removeIf(p: (K, V) => Boolean): UIO[Unit]`                       | Removes all elements which satisfy the given predicate.                                                      |
+| `retainIf(p: (K, V) => Boolean): UIO[Unit]` | Removes all elements which do not satisfy the given predicate. |
+| `replace(key: K, value: V): UIO[Option[V]]` | Replaces the entry for the given key only if it is mapped to some value. |
+| `replace(key: K, oldValue: V, newValue: V): UIO[Boolean]` | Replaces the entry for the given key only if it was previously mapped to a given value. |
+| `toChunk: UIO[Chunk[(K, V)]]` | Collects all entries into a chunk. |
+| `toList: UIO[List[(K, V)]]` | Collects all entries into a list. |
+
+## Example Usage
+
+Given:
+
+```scala
+import zio.concurrent.ConcurrentMap
+import zio.{Chunk, UIO}
+
+for {
+ emptyMap <- ConcurrentMap.empty[Int, String]
+ data <- UIO(Chunk(1 -> "A", 2 -> "B", 3 -> "C"))
+ mapA <- ConcurrentMap.fromIterable(data)
+ map100 <- ConcurrentMap.make(1 -> 100)
+ mapB <- ConcurrentMap.make(("A", 1), ("B", 2), ("C", 3))
+} yield ()
+```
+
+| Operation | Result |
+|----------------------------------------------------------|---------|
+| `mapA.collectFirst { case (3, _) => "Three" }` | "Three" |
+| `mapA.collectFirst { case (4, _) => "Four" }` | Empty |
+| `map100.compute(1, _+_).get(1)` | 101 |
+| `emptyMap.computeIfAbsent("abc", _.length).get("abc")` | 3 |
+| `map100.computeIfPresent(1, _+_).get(1)` | 101 |
+| `mapA.exists((k, _) => k % 2 == 0)` | true |
+| `mapA.exists((k, _) => k == 4)` | false |
+| `mapB.fold(0) { case (acc, (_, value)) => acc + value }` | 6 |
+| `mapB.forall((_, v) => v < 4)` | true |
+| `emptyMap.get(1)` | None |
+| `emptyMap.put(1, "b").get(1)` | "b" |
+| `mapA.putIfAbsent(2, "b").get(2)` | "B" |
+| `emptyMap.putAll((1, "A"), (2, "B"), (3, "C")).get(1)` | "A" |
+| `mapA.remove(1).get(1)` | None |
+| `mapA.remove(1,"b").get(1)` | "A" |
+| `mapA.removeIf((k, _) => k != 1).get(1)` | "A" |
+| `mapA.removeIf((k, _) => k != 1).get(2)` | None |
+| `mapA.retainIf((k, _) => k == 1).get(1)` | "A" |
+| `mapA.retainIf((k, _) => k == 1).get(2)` | None |
diff --git a/website/versioned_docs/version-1.0.18/reference/sync/concurrentset.md b/website/versioned_docs/version-1.0.18/reference/sync/concurrentset.md
new file mode 100644
index 000000000000..a5db2ef2b0f6
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/sync/concurrentset.md
@@ -0,0 +1,74 @@
+---
+id: concurrentSet
+title: "ConcurrentSet"
+---
+
+A `ConcurrentSet` is a Set wrapper over `java.util.concurrent.ConcurrentHashMap`.
+
+## Operations
+
+### Creation
+
+| Method | Definition |
+|-------------------------------------------------------------|----------------------------------------------------------------------|
+| `empty[A]: UIO[ConcurrentSet[A]]` | Makes an empty `ConcurrentSet` |
+| `empty[A](initialCapacity: Int): UIO[ConcurrentSet[A]]`     | Makes an empty `ConcurrentSet` with initial capacity                  |
+| `fromIterable[A](as: Iterable[(A)]): UIO[ConcurrentSet[A]]` | Makes a new `ConcurrentSet` initialized with the provided collection |
+| `make[A](as: A*): UIO[ConcurrentSet[A]]` | Makes a new `ConcurrentSet` initialized with the provided elements |
+
+### Use
+
+| Method | Definition |
+|---------------------------------------------------------------|------------------------------------------------------------------------------------------------------------|
+| `add(x: A): UIO[Boolean]` | Adds a new value. |
+| `addAll(xs: Iterable[A]): UIO[Boolean]` | Adds all new values. |
+| `collectFirst[B](pf: PartialFunction[A, B]): UIO[Option[B]]`  | Finds the first element of a set for which the partial function is defined and applies the function to it.  |
+| `exists(p: A => Boolean): UIO[Boolean]` | Tests whether a given predicate holds true for at least one element in the set. |
+| `fold[R, E, S](zero: S)(f: (S, A) => S): UIO[S]` | Folds the elements of a set using the given binary operator. |
+| `forall(p: A => Boolean): UIO[Boolean]` | Tests whether a predicate is satisfied by all elements of a set. |
+| `find[B](p: A => Boolean): UIO[Option[A]]` | Retrieves the elements in which predicate is satisfied. |
+| `remove(x: A): UIO[Boolean]` | Removes the entry for the given value if it is mapped to an existing element. |
+| `removeAll(xs: Iterable[A]): UIO[Boolean]` | Removes all the entries for the given values if they are mapped to an existing element. |
+| `removeIf(p: A => Boolean): UIO[Boolean]` | Removes all elements which satisfy the given predicate. |
+| `retainAll(xs: Iterable[A]): UIO[Boolean]` | Retain all the entries for the given values if they are mapped to an existing element. |
+| `retainIf(p: A => Boolean): UIO[Boolean]` | Removes all elements which do not satisfy the given predicate. |
+| `clear: UIO[Unit]` | Removes all elements. |
+| `contains(x: A): UIO[Boolean]` | Tests whether if the element is in the set. |
+| `containsAll(xs: Iterable[A]): UIO[Boolean]` | Tests if the elements in the collection are a subset of the set. |
+| `size: UIO[Int]` | Number of elements in the set. |
+| `isEmpty: UIO[Boolean]` | True if there are no elements in the set. |
+| `toSet: UIO[Set[A]]`                                          | Collects all elements into a set.                                                                           |
+| `transform(f: A => A): UIO[Unit]`                             | Atomically updates all elements of the set using the given function.                                        |
+
+## Example Usage
+
+Given:
+
+```scala
+import zio.concurrent.ConcurrentSet
+
+for {
+ emptySet <- ConcurrentSet.empty[Int]
+ setA <- ConcurrentSet.make[Int](1, 2, 3, 4)
+} yield ()
+```
+
+| Operation | Result |
+|-------------------------------------------|-----------------------|
+| `emptySet.add(1).toSet` | Set(1) |
+| `setA.addAll(Chunk(5, 6).toSet)` | Set(1, 2, 3, 4, 5, 6) |
+| `setA.remove(1).toSet` | Set(2, 3, 4) |
+| `setA.removeAll(1, 3).toSet` | Set(2, 4) |
+| `setA.retainAll(List(1, 3, 5, 6)).toSet` | Set(1, 3) |
+| `setA.clear.isEmpty` | true |
+| `setA.contains(5)` | false |
+| `setA.containsAll(Chunk(1, 2, 3))` | true |
+| `setA.exists(_ > 4)` | false |
+| `setA.forAll(_ < 5)` | true |
+| `setA.removeIf(_ % 2 == 0)` | Set(2, 4) |
+| `setA.retainIf(_ % 2 == 0)` | Set(1, 3) |
+| `setA.find(_ > 2)` | Set(3, 4) |
+| `setA.collectFirst { case 3 => "Three" }` | Set(3) |
+| `setA.size` | 4 |
+| `setA.transform(_ + 10)` | Set(11, 12, 13, 14) |
+| `setA.fold(0)(_ + _)` | 10 |
diff --git a/website/versioned_docs/version-1.0.18/reference/sync/countdownlatch.md b/website/versioned_docs/version-1.0.18/reference/sync/countdownlatch.md
new file mode 100644
index 000000000000..baa62dfffe26
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/sync/countdownlatch.md
@@ -0,0 +1,52 @@
+---
+id: countdownlatch
+title: "CountdownLatch"
+---
+A synchronization aid that allows one or more fibers to wait until a set of operations being performed in other fibers
+completes.
+
+A `CountDownLatch` is initialized with a given count. The `await` method blocks until the current count reaches zero due
+to invocations of the `countDown` method, after which all waiting fibers are released and any subsequent invocations
+of `await` return immediately. This is a one-shot phenomenon -- the count cannot be reset. If you need a version that
+resets the count, consider using a `CyclicBarrier`.
+
+A `CountDownLatch` is a versatile synchronization tool and can be used for a number of purposes. A `CountDownLatch`
+initialized with a count of one serves as a simple on/off latch, or gate: all fibers invoking `await` wait at the gate
+until it is opened by a fiber invoking `countDown`. A `CountDownLatch` initialized to N can be used to make one fiber
+wait until N fibers have completed some action, or some action has been completed N times.
+
+A useful property of a `CountDownLatch` is that it doesn't require that fibers calling `countDown` wait for the count to
+reach zero before proceeding, it simply prevents any fiber from proceeding past an `await` until all fibers could pass.
+
+## Operations
+
+### Creation
+
+| Method | Definition |
+|-------------------------------------------------------------|-------------------------------|
+| `make(n: Int): IO[Option[Nothing], CountdownLatch]` | Makes a new `CountdownLatch`. |
+
+### Use
+
+| Method | Definition |
+|------------------------|--------------------------------------------------------------------------------------------|
+| `await: UIO[Unit]` | Causes the current fiber to wait until the latch has counted down to zero. |
+| `countDown: UIO[Unit]` | Decrements the count of the latch, releasing all waiting fibers if the count reaches zero. |
+| `count: UIO[Int]` | Returns the current count. |
+
+## Example Usage
+
+```scala
+import zio._
+import zio.concurrent.CountdownLatch
+
+for {
+ latch <- CountdownLatch.make(100)
+ count <- Ref.make(0)
+ ps <- ZIO.collectAll(List.fill(10)(Promise.make[Nothing, Unit]))
+ _ <- ZIO.forkAll(ps.map(p => latch.await *> count.update(_ + 1) *> p.succeed(())))
+ _ <- latch.countDown.repeat(Schedule.recurs(99))
+ _ <- ZIO.foreach_(ps)(_.await)
+ result <- count.get
+} yield assert(result == 10)
+```
\ No newline at end of file
diff --git a/website/versioned_docs/version-1.0.18/reference/sync/cyclicbarrier.md b/website/versioned_docs/version-1.0.18/reference/sync/cyclicbarrier.md
new file mode 100644
index 000000000000..dfdba70b2f14
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/reference/sync/cyclicbarrier.md
@@ -0,0 +1,139 @@
+---
+id: cyclicbarrier
+title: "CyclicBarrier"
+---
+
+A synchronization aid that allows a set of fibers to all wait for each other to reach a common barrier point.
+
+CyclicBarriers are useful in programs involving a fixed sized party of fibers that must occasionally wait for each
+other. The barrier is called cyclic because it can be re-used after the waiting fibers are released.
+
+A CyclicBarrier supports an optional action command that is run once per barrier point, after the last fiber in the
+party arrives, but before any fibers are released. This barrier action is useful for updating shared-state before any of
+the parties continue.
+
+## Operations
+
+### Creation
+
+| Method | Definition |
+|-------------------------------------------------------------|--------------------------------------------------|
+| `make(parties: Int): UIO[CyclicBarrier]`                    | Makes a `CyclicBarrier` with n parties           |
+| `make(parties: Int, action: UIO[Any]): UIO[CyclicBarrier]`  | Makes a `CyclicBarrier` with parties and action  |
+
+### Use
+
+| Method | Definition |
+|--------------------------|---------------------------------------------------------------------------------------------|
+| `parties: Int` | The number of parties required to trip this barrier. |
+| `waiting: UIO[Int]` | The number of parties currently waiting at the barrier. |
+| `await: IO[Unit, Int]` | Waits until all parties have invoked await on this barrier. Fails if the barrier is broken. |
+| `reset: UIO[Unit]` | Resets the barrier to its initial state. Breaks any waiting party. |
+| `isBroken: UIO[Boolean]` | Queries if this barrier is in a broken state. |
+
+## Example Usage
+
+Construction:
+
+```scala
+import zio.concurrent.CyclicBarrier
+
+for {
+ barrier <- CyclicBarrier.make(100)
+ isBroken <- barrier.isBroken
+ waiting <- barrier.waiting
+} yield assert(!isBroken && waiting == 0)
+```
+
+Releasing the barrier:
+
+```scala
+import zio.concurrent.CyclicBarrier
+import zio._
+
+for {
+ barrier <- CyclicBarrier.make(2)
+ f1 <- barrier.await.fork
+ _ <- f1.status.repeatWhile(!_.isInstanceOf[Fiber.Status.Suspended])
+ f2 <- barrier.await.fork
+ ticket1 <- f1.join
+ ticket2 <- f2.join
+} yield assert(ticket1 == 1 && ticket2 == 0)
+```
+
+Releasing the barrier and performing the action:
+
+```scala
+import zio.concurrent.CyclicBarrier
+import zio._
+
+for {
+ promise <- Promise.make[Nothing, Unit]
+ barrier <- CyclicBarrier.make(2, promise.succeed(()))
+ f1 <- barrier.await.fork
+ _ <- f1.status.repeatWhile(!_.isInstanceOf[Fiber.Status.Suspended])
+ f2 <- barrier.await.fork
+ _ <- f1.join
+ _ <- f2.join
+ isComplete <- promise.isDone
+} yield assert(isComplete)
+```
+
+Releases the barrier and cycles:
+
+```scala
+import zio.concurrent.CyclicBarrier
+
+for {
+ barrier <- CyclicBarrier.make(2)
+ f1 <- barrier.await.fork
+ _ <- f1.status.repeatWhile(!_.isInstanceOf[Fiber.Status.Suspended])
+ f2 <- barrier.await.fork
+ ticket1 <- f1.join
+ ticket2 <- f2.join
+ f3 <- barrier.await.fork
+ _ <- f3.status.repeatWhile(!_.isInstanceOf[Fiber.Status.Suspended])
+ f4 <- barrier.await.fork
+ ticket3 <- f3.join
+ ticket4 <- f4.join
+} yield assert(ticket1 == 1 && ticket2 == 0 && ticket3 == 1 && ticket4 == 0)
+```
+
+Breaks on reset:
+
+```scala
+import zio.concurrent.CyclicBarrier
+
+for {
+ barrier <- CyclicBarrier.make(100)
+ f1 <- barrier.await.fork
+ f2 <- barrier.await.fork
+ _ <- f1.status.repeatWhile(!_.isInstanceOf[Fiber.Status.Suspended])
+ _ <- f2.status.repeatWhile(!_.isInstanceOf[Fiber.Status.Suspended])
+ _ <- barrier.reset
+ res1 <- f1.await
+ res2 <- f2.await
+} yield ()
+```
+
+Breaks on party interruption:
+
+```scala
+import zio.concurrent.CyclicBarrier
+import zio._
+import zio.duration._
+import zio.test.environment.TestClock
+
+for {
+ barrier <- CyclicBarrier.make(100)
+ f1 <- barrier.await.timeout(1.second).fork
+ f2 <- barrier.await.fork
+ _ <- f1.status.repeatWhile(!_.isInstanceOf[Fiber.Status.Suspended])
+ _ <- f2.status.repeatWhile(!_.isInstanceOf[Fiber.Status.Suspended])
+ isBroken1 <- barrier.isBroken
+ _ <- TestClock.adjust(1.second)
+ isBroken2 <- barrier.isBroken
+ res1 <- f1.await
+ res2 <- f2.await
+} yield assert(!isBroken1 && isBroken2)
+```
diff --git a/website/versioned_docs/version-1.0.18/resources/ecosystem/community.md b/website/versioned_docs/version-1.0.18/resources/ecosystem/community.md
new file mode 100644
index 000000000000..94a9b3198ff1
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/resources/ecosystem/community.md
@@ -0,0 +1,1656 @@
+---
+id: community
+title: "Community ZIO Libraries"
+---
+
+In this section we are going to introduce some of the most important libraries that have first-class ZIO support from the community.
+
+If you know a useful library that has first-class ZIO support, please consider [submitting a pull request](https://github.com/zio/zio/pulls) to add it to this list.
+
+## Caliban
+
+[Caliban](https://ghostdogpr.github.io/caliban/) is a purely functional library for creating GraphQL servers and clients in Scala.
+
+### Introduction
+
+Key features of Caliban
+- **Purely Functional** — All interfaces are pure and types are referentially transparent.
+- **Type Safety** — Schemas are type safe and derived at compile time.
+- **Minimal Boilerplate** — No need to manually define a schema for every type in your API.
+- **Excellent Interoperability** — Out-of-the-box support for major HTTP server libraries, effect types, JSON libraries, and more.
+
+### Installation
+
+In order to use this library, we need to add the following line in our `build.sbt` file:
+
+```scala
+libraryDependencies += "com.github.ghostdogpr" %% "caliban" % "1.1.0"
+```
+
+Caliban also has lots of optional modules to interoperate with various other libraries:
+
+```scala
+libraryDependencies += "com.github.ghostdogpr" %% "caliban-http4s" % "1.1.0" // routes for http4s
+libraryDependencies += "com.github.ghostdogpr" %% "caliban-akka-http" % "1.1.0" // routes for akka-http
+libraryDependencies += "com.github.ghostdogpr" %% "caliban-play" % "1.1.0" // routes for play
+libraryDependencies += "com.github.ghostdogpr" %% "caliban-finch" % "1.1.0" // routes for finch
+libraryDependencies += "com.github.ghostdogpr" %% "caliban-zio-http" % "1.1.0" // routes for zio-http
+libraryDependencies += "com.github.ghostdogpr" %% "caliban-cats" % "1.1.0" // interop with cats effect
+libraryDependencies += "com.github.ghostdogpr" %% "caliban-monix" % "1.1.0" // interop with monix
+libraryDependencies += "com.github.ghostdogpr" %% "caliban-tapir" % "1.1.0" // interop with tapir
+libraryDependencies += "com.github.ghostdogpr" %% "caliban-federation" % "1.1.0" // interop with apollo federation
+```
+
+### Example
+
+First, to define Caliban API, we should define data models using case classes and ADTs. Then the Caliban can derive the whole GraphQL schema from these data models:
+
+```scala
+import caliban.GraphQL.graphQL
+import caliban.schema.Annotations.GQLDescription
+import caliban.{RootResolver, ZHttpAdapter}
+import zhttp.http._
+import zhttp.service.Server
+import zio.{ExitCode, ZEnv, ZIO}
+
+import scala.language.postfixOps
+
+sealed trait Role
+
+object Role {
+ case object SoftwareDeveloper extends Role
+ case object SiteReliabilityEngineer extends Role
+ case object DevOps extends Role
+}
+
+case class Employee(
+ name: String,
+ role: Role
+)
+
+case class EmployeesArgs(role: Role)
+case class EmployeeArgs(name: String)
+
+case class Queries(
+ @GQLDescription("Return all employees with specific role")
+ employees: EmployeesArgs => List[Employee],
+ @GQLDescription("Find an employee by its name")
+ employee: EmployeeArgs => Option[Employee]
+)
+object CalibanExample extends zio.App {
+
+ val employees = List(
+ Employee("Alex", Role.DevOps),
+ Employee("Maria", Role.SoftwareDeveloper),
+ Employee("James", Role.SiteReliabilityEngineer),
+ Employee("Peter", Role.SoftwareDeveloper),
+ Employee("Julia", Role.SiteReliabilityEngineer),
+ Employee("Roberta", Role.DevOps)
+ )
+
+ val myApp = for {
+ interpreter <- graphQL(
+ RootResolver(
+ Queries(
+ args => employees.filter(e => args.role == e.role),
+ args => employees.find(e => e.name == args.name)
+ )
+ )
+ ).interpreter
+ _ <- Server
+ .start(
+ port = 8088,
+ http = Http.route { case _ -> Root / "api" / "graphql" =>
+ ZHttpAdapter.makeHttpService(interpreter)
+ }
+ )
+ .forever
+ } yield ()
+
+ override def run(args: List[String]): ZIO[ZEnv, Nothing, ExitCode] =
+ myApp.exitCode
+
+}
+```
+
+Now let's query all software developers using GraphQL query language:
+
+```graphql
+query{
+ employees(role: SoftwareDeveloper){
+ name
+ role
+ }
+}
+```
+
+Here is the _curl_ request of this query:
+
+```bash
+curl 'http://localhost:8088/api/graphql' --data-binary '{"query":"query{\n employees(role: SoftwareDeveloper){\n name\n role\n}\n}"}'
+```
+
+And the response:
+
+```json
+{
+ "data" : {
+ "employees" : [
+ {
+ "name" : "Maria",
+ "role" : "SoftwareDeveloper"
+ },
+ {
+ "name" : "Peter",
+ "role" : "SoftwareDeveloper"
+ }
+ ]
+ }
+}
+```
+
+## ZIO gRPC
+
+[ZIO-gRPC](https://scalapb.github.io/zio-grpc/) lets us write purely functional gRPC servers and clients.
+
+### Introduction
+
+Key features of ZIO gRPC:
+- **Functional and Type-safe** — Use the power of Functional Programming and the Scala compiler to build robust, correct and fully featured gRPC servers.
+- **Support for Streaming** — Use ZIO's feature-rich `ZStream`s to create server-streaming, client-streaming, and bi-directionally streaming RPC endpoints.
+- **Highly Concurrent** — Leverage the power of ZIO to build asynchronous clients and servers without deadlocks and race conditions.
+- **Resource Safety** — Safely cancel an RPC call by interrupting the effect. Resources on the server will never leak!
+- **Scala.js Support** — ZIO gRPC comes with Scala.js support, so we can send RPCs to our service from the browser.
+
+### Installation
+
+First of all we need to add following lines to the `project/plugins.sbt` file:
+
+```scala
+addSbtPlugin("com.thesamet" % "sbt-protoc" % "1.0.2")
+
+libraryDependencies +=
+ "com.thesamet.scalapb.zio-grpc" %% "zio-grpc-codegen" % "0.5.0"
+```
+
+Then in order to use this library, we should add the following line in our `build.sbt` file:
+
+```scala
+PB.targets in Compile := Seq(
+ scalapb.gen(grpc = true) -> (sourceManaged in Compile).value / "scalapb",
+ scalapb.zio_grpc.ZioCodeGenerator -> (sourceManaged in Compile).value / "scalapb"
+)
+
+libraryDependencies ++= Seq(
+ "io.grpc" % "grpc-netty" % "1.39.0",
+ "com.thesamet.scalapb" %% "scalapb-runtime-grpc" % scalapb.compiler.Version.scalapbVersion
+)
+```
+
+### Example
+
+In this section, we are going to implement a simple server and client for the following gRPC _proto_ file:
+
+```protobuf
+syntax = "proto3";
+
+option java_multiple_files = true;
+option java_package = "io.grpc.examples.helloworld";
+option java_outer_classname = "HelloWorldProto";
+option objc_class_prefix = "HLW";
+
+package helloworld;
+
+// The greeting service definition.
+service Greeter {
+ rpc SayHello (HelloRequest) returns (HelloReply) {}
+}
+
+// The request message containing the user's name.
+message HelloRequest {
+ string name = 1;
+}
+
+// The response message containing the greetings
+message HelloReply {
+ string message = 1;
+}
+```
+
+The hello world server would be like this:
+
+```scala
+import io.grpc.ServerBuilder
+import io.grpc.examples.helloworld.helloworld.ZioHelloworld.ZGreeter
+import io.grpc.examples.helloworld.helloworld.{HelloReply, HelloRequest}
+import io.grpc.protobuf.services.ProtoReflectionService
+import scalapb.zio_grpc.{ServerLayer, ServiceList}
+import zio.console.putStrLn
+import zio.{ExitCode, URIO, ZEnv, ZIO}
+
+object HelloWorldServer extends zio.App {
+
+ val helloService: ZGreeter[ZEnv, Any] =
+ (request: HelloRequest) =>
+ putStrLn(s"Got request: $request") *>
+ ZIO.succeed(HelloReply(s"Hello, ${request.name}"))
+
+
+ val myApp = for {
+ _ <- putStrLn("Server is running. Press Ctrl-C to stop.")
+ _ <- ServerLayer
+ .fromServiceList(
+ ServerBuilder
+ .forPort(9000)
+ .addService(ProtoReflectionService.newInstance()),
+ ServiceList.add(helloService))
+ .build.useForever
+ } yield ()
+
+ override def run(args: List[String]): URIO[zio.ZEnv, ExitCode] =
+ myApp.exitCode
+}
+```
+
+And this is an example of using its client:
+
+```scala
+import io.grpc.ManagedChannelBuilder
+import io.grpc.examples.helloworld.helloworld.HelloRequest
+import io.grpc.examples.helloworld.helloworld.ZioHelloworld.GreeterClient
+import scalapb.zio_grpc.ZManagedChannel
+import zio.console._
+import zio.{ExitCode, URIO}
+
+object HelloWorldClient extends zio.App {
+ def myApp =
+ for {
+ r <- GreeterClient.sayHello(HelloRequest("World"))
+ _ <- putStrLn(r.message)
+ } yield ()
+
+ val clientLayer =
+ GreeterClient.live(
+ ZManagedChannel(
+ ManagedChannelBuilder.forAddress("localhost", 9000).usePlaintext()
+ )
+ )
+
+ override def run(args: List[String]): URIO[zio.ZEnv, ExitCode] =
+ myApp.provideCustomLayer(clientLayer).exitCode
+}
+```
+
+## Distage
+
+[Distage](https://izumi.7mind.io/distage/) is a compile-time safe, transparent, and debuggable Dependency Injection framework for pure FP Scala.
+
+### Introduction
+
+By using _Distage_ we can auto-wire all components of our application.
+- We don't need to manually link components together
+- We don't need to manually specify the order of allocation and deallocation of dependencies. This is derived automatically from the dependency graph.
+- We can override any component within the dependency graph.
+- It helps us to create different configurations of our components for different use cases.
+
+### Installation
+
+In order to use this library, we need to add the following line in our `build.sbt` file:
+
+```scala
+libraryDependencies += "io.7mind.izumi" %% "distage-core" % "1.0.8"
+```
+
+### Example
+
+In this example we create a `RandomApp` comprising two services: `Random` and `Logger`. By using `ModuleDef` we _bind_ services to their implementations:
+
+```scala
+import distage.{Activation, Injector, ModuleDef, Roots}
+import izumi.distage.model.Locator
+import izumi.distage.model.definition.Lifecycle
+import zio.{ExitCode, Task, UIO, URIO, ZIO}
+
+import java.time.LocalDateTime
+
+trait Random {
+ def nextInteger: UIO[Int]
+}
+
+final class ScalaRandom extends Random {
+ override def nextInteger: UIO[Int] =
+ ZIO.effectTotal(scala.util.Random.nextInt())
+}
+
+trait Logger {
+ def log(name: String): Task[Unit]
+}
+
+final class ConsoleLogger extends Logger {
+ override def log(line: String): Task[Unit] = {
+ val timeStamp = LocalDateTime.now()
+ ZIO.effect(println(s"$timeStamp: $line"))
+ }
+}
+
+final class RandomApp(random: Random, logger: Logger) {
+ def run: Task[Unit] = for {
+ random <- random.nextInteger
+ _ <- logger.log(s"random number generated: $random")
+ } yield ()
+}
+
+object DistageExample extends zio.App {
+ def RandomAppModule: ModuleDef = new ModuleDef {
+ make[Random].from[ScalaRandom]
+ make[Logger].from[ConsoleLogger]
+ make[RandomApp] // `.from` is not required for concrete classes
+ }
+
+ val resource: Lifecycle[Task, Locator] = Injector[Task]().produce(
+ plan = Injector[Task]().plan(
+ bindings = RandomAppModule,
+ activation = Activation.empty,
+ roots = Roots.target[RandomApp]
+ )
+ )
+
+ val myApp: Task[Unit] = resource.use(locator => locator.get[RandomApp].run)
+
+ override def run(args: List[String]): URIO[zio.ZEnv, ExitCode] =
+ myApp.exitCode
+}
+```
+
+## LogStage
+
+[LogStage](https://izumi.7mind.io/logstage/) is a zero-cost structural logging framework for Scala & Scala.js.
+
+### Introduction
+
+Some key features of _LogStage_:
+
+1. LogStage extracts structure from ordinary string interpolations in your log messages with zero changes to code.
+2. LogStage uses macros to extract log structure, so it is faster at runtime than typical reflective structural logging frameworks
+3. Log contexts
+4. Console, File, and SLF4J sinks included, File sink supports log rotation,
+5. Human-readable output and JSON output included,
+6. Method-level logging granularity. Can configure methods com.example.Service.start and com.example.Service.doSomething independently,
+7. Slf4J adapters: route legacy Slf4J logs into LogStage router
+
+### Installation
+
+In order to use this library, we need to add the following line in our `build.sbt` file:
+
+```scala
+// LogStage core library
+libraryDependencies += "io.7mind.izumi" %% "logstage-core" % "1.0.8"
+```
+
+There are also some optional modules:
+
+```scala
+libraryDependencies ++= Seq(
+ // Json output
+ "io.7mind.izumi" %% "logstage-rendering-circe" % "1.0.8",
+ // Router from Slf4j to LogStage
+ "io.7mind.izumi" %% "logstage-adapter-slf4j" % "1.0.8",
+ // LogStage integration with DIStage
+ "io.7mind.izumi" %% "distage-extension-logstage" % "1.0.8",
+ // Router from LogStage to Slf4J
+ "io.7mind.izumi" %% "logstage-sink-slf4j " % "1.0.8",
+)
+```
+
+### Example
+
+Let's try a simple example of using _LogStage_:
+
+```scala
+import izumi.fundamentals.platform.uuid.UUIDGen
+import logstage.LogZIO.log
+import logstage.{IzLogger, LogIO2, LogZIO}
+import zio.{Has, URIO, _}
+
+object LogStageExample extends zio.App {
+ val myApp = for {
+ _ <- log.info("I'm logging with logstage!")
+ userId = UUIDGen.getTimeUUID()
+ _ <- log.info(s"Current $userId")
+ _ <- log.info("I'm logging within the same fiber!")
+ f <- log.info("I'm logging within a new fiber!").fork
+ _ <- f.join
+ } yield ()
+
+ val loggerLayer: ULayer[Has[LogIO2[IO]]] =
+ ZLayer.succeed(LogZIO.withFiberId(IzLogger()))
+
+ override def run(args: List[String]): URIO[zio.ZEnv, ExitCode] =
+ myApp.provideLayer(loggerLayer).exitCode
+}
+```
+
+The output of this program would be something like this:
+
+```
+I 2021-07-26T21:27:35.164 (LogStageExample.scala:8) …mpty>.LogStageExample.myApp [14:zio-default-async-1] fiberId=Id(1627318654646,1) I'm logging with logstage!
+I 2021-07-26T21:27:35.252 (LogStageExample.scala:10) <.LogStageExample.myApp.8 [14:zio-default-async-1] fiberId=Id(1627318654646,1) Current userId=93546810-ee32-11eb-a393-11bc5b145beb
+I 2021-07-26T21:27:35.266 (LogStageExample.scala:11) <.L.myApp.8.10 [14:zio-default-async-1] fiberId=Id(1627318654646,1) I'm logging within the same fiber!
+I 2021-07-26T21:27:35.270 (LogStageExample.scala:12) <.L.m.8.10.11 [16:zio-default-async-2] fiberId=Id(1627318655269,2) I'm logging within a new fiber!
+```
+
+## MUnit ZIO
+
+[MUnit ZIO](https://github.com/poslegm/munit-zio) is an integration library between MUnit and ZIO.
+
+### Introduction
+
+[MUnit](https://scalameta.org/munit/) is a Scala testing library that is implemented as a JUnit runner. It has _actionable errors_, so the test reports are colorfully pretty-printed, stack traces are highlighted, error messages are pointed to the source code location where the failure happened.
+
+MUnit ZIO enables us to write tests that return `ZIO` values without needing to call any unsafe methods (e.g. `Runtime#unsafeRun`).
+
+### Installation
+
+In order to use this library, we need to add the following lines in our `build.sbt` file:
+
+```scala
+libraryDependencies += "org.scalameta" %% "munit" % "0.7.27" % Test
+libraryDependencies += "com.github.poslegm" %% "munit-zio" % "0.0.2" % Test
+```
+
+If we are using a version of sbt lower than 1.5.0, we will also need to add:
+
+```scala
+testFrameworks += new TestFramework("munit.Framework")
+```
+
+### Example
+
+Here is a simple MUnit spec that is integrated with the `ZIO` effect:
+
+```scala
+import munit._
+import zio._
+
+class SimpleZIOSpec extends ZSuite {
+ testZ("1 + 1 = 2") {
+ for {
+ a <- ZIO(1)
+ b <- ZIO(1)
+ }
+ yield assertEquals(a + b, 2)
+ }
+}
+```
+
+## Rezilience
+
+[Rezilience](https://github.com/svroonland/rezilience) is a ZIO-native library for making resilient distributed systems.
+
+### Introduction
+
+Rezilience is a ZIO-native fault tolerance library with a collection of policies for making asynchronous systems more resilient to failures inspired by Polly, Resilience4J, and Akka. It does not have external library dependencies other than ZIO.
+
+It comprises these policies:
+- **CircuitBreaker** — Temporarily prevent trying calls after too many failures
+- **RateLimiter** — Limit the rate of calls to a system
+- **Bulkhead** — Limit the number of in-flight calls to a system
+- **Retry** — Try again after transient failures
+- **Timeout** — Interrupt execution if a call does not complete in time
+
+### Installation
+
+In order to use this library, we need to add the following line in our `build.sbt` file:
+
+```scala
+libraryDependencies += "nl.vroste" %% "rezilience" % "0.7.0"
+```
+
+### Example
+
+Let's try an example of writing _Circuit Breaker_ policy for calling an external API:
+
+```scala
+import nl.vroste.rezilience.CircuitBreaker.{CircuitBreakerCallError, State}
+import nl.vroste.rezilience._
+import zio._
+import zio.clock.Clock
+import zio.console.{Console, putStrLn}
+import zio.duration._
+
+object CircuitBreakerExample extends zio.App {
+
+ def callExternalSystem: ZIO[Console, String, Nothing] =
+ putStrLn("External service called, but failed!").orDie *>
+ ZIO.fail("External service failed!")
+
+ val myApp: ZIO[Console with Clock, Nothing, Unit] =
+ CircuitBreaker.withMaxFailures(
+ maxFailures = 10,
+ resetPolicy = Schedule.exponential(1.second),
+ onStateChange = (state: State) =>
+ ZIO(println(s"State changed to $state")).orDie
+ ).use { cb =>
+ for {
+ _ <- ZIO.foreach_(1 to 10)(_ => cb(callExternalSystem).either)
+ _ <- cb(callExternalSystem).catchAll(errorHandler)
+ _ <- ZIO.sleep(2.seconds)
+ _ <- cb(callExternalSystem).catchAll(errorHandler)
+ } yield ()
+ }
+
+ def errorHandler: CircuitBreakerCallError[String] => URIO[Console, Unit] = {
+ case CircuitBreaker.CircuitBreakerOpen =>
+ putStrLn("Circuit breaker blocked the call to our external system").orDie
+ case CircuitBreaker.WrappedError(error) =>
+ putStrLn(s"External system threw an exception: $error").orDie
+ }
+
+ override def run(args: List[String]): URIO[zio.ZEnv, ExitCode] =
+ myApp.exitCode
+}
+```
+
+## Tamer
+
+[Tamer](https://github.com/laserdisc-io/tamer) is a multi-functional Kafka connector for producing data based on [ZIO Kafka](https://github.com/zio/zio-kafka).
+
+### Introduction
+
+Tamer is a completely customizable source connector that produces to Kafka. It ships with preconfigured modules for SQL, cloud storage and REST API, but you can provide your own functions and Tamer will take care of the rest.
+
+### Installation
+
+Depending on the source you have at hand you can add the correct dependency in your `build.sbt`:
+
+```scala
+libraryDependencies += "io.laserdisc" %% "tamer-db" % "0.16.1"
+libraryDependencies += "io.laserdisc" %% "tamer-oci-objectstorage" % "0.16.1"
+libraryDependencies += "io.laserdisc" %% "tamer-rest" % "0.16.1"
+libraryDependencies += "io.laserdisc" %% "tamer-s3" % "0.16.1"
+```
+
+### Example
+
+Let's say you have an inventory DB that's compatible with [Doobie](https://github.com/tpolecat/doobie), you can get all of your items with just a few lines of code:
+
+```scala
+import tamer._
+import tamer.db._
+
+import doobie.implicits.legacy.instant._
+import doobie.syntax.string._
+import zio._
+import zio.duration._
+import zio.json._
+
+import java.time.Instant
+
+case class Row(id: String, name: String, description: Option[String], modifiedAt: Instant)
+ extends tamer.db.Timestamped(modifiedAt)
+
+object Row {
+ implicit val rowJsonCodec = DeriveJsonCodec.gen[Row]
+}
+
+object DatabaseSimple extends zio.App {
+ // Here we'll go with zio-json codec, you can use avro, circe and jsoniter
+ // out-of-the box or plug yours!
+ implicit val stateKeyJsonCodec = DeriveJsonCodec.gen[tamer.Tamer.StateKey]
+ implicit val windowJsonCodec = DeriveJsonCodec.gen[tamer.db.Window]
+
+ val program: RIO[ZEnv, Unit] = tamer.db.DbSetup
+ .tumbling(window =>
+ sql"""SELECT id, name, description, modified_at
+ |FROM users
+ |WHERE modified_at > ${window.from} AND modified_at <= ${window.to}""".stripMargin
+ .query[Row]
+ )(
+ recordKey = (_, v) => v.id,
+ from = Instant.parse("2020-01-01T00:00:00.00Z"),
+ tumblingStep = 5.days
+ )
+ .runWith(dbLayerFromEnvironment ++ tamer.kafkaConfigFromEnvironment)
+
+ override final def run(args: List[String]): URIO[ZEnv, ExitCode] =
+ program.exitCode
+
+ // If you have other codecs like circe in the classpath you have to disambiguate
+ implicit lazy val stateKeyCodec: Codec[Tamer.StateKey] = Codec.optionalZioJsonCodec
+ implicit lazy val windowCodec: Codec[tamer.db.Window] = Codec.optionalZioJsonCodec
+ implicit lazy val stringCodec: Codec[String] = Codec.optionalZioJsonCodec
+}
+```
+See full example [on the GitHub repo](https://github.com/laserdisc-io/tamer/blob/4e1a7646fb44041648d9aa3ba089decb81ebe487/example/src/main/scala/tamer/db/DatabaseSimple.scala)
+
+## TranzactIO
+
+[TranzactIO](https://github.com/gaelrenoux/tranzactio) is a ZIO wrapper for some Scala database access libraries, currently for [Doobie](https://github.com/tpolecat/doobie) and [Anorm](https://github.com/playframework/anorm).
+
+### Introduction
+
+Using functional effect database access libraries like _Doobie_ forces us to use their specialized monads, such as `ConnectionIO` for _Doobie_. The goal of _TranzactIO_ is to provide seamless integration with these libraries to help us to stay in the `ZIO` world.
+
+### Installation
+
+In order to use this library, we need to add the following line in our `build.sbt` file:
+
+```scala
+libraryDependencies += "io.github.gaelrenoux" %% "tranzactio" % "2.1.0"
+```
+
+In addition, we need to declare the database access library we are using. For example, for the next example we need to add following dependencies for _Doobie_ integration:
+
+```scala
+libraryDependencies += "org.tpolecat" %% "doobie-core" % "0.13.4"
+libraryDependencies += "org.tpolecat" %% "doobie-h2" % "0.13.4"
+```
+
+### Example
+
+Let's try an example of simple _Doobie_ program:
+
+```scala
+import doobie.implicits._
+import io.github.gaelrenoux.tranzactio.doobie
+import io.github.gaelrenoux.tranzactio.doobie.{Connection, Database, TranzactIO, tzio}
+import org.h2.jdbcx.JdbcDataSource
+import zio.blocking.Blocking
+import zio.clock.Clock
+import zio.console.{Console, putStrLn}
+import zio.{ExitCode, Has, URIO, ZIO, ZLayer, blocking}
+
+import javax.sql.DataSource
+
+object TranzactIOExample extends zio.App {
+
+ val query: ZIO[Connection with Console, Throwable, Unit] = for {
+ _ <- PersonQuery.setup
+ _ <- PersonQuery.insert(Person("William", "Stewart"))
+ _ <- PersonQuery.insert(Person("Michelle", "Streeter"))
+ _ <- PersonQuery.insert(Person("Johnathon", "Martinez"))
+ users <- PersonQuery.list
+ _ <- putStrLn(users.toString)
+ } yield ()
+
+ val myApp: ZIO[zio.ZEnv, Throwable, Unit] =
+ Database.transactionOrWidenR(query).provideCustomLayer(services.database)
+
+ override def run(args: List[String]): URIO[zio.ZEnv, ExitCode] =
+ myApp.exitCode
+}
+
+case class Person(firstName: String, lastName: String)
+
+object PersonQuery {
+ def list: TranzactIO[List[Person]] = tzio {
+ sql"""SELECT first_name, last_name FROM person""".query[Person].to[List]
+ }
+
+ def setup: TranzactIO[Unit] = tzio {
+ sql"""
+ CREATE TABLE person (
+ first_name VARCHAR NOT NULL,
+ last_name VARCHAR NOT NULL
+ )
+ """.update.run.map(_ => ())
+ }
+
+ def insert(p: Person): TranzactIO[Unit] = tzio {
+ sql"""INSERT INTO person (first_name, last_name) VALUES (${p.firstName}, ${p.lastName})""".update.run
+ .map(_ => ())
+ }
+}
+
+object services {
+ val datasource: ZLayer[Blocking, Throwable, Has[DataSource]] =
+ ZLayer.fromEffect(
+ blocking.effectBlocking {
+ val ds = new JdbcDataSource
+ ds.setURL(s"jdbc:h2:mem:mydb;DB_CLOSE_DELAY=10")
+ ds.setUser("sa")
+ ds.setPassword("sa")
+ ds
+ }
+ )
+
+ val database: ZLayer[Any, Throwable, doobie.Database.Database] =
+ (Blocking.live >>> datasource ++ Blocking.live ++ Clock.live) >>> Database.fromDatasource
+}
+```
+
+## ZIO Arrow
+
+[ZIO Arrow](https://github.com/zio-mesh/zio-arrow/) provides the `ZArrow` effect, which is a high-performance composition effect for the ZIO ecosystem.
+
+### Introduction
+
+`ZArrow[E, A, B]` is an effect representing a computation parametrized over the input (`A`), and the output (`B`) that may fail with an `E`. Arrows focus on **composition** and **high-performance computation**. They are like simple functions, but they are lifted into the `ZArrow` context.
+
+`ZArrow` delivers three main capabilities:
+
+- **High-Performance** — `ZArrow` exploits `JVM` internals to dramatically decrease the number of allocations and dispatches, yielding an unprecedented runtime performance.
+
+- **Abstract interface** — `ZArrow` is a more abstract data type than the ZIO monad; it is even more abstract than ZIO streams. In a nutshell, `ZArrow` allows a function-like interface that can have both different inputs and different outputs.
+
+- **Easy Integration** — `ZArrow` can both input and output `ZIO Monad` and `ZIO Stream`, simplifying application development with different ZIO Effect types.
+
+### Installation
+
+In order to use this library, we need to add the following line in our `build.sbt` file:
+
+```scala
+libraryDependencies += "io.github.neurodyne" %% "zio-arrow" % "0.2.1"
+```
+
+### Example
+
+In this example we are going to write a repetitive task: reading a number from standard input, raising it to the power of 2, and printing the result:
+
+```scala
+import zio.arrow.ZArrow
+import zio.arrow.ZArrow._
+import zio.console._
+import zio.{ExitCode, URIO}
+
+import java.io.IOException
+
+object ArrowExample extends zio.App {
+
+ val isPositive : ZArrow[Nothing, Int, Boolean] = ZArrow((_: Int) > 0)
+ val toStr : ZArrow[Nothing, Any, String] = ZArrow((i: Any) => i.toString)
+ val toInt : ZArrow[Nothing, String, Int] = ZArrow((i: String) => i.toInt)
+ val getLine : ZArrow[IOException, Any, String] = ZArrow.liftM((_: Any) => getStrLn.provideLayer(Console.live))
+ val printStr : ZArrow[IOException, String, Unit] = ZArrow.liftM((line: String) => putStr(line).provideLayer(Console.live))
+ val printLine : ZArrow[IOException, String, Unit] = ZArrow.liftM((line: String) => putStrLn(line).provideLayer(Console.live))
+ val power2 : ZArrow[Nothing, Int, Double] = ZArrow((i: Int) => Math.pow(i, 2))
+ val enterNumber: ZArrow[Nothing, Unit, String] = ZArrow((_: Unit) => "Enter positive number (-1 to exit): ")
+ val goodbye : ZArrow[Nothing, Any, String] = ZArrow((_: Any) => "Goodbye!")
+
+ val app: ZArrow[IOException, Unit, Boolean] =
+ enterNumber >>> printStr >>> getLine >>> toInt >>>
+ ifThenElse(isPositive)(
+ power2 >>> toStr >>> printLine >>> ZArrow((_: Any) => true)
+ )(
+ ZArrow((_: Any) => false)
+ )
+
+ val myApp = whileDo(app)(ZArrow(_ => ())) >>> goodbye >>> printLine
+
+ override def run(args: List[String]): URIO[zio.ZEnv, ExitCode] =
+ myApp.run(()).exitCode
+}
+```
+
+Let's see an example of running this program:
+
+```
+Enter positive number (-1 to exit): 25
+625.0
+Enter positive number (-1 to exit): 8
+64.0
+Enter positive number (-1 to exit): -1
+Goodbye!
+```
+
+## ZIO AMQP
+
+[ZIO AMQP](https://github.com/svroonland/zio-amqp) is a ZIO-based AMQP client for Scala.
+
+### Introduction
+
+ZIO AMQP is a ZIO-based wrapper around the RabbitMQ client. It provides a streaming interface to AMQP queues and helps to prevent us from shooting ourselves in the foot with thread-safety issues.
+
+### Installation
+
+In order to use this library, we need to add the following line in our `build.sbt` file:
+
+```scala
+libraryDependencies += "nl.vroste" %% "zio-amqp" % "0.2.0"
+```
+
+### Example
+
+First, let's create an instance of RabbitMQ:
+
+```
+docker run -d --name some-rabbit -p 5672:5672 -p 5673:5673 -p 15672:15672 rabbitmq:3-management
+```
+
+Then we need to create `my_exchange` and `my_queue` and bind the queue to the exchange via the RabbitMQ management dashboard (`localhost:15672`).
+
+Now we can run the example below:
+
+```scala
+import nl.vroste.zio.amqp._
+import zio._
+import zio.blocking._
+import zio.clock.Clock
+import zio.console._
+import zio.duration.durationInt
+import zio.random.Random
+
+import java.net.URI
+
+object ZIOAMQPExample extends zio.App {
+
+ val channelM: ZManaged[Blocking, Throwable, Channel] = for {
+ connection <- Amqp.connect(URI.create("amqp://localhost:5672"))
+ channel <- Amqp.createChannel(connection)
+ } yield channel
+
+ val myApp: ZIO[Blocking with Console with Clock with Random, Throwable, Unit] =
+ channelM.use { channel =>
+ val producer: ZIO[Blocking with Random with Clock, Throwable, Long] =
+ zio.random.nextUUID
+ .flatMap(uuid =>
+ channel.publish("my_exchange", uuid.toString.getBytes)
+ .map(_ => ())
+ ).schedule(Schedule.spaced(1.seconds))
+
+ val consumer: ZIO[Blocking with Console, Throwable, Unit] = channel
+ .consume(queue = "my_queue", consumerTag = "my_consumer")
+ .mapM { record =>
+ val deliveryTag = record.getEnvelope.getDeliveryTag
+ putStrLn(s"Received $deliveryTag: ${new String(record.getBody)}") *>
+ channel.ack(deliveryTag)
+ }
+ .runDrain
+
+ for {
+ p <- producer.fork
+ c <- consumer.fork
+ _ <- p.zip(c).join
+ } yield ()
+ }
+
+ override def run(args: List[String]): URIO[zio.ZEnv, ExitCode] =
+ myApp.exitCode
+}
+```
+
+## ZIO AWS
+
+[ZIO AWS](https://github.com/vigoo/zio-aws) is a low-level AWS wrapper for ZIO for all the AWS services using the AWS Java SDK v2.
+
+### Introduction
+
+The goal is to have access to all AWS functionality for cases when only simple, direct access is needed from a ZIO application, or to be used as a building block for higher-level wrappers around specific services.
+
+Key features of ZIO AWS:
+
+- Common configuration layer
+- ZIO module layer per AWS service
+- Wrapper for all operations on all services
+- HTTP service implementations for functional Scala HTTP libraries, injected through ZIO’s module system
+- ZStream wrapper around paginated operations
+- Service-specific extra configuration
+- More idiomatic Scala request and response types wrapping the Java classes
+
+### Installation
+
+There are tons of artifacts [published](https://vigoo.github.io/zio-aws/docs/artifacts.html) for each AWS service. We can pick whichever services we need.
+
+### Example
+
+The following example uses the _ElasticBeanstalk_ and _EC2_ APIs. First, we need to add these dependencies to our `build.sbt` file:
+
+```scala
+libraryDependencies += "io.github.vigoo" %% "zio-aws-core" % "3.17.8.4",
+libraryDependencies += "io.github.vigoo" %% "zio-aws-ec2" % "3.17.8.4",
+libraryDependencies += "io.github.vigoo" %% "zio-aws-elasticbeanstalk" % "3.17.8.4",
+libraryDependencies += "io.github.vigoo" %% "zio-aws-netty" % "3.17.8.4"
+```
+
+And here is the example code:
+
+```scala
+import io.github.vigoo.zioaws.core.AwsError
+import io.github.vigoo.zioaws.ec2.Ec2
+import io.github.vigoo.zioaws.ec2.model._
+import io.github.vigoo.zioaws.elasticbeanstalk.ElasticBeanstalk
+import io.github.vigoo.zioaws.elasticbeanstalk.model._
+import io.github.vigoo.zioaws.{core, ec2, elasticbeanstalk, netty}
+import zio.console._
+import zio.stream._
+import zio.{console, _}
+
+object ZIOAWSExample extends zio.App {
+ val program: ZIO[Console with Ec2 with ElasticBeanstalk, AwsError, Unit] =
+ for {
+ appsResult <- elasticbeanstalk.describeApplications(
+ DescribeApplicationsRequest(applicationNames = Some(List("my-service")))
+ )
+ app <- appsResult.applications.map(_.headOption)
+ _ <- app match {
+ case Some(appDescription) =>
+ for {
+ applicationName <- appDescription.applicationName
+ _ <- console.putStrLn(
+ s"Got application description for $applicationName"
+ ).ignore
+
+ envStream = elasticbeanstalk.describeEnvironments(
+ DescribeEnvironmentsRequest(applicationName =
+ Some(applicationName)
+ )
+ )
+
+ _ <- envStream.run(Sink.foreach { env =>
+ env.environmentName.flatMap { environmentName =>
+ (for {
+ environmentId <- env.environmentId
+ _ <- console.putStrLn(
+ s"Getting the EB resources of $environmentName"
+ ).ignore
+
+ resourcesResult <-
+ elasticbeanstalk.describeEnvironmentResources(
+ DescribeEnvironmentResourcesRequest(environmentId =
+ Some(environmentId)
+ )
+ )
+ resources <- resourcesResult.environmentResources
+ _ <- console.putStrLn(
+ s"Getting the EC2 instances in $environmentName"
+ ).ignore
+ instances <- resources.instances
+ instanceIds <- ZIO.foreach(instances)(_.id)
+ _ <- console.putStrLn(
+ s"Instance IDs are ${instanceIds.mkString(", ")}"
+ ).ignore
+
+ reservationsStream = ec2.describeInstances(
+ DescribeInstancesRequest(instanceIds = Some(instanceIds))
+ )
+ _ <- reservationsStream.run(Sink.foreach { reservation =>
+ reservation.instances
+ .flatMap { instances =>
+ ZIO.foreach(instances) { instance =>
+ for {
+ id <- instance.instanceId
+ typ <- instance.instanceType
+ launchTime <- instance.launchTime
+ _ <- console.putStrLn(s" instance $id:").ignore
+ _ <- console.putStrLn(s" type: $typ").ignore
+ _ <- console.putStrLn(
+ s" launched at: $launchTime"
+ ).ignore
+ } yield ()
+ }
+ }
+ })
+ } yield ()).catchAll { error =>
+ console.putStrLnErr(
+ s"Failed to get info for $environmentName: $error"
+ ).ignore
+ }
+ }
+ })
+ } yield ()
+ case None =>
+ ZIO.unit
+ }
+ } yield ()
+
+ override def run(args: List[String]): URIO[zio.ZEnv, ExitCode] = { //
+ val httpClient = netty.default
+ val awsConfig = httpClient >>> core.config.default
+ val aws = awsConfig >>> (ec2.live ++ elasticbeanstalk.live)
+
+ program
+ .provideCustomLayer(aws)
+ .either
+ .flatMap {
+ case Left(error) =>
+ console.putStrErr(s"AWS error: $error").ignore.as(ExitCode.failure)
+ case Right(_) =>
+ ZIO.unit.as(ExitCode.success)
+ }
+ }
+}
+```
+
+## ZIO AWS S3
+
+[ZIO AWS S3](https://github.com/zio-mesh/zio-aws-s3) is a ZIO integration with AWS S3 SDK.
+
+### Introduction
+
+This project aims to ease ZIO integration with AWS S3, providing a clean, simple and efficient API.
+
+### Installation
+
+In order to use this library, we need to add the following line in our `build.sbt` file:
+
+```scala
+libraryDependencies += "io.github.neurodyne" %% "zio-aws-s3" % "0.4.13"
+```
+
+### Example
+
+```scala
+import software.amazon.awssdk.regions.Region
+import software.amazon.awssdk.services.s3.S3AsyncClient
+import zio.{ExitCode, URIO, _}
+import zio_aws_s3.AwsApp.AwsLink
+import zio_aws_s3.{AwsAgent, AwsApp}
+
+import scala.jdk.CollectionConverters._
+
+object ZIOAWSS3Example extends zio.App {
+ val BUCKET = ""
+
+ val awsEnv: ZLayer[S3AsyncClient, Throwable, AwsLink] =
+ AwsApp.ExtDeps.live >>> AwsApp.AwsLink.live
+
+ val app: ZIO[Any, Throwable, Unit] = for {
+ s3 <- AwsAgent.createClient(Region.US_WEST_2, "")
+ response <- AwsApp.listBuckets().provideLayer(awsEnv).provide(s3)
+ buckets <- Task(response.buckets.asScala.toList.map(_.name))
+ _ = buckets.foreach(println)
+ } yield ()
+
+ override def run(args: List[String]): URIO[zio.ZEnv, ExitCode] =
+ app.exitCode
+}
+```
+
+## ZIO HTTP
+
+[ZIO HTTP](https://github.com/dream11/zio-http) is a scala library to write HTTP applications.
+
+### Introduction
+
+ZIO HTTP is a Scala library for building HTTP applications. It is powered by ZIO and Netty and aims to be the de facto solution for writing highly scalable and performant web applications using idiomatic Scala.
+
+### Installation
+
+In order to use this library, we need to add the following lines in our `build.sbt` file:
+
+```scala
+libraryDependencies += "io.d11" %% "zhttp" % "1.0.0.0-RC13"
+libraryDependencies += "io.d11" %% "zhttp-test" % "1.0.0.0-RC13" % Test
+```
+
+### Example
+
+```scala
+import zhttp.http._
+import zhttp.service.Server
+import zio._
+
+object ZIOHTTPExample extends zio.App {
+
+ // Create HTTP route
+ val app: HttpApp[Any, Nothing] = HttpApp.collect {
+ case Method.GET -> Root / "text" => Response.text("Hello World!")
+ case Method.GET -> Root / "json" => Response.jsonString("""{"greetings": "Hello World!"}""")
+ }
+
+ // Run it like any simple app
+ override def run(args: List[String]): URIO[zio.ZEnv, ExitCode] =
+ Server.start(8090, app.silent).exitCode
+}
+```
+
+## ZIO K8s
+
+[ZIO K8S](https://github.com/coralogix/zio-k8s) is an idiomatic ZIO client for the Kubernetes API.
+
+### Introduction
+
+This library provides a client for the full Kubernetes API as well as providing code generator support for custom resources and higher-level concepts such as operators, taking full advantage of the ZIO library.
+
+Using ZIO K8S we can talk to the Kubernetes API that helps us to:
+- Write an operator for our custom resource types
+- Schedule some jobs in our cluster
+- Query the cluster for monitoring purposes
+- Write some cluster management tools
+
+### Installation
+
+In order to use this library, we need to add the following line in our `build.sbt` file:
+
+```scala
+libraryDependencies += "com.coralogix" %% "zio-k8s-client" % "1.3.3"
+```
+
+And then we need to choose the proper sttp backend:
+
+```scala
+"com.softwaremill.sttp.client3" %% "httpclient-backend-zio" % "3.1.1",
+"com.softwaremill.sttp.client3" %% "slf4j-backend" % "3.1.1"
+```
+
+Or the asynchronous version:
+
+```scala
+"com.softwaremill.sttp.client3" %% "async-http-client-backend-zio" % "3.1.1"
+"com.softwaremill.sttp.client3" %% "slf4j-backend" % "3.1.1"
+```
+
+### Example
+
+This is an example of printing the tail logs of a container:
+
+```scala
+import com.coralogix.zio.k8s.client.K8sFailure
+import com.coralogix.zio.k8s.client.config.httpclient._
+import com.coralogix.zio.k8s.client.model.K8sNamespace
+import com.coralogix.zio.k8s.client.v1.pods
+import com.coralogix.zio.k8s.client.v1.pods.Pods
+import zio._
+import zio.console.Console
+
+import scala.languageFeature.implicitConversions
+
+object ZIOK8sLogsExample extends zio.App {
+ override def run(args: List[String]): URIO[zio.ZEnv, ExitCode] = (args match {
+ case List(podName) => tailLogs(podName, None)
+ case List(podName, containerName) => tailLogs(podName, Some(containerName))
+ case _ => console.putStrLnErr("Usage: <podname> [containername]")
+ })
+ .provideCustomLayer(k8sDefault >>> Pods.live)
+ .exitCode
+
+ def tailLogs(podName: String,
+ containerName: Option[String]
+ ): ZIO[Pods with Console, K8sFailure, Unit] =
+ pods
+ .getLog(
+ name = podName,
+ namespace = K8sNamespace.default,
+ container = containerName,
+ follow = Some(true)
+ )
+ .tap { line: String =>
+ console.putStrLn(line).ignore
+ }
+ .runDrain
+}
+```
+
+## ZIO Kinesis
+
+[ZIO Kinesis](https://github.com/svroonland/zio-kinesis) is a ZIO-based AWS Kinesis client for Scala.
+
+### Introduction
+
+ZIO Kinesis is an interface to Amazon Kinesis Data Streams for consuming and producing data. This library is built on top of [ZIO AWS](https://github.com/vigoo/zio-aws), a library of automatically generated ZIO wrappers around AWS SDK methods.
+
+### Installation
+
+In order to use this library, we need to add the following line in our `build.sbt` file:
+
+```scala
+libraryDependencies += "nl.vroste" %% "zio-kinesis" % "0.20.0"
+```
+
+### Example
+
+This is an example of consuming a stream from Amazon Kinesis:
+
+```scala mdoc:silent:reset
+import nl.vroste.zio.kinesis.client.serde.Serde
+import nl.vroste.zio.kinesis.client.zionative.Consumer
+import zio.clock.Clock
+import zio.console.{Console, putStrLn}
+import zio.duration._
+import zio.logging.Logging
+import zio.{ExitCode, URIO, _}
+
+object ZIOKinesisConsumerExample extends zio.App {
+ val loggingLayer: ZLayer[Any, Nothing, Logging] =
+ (Console.live ++ Clock.live) >>>
+ Logging.console() >>>
+ Logging.withRootLoggerName(getClass.getName)
+
+ override def run(args: List[String]): URIO[zio.ZEnv, ExitCode] =
+ Consumer
+ .consumeWith(
+ streamName = "my-stream",
+ applicationName = "my-application",
+ deserializer = Serde.asciiString,
+ workerIdentifier = "worker1",
+ checkpointBatchSize = 1000L,
+ checkpointDuration = 5.minutes
+ )(record => putStrLn(s"Processing record $record"))
+ .provideCustomLayer(Consumer.defaultEnvironment ++ loggingLayer)
+ .exitCode
+}
+```
+
+## ZIO Pulsar
+
+[ZIO Pulsar](https://github.com/jczuchnowski/zio-pulsar) is the _Apache Pulsar_ client for Scala with ZIO and ZIO Streams integration.
+
+### Introduction
+
+ZIO Pulsar is a purely functional Scala wrapper over the official Pulsar client. Some key features of this library:
+
+- **Type-safe** — Utilizes Scala type system to reduce runtime exceptions present in the official Java client.
+- **Streaming-enabled** — Naturally integrates with ZIO Streams.
+- **ZIO integrated** — Uses common ZIO primitives like ZIO effect and `ZManaged` to reduce the boilerplate and increase expressiveness.
+
+### Installation
+
+In order to use this library, we need to add the following line in our `build.sbt` file for _Scala 3_:
+
+```scala
+libraryDependencies += "com.github.jczuchnowski" %% "zio-pulsar" % "0.1"
+```
+
+### Example
+
+First of all we need to create an instance of _Apache Pulsar_ and run that:
+
+```
+docker run -it \
+ -p 6650:6650 \
+ -p 8080:8080 \
+ --mount source=pulsardata,target=/pulsar/data \
+ --mount source=pulsarconf,target=/pulsar/conf \
+ --network pulsar \
+ apachepulsar/pulsar:2.7.0 \
+ bin/pulsar standalone
+```
+
+Now we can run the following example:
+
+```scala
+import org.apache.pulsar.client.api.{PulsarClientException, Schema}
+import zio._
+import zio.blocking._
+import zio.clock._
+import zio.console._
+import zio.pulsar._
+import zio.stream._
+
+import java.nio.charset.StandardCharsets
+
+object StreamingExample extends zio.App {
+ val topic = "my-topic"
+
+ val producer: ZManaged[Has[PulsarClient], PulsarClientException, Unit] =
+ for {
+ sink <- Producer.make(topic, Schema.STRING).map(_.asSink)
+ _ <- Stream.fromIterable(0 to 100).map(i => s"Message $i").run(sink).toManaged_
+ } yield ()
+
+ val consumer: ZManaged[Has[PulsarClient] with Blocking with Console, PulsarClientException, Unit] =
+ for {
+ builder <- ConsumerBuilder.make(Schema.STRING).toManaged_
+ consumer <- builder
+ .subscription(Subscription("my-subscription", SubscriptionType.Exclusive))
+ .topic(topic)
+ .build
+ _ <- consumer.receiveStream.take(10).foreach { e =>
+ consumer.acknowledge(e.getMessageId) *>
+ putStrLn(new String(e.getData, StandardCharsets.UTF_8)).orDie
+ }.toManaged_
+ } yield ()
+
+ val myApp =
+ for {
+ f <- consumer.fork
+ _ <- producer
+ _ <- f.join.toManaged_
+ } yield ()
+
+ def run(args: List[String]): URIO[ZEnv, ExitCode] =
+ myApp
+ .provideCustomLayer(
+ (Console.live ++ Clock.live) >+>
+ PulsarClient.live("localhost", 6650)
+ ).useNow.exitCode
+}
+```
+
+## ZIO Saga
+
+[ZIO Saga](https://github.com/VladKopanev/zio-saga) is a distributed transaction manager using Saga Pattern.
+
+### Introduction
+
+Sometimes when we are architecting the business logic using microservice architecture we need distributed transactions that are across services.
+
+The _Saga Pattern_ lets us manage distributed transactions by sequencing local transactions with their corresponding compensating actions. A _Saga Pattern_ runs all operations. In the case of failure, it guarantees that all previously completed steps are undone by running their compensating actions.
+
+ZIO Saga allows us to compose our requests and compensating actions from the Saga pattern in one transaction with no boilerplate.
+
+ZIO Saga adds a simple abstraction called `Saga` that takes the responsibility of proper composition of effects and associated compensating actions.
+
+### Installation
+
+In order to use this library, we need to add the following line in our `build.sbt` file:
+
+```scala
+libraryDependencies += "com.vladkopanev" %% "zio-saga-core" % "0.4.0"
+```
+
+### Example
+
+In the following example, all API requests have a compensating action. We compose them all together and then run the whole as one transaction:
+
+
+```scala
+import com.vladkopanev.zio.saga.Saga
+import zio.{IO, UIO, URIO, ZIO}
+
+import com.vladkopanev.zio.saga.Saga._
+
+val transaction: Saga[Any, String, Unit] =
+ for {
+ _ <- bookHotel compensate cancelHotel
+ _ <- bookTaxi compensate cancelTaxi
+ _ <- bookFlight compensate cancelFlight
+ } yield ()
+
+val myApp: ZIO[Any, String, Unit] = transaction.transact
+```
+
+## ZIO Slick Interop
+
+[ZIO Slick Interop](https://github.com/ScalaConsultants/zio-slick-interop) is a small library, that provides interop between Slick and ZIO.
+
+### Installation
+
+In order to use this library, we need to add the following line in our `build.sbt` file:
+
+```scala
+libraryDependencies += "io.scalac" %% "zio-slick-interop" % "0.4.0"
+```
+
+### Example
+
+To run this example we should also add the _HikariCP integration for Slick_ in our `build.sbt` file:
+
+```scala
+libraryDependencies += "com.typesafe.slick" %% "slick-hikaricp" % "3.3.3"
+```
+
+Here is a full working example of creating database-agnostic Slick repository:
+
+```scala
+import com.typesafe.config.ConfigFactory
+import slick.interop.zio.DatabaseProvider
+import slick.interop.zio.syntax._
+import slick.jdbc.H2Profile.api._
+import slick.jdbc.JdbcProfile
+import zio.console.Console
+import zio.interop.console.cats.putStrLn
+import zio.{ExitCode, Has, IO, URIO, ZIO, ZLayer}
+
+import scala.jdk.CollectionConverters._
+
+case class Item(id: Long, name: String)
+
+trait ItemRepository {
+ def add(name: String): IO[Throwable, Long]
+
+ def getById(id: Long): IO[Throwable, Option[Item]]
+
+ def upsert(name: String): IO[Throwable, Long]
+}
+
+object ItemsTable {
+ class Items(tag: Tag) extends Table[Item](
+ _tableTag = tag,
+ _tableName = "ITEMS"
+ ) {
+ def id = column[Long]("ID", O.PrimaryKey, O.AutoInc)
+
+ def name = column[String]("NAME")
+
+ def * = (id, name) <> ((Item.apply _).tupled, Item.unapply _)
+ }
+
+ val table = TableQuery[ItemsTable.Items]
+}
+
+object SlickItemRepository {
+ val live: ZLayer[Has[DatabaseProvider], Throwable, Has[ItemRepository]] =
+ ZLayer.fromServiceM { db =>
+ db.profile.flatMap { profile =>
+ import profile.api._
+
+ val initialize = ZIO.fromDBIO(ItemsTable.table.schema.createIfNotExists)
+
+ val repository = new ItemRepository {
+ private val items = ItemsTable.table
+
+ def add(name: String): IO[Throwable, Long] =
+ ZIO
+ .fromDBIO((items returning items.map(_.id)) += Item(0L, name))
+ .provide(Has(db))
+
+ def getById(id: Long): IO[Throwable, Option[Item]] = {
+ val query = items.filter(_.id === id).result
+
+ ZIO.fromDBIO(query).map(_.headOption).provide(Has(db))
+ }
+
+ def upsert(name: String): IO[Throwable, Long] =
+ ZIO
+ .fromDBIO { implicit ec =>
+ (for {
+ itemOpt <- items.filter(_.name === name).result.headOption
+ id <- itemOpt.fold[DBIOAction[Long, NoStream, Effect.Write]](
+ (items returning items.map(_.id)) += Item(0L, name)
+ )(item => (items.map(_.name) update name).map(_ => item.id))
+ } yield id).transactionally
+ }
+ .provide(Has(db))
+ }
+
+ initialize.as(repository).provide(Has(db))
+ }
+ }
+}
+
+
+object Main extends zio.App {
+
+ private val config = ConfigFactory.parseMap(
+ Map(
+ "url" -> "jdbc:h2:mem:test1;DB_CLOSE_DELAY=-1",
+ "driver" -> "org.h2.Driver",
+ "connectionPool" -> "disabled"
+ ).asJava
+ )
+
+ private val env: ZLayer[Any, Throwable, Has[ItemRepository]] =
+ (ZLayer.succeed(config) ++ ZLayer.succeed[JdbcProfile](
+ slick.jdbc.H2Profile
+ )) >>> DatabaseProvider.live >>> SlickItemRepository.live
+
+ val myApp: ZIO[Console with Has[ItemRepository], Throwable, Unit] =
+ for {
+ repo <- ZIO.service[ItemRepository]
+ aId1 <- repo.add("A")
+ _ <- repo.add("B")
+ a <- repo.getById(1L)
+ b <- repo.getById(2L)
+ aId2 <- repo.upsert("A")
+ _ <- putStrLn(s"$aId1 == $aId2")
+ _ <- putStrLn(s"A item: $a")
+ _ <- putStrLn(s"B item: $b")
+ } yield ()
+
+ override def run(args: List[String]): URIO[zio.ZEnv, ExitCode] =
+ myApp.provideCustomLayer(env).exitCode
+}
+```
+
+## ZIO Test Akka HTTP
+
+[ZIO Test Akka HTTP](https://github.com/senia-psm/zio-test-akka-http) is an Akka-HTTP Route TestKit for zio-test.
+
+### Installation
+
+In order to use this library, we need to add the following line in our `build.sbt` file:
+
+```scala
+libraryDependencies += "info.senia" %% "zio-test-akka-http" % "1.0.2"
+```
+
+### Example
+
+An example of writing Akka HTTP Route test spec:
+
+```scala
+import akka.http.scaladsl.model.HttpResponse
+import akka.http.scaladsl.server.Directives.complete
+import zio.test.Assertion._
+import zio.test._
+import zio.test.akkahttp.DefaultAkkaRunnableSpec
+
+object MySpec extends DefaultAkkaRunnableSpec {
+ def spec =
+ suite("MySpec")(
+ testM("my test") {
+ assertM(Get() ~> complete(HttpResponse()))(
+ handled(
+ response(equalTo(HttpResponse()))
+ )
+ )
+ }
+ )
+}
+```
+
+## ZparkIO
+
+[ZParkIO](https://github.com/leobenkel/ZparkIO) is a boilerplate framework to use _Spark_ and _ZIO_ together.
+
+### Introduction
+
+_ZparkIO_ enables us to:
+- Wrap asynchronous and synchronous operations smoothly. So everything is wrapped in ZIO.
+- Have ZIO features in our spark jobs, like forking and joining fibers, parallelizing tasks, retrying, and timing-out.
+- Make our spark job much easier to debug
+
+### Installation
+
+In order to use this library, we need to add the following line in our `build.sbt` file:
+
+```scala
+libraryDependencies += "com.leobenkel" %% "zparkio" % "[SPARK_VERSION]_[VERSION]"
+```
+
+### Example
+
+Using _ZparkIO_ we can write jobs like the following example:
+
+```scala
+import com.leobenkel.zparkio.Services.SparkModule
+import com.leobenkel.zparkio.Services.SparkModule.SparkModule
+import com.leobenkel.zparkio.ZparkioApplicationTimeoutException
+import org.apache.spark.sql.DataFrame
+import zio.clock.Clock
+import zio.duration.durationInt
+import zio.{Schedule, Task, ZIO}
+
+def readParquetFile[A](parquetPath: String): ZIO[Clock with SparkModule, Throwable, DataFrame] =
+ for {
+ spark <- SparkModule()
+ dataset <- Task(spark.read.parquet(parquetPath))
+ .retry(
+ Schedule.recurs(3) && Schedule.exponential(2.seconds)
+ )
+ .timeoutFail(ZparkioApplicationTimeoutException())(5.minutes)
+ } yield dataset
+```
+
+## Quill
+
+[Quill](https://github.com/getquill/quill) provides Compile-time Language Integrated Queries for Scala.
+
+### Introduction
+
+Quill allows us to create SQL out of a Scala code during the **compile-time**. It provides the _Quoted Domain Specific Language (QDSL)_ to express queries in Scala and execute them in a target language.
+
+- **Boilerplate-free mapping** — The database schema is mapped using simple case classes.
+- **Quoted DSL** — Queries are defined inside a quote block. Quill parses each quoted block of code (quotation) at compile-time and translates them to an internal Abstract Syntax Tree (AST)
+- **Compile-time query generation** — The `ctx.run` call reads the quotation’s AST and translates it to the target language at compile-time, emitting the query string as a compilation message. As the query string is known at compile-time, the runtime overhead is very low and similar to using the database driver directly.
+- **Compile-time query validation** — If configured, the query is verified against the database at compile-time and the compilation fails if it is not valid. The query validation does not alter the database state.
+
+### Installation
+
+In order to use this library with ZIO, we need to add the following lines in our `build.sbt` file:
+
+```scala
+// Provides Quill contexts for ZIO.
+libraryDependencies += "io.getquill" %% "quill-zio" % "3.9.0"
+
+// Provides Quill context that execute MySQL, PostgreSQL, SQLite, H2, SQL Server and Oracle queries inside of ZIO.
+libraryDependencies += "io.getquill" %% "quill-jdbc-zio" % "3.9.0"
+
+// Provides Quill context that executes Cassandra queries inside of ZIO.
+libraryDependencies += "io.getquill" %% "quill-cassandra-zio" % "3.9.0"
+```
+
+### Example
+
+First, to run this example, we should create the `Person` table at the database initialization. Let's put the following lines into the `h2-schema.sql` file at the `src/main/resources` path:
+
+```sql
+CREATE TABLE IF NOT EXISTS Person(
+ name VARCHAR(255),
+ age int
+);
+```
+
+In this example, we use an in-memory database as our data source. So we just put these lines into the `application.conf` at the `src/main/resources` path:
+
+```hocon
+myH2DB {
+ dataSourceClassName = org.h2.jdbcx.JdbcDataSource
+ dataSource {
+ url = "jdbc:h2:mem:test;DB_CLOSE_DELAY=-1;INIT=RUNSCRIPT FROM 'classpath:h2-schema.sql'"
+ user = sa
+ }
+}
+```
+
+Now we are ready to run the example below:
+
+```scala
+import io.getquill._
+import io.getquill.context.ZioJdbc._
+import zio.console.{Console, putStrLn}
+import zio.{ExitCode, Has, URIO, ZIO}
+
+import java.io.Closeable
+import javax.sql
+
+object QuillZIOExample extends zio.App {
+ val ctx = new H2ZioJdbcContext(Literal)
+
+ import ctx._
+
+ case class Person(name: String, age: Int)
+
+ val myApp: ZIO[Console with Has[sql.DataSource with Closeable], Exception, Unit] =
+ for {
+ _ <- ctx.run(
+ quote {
+ liftQuery(List(Person("Alex", 25), Person("Sarah", 23)))
+ .foreach(r =>
+ query[Person].insert(r)
+ )
+ }
+ ).onDS
+ result <- ctx.run(
+ quote(query[Person].filter(p => p.name == "Sarah"))
+ ).onDS
+ _ <- putStrLn(result.toString)
+ } yield ()
+
+ override def run(args: List[String]): URIO[zio.ZEnv, ExitCode] =
+ myApp
+ .provideCustomLayer(DataSourceLayer.fromPrefix("myH2DB"))
+ .exitCode
+}
+```
diff --git a/website/versioned_docs/version-1.0.18/resources/ecosystem/compatible.md b/website/versioned_docs/version-1.0.18/resources/ecosystem/compatible.md
new file mode 100644
index 000000000000..406c50791a7d
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/resources/ecosystem/compatible.md
@@ -0,0 +1,15 @@
+---
+id: compatible
+title: "ZIO Compatible Libraries"
+---
+
+List of ZIO compatible libraries:
+
+- [d4s](https://github.com/PlayQ/d4s) — "Dynamo DB Database done Scala way". A library that allows accessing the DynamoDB in a purely functional way
+- [elastic4s](https://github.com/sksamuel/elastic4s) — Elasticsearch Scala Client - Reactive, Non Blocking, Type Safe, HTTP Client
+- [neotypes](https://github.com/neotypes/neotypes) — A Scala lightweight, type-safe & asynchronous driver for neo4j
+- [scanamo](https://github.com/scanamo/scanamo) — Simpler DynamoDB access for Scala
+- [sttp](https://github.com/softwaremill/sttp) — The Scala HTTP client you always wanted!
+- [doobie](https://github.com/tpolecat/doobie) — Functional JDBC layer for Scala.
+- [fs2](https://github.com/typelevel/fs2) — Compositional, streaming I/O library for Scala
+- [http4s](https://github.com/http4s/http4s) — A minimal, idiomatic Scala interface for HTTP
diff --git a/website/versioned_docs/version-1.0.18/resources/ecosystem/officials.md b/website/versioned_docs/version-1.0.18/resources/ecosystem/officials.md
new file mode 100644
index 000000000000..ea78e5a3fbb3
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/resources/ecosystem/officials.md
@@ -0,0 +1,1953 @@
+---
+id: officials
+title: "Official ZIO Libraries"
+---
+
+Official ZIO libraries are hosted in the [ZIO organization](https://github.com/zio/) on Github, and are generally maintained by core contributors to ZIO.
+
+Each project in the ZIO organization namespace has a _Stage Badge_ which indicates the current status of that project:
+
+* **Production Ready** — The project is stable and already used in production. We can expect reliability for the implemented use cases.
+
+* **Development** — The project already has RC or milestone releases, but is still under active development. We should not expect full stability yet.
+
+* **Experimental** — The project is not yet released, but an important part of the work is already done.
+
+* **Research** — The project is at the design stage, with some sketches of work but nothing usable yet.
+
+* **Concept** — The project is just an idea, development hasn't started yet.
+
+* **Deprecated** — The project is not maintained anymore, and we don't recommend its usage.
+
+## ZIO Actors
+
+[ZIO Actors](https://github.com/zio/zio-actors) is a high-performance, purely functional library for building, composing, and supervising typed actors based on ZIO.
+
+### Introduction
+
+ZIO Actors is based on the _Actor Model_ which is a conceptual model of concurrent computation. In the actor model, the _actor_ is the fundamental unit of computation, unlike the ZIO concurrency model, which is the fiber.
+
+Each actor has a mailbox that stores and processes the incoming messages in FIFO order. An actor is allowed to:
+- create another actor.
+- send a message to itself or other actors.
+- handle the incoming message, and:
+ - decide **what to do** based on the current state and the received message.
+ - decide **what is the next state** based on the current state and the received message.
+
+Some characteristics of an _Actor Model_:
+
+- **Isolated State** — Each actor holds its private state. They only have access to their internal state. They are isolated from each other, and they do not share the memory. The only way to change the state of an actor is to send a message to that actor.
+
+- **Process of One Message at a Time** — Each actor handles and processes one message at a time. They read messages from their inboxes and process them sequentially.
+
+- **Actor Persistence** — A persistent actor records its state as events. The actor can recover its state from persisted events after a crash or restart.
+
+- **Remote Messaging** — Actors can communicate with each other only through messages. They can run locally or remotely on another machine. Remote actors can communicate with each other transparently as if they were located locally.
+
+- **Actor Supervision** — Parent actors can supervise their child actors. For example, if a child actor fails, the supervisor actor can restart that actor.
+
+### Installation
+
+To use this library, we need to add the following line to our library dependencies in `build.sbt` file:
+
+```scala
+libraryDependencies += "dev.zio" %% "zio-actors" % "0.0.9"
+```
+
+### Example
+
+Let's try to implement a simple Counter Actor which receives two `Increase` and `Get` commands:
+
+```scala
+import zio.actors.Actor.Stateful
+import zio.actors._
+import zio.clock.Clock
+import zio.console.putStrLn
+import zio.{ExitCode, UIO, URIO, ZIO}
+
+sealed trait Message[+_]
+case object Increase extends Message[Unit]
+case object Get extends Message[Int]
+
+object CounterActorExample extends zio.App {
+
+ // Definition of stateful actor
+ val counterActor: Stateful[Any, Int, Message] =
+ new Stateful[Any, Int, Message] {
+ override def receive[A](
+ state: Int,
+ msg: Message[A],
+ context: Context
+ ): UIO[(Int, A)] =
+ msg match {
+ case Increase => UIO((state + 1, ()))
+ case Get => UIO((state, state))
+ }
+ }
+
+ val myApp: ZIO[Clock, Throwable, Int] =
+ for {
+ system <- ActorSystem("MyActorSystem")
+ actor <- system.make("counter", Supervisor.none, 0, counterActor)
+ _ <- actor ! Increase
+ _ <- actor ! Increase
+ s <- actor ? Get
+ } yield s
+
+ override def run(args: List[String]): URIO[zio.ZEnv, ExitCode] =
+ myApp
+ .flatMap(state => putStrLn(s"The final state of counter: $state"))
+ .exitCode
+}
+```
+
+ZIO Actors also has some other optional modules for persistence (which is useful for event sourcing) and integration with the Akka toolkit:
+
+```scala
+libraryDependencies += "dev.zio" %% "zio-actors-persistence" % zioActorsVersion
+libraryDependencies += "dev.zio" %% "zio-actors-persistence-jdbc" % zioActorsVersion
+libraryDependencies += "dev.zio" %% "zio-actors-akka-interop" % zioActorsVersion
+```
+
+## ZIO Akka Cluster
+
+The [ZIO Akka Cluster](https://github.com/zio/zio-akka-cluster) library is a ZIO wrapper on [Akka Cluster](https://doc.akka.io/docs/akka/current/index-cluster.html). We can use clustering features of the Akka toolkit without the need to use the actor model.
+
+### Introduction
+
+This library provides us following features:
+
+- **Akka Cluster** — This feature contains two Akka Cluster Membership operations called `join` and `leave` and also it has some methods to retrieve _Cluster State_ and _Cluster Events_.
+
+- **Akka Distributed PubSub** — Akka has a _Distributed Publish Subscribe_ facility in the cluster. It helps us to send a message to all actors in the cluster that have registered and subscribed for a specific topic name without knowing their physical address or without knowing which node they are running on.
+
+- **Akka Cluster Sharding** — Cluster sharding is useful when we need to _distribute actors across several nodes in the cluster_ and want to be able to interact with them using their logical identifier without having to care about their physical location in the cluster, which might also change over time. When we have many stateful entities in our application that together they consume more resources (e.g. memory) than fit on one machine, it is useful to use _Akka Cluster Sharding_ to distribute our entities to multiple nodes.
+
+### Installation
+
+In order to use this library, we need to add the following line in our `build.sbt` file:
+
+```scala
+libraryDependencies += "dev.zio" %% "zio-akka-cluster" % "0.2.0" // Check the repo for the latest version
+```
+
+### Example
+
+In the following example, we are using all these three features. We have a distributed counter application that lives in the Akka Cluster using _Akka Cluster Sharding_ feature. So the location of `LiveUsers` and `TotalRequests` entities in the cluster is transparent for us. We send the result of each entity to the _Distributed PubSub_. So every node in the cluster can subscribe and listen to those results. Also, we have created a fiber that is subscribed to the cluster events. All the new events will be logged to the console:
+
+```scala
+import akka.actor.ActorSystem
+import com.typesafe.config.{Config, ConfigFactory}
+import zio.akka.cluster.Cluster
+import zio.akka.cluster.sharding.{Entity, Sharding}
+import zio.console.putStrLn
+import zio.{ExitCode, Has, Managed, Task, URIO, ZIO, ZLayer}
+
+sealed trait Counter extends Product with Serializable
+case object Inc extends Counter
+case object Dec extends Counter
+
+case class CounterApp(port: String) {
+ val config: Config =
+ ConfigFactory.parseString(
+ s"""
+ |akka {
+ | actor {
+ | provider = "cluster"
+ | }
+ | remote {
+ | netty.tcp {
+ | hostname = "127.0.0.1"
+ | port = $port
+ | }
+ | }
+ | cluster {
+ | seed-nodes = ["akka.tcp://CounterApp@127.0.0.1:2551"]
+ | }
+ |}
+ |""".stripMargin)
+
+ val actorSystem: ZLayer[Any, Throwable, Has[ActorSystem]] =
+ ZLayer.fromManaged(
+ Managed.make(Task(ActorSystem("CounterApp", config)))(sys =>
+ Task.fromFuture(_ => sys.terminate()).either
+ )
+ )
+
+ val counterApp: ZIO[zio.ZEnv, Throwable, Unit] =
+ actorSystem.build.use(sys =>
+ for {
+ queue <- Cluster
+ .clusterEvents(true)
+ .provideCustomLayer(ZLayer.succeedMany(sys))
+
+ pubsub <- zio.akka.cluster.pubsub.PubSub
+ .createPubSub[Int]
+ .provideCustomLayer(ZLayer.succeedMany(sys))
+
+ liveUsersLogger <- pubsub
+ .listen("LiveUsers")
+ .flatMap(
+ _.take.tap(u => putStrLn(s"Number of live users: $u")).forever
+ )
+ .fork
+ totalRequestLogger <- pubsub
+ .listen("TotalRequests")
+ .flatMap(
+ _.take.tap(r => putStrLn(s"Total request until now: $r")).forever
+ )
+ .fork
+
+ clusterEvents <- queue.take
+ .tap(x => putStrLn("New event in cluster: " + x.toString))
+ .forever
+ .fork
+
+ counterEntityLogic = (c: Counter) =>
+ for {
+ entity <- ZIO.environment[Entity[Int]]
+ newState <- c match {
+ case Inc =>
+ entity.get.state.updateAndGet(s => Some(s.getOrElse(0) + 1))
+ case Dec =>
+ entity.get.state.updateAndGet(s => Some(s.getOrElse(0) - 1))
+ }
+ _ <- pubsub.publish(entity.get.id, newState.getOrElse(0)).orDie
+ } yield ()
+ cluster <- Sharding
+ .start("CounterEntity", counterEntityLogic)
+ .provideCustomLayer(ZLayer.succeedMany(sys))
+
+ _ <- cluster.send("LiveUsers", Inc)
+ _ <- cluster.send("TotalRequests", Inc)
+ _ <- cluster.send("LiveUsers", Dec)
+ _ <- cluster.send("LiveUsers", Inc)
+ _ <- cluster.send("LiveUsers", Inc)
+ _ <- cluster.send("TotalRequests", Inc)
+ _ <- cluster.send("TotalRequests", Inc)
+
+ _ <-
+ clusterEvents.join zipPar liveUsersLogger.join zipPar totalRequestLogger.join
+ } yield ()
+ )
+}
+```
+
+Now, let's create a cluster comprising two nodes:
+
+```scala
+object CounterApp1 extends zio.App {
+ override def run(args: List[String]): URIO[zio.ZEnv, ExitCode] =
+ CounterApp("2551").counterApp.exitCode
+}
+
+object CounterApp2 extends zio.App {
+ override def run(args: List[String]): URIO[zio.ZEnv, ExitCode] =
+ CounterApp("2552").counterApp.exitCode
+}
+```
+
+## ZIO Cache
+
+ZIO Cache is a library that makes it easy to optimize the performance of our application by caching values.
+
+Sometimes we may call or receive requests to do overlapping work. Assume we are writing a service that is going to handle all incoming requests. We don't want to handle duplicate requests. Using ZIO Cache we can make our application to be more **performant** by preventing duplicated works.
+
+### Introduction
+
+Some key features of ZIO Cache:
+
+- **Compositionality** — If we want our applications to be **compositional**, different parts of our application may do overlapping work. ZIO Cache helps us benefit from compositionality while using caching.
+
+- **Unification of Synchronous and Asynchronous Caches** — Compositional definition of cache in terms of _lookup function_ unifies synchronous and asynchronous caches. So the lookup function can compute value either synchronously or asynchronously.
+
+- **Deep ZIO Integration** — ZIO Cache is a ZIO native solution. So without losing the power of ZIO it includes support for _concurrent lookups_, _failure_, and _interruption_.
+
+- **Caching Policy** — Using caching policy, the ZIO Cache can determine when values should/may be removed from the cache. So, if we want to build something more complex and custom we have a lot of flexibility. The caching policy has two parts and together they define a whole caching policy:
+
+ - **Priority (Optional Removal)** — When we are running out of space, it defines the order that the existing values **might** be removed from the cache to make more space.
+
+ - **Evict (Mandatory Removal)** — Regardless of space when we **must** remove existing values because they are no longer valid anymore. They might be invalid because they do not satisfy business requirements (e.g., maybe it's too old). This is a function that determines whether an entry is valid based on the entry and the current time.
+
+- **Composition Caching Policy** — We can define much more complicated caching policies out of much simpler ones.
+
+- **Cache/Entry Statistics** — ZIO Cache maintains some good statistic metrics, such as entries, memory size, hits, misses, loads, evictions, and total load time. So we can look at how our cache is doing and decide where we should change our caching policy to improve caching metrics.
+
+### Installation
+
+In order to use this library, we need to add the following line in our `build.sbt` file:
+
+```scala
+libraryDependencies += "dev.zio" %% "zio-cache" % "0.1.0" // Check the repo for the latest version
+```
+
+### Example
+
+In this example, we are calling `timeConsumingEffect` three times in parallel with the same key. The ZIO Cache runs this effect only once. So the concurrent lookups will suspend until the value being computed is available:
+
+```scala
+import zio.cache.{Cache, Lookup}
+import zio.clock.Clock
+import zio.console.{Console, putStrLn}
+import zio.duration.{Duration, durationInt}
+import zio.{ExitCode, URIO, ZIO}
+
+import java.io.IOException
+
+def timeConsumingEffect(key: String): ZIO[Clock, Nothing, Int] =
+ ZIO.sleep(5.seconds) *> ZIO.succeed(key.hashCode)
+
+val myApp: ZIO[Console with Clock, IOException, Unit] =
+ for {
+ cache <- Cache.make(
+ capacity = 100,
+ timeToLive = Duration.Infinity,
+ lookup = Lookup(timeConsumingEffect)
+ )
+ result <- cache.get("key1")
+ .zipPar(cache.get("key1"))
+ .zipPar(cache.get("key1"))
+ _ <- putStrLn(s"Result of parallel execution three effects with the same key: $result")
+
+ hits <- cache.cacheStats.map(_.hits)
+ misses <- cache.cacheStats.map(_.misses)
+ _ <- putStrLn(s"Number of cache hits: $hits")
+ _ <- putStrLn(s"Number of cache misses: $misses")
+ } yield ()
+```
+
+The output of this program should be as follows:
+
+```
+Result of parallel execution three effects with the same key: ((3288498,3288498),3288498)
+Number of cache hits: 2
+Number of cache misses: 1
+```
+
+
+## ZIO Config
+
+[ZIO Config](https://zio.github.io/zio-config/) is a ZIO-based library for loading and parsing configuration sources.
+
+### Introduction
+In the real world, config retrieval is one of the first steps in developing an application. We mostly have some application config that should be loaded and parsed through our application. Doing such things manually is always boring and error-prone and also has lots of boilerplate.
+
+The ZIO Config has a lot of features, and it is more than just a config parsing library. Let's enumerate some key features of this library:
+
+- **Support for Various Sources** — It can read/write flat or nested configurations from/to various formats and sources.
+
+- **Composable sources** — ZIO Config can compose sources of configuration, so we can have, e.g. environmental or command-line overrides.
+
+- **Automatic Document Generation** — It can auto-generate documentation of configurations. So developers or DevOps engineers know how to configure the application.
+
+- **Report generation** — It has a report generation that shows where each piece of configuration data came from.
+
+- **Automatic Derivation** — It has built-in support for automatic derivation of readers and writers for case classes and sealed traits.
+
+- **Type-level Constraints and Automatic Validation** — Because it supports _Refined_ types, we can write type-level predicates which constrain the set of values described for data types.
+
+- **Descriptive Errors** — It accumulates all errors and reports all of them to the user rather than failing fast.
+
+### Installation
+
+In order to use this library, we need to add the following line in our `build.sbt` file:
+
+```scala
+libraryDependencies += "dev.zio" %% "zio-config" % "1.0.6"
+```
+
+There are also some optional dependencies:
+- **zio-config-magnolia** — Auto Derivation
+- **zio-config-refined** — Integration with Refined Library
+- **zio-config-typesafe** — HOCON/Json Support
+- **zio-config-yaml** — Yaml Support
+- **zio-config-gen** — Random Config Generation
+
+### Example
+
+Let's add these four lines to our `build.sbt` file as we are using these modules in our example:
+
+```scala
+libraryDependencies += "dev.zio" %% "zio-config" % "1.0.6"
+libraryDependencies += "dev.zio" %% "zio-config-magnolia" % "1.0.6"
+libraryDependencies += "dev.zio" %% "zio-config-typesafe" % "1.0.6"
+libraryDependencies += "dev.zio" %% "zio-config-refined" % "1.0.6"
+```
+
+In this example we are reading from HOCON config format using type derivation:
+
+```scala
+import eu.timepit.refined.W
+import eu.timepit.refined.api.Refined
+import eu.timepit.refined.collection.NonEmpty
+import eu.timepit.refined.numeric.GreaterEqual
+import zio.config.magnolia.{describe, descriptor}
+import zio.config.typesafe.TypesafeConfigSource
+import zio.console.putStrLn
+import zio.{ExitCode, URIO, ZIO}
+
+sealed trait DataSource
+
+final case class Database(
+ @describe("Database Host Name")
+ host: Refined[String, NonEmpty],
+ @describe("Database Port")
+ port: Refined[Int, GreaterEqual[W.`1024`.T]]
+) extends DataSource
+
+final case class Kafka(
+ @describe("Kafka Topics")
+ topicName: String,
+ @describe("Kafka Brokers")
+ brokers: List[String]
+) extends DataSource
+
+object ZIOConfigExample extends zio.App {
+ import zio.config._
+ import zio.config.refined._
+
+ val json =
+ s"""
+ |"Database" : {
+ | "port" : "1024",
+ | "host" : "localhost"
+ |}
+ |""".stripMargin
+
+ val myApp =
+ for {
+ source <- ZIO.fromEither(TypesafeConfigSource.fromHoconString(json))
+ desc = descriptor[DataSource] from source
+ dataSource <- ZIO.fromEither(read(desc))
+ // Printing Auto Generated Documentation of Application Config
+ _ <- putStrLn(generateDocs(desc).toTable.toGithubFlavouredMarkdown)
+ _ <- dataSource match {
+ case Database(host, port) =>
+ putStrLn(s"Start connecting to the database: $host:$port")
+ case Kafka(_, brokers) =>
+ putStrLn(s"Start connecting to the kafka brokers: $brokers")
+ }
+ } yield ()
+
+ override def run(args: List[String]): URIO[zio.ZEnv, ExitCode] =
+ myApp.exitCode
+}
+```
+
+## ZIO FTP
+
+[ZIO FTP](https://github.com/zio/zio-ftp) is a simple, idiomatic (S)FTP client for ZIO.
+
+### Installation
+
+In order to use this library, we need to add the following line in our `build.sbt` file:
+
+```scala
+libraryDependencies += "dev.zio" %% "zio-ftp" % "0.3.0"
+```
+
+### Example
+
+First, we need an FTP server if we don't already have one:
+
+```bash
+docker run -d \
+ -p 21:21 \
+ -p 21000-21010:21000-21010 \
+ -e USERS="one|1234" \
+ -e ADDRESS=localhost \
+ delfer/alpine-ftp-server
+```
+
+Now we can run the example:
+
+```scala
+import zio.blocking.Blocking
+import zio.console.putStrLn
+import zio.ftp.Ftp._
+import zio.ftp._
+import zio.stream.{Transducer, ZStream}
+import zio.{Chunk, ExitCode, URIO, ZIO}
+
+object ZIOFTPExample extends zio.App {
+ private val settings =
+ UnsecureFtpSettings("127.0.0.1", 21, FtpCredentials("one", "1234"))
+
+ private val myApp = for {
+ _ <- putStrLn("List of files at root directory:")
+ resource <- ls("/").runCollect
+ _ <- ZIO.foreach(resource)(e => putStrLn(e.path))
+ path = "~/file.txt"
+ _ <- upload(
+ path,
+ ZStream.fromChunk(
+ Chunk.fromArray("Hello, ZIO FTP!\nHello, World!".getBytes)
+ )
+ )
+ file <- readFile(path)
+ .transduce(Transducer.utf8Decode)
+ .runCollect
+ _ <- putStrLn(s"Content of $path file:")
+ _ <- putStrLn(file.fold("")(_ + _))
+ } yield ()
+
+ override def run(args: List[String]): URIO[zio.ZEnv, ExitCode] = myApp
+ .provideCustomLayer(
+ unsecure(settings) ++ Blocking.live
+ )
+ .exitCode
+}
+```
+
+## ZIO JSON
+
+[ZIO Json](https://github.com/zio/zio-json) is a fast and secure JSON library with tight ZIO integration.
+
+### Introduction
+
+The goal of this project is to create the best all-round JSON library for Scala:
+
+- **Performance** to handle more requests per second than the incumbents, i.e. reduced operational costs.
+- **Security** to mitigate against adversarial JSON payloads that threaten the capacity of the server.
+- **Fast Compilation** no shapeless, no type astronautics.
+- **Future-Proof**, prepared for Scala 3 and next-generation Java.
+- **Simple** small codebase, concise documentation that covers everything.
+- **Helpful errors** are readable by humans and machines.
+- **ZIO Integration** so nothing more is required.
+
+### Installation
+
+In order to use this library, we need to add the following line in our `build.sbt` file:
+
+```scala
+libraryDependencies += "dev.zio" %% "zio-json" % "0.1.5"
+```
+
+### Example
+
+Let's try a simple example of encoding and decoding JSON using ZIO JSON:
+
+```scala
+import zio.json._
+
+sealed trait Fruit extends Product with Serializable
+case class Banana(curvature: Double) extends Fruit
+case class Apple(poison: Boolean) extends Fruit
+
+object Fruit {
+ implicit val decoder: JsonDecoder[Fruit] =
+ DeriveJsonDecoder.gen[Fruit]
+
+ implicit val encoder: JsonEncoder[Fruit] =
+ DeriveJsonEncoder.gen[Fruit]
+}
+
+val json1 = """{ "Banana":{ "curvature":0.5 }}"""
+// json1: String = "{ \"Banana\":{ \"curvature\":0.5 }}"
+val json2 = """{ "Apple": { "poison": false }}"""
+// json2: String = "{ \"Apple\": { \"poison\": false }}"
+val malformedJson = """{ "Banana":{ "curvature": true }}"""
+// malformedJson: String = "{ \"Banana\":{ \"curvature\": true }}"
+
+json1.fromJson[Fruit]
+// res0: Either[String, Fruit] = Right(value = Banana(curvature = 0.5))
+json2.fromJson[Fruit]
+// res1: Either[String, Fruit] = Right(value = Apple(poison = false))
+malformedJson.fromJson[Fruit]
+// res2: Either[String, Fruit] = Left(
+// value = ".Banana.curvature(expected a number, got t)"
+// )
+
+List(Apple(false), Banana(0.4)).toJsonPretty
+// res3: String = """[{
+// "Apple" : {
+// "poison" : false
+// }
+// }, {
+// "Banana" : {
+// "curvature" : 0.4
+// }
+// }]"""
+```
+
+## ZIO Kafka
+
+[ZIO Kafka](https://github.com/zio/zio-kafka) is a Kafka client for ZIO. It provides a purely functional, streams-based interface to the Kafka client and integrates effortlessly with ZIO and ZIO Streams.
+
+### Introduction
+
+Apache Kafka is a distributed event streaming platform that acts as a distributed publish-subscribe messaging system. It enables us to build distributed streaming data pipelines and event-driven applications.
+
+Kafka has a mature Java client for producing and consuming events, but it has a low-level API. ZIO Kafka is a ZIO native client for Apache Kafka. It has a high-level streaming API on top of the Java client. So we can produce and consume events using the declarative concurrency model of ZIO Streams.
+
+### Installation
+
+In order to use this library, we need to add the following line in our `build.sbt` file:
+
+```scala
+libraryDependencies += "dev.zio" %% "zio-kafka" % "0.15.0"
+```
+
+### Example
+
+Let's write a simple Kafka producer and consumer using ZIO Kafka with ZIO Streams. Before everything, we need a running instance of Kafka. We can do that by saving the following docker-compose script in the `docker-compose.yml` file and run `docker-compose up`:
+
+```docker
+version: '2'
+services:
+ zookeeper:
+ image: confluentinc/cp-zookeeper:latest
+ environment:
+ ZOOKEEPER_CLIENT_PORT: 2181
+ ZOOKEEPER_TICK_TIME: 2000
+ ports:
+ - 22181:2181
+
+ kafka:
+ image: confluentinc/cp-kafka:latest
+ depends_on:
+ - zookeeper
+ ports:
+ - 29092:29092
+ environment:
+ KAFKA_BROKER_ID: 1
+ KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
+ KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,PLAINTEXT_HOST://localhost:29092
+ KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
+ KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+ KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
+```
+
+Now, we can run our ZIO Kafka Streaming application:
+
+```scala
+import zio._
+import zio.console.putStrLn
+import zio.duration.durationInt
+import zio.kafka.consumer.{Consumer, ConsumerSettings, _}
+import zio.kafka.producer.{Producer, ProducerSettings}
+import zio.kafka.serde._
+import zio.stream.ZStream
+
+object ZIOKafkaProducerConsumerExample extends zio.App {
+ val producer =
+ ZStream
+ .repeatEffect(zio.random.nextIntBetween(0, Int.MaxValue))
+ .schedule(Schedule.fixed(2.seconds))
+ .mapM { random =>
+ Producer.produce[Any, Long, String](
+ topic = "random",
+ key = random % 4,
+ value = random.toString,
+ keySerializer = Serde.long,
+ valueSerializer = Serde.string
+ )
+ }
+ .drain
+
+ val consumer =
+ Consumer
+ .subscribeAnd(Subscription.topics("random"))
+ .plainStream(Serde.long, Serde.string)
+ .tap(r => putStrLn(r.value))
+ .map(_.offset)
+ .aggregateAsync(Consumer.offsetBatches)
+ .mapM(_.commit)
+ .drain
+
+ override def run(args: List[String]): URIO[zio.ZEnv, ExitCode] =
+ producer
+ .merge(consumer)
+ .runDrain
+ .provideCustomLayer(appLayer)
+ .exitCode
+
+ def producerLayer = ZLayer.fromManaged(
+ Producer.make(
+ settings = ProducerSettings(List("localhost:29092"))
+ )
+ )
+
+ def consumerLayer = ZLayer.fromManaged(
+ Consumer.make(
+ ConsumerSettings(List("localhost:29092")).withGroupId("group")
+ )
+ )
+
+ def appLayer = producerLayer ++ consumerLayer
+}
+```
+
+## ZIO Logging
+
+[ZIO Logging](https://github.com/zio/zio-logging) is simple logging for ZIO apps, with correlation, context, and pluggable backends out of the box.
+
+### Introduction
+
+When we are writing our applications using ZIO effects, we need a ZIO-native solution to log in an easy way. ZIO Logging is an environmental effect for adding logging to our ZIO applications.
+
+Key features of ZIO Logging:
+
+- **ZIO Native** — Besides being a type-safe and purely functional solution, it leverages ZIO's features.
+- **Multi-Platform** - It supports both JVM and JS platforms.
+- **Composable** — Loggers are composable together via contraMap.
+- **Pluggable Backends** — Support multiple backends like ZIO Console, SLF4j, JS Console, JS HTTP endpoint.
+- **Logger Context** — It has a first citizen _Logger Context_ implemented on top of `FiberRef`. The Logger Context maintains information like logger name, filters, correlation id, and so forth across different fibers. It supports _Mapped Diagnostic Context (MDC)_ which manages contextual information across fibers in a concurrent environment.
+
+### Installation
+
+In order to use this library, we need to add the following line in our `build.sbt` file:
+
+```scala
+libraryDependencies += "dev.zio" %% "zio-logging" % "0.5.13"
+```
+
+There are also some optional dependencies:
+- **zio-logging-slf4j** — SLF4j integration
+- **zio-logging-slf4j-bridge** — Using ZIO Logging for SLF4j loggers, usually third-party non-ZIO libraries
+- **zio-logging-jsconsole** — Scala.js console integration
+- **zio-logging-jshttp** — Scala.js HTTP Logger which sends logs to a backend via Ajax POST
+
+### Example
+
+Let's try an example of ZIO Logging which demonstrates a simple application of ZIO logging along with its _Logger Context_ feature:
+
+```scala
+import zio.clock.Clock
+import zio.duration.durationInt
+import zio.logging._
+import zio.random.Random
+import zio.{ExitCode, NonEmptyChunk, ZIO}
+
+object ZIOLoggingExample extends zio.App {
+
+ val myApp: ZIO[Logging with Clock with Random, Nothing, Unit] =
+ for {
+ _ <- log.info("Hello from ZIO logger")
+ _ <-
+ ZIO.foreachPar(NonEmptyChunk("UserA", "UserB", "UserC")) { user =>
+ log.locally(UserId(Some(user))) {
+ for {
+ _ <- log.info("User validation")
+ _ <- zio.random
+ .nextIntBounded(1000)
+ .flatMap(t => ZIO.sleep(t.millis))
+ _ <- log.info("Connecting to the database")
+ _ <- zio.random
+ .nextIntBounded(100)
+ .flatMap(t => ZIO.sleep(t.millis))
+ _ <- log.info("Releasing resources.")
+ } yield ()
+ }
+
+ }
+ } yield ()
+
+ type UserId = String
+ def UserId: LogAnnotation[Option[UserId]] = LogAnnotation[Option[UserId]](
+ name = "user-id",
+ initialValue = None,
+ combine = (_, r) => r,
+ render = _.map(userId => s"[user-id: $userId]")
+ .getOrElse("undefined-user-id")
+ )
+
+ val env =
+ Logging.console(
+ logLevel = LogLevel.Info,
+ format =
+ LogFormat.ColoredLogFormat((ctx, line) => s"${ctx(UserId)} $line")
+ ) >>> Logging.withRootLoggerName("MyZIOApp")
+
+ override def run(args: List[String]) =
+ myApp.provideCustomLayer(env).as(ExitCode.success)
+}
+```
+
+The output should be something like this:
+
+```
+2021-07-09 00:14:47.457+0000 info [MyZIOApp] undefined-user-id Hello from ZIO logger
+2021-07-09 00:14:47.807+0000 info [MyZIOApp] [user-id: UserA] User validation
+2021-07-09 00:14:47.808+0000 info [MyZIOApp] [user-id: UserC] User validation
+2021-07-09 00:14:47.818+0000 info [MyZIOApp] [user-id: UserB] User validation
+2021-07-09 00:14:48.290+0000 info [MyZIOApp] [user-id: UserC] Connecting to the database
+2021-07-09 00:14:48.299+0000 info [MyZIOApp] [user-id: UserA] Connecting to the database
+2021-07-09 00:14:48.321+0000 info [MyZIOApp] [user-id: UserA] Releasing resources.
+2021-07-09 00:14:48.352+0000 info [MyZIOApp] [user-id: UserC] Releasing resources.
+2021-07-09 00:14:48.820+0000 info [MyZIOApp] [user-id: UserB] Connecting to the database
+2021-07-09 00:14:48.882+0000 info [MyZIOApp] [user-id: UserB] Releasing resources.
+```
+
+## ZIO Metrics
+
+[ZIO Metrics](https://github.com/zio/zio-metrics) is a high-performance, purely-functional library for adding instrumentation to any application, with a simple web client and JMX support.
+
+### Introduction
+
+ZIO Metrics is a pure-ZIO StatsD/DogStatsD client and a thin wrapper over both _[Prometheus](https://github.com/prometheus/client_java)_ and _[Dropwizard](https://metrics.dropwizard.io/4.2.0/manual/core.html)_ instrumentation libraries allowing us to measure the behavior of our application in a performant purely functional manner.
+
+### Installation
+
+In order to use this library, we need to add one of the following lines in our `build.sbt` file:
+
+```scala
+// Prometheus
+libraryDependencies += "dev.zio" %% "zio-metrics-prometheus" % "1.0.12"
+
+// Dropwizard
+libraryDependencies += "dev.zio" %% "zio-metrics-dropwizard" % "1.0.12"
+
+// StatsD/DogStatsD
+libraryDependencies += "dev.zio" %% "zio-metrics-statsd" % "1.0.12"
+```
+
+### Example
+
+In this example, we are using the `zio-metrics-prometheus` module. Other than initializing the default exporters, we register a counter to the registry:
+
+```scala
+import zio.Runtime
+import zio.console.{Console, putStrLn}
+import zio.metrics.prometheus._
+import zio.metrics.prometheus.exporters._
+import zio.metrics.prometheus.helpers._
+
+object ZIOMetricsExample extends scala.App {
+
+ val myApp =
+ for {
+ r <- getCurrentRegistry()
+ _ <- initializeDefaultExports(r)
+ c <- counter.register("ServiceA", Array("Request", "Region"))
+ _ <- c.inc(1.0, Array("GET", "us-west-*"))
+ _ <- c.inc(2.0, Array("POST", "eu-south-*"))
+ _ <- c.inc(3.0, Array("GET", "eu-south-*"))
+ s <- http(r, 9090)
+ _ <- putStrLn(s"The application's metric endpoint: http://localhost:${s.getPort}/")
+ } yield s
+
+ Runtime
+ .unsafeFromLayer(
+ Registry.live ++ Exporters.live ++ Console.live
+ )
+ .unsafeRun(myApp)
+}
+```
+
+Now, the application's metrics are accessible via `http://localhost:9090` endpoint.
+
+## ZIO NIO
+[ZIO NIO](https://zio.github.io/zio-nio/) is a small, unopinionated ZIO interface to NIO.
+
+### Introduction
+
+In Java, there are two packages for I/O operations:
+
+1. Java IO (`java.io`)
+ - Standard Java IO API
+ - Introduced since Java 1.0
+ - Stream-based API
+ - **Blocking I/O operation**
+
+2. Java NIO (`java.nio`)
+ - Introduced since Java 1.4
+ - NIO means _New IO_, an alternative to the standard Java IO API
+ - It can operate in a **non-blocking mode** if possible
+ - Buffer-based API
+
+The [Java NIO](https://docs.oracle.com/javase/8/docs/api/java/nio/package-summary.html) is an alternative to the Java IO API. Because it supports non-blocking IO, it can be more performant in concurrent environments like web services.
+
+### Installation
+
+ZIO NIO is a ZIO wrapper on Java NIO. It comes in two flavors:
+
+- **`zio.nio.core`** — a small and unopinionated ZIO interface to NIO that just wraps NIO API in ZIO effects,
+- **`zio.nio`** — an opinionated interface with deeper ZIO integration that provides more type and resource safety.
+
+In order to use this library, we need to add one of the following lines in our `build.sbt` file:
+
+```scala
+libraryDependencies += "dev.zio" %% "zio-nio-core" % "1.0.0-RC11"
+libraryDependencies += "dev.zio" %% "zio-nio" % "1.0.0-RC11"
+```
+
+### Example
+
+Let's try writing a simple server using `zio-nio` module:
+
+```scala
+import zio._
+import zio.console._
+import zio.nio.channels._
+import zio.nio.core._
+import zio.stream._
+
+object ZIONIOServerExample extends zio.App {
+ val myApp =
+ AsynchronousServerSocketChannel()
+ .use(socket =>
+ for {
+ addr <- InetSocketAddress.hostName("localhost", 8080)
+ _ <- socket.bindTo(addr)
+ _ <- putStrLn(s"Waiting for incoming connections on $addr endpoint").orDie
+ _ <- ZStream
+ .repeatEffect(socket.accept.preallocate)
+ .map(_.withEarlyRelease)
+ .mapMPar(16) {
+ _.use { case (closeConn, channel) =>
+ for {
+ _ <- putStrLn("Received connection").orDie
+ data <- ZStream
+ .repeatEffectOption(
+ channel.readChunk(64).eofCheck.orElseFail(None)
+ )
+ .flattenChunks
+ .transduce(ZTransducer.utf8Decode)
+ .run(Sink.foldLeft("")(_ + _))
+ _ <- closeConn
+ _ <- putStrLn(s"Request Received:\n${data.mkString}").orDie
+ } yield ()
+ }
+ }.runDrain
+ } yield ()
+ ).orDie
+
+ override def run(args: List[String]): URIO[zio.ZEnv, ExitCode] =
+ myApp.exitCode
+}
+```
+
+Now we can send our requests to the server using _curl_ command:
+
+```
+curl -X POST localhost:8080 -d "Hello, ZIO NIO!"
+```
+
+## ZIO Optics
+
+[ZIO Optics](https://github.com/zio/zio-optics) is a library that makes it easy to modify parts of larger data structures based on a single representation of an optic as a combination of a getter and setter.
+
+### Introduction
+
+When we are working with immutable nested data structures, updating and reading operations could be tedious with lots of boilerplates. Optics is a functional programming construct that makes these operations more clear and readable.
+
+Key features of ZIO Optics:
+
+- **Unified Optic Data Type** — All the data types like `Lens`, `Prism`, `Optional`, and so forth are type aliases for the core `Optic` data type.
+- **Composability** — We can compose optics to create more advanced ones.
+- **Embracing the Tremendous Power of Concretion** — Using concretion instead of unnecessary abstractions, makes the API more ergonomic and easy to use.
+- **Integration with ZIO Data Types** — It supports effectful and transactional optics that works with ZIO data structures like `Ref` and `TMap`.
+- **Helpful Error Channel** — Like ZIO, the `Optics` data type has error channels to include failure details.
+
+### Installation
+
+In order to use this library, we need to add the following line in our `build.sbt` file:
+
+```scala
+libraryDependencies += "dev.zio" %% "zio-optics" % "0.1.0"
+```
+
+### Example
+
+In this example, we are going to update a nested data structure using ZIO Optics:
+
+```scala
+import zio.optics._
+
+case class Developer(name: String, manager: Manager)
+case class Manager(name: String, rating: Rating)
+case class Rating(upvotes: Int, downvotes: Int)
+
+val developerLens = Lens[Developer, Manager](
+ get = developer => Right(developer.manager),
+ set = manager => developer => Right(developer.copy(manager = manager))
+)
+
+val managerLens = Lens[Manager, Rating](
+ get = manager => Right(manager.rating),
+ set = rating => manager => Right(manager.copy(rating = rating))
+)
+
+val ratingLens = Lens[Rating, Int](
+ get = rating => Right(rating.upvotes),
+ set = upvotes => rating => Right(rating.copy(upvotes = upvotes))
+)
+
+// Composing lenses
+val optic = developerLens >>> managerLens >>> ratingLens
+
+val jane = Developer("Jane", Manager("Steve", Rating(0, 0)))
+val updated = optic.update(jane)(_ + 1)
+
+println(updated)
+```
+
+## ZIO Prelude
+
+[ZIO Prelude](https://github.com/zio/zio-prelude) is a lightweight, distinctly Scala take on **functional abstractions**, with tight ZIO integration.
+
+### Introduction
+
+ZIO Prelude is a small library that brings common, useful algebraic abstractions and data types to scala developers.
+
+It is an alternative to libraries like _Scalaz_ and _Cats_ based on radical ideas that embrace **modularity** and **subtyping** in Scala and offer **new levels of power and ergonomics**. It throws out the classic functor hierarchy in favor of a modular algebraic approach that is smaller, easier to understand and teach, and more expressive.
+
+Design principles behind ZIO Prelude:
+
+1. **Radical** — So basically it ignores all dogma and it is completely written with a new mindset.
+2. **Orthogonality** — The goal for ZIO Prelude is to have no overlap. Type classes should do one thing and fit it well. So there is not any duplication to describe type classes.
+3. **Principled** — All type classes in ZIO Prelude include a set of laws that instances must obey.
+4. **Pragmatic** — If we have data types that don't satisfy laws but that are still useful to use in most cases, we can go ahead and provide instances for them.
+5. **Scala-First** - It embraces subtyping and benefits from the object-oriented features of Scala.
+
+ZIO Prelude gives us:
+- **Data Types** that complements the Scala Standard Library:
+ - `NonEmptyList`, `NonEmptySet`
+ - `ZSet`, `ZNonEmptySet`
+ - `Validation`
+ - `ZPure`
+- **Type Classes** to describe similarities across different types to eliminate duplications and boilerplates:
+ - Business entities (`Person`, `ShoppingCart`, etc.)
+ - Effect-like structures (`Try`, `Option`, `Future`, `Either`, etc.)
+ - Collection-like structures (`List`, `Tree`, etc.)
+- **New Types** that allow us to _increase type safety_ in domain modeling. They wrap existing types while adding no runtime overhead.
+
+### Installation
+
+In order to use this library, we need to add the following line in our `build.sbt` file:
+
+```scala
+libraryDependencies += "dev.zio" %% "zio-prelude" % "1.0.0-RC5"
+```
+
+### Example
+
+In this example, we are going to create a simple voting application. We will use two features of ZIO Prelude:
+1. To achieve more type safety, we are going to use _New Types_, introducing the `Topic` and `Votes` data types.
+2. Providing instance of `Associative` type class for `Votes` data type which helps us to combine `Votes` values.
+
+```scala
+import zio.prelude._
+
+object VotingExample extends scala.App {
+
+ object Votes extends Subtype[Int] {
+ implicit val associativeVotes: Associative[Votes] =
+ new Associative[Votes] {
+ override def combine(l: => Votes, r: => Votes): Votes =
+ Votes(l + r)
+ }
+ }
+ type Votes = Votes.Type
+
+ object Topic extends Subtype[String]
+ type Topic = Topic.Type
+
+ final case class VoteState(map: Map[Topic, Votes]) { self =>
+ def combine(that: VoteState): VoteState =
+ VoteState(self.map combine that.map)
+ }
+
+ val zioHttp = Topic("zio-http")
+ val uziHttp = Topic("uzi-http")
+ val zioTlsHttp = Topic("zio-tls-http")
+
+ val leftVotes = VoteState(Map(zioHttp -> Votes(4), uziHttp -> Votes(2)))
+ val rightVotes = VoteState(Map(zioHttp -> Votes(2), zioTlsHttp -> Votes(2)))
+
+ println(leftVotes combine rightVotes)
+ // Output: VoteState(Map(zio-http -> 6, uzi-http -> 2, zio-tls-http -> 2))
+}
+```
+
+## ZIO Process
+
+[ZIO Process](https://github.com/zio/zio-process) is a simple ZIO library for interacting with external processes and command-line programs.
+
+### Introduction
+
+ZIO Process provides a principled way to call out to external programs from within a ZIO application while leveraging ZIO's capabilities like interruptions and offloading blocking operations to a separate thread pool. We don't need to worry about avoiding these common pitfalls as we would if we were to use Java's `ProcessBuilder` or the `scala.sys.process` API since it is already taken care of for you.
+
+Key features of the ZIO Process:
+- **Deep ZIO Integration** — Leverages ZIO to handle interruption and offload blocking operations.
+- **ZIO Streams** — ZIO Process is backed by ZIO Streams, which enables us to obtain the command output as streams of bytes or lines. So we can work with processes that output gigabytes of data without worrying about exceeding memory constraints.
+- **Descriptive Errors** — In case of command failure, it has a descriptive category of errors.
+- **Piping** — It has a simple DSL for piping the output of one command as the input of another.
+- **Blocking Operations** — Blocking operations are offloaded to a separate thread pool.
+
+### Installation
+
+In order to use this library, we need to add the following line in our `build.sbt` file:
+
+```scala
+libraryDependencies += "dev.zio" %% "zio-process" % "0.5.0"
+```
+
+### Example
+
+Here is a simple example of using ZIO Process:
+
+```scala
+import zio.console.putStrLn
+import zio.process.Command
+import zio.{ExitCode, URIO}
+
+import java.io.File
+
+object ZIOProcessExample extends zio.App {
+
+ val myApp = for {
+ fiber <- Command("dmesg", "--follow").linesStream
+ .foreach(putStrLn(_))
+ .fork
+ cpuModel <- (Command("cat", "/proc/cpuinfo") |
+ Command("grep", "model name") |
+ Command("head", "-n", "1") |
+ Command("cut", "-d", ":", "-f", "2")).string
+ _ <- putStrLn(s"CPU Model: $cpuModel")
+ _ <- (Command("pg_dump", "my_database") > new File("dump.sql")).exitCode
+ _ <- fiber.join
+ } yield ()
+
+ override def run(args: List[String]): URIO[zio.ZEnv, ExitCode] =
+ myApp.exitCode
+}
+```
+
+## ZIO Query
+
+[ZIO Query](https://github.com/zio/zio-query) is a library for writing optimized queries to data sources in a high-level compositional style. It can add efficient pipelining, batching, and caching to any data source.
+
+### Introduction
+
+Some key features of ZIO Query:
+
+- **Batching** — ZIO Query detects parts of composite queries that can be executed in parallel without changing the semantics of the query.
+
+- **Pipelining** — ZIO Query detects parts of composite queries that can be combined together for fewer individual requests to the data source.
+
+- **Caching** — ZIO Query can transparently cache read queries to minimize the cost of fetching the same item repeatedly in the scope of a query.
+
+Assume we have the following database access layer APIs:
+
+```scala
+def getAllUserIds: ZIO[Any, Nothing, List[Int]] = {
+ // Get all user IDs e.g. SELECT id FROM users
+ ZIO.succeed(???)
+}
+
+def getUserNameById(id: Int): ZIO[Any, Nothing, String] = {
+ // Get user by ID e.g. SELECT name FROM users WHERE id = $id
+ ZIO.succeed(???)
+}
+```
+
+We can get their corresponding usernames from the database by the following code snippet:
+
+```scala
+val userNames = for {
+ ids <- getAllUserIds
+ names <- ZIO.foreachPar(ids)(getUserNameById)
+} yield names
+```
+
+It works, but this is not performant. It is going to query the underlying database _N + 1_ times.
+
+In this case, ZIO Query helps us to write an optimized query that is going to perform two queries (one for getting user IDs and one for getting all usernames).
+
+### Installation
+
+In order to use this library, we need to add the following line in our `build.sbt` file:
+
+```scala
+libraryDependencies += "dev.zio" %% "zio-query" % "0.2.9"
+```
+
+### Example
+
+Here is an example of using ZIO Query, which optimizes multiple database queries by batching all of them in one query:
+
+```scala
+import zio.console.putStrLn
+import zio.query.{CompletedRequestMap, DataSource, Request, ZQuery}
+import zio.{Chunk, ExitCode, Task, URIO, ZIO}
+
+import scala.collection.immutable.AbstractSeq
+
+object ZQueryExample extends zio.App {
+ case class GetUserName(id: Int) extends Request[Nothing, String]
+
+ lazy val UserDataSource: DataSource.Batched[Any, GetUserName] =
+ new DataSource.Batched[Any, GetUserName] {
+ val identifier: String = "UserDataSource"
+
+ def run(requests: Chunk[GetUserName]): ZIO[Any, Nothing, CompletedRequestMap] = {
+ val resultMap = CompletedRequestMap.empty
+ requests.toList match {
+ case request :: Nil =>
+ val result: Task[String] = {
+ // get user by ID e.g. SELECT name FROM users WHERE id = $id
+ ZIO.succeed(???)
+ }
+
+ result.either.map(resultMap.insert(request))
+
+ case batch: Seq[GetUserName] =>
+ val result: Task[List[(Int, String)]] = {
+ // get multiple users at once e.g. SELECT id, name FROM users WHERE id IN ($ids)
+ ZIO.succeed(???)
+ }
+
+ result.fold(
+ err =>
+ requests.foldLeft(resultMap) { case (map, req) =>
+ map.insert(req)(Left(err))
+ },
+ _.foldLeft(resultMap) { case (map, (id, name)) =>
+ map.insert(GetUserName(id))(Right(name))
+ }
+ )
+ }
+ }
+ }
+
+ def getUserNameById(id: Int): ZQuery[Any, Nothing, String] =
+ ZQuery.fromRequest(GetUserName(id))(UserDataSource)
+
+ val query: ZQuery[Any, Nothing, List[String]] =
+ for {
+ ids <- ZQuery.succeed(1 to 10)
+ names <- ZQuery.foreachPar(ids)(id => getUserNameById(id)).map(_.toList)
+ } yield (names)
+
+ override def run(args: List[String]): URIO[zio.ZEnv, ExitCode] =
+ query.run
+ .tap(usernames => putStrLn(s"Usernames: $usernames"))
+ .exitCode
+}
+```
+
+## ZIO Redis
+
+[ZIO Redis](https://github.com/zio/zio-redis) is a ZIO native Redis client.
+
+### Introduction
+
+ZIO Redis is in the experimental phase of development, but its goals are:
+
+- **Type Safety**
+- **Performance**
+- **Minimum Dependency**
+- **ZIO Native**
+
+### Installation
+
+Since the ZIO Redis is in the experimental phase, it is not released yet.
+
+### Example
+
+To execute our ZIO Redis effect, we should provide the `RedisExecutor` layer to that effect. To create this layer we should also provide the following layers:
+
+- **Logging** — For simplicity, we ignored the logging functionality.
+- **RedisConfig** — Using default one, will connect to the `localhost:6379` Redis instance.
+- **Codec** — In this example, we are going to use the built-in `StringUtf8Codec` codec.
+
+```scala
+import zio.console.{Console, putStrLn}
+import zio.duration._
+import zio.logging.Logging
+import zio.redis._
+import zio.redis.codec.StringUtf8Codec
+import zio.schema.codec.Codec
+import zio.{ExitCode, URIO, ZIO, ZLayer}
+
+object ZIORedisExample extends zio.App {
+
+ val myApp: ZIO[Console with RedisExecutor, RedisError, Unit] = for {
+ _ <- set("myKey", 8L, Some(1.minutes))
+ v <- get[String, Long]("myKey")
+ _ <- putStrLn(s"Value of myKey: $v").orDie
+ _ <- hSet("myHash", ("k1", 6), ("k2", 2))
+ _ <- rPush("myList", 1, 2, 3, 4)
+ _ <- sAdd("mySet", "a", "b", "a", "c")
+ } yield ()
+
+ val layer: ZLayer[Any, RedisError.IOError, RedisExecutor] =
+ Logging.ignore ++ ZLayer.succeed(RedisConfig.Default) ++ ZLayer.succeed(StringUtf8Codec) >>> RedisExecutor.live
+
+ override def run(args: List[String]): URIO[zio.ZEnv, ExitCode] =
+ myApp.provideCustomLayer(layer).exitCode
+}
+```
+
+## ZIO RocksDB
+
+[ZIO RocksDB](https://github.com/zio/zio-rocksdb) is a ZIO-based interface to RocksDB.
+
+RocksDB is an embeddable persistent key-value store that is optimized for fast storage. ZIO RocksDB provides us a functional ZIO wrapper around its Java API.
+
+### Installation
+
+In order to use this library, we need to add the following line in our `build.sbt` file:
+
+```scala
+libraryDependencies += "dev.zio" %% "zio-rocksdb" % "0.3.0"
+```
+
+### Example
+
+An example of writing and reading key/value pairs and also using transactional operations when using RocksDB:
+
+```scala
+import zio.console._
+import zio.rocksdb.{RocksDB, Transaction, TransactionDB}
+import zio.{URIO, ZIO}
+
+import java.nio.charset.StandardCharsets._
+
+object ZIORocksDBExample extends zio.App {
+
+ private def bytesToString(bytes: Array[Byte]): String = new String(bytes, UTF_8)
+ private def bytesToInt(bytes: Array[Byte]): Int = bytesToString(bytes).toInt
+
+ val job1: ZIO[Console with RocksDB, Throwable, Unit] =
+ for {
+ _ <- RocksDB.put(
+ "Key".getBytes(UTF_8),
+ "Value".getBytes(UTF_8)
+ )
+ result <- RocksDB.get("Key".getBytes(UTF_8))
+ stringResult = result.map(bytesToString)
+ _ <- putStrLn(s"value: $stringResult")
+ } yield ()
+
+
+ val job2: ZIO[Console with TransactionDB, Throwable, Unit] =
+ for {
+ key <- ZIO.succeed("COUNT".getBytes(UTF_8))
+ _ <- TransactionDB.put(key, 0.toString.getBytes(UTF_8))
+ _ <- ZIO.foreachPar(0 until 10) { _ =>
+ TransactionDB.atomically {
+ Transaction.getForUpdate(key, exclusive = true) >>= { iCount =>
+ Transaction.put(key, iCount.map(bytesToInt).map(_ + 1).getOrElse(-1).toString.getBytes(UTF_8))
+ }
+ }
+ }
+ value <- TransactionDB.get(key)
+ counterValue = value.map(bytesToInt)
+ _ <- putStrLn(s"The value of counter: $counterValue") // Must be 10
+ } yield ()
+
+ private val transactional_db =
+ TransactionDB.live(new org.rocksdb.Options().setCreateIfMissing(true), "tr_db")
+
+ private val rocks_db =
+ RocksDB.live(new org.rocksdb.Options().setCreateIfMissing(true), "rocks_db")
+
+ override def run(args: List[String]): URIO[zio.ZEnv, Int] =
+ (job1 <*> job2)
+ .provideCustomLayer(transactional_db ++ rocks_db)
+ .foldCauseM(cause => putStrLn(cause.prettyPrint) *> ZIO.succeed(1), _ => ZIO.succeed(0))
+}
+```
+
+## ZIO S3
+
+[ZIO S3](https://github.com/zio/zio-s3) is an S3 client for ZIO.
+
+### Installation
+
+In order to use this library, we need to add the following line in our `build.sbt` file:
+
+```scala
+libraryDependencies += "dev.zio" %% "zio-s3" % "0.3.5"
+```
+
+### Example
+
+Let's try an example of creating a bucket and adding an object into it. To run this example, we need to run an instance of _Minio_ which is object storage compatible with S3:
+
+```bash
+docker run -p 9000:9000 -e MINIO_ACCESS_KEY=MyKey -e MINIO_SECRET_KEY=MySecret minio/minio server --compat /data
+```
+
+In this example we create a bucket and then add a JSON object to it and then retrieve that:
+
+```scala
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials
+import software.amazon.awssdk.regions.Region
+import zio.console.putStrLn
+import zio.s3._
+import zio.stream.{ZStream, ZTransducer}
+import zio.{Chunk, ExitCode, URIO}
+
+import java.net.URI
+
+object ZIOS3Example extends zio.App {
+
+ val myApp = for {
+ _ <- createBucket("docs")
+ json = Chunk.fromArray("""{ "id" : 1 , "name" : "A1" }""".getBytes)
+ _ <- putObject(
+ bucketName = "docs",
+ key = "doc1",
+ contentLength = json.length,
+ content = ZStream.fromChunk(json),
+ options = UploadOptions.fromContentType("application/json")
+ )
+ _ <- getObject("docs", "doc1")
+ .transduce(ZTransducer.utf8Decode)
+ .foreach(putStrLn(_))
+ } yield ()
+
+ override def run(args: List[String]): URIO[zio.ZEnv, ExitCode] =
+ myApp
+ .provideCustomLayer(
+ live(
+ Region.CA_CENTRAL_1,
+ AwsBasicCredentials.create("MyKey", "MySecret"),
+ Some(URI.create("http://localhost:9000"))
+ )
+ )
+ .exitCode
+}
+```
+
+## ZIO Schema
+
+[ZIO Schema](https://github.com/zio/zio-schema) is a [ZIO](https://zio.dev)-based library for modeling the schema of data structures as first-class values.
+
+### Introduction
+
+Schema is a structure of a data type. ZIO Schema reifies the concept of structure for data types. It makes a high-level description of any data type and makes them as first-class values.
+
+Creating a schema for a data type helps us to write codecs for that data type. So this library can be a host of functionalities useful for writing codecs and protocols like JSON, Protobuf, CSV, and so forth.
+
+With schema descriptions that can be automatically derived for case classes and sealed traits, _ZIO Schema_ will be going to provide powerful features for free (Note that the project is in the development stage and all these features are not supported yet):
+
+- Codecs for any supported protocol (JSON, protobuf, etc.), so data structures can be serialized and deserialized in a principled way
+- Diffing, patching, merging, and other generic-data-based operations
+- Migration of data structures from one schema to another compatible schema
+- Derivation of arbitrary type classes (`Eq`, `Show`, `Ord`, etc.) from the structure of the data
+
+When our data structures need to be serialized, deserialized, persisted, or transported across the wire, then _ZIO Schema_ lets us focus on data modeling and automatically tackle all the low-level, messy details for us.
+
+### Installation
+
+In order to use this library, we need to add the following line in our `build.sbt` file:
+
+```scala
+libraryDependencies += "dev.zio" %% "zio-schema" % "0.0.6"
+```
+
+### Example
+
+In this simple example first, we create a schema for `Person` and then run the _diff_ operation on two instances of the `Person` data type, and finally we encode a Person instance using _Protobuf_ protocol:
+
+```scala
+import zio.console.putStrLn
+import zio.schema.codec.ProtobufCodec._
+import zio.schema.{DeriveSchema, Schema}
+import zio.stream.ZStream
+import zio.{Chunk, ExitCode, URIO}
+
+final case class Person(name: String, age: Int, id: String)
+object Person {
+ implicit val schema: Schema[Person] = DeriveSchema.gen[Person]
+}
+
+Person.schema
+// res5: Schema[Person] = CaseClass3(
+// annotations = IndexedSeq(),
+// field1 = Field(
+// label = "name",
+// schema = Lazy(
+// schema0 = zio.schema.DeriveSchema$$$Lambda$4805/0x000000080181bc40@590f88d4
+// ),
+// annotations = IndexedSeq()
+// ),
+// field2 = Field(
+// label = "age",
+// schema = Lazy(
+// schema0 = zio.schema.DeriveSchema$$$Lambda$4806/0x000000080181c840@306f30d3
+// ),
+// annotations = IndexedSeq()
+// ),
+// field3 = Field(
+// label = "id",
+// schema = Lazy(
+// schema0 = zio.schema.DeriveSchema$$$Lambda$4807/0x000000080181cc40@109bfdcb
+// ),
+// annotations = IndexedSeq()
+// ),
+// construct = zio.schema.DeriveSchema$$$Lambda$4808/0x000000080181d040@1e2c2f73,
+// extractField1 = zio.schema.DeriveSchema$$$Lambda$4809/0x000000080181d840@3b404d4a,
+// extractField2 = zio.schema.DeriveSchema$$$Lambda$4810/0x000000080181e840@72e00b59,
+// extractField3 = zio.schema.DeriveSchema$$$Lambda$4811/0x000000080181f040@3ddad56b
+// )
+
+import zio.schema.syntax._
+
+Person("Alex", 31, "0123").diff(Person("Alex", 31, "124"))
+// res6: schema.Diff = Record(
+// differences = ListMap(
+// "name" -> Identical,
+// "age" -> Identical,
+// "id" -> Myers(
+// edits = IndexedSeq(
+// Delete(s = "0"),
+// Keep(s = "1"),
+// Keep(s = "2"),
+// Insert(s = "4"),
+// Delete(s = "3")
+// )
+// )
+// )
+// )
+
+def toHex(chunk: Chunk[Byte]): String =
+ chunk.toArray.map("%02X".format(_)).mkString
+
+zio.Runtime.default.unsafeRun(
+ ZStream
+ .succeed(Person("Thomas", 23, "2354"))
+ .transduce(
+ encoder(Person.schema)
+ )
+ .runCollect
+ .flatMap(x => putStrLn(s"Encoded data with protobuf codec: ${toHex(x)}"))
+)
+// Encoded data with protobuf codec: 0A0654686F6D617310171A0432333534
+```
+
+## ZIO SQS
+
+[ZIO SQS](https://github.com/zio/zio-sqs) is a ZIO-powered client for AWS SQS. It is built on top of the [AWS SDK for Java 2.0](https://docs.aws.amazon.com/sdk-for-java/v2/developer-guide/basics.html) via the automatically generated wrappers from [zio-aws](https://github.com/vigoo/zio-aws).
+
+### Introduction
+
+ZIO SQS enables us to produce and consume elements to/from the Amazon SQS service. It is integrated with ZIO Streams, so we can produce and consume elements in a streaming fashion, element by element or micro-batching.
+
+### Installation
+
+In order to use this library, we need to add the following line in our `build.sbt` file:
+
+```scala
+libraryDependencies += "dev.zio" %% "zio-sqs" % "0.4.2"
+```
+
+### Example
+
+In this example we produce a stream of events to the `MyQueue` and then consume them from that queue:
+
+```scala
+import io.github.vigoo.zioaws
+import io.github.vigoo.zioaws.core.config.CommonAwsConfig
+import io.github.vigoo.zioaws.sqs.Sqs
+import software.amazon.awssdk.auth.credentials.{
+ AwsBasicCredentials,
+ StaticCredentialsProvider
+}
+import software.amazon.awssdk.regions.Region
+import zio.clock.Clock
+import zio.sqs.producer.{Producer, ProducerEvent}
+import zio.sqs.serialization.Serializer
+import zio.sqs.{SqsStream, SqsStreamSettings, Utils}
+import zio.stream.ZStream
+import zio.{ExitCode, RIO, URIO, ZLayer, _}
+
+object ProducerConsumerExample extends zio.App {
+ val queueName = "MyQueue"
+
+ val client: ZLayer[Any, Throwable, Sqs] = zioaws.netty.default ++
+ ZLayer.succeed(
+ CommonAwsConfig(
+ region = Some(Region.of("ap-northeast-2")),
+ credentialsProvider = StaticCredentialsProvider.create(
+ AwsBasicCredentials.create("key", "key")
+ ),
+ endpointOverride = None,
+ commonClientConfig = None
+ )
+ ) >>>
+ zioaws.core.config.configured() >>>
+ zioaws.sqs.live
+
+ val stream: ZStream[Any, Nothing, ProducerEvent[String]] =
+ ZStream.iterate(0)(_ + 1).map(_.toString).map(ProducerEvent(_))
+
+ val program: RIO[Sqs with Clock, Unit] = for {
+ _ <- Utils.createQueue(queueName)
+ queueUrl <- Utils.getQueueUrl(queueName)
+ producer = Producer.make(queueUrl, Serializer.serializeString)
+ _ <- producer.use { p =>
+ p.sendStream(stream).runDrain
+ }
+ _ <- SqsStream(
+ queueUrl,
+ SqsStreamSettings(stopWhenQueueEmpty = true, waitTimeSeconds = Some(3))
+ ).foreach(msg => UIO(println(msg.body)))
+ } yield ()
+
+ override def run(args: List[String]): URIO[zio.ZEnv, ExitCode] =
+ program.provideCustomLayer(client).exitCode
+}
+```
+
+
+## ZIO Telemetry
+
+[ZIO telemetry](https://github.com/zio/zio-telemetry) is purely-functional and type-safe. It provides clients for OpenTracing and OpenTelemetry.
+
+### Introduction
+
+In monolithic architecture, everything is in one place, and we know when a request starts and then how it goes through the components and when it finishes. We can obviously see what is happening with our request and where is it going. But, in distributed systems like microservice architecture, we cannot find out the story of a request through various services easily. This is where distributed tracing comes into play.
+
+ZIO Telemetry is a purely functional client which helps us propagate context between services in a distributed environment.
+
+### Installation
+
+In order to use this library, we need to add the following line in our `build.sbt` file if we want to use [OpenTelemetry](https://opentelemetry.io/) client:
+
+```scala
+libraryDependencies += "dev.zio" %% "zio-telemetry" % "0.8.1"
+```
+
+And for using [OpenTracing](https://opentracing.io/) client we should add the following line in our `build.sbt` file:
+
+```scala
+libraryDependencies += "dev.zio" %% "zio-opentracing" % "0.8.1"
+```
+
+### Example
+
+In this example, we create two services, `ProxyServer` and `BackendServer`. When we call ProxyServer, the BackendServer will be called.
+
+Note that we are going to use _OpenTracing_ client for this example.
+
+Here is a simplified diagram of our services:
+
+```
+ ┌────────────────┐
+ │ │
+ ┌─────►│ Jaeger Backend │◄────┐
+ │ │ │ │
+ Tracing Data │ └────────────────┘ │ Tracing Data
+ │ │
+ ┌────────┴─────────┐ ┌─────────┴────────┐
+ │ │ │ │
+User Request──►│   Proxy Server   ├────────►│  Backend Server  │
+ │ │ │ │
+ └──────────────────┘ └──────────────────┘
+```
+
+First of all we should add following dependencies to our `build.sbt` file:
+
+```scala
+object Versions {
+ val http4s = "0.21.24"
+ val jaeger = "1.6.0"
+ val sttp = "2.2.9"
+ val opentracing = "0.33.0"
+ val opentelemetry = "1.4.1"
+ val opencensus = "0.28.3"
+ val zipkin = "2.16.3"
+ val zio = "1.0.9"
+ val zioInteropCats = "2.5.1.0"
+}
+
+lazy val openTracingExample = Seq(
+ "org.typelevel" %% "cats-core" % "2.6.1",
+ "io.circe" %% "circe-generic" % "0.14.1",
+ "org.http4s" %% "http4s-core" % Versions.http4s,
+ "org.http4s" %% "http4s-blaze-server" % Versions.http4s,
+ "org.http4s" %% "http4s-dsl" % Versions.http4s,
+ "org.http4s" %% "http4s-circe" % Versions.http4s,
+ "io.jaegertracing" % "jaeger-core" % Versions.jaeger,
+ "io.jaegertracing" % "jaeger-client" % Versions.jaeger,
+ "io.jaegertracing" % "jaeger-zipkin" % Versions.jaeger,
+ "com.github.pureconfig" %% "pureconfig" % "0.16.0",
+ "com.softwaremill.sttp.client" %% "async-http-client-backend-zio" % Versions.sttp,
+ "com.softwaremill.sttp.client" %% "circe" % Versions.sttp,
+ "dev.zio" %% "zio-interop-cats" % Versions.zioInteropCats,
+ "io.zipkin.reporter2" % "zipkin-reporter" % Versions.zipkin,
+ "io.zipkin.reporter2" % "zipkin-sender-okhttp3" % Versions.zipkin
+)
+```
+
+Let's create a `ZLayer` for `OpenTracing` which provides us Jaeger tracer. Each microservice uses this layer to send its tracing data to the _Jaeger Backend_:
+
+```scala
+import io.jaegertracing.Configuration
+import io.jaegertracing.internal.samplers.ConstSampler
+import io.jaegertracing.zipkin.ZipkinV2Reporter
+import org.apache.http.client.utils.URIBuilder
+import zio.ZLayer
+import zio.clock.Clock
+import zio.telemetry.opentracing.OpenTracing
+import zipkin2.reporter.AsyncReporter
+import zipkin2.reporter.okhttp3.OkHttpSender
+
+object JaegerTracer {
+ def makeJaegerTracer(host: String, serviceName: String): ZLayer[Clock, Throwable, Clock with OpenTracing] =
+ OpenTracing.live(new Configuration(serviceName)
+ .getTracerBuilder
+ .withSampler(new ConstSampler(true))
+ .withReporter(
+ new ZipkinV2Reporter(
+ AsyncReporter.create(
+ OkHttpSender.newBuilder
+ .compressionEnabled(true)
+ .endpoint(
+ new URIBuilder()
+ .setScheme("http")
+ .setHost(host)
+ .setPath("/api/v2/spans")
+ .build.toString
+ )
+ .build
+ )
+ )
+ )
+ .build
+ ) ++ Clock.live
+}
+```
+
+The _BackendServer_:
+
+```scala
+import io.opentracing.propagation.Format.Builtin.{HTTP_HEADERS => HttpHeadersFormat}
+import io.opentracing.propagation.TextMapAdapter
+import org.http4s._
+import org.http4s.dsl.Http4sDsl
+import org.http4s.server.Router
+import org.http4s.server.blaze.BlazeServerBuilder
+import org.http4s.syntax.kleisli._
+import zio.clock.Clock
+import zio.interop.catz._
+import zio.telemetry.opentracing._
+import JaegerTracer.makeJaegerTracer
+import zio.{ExitCode, ZEnv, ZIO}
+
+import scala.jdk.CollectionConverters._
+
+object BackendServer extends CatsApp {
+ type AppTask[A] = ZIO[Clock, Throwable, A]
+
+ val dsl: Http4sDsl[AppTask] = Http4sDsl[AppTask]
+ import dsl._
+
+ override def run(args: List[String]): ZIO[ZEnv, Nothing, ExitCode] =
+ ZIO.runtime[Clock].flatMap { implicit runtime =>
+ BlazeServerBuilder[AppTask](runtime.platform.executor.asEC)
+ .bindHttp(port = 9000, host = "0.0.0.0")
+ .withHttpApp(
+ Router[AppTask](mappings = "/" ->
+ HttpRoutes.of[AppTask] { case request@GET -> Root =>
+ ZIO.unit
+ .spanFrom(
+ format = HttpHeadersFormat,
+ carrier = new TextMapAdapter(request.headers.toList.map(h => h.name.value -> h.value).toMap.asJava),
+ operation = "GET /"
+ )
+ .provideLayer(makeJaegerTracer(host = "0.0.0.0:9411", serviceName = "backend-service")) *> Ok("Ok!")
+ }
+ ).orNotFound
+ )
+ .serve
+ .compile
+ .drain
+ }.exitCode
+}
+```
+
+And the _ProxyServer_ which calls the _BackendServer_:
+
+```scala
+import cats.effect.{ExitCode => catsExitCode}
+import io.opentracing.propagation.Format.Builtin.{HTTP_HEADERS => HttpHeadersFormat}
+import io.opentracing.propagation.TextMapAdapter
+import io.opentracing.tag.Tags
+import org.http4s.HttpRoutes
+import org.http4s.dsl.Http4sDsl
+import org.http4s.server.Router
+import org.http4s.server.blaze.BlazeServerBuilder
+import org.http4s.syntax.kleisli._
+import sttp.client.asynchttpclient.zio.AsyncHttpClientZioBackend
+import sttp.client.basicRequest
+import sttp.model.Uri
+import zio.clock.Clock
+import zio.interop.catz._
+import zio.telemetry.opentracing.OpenTracing
+import JaegerTracer.makeJaegerTracer
+import zio.{ExitCode, UIO, ZEnv, ZIO}
+
+import scala.collection.mutable
+import scala.jdk.CollectionConverters._
+
+object ProxyServer extends CatsApp {
+
+ type AppTask[A] = ZIO[Clock, Throwable, A]
+
+ private val backend = AsyncHttpClientZioBackend()
+
+ override def run(args: List[String]): ZIO[ZEnv, Nothing, ExitCode] =
+ ZIO.runtime[Clock].flatMap { implicit runtime =>
+ implicit val ec = runtime.platform.executor.asEC
+ BlazeServerBuilder[AppTask](ec)
+ .bindHttp(port = 8080, host = "0.0.0.0")
+ .withHttpApp(
+ Router[AppTask](mappings = "/" -> {
+ val dsl: Http4sDsl[AppTask] = Http4sDsl[AppTask]
+ import dsl._
+
+ HttpRoutes.of[AppTask] { case GET -> Root =>
+ (for {
+ _ <- OpenTracing.tag(Tags.SPAN_KIND.getKey, Tags.SPAN_KIND_CLIENT)
+ _ <- OpenTracing.tag(Tags.HTTP_METHOD.getKey, GET.name)
+ _ <- OpenTracing.setBaggageItem("proxy-baggage-item-key", "proxy-baggage-item-value")
+ buffer = new TextMapAdapter(mutable.Map.empty[String, String].asJava)
+ _ <- OpenTracing.inject(HttpHeadersFormat, buffer)
+ headers <- extractHeaders(buffer)
+ res <-
+ backend.flatMap { implicit backend =>
+ basicRequest.get(Uri("0.0.0.0", 9000).path("/")).headers(headers).send()
+ }.map(_.body)
+ .flatMap {
+ case Right(_) => Ok("Ok!")
+ case Left(_) => Ok("Oops!")
+ }
+ } yield res)
+ .root(operation = "GET /")
+ .provideLayer(
+ makeJaegerTracer(host = "0.0.0.0:9411", serviceName = "proxy-server")
+ )
+ }
+ }).orNotFound
+ )
+ .serve
+ .compile[AppTask, AppTask, catsExitCode]
+ .drain
+ .as(ExitCode.success)
+ }.exitCode
+
+ private def extractHeaders(adapter: TextMapAdapter): UIO[Map[String, String]] = {
+ val m = mutable.Map.empty[String, String]
+ UIO(adapter.forEach { entry =>
+ m.put(entry.getKey, entry.getValue)
+ ()
+ }).as(m.toMap)
+ }
+
+}
+```
+
+First, we run the following command to start Jaeger backend:
+
+```bash
+docker run -d --name jaeger \
+ -e COLLECTOR_ZIPKIN_HTTP_PORT=9411 \
+ -p 5775:5775/udp \
+ -p 6831:6831/udp \
+ -p 6832:6832/udp \
+ -p 5778:5778 \
+ -p 16686:16686 \
+ -p 14268:14268 \
+ -p 9411:9411 \
+ jaegertracing/all-in-one:1.6
+```
+
+It's time to run Backend and Proxy servers. After starting these two, we can start calling `ProxyServer`:
+
+```bash
+curl -X GET http://0.0.0.0:8080/
+```
+
+Now we can check the Jaeger service (http://localhost:16686/) to see the result.
+
+## ZIO ZMX
+
+[ZIO ZMX](https://github.com/zio/zio-zmx) is a monitoring, metrics, and diagnostics toolkit for ZIO applications.
+
+### Introduction
+
+ZIO ZMX gives us a straightforward way to understand exactly what is going on in our ZIO application when we deploy it in production.
+
+ZIO ZMX key features:
+
+- **Easy Setup** — It seamlessly integrates with an existing application. We don't need to change any line of the existing ZIO application, except a few lines of code at the top level.
+- **Diagnostics** — To track the activity of fibers in a ZIO application including fiber lifetimes and reason for termination.
+- **Metrics** — Tracking of user-defined metrics (Counter, Gauge, Histogram, etc.)
+- **Integrations** — Support for major metrics collection services including _[Prometheus](https://github.com/prometheus/prometheus)_ and _[StatsD](https://github.com/statsd/statsd)_.
+- **Zero Dependencies** - No dependencies other than ZIO itself.
+
+### Installation
+
+In order to use this library, we need to add the following line in our `build.sbt` file:
+
+```scala
+libraryDependencies += "dev.zio" %% "zio-zmx" % "0.0.6"
+```
+
+### Example
+
+To run this example, we also should add the following dependency in our `build.sbt` file:
+
+```scala
+libraryDependencies += "org.polynote" %% "uzhttp" % "0.2.7"
+```
+
+In this example, we expose metric information using _Prometheus_ protocol:
+
+```scala
+import uzhttp._
+import uzhttp.server.Server
+import zio._
+import zio.blocking.Blocking
+import zio.clock.Clock
+import zio.console._
+import zio.duration.durationInt
+import zio.zmx.metrics._
+import zio.zmx.prometheus.PrometheusClient
+
+import java.io.IOException
+import java.lang
+import java.net.InetSocketAddress
+
+object ZmxSampleApp extends zio.App {
+
+ val myApp: ZIO[Console with Clock with Has[PrometheusClient] with Blocking, IOException, Unit] =
+ for {
+ server <-
+ Server
+ .builder(new InetSocketAddress("localhost", 8080))
+ .handleSome { case request if request.uri.getPath == "/" =>
+ PrometheusClient.snapshot.map(p => Response.plain(p.value))
+ }
+ .serve
+ .use(_.awaitShutdown).fork
+ program <-
+ (for {
+ _ <- (ZIO.sleep(1.seconds) *> request @@ MetricAspect.count("request_counts")).forever.forkDaemon
+ _ <- (ZIO.sleep(3.seconds) *>
+ ZIO.succeed(
+ lang.Runtime.getRuntime.totalMemory() - lang.Runtime.getRuntime.freeMemory()
+ ).map(_ / (1024.0 * 1024.0)) @@ MetricAspect.setGauge("memory_usage")).forever.forkDaemon
+ } yield ()).fork
+ _ <- putStrLn("Press Any Key") *> getStrLn.catchAll(_ => ZIO.none) *> server.interrupt *> program.interrupt
+ } yield ()
+
+ def run(args: List[String]): URIO[ZEnv, ExitCode] =
+ myApp.provideCustomLayer(PrometheusClient.live).exitCode
+
+ private def request: UIO[Unit] = ZIO.unit
+}
+```
+
+By calling the following API we can access metric information:
+
+```bash
+curl -X GET localhost:8080
+```
+
+Now we can config the Prometheus server to scrape metric information periodically.
diff --git a/website/versioned_docs/version-1.0.18/resources/ecosystem/templates.md b/website/versioned_docs/version-1.0.18/resources/ecosystem/templates.md
new file mode 100644
index 000000000000..e7227d7ed950
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/resources/ecosystem/templates.md
@@ -0,0 +1,9 @@
+---
+id: templates
+title: "Project Templates"
+---
+
+List of project starters, bootstrap tools, or templates.
+
+- [zio-akka-quickstart.g8](https://github.com/ScalaConsultants/zio-akka-quickstart.g8) — A Giter8 template for a basic Scala application build using ZIO, Akka HTTP and Slick
+- [zio-dotty-quickstart.g8](https://github.com/ScalaConsultants/zio-dotty-quickstart.g8) — A Giter8 template for a basic Dotty application build using ZIO
diff --git a/website/versioned_docs/version-1.0.18/resources/ecosystem/tools.md b/website/versioned_docs/version-1.0.18/resources/ecosystem/tools.md
new file mode 100644
index 000000000000..4532fae4a2d5
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/resources/ecosystem/tools.md
@@ -0,0 +1,8 @@
+---
+id: tools
+title: "Tools for ZIO"
+---
+
+- [ZIO IntelliJ](https://github.com/zio/zio-intellij) — A complimentary, community-developed plugin for IntelliJ IDEA, brings enhancements when using ZIO in your projects
+- [zio-shield](https://github.com/zio/zio-shield) — Enforce best coding practices with ZIO
+- [zio/zio-zmx](https://github.com/zio/zio-zmx) — Monitoring, Metrics and Diagnostics for ZIO
\ No newline at end of file
diff --git a/website/versioned_docs/version-1.0.18/resources/index.md b/website/versioned_docs/version-1.0.18/resources/index.md
new file mode 100644
index 000000000000..b0ec80986693
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/resources/index.md
@@ -0,0 +1,6 @@
+---
+id: index
+title: "Summary"
+---
+
+If you find a new great library, talk, resource, slides or project related to ZIO, consider adding it to the list with your PR.
diff --git a/website/versioned_docs/version-1.0.18/resources/learning/articles.md b/website/versioned_docs/version-1.0.18/resources/learning/articles.md
new file mode 100644
index 000000000000..a89ad9b036c1
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/resources/learning/articles.md
@@ -0,0 +1,76 @@
+---
+id: articles
+title: "Articles"
+---
+
+_These articles reflect the state of ZIO at the time of their publication. The code samples might be outdated, considering ZIO was early in development at the time they were written. However, the concepts are still relevant._
+
+## ZIO Core
+- [Beautiful, Simple, Testable Functional Effects for Scala](http://degoes.net/articles/zio-environment) (introducing ZIO Environment) by John De Goes (February 2019)
+- [ZIO & Cats Effect: A Match Made in Heaven](http://degoes.net/articles/zio-cats-effect) by John De Goes (April 2019)
+- [Thread Pool Best Practices with ZIO](http://degoes.net/articles/zio-threads) by John De Goes (January 2019)
+- [Bifunctor IO: A Step Away from Dynamically-Typed Error Handling](http://degoes.net/articles/bifunctor-io) by John De Goes (May 2018)
+- [Wrapping impure code with ZIO](https://medium.com/@ghostdogpr/wrapping-impure-code-with-zio-9265c219e2e) by Pierre Ricadat (July 2019)
+- [Thread shifting in cats-effect and ZIO](https://blog.softwaremill.com/thread-shifting-in-cats-effect-and-zio-9c184708067b) by Adam Warski (June 2019)
+- [Performant Functional Programming to the Max with ZIO](https://cloudmark.github.io/A-Journey-To-Zio/) by Mark Galea (May 2019)
+
+## Patterns and Best Practices
+- [5 pitfalls to avoid when starting to work with ZIO](https://medium.com/wix-engineering/5-pitfalls-to-avoid-when-starting-to-work-with-zio-adefdc7d2d5c) by Natan Silnitsky (Jan 2020)
+- [Processing ZIO effects through a pipeline](https://medium.com/@svroonland/processing-zio-effects-through-a-pipeline-c469e28dff62) (September 2020)
+
+## ZIO STM
+- [How to write a concurrent LRU cache with ZIO STM](https://scalac.io/how-to-write-a-completely-lock-free-concurrent-lru-cache-with-zio-stm/) by Jorge Vasquez (March 2020)
+- [Exploring the STM functionality in ZIO](https://freskog.github.io/blog/2019/05/30/explore-zio-stm/) by Fredrik Skogberg (May 2019)
+
+## Testing
+- [Testing Incrementally with ZIO Environment](http://degoes.net/articles/testable-zio) by John De Goes (March 2019)
+- [Effective testing with ZIO Test (RC18)](https://scala.monster/zio-test/) by Pavels Sisojevs (April 2020)
+- [Integration Testing](https://blended-zio.github.io/blended-zio/blog/integration-testing)
+- [Testing background process with ZIO](https://www.rudder.io/blog/testing-background-process-zio/) by François Armand (March 2020)
+- [Effective testing with ZIO Test (RC17)](https://scala.monster/zio-test-old/) by Pavels Sisojevs (January 2020)
+- [Speeding up time with ZIO TestClock](https://timpigden.github.io/_pages/zio-streams/SpeedingUpTime.html) by Tim Pigden (October 2019)
+
+## ZIO Streams
+- [ZIO Streams and JMS](https://blended-zio.github.io/blended-zio/blog/zio-streams-jms) by Andreas Gies (October 2020)
+- [Building the Death Star with ZIO Stream](https://juliano-alves.com/2020/05/04/deathstar-zio-stream/) by Juliano Alves (May 2020)
+- [Simulating IoT Events with ZIO Streams](https://timpigden.github.io/_pages/zio-streams/GeneratingChillEvents.html) by Tim Pigden (November 2019)
+
+## ZIO Module Pattern
+- [Example of ZLayers being used in combination](https://timpigden.github.io/_pages/zlayer/Examples.html) by Tim Pigden (March 2020)
+- [From idea to product with ZLayer](https://scala.monster/welcome-zio/) by Pavels Sisojevs (March 2020)
+- [What are the benefits of the ZIO modules with ZLayers](https://medium.com/@pascal.mengelt/what-are-the-benefits-of-the-zio-modules-with-zlayers-3bf6cc064a9b) by Pascal Mengelt (March 2020)
+- [Decouple the Program from its Implementation with ZIO modules.](https://medium.com/@pascal.mengelt/decouple-the-program-from-its-implementation-with-zio-modules-d9b8713d502e) by Pascal Mengelt (December 2019)
+- [Functional dependency injection in Scala using ZIO environments](https://blog.jdriven.com/2019/10/functional-dependency-injection-in-scala-using-zio-environments/) by Chiel van de Steeg (October 2019)
+
+## ZIO Libraries
+- [Introduction to ZIO Actors](https://www.softinio.com/post/introduction-to-zio-actors/) by [Salar Rahmanian](https://www.softinio.com) (November 2020)
+
+## ZIO Use Cases
+- [Implement your future with ZIO](https://scala.monster/welcome-zio-old/) by Pavels Sisojevs (December 2019)
+- [How to write a command line application with ZIO?](https://scalac.io/write-command-line-application-with-zio/) by Piotr Gołębiewski (November 2019)
+- [Building the Hangman Game using ScalaZ ZIO](https://abhsrivastava.github.io/2018/11/03/Hangman-Game-Using-ZIO/) by Abhishek Srivastava (November 2018)
+- [Elevator Control System using ZIO](https://medium.com/@wiemzin/elevator-control-system-using-zio-c718ae423c58) by Wiem Zine El Abidine (September 2018)
+- [Spring to ZIO 101 - ZIO CRUD](https://adrianfilip.com/2020/03/15/spring-to-zio-101/) by Adrian Filip (March 2020)
+- [Hacker News API Part 5](http://justinhj.github.io/2019/04/07/hacker-news-api-5.html) by Justin Heyes-Jones (April 2019)
+
+## Integration with Other Libraries
+- [Making ZIO, Akka and Slick play together nicely](https://scalac.io/making-zio-akka-slick-play-together-nicely-part-1-zio-and-slick/) by Jakub Czuchnowski (August 2019)
+- [ZIO + Http4s: a simple API client](https://juliano-alves.com/2020/04/20/zio-http4s-a-simple-api-client/) by Juliano Alves (April 2020)
+- [Combining ZIO and Akka to enable distributed FP in Scala](https://medium.com/@ghostdogpr/combining-zio-and-akka-to-enable-distributed-fp-in-scala-61ffb81e3283) by Pierre Ricadat (July 2019)
+- [ZIO with http4s and doobie](https://medium.com/@wiemzin/zio-with-http4s-and-doobie-952fba51d089) by Wiem Zine El Abidine (June 2019)
+- [Using 47 Degree's Fetch library with ZIO](http://justinhj.github.io/2019/05/05/using-47degs-fetch-with-zio.html) by Justin Heyes-Jones (May 2019)
+- [What can ZIO do for me? A Long Polling example with sttp.](https://medium.com/@pascal.mengelt/what-can-zio-do-for-me-32281e4e8b16) by Pascal Mengelt (November 2019)
+- [uzhttp + sttp for light-weight http and websockets](https://timpigden.github.io/_pages/zio-uzhttp-sttp/uzhttp-sttp.html) updated for 1.0.1 by Tim Pigden (August 2020)
+- [Streaming all the way with ZIO, Doobie, Quill, http4s and fs2](https://juliano-alves.com/2020/06/15/streaming-all-the-way-zio-doobie-quill-http4s-fs2/) by Juliano Alves (June 2020)
+- [ZIO with http4s, Auth, Codecs and zio-tests (RC18)](https://timpigden.github.io/_pages/zio-http4s/intro.html) by Tim Pigden (April 2020)
+- [Building a cool CLI with Decline for my ZIO App](https://medium.com/@pascal.mengelt/building-a-cool-cli-with-decline-for-my-zio-app-80e095b2899a) by Pascal Mengelt (May 2020)
+- [Streaming microservices with ZIO and Kafka](https://scalac.io/streaming-microservices-with-zio-and-kafka/) by Aleksandar Skrbic (February 2021)
+- [An Introduction to ZIO Kafka](https://ziverge.com/blog/introduction-to-zio-kafka/)
+- [tAPIr’s Endpoint meets ZIO’s IO](https://blog.softwaremill.com/tapirs-endpoint-meets-zio-s-io-3278099c5e10) by Adam Warski (July 2019)
+
+## Contribution
+- [Lessons Learned From Being a ZIO Contributor](https://www.softinio.com/post/lessons-learned-from-being-a-zio-contributor/) by [Salar Rahmanian](https://www.softinio.com) (September 2020)
+
+## Benchmarking and Comparison
+- [Scalaz 8 IO vs Akka (typed) Actors vs Monix (part 1)](https://blog.softwaremill.com/scalaz-8-io-vs-akka-typed-actors-vs-monix-part-1-5672657169e1) + [part 2](https://blog.softwaremill.com/akka-vs-zio-vs-monix-part-2-communication-9ce7261aa08c) + [part 3](https://blog.softwaremill.com/supervision-error-handling-in-zio-akka-and-monix-part-3-series-summary-abe75f964c2a) by Adam Warski (June 2018)
+- [Benchmarking Functional Error Handling in Scala](https://www.iteratorshq.com/blog/benchmarking-functional-error-handling-in-scala/) by Marcin Rzeźnicki
\ No newline at end of file
diff --git a/website/versioned_docs/version-1.0.18/resources/learning/cheatsheets.md b/website/versioned_docs/version-1.0.18/resources/learning/cheatsheets.md
new file mode 100644
index 000000000000..3a4569b82ca7
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/resources/learning/cheatsheets.md
@@ -0,0 +1,7 @@
+---
+id: cheatsheets
+title: "Cheat Sheets"
+---
+
+- [ZIO Cheat Sheet](https://github.com/ghostdogpr/zio-cheatsheet)
+- [Snippets for Future/Scalaz Task](https://gist.github.com/ubourdon/7b7e929117343b2324cde6eab57674a6)
diff --git a/website/versioned_docs/version-1.0.18/resources/learning/cookbooks.md b/website/versioned_docs/version-1.0.18/resources/learning/cookbooks.md
new file mode 100644
index 000000000000..bd5ed4703f88
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/resources/learning/cookbooks.md
@@ -0,0 +1,8 @@
+---
+id: cookbooks
+title: "Cookbooks"
+---
+
+- [ZIO Cookbook](https://github.com/Neurodyne/zio-cookbook) A beginners' tour to ZIO by Boris V.Kuznetsov
+- [Mastering modularity in ZIO with ZLayers](https://scalac.io/ebook/mastering-modularity-in-zio-with-zlayer/intro/) by Jorge Vasquez
+- [Improve your focus with ZIO Optics](https://scalac.io/ebook/improve-your-focus-with-zio-optics/introduction-5/) by Jorge Vasquez
diff --git a/website/versioned_docs/version-1.0.18/resources/learning/projectsusingzio.md b/website/versioned_docs/version-1.0.18/resources/learning/projectsusingzio.md
new file mode 100644
index 000000000000..f04cdcf7c550
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/resources/learning/projectsusingzio.md
@@ -0,0 +1,9 @@
+---
+id: poweredbyzio
+title: "Projects using ZIO"
+---
+
+- [Rudder](https://github.com/normation/rudder) — an example about how to manage error ADT in several sub-projects and specialized sub-domains, and how one can gradually contextualize error messages in domain layers. Uses queues, brackets, interop with Java, and historical code. See [context and references](https://issues.rudder.io/issues/14870).
+- [ZIO AI Platform Backend](https://github.com/Clover-Group/zio_front) — Clover Group AI Platform backend, which employs ZIO, Doobie, http4s and Kafka.
+- [Polynote](https://github.com/polynote/polynote) — a new, polyglot notebook with first-class Scala support, Apache Spark integration, multi-language interoperability including Scala, Python, and SQL, as-you-type autocomplete, and more.
+- [Blended ZIO](https://blended-zio.github.io/blended-zio/) — A sample project migrating a largely untyped, actor based integration framework to ZIO.
diff --git a/website/versioned_docs/version-1.0.18/resources/learning/sampleprojects.md b/website/versioned_docs/version-1.0.18/resources/learning/sampleprojects.md
new file mode 100644
index 000000000000..196c618a3cb7
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/resources/learning/sampleprojects.md
@@ -0,0 +1,29 @@
+---
+id: sampleprojects
+title: "Sample Projects"
+---
+
+- [Redis Streams with ZIO](https://github.com/kensuio-oss/redis-streams-zio) by [Leszek Gruchała](https://github.com/leszekgruchala)
+- [ZIO Production Ready Microservice](https://github.com/saraiva132/zio-cats-backend) by [Rafael Figueiredo](https://github.com/saraiva132)
+- [ZIO Modern Backend giter8 Template - Tapir, sttp, http4s](https://github.com/Anadyne/zio-full-backend.g8) by [Boris V.Kuznetsov](https://github.com/tampler)
+- [ZIO CRUD sample - Code for the 'Spring to ZIO 101' blog post 03/2020](https://github.com/adrianfilip/zio-crud-sample) by [Adrian Filip](https://github.com/adrianfilip)
+- [ZIO modules with different implementations - Code for the 'Decouple the Program from its Implementation...' blog post 12/2019](https://github.com/pme123/zio-comps-module) by [pme123](https://github.com/pme123)
+- [TicTacToe command line game using Module Pattern](https://github.com/ioleo/zio-by-example) by [ioleo](https://github.com/ioleo)
+- [Long Polling with ZIO - Code for the 'What can ZIO do for me?' blog post 11/2019](https://github.com/pme123/zio-http4s-long-polling) by [pme123](https://github.com/pme123)
+- [Hello world with ZIO and http4s](https://gitlab.com/maplambda/zio-http4s) by [maplambda](https://gitlab.com/maplambda)
+- [STM Partitioning - Code for the 'Exploring the STM functionality in ZIO' blog post](https://github.com/freskog/stm-partitioning)
+- [ZIO Todo Backend](https://github.com/mschuwalow/zio-todo-backend) by [mschuwalow](https://github.com/mschuwalow)
+- [Event Driven Messenger](https://github.com/edvmorango/event-driven-messenger) by [edvmorango](https://github.com/edvmorango)
+- [Zorechka Bot](https://github.com/wix-incubator/zorechka-bot) by [wix-incubator](https://github.com/wix-incubator)
+- [A ZIO + http4s + Circe + Quill + Tapir giter8 template](https://github.com/pandaforme/ultron.g8) by [pandaforme](https://github.com/pandaforme)
+- [More ZIO/http4s: with http4s authentication, encoding/decoding + zio tests](https://github.com/TimPigden/zio-http4s-examples) by [Tim Pigden](https://github.com/TimPigden)
+- [GitHub Release Pager with ZIO](https://github.com/psisoyev/release-pager) by [Pavels Sisojevs](https://github.com/psisoyev)
+- [Full Scala Stack, a sample project that uses akka-http, slick, zio, scalajs, react, ScalablyTyped and semantic ui](https://github.com/rleibman/full-scala-stack) by Roberto Leibman
+- [A minimal ZIO giter8 template](https://github.com/jchoffmann/zio-seed.g8) by [jchoffmann](https://github.com/jchoffmann)
+- [Lagom CQRS/ES microservices with ZIO, Caliban, zio-grpc, elastic4s and Logstage](https://github.com/sigurdthor/book-shelf) by [Yaroslav Yaremenko](https://github.com/sigurdthor)
+- [ZIO Advanced Template](https://github.com/Neurodyne/zio-top.g8) by [Boris V.Kuznetsov](https://github.com/tampler)
+- [ZIO Quill, H2 Database with FlyWay migrations Explained](https://github.com/Neurodyne/h2db-quill-demo) by [Boris V.Kuznetsov](https://github.com/tampler)
+- [ZIO Tapir Http4s Integration](https://github.com/Neurodyne/zio-tapir) by [Boris V.Kuznetsov](https://github.com/tampler)
+- [Akka Microservice using DDD with ZIO Streams Tapir AkkaHTTP Integration](https://github.com/mvillafuertem/scalcite) by [Miguel Villafuerte](https://github.com/mvillafuertem)
+- [Simple ZIO ScalaJS Skeleton](https://github.com/sb-dev/zio-scalajs) by [Samir Benzenine](https://github.com/sb-dev)
+- [Full ZIO Stack: A sample IM that uses zio, zio-redis, zio-actors, zio-schema, zio-streams, zio-crypto, tapir, akka-http](https://github.com/bitlap/zim) by [bitlap](https://github.com/bitlap/zim) (Notes in Chinese)
diff --git a/website/versioned_docs/version-1.0.18/resources/learning/videos.md b/website/versioned_docs/version-1.0.18/resources/learning/videos.md
new file mode 100644
index 000000000000..b1c081a0c3d1
--- /dev/null
+++ b/website/versioned_docs/version-1.0.18/resources/learning/videos.md
@@ -0,0 +1,140 @@
+---
+id: videos
+title: "Videos"
+---
+
+## Functional Programming
+- [FP to the Max](https://www.youtube.com/watch?v=sxudIMiOo68) by John De Goes (July 2018)
+- [FP to the Min](https://www.youtube.com/watch?v=mrHphQT4RpU) by John De Goes (July 2020)
+
+## ZIO Core
+- [The Death of Tagless Final](https://www.youtube.com/watch?v=p98W4bUtbO8) by John De Goes (February 2019)
+- [A Tour of ZIO](https://www.youtube.com/watch?v=TWdC7DhvD8M) by John De Goes (January 2020)
+- [Upgrade Your Future](https://www.youtube.com/watch?v=USgfku1h7Hw) by John De Goes (September 2019)
+- [One Monad to Rule Them All](https://www.youtube.com/watch?v=POUEz8XHMhE) by John De Goes (August 2019)
+- [Functional Concurrency in Scala with ZIO](https://www.youtube.com/watch?v=m5nas4Hndqo) by Itamar Ravid (June 2019)
+- [Thinking Functionally](https://www.youtube.com/watch?v=-KA3BSdqYug) by John De Goes (March 2019)
+- [Tour of ZIO](https://www.youtube.com/watch?v=5s0GOA3WQnY&t=1405s) by Oleksandra Holubitska (March 2019)
+- [ZIO: Next-Generation Effects in Scala](https://www.youtube.com/watch?v=mkSHhsJXjdc&t=6s) by John De Goes (October 2018)
+- [Functional Effects in ZIO](https://www.youtube.com/watch?v=4EeL8-chAR8) by Wiem Zine El Abidine
+- [ZIO WORLD - Blocking](https://www.youtube.com/watch?v=1g21c8VKeuU&t=320s) by Adam Fraser (March 2021) — Adam Fraser presented his work to make ZIO faster with blocking code, showing the tremendous (unbelievable) performance improvements possible with batch-mode blocking; part of ongoing work to make ZIO better at blocking workloads.
+
+## ZIO Runtime System
+- [ZIO WORLD - ZIO Runtime System](https://www.youtube.com/watch?v=OFFrw5aJzG4&t=313s) by John A. De Goes (March 2021) — John A. De Goes presented his work on the runtime system for ZIO 2.0, which involves a significant rewrite and simplification of the core execution model, resulting in what will be ZIO's best performance to date; benchmarks show nearly universally faster performance compared to CE3.
+
+## Error Management
+- [Error Management: Future vs ZIO](https://www.youtube.com/watch?v=mGxcaQs3JWI) by John De Goes and Kai (May 2019), [slide](https://www.slideshare.net/jdegoes/error-management-future-vs-zio)
+- Systematic error management in application - We ported Rudder to ZIO: [video in French](https://www.youtube.com/watch?v=q0PlcgR5M1Q), [slides in English](https://speakerdeck.com/fanf42/systematic-error-management-we-ported-rudder-to-zio) by François Armand (Scala.io, October 2019)
+
+## ZIO Module Pattern
+- [ZLayers by example](https://www.youtube.com/watch?v=u5IrfkAo6nk) by Wiem Zine El Abidine (December 2020)
+- [ZIO inception: Journey through layers and intersection types](https://www.youtube.com/watch?v=vNQFlq1SvaE) by Vladimir Pavkin (July 2020)
+- [ZIO WORLD - ZLayer](https://www.youtube.com/watch?v=B3bAcU2-TGI) by Kit Langton (March 2021) — In this presentation, Kit Langton demonstrated a significant simplification of the module pattern that makes it familiar to OOP programmers, & showed how ZIO 2.0 will auto-wire ZLayers & guide users with rich & actionable error messages.
+
+## ZIO Schedule
+- [ZIO Schedule: Conquering Flakiness and Recurrence with Pure Functional Programming](https://www.youtube.com/watch?v=onQSHiafAY8&t=1s) by John De Goes (October 2018)
+
+## ZIO Concurrency Primitives
+- [Atomically { Delete Your Actors }](https://www.youtube.com/watch?v=d6WWmia0BPM) by John De Goes and Wiem Zine El Abidine (April 2019)
+- [ZIO Queue](https://www.youtube.com/watch?v=lBYkLc-j7Vo) by Wiem Zine El Abidine (January 2019)
+- [ZIO Queue: A new Queue for a new Era](https://www.youtube.com/watch?v=8JLprl34xEw&t=2437s) by John De Goes (September 2018)
+- [ZIO WORLD - ZHub](https://www.youtube.com/watch?v=v7Ontn7jZt8) by Adam Fraser (March 2020) — In this presentation, Adam Fraser introduced a hyper-fast async concurrent hub and showed how ZIO Queue and ZIO Hub are radically faster than alternatives.
+
+## ZIO STM
+- [Declarative Concurrency with ZIO STM](https://www.youtube.com/watch?v=MEH7hQmGK5M) by Dejan Mijic (June 2020)
+
+## ZIO Test and Debugging
+- [0 to 100 with ZIO Test](https://www.youtube.com/watch?v=qDFfVinjDPQ) by Adam Fraser
+- [ZIO WORLD - Execution Tracing](https://www.youtube.com/watch?v=1-z06KIde0k) by Rob Walsh (March 2021) — Rob Walsh then presented his work taking ZIO's execution tracing to the next level, which will improve the quality of tracing, as well as make it virtually "cost-free", which is effectively the same as more than doubling the perf of ZIO applications today.
+- [Using Aspects To Transform Your Code With ZIO Environment](https://www.youtube.com/watch?v=gcqWdNwNEPg) by Adam Fraser (September 2020)
+
+## Migration and Interoperability
+- [Functional Legacy - How to Incorporate ZIO In Your Legacy Services](https://www.youtube.com/watch?v=pdgr9bbFQLE) by Natan Silnitsky (March 2020) — You want to introduce ZIO to your existing Scala codebase? Great Idea! It will make your code more efficient, readable, composable, and safe. For the past year, Natan Silnitsky has done this at Wix and has learned a lot about how to do it right. In this talk, Natan will show you how to successfully use ZIO in your legacy service using real-life code examples. You will learn key tips and takeaways including how and when to execute the ZIO runtime; how/when to introduce ZLayers into your codebase; how to make your existing APIs interop with ZIO; and how to have more flexibility on ZManaged resource shutdown. When you're done attending the presentation, you'll be able to introduce ZIO into your existing Scala code base with confidence!
+
+## ZIO Streams
+- [Modern Data Driven Applications with ZIO Streams](https://youtu.be/bbss7elSfxs) by Itamar Ravid (December 2019)
+- [ZIO Stream: Rebirth](https://www.youtube.com/watch?v=mLJYODobz44&t=15s) by John De Goes and Itamar Ravid (November 2018)
+- [The Joys of (Z)Streams](https://www.youtube.com/watch?v=XIIX2YSg7M0) by Itamar Ravid
+- [A Tour of ZIO Streams](https://www.youtube.com/watch?v=OiuKbpMOKsc&t=291s) by Itamar Ravid (June 2020) — Watch this video for a brief tour of ZIO Streams from its author, Itamar Ravid. ZIO Streams is a Scala library for creating concurrent, asynchronous stream processing applications, based on the cutting edge functional effect system ZIO.
+- [ZIO WORLD - ZIO Streams](https://www.youtube.com/watch?v=8b3t65tmMkE) by Itamar Ravid (March 2020) — In this presentation, Itamar Ravid introduces a radical new basis for ZIO 2.0 Streams, inspired by the work of Michael Snoyman, which unifies streams and sinks & offers next-level performance for streaming apps on the JVM.
+
+## ZIO Libraries
+- [What Happens In ZIO Stays in ZIO](https://www.youtube.com/watch?v=gZZazYy0tWM) by Willem Vermeer — ZIO has excellent interoperability with http4s, Cats, Akka libraries, and more, and there are a number of posts and videos that help developers use ZIO with these great libraries. Yet, the question remains: can you develop a web service using ZIO libraries alone? In this talk, we'll explore the answer to this question directly. We'll start by creating an SMTP server using ZIO NIO, and then we'll add a frontend based on ZIO gRPC and Scala.js. Along the way, we'll leverage ZIO Config to externalize the configuration, as well as the ZLayer mechanism for dependency injection. Maybe we'll throw ZIO JSON into the mix, and at the end, we'll take a step back to appreciate how much is really possible gluing those ZIO components together! Finally, we'll conclude by discussing what's still left to be done to make ZIO a full-fledged competitor in the area of full-stack web service development
+
+### ZIO Actors
+- [Acting Lessons for Scala Engineers with Akka and ZIO](https://www.youtube.com/watch?v=AQXBlbkf9wc) by [Salar Rahmanian](https://www.softinio.com) (November 2020)
+
+### ZIO Arrow
+- [Blazing Fast, Pure Effects without Monads](https://www.youtube.com/watch?v=L8AEj6IRNEE) by John De Goes (Dec 2018)
+
+### ZIO Config
+- [Easy Config For Your App](https://www.youtube.com/watch?v=4SrSKluyyKo) by Afsal Thaj (December 2020) — Managing application configuration can be quite challenging: we often have to support multiple data sources with overrides, including HOCON, system properties, environment variables, and more. We have to document our configuration so it is clear to IT and DevOps how to configure our applications. We have to do strong validation with error accumulation to ensure bad data is rejected and good error messages are generated for end-users. In this presentation, Afsal Thaj, the author of ZIO Config, shows attendees how to solve all of these problems in a composable and testable way. By separating the description of configuration from what must be done with the configuration, ZIO Config provides all standard features—including multiple data sources, data source overrides, documentation, and validation with error accumulation—for free. Come learn how to make your applications configurable in an easy way that will delight IT and DevOps and make it easy to change your applications over time.
+
+- Introduction to ZIO Config by Afsal Thaj
+ - Part 1: [Start writing better Scala with zio-config](https://www.youtube.com/watch?v=l5CVQmSp7fY)
+ - Part 2: [Maximise the use of Scala types (Option & Either in zio-config)](https://www.youtube.com/watch?v=SusCbrSK5eA&t=0s)
+ - Part 3: [Intro to ADT, and scalable configuration management!](https://www.youtube.com/watch?v=LGo_g1GK6_k&t=0s)
+ - Part 4: [Auto generate sample configurations of your application in Scala](https://www.youtube.com/watch?v=--mcs4HztJY&t=0s)
+
+### ZIO Prelude
+- [SF Scala: Reimagining Functional Type Classes](https://www.youtube.com/watch?v=OwmHgL9F_9Q) John A. De Goes and Adam Fraser (August 2020) — In this presentation, John A. De Goes and Adam Fraser introduce a new Scala library with a completely different factoring of functional type classes—one which throws literally everything away and starts from a clean slate. In this new factoring, type classes leverage Scala’s strengths, including variance and modularity. Pieces fit together cleanly and uniformly, and in a way that satisfies existing use cases, but enables new ones never before possible. Finally, type classes are named, organized, and described in a way that makes teaching them easier, without compromising on algebraic principles.
+- [The Terror-Free Guide To Introducing Functional Scala At Work](https://www.youtube.com/watch?v=Sinde_P7nmY) by Jorge Vasquez (December 2020) — Too often, our applications are dominated by boilerplate that's not fun to write or test, and that makes our business logic complicated. In object-oriented programming, classes and interfaces help us with abstraction to reduce boilerplate. But, in functional programming, we use type classes. Historically, type classes in functional programming have been very complex and confusing, partially because they import ideas from Haskell that don't make sense in Scala, and partially because of their esoteric origins in category theory. In this presentation, Jorge Vásquez presents a new library called ZIO Prelude, which offers a distinctly Scala take on Functional Abstractions, and you will learn how you can eliminate common types of boilerplate by using it. Come see how you can improve your happiness and productivity with a new take on what it means to do functional programming in Scala!
+- [ZIO WORLD - ZIO Prelude](https://www.youtube.com/watch?v=69ngoqVXKPI) by Jorge Vasquez (March 2020) — In this talk, Jorge Vasquez discusses his work bringing refined newtypes to ZIO Prelude, which are working natively on Scala 3 with a beautiful syntax and DSL.
+- [Zymposium - ZIO Prelude](https://www.youtube.com/watch?v=M3HmROwOoRU) by Adam Fraser and Kit Langton (May 2021) — We'll see how ZIO Prelude gives us the tools for solving some common problems in day-to-day development. We'll also see how ZIO Prelude provides a set of abstractions we can use for inspiration when implementing our own data types but never forces us to use these abstractions.
+
+### ZIO Query
+- [Wicked Fast API Calls with ZIO Query](https://www.youtube.com/watch?v=rUUxDXJMzJo) by Adam Fraser (July 2020)
+
+### ZIO Cache
+- [Compositional Caching](https://www.youtube.com/watch?v=iFeTUhYpPLs) by Adam Fraser (December 2020) — In this talk, Adam will introduce ZIO Cache, a new library in the ZIO ecosystem that provides a drop-in caching solution for ZIO applications. We will see how ZIO’s support for asynchrony and concurrency lets us implement a cache in terms of a single lookup function and how we get many other things such as typed errors and compositional caching policies for free. See how easy it can be to add caching to your ZIO application!
+
+### ZIO Http
+- [ZIO World - ZIO HTTP](https://www.youtube.com/watch?v=dVggks9_1Qk&t=257s) by Tushar Mathur (March 2020) — At ZIO World Tushar Mathur unveiled a new open-source library 'ZIO HTTP' that gives you better performance than Vert.x, but with pure functional Scala and native ZIO integration.
+
+### ZIO CLI
+- [10 Minute Command-Line Apps With ZIO CLI](https://www.youtube.com/watch?v=UeR8YUN4Tws) by Aiswarya Prakasan (December 2020) — Command-line applications are pervasive, relied upon by developers,third-parties, and IT staff. While it’s easy to develop a basic command-line application, building one that features rich help, forgiving parsing, bulletproof validation, shell completions, and a clean and beautiful appearance is quite challenging. In this talk, Aiswarya Prakasan, a contributor to ZIO CLI, will lead you on a journey to building powerful command-line applications in Scala. Through the power of composition and strong types, you will discover how to build type-safe command-line applications in just a few simple lines of code, which give you rich features of command-line applications like git.
+
+### ZIO Flow
+- [ZIO WORLD - ZIO FLOW](https://www.youtube.com/watch?v=H4pMkTAsg48) by Aiswarya Prakasan (March 2020) - Aiswarya Prakasan introduces ZIO Flow, a new open-source platform that will help developers orchestrate microservices for mission-critical code. With a type-safe and compositional design, ZIO Flow will make it easier for developers to create and test robust workflows, which statefully interact with microservices, databases, and human agents, and which survive flakiness, software updates, and even data center failures.
+
+### ZIO SQL
+- [ZIO WORLD - ZIO SQL](https://www.youtube.com/watch?v=cIMA6iT9B-k) by Jakub Czuchnowski (March 2020) — Jakub Czuchnowski summarized the latest work in ZIO SQL, demonstrating type-inferred, type-safe queries with the full-range of SQL features, including database-specific functions, with working SELECT/DELETE/UPDATE for Postgres.
+
+### ZIO Optics
+- [Zymposium - Optics](https://www.youtube.com/watch?v=-km5ohYhJa4) by Adam Fraser and Kit Langton (June 2021) — Optics are great tools for working with parts of larger data structures and come up in disguise in many places such as ZIO Test assertions.
+
+### ZIO Pulsar
+- [ZIO World - ZIO PULSAR](https://www.youtube.com/watch?v=tpwydDqQBmk) by Jakub Czuchnowski (March 2020) — A new library that offers a native, first-class ZIO experience for Pulsar, the Kafka competitor gaining traction for some use cases.
+
+### ZIO Schema
+- [Zymposium - ZIO Schema](https://www.youtube.com/watch?v=GfNiDaL5aIM) by John A. De Goes, Adam Fraser and Kit Langton (May 2021)
+
+### ZIO Kafka
+- [ZIO WORLD - ZIO Kafka](https://www.youtube.com/watch?v=GECv1ONieLw) by Aleksandar Skrbic (March 2020) — Aleksandar Skrbic presented ZIO Kafka, a critical library for the modern Scala developer, which hides some of the complexities of Kafka.
+
+### ZIO Web
+- [ZIO WORLD - ZIO WEB](https://www.youtube.com/watch?v=UBT-7h8JgU4) by Piotr Golebiewski (March 2020) — Piotr Golebiewski toured ZIO Web, reaching the milestone of "hello world", with working server and client generation for the HTTP protocol.
+
+### ZIO K8s
+- [ZIO World - ZIO Kubernetes (ZIO K8S 1.0)](https://www.youtube.com/watch?v=BUMe2hGKjXA&t=31s) by Daniel Vigovszky (March 2020) — ZIO K8S 1.0, a new library by Daniel Vigovszky and Coralogix for working with Kubernetes, which includes support for the whole API, including event processors and compositional aspects for metrics and logging.
+
+### ZIO gRPC
+- [Functional, Type-safe, Testable Microservices with ZIO gRPC](https://www.youtube.com/watch?v=XTkhxRTH1nE) by Nadav Samet (July 2020)
+
+### ZIO Redis
+- [ZIO Redis](https://www.youtube.com/watch?v=yqFt3b3RBkI) by Dejan Mijic — Redis is one of the most commonly used in-memory data structure stores. In this talk, Dejan will introduce ZIO Redis, a purely functional, strongly typed client library backed by ZIO, with excellent performance and extensive support for nearly all of Redis' features. He will explain the library design using the bottom-up approach - from communication protocol to public APIs. Finally, he will wrap the talk by demonstrating the client's usage and discussing its performance characteristics.
+
+### ZparkIO
+- [Accelerating Spark with ZIO](https://www.youtube.com/watch?v=bWgVGzb5-H8) by Leo Benkel (December 2020) — Apache Spark is a powerful tool for distributed analytics but isn’t built to offer rich primitives for local parallelism. Yet, using local parallelism, it is possible to dramatically improve the performance of ML projects—for example, by fetching data sources in parallel. Historically, Scala projects have used Scala's Future for local parallelism, but increasingly, many projects have begun adopting ZIO as a better Future. Although powerful, ZIO is not easy to set up with Spark. In this talk, Leo will introduce an open-source library that handles all boilerplate so you can easily implement Spark and ZIO in your ML projects, and will then present detailed benchmarks showing how much additional performance can be obtained through local parallelism. Learn how to use ZIO to make Spark better and faster!
+
+## Use Cases
+- [Search Hacker News with ZIO with Scala, ZIO, Sttp and magic](https://www.youtube.com/watch?v=3P2Gi--dG9A&list=PL-G8WBFTPSVpCcFq6O7czfx9m9T21Cz24&index=11) — A practical look at building a ZIO program. + [Source Code](https://github.com/justinhj/magic-rate-limiter) by [Justin Heyes-Jones](https://twitter.com/justinhj) (April 2021)
+- [ML Powered Apps with ZIO Scala](https://www.youtube.com/watch?v=nbZCfkGuNIE) by Aris Vlasakakis (August 2020)
+- [Production-grade Microservices with ZIO](https://www.youtube.com/watch?v=oMJ1RMdR7wg) by Itamar Ravid (April 2021) — These days, there are all sorts of boxes to check when deploying production-grade microservices. Our applications must be (reasonably!) performant and correct, but they must also be observable, resilient, and easily extensible. In this talk, Itamar will share from his experience in running microservices based on ZIO in production: resilient, Kubernetes-friendly structuring; cloud-native observability with logs, metrics and telemetry, and modern ways of service communication.
+
+## Others
+- [Redis Streams with ZIO](https://youtu.be/jJnco6sMZQY) by [Leszek Gruchała](https://twitter.com/leszekgruchala) (October 2020)
+- [The Rise Of Loom And The Evolution Of Reactive Programming](https://www.youtube.com/watch?v=SJeAb-XEIe8&t=2938s) by John A. De Goes (October 2020)
+- [Izumi 1.0: Your Next Scala Stack](https://www.youtube.com/watch?v=o65sKWnFyk0) by Pavel Shirshov and Kai (December 2020) — Frameworks are bulky, quirky, and non-compositional, which has led to a rejection of Spring and similar frameworks in the Scala ecosystem. Yet, despite their drawbacks, frameworks have been used to boost team productivity in many large companies. In this presentation, Paul and Kai will introduce Izumi 1.0, a Scala microframework based on compositional functional programming. Designed to help you and your team achieve new levels of productivity, Izumi now includes full compile-time checks for your configurable applications and completely reworked Tagless Final hierarchy for Bifunctors and Trifunctors.
+- [ZIO WORLD - Distage 1.0](https://www.youtube.com/watch?v=WJvno8yZuWU) (March 2020) — Distage 1.0, brings the power of tagless-final to trifunctor/bifunctor effect types, with a robust commercial-ready DI framework capable of working with TF & ZLayers w/compile-time validation.
+- [Demystifying Functional Effect Systems, Or Build Your Own (Toy) ZIO](https://www.youtube.com/watch?v=Q4OCmKRPUf8) by Dmitry Karlinsky (December 2020) — Ever wondered how ZIO and other functional effect systems work under the hood? Sure feels like magic, sometimes! Dmitry Karlinsky is a big believer in learning by doing, and in this talk, the speaker will walk you through building a toy Scala effect system (called 'TIO'), from scratch, starting with a basic IO monad, and gradually adding capabilities such as Failure and Recovery, Asynchronous Effects, Stack Safety, and finally Concurrency with Fibers. Peek behind the curtain and demystify the technology that is becoming the backbone of modern Scala applications!
diff --git a/website/versioned_sidebars/version-1.0.18-sidebars.json b/website/versioned_sidebars/version-1.0.18-sidebars.json
new file mode 100644
index 000000000000..cab2d5ee6d7c
--- /dev/null
+++ b/website/versioned_sidebars/version-1.0.18-sidebars.json
@@ -0,0 +1,239 @@
+{
+ "overview_sidebar": [
+ {
+ "type": "category",
+ "label": "Overview",
+ "collapsed": true,
+ "items": [
+ "overview/overview_index",
+ "overview/overview_creating_effects",
+ "overview/overview_basic_operations",
+ "overview/overview_handling_errors",
+ "overview/overview_handling_resources",
+ "overview/overview_basic_concurrency",
+ "overview/overview_testing_effects",
+ "overview/overview_running_effects",
+ "overview/overview_background",
+ "overview/overview_performance",
+ "overview/overview_platforms"
+ ]
+ },
+ {
+ "type": "category",
+ "label": "Reference",
+ "link": {
+ "type": "doc",
+ "id": "reference/index"
+ },
+ "collapsed": true,
+ "items": [
+ {
+ "type": "category",
+ "label": "Core Data Types",
+ "link": {
+ "type": "doc",
+ "id": "reference/core/index"
+ },
+ "collapsed": true,
+ "items": [
+ "reference/core/zio",
+ "reference/core/uio",
+ "reference/core/urio",
+ "reference/core/task",
+ "reference/core/rio",
+ "reference/core/io",
+ "reference/core/exit",
+ "reference/core/cause",
+ "reference/core/runtime"
+ ]
+ },
+ {
+ "type": "category",
+ "label": "Contextual Types",
+ "link": {
+ "type": "doc",
+ "id": "reference/contextual/index"
+ },
+ "collapsed": true,
+ "items": [
+ "reference/contextual/has",
+ "reference/contextual/zlayer",
+ "reference/contextual/rlayer",
+ "reference/contextual/ulayer",
+ "reference/contextual/layer",
+ "reference/contextual/urlayer",
+ "reference/contextual/tasklayer"
+ ]
+ },
+ {
+ "type": "category",
+ "label": "Fiber Primitives",
+ "link": {
+ "type": "doc",
+ "id": "reference/fiber/index"
+ },
+ "collapsed": true,
+ "items": [
+ "reference/fiber/fiber",
+ "reference/fiber/fiberref",
+ "reference/fiber/fiberid",
+ "reference/fiber/fiberstatus"
+ ]
+ },
+ {
+ "type": "category",
+ "label": "Concurrency Primitives",
+ "link": {
+ "type": "doc",
+ "id": "reference/concurrency/index"
+ },
+ "collapsed": true,
+ "items": [
+ "reference/concurrency/zref",
+ "reference/concurrency/ref",
+ "reference/concurrency/zrefm",
+ "reference/concurrency/refm",
+ "reference/concurrency/promise",
+ "reference/concurrency/queue",
+ "reference/concurrency/hub",
+ "reference/concurrency/semaphore"
+ ]
+ },
+ {
+ "type": "category",
+ "label": "STM",
+ "link": {
+ "type": "doc",
+ "id": "reference/stm/index"
+ },
+ "collapsed": true,
+ "items": [
+          "reference/stm/stm",
+          "reference/stm/tarray",
+          "reference/stm/tset",
+          "reference/stm/tmap",
+          "reference/stm/tref",
+          "reference/stm/tpromise",
+          "reference/stm/tqueue",
+          "reference/stm/tsemaphore"
+ ]
+ },
+ {
+ "type": "category",
+ "label": "Resource Safety",
+ "link": {
+ "type": "doc",
+ "id": "reference/resource/index"
+ },
+ "collapsed": true,
+ "items": [
+ "reference/resource/zmanaged",
+ "reference/resource/managed",
+ "reference/resource/task-managed",
+ "reference/resource/rmanaged",
+ "reference/resource/umanaged",
+ "reference/resource/urmanaged"
+ ]
+ },
+ {
+ "type": "category",
+ "label": "Streaming",
+ "link": {
+ "type": "doc",
+ "id": "reference/stream/index"
+ },
+ "collapsed": true,
+ "items": [
+ "reference/stream/zstream",
+ "reference/stream/stream",
+ "reference/stream/ustream",
+ "reference/stream/ztransducer",
+ "reference/stream/transducer",
+ "reference/stream/zsink",
+ "reference/stream/sink",
+ "reference/stream/subscription-ref"
+ ]
+ },
+ {
+ "type": "category",
+ "label": "Miscellaneous",
+ "link": {
+ "type": "doc",
+ "id": "reference/misc/index"
+ },
+ "collapsed": true,
+ "items": [
+ "reference/misc/chunk",
+ "reference/misc/schedule",
+ "reference/misc/supervisor"
+ ]
+ }
+ ]
+ },
+ {
+ "type": "category",
+ "label": "Services",
+ "link": {
+ "type": "doc",
+ "id": "reference/services/index"
+ },
+ "collapsed": true,
+ "items": [
+ "reference/services/console",
+ "reference/services/clock",
+ "reference/services/random",
+ "reference/services/blocking",
+ "reference/services/system"
+ ]
+ },
+ {
+ "type": "category",
+ "label": "Guides",
+ "link": {
+ "type": "doc",
+ "id": "guides/index"
+ },
+ "collapsed": true,
+ "items": [
+ {
+ "type": "category",
+ "label": "How to",
+ "link": {
+ "type": "doc",
+ "id": "guides/index"
+ },
+ "collapsed": true,
+ "items": [
+ "guides/use-test-assertions",
+ "guides/test-effects",
+ "guides/mock-services",
+ "guides/handle-errors",
+ "guides/access-system-information",
+ "guides/use-zio-macros"
+ ]
+ },
+ {
+ "type": "category",
+ "label": "Interop",
+ "link": {
+ "type": "doc",
+ "id": "guides/index"
+ },
+ "collapsed": true,
+ "items": [
+ "guides/interop/with-cats-effect",
+ "guides/interop/with-future",
+ "guides/interop/with-java",
+ "guides/interop/with-javascript",
+ "guides/interop/with-monix",
+ "guides/interop/with-scalaz-7x",
+ "guides/interop/with-reactive-streams",
+ "guides/interop/with-twitter",
+ "guides/interop/with-guava"
+ ]
+ },
+ "guides/migrate/from-monix"
+ ]
+ }
+ ]
+}
diff --git a/website/versioned_sidebars/version-1.x-sidebars.json b/website/versioned_sidebars/version-1.x-sidebars.json
deleted file mode 100644
index cd830384b91c..000000000000
--- a/website/versioned_sidebars/version-1.x-sidebars.json
+++ /dev/null
@@ -1,140 +0,0 @@
-{
- "overview-sidebar": {
- "Overview": [
- "version-1.x/overview/overview_index",
- "version-1.x/overview/overview_creating_effects",
- "version-1.x/overview/overview_basic_operations",
- "version-1.x/overview/overview_handling_errors",
- "version-1.x/overview/overview_handling_resources",
- "version-1.x/overview/overview_basic_concurrency",
- "version-1.x/overview/overview_testing_effects",
- "version-1.x/overview/overview_running_effects",
- "version-1.x/overview/overview_background",
- "version-1.x/overview/overview_performance",
- "version-1.x/overview/overview_platforms"
- ]
- },
- "datatypes-sidebar": {
- "Overview": [
- "version-1.x/datatypes/index"
- ],
- "Core Data Types": [
- "version-1.x/datatypes/core/index",
- "version-1.x/datatypes/core/zio",
- "version-1.x/datatypes/core/uio",
- "version-1.x/datatypes/core/urio",
- "version-1.x/datatypes/core/task",
- "version-1.x/datatypes/core/rio",
- "version-1.x/datatypes/core/io",
- "version-1.x/datatypes/core/exit",
- "version-1.x/datatypes/core/cause",
- "version-1.x/datatypes/core/runtime"
- ],
- "Contextual Types": [
- "version-1.x/datatypes/contextual/index",
- "version-1.x/datatypes/contextual/has",
- "version-1.x/datatypes/contextual/zlayer",
- "version-1.x/datatypes/contextual/rlayer",
- "version-1.x/datatypes/contextual/ulayer",
- "version-1.x/datatypes/contextual/layer",
- "version-1.x/datatypes/contextual/urlayer",
- "version-1.x/datatypes/contextual/tasklayer"
- ],
- "Fiber Primitives": [
- "version-1.x/datatypes/fiber/index",
- "version-1.x/datatypes/fiber/fiber",
- "version-1.x/datatypes/fiber/fiberref",
- "version-1.x/datatypes/fiber/fiberid",
- "version-1.x/datatypes/fiber/fiberstatus"
- ],
- "Concurrency Primitives": [
- "version-1.x/datatypes/concurrency/index",
- "version-1.x/datatypes/concurrency/zref",
- "version-1.x/datatypes/concurrency/ref",
- "version-1.x/datatypes/concurrency/zrefm",
- "version-1.x/datatypes/concurrency/refm",
- "version-1.x/datatypes/concurrency/promise",
- "version-1.x/datatypes/concurrency/queue",
- "version-1.x/datatypes/concurrency/hub",
- "version-1.x/datatypes/concurrency/semaphore"
- ],
- "STM": [
- "version-1.x/datatypes/stm/index",
- "version-1.x/datatypes/stm/stm",
- "version-1.x/datatypes/stm/tarray",
- "version-1.x/datatypes/stm/tset",
- "version-1.x/datatypes/stm/tmap",
- "version-1.x/datatypes/stm/tref",
- "version-1.x/datatypes/stm/tpriorityqueue",
- "version-1.x/datatypes/stm/tpromise",
- "version-1.x/datatypes/stm/tqueue",
- "version-1.x/datatypes/stm/treentrantlock",
- "version-1.x/datatypes/stm/tsemaphore"
- ],
- "Resource Safety": [
- "version-1.x/datatypes/resource/index",
- "version-1.x/datatypes/resource/zmanaged",
- "version-1.x/datatypes/resource/managed",
- "version-1.x/datatypes/resource/task-managed",
- "version-1.x/datatypes/resource/rmanaged",
- "version-1.x/datatypes/resource/umanaged",
- "version-1.x/datatypes/resource/urmanaged"
- ],
- "Streaming": [
- "version-1.x/datatypes/stream/index",
- "version-1.x/datatypes/stream/zstream",
- "version-1.x/datatypes/stream/stream",
- "version-1.x/datatypes/stream/ustream",
- "version-1.x/datatypes/stream/ztransducer",
- "version-1.x/datatypes/stream/transducer",
- "version-1.x/datatypes/stream/zsink",
- "version-1.x/datatypes/stream/sink",
- "version-1.x/datatypes/stream/subscription-ref"
- ],
- "Miscellaneous": [
- "version-1.x/datatypes/misc/index",
- "version-1.x/datatypes/misc/chunk",
- "version-1.x/datatypes/misc/schedule",
- "version-1.x/datatypes/misc/supervisor"
- ]
- },
- "services-sidebar": {
- "Services": [
- "version-1.x/services/index",
- "version-1.x/services/console",
- "version-1.x/services/clock",
- "version-1.x/services/random",
- "version-1.x/services/blocking",
- "version-1.x/services/system"
- ]
- },
- "resources-sidebar": {
- "Overview": [
- "version-1.x/resources/index"
- ],
- "Learning": [
- "version-1.x/resources/learning/articles",
- "version-1.x/resources/learning/videos",
- "version-1.x/resources/learning/cookbooks",
- "version-1.x/resources/learning/cheatsheets",
- "version-1.x/resources/learning/sampleprojects",
- "version-1.x/resources/learning/poweredbyzio"
- ],
- "Ecosystem": [
- "version-1.x/resources/ecosystem/officials",
- "version-1.x/resources/ecosystem/community",
- "version-1.x/resources/ecosystem/compatible",
- "version-1.x/resources/ecosystem/tools",
- "version-1.x/resources/ecosystem/templates"
- ]
- },
- "about-sidebar": {
- "About": [
- "version-1.x/about/about_index",
- "version-1.x/about/about_coding_guidelines",
- "version-1.x/about/about_contributing",
- "version-1.x/about/about_coc"
- ]
- }
-}
-
diff --git a/website/versions.json b/website/versions.json
index 32960f8ced39..ae9da1df4e01 100644
--- a/website/versions.json
+++ b/website/versions.json
@@ -1,2 +1,3 @@
[
+ "1.0.18"
]
\ No newline at end of file