From a482fbbd007eb2f96fe1748e355acb40ae5f6f63 Mon Sep 17 00:00:00 2001 From: Pierre De Rop Date: Thu, 2 May 2024 18:04:06 +0200 Subject: [PATCH 01/26] Migrate asciidoc files path to Antora structure, removed stylesheets/reactor.css. --- docs/asciidoc/images/flux.svg | 1 - docs/asciidoc/images/mono.svg | 1 - docs/asciidoc/stylesheets/reactor.css | 2112 ----------------- .../ROOT/assets}/highlight/CHANGES.md | 0 .../ROOT/assets}/highlight/LICENSE | 0 .../ROOT/assets}/highlight/README.md | 0 .../ROOT/assets}/highlight/README.ru.md | 0 .../ROOT/assets}/highlight/highlight.min.js | 0 .../highlight/styles/railscasts.min.css | 0 docs/modules/ROOT/assets/images/flux.svg | 1 + .../ROOT/assets}/images/gs-cold.png | Bin .../ROOT/assets}/images/gs-compose.png | Bin .../ROOT/assets}/images/gs-hot.png | Bin .../ROOT/assets}/images/gs-operators.png | Bin .../ROOT/assets}/images/gs-reftail.png | Bin .../ROOT/assets}/images/gs-transform.png | Bin .../ROOT/assets}/images/legend-events.svg | 0 .../images/legend-operator-companion.svg | 0 .../images/legend-operator-double-source.svg | 0 .../assets}/images/legend-operator-method.svg | 0 .../images/legend-operator-parallel.svg | 0 .../assets}/images/legend-operator-static.svg | 0 .../images/legend-operator-windowing.svg | 0 .../assets}/images/legend-sideEffects1.svg | 0 .../assets}/images/legend-sideEffects2.svg | 0 .../ROOT/assets}/images/logo-2x.png | Bin .../ROOT/assets}/images/logo.png | Bin docs/modules/ROOT/assets/images/mono.svg | 1 + .../index.asciidoc => modules/ROOT/nav.adoc} | 0 .../ROOT/pages}/aboutDoc.adoc | 0 .../pages}/advanced-contextPropagation.adoc | 0 .../ROOT/pages}/advancedFeatures.adoc | 0 .../ROOT/pages}/apdx-howtoReadMarbles.adoc | 0 .../ROOT/pages}/apdx-implem.adoc | 0 .../ROOT/pages}/apdx-migrating.adoc | 0 .../ROOT/pages}/apdx-operatorChoice.adoc | 0 .../ROOT/pages}/apdx-optimizations.adoc | 0 .../ROOT/pages}/apdx-reactorExtra.adoc | 0 .../ROOT/pages}/apdx-writingOperator.adoc | 0 
.../ROOT/pages}/coreFeatures.adoc | 0 .../ROOT/pages}/debugging.adoc | 0 .../{asciidoc => modules/ROOT/pages}/faq.adoc | 0 .../ROOT/pages}/gettingStarted.adoc | 0 .../ROOT/pages}/kotlin.adoc | 0 .../ROOT/pages}/metrics-details.adoc | 0 .../ROOT/pages}/metrics.adoc | 0 .../ROOT/pages}/processors.adoc | 0 .../ROOT/pages}/producing.adoc | 0 .../ROOT/pages}/reactiveProgramming.adoc | 0 .../ROOT/pages}/snippetRetryWhenRetry.adoc | 0 .../ROOT/pages}/subscribe-backpressure.adoc | 0 .../ROOT/pages}/subscribe-details.adoc | 0 .../ROOT/pages}/testing.adoc | 0 53 files changed, 2 insertions(+), 2114 deletions(-) delete mode 120000 docs/asciidoc/images/flux.svg delete mode 120000 docs/asciidoc/images/mono.svg delete mode 100644 docs/asciidoc/stylesheets/reactor.css rename docs/{asciidoc => modules/ROOT/assets}/highlight/CHANGES.md (100%) rename docs/{asciidoc => modules/ROOT/assets}/highlight/LICENSE (100%) rename docs/{asciidoc => modules/ROOT/assets}/highlight/README.md (100%) rename docs/{asciidoc => modules/ROOT/assets}/highlight/README.ru.md (100%) rename docs/{asciidoc => modules/ROOT/assets}/highlight/highlight.min.js (100%) rename docs/{asciidoc => modules/ROOT/assets}/highlight/styles/railscasts.min.css (100%) create mode 120000 docs/modules/ROOT/assets/images/flux.svg rename docs/{asciidoc => modules/ROOT/assets}/images/gs-cold.png (100%) rename docs/{asciidoc => modules/ROOT/assets}/images/gs-compose.png (100%) rename docs/{asciidoc => modules/ROOT/assets}/images/gs-hot.png (100%) rename docs/{asciidoc => modules/ROOT/assets}/images/gs-operators.png (100%) rename docs/{asciidoc => modules/ROOT/assets}/images/gs-reftail.png (100%) rename docs/{asciidoc => modules/ROOT/assets}/images/gs-transform.png (100%) rename docs/{asciidoc => modules/ROOT/assets}/images/legend-events.svg (100%) rename docs/{asciidoc => modules/ROOT/assets}/images/legend-operator-companion.svg (100%) rename docs/{asciidoc => modules/ROOT/assets}/images/legend-operator-double-source.svg (100%) 
rename docs/{asciidoc => modules/ROOT/assets}/images/legend-operator-method.svg (100%) rename docs/{asciidoc => modules/ROOT/assets}/images/legend-operator-parallel.svg (100%) rename docs/{asciidoc => modules/ROOT/assets}/images/legend-operator-static.svg (100%) rename docs/{asciidoc => modules/ROOT/assets}/images/legend-operator-windowing.svg (100%) rename docs/{asciidoc => modules/ROOT/assets}/images/legend-sideEffects1.svg (100%) rename docs/{asciidoc => modules/ROOT/assets}/images/legend-sideEffects2.svg (100%) rename docs/{asciidoc => modules/ROOT/assets}/images/logo-2x.png (100%) rename docs/{asciidoc => modules/ROOT/assets}/images/logo.png (100%) create mode 120000 docs/modules/ROOT/assets/images/mono.svg rename docs/{asciidoc/index.asciidoc => modules/ROOT/nav.adoc} (100%) rename docs/{asciidoc => modules/ROOT/pages}/aboutDoc.adoc (100%) rename docs/{asciidoc => modules/ROOT/pages}/advanced-contextPropagation.adoc (100%) rename docs/{asciidoc => modules/ROOT/pages}/advancedFeatures.adoc (100%) rename docs/{asciidoc => modules/ROOT/pages}/apdx-howtoReadMarbles.adoc (100%) rename docs/{asciidoc => modules/ROOT/pages}/apdx-implem.adoc (100%) rename docs/{asciidoc => modules/ROOT/pages}/apdx-migrating.adoc (100%) rename docs/{asciidoc => modules/ROOT/pages}/apdx-operatorChoice.adoc (100%) rename docs/{asciidoc => modules/ROOT/pages}/apdx-optimizations.adoc (100%) rename docs/{asciidoc => modules/ROOT/pages}/apdx-reactorExtra.adoc (100%) rename docs/{asciidoc => modules/ROOT/pages}/apdx-writingOperator.adoc (100%) rename docs/{asciidoc => modules/ROOT/pages}/coreFeatures.adoc (100%) rename docs/{asciidoc => modules/ROOT/pages}/debugging.adoc (100%) rename docs/{asciidoc => modules/ROOT/pages}/faq.adoc (100%) rename docs/{asciidoc => modules/ROOT/pages}/gettingStarted.adoc (100%) rename docs/{asciidoc => modules/ROOT/pages}/kotlin.adoc (100%) rename docs/{asciidoc => modules/ROOT/pages}/metrics-details.adoc (100%) rename docs/{asciidoc => 
modules/ROOT/pages}/metrics.adoc (100%) rename docs/{asciidoc => modules/ROOT/pages}/processors.adoc (100%) rename docs/{asciidoc => modules/ROOT/pages}/producing.adoc (100%) rename docs/{asciidoc => modules/ROOT/pages}/reactiveProgramming.adoc (100%) rename docs/{asciidoc => modules/ROOT/pages}/snippetRetryWhenRetry.adoc (100%) rename docs/{asciidoc => modules/ROOT/pages}/subscribe-backpressure.adoc (100%) rename docs/{asciidoc => modules/ROOT/pages}/subscribe-details.adoc (100%) rename docs/{asciidoc => modules/ROOT/pages}/testing.adoc (100%) diff --git a/docs/asciidoc/images/flux.svg b/docs/asciidoc/images/flux.svg deleted file mode 120000 index 0ebf650420..0000000000 --- a/docs/asciidoc/images/flux.svg +++ /dev/null @@ -1 +0,0 @@ -../../../reactor-core/src/main/java/reactor/core/publisher/doc-files/marbles/flux.svg \ No newline at end of file diff --git a/docs/asciidoc/images/mono.svg b/docs/asciidoc/images/mono.svg deleted file mode 120000 index 7cb9a0ee11..0000000000 --- a/docs/asciidoc/images/mono.svg +++ /dev/null @@ -1 +0,0 @@ -../../../reactor-core/src/main/java/reactor/core/publisher/doc-files/marbles/mono.svg \ No newline at end of file diff --git a/docs/asciidoc/stylesheets/reactor.css b/docs/asciidoc/stylesheets/reactor.css deleted file mode 100644 index d0e19483a5..0000000000 --- a/docs/asciidoc/stylesheets/reactor.css +++ /dev/null @@ -1,2112 +0,0 @@ -@import url(https://fonts.googleapis.com/css?family=Montserrat:400,700); -@import url(https://cdnjs.cloudflare.com/ajax/libs/semantic-ui/1.6.2/semantic.min.css); - - -#header .details br+span.author:before { - content: "\00a0\0026\00a0"; - color: rgba(0,0,0,.85); -} - -#header .details br+span.email:before { - content: "("; -} - -#header .details br+span.email:after { - content: ")"; -} - -/*! 
normalize.css v2.1.2 | MIT License | git.io/normalize */ -/* ========================================================================== HTML5 display definitions ========================================================================== */ -/** Correct `block` display not defined in IE 8/9. */ -@import url(https://cdnjs.cloudflare.com/ajax/libs/font-awesome/3.2.1/css/font-awesome.css); - -article, aside, details, figcaption, figure, footer, header, hgroup, main, nav, section, summary { - display: block; -} - -/** Correct `inline-block` display not defined in IE 8/9. */ -audio, canvas, video { - display: inline-block; -} - -/** Prevent modern browsers from displaying `audio` without controls. Remove excess height in iOS 5 devices. */ -audio:not([controls]) { - display: none; - height: 0; -} - -/** Address `[hidden]` styling not present in IE 8/9. Hide the `template` element in IE, Safari, and Firefox < 22. */ -[hidden], template { - display: none; -} - -script { - display: none !important; -} - -/* ========================================================================== Base ========================================================================== */ -/** 1. Set default font family to sans-serif. 2. Prevent iOS text size adjust after orientation change, without disabling user zoom. */ -html { - font-family: sans-serif; /* 1 */ - -ms-text-size-adjust: 100%; /* 2 */ - -webkit-text-size-adjust: 100%; /* 2 */ -} - -/** Remove default margin. */ -body { - margin: 0; -} - -/* ========================================================================== Links ========================================================================== */ -/** Remove the gray background color from active links in IE 10. */ -a { - background: transparent; -} - -/** Address `outline` inconsistency between Chrome and other browsers. */ -a:focus { - outline: thin dotted; -} - -/** Improve readability when focused and also mouse hovered in all browsers. 
*/ -a:active, a:hover { - outline: 0; -} - -/* ========================================================================== Typography ========================================================================== */ -/** Address variable `h1` font-size and margin within `section` and `article` contexts in Firefox 4+, Safari 5, and Chrome. */ -h1 { - font-size: 2em; - margin: 1.2em 0; -} - -/** Address styling not present in IE 8/9, Safari 5, and Chrome. */ -abbr[title] { - border-bottom: 1px dotted; -} - -/** Address style set to `bolder` in Firefox 4+, Safari 5, and Chrome. */ -b, strong { - font-weight: bold; -} - -/** Address styling not present in Safari 5 and Chrome. */ -dfn { - font-style: italic; -} - -/** Address differences between Firefox and other browsers. */ -hr { - -moz-box-sizing: content-box; - box-sizing: content-box; - height: 0; -} - -/** Address styling not present in IE 8/9. */ -mark { - background: #ff0; - color: #000; -} - -/** Correct font family set oddly in Safari 5 and Chrome. */ -code, kbd, pre, samp { - font-family: monospace, serif; - font-size: 1em; -} - -/** Improve readability of pre-formatted text in all browsers. */ -pre { - white-space: pre-wrap; -} - -/** Set consistent quote types. */ -q { - quotes: "\201C" "\201D" "\2018" "\2019"; -} - -/** Address inconsistent and variable font size in all browsers. */ -small { - font-size: 80%; -} - -/** Prevent `sub` and `sup` affecting `line-height` in all browsers. */ -sub, sup { - font-size: 75%; - line-height: 0; - position: relative; - vertical-align: baseline; -} - -sup { - top: -0.5em; -} - -sub { - bottom: -0.25em; -} - -/* ========================================================================== Embedded content ========================================================================== */ -/** Remove border when inside `a` element in IE 8/9. */ -img { - border: 0; -} - -/** Correct overflow displayed oddly in IE 9. 
*/ -svg:not(:root) { - overflow: hidden; -} - -/* ========================================================================== Figures ========================================================================== */ -/** Address margin not present in IE 8/9 and Safari 5. */ -figure { - margin: 0; -} - -/* ========================================================================== Forms ========================================================================== */ -/** Define consistent border, margin, and padding. */ -fieldset { - border: 1px solid #c0c0c0; - margin: 0 2px; - padding: 0.35em 0.625em 0.75em; -} - -/** 1. Correct `color` not being inherited in IE 8/9. 2. Remove padding so people aren't caught out if they zero out fieldsets. */ -legend { - border: 0; /* 1 */ - padding: 0; /* 2 */ -} - -/** 1. Correct font family not being inherited in all browsers. 2. Correct font size not being inherited in all browsers. 3. Address margins set differently in Firefox 4+, Safari 5, and Chrome. */ -button, input, select, textarea { - font-family: inherit; /* 1 */ - font-size: 100%; /* 2 */ - margin: 0; /* 3 */ -} - -/** Address Firefox 4+ setting `line-height` on `input` using `!important` in the UA stylesheet. */ -button, input { - line-height: normal; -} - -/** Address inconsistent `text-transform` inheritance for `button` and `select`. All other form control elements do not inherit `text-transform` values. Correct `button` style inheritance in Chrome, Safari 5+, and IE 8+. Correct `select` style inheritance in Firefox 4+ and Opera. */ -button, select { - text-transform: none; -} - -/** 1. Avoid the WebKit bug in Android 4.0.* where (2) destroys native `audio` and `video` controls. 2. Correct inability to style clickable `input` types in iOS. 3. Improve usability and consistency of cursor style between image-type `input` and others. 
*/ -button, html input[type="button"], input[type="reset"], input[type="submit"] { - -webkit-appearance: button; /* 2 */ - cursor: pointer; /* 3 */ -} - -/** Re-set default cursor for disabled elements. */ -button[disabled], html input[disabled] { - cursor: default; -} - -/** 1. Address box sizing set to `content-box` in IE 8/9. 2. Remove excess padding in IE 8/9. */ -input[type="checkbox"], input[type="radio"] { - box-sizing: border-box; /* 1 */ - padding: 0; /* 2 */ -} - -/** 1. Address `appearance` set to `searchfield` in Safari 5 and Chrome. 2. Address `box-sizing` set to `border-box` in Safari 5 and Chrome (include `-moz` to future-proof). */ -input[type="search"] { - -webkit-appearance: textfield; /* 1 */ - -moz-box-sizing: content-box; - -webkit-box-sizing: content-box; /* 2 */ - box-sizing: content-box; -} - -/** Remove inner padding and search cancel button in Safari 5 and Chrome on OS X. */ -input[type="search"]::-webkit-search-cancel-button, input[type="search"]::-webkit-search-decoration { - -webkit-appearance: none; -} - -/** Remove inner padding and border in Firefox 4+. */ -button::-moz-focus-inner, input::-moz-focus-inner { - border: 0; - padding: 0; -} - -/** 1. Remove default vertical scrollbar in IE 8/9. 2. Improve readability and alignment in all browsers. */ -textarea { - overflow: auto; /* 1 */ - vertical-align: top; /* 2 */ -} - -/* ========================================================================== Tables ========================================================================== */ -/** Remove most spacing between table cells. 
*/ -table { - border-collapse: collapse; - border-spacing: 0; -} - -meta.foundation-mq-small { - font-family: "only screen and (min-width: 768px)"; - width: 768px; -} - -meta.foundation-mq-medium { - font-family: "only screen and (min-width:1280px)"; - width: 1280px; -} - -meta.foundation-mq-large { - font-family: "only screen and (min-width:1440px)"; - width: 1440px; -} - -*, *:before, *:after { - -moz-box-sizing: border-box; - -webkit-box-sizing: border-box; - box-sizing: border-box; -} - -html, body { - font-size: 100%; -} - -body { - background: white; - color: #34302d; - padding: 0; - margin: 0; - font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; - font-weight: normal; - font-style: normal; - line-height: 1.8em; - position: relative; - cursor: auto; -} - -#content, #content p { - line-height: 1.8em; - margin-top: 1.5em; -} - -#content li p { - margin-top: 0.25em; -} - -a:hover { - cursor: pointer; -} - -img, object, embed { - max-width: 100%; - height: auto; -} - -object, embed { - height: 100%; -} - -img { - -ms-interpolation-mode: bicubic; -} - -#map_canvas img, #map_canvas embed, #map_canvas object, .map_canvas img, .map_canvas embed, .map_canvas object { - max-width: none !important; -} - -.left { - float: left !important; -} - -.right { - float: right !important; -} - -.text-left { - text-align: left !important; -} - -.text-right { - text-align: right !important; -} - -.text-center { - text-align: center !important; -} - -.text-justify { - text-align: justify !important; -} - -.hide { - display: none; -} - -.antialiased, body { - -webkit-font-smoothing: antialiased; -} - -img { - display: inline-block; - vertical-align: middle; -} - -textarea { - height: auto; - min-height: 50px; -} - -select { - width: 100%; -} - -p.lead, .paragraph.lead > p, #preamble > .sectionbody > .paragraph:first-of-type p { - font-size: 1.21875em; -} - -.subheader, #content #toctitle, .admonitionblock td.content > .title, .exampleblock > .title, .imageblock 
> .title, .listingblock > .title, .literalblock > .title, .mathblock > .title, .openblock > .title, .paragraph > .title, .quoteblock > .title, .sidebarblock > .title, .tableblock > .title, .verseblock > .title, .videoblock > .title, .dlist > .title, .olist > .title, .ulist > .title, .qlist > .title, .hdlist > .title, .tableblock > caption { - color: #6db33f; - font-weight: 300; - margin-top: 0.2em; - margin-bottom: 0.5em; -} - -/* Typography resets */ -div, dl, dt, dd, ul, ol, li, h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6, pre, form, p, blockquote, th, td { - margin: 0; - padding: 0; - direction: ltr; -} - -/* Default Link Styles */ -a { - color: #6db33f; - line-height: inherit; - text-decoration: none; -} - -a:hover, a:focus { - color: #6db33f; - text-decoration: underline; -} - -a img { - border: none; -} - -/* Default paragraph styles */ -p { - font-family: inherit; - font-weight: normal; - font-size: 1em; - margin-bottom: 1.25em; - text-rendering: optimizeLegibility; -} - -p aside { - font-size: 0.875em; - font-style: italic; -} - -/* Default header styles */ -h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { - font-family: "Montserrat", Arial, sans-serif; - font-weight: normal; - font-style: normal; - color: #34302d; - text-rendering: optimizeLegibility; - margin-top: 1.6em; - margin-bottom: 0.6em; -} - -h1 small, h2 small, h3 small, #toctitle small, .sidebarblock > .content > .title small, h4 small, h5 small, h6 small { - font-size: 60%; - color: #6db33f; - line-height: 0; -} - -h1 { - font-size: 2.125em; -} - -h2 { - font-size: 1.6875em; -} - -h3, #toctitle, .sidebarblock > .content > .title { - font-size: 1.375em; -} - -h4 { - font-size: 1.125em; -} - -h5 { - font-size: 1.125em; -} - -h6 { - font-size: 1em; -} - -hr { - border: solid #dcd2c9; - border-width: 1px 0 0; - clear: both; - margin: 1.25em 0 1.1875em; - height: 0; -} - -/* Helpful Typography Defaults */ -em, i { - font-style: italic; - line-height: 
inherit; -} - -strong, b { - font-weight: bold; - line-height: inherit; -} - -small { - font-size: 60%; - line-height: inherit; -} - -code { - font-family: Consolas, "Liberation Mono", Courier, monospace; - font-weight: bold; - color: #305CB5; -} - -/* Lists */ -ul, ol, dl { - font-size: 1em; - margin-bottom: 1.25em; - list-style-position: outside; - font-family: inherit; -} - -ul, ol { - margin-left: 1.5em; -} - -ul.no-bullet, ol.no-bullet { - margin-left: 1.5em; -} - -/* Unordered Lists */ -ul li ul, ul li ol { - margin-left: 1.25em; - margin-bottom: 0; - font-size: 1em; /* Override nested font-size change */ -} - -ul.square li ul, ul.circle li ul, ul.disc li ul { - list-style: inherit; -} - -ul.square { - list-style-type: square; -} - -ul.circle { - list-style-type: circle; -} - -ul.disc { - list-style-type: disc; -} - -ul.no-bullet { - list-style: none; -} - -/* Ordered Lists */ -ol li ul, ol li ol { - margin-left: 1.25em; - margin-bottom: 0; -} - -/* Definition Lists */ -dl dt { - margin-bottom: 0.3125em; - font-weight: bold; -} - -dl dd { - margin-bottom: 1.25em; -} - -/* Abbreviations */ -abbr, acronym { - text-transform: uppercase; - font-size: 90%; - color: #34302d; - border-bottom: 1px dotted #dddddd; - cursor: help; -} - -abbr { - text-transform: none; -} - -/* Blockquotes */ -blockquote { - margin: 0 0 1.25em; - padding: 0.5625em 1.25em 0 1.1875em; - border-left: 1px solid #dddddd; -} - -blockquote cite { - display: block; - font-size: 0.8125em; - color: #655241; -} - -blockquote cite:before { - content: "\2014 \0020"; -} - -blockquote cite a, blockquote cite a:visited { - color: #655241; -} - -blockquote, blockquote p { - color: #34302d; -} - -/* Microformats */ -.vcard { - display: inline-block; - margin: 0 0 1.25em 0; - border: 1px solid #dddddd; - padding: 0.625em 0.75em; -} - -.vcard li { - margin: 0; - display: block; -} - -.vcard .fn { - font-weight: bold; - font-size: 0.9375em; -} - -.vevent .summary { - font-weight: bold; -} - -.vevent abbr { - 
cursor: auto; - text-decoration: none; - font-weight: bold; - border: none; - padding: 0 0.0625em; -} - -@media only screen and (min-width: 768px) { - h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { - } - - h1 { - font-size: 2.75em; - } - - h2 { - font-size: 2.3125em; - } - - h3, #toctitle, .sidebarblock > .content > .title { - font-size: 1.6875em; - } - - h4 { - font-size: 1.4375em; - } -} - -/* Print styles. Inlined to avoid required HTTP connection: www.phpied.com/delay-loading-your-print-css/ Credit to Paul Irish and HTML5 Boilerplate (html5boilerplate.com) -*/ -.print-only { - display: none !important; -} - -@media print { - * { - background: transparent !important; - color: #000 !important; /* Black prints faster: h5bp.com/s */ - box-shadow: none !important; - text-shadow: none !important; - } - - a, a:visited { - text-decoration: underline; - } - - a[href]:after { - content: " (" attr(href) ")"; - } - - abbr[title]:after { - content: " (" attr(title) ")"; - } - - .ir a:after, a[href^="javascript:"]:after, a[href^="#"]:after { - content: ""; - } - - pre, blockquote { - border: 1px solid #999; - page-break-inside: avoid; - } - - thead { - display: table-header-group; /* h5bp.com/t */ - } - - tr, img { - page-break-inside: avoid; - } - - img { - max-width: 100% !important; - } - - @page { - margin: 0.5cm; - } - - p, h2, h3, #toctitle, .sidebarblock > .content > .title { - orphans: 3; - widows: 3; - } - - h2, h3, #toctitle, .sidebarblock > .content > .title { - page-break-after: avoid; - } - - .hide-on-print { - display: none !important; - } - - .print-only { - display: block !important; - } - - .hide-for-print { - display: none !important; - } - - .show-for-print { - display: inherit !important; - } -} - -/* Tables */ -table { - background: white; - margin-bottom: 1.25em; - border: solid 1px #34302d; -} - -table thead, table tfoot { - font-weight: bold; -} - -table thead tr th, table thead tr td, table tfoot tr th, table tfoot tr td { - 
padding: 0.5em 0.625em 0.625em; - font-size: inherit; - color: #34302d; - text-align: left; -} - -table thead tr th { - color: white; - background: #34302d; -} - -table tr th, table tr td { - padding: 0.5625em 0.625em; - font-size: inherit; - color: #34302d; - border: 0 none; -} - -table tr.even, table tr.alt, table tr:nth-of-type(even) { - background: #f2F2F2; -} - -table thead tr th, table tfoot tr th, table tbody tr td, table tr td, table tfoot tr td { - display: table-cell; -} - -.clearfix:before, .clearfix:after, .float-group:before, .float-group:after { - content: " "; - display: table; -} - -.clearfix:after, .float-group:after { - clear: both; -} - -*:not(pre) > code { - font-size: inherit; - padding: 0; - background-color: inherit; - border: 0 solid #dddddd; - -webkit-border-radius: 6px; - border-radius: 6px; - text-shadow: none; - overflow-wrap: anywhere; -} - -@media only screen and (min-width: 1280px) { - *:not(pre) > code { - white-space: nowrap; - overflow-wrap: normal; - } -} - -pre, pre > code { - color: black; - font-family: monospace, serif; - font-weight: normal; -} - -.keyseq { - color: #774417; -} - -kbd:not(.keyseq) { - display: inline-block; - color: #211306; - font-size: 0.75em; - background-color: #F7F7F7; - border: 1px solid #ccc; - -webkit-border-radius: 3px; - border-radius: 3px; - -webkit-box-shadow: 0 1px 0 rgba(0, 0, 0, 0.2), 0 0 0 2px white inset; - box-shadow: 0 1px 0 rgba(0, 0, 0, 0.2), 0 0 0 2px white inset; - margin: -0.15em 0.15em 0 0.15em; - padding: 0.2em 0.6em 0.2em 0.5em; - vertical-align: middle; - white-space: nowrap; -} - -.keyseq kbd:first-child { - margin-left: 0; -} - -.keyseq kbd:last-child { - margin-right: 0; -} - -.menuseq, .menu { - color: black; -} - -b.button:before, b.button:after { - position: relative; - top: -1px; - font-weight: normal; -} - -b.button:before { - content: "["; - padding: 0 3px 0 2px; -} - -b.button:after { - content: "]"; - padding: 0 2px 0 3px; -} - -p a > code:hover { - color: #541312; -} - 
-#header, #content, #footnotes, #footer { - width: 100%; - margin-left: auto; - margin-right: auto; - margin-top: 0; - margin-bottom: 0; - max-width: max(80em, 60%); - *zoom: 1; - position: relative; - padding-left: 4em; - padding-right: 4em; -} - -#header:before, #header:after, #content:before, #content:after, #footnotes:before, #footnotes:after, #footer:before, #footer:after { - content: " "; - display: table; -} - -#header:after, #content:after, #footnotes:after, #footer:after { - clear: both; -} - -#header { - margin-bottom: 2.5em; -} - -#header > h1 { - color: #34302d; - font-weight: 400; -} - -#header span { - color: #34302d; -} - -#header #revnumber { - text-transform: capitalize; -} - -#header br { - display: none; -} - -#header br + span { -} - -#revdate { - display: block; -} - -#toc { - border-bottom: 1px solid #e6dfd8; - padding-bottom: 1.25em; -} - -#toc > ul { - margin-left: 0.25em; -} - -#toc ul.sectlevel0 > li > a { - font-style: italic; -} - -#toc ul.sectlevel0 ul.sectlevel1 { - margin-left: 0; - margin-top: 0.5em; - margin-bottom: 0.5em; -} - -#toc ul { - list-style-type: none; -} - -#toctitle { - color: #385dbd; -} - -@media only screen and (min-width: 768px) { - body.toc2 { - padding-left: 15em; - padding-right: 0; - } - - #toc.toc2 { - position: fixed; - width: 15em; - left: 0; - border-bottom: 0; - z-index: 1000; - padding: 1em; - height: 100%; - top: 0px; - background: #F1F1F1; - overflow: auto; - - -moz-transition-property: top; - -o-transition-property: top; - -webkit-transition-property: top; - transition-property: top; - -moz-transition-duration: 0.4s; - -o-transition-duration: 0.4s; - -webkit-transition-duration: 0.4s; - transition-duration: 0.4s; - } - - #reactor-header { - position: fixed; - top: -75px; - left: 0; - right: 0; - height: 75px; - - - -moz-transition-property: top; - -o-transition-property: top; - -webkit-transition-property: top; - transition-property: top; - -moz-transition-duration: 0.4s; - -o-transition-duration: 0.4s; 
- -webkit-transition-duration: 0.4s; - transition-duration: 0.4s; - } - - body.head-show #toc.toc2 { - top: 75px; - } - body.head-show #reactor-header { - top: 0; - } - - #toc.toc2 a { - color: #34302d; - font-family: Montserrat; - } - - #toc.toc2 #toctitle { - margin-top: 0; - font-size: 1.2em; - } - - #toc.toc2 > ul { - font-size: .90em; - } - - #toc.toc2 ul ul { - margin-left: 0; - padding-left: 0.4em; - } - - #toc.toc2 ul.sectlevel0 ul.sectlevel1 { - padding-left: 0; - margin-top: 0.5em; - margin-bottom: 0.5em; - } - - body.toc2.toc-right { - padding-left: 0; - padding-right: 15em; - } - - body.toc2.toc-right #toc.toc2 { - border-right: 0; - border-left: 1px solid #e6dfd8; - left: auto; - right: 0; - } -} - -@media only screen and (min-width: 1280px) { - body.toc2 { - padding-left: 20em; - padding-right: 0; - } - - #toc.toc2 { - width: 20em; - } - - #toc.toc2 #toctitle { - font-size: 1.375em; - } - - #toc.toc2 > ul { - font-size: 0.95em; - } - - #toc.toc2 ul ul { - padding-left: 1.25em; - } - - body.toc2.toc-right { - padding-left: 0; - padding-right: 20em; - } -} - -#content #toc { - border-style: solid; - border-width: 1px; - border-color: #d9d9d9; - margin-bottom: 1.25em; - padding: 1.25em; - background: #f2f2f2; - border-width: 0; - -webkit-border-radius: 6px; - border-radius: 6px; -} - -#content #toc > :first-child { - margin-top: 0; -} - -#content #toc > :last-child { - margin-bottom: 0; -} - -#content #toc a { - text-decoration: none; -} - -#content #toctitle { - font-weight: bold; - font-family: "Montserrat", Arial, sans-serif; - font-size: 1em; - padding-left: 0.125em; -} - -#footer { - max-width: 100%; - background-color: white; - padding: 1.25em; - color: #CCC; - border-top: 3px solid #F1F1F1; -} - -#footer-text { - color: #444; - line-height: 1.44; -} - -.sect1 { - padding-bottom: 1.25em; -} - -.sect1 + .sect1 { - border-top: 1px solid #e6dfd8; -} - -#content h1 > a.anchor, h2 > a.anchor, h3 > a.anchor, #toctitle > a.anchor, .sidebarblock > .content 
> .title > a.anchor, h4 > a.anchor, h5 > a.anchor, h6 > a.anchor { - position: absolute; - width: 1em; - margin-left: -1em; - display: block; - text-decoration: none; - visibility: hidden; - text-align: center; - font-weight: normal; -} - -#content h1 > a.anchor:before, h2 > a.anchor:before, h3 > a.anchor:before, #toctitle > a.anchor:before, .sidebarblock > .content > .title > a.anchor:before, h4 > a.anchor:before, h5 > a.anchor:before, h6 > a.anchor:before { - content: '\00A7'; - font-size: .85em; - vertical-align: text-top; - display: block; - margin-top: 0.05em; -} - -#content h1:hover > a.anchor, #content h1 > a.anchor:hover, h2:hover > a.anchor, h2 > a.anchor:hover, h3:hover > a.anchor, #toctitle:hover > a.anchor, .sidebarblock > .content > .title:hover > a.anchor, h3 > a.anchor:hover, #toctitle > a.anchor:hover, .sidebarblock > .content > .title > a.anchor:hover, h4:hover > a.anchor, h4 > a.anchor:hover, h5:hover > a.anchor, h5 > a.anchor:hover, h6:hover > a.anchor, h6 > a.anchor:hover { - visibility: visible; -} - -#content h1 > a.link, h2 > a.link, h3 > a.link, #toctitle > a.link, .sidebarblock > .content > .title > a.link, h4 > a.link, h5 > a.link, h6 > a.link { - color: #34302d; - text-decoration: none; -} - -#content h1 > a.link:hover, h2 > a.link:hover, h3 > a.link:hover, #toctitle > a.link:hover, .sidebarblock > .content > .title > a.link:hover, h4 > a.link:hover, h5 > a.link:hover, h6 > a.link:hover { - color: #34302d; -} - -.paragraph { - overflow-wrap: break-word; -} - -.imageblock, .literalblock, .listingblock, .mathblock, .verseblock, .videoblock { - margin-bottom: 1.25em; - margin-top: 1.25em; -} - -.admonitionblock td.content > .title, .exampleblock > .title, .imageblock > .title, .listingblock > .title, .literalblock > .title, .mathblock > .title, .openblock > .title, .paragraph > .title, .quoteblock > .title, .sidebarblock > .title, .tableblock > .title, .verseblock > .title, .videoblock > .title, .dlist > .title, .olist > .title, .ulist > 
.title, .qlist > .title, .hdlist > .title { - text-align: left; - font-weight: bold; -} - -.tableblock { - overflow-wrap: anywhere; -} - -.tableblock > caption { - text-align: left; - font-weight: bold; - white-space: nowrap; - overflow: visible; - max-width: 0; -} - -table.tableblock #preamble > .sectionbody > .paragraph:first-of-type p { - font-size: inherit; -} - -.admonitionblock > table { - border: 0; - background: none; - width: 100%; - table-layout: fixed; -} - -.admonitionblock > table td.icon { - text-align: center; - width: 80px; -} - -.admonitionblock > table td.icon img { - max-width: none; -} - -.admonitionblock > table td.icon .title { - font-weight: bold; - text-transform: uppercase; -} - -.admonitionblock > table td.content { - padding-left: 1.125em; - padding-right: 1.25em; - border-left: 1px solid #dcd2c9; - color: #34302d; - overflow-wrap: break-word; -} - -.admonitionblock > table td.content > :last-child > :last-child { - margin-bottom: 0; -} - -.exampleblock > .content { - border-style: solid; - border-width: 1px; - border-color: #f3e0ce; - margin-bottom: 1.25em; - padding: 1.25em; - background: white; - -webkit-border-radius: 6px; - border-radius: 6px; -} - -.exampleblock > .content > :first-child { - margin-top: 0; -} - -.exampleblock > .content > :last-child { - margin-bottom: 0; -} - -.exampleblock > .content h1, .exampleblock > .content h2, .exampleblock > .content h3, .exampleblock > .content #toctitle, .sidebarblock.exampleblock > .content > .title, .exampleblock > .content h4, .exampleblock > .content h5, .exampleblock > .content h6, .exampleblock > .content p { - color: #333333; -} - -.exampleblock > .content h1, .exampleblock > .content h2, .exampleblock > .content h3, .exampleblock > .content #toctitle, .sidebarblock.exampleblock > .content > .title, .exampleblock > .content h4, .exampleblock > .content h5, .exampleblock > .content h6 { - margin-bottom: 0.625em; -} - -.exampleblock > .content h1.subheader, .exampleblock > .content 
h2.subheader, .exampleblock > .content h3.subheader, .exampleblock > .content .subheader#toctitle, .sidebarblock.exampleblock > .content > .subheader.title, .exampleblock > .content h4.subheader, .exampleblock > .content h5.subheader, .exampleblock > .content h6.subheader { -} - -.exampleblock.result > .content { - -webkit-box-shadow: 0 1px 8px #d9d9d9; - box-shadow: 0 1px 8px #d9d9d9; -} - -.sidebarblock { - padding: 1.25em 2em; - background: #F1F1F1; - margin: 2em -2em; - -} - -.sidebarblock > :first-child { - margin-top: 0; -} - -.sidebarblock > :last-child { - margin-bottom: 0; -} - -.sidebarblock h1, .sidebarblock h2, .sidebarblock h3, .sidebarblock #toctitle, .sidebarblock > .content > .title, .sidebarblock h4, .sidebarblock h5, .sidebarblock h6, .sidebarblock p { - color: #333333; -} - -.sidebarblock h1, .sidebarblock h2, .sidebarblock h3, .sidebarblock #toctitle, .sidebarblock > .content > .title, .sidebarblock h4, .sidebarblock h5, .sidebarblock h6 { - margin-bottom: 0.625em; -} - -.sidebarblock h1.subheader, .sidebarblock h2.subheader, .sidebarblock h3.subheader, .sidebarblock .subheader#toctitle, .sidebarblock > .content > .subheader.title, .sidebarblock h4.subheader, .sidebarblock h5.subheader, .sidebarblock h6.subheader { -} - -.sidebarblock > .content > .title { - color: #6db33f; - margin-top: 0; - font-size: 1.2em; -} - -.exampleblock > .content > :last-child > :last-child, .exampleblock > .content .olist > ol > li:last-child > :last-child, .exampleblock > .content .ulist > ul > li:last-child > :last-child, .exampleblock > .content .qlist > ol > li:last-child > :last-child, .sidebarblock > .content > :last-child > :last-child, .sidebarblock > .content .olist > ol > li:last-child > :last-child, .sidebarblock > .content .ulist > ul > li:last-child > :last-child, .sidebarblock > .content .qlist > ol > li:last-child > :last-child { - margin-bottom: 0; -} - -/*.literalblock .content pre.highlight, .listingblock .content pre.highlight {*/ - 
/*background-color: #f1f8ec;*/ -/*}*/ -/*.literalblock pre:not([class]), .listingblock pre:not([class]) {*/ - /*background-color: #f1f8ec;*/ -/*}*/ - -.literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { - border-width: 1px; - border-style: solid; - border-color: rgba(21, 35, 71, 0.1); - -webkit-border-radius: 6px; - border-radius: 6px; - padding: 0.8em; - word-wrap: break-word; -} - -.literalblock pre.nowrap, .literalblock pre[class].nowrap, .listingblock pre.nowrap, .listingblock pre[class].nowrap { - overflow-x: auto; - white-space: pre; - word-wrap: normal; -} - -.literalblock pre > code, .literalblock pre[class] > code, .listingblock pre > code, .listingblock pre[class] > code { - display: block; -} - -@media only screen { - .literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { - font-size: 0.72em; - } -} - -@media only screen and (min-width: 768px) { - .literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { - font-size: 0.81em; - } -} - -@media only screen and (min-width: 1280px) { - .literalblock pre, .literalblock pre[class], .listingblock pre, .listingblock pre[class] { - font-size: 0.9em; - } -} - -.listingblock pre.highlight { - padding: 0; - line-height: 1em; -} - -.listingblock pre.highlight > code { - padding: 0.8em; -} - -.listingblock > .content { - position: relative; -} - -.listingblock:hover code[class*=" language-"]:before { - text-transform: uppercase; - font-size: 0.9em; - color: #999; - position: absolute; - top: 0.375em; - right: 0.375em; -} - -.listingblock:hover code.asciidoc:before { - content: "asciidoc"; -} - -.listingblock:hover code.clojure:before { - content: "clojure"; -} - -.listingblock:hover code.css:before { - content: "css"; -} - -.listingblock:hover code.groovy:before { - content: "groovy"; -} - -.listingblock:hover code.html:before { - content: "html"; -} - -.listingblock:hover code.java:before { - content: "java"; -} 
- -.listingblock:hover code.javascript:before { - content: "javascript"; -} - -.listingblock:hover code.python:before { - content: "python"; -} - -.listingblock:hover code.ruby:before { - content: "ruby"; -} - -.listingblock:hover code.sass:before { - content: "sass"; -} - -.listingblock:hover code.scss:before { - content: "scss"; -} - -.listingblock:hover code.xml:before { - content: "xml"; -} - -.listingblock:hover code.yaml:before { - content: "yaml"; -} - -.listingblock.terminal pre .command:before { - content: attr(data-prompt); - padding-right: 0.5em; - color: #999; -} - -.listingblock.terminal pre .command:not([data-prompt]):before { - content: '$'; -} - -table.pyhltable { - border: 0; - margin-bottom: 0; -} - -table.pyhltable td { - vertical-align: top; - padding-top: 0; - padding-bottom: 0; -} - -table.pyhltable td.code { - padding-left: .75em; - padding-right: 0; -} - -.highlight.pygments .lineno, table.pyhltable td:not(.code) { - color: #999; - padding-left: 0; - padding-right: .5em; - border-right: 1px solid #dcd2c9; -} - -.highlight.pygments .lineno { - display: inline-block; - margin-right: .25em; -} - -table.pyhltable .linenodiv { - background-color: transparent !important; - padding-right: 0 !important; -} - -.quoteblock { - margin: 0 0 1.25em; - padding: 0.5625em 1.25em 0 1.1875em; - border-left: 3px solid #dddddd; -} - -.quoteblock blockquote { - margin: 0 0 1.25em 0; - padding: 0 0 0.5625em 0; - border: 0; -} - -.quoteblock blockquote > .paragraph:last-child p { - margin-bottom: 0; -} - -.quoteblock .attribution { - margin-top: -.25em; - padding-bottom: 0.5625em; - font-size: 0.8125em; - overflow-wrap: break-word; -} - -.quoteblock .attribution br { - display: none; -} - -.quoteblock .attribution cite { - display: block; - margin-bottom: 0.625em; -} - -table thead th, table tfoot th { - font-weight: bold; -} - -table.tableblock.grid-all { - border-collapse: separate; - border-radius: 6px; - border-top: 1px solid #34302d; - border-bottom: 1px 
solid #34302d; -} - -table.tableblock.frame-topbot, table.tableblock.frame-none { - border-left: 0; - border-right: 0; -} - -table.tableblock.frame-sides, table.tableblock.frame-none { - border-top: 0; - border-bottom: 0; -} - -table.tableblock td .paragraph:last-child p > p:last-child, table.tableblock th > p:last-child, table.tableblock td > p:last-child { - margin-bottom: 0; -} - -th.tableblock.halign-left, td.tableblock.halign-left { - text-align: left; -} - -th.tableblock.halign-right, td.tableblock.halign-right { - text-align: right; -} - -th.tableblock.halign-center, td.tableblock.halign-center { - text-align: center; -} - -th.tableblock.valign-top, td.tableblock.valign-top { - vertical-align: top; -} - -th.tableblock.valign-bottom, td.tableblock.valign-bottom { - vertical-align: bottom; -} - -th.tableblock.valign-middle, td.tableblock.valign-middle { - vertical-align: middle; -} - -tbody tr th { - display: table-cell; - background: rgba(105, 60, 22, 0.25); -} - -tbody tr th, tbody tr th p, tfoot tr th, tfoot tr th p { - color: #211306; - font-weight: bold; -} - -td > div.verse { - white-space: pre; -} - -ol { - margin-left: 1.75em; -} - -ul li ol { - margin-left: 1.5em; -} - -dl dd { - margin-left: 1.125em; -} - -dl dd:last-child, dl dd:last-child > :last-child { - margin-bottom: 0; -} - -ol > li p, ul > li p, ul dd, ol dd, .olist .olist, .ulist .ulist, .ulist .olist, .olist .ulist { - margin-bottom: 0.625em; -} - -.ulist { - overflow-wrap: break-word; -} - -ul.unstyled, ol.unnumbered, ul.checklist, ul.none { - list-style-type: none; -} - -ul.unstyled, ol.unnumbered, ul.checklist { - margin-left: 0.625em; -} - -ul.checklist li > p:first-child > i[class^="icon-check"]:first-child, ul.checklist li > p:first-child > input[type="checkbox"]:first-child { - margin-right: 0.25em; -} - -ul.checklist li > p:first-child > input[type="checkbox"]:first-child { - position: relative; - top: 1px; -} - -ul.inline { - margin: 0 auto 0.625em auto; - margin-left: -1.375em; - 
margin-right: 0; - padding: 0; - list-style: none; - overflow: hidden; -} - -ul.inline > li { - list-style: none; - float: left; - margin-left: 1.375em; - display: block; -} - -ul.inline > li > * { - display: block; -} - -.unstyled dl dt { - font-weight: normal; - font-style: normal; -} - -ol.arabic { - list-style-type: decimal; -} - -ol.decimal { - list-style-type: decimal-leading-zero; -} - -ol.loweralpha { - list-style-type: lower-alpha; -} - -ol.upperalpha { - list-style-type: upper-alpha; -} - -ol.lowerroman { - list-style-type: lower-roman; -} - -ol.upperroman { - list-style-type: upper-roman; -} - -ol.lowergreek { - list-style-type: lower-greek; -} - -.hdlist > table, .colist > table { - border: 0; - background: none; -} - -.hdlist > table > tbody > tr, .colist > table > tbody > tr { - background: none; -} - -td.hdlist1 { - padding-right: .75em; - font-weight: bold; -} - -td.hdlist1, td.hdlist2 { - vertical-align: top; -} - -.literalblock + .colist, .listingblock + .colist { - margin-top: -0.5em; -} - -.colist > table { - overflow-wrap: anywhere; -} - -.colist > table tr > td:first-of-type { - padding: 0 .75em; -} - -.colist > table tr > td:last-of-type { - padding: 0.25em 0; -} - -.qanda > ol > li > p > em:only-child { - color: #063f40; -} - -.thumb, .th { - line-height: 0; - display: inline-block; - border: solid 4px white; - -webkit-box-shadow: 0 0 0 1px #dddddd; - box-shadow: 0 0 0 1px #dddddd; -} - -.imageblock.left, .imageblock[style*="float: left"] { - margin: 0.25em 0.625em 1.25em 0; -} - -.imageblock.right, .imageblock[style*="float: right"] { - margin: 0.25em 0 1.25em 0.625em; -} - -.imageblock > .title { - margin-bottom: 0; -} - -.imageblock.thumb, .imageblock.th { - border-width: 6px; -} - -.imageblock.thumb > .title, .imageblock.th > .title { - padding: 0 0.125em; -} - -.image.left, .image.right { - margin-top: 0.25em; - margin-bottom: 0.25em; - display: inline-block; - line-height: 0; -} - -.image.left { - margin-right: 0.625em; -} - 
-.image.right { - margin-left: 0.625em; -} - -a.image { - text-decoration: none; -} - -span.footnote, span.footnoteref { - vertical-align: super; - font-size: 0.875em; -} - -span.footnote a, span.footnoteref a { - text-decoration: none; -} - -#footnotes { - padding-top: 0.75em; - padding-bottom: 0.75em; - margin-bottom: 0.625em; -} - -#footnotes hr { - width: 20%; - min-width: 6.25em; - margin: -.25em 0 .75em 0; - border-width: 1px 0 0 0; -} - -#footnotes .footnote { - padding: 0 0.375em; - font-size: 0.875em; - margin-left: 1.2em; - text-indent: -1.2em; - margin-bottom: .2em; -} - -#footnotes .footnote a:first-of-type { - font-weight: bold; - text-decoration: none; -} - -#footnotes .footnote:last-of-type { - margin-bottom: 0; -} - -#content #footnotes { - margin-top: -0.625em; - margin-bottom: 0; - padding: 0.75em 0; -} - -.gist .file-data > table { - border: none; - background: #fff; - width: 100%; - margin-bottom: 0; -} - -.gist .file-data > table td.line-data { - width: 99%; -} - -div.unbreakable { - page-break-inside: avoid; -} - -.big { - font-size: larger; -} - -.small { - font-size: smaller; -} - -.underline { - text-decoration: underline; -} - -.overline { - text-decoration: overline; -} - -.line-through { - text-decoration: line-through; -} - -.aqua { - color: #00bfbf; -} - -.aqua-background { - background-color: #00fafa; -} - -.black { - color: black; -} - -.black-background { - background-color: black; -} - -.blue { - color: #0000bf; -} - -.blue-background { - background-color: #0000fa; -} - -.fuchsia { - color: #bf00bf; -} - -.fuchsia-background { - background-color: #fa00fa; -} - -.gray { - color: #606060; -} - -.gray-background { - background-color: #7d7d7d; -} - -.green { - color: #006000; -} - -.green-background { - background-color: #007d00; -} - -.lime { - color: #00bf00; -} - -.lime-background { - background-color: #00fa00; -} - -.maroon { - color: #600000; -} - -.maroon-background { - background-color: #7d0000; -} - -.navy { - color: #000060; 
-} - -.navy-background { - background-color: #00007d; -} - -.olive { - color: #606000; -} - -.olive-background { - background-color: #7d7d00; -} - -.purple { - color: #600060; -} - -.purple-background { - background-color: #7d007d; -} - -.red { - color: #bf0000; -} - -.red-background { - background-color: #fa0000; -} - -.silver { - color: #909090; -} - -.silver-background { - background-color: #bcbcbc; -} - -.teal { - color: #006060; -} - -.teal-background { - background-color: #007d7d; -} - -.white { - color: #bfbfbf; -} - -.white-background { - background-color: #fafafa; -} - -.yellow { - color: #bfbf00; -} - -.yellow-background { - background-color: #fafa00; -} - -span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { - cursor: default; -} - -.admonitionblock td.icon [class^="icon-"]:before { - font-size: 2.5em; - text-shadow: 1px 1px 2px rgba(0, 0, 0, 0.5); - cursor: default; -} - -.admonitionblock td.icon .icon-note:before { - content: "\f05a"; - color: #095557; - color: #064042; -} - -.admonitionblock td.icon .icon-tip:before { - content: "\f0eb"; - text-shadow: 1px 1px 2px rgba(155, 155, 0, 0.8); - color: #111; -} - -.admonitionblock td.icon .icon-warning:before { - content: "\f071"; - color: #bf6900; -} - -.admonitionblock td.icon .icon-caution:before { - content: "\f06d"; - color: #bf3400; -} - -.admonitionblock td.icon .icon-important:before { - content: "\f06a"; - color: #bf0000; -} - -.conum { - display: inline-block; - color: white !important; - background-color: #6db33f; - -webkit-border-radius: 100px; - border-radius: 100px; - text-align: center; - width: 20px; - height: 20px; - font-size: 12px; - font-weight: bold; - line-height: 20px; - font-family: Arial, sans-serif; - font-style: normal; - position: relative; - top: -2px; - letter-spacing: -1px; -} - -.conum * { - color: white !important; -} - -.conum + b { - display: none; -} - -.conum:after { - content: attr(data-value); -} - -.conum:not([data-value]):empty { - display: none; -} - -body 
{ - padding-top: 60px; -} - -#toc.toc2 ul ul { - padding-left: 1em; -} -#toc.toc2 ul ul.sectlevel2 { -} - -#toctitle { - color: #34302d; - display: none; -} - -#header h1 { - font-weight: bold; - position: relative; - left: -0.0625em; -} - -#header h1 span.lo { - color: #dc9424; -} - -#content h2, #content h3, #content #toctitle, #content .sidebarblock > .content > .title, #content h4, #content h5, #content #toctitle { - font-weight: normal; - position: relative; - left: -0.0625em; - overflow-wrap: break-word; -} - -#content h2 { - font-weight: bold; -} - -.admonitionblock > table td.content { - border-color: #e6dfd8; -} - -table.tableblock.grid-all { - -webkit-border-radius: 0; - border-radius: 0; -} - -#footer { - background-color: white; - color: #34302d; -} - -.imageblock .title { - text-align: center; -} - -#content h1.sect0 { - font-size: 48px; -} - -#toc > ul > li > a { - font-size: large; -} - - - - -@import url(https://fonts.googleapis.com/css?family=Montserrat:400,700|Karla:400,700); -body { -} -#reactor-header { - background: #34302d; - border-top: 4px solid #6db33f; - z-index: 2000; - font-family: 'Montserrat'; - height: 75px; -} - -#reactor-header h1#logo { - margin: 7px 0 0 10px; - padding: 0; - float: left; -} - -#reactor-header h1#logo a { - display: block; - background: url(images/logo-2x.png) no-repeat 0 0; - background-size: 253px 80px; - height: 40px; - width: 253px; - text-indent: -6000em; - margin: 8px 0; -} -#reactor-header h1#logo a:hover strong { - filter: progid:DXImageTransform.Microsoft.Alpha(enabled=false); - opacity: 1; -} -#reactor-header h1#logo a strong { - display: block; - background: url(images/logo-2x.png) no-repeat 0 0; - background-size: 253px 80px; - color: red; - height: 40px; - width: 253px; - text-indent: -6000em; - margin: 8px 0; - -moz-transition-property: opacity; - -o-transition-property: opacity; - -webkit-transition-property: opacity; - transition-property: opacity; - -moz-transition-duration: 0.2s; - 
-o-transition-duration: 0.2s; - -webkit-transition-duration: 0.2s; - transition-duration: 0.2s; - filter: progid:DXImageTransform.Microsoft.Alpha(Opacity=0); - opacity: 0; -} - -#nav, #nav ul { - display: block; - margin: 0; - padding: 0; -} -#nav { - float: right; - margin-right: 10px; -} -#nav ul li { - display: block; - float: left; - list-style: none; - margin: 0; - padding: 0; -} -#nav ul li a { - color: white; - text-decoration: none; - font-weight: 500; - display: block; - text-transform: uppercase; - font-size: 13.5px; - line-height: 71px; - margin: 0; - padding: 0 12px; - -moz-transition-property: background-color; - -o-transition-property: background-color; - -webkit-transition-property: background-color; - transition-property: background-color; - -moz-transition-duration: 0.2s; - -o-transition-duration: 0.2s; - -webkit-transition-duration: 0.2s; - transition-duration: 0.2s; -} -#nav ul li a:hover { - background: #6db33f; -} -#nav ul li a.active { - background: #6db33f; -} diff --git a/docs/asciidoc/highlight/CHANGES.md b/docs/modules/ROOT/assets/highlight/CHANGES.md similarity index 100% rename from docs/asciidoc/highlight/CHANGES.md rename to docs/modules/ROOT/assets/highlight/CHANGES.md diff --git a/docs/asciidoc/highlight/LICENSE b/docs/modules/ROOT/assets/highlight/LICENSE similarity index 100% rename from docs/asciidoc/highlight/LICENSE rename to docs/modules/ROOT/assets/highlight/LICENSE diff --git a/docs/asciidoc/highlight/README.md b/docs/modules/ROOT/assets/highlight/README.md similarity index 100% rename from docs/asciidoc/highlight/README.md rename to docs/modules/ROOT/assets/highlight/README.md diff --git a/docs/asciidoc/highlight/README.ru.md b/docs/modules/ROOT/assets/highlight/README.ru.md similarity index 100% rename from docs/asciidoc/highlight/README.ru.md rename to docs/modules/ROOT/assets/highlight/README.ru.md diff --git a/docs/asciidoc/highlight/highlight.min.js b/docs/modules/ROOT/assets/highlight/highlight.min.js similarity index 
100% rename from docs/asciidoc/highlight/highlight.min.js rename to docs/modules/ROOT/assets/highlight/highlight.min.js diff --git a/docs/asciidoc/highlight/styles/railscasts.min.css b/docs/modules/ROOT/assets/highlight/styles/railscasts.min.css similarity index 100% rename from docs/asciidoc/highlight/styles/railscasts.min.css rename to docs/modules/ROOT/assets/highlight/styles/railscasts.min.css diff --git a/docs/modules/ROOT/assets/images/flux.svg b/docs/modules/ROOT/assets/images/flux.svg new file mode 120000 index 0000000000..88dc73d291 --- /dev/null +++ b/docs/modules/ROOT/assets/images/flux.svg @@ -0,0 +1 @@ +../../../../../reactor-core/src/main/java/reactor/core/publisher/doc-files/marbles/flux.svg \ No newline at end of file diff --git a/docs/asciidoc/images/gs-cold.png b/docs/modules/ROOT/assets/images/gs-cold.png similarity index 100% rename from docs/asciidoc/images/gs-cold.png rename to docs/modules/ROOT/assets/images/gs-cold.png diff --git a/docs/asciidoc/images/gs-compose.png b/docs/modules/ROOT/assets/images/gs-compose.png similarity index 100% rename from docs/asciidoc/images/gs-compose.png rename to docs/modules/ROOT/assets/images/gs-compose.png diff --git a/docs/asciidoc/images/gs-hot.png b/docs/modules/ROOT/assets/images/gs-hot.png similarity index 100% rename from docs/asciidoc/images/gs-hot.png rename to docs/modules/ROOT/assets/images/gs-hot.png diff --git a/docs/asciidoc/images/gs-operators.png b/docs/modules/ROOT/assets/images/gs-operators.png similarity index 100% rename from docs/asciidoc/images/gs-operators.png rename to docs/modules/ROOT/assets/images/gs-operators.png diff --git a/docs/asciidoc/images/gs-reftail.png b/docs/modules/ROOT/assets/images/gs-reftail.png similarity index 100% rename from docs/asciidoc/images/gs-reftail.png rename to docs/modules/ROOT/assets/images/gs-reftail.png diff --git a/docs/asciidoc/images/gs-transform.png b/docs/modules/ROOT/assets/images/gs-transform.png similarity index 100% rename from 
docs/asciidoc/images/gs-transform.png rename to docs/modules/ROOT/assets/images/gs-transform.png diff --git a/docs/asciidoc/images/legend-events.svg b/docs/modules/ROOT/assets/images/legend-events.svg similarity index 100% rename from docs/asciidoc/images/legend-events.svg rename to docs/modules/ROOT/assets/images/legend-events.svg diff --git a/docs/asciidoc/images/legend-operator-companion.svg b/docs/modules/ROOT/assets/images/legend-operator-companion.svg similarity index 100% rename from docs/asciidoc/images/legend-operator-companion.svg rename to docs/modules/ROOT/assets/images/legend-operator-companion.svg diff --git a/docs/asciidoc/images/legend-operator-double-source.svg b/docs/modules/ROOT/assets/images/legend-operator-double-source.svg similarity index 100% rename from docs/asciidoc/images/legend-operator-double-source.svg rename to docs/modules/ROOT/assets/images/legend-operator-double-source.svg diff --git a/docs/asciidoc/images/legend-operator-method.svg b/docs/modules/ROOT/assets/images/legend-operator-method.svg similarity index 100% rename from docs/asciidoc/images/legend-operator-method.svg rename to docs/modules/ROOT/assets/images/legend-operator-method.svg diff --git a/docs/asciidoc/images/legend-operator-parallel.svg b/docs/modules/ROOT/assets/images/legend-operator-parallel.svg similarity index 100% rename from docs/asciidoc/images/legend-operator-parallel.svg rename to docs/modules/ROOT/assets/images/legend-operator-parallel.svg diff --git a/docs/asciidoc/images/legend-operator-static.svg b/docs/modules/ROOT/assets/images/legend-operator-static.svg similarity index 100% rename from docs/asciidoc/images/legend-operator-static.svg rename to docs/modules/ROOT/assets/images/legend-operator-static.svg diff --git a/docs/asciidoc/images/legend-operator-windowing.svg b/docs/modules/ROOT/assets/images/legend-operator-windowing.svg similarity index 100% rename from docs/asciidoc/images/legend-operator-windowing.svg rename to 
docs/modules/ROOT/assets/images/legend-operator-windowing.svg diff --git a/docs/asciidoc/images/legend-sideEffects1.svg b/docs/modules/ROOT/assets/images/legend-sideEffects1.svg similarity index 100% rename from docs/asciidoc/images/legend-sideEffects1.svg rename to docs/modules/ROOT/assets/images/legend-sideEffects1.svg diff --git a/docs/asciidoc/images/legend-sideEffects2.svg b/docs/modules/ROOT/assets/images/legend-sideEffects2.svg similarity index 100% rename from docs/asciidoc/images/legend-sideEffects2.svg rename to docs/modules/ROOT/assets/images/legend-sideEffects2.svg diff --git a/docs/asciidoc/images/logo-2x.png b/docs/modules/ROOT/assets/images/logo-2x.png similarity index 100% rename from docs/asciidoc/images/logo-2x.png rename to docs/modules/ROOT/assets/images/logo-2x.png diff --git a/docs/asciidoc/images/logo.png b/docs/modules/ROOT/assets/images/logo.png similarity index 100% rename from docs/asciidoc/images/logo.png rename to docs/modules/ROOT/assets/images/logo.png diff --git a/docs/modules/ROOT/assets/images/mono.svg b/docs/modules/ROOT/assets/images/mono.svg new file mode 120000 index 0000000000..590dd1386f --- /dev/null +++ b/docs/modules/ROOT/assets/images/mono.svg @@ -0,0 +1 @@ +../../../../../reactor-core/src/main/java/reactor/core/publisher/doc-files/marbles/mono.svg \ No newline at end of file diff --git a/docs/asciidoc/index.asciidoc b/docs/modules/ROOT/nav.adoc similarity index 100% rename from docs/asciidoc/index.asciidoc rename to docs/modules/ROOT/nav.adoc diff --git a/docs/asciidoc/aboutDoc.adoc b/docs/modules/ROOT/pages/aboutDoc.adoc similarity index 100% rename from docs/asciidoc/aboutDoc.adoc rename to docs/modules/ROOT/pages/aboutDoc.adoc diff --git a/docs/asciidoc/advanced-contextPropagation.adoc b/docs/modules/ROOT/pages/advanced-contextPropagation.adoc similarity index 100% rename from docs/asciidoc/advanced-contextPropagation.adoc rename to docs/modules/ROOT/pages/advanced-contextPropagation.adoc diff --git 
a/docs/asciidoc/advancedFeatures.adoc b/docs/modules/ROOT/pages/advancedFeatures.adoc similarity index 100% rename from docs/asciidoc/advancedFeatures.adoc rename to docs/modules/ROOT/pages/advancedFeatures.adoc diff --git a/docs/asciidoc/apdx-howtoReadMarbles.adoc b/docs/modules/ROOT/pages/apdx-howtoReadMarbles.adoc similarity index 100% rename from docs/asciidoc/apdx-howtoReadMarbles.adoc rename to docs/modules/ROOT/pages/apdx-howtoReadMarbles.adoc diff --git a/docs/asciidoc/apdx-implem.adoc b/docs/modules/ROOT/pages/apdx-implem.adoc similarity index 100% rename from docs/asciidoc/apdx-implem.adoc rename to docs/modules/ROOT/pages/apdx-implem.adoc diff --git a/docs/asciidoc/apdx-migrating.adoc b/docs/modules/ROOT/pages/apdx-migrating.adoc similarity index 100% rename from docs/asciidoc/apdx-migrating.adoc rename to docs/modules/ROOT/pages/apdx-migrating.adoc diff --git a/docs/asciidoc/apdx-operatorChoice.adoc b/docs/modules/ROOT/pages/apdx-operatorChoice.adoc similarity index 100% rename from docs/asciidoc/apdx-operatorChoice.adoc rename to docs/modules/ROOT/pages/apdx-operatorChoice.adoc diff --git a/docs/asciidoc/apdx-optimizations.adoc b/docs/modules/ROOT/pages/apdx-optimizations.adoc similarity index 100% rename from docs/asciidoc/apdx-optimizations.adoc rename to docs/modules/ROOT/pages/apdx-optimizations.adoc diff --git a/docs/asciidoc/apdx-reactorExtra.adoc b/docs/modules/ROOT/pages/apdx-reactorExtra.adoc similarity index 100% rename from docs/asciidoc/apdx-reactorExtra.adoc rename to docs/modules/ROOT/pages/apdx-reactorExtra.adoc diff --git a/docs/asciidoc/apdx-writingOperator.adoc b/docs/modules/ROOT/pages/apdx-writingOperator.adoc similarity index 100% rename from docs/asciidoc/apdx-writingOperator.adoc rename to docs/modules/ROOT/pages/apdx-writingOperator.adoc diff --git a/docs/asciidoc/coreFeatures.adoc b/docs/modules/ROOT/pages/coreFeatures.adoc similarity index 100% rename from docs/asciidoc/coreFeatures.adoc rename to 
docs/modules/ROOT/pages/coreFeatures.adoc diff --git a/docs/asciidoc/debugging.adoc b/docs/modules/ROOT/pages/debugging.adoc similarity index 100% rename from docs/asciidoc/debugging.adoc rename to docs/modules/ROOT/pages/debugging.adoc diff --git a/docs/asciidoc/faq.adoc b/docs/modules/ROOT/pages/faq.adoc similarity index 100% rename from docs/asciidoc/faq.adoc rename to docs/modules/ROOT/pages/faq.adoc diff --git a/docs/asciidoc/gettingStarted.adoc b/docs/modules/ROOT/pages/gettingStarted.adoc similarity index 100% rename from docs/asciidoc/gettingStarted.adoc rename to docs/modules/ROOT/pages/gettingStarted.adoc diff --git a/docs/asciidoc/kotlin.adoc b/docs/modules/ROOT/pages/kotlin.adoc similarity index 100% rename from docs/asciidoc/kotlin.adoc rename to docs/modules/ROOT/pages/kotlin.adoc diff --git a/docs/asciidoc/metrics-details.adoc b/docs/modules/ROOT/pages/metrics-details.adoc similarity index 100% rename from docs/asciidoc/metrics-details.adoc rename to docs/modules/ROOT/pages/metrics-details.adoc diff --git a/docs/asciidoc/metrics.adoc b/docs/modules/ROOT/pages/metrics.adoc similarity index 100% rename from docs/asciidoc/metrics.adoc rename to docs/modules/ROOT/pages/metrics.adoc diff --git a/docs/asciidoc/processors.adoc b/docs/modules/ROOT/pages/processors.adoc similarity index 100% rename from docs/asciidoc/processors.adoc rename to docs/modules/ROOT/pages/processors.adoc diff --git a/docs/asciidoc/producing.adoc b/docs/modules/ROOT/pages/producing.adoc similarity index 100% rename from docs/asciidoc/producing.adoc rename to docs/modules/ROOT/pages/producing.adoc diff --git a/docs/asciidoc/reactiveProgramming.adoc b/docs/modules/ROOT/pages/reactiveProgramming.adoc similarity index 100% rename from docs/asciidoc/reactiveProgramming.adoc rename to docs/modules/ROOT/pages/reactiveProgramming.adoc diff --git a/docs/asciidoc/snippetRetryWhenRetry.adoc b/docs/modules/ROOT/pages/snippetRetryWhenRetry.adoc similarity index 100% rename from 
docs/asciidoc/snippetRetryWhenRetry.adoc rename to docs/modules/ROOT/pages/snippetRetryWhenRetry.adoc diff --git a/docs/asciidoc/subscribe-backpressure.adoc b/docs/modules/ROOT/pages/subscribe-backpressure.adoc similarity index 100% rename from docs/asciidoc/subscribe-backpressure.adoc rename to docs/modules/ROOT/pages/subscribe-backpressure.adoc diff --git a/docs/asciidoc/subscribe-details.adoc b/docs/modules/ROOT/pages/subscribe-details.adoc similarity index 100% rename from docs/asciidoc/subscribe-details.adoc rename to docs/modules/ROOT/pages/subscribe-details.adoc diff --git a/docs/asciidoc/testing.adoc b/docs/modules/ROOT/pages/testing.adoc similarity index 100% rename from docs/asciidoc/testing.adoc rename to docs/modules/ROOT/pages/testing.adoc From 2d340c040cb21309c3174d115a320d8d244fd6f6 Mon Sep 17 00:00:00 2001 From: Pierre De Rop Date: Thu, 2 May 2024 18:07:14 +0200 Subject: [PATCH 02/26] Refactor ascidoc adoc files to Antora. core features and advenced features are now split in multiple adoc files. 
--- docs/modules/ROOT/nav.adoc | 134 +- docs/modules/ROOT/pages/aboutDoc.adoc | 41 +- .../pages/advanced-contextPropagation.adoc | 20 +- docs/modules/ROOT/pages/advancedFeatures.adoc | 1197 +---------------- ...-multiple-subscribers-connectableflux.adoc | 97 ++ .../advanced-mutualizing-operator-usage.adoc | 101 ++ .../advanced-parallelizing-parralelflux.adoc | 80 ++ .../advanced-three-sorts-batching.adoc | 155 +++ .../ROOT/pages/advancedFeatures/cleanup.adoc | 74 + .../ROOT/pages/advancedFeatures/context.adoc | 371 +++++ .../ROOT/pages/advancedFeatures/hooks.adoc | 87 ++ .../pages/advancedFeatures/null-safety.adoc | 38 + .../advancedFeatures/reactor-hotCold.adoc | 114 ++ .../advancedFeatures/scheduler-factory.adoc | 41 + .../ROOT/pages/apdx-howtoReadMarbles.adoc | 20 +- docs/modules/ROOT/pages/apdx-implem.adoc | 3 +- docs/modules/ROOT/pages/apdx-migrating.adoc | 3 +- .../ROOT/pages/apdx-operatorChoice.adoc | 158 +-- .../ROOT/pages/apdx-optimizations.adoc | 7 +- .../modules/ROOT/pages/apdx-reactorExtra.adoc | 18 +- .../ROOT/pages/apdx-writingOperator.adoc | 3 +- docs/modules/ROOT/pages/appendices.adoc | 27 + docs/modules/ROOT/pages/coreFeatures.adoc | 1140 ---------------- .../pages/coreFeatures/error-handling.adoc | 787 +++++++++++ .../modules/ROOT/pages/coreFeatures/flux.adoc | 19 + .../modules/ROOT/pages/coreFeatures/mono.adoc | 27 + .../programmatically-creating-sequence.adoc | 1 + .../ROOT/pages/coreFeatures/schedulers.adoc | 206 +++ ...te-a-flux-or-mono-and-subscribe-to-it.adoc | 75 ++ .../ROOT/pages/coreFeatures/sinks.adoc | 1 + docs/modules/ROOT/pages/debugging.adoc | 83 +- docs/modules/ROOT/pages/faq.adoc | 93 +- docs/modules/ROOT/pages/gettingStarted.adoc | 35 +- docs/modules/ROOT/pages/kotlin.adoc | 8 +- docs/modules/ROOT/pages/metrics-details.adoc | 16 +- docs/modules/ROOT/pages/metrics.adoc | 36 +- docs/modules/ROOT/pages/processors.adoc | 40 +- docs/modules/ROOT/pages/producing.adoc | 51 +- .../ROOT/pages/reactiveProgramming.adoc | 33 +- 
.../ROOT/pages/snippetRetryWhenRetry.adoc | 4 +- .../ROOT/pages/subscribe-backpressure.adoc | 14 +- .../modules/ROOT/pages/subscribe-details.adoc | 47 +- docs/modules/ROOT/pages/testing.adoc | 55 +- 43 files changed, 2718 insertions(+), 2842 deletions(-) create mode 100644 docs/modules/ROOT/pages/advancedFeatures/advanced-broadcast-multiple-subscribers-connectableflux.adoc create mode 100644 docs/modules/ROOT/pages/advancedFeatures/advanced-mutualizing-operator-usage.adoc create mode 100644 docs/modules/ROOT/pages/advancedFeatures/advanced-parallelizing-parralelflux.adoc create mode 100644 docs/modules/ROOT/pages/advancedFeatures/advanced-three-sorts-batching.adoc create mode 100644 docs/modules/ROOT/pages/advancedFeatures/cleanup.adoc create mode 100644 docs/modules/ROOT/pages/advancedFeatures/context.adoc create mode 100644 docs/modules/ROOT/pages/advancedFeatures/hooks.adoc create mode 100644 docs/modules/ROOT/pages/advancedFeatures/null-safety.adoc create mode 100644 docs/modules/ROOT/pages/advancedFeatures/reactor-hotCold.adoc create mode 100644 docs/modules/ROOT/pages/advancedFeatures/scheduler-factory.adoc create mode 100644 docs/modules/ROOT/pages/appendices.adoc create mode 100644 docs/modules/ROOT/pages/coreFeatures/error-handling.adoc create mode 100644 docs/modules/ROOT/pages/coreFeatures/flux.adoc create mode 100644 docs/modules/ROOT/pages/coreFeatures/mono.adoc create mode 100644 docs/modules/ROOT/pages/coreFeatures/programmatically-creating-sequence.adoc create mode 100644 docs/modules/ROOT/pages/coreFeatures/schedulers.adoc create mode 100644 docs/modules/ROOT/pages/coreFeatures/simple-ways-to-create-a-flux-or-mono-and-subscribe-to-it.adoc create mode 100644 docs/modules/ROOT/pages/coreFeatures/sinks.adoc diff --git a/docs/modules/ROOT/nav.adoc b/docs/modules/ROOT/nav.adoc index 67f4172443..19f09ce25d 100644 --- a/docs/modules/ROOT/nav.adoc +++ b/docs/modules/ROOT/nav.adoc @@ -1,107 +1,29 @@ -= Reactor 3 Reference Guide -Stephane Maldini ; Simon 
Baslé -ifndef::host-github[:ext-relative: {outfilesuffix}] -:doctype: book -:icons: font -:toc2: -:sectnums: -:sectanchors: +* xref:aboutDoc.adoc[] +* xref:gettingStarted.adoc[] +* xref:reactiveProgramming.adoc[] +* xref:coreFeatures.adoc[] +** xref:coreFeatures/flux.adoc[] +** xref:coreFeatures/mono.adoc[] +** xref:coreFeatures/simple-ways-to-create-a-flux-or-mono-and-subscribe-to-it.adoc[] +** xref:coreFeatures/programmatically-creating-sequence.adoc[] +** xref:coreFeatures/schedulers.adoc[] +** xref:coreFeatures/error-handling.adoc[] +** xref:coreFeatures/sinks.adoc[] +* xref:kotlin.adoc[] +* xref:testing.adoc[] +* xref:debugging.adoc[] +* xref:metrics.adoc[] +* xref:advancedFeatures.adoc[] +** xref:advancedFeatures/advanced-mutualizing-operator-usage.adoc[] +** xref:advancedFeatures/reactor-hotCold.adoc[] +** xref:advancedFeatures/advanced-broadcast-multiple-subscribers-connectableflux.adoc[] +** xref:advancedFeatures/advanced-three-sorts-batching.adoc[] +** xref:advancedFeatures/advanced-parallelizing-parralelflux.adoc[] +** xref:advancedFeatures/scheduler-factory.adoc[] +** xref:advancedFeatures/hooks.adoc[] +** xref:advancedFeatures/context.adoc[] +** xref:advanced-contextPropagation.adoc[] +** xref:advancedFeatures/cleanup.adoc[] +** xref:advancedFeatures/null-safety.adoc[] +* xref:appendices.adoc[] -include::aboutDoc.adoc[leveloffset=1] -ifeval::["{backend}" == "html5"] -https://github.com/reactor/reactor-core/edit/main/docs/asciidoc/aboutDoc.adoc[Suggest Edit^, title="Suggest an edit to the above section via github", role="fa fa-edit"] -to "<>" -endif::[] - -include::gettingStarted.adoc[leveloffset=1] -ifeval::["{backend}" == "html5"] -https://github.com/reactor/reactor-core/edit/main/docs/asciidoc/gettingStarted.adoc[Suggest Edit^, title="Suggest an edit to the above section via github", role="fa fa-edit"] -to "<>" -endif::[] - -include::reactiveProgramming.adoc[leveloffset=1] -ifeval::["{backend}" == "html5"] 
-https://github.com/reactor/reactor-core/edit/main/docs/asciidoc/reactiveProgramming.adoc[Suggest Edit^, title="Suggest an edit to the above section via github", role="fa fa-edit"] -to "<>" -endif::[] - -include::coreFeatures.adoc[leveloffset=1] -ifeval::["{backend}" == "html5"] -https://github.com/reactor/reactor-core/edit/main/docs/asciidoc/coreFeatures.adoc[Suggest Edit^, title="Suggest an edit to the above section via github", role="fa fa-edit"] -to "<>" -endif::[] - -//TODO see other sections from consuming.adoc - -include::kotlin.adoc[leveloffset=1] -ifeval::["{backend}" == "html5"] -https://github.com/reactor/reactor-core/edit/main/docs/asciidoc/kotlin.adoc[Suggest Edit^, title="Suggest an edit to the above section via github", role="fa fa-edit"] -to "<>" -endif::[] - -include::testing.adoc[leveloffset=1] -ifeval::["{backend}" == "html5"] -https://github.com/reactor/reactor-core/edit/main/docs/asciidoc/testing.adoc[Suggest Edit^, title="Suggest an edit to the above section via github", role="fa fa-edit"] -to "<>" -endif::[] - -include::debugging.adoc[leveloffset=1] -ifeval::["{backend}" == "html5"] -https://github.com/reactor/reactor-core/edit/main/docs/asciidoc/debugging.adoc[Suggest Edit^, title="Suggest an edit to the above section via github", role="fa fa-edit"] -to "<>" -endif::[] - -include::metrics.adoc[leveloffset=1] -ifeval::["{backend}" == "html5"] -https://github.com/reactor/reactor-core/edit/main/docs/asciidoc/metrics.adoc[Suggest Edit^, title="Suggest an edit to the above section via github", role="fa fa-edit"] -to "<>" -endif::[] -include::metrics-details.adoc[] - -include::advancedFeatures.adoc[leveloffset=1] -ifeval::["{backend}" == "html5"] -https://github.com/reactor/reactor-core/edit/main/docs/asciidoc/advancedFeatures.adoc[Suggest Edit^, title="Suggest an edit to the above section via github", role="fa fa-edit"] -to "<>" -endif::[] - - -[appendix] -include::apdx-operatorChoice.adoc[leveloffset=1] -ifeval::["{backend}" == "html5"] 
-https://github.com/reactor/reactor-core/edit/main/docs/asciidoc/apdx-operatorChoice.adoc[Suggest Edit^, title="Suggest an edit to the above section via github", role="fa fa-edit"] -to "<>" -endif::[] - -[appendix] -include::apdx-howtoReadMarbles.adoc[leveloffset=1] -ifeval::["{backend}" == "html5"] -https://github.com/reactor/reactor-core/edit/main/docs/asciidoc/apdx-howtoReadMarbles.adoc[Suggest Edit^, title="Suggest an edit to the above section via github", role="fa fa-edit"] -to "<>" -endif::[] - -[appendix] -include::faq.adoc[leveloffset=1] -ifeval::["{backend}" == "html5"] -https://github.com/reactor/reactor-core/edit/main/docs/asciidoc/faq.adoc[Suggest Edit^, title="Suggest an edit to the above section via github", role="fa fa-edit"] -to "<>" -endif::[] - -[appendix] -include::apdx-reactorExtra.adoc[leveloffset=1] -ifeval::["{backend}" == "html5"] -https://github.com/reactor/reactor-core/edit/main/docs/asciidoc/apdx-reactorExtra.adoc[Suggest Edit^, title="Suggest an edit to the above section via github", role="fa fa-edit"] -to "<>" -endif::[] - -//TODO later add appendices about internals, writing operators, fusion -//[appendix] -//include::apdx-implem.adoc[] -// -//[appendix] -//include::apdx-writingOperator.adoc[] -//[appendix] -//include::apdx-optimizations.adoc[] - -//TODO later add appendix about migrating from RxJava? -//[appendix] -//include::apdx-migrating.adoc[] \ No newline at end of file diff --git a/docs/modules/ROOT/pages/aboutDoc.adoc b/docs/modules/ROOT/pages/aboutDoc.adoc index 6a909cbd3c..5ebe05b10e 100644 --- a/docs/modules/ROOT/pages/aboutDoc.adoc +++ b/docs/modules/ROOT/pages/aboutDoc.adoc @@ -1,10 +1,14 @@ [[about-doc]] = About the Documentation :linkattrs: + +Stephane Maldini ; Simon Baslé Version {project-version} + This section provides a brief overview of Reactor reference documentation. You do not need to read this guide in a linear fashion. Each piece stands on its own, though they often refer to other pieces. 
+[[latest-version-copyright-notice]] == Latest Version & Copyright Notice The Reactor reference guide is available as HTML documents. The latest copy is available at https://projectreactor.io/docs/core/release/reference/index.html @@ -13,10 +17,11 @@ Copies of this document may be made for your own use and for distribution to oth provided that you do not charge any fee for such copies and further provided that each copy contains this Copyright Notice, whether distributed in print or electronically. +[[contributing-to-the-documentation]] == Contributing to the Documentation The reference guide is written in -https://asciidoctor.org/docs/asciidoc-writers-guide/[Asciidoc], and you can find its sources at -https://github.com/reactor/reactor-core/tree/main/docs/asciidoc. +https://asciidoctor.org/docs/asciidoc-writers-guide/[Asciidoc] using https://docs.antora.org/antora/latest/[Antora], and you can find its +sources at {reactor-github-repo}/docs/. If you have an improvement or a suggestion, we will be happy to get a pull request from you! @@ -26,12 +31,13 @@ rendering. Some of the sections rely on included files, so GitHub rendering is not always complete. ifeval::["{backend}" == "html5"] -TIP: To facilitate documentation edits, most sections have a link at the end that opens -an edit UI directly on GitHub for the main source file for that section. These links are -only present in the HTML5 version of this reference guide. They look like the following: -link:https://github.com/reactor/reactor-core/edit/main/docs/asciidoc/aboutDoc.adoc[Suggest Edit^, role="fa fa-edit"] to <>. +TIP: To facilitate documentation edits, you can edit the current page from the `Edit this Page` link located in the upper right corner sidebar. The link opens +an edit `UI` directly on `GitHub` for the main source file for the current page. These links are +only present in the `HTML5` version of this reference guide. 
They look like the following link: +link:https://github.com/reactor/reactor-core/edit/main/docs/modules/ROOT/pages/aboutDoc.adoc[Edit this Page^, role="fa fa-edit"] to xref:aboutDoc.adoc[About the Documentation]. endif::[] +[[getting-help]] == Getting Help You can reach out for help in several ways with Reactor: @@ -44,22 +50,23 @@ essential features) and https://github.com/reactor/reactor-addons/issues[reactor (which covers reactor-test and adapters issues). NOTE: All of Reactor is open source, -https://github.com/reactor/reactor-core/tree/main/docs/asciidoc[including this +{reactor-github-repo}/docs[including this documentation]. If you find problems with the docs or if you want to improve them, please https://github.com/reactor/.github/blob/main/CONTRIBUTING.md[get involved]. +[[where-to-go-from-here]] == Where to Go from Here -* Head to <> if you feel like jumping straight into the code. +* Head to xref:gettingStarted.adoc[Getting Started] if you feel like jumping straight into the code. * If you are new to reactive programming, though, you should probably start with the -<>. +xref:reactiveProgramming.adoc[Introduction to Reactive Programming]. * If you are familiar with Reactor concepts and are just looking for the right tool -for the job but cannot think of a relevant operator, try the <> Appendix. -* In order to dig deeper into the core features of Reactor, head to <> to +for the job but cannot think of a relevant operator, try the xref:apdx-operatorChoice.adoc[Which operator do I need?] Appendix. +* In order to dig deeper into the core features of Reactor, head to xref:coreFeatures.adoc[Reactor Core Features] to learn: -** More about Reactor's reactive types in the <> and <> +** More about Reactor's reactive types in the xref:coreFeatures/flux.adoc[`Flux`, an Asynchronous Sequence of 0-N Items] and xref:coreFeatures/mono.adoc[`Mono`, an Asynchronous 0-1 Result] sections. -** How to switch threading contexts using <>. 
-** How to handle errors in the <> section. -* Unit testing? Yes it is possible with the `reactor-test` project! See <>. -* <> offers a more advanced way of creating reactive sources. -* Other advanced topics are covered in <>. +** How to switch threading contexts using xref:apdx-reactorExtra.adoc#extra-schedulers[a scheduler]. +** How to handle errors in the xref:coreFeatures/error-handling.adoc[Handling Errors] section. +* Unit testing? Yes it is possible with the `reactor-test` project! See xref:testing.adoc[Testing]. +* xref:producing.adoc[Programmatically creating a sequence] offers a more advanced way of creating reactive sources. +* Other advanced topics are covered in xref:advancedFeatures.adoc[Advanced Features and Concepts]. diff --git a/docs/modules/ROOT/pages/advanced-contextPropagation.adoc b/docs/modules/ROOT/pages/advanced-contextPropagation.adoc index acc5639bad..3ec7a1e5a3 100644 --- a/docs/modules/ROOT/pages/advanced-contextPropagation.adoc +++ b/docs/modules/ROOT/pages/advanced-contextPropagation.adoc @@ -17,8 +17,8 @@ Reactor-Core supports two modes of operation with `io.micrometer:context-propaga Please note that this mode applies only to new subscriptions, so it is recommended to enable this hook when the application starts. -Their key differences are discussed in the context of either <> to Reactor `Context`, or <> that +Their key differences are discussed in the context of either xref:advanced-contextPropagation.adoc#context-writing[writing data] + to Reactor `Context`, or xref:advanced-contextPropagation.adoc#context-accessing[accessing `ThreadLocal` state] that reflects the contents of the `Context` of currently attached `Subscriber` for reading. [[context-writing]] @@ -28,13 +28,14 @@ Depending on the individual application, you might either have to store already `ThreadLocal` state as entries in the `Context`, or might only need to directly populate the `Context`. 
+[[contextwrite-operator]] === `contextWrite` Operator When the values meant to be accessed as `ThreadLocal` are not (or do not need to be) present at the time of subscription, they can immediately be stored in the `Context`: -==== [source,java] +[%unbreakable] ---- // assuming TL is known to Context-Propagation as key TLKEY. static final ThreadLocal TL = new ThreadLocal<>(); @@ -49,8 +50,8 @@ Mono.deferContextual(ctx -> .block(); // returns "delayed ctx[TLKEY]=HELLO, TL=null" in default mode // returns "delayed ctx[TLKEY]=HELLO, TL=HELLO" in automatic mode ---- -==== +[[contextcapture-operator]] === `contextCapture` Operator This operator can be used when one needs to capture `ThreadLocal` value(s) at subscription time and reflect these values in the Reactor `Context` for the benefit of upstream operators. @@ -61,8 +62,8 @@ to populate the Reactor `Context`. As a result, if there were any `ThreadLocal` values during subscription phase, for which there is a registered `ThreadLocalAccessor`, their values would now be stored in the Reactor `Context` and visible at runtime in upstream operators. -==== [source,java] +[%unbreakable] ---- // assuming TL is known to Context-Propagation as key TLKEY. static final ThreadLocal TL = new ThreadLocal<>(); @@ -78,7 +79,6 @@ Mono.deferContextual(ctx -> .block(); // returns "delayed ctx[TLKEY]=HELLO, TL=null" in default mode // returns "delayed ctx[TLKEY]=HELLO, TL=HELLO" in automatic mode ---- -==== NOTE: In the **automatic** mode, blocking operators, such as `Flux#blockFirst()`, `Flux#blockLast()`, `Flux#toIterable()`, `Mono#block()`, `Mono#blockOptional()`, and @@ -97,6 +97,7 @@ Reactor-Core performs `ThreadLocal` state restoration using the values stored in `Context` and `ThreadLocalAccessor` instances registered in `ContextRegistry` that match by key. 
+[[default-mode-operators-for-snapshot-restoration:-handle-and-tap]] === Default mode operators for snapshot restoration: `handle` and `tap` In the **default** mode, both `Flux` and `Mono` variants of `handle` and `tap` will have @@ -113,8 +114,8 @@ These operators will ensure restoration is performed around the user-provided co The intent is to have a minimalistic set of operators transparently perform restoration. As a result we chose operators with rather general and broad applications (one with transformative capabilities, one with side-effect capabilities) -==== [source,java] +[%unbreakable] ---- //assuming TL is known to Context-Propagation. static final ThreadLocal TL = new ThreadLocal<>(); @@ -130,8 +131,8 @@ Mono.delay(Duration.ofSeconds(1)) .contextCapture() .block(); // prints "null" and returns "handled delayed TL=HELLO" ---- -==== +[[automatic-mode]] === Automatic mode In the **automatic** mode, all operators restore `ThreadLocal` state across `Thread` @@ -154,6 +155,7 @@ treat absent keys in the `Context` for registered instances of `ThreadLocalAcces signals to clear the corresponding `ThreadLocal` state. This is especially important for an empty `Context`, which clears all state for registered `ThreadLocalAccessor` instances. +[[which-mode-should-i-choose]] == Which mode should I choose? Both **default** and **automatic** modes have an impact on performance. Accessing @@ -167,4 +169,4 @@ compromise to make. The **automatic** mode, depending on the flow of your applic the amount of operators used, can be either better or worse than the **default** mode. The only recommendation that can be given is to measure how your application behaves and what scalability and performance characteristics you obtain when presented with a load you -expect. \ No newline at end of file +expect. 
diff --git a/docs/modules/ROOT/pages/advancedFeatures.adoc b/docs/modules/ROOT/pages/advancedFeatures.adoc index 353ab1405f..4eb45803d3 100644 --- a/docs/modules/ROOT/pages/advancedFeatures.adoc +++ b/docs/modules/ROOT/pages/advancedFeatures.adoc @@ -3,1190 +3,15 @@ This chapter covers advanced features and concepts of Reactor, including the following: -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> +* xref:advancedFeatures/advanced-mutualizing-operator-usage.adoc[Mutualizing Operator Usage] +* xref:advancedFeatures/reactor-hotCold.adoc[Hot Versus Cold] +* xref:advancedFeatures/advanced-broadcast-multiple-subscribers-connectableflux.adoc[Broadcasting to Multiple Subscribers with `ConnectableFlux`] +* xref:advancedFeatures/advanced-three-sorts-batching.adoc[Three Sorts of Batching] +* xref:advancedFeatures/advanced-parallelizing-parralelflux.adoc[Parallelizing Work with `ParallelFlux`] +* xref:advancedFeatures/scheduler-factory.adoc[Replacing Default `Schedulers`] +* xref:advancedFeatures/hooks.adoc[Using Global Hooks] +* xref:advancedFeatures/context.adoc[Adding a Context to a Reactive Sequence] +* xref:advanced-contextPropagation.adoc[Context-Propagation Support] +* xref:advancedFeatures/null-safety.adoc[Null Safety] +* xref:advancedFeatures/cleanup.adoc[Dealing with Objects that Need Cleanup] -[[advanced-mutualizing-operator-usage]] -== Mutualizing Operator Usage - -From a clean-code perspective, code reuse is generally a good thing. Reactor offers a few -patterns that can help you reuse and mutualize code, notably for operators or combinations -of operators that you might want to apply regularly in your codebase. If you think of a -chain of operators as a recipe, you can create a "`cookbook`" of operator recipes. - -=== Using the `transform` Operator - -The `transform` operator lets you encapsulate a piece of an operator chain into a -function. 
That function is applied to an original operator chain at assembly time to -augment it with the encapsulated operators. Doing so applies the same operations to all -the subscribers of a sequence and is basically equivalent to chaining the operators -directly. The following code shows an example: - -==== -[source,java] ----- -Function, Flux> filterAndMap = -f -> f.filter(color -> !color.equals("orange")) - .map(String::toUpperCase); - -Flux.fromIterable(Arrays.asList("blue", "green", "orange", "purple")) - .doOnNext(System.out::println) - .transform(filterAndMap) - .subscribe(d -> System.out.println("Subscriber to Transformed MapAndFilter: "+d)); ----- -==== - -The following image shows how the `transform` operator encapsulates flows: - -image::images/gs-transform.png[Transform Operator : encapsulate flows] - -The preceding example produces the following output: - -==== ----- -blue -Subscriber to Transformed MapAndFilter: BLUE -green -Subscriber to Transformed MapAndFilter: GREEN -orange -purple -Subscriber to Transformed MapAndFilter: PURPLE ----- -==== - -=== Using the `transformDeferred` Operator - -The `transformDeferred` operator is similar to `transform` and also lets you encapsulate operators -in a function. The major difference is that this function is applied to the original -sequence _on a per-subscriber basis_. It means that the function can actually produce a -different operator chain for each subscription (by maintaining some state). 
The -following code shows an example: - -==== -[source,java] ----- -AtomicInteger ai = new AtomicInteger(); -Function, Flux> filterAndMap = f -> { - if (ai.incrementAndGet() == 1) { -return f.filter(color -> !color.equals("orange")) - .map(String::toUpperCase); - } - return f.filter(color -> !color.equals("purple")) - .map(String::toUpperCase); -}; - -Flux composedFlux = -Flux.fromIterable(Arrays.asList("blue", "green", "orange", "purple")) - .doOnNext(System.out::println) - .transformDeferred(filterAndMap); - -composedFlux.subscribe(d -> System.out.println("Subscriber 1 to Composed MapAndFilter :"+d)); -composedFlux.subscribe(d -> System.out.println("Subscriber 2 to Composed MapAndFilter: "+d)); ----- -==== - -The following image shows how the `transformDeferred` operator works with per-subscriber transformations: - -image::images/gs-compose.png[Compose Operator : Per Subscriber transformation] - -The preceding example produces the following output: - -==== ----- -blue -Subscriber 1 to Composed MapAndFilter :BLUE -green -Subscriber 1 to Composed MapAndFilter :GREEN -orange -purple -Subscriber 1 to Composed MapAndFilter :PURPLE -blue -Subscriber 2 to Composed MapAndFilter: BLUE -green -Subscriber 2 to Composed MapAndFilter: GREEN -orange -Subscriber 2 to Composed MapAndFilter: ORANGE -purple ----- -==== - -[[reactor.hotCold]] -== Hot Versus Cold - -So far, we have considered that all `Flux` (and `Mono`) are the same: They all represent -an asynchronous sequence of data, and nothing happens before you subscribe. - -Really, though, there are two broad families of publishers: hot and cold. - -The earlier description applies to the cold family of publishers. They generate data anew -for each subscription. If no subscription is created, data never gets generated. - -Think of an HTTP request: Each new subscriber triggers an HTTP call, but no call is -made if no one is interested in the result. 
- -Hot publishers, on the other hand, do not depend on any number of subscribers. They -might start publishing data right away and would continue doing so whenever a new -`Subscriber` comes in (in which case, the subscriber would see only new elements emitted -_after_ it subscribed). For hot publishers, _something_ does indeed happen before you -subscribe. - -One example of the few hot operators in Reactor is `just`: It directly captures the value -at assembly time and replays it to anybody subscribing to it later. To re-use the HTTP -call analogy, if the captured data is the result of an HTTP call, then only one network -call is made, when instantiating `just`. - -To transform `just` into a cold publisher, you can use `defer`. It defers the HTTP -request in our example to subscription time (and would result in a separate network call -for each new subscription). - -On the opposite, `share()` and `replay(...)` can be used to turn a cold publisher into -a hot one (at least once a first subscription has happened). Both of these also have -`Sinks.Many` equivalents in the `Sinks` class, which allow programmatically -feeding the sequence. - -Consider two examples, one that demonstrates a cold Flux and the other that makes use of the -`Sinks` to simulate a hot Flux. 
The following code shows the first example: - -==== -[source,java] ----- -Flux source = Flux.fromIterable(Arrays.asList("blue", "green", "orange", "purple")) - .map(String::toUpperCase); - -source.subscribe(d -> System.out.println("Subscriber 1: "+d)); -source.subscribe(d -> System.out.println("Subscriber 2: "+d)); ----- -==== - -This first example produces the following output: - -==== ----- -Subscriber 1: BLUE -Subscriber 1: GREEN -Subscriber 1: ORANGE -Subscriber 1: PURPLE -Subscriber 2: BLUE -Subscriber 2: GREEN -Subscriber 2: ORANGE -Subscriber 2: PURPLE ----- -==== - -The following image shows the replay behavior: - -image::images/gs-cold.png[Replaying behavior] - -Both subscribers catch all four colors, because each subscriber causes the -process defined by the operators on the `Flux` to run. - -Compare the first example to the second example, shown in the following code: - -==== -[source,java] ----- -Sinks.Many hotSource = Sinks.unsafe().many().multicast().directBestEffort(); - -Flux hotFlux = hotSource.asFlux().map(String::toUpperCase); - -hotFlux.subscribe(d -> System.out.println("Subscriber 1 to Hot Source: "+d)); - -hotSource.emitNext("blue", FAIL_FAST); // <1> -hotSource.tryEmitNext("green").orThrow(); // <2> - -hotFlux.subscribe(d -> System.out.println("Subscriber 2 to Hot Source: "+d)); - -hotSource.emitNext("orange", FAIL_FAST); -hotSource.emitNext("purple", FAIL_FAST); -hotSource.emitComplete(FAIL_FAST); ----- -<1> for more details about sinks, see <> -<2> side note: `orThrow()` here is an alternative to `emitNext` + `Sinks.EmitFailureHandler.FAIL_FAST` -that is suitable for tests, since throwing there is acceptable (more so than in reactive -applications). 
-==== - -The second example produces the following output: - -==== ----- -Subscriber 1 to Hot Source: BLUE -Subscriber 1 to Hot Source: GREEN -Subscriber 1 to Hot Source: ORANGE -Subscriber 2 to Hot Source: ORANGE -Subscriber 1 to Hot Source: PURPLE -Subscriber 2 to Hot Source: PURPLE ----- -==== - -The following image shows how a subscription is broadcast: - -image::images/gs-hot.png[Broadcasting a subscription] - -Subscriber 1 catches all four colors. Subscriber 2, having been created after the first -two colors were produced, catches only the last two colors. This difference accounts for -the doubling of `ORANGE` and `PURPLE` in the output. The process described by the -operators on this Flux runs regardless of when subscriptions have been attached. - -[[advanced-broadcast-multiple-subscribers-connectableflux]] -== Broadcasting to Multiple Subscribers with `ConnectableFlux` - -Sometimes, you may want to not defer only some processing to the subscription time of one -subscriber, but you might actually want for several of them to rendezvous and then -trigger the subscription and data generation. - -This is what `ConnectableFlux` is made for. Two main patterns are covered in the `Flux` -API that return a `ConnectableFlux`: `publish` and `replay`. - -* `publish` dynamically tries to respect the demand from its various subscribers, in -terms of backpressure, by forwarding these requests to the source. Most notably, if any -subscriber has a pending demand of `0`, publish pauses its requesting to the source. -* `replay` buffers data seen through the first subscription, up to configurable limits -(in time and buffer size). It replays the data to subsequent subscribers. - -A `ConnectableFlux` offers additional methods to manage subscriptions downstream -versus subscriptions to the original source. These additional methods include the -following: - -* `connect()` can be called manually once you reach enough subscriptions to the `Flux`. 
That -triggers the subscription to the upstream source. -* `autoConnect(n)` can do the same job automatically once `n` subscriptions have been -made. -* `refCount(n)` not only automatically tracks incoming subscriptions but also detects -when these subscriptions are cancelled. If not enough subscribers are tracked, the source -is "`disconnected`", causing a new subscription to the source later if additional -subscribers appear. -* `refCount(int, Duration)` adds a "`grace period.`" Once the number of tracked subscribers -becomes too low, it waits for the `Duration` before disconnecting the source, potentially -allowing for enough new subscribers to come in and cross the connection threshold again. - -Consider the following example: - -==== -[source,java] ----- -Flux source = Flux.range(1, 3) - .doOnSubscribe(s -> System.out.println("subscribed to source")); - -ConnectableFlux co = source.publish(); - -co.subscribe(System.out::println, e -> {}, () -> {}); -co.subscribe(System.out::println, e -> {}, () -> {}); - -System.out.println("done subscribing"); -Thread.sleep(500); -System.out.println("will now connect"); - -co.connect(); ----- -==== - -The preceding code produces the following output: - -==== ----- -done subscribing -will now connect -subscribed to source -1 -1 -2 -2 -3 -3 ----- -==== - -The following code uses `autoConnect`: - -==== -[source,java] ----- -Flux source = Flux.range(1, 3) - .doOnSubscribe(s -> System.out.println("subscribed to source")); - -Flux autoCo = source.publish().autoConnect(2); - -autoCo.subscribe(System.out::println, e -> {}, () -> {}); -System.out.println("subscribed first"); -Thread.sleep(500); -System.out.println("subscribing second"); -autoCo.subscribe(System.out::println, e -> {}, () -> {}); ----- -==== - -The preceding code produces the following output: - -==== ----- -subscribed first -subscribing second -subscribed to source -1 -1 -2 -2 -3 -3 ----- -==== - -[[advanced-three-sorts-batching]] -== Three Sorts of Batching - -When 
you have lots of elements and you want to separate them into batches, you have three -broad solutions in Reactor: grouping, windowing, and buffering. These three are -conceptually close, because they redistribute a `Flux` into an aggregate. Grouping and -windowing create a `Flux>`, while buffering aggregates into a `Collection`. - -=== Grouping with `Flux>` - -Grouping is the act of splitting the source `Flux` into multiple batches, each of which -matches a key. - -The associated operator is `groupBy`. - -Each group is represented as a `GroupedFlux`, which lets you retrieve the key by calling its -`key()` method. - -There is no necessary continuity in the content of the groups. Once a source element -produces a new key, the group for this key is opened and elements that match the key end -up in the group (several groups could be open at the same time). - -This means that groups: - - 1. Are always disjoint (a source element belongs to one and only one group). - 2. Can contain elements from different places in the original sequence. - 3. Are never empty. - -The following example groups values by whether they are even or odd: - -==== -[source,java] ----- -StepVerifier.create( - Flux.just(1, 3, 5, 2, 4, 6, 11, 12, 13) - .groupBy(i -> i % 2 == 0 ? "even" : "odd") - .concatMap(g -> g.defaultIfEmpty(-1) //if empty groups, show them - .map(String::valueOf) //map to string - .startWith(g.key())) //start with the group's key - ) - .expectNext("odd", "1", "3", "5", "11", "13") - .expectNext("even", "2", "4", "6", "12") - .verifyComplete(); ----- -==== - -WARNING: Grouping is best suited for when you have a medium to low number of groups. The -groups must also imperatively be consumed (such as by a `flatMap`) so that `groupBy` -continues fetching data from upstream and feeding more groups. Sometimes, these two -constraints multiply and lead to hangs, such as when you have a high cardinality and the -concurrency of the `flatMap` consuming the groups is too low. 
- -=== Windowing with `Flux>` - -Windowing is the act of splitting the source `Flux` into _windows_, by criteria of -size, time, boundary-defining predicates, or boundary-defining `Publisher`. - -The associated operators are `window`, `windowTimeout`, `windowUntil`, `windowWhile`, and -`windowWhen`. - -Contrary to `groupBy`, which randomly overlaps according to incoming keys, -windows are (most of the time) opened sequentially. - -Some variants can still overlap, though. For instance, in `window(int maxSize, int skip)` -the `maxSize` parameter is the number of elements after which a window -closes, and the `skip` parameter is the number of elements in the source after which a -new window is opened. So if `maxSize > skip`, a new window opens before the previous one -closes and the two windows overlap. - -The following example shows overlapping windows: - -==== -[source,java] ----- -StepVerifier.create( - Flux.range(1, 10) - .window(5, 3) //overlapping windows - .concatMap(g -> g.defaultIfEmpty(-1)) //show empty windows as -1 - ) - .expectNext(1, 2, 3, 4, 5) - .expectNext(4, 5, 6, 7, 8) - .expectNext(7, 8, 9, 10) - .expectNext(10) - .verifyComplete(); ----- -==== - -NOTE: With the reverse configuration (`maxSize` < `skip`), some elements from -the source are dropped and are not part of any window. 
- -In the case of predicate-based windowing through `windowUntil` and `windowWhile`, -having subsequent source elements that do not match the predicate can also lead -to empty windows, as demonstrated in the following example: - -==== -[source,java] ----- -StepVerifier.create( - Flux.just(1, 3, 5, 2, 4, 6, 11, 12, 13) - .windowWhile(i -> i % 2 == 0) - .concatMap(g -> g.defaultIfEmpty(-1)) - ) - .expectNext(-1, -1, -1) //respectively triggered by odd 1 3 5 - .expectNext(2, 4, 6) // triggered by 11 - .expectNext(12) // triggered by 13 - // however, no empty completion window is emitted (would contain extra matching elements) - .verifyComplete(); ----- -==== - -=== Buffering with `Flux>` - -Buffering is similar to windowing, with the following twist: Instead of emitting -_windows_ (each of which is each a `Flux`), it emits _buffers_ (which are `Collection` --- by default, `List`). - -The operators for buffering mirror those for windowing: `buffer`, `bufferTimeout`, -`bufferUntil`, `bufferWhile`, and `bufferWhen`. - -Where the corresponding windowing operator opens a window, a buffering operator creates a -new collection and starts adding elements to it. Where a window closes, the buffering -operator emits the collection. 
- -Buffering can also lead to dropping source elements or having overlapping buffers, as -the following example shows: - -==== -[source,java] ----- -StepVerifier.create( - Flux.range(1, 10) - .buffer(5, 3) //overlapping buffers - ) - .expectNext(Arrays.asList(1, 2, 3, 4, 5)) - .expectNext(Arrays.asList(4, 5, 6, 7, 8)) - .expectNext(Arrays.asList(7, 8, 9, 10)) - .expectNext(Collections.singletonList(10)) - .verifyComplete(); ----- -==== - -Unlike in windowing, `bufferUntil` and `bufferWhile` do not emit an empty buffer, as -the following example shows: - -==== -[source,java] ----- -StepVerifier.create( - Flux.just(1, 3, 5, 2, 4, 6, 11, 12, 13) - .bufferWhile(i -> i % 2 == 0) - ) - .expectNext(Arrays.asList(2, 4, 6)) // triggered by 11 - .expectNext(Collections.singletonList(12)) // triggered by 13 - .verifyComplete(); ----- -==== - -[[advanced-parallelizing-parralelflux]] -== Parallelizing Work with `ParallelFlux` - -With multi-core architectures being a commodity nowadays, being able to easily -parallelize work is important. Reactor helps with that by providing a special type, -`ParallelFlux`, that exposes operators that are optimized for parallelized work. - -To obtain a `ParallelFlux`, you can use the `parallel()` operator on any `Flux`. -By itself, this method does not parallelize the work. Rather, it divides -the workload into "`rails`" (by default, as many rails as there are CPU cores). - -In order to tell the resulting `ParallelFlux` where to run each rail (and, by -extension, to run rails in parallel) you have to use `runOn(Scheduler)`. Note that -there is a recommended dedicated `Scheduler` for parallel work: `Schedulers.parallel()`. - -Compare the next two examples: - -==== -[source,java] ----- -Flux.range(1, 10) - .parallel(2) //<1> - .subscribe(i -> System.out.println(Thread.currentThread().getName() + " -> " + i)); ----- -<1> We force a number of rails instead of relying on the number of CPU cores. 
-==== - -==== -[source,java] ----- -Flux.range(1, 10) - .parallel(2) - .runOn(Schedulers.parallel()) - .subscribe(i -> System.out.println(Thread.currentThread().getName() + " -> " + i)); ----- -==== - -The first example produces the following output: - -==== ----- -main -> 1 -main -> 2 -main -> 3 -main -> 4 -main -> 5 -main -> 6 -main -> 7 -main -> 8 -main -> 9 -main -> 10 ----- -==== - -The second correctly parallelizes on two threads, as shown in the following output: - -==== ----- -parallel-1 -> 1 -parallel-2 -> 2 -parallel-1 -> 3 -parallel-2 -> 4 -parallel-1 -> 5 -parallel-2 -> 6 -parallel-1 -> 7 -parallel-1 -> 9 -parallel-2 -> 8 -parallel-2 -> 10 ----- -==== - -If, once you process your sequence in parallel, you want to revert back to a "`normal`" -`Flux` and apply the rest of the operator chain in a sequential manner, you can use the -`sequential()` method on `ParallelFlux`. - -Note that `sequential()` is implicitly applied if you `subscribe` to the `ParallelFlux` -with a `Subscriber` but not when using the lambda-based variants of `subscribe`. - -Note also that `subscribe(Subscriber)` merges all the rails, while -`subscribe(Consumer)` runs all the rails. If the `subscribe()` method has a lambda, -each lambda is executed as many times as there are rails. - -You can also access individual rails or "`groups`" as a `Flux>` through the -`groups()` method and apply additional operators to them through the `composeGroup()` -method. - -[[scheduler-factory]] -== Replacing Default `Schedulers` - -As we described in the <> section, Reactor Core comes with several -`Scheduler` implementations. While you can always create new instances through the `new*` -factory methods, each `Scheduler` flavor also has a default singleton instance that is -accessible through the direct factory method (such as `Schedulers.boundedElastic()` versus -`Schedulers.newBoundedElastic(...)`). 
- -These default instances are the ones used by operators that need a `Scheduler` to work -when you do not explicitly specify one. For example, `Flux#delayElements(Duration)` uses -the `Schedulers.parallel()` instance. - -In some cases, however, you might need to change these default instances with something -else in a cross-cutting way, without having to make sure every single operator you call -has your specific `Scheduler` as a parameter. An example is measuring the time every -single scheduled task takes by wrapping the real schedulers, for instrumentation -purposes. In other words, you might want to change the default `Schedulers`. - -Changing the default schedulers is possible through the `Schedulers.Factory` class. By -default, a `Factory` creates all the standard `Scheduler` through similarly named -methods. You can override each of these with your custom implementation. - -Additionally, the factory exposes one additional customization method: -`decorateExecutorService`. It is invoked during the creation of every Reactor Core -`Scheduler` that is backed by a `ScheduledExecutorService` (even non-default instances, -such as those created by calls to `Schedulers.newParallel()`). - -This lets you tune the `ScheduledExecutorService` to be used: The default one is exposed -as a `Supplier` and, depending on the type of `Scheduler` being configured, you can choose -to entirely bypass that supplier and return your own instance or you can `get()` the -default instance and wrap it. - -IMPORTANT: Once you create a `Factory` that fits your needs, you must install it by calling -`Schedulers.setFactory(Factory)`. - -Finally, there is a last customizable hook in `Schedulers`: `onHandleError`. This hook is -invoked whenever a `Runnable` task submitted to a `Scheduler` throws an `Exception` (note -that if there is an `UncaughtExceptionHandler` set for the `Thread` that ran the task, -both the handler and the hook are invoked). 
- -[[hooks]] -== Using Global Hooks - -Reactor has another category of configurable callbacks that are invoked by Reactor -operators in various situations. They are all set in the `Hooks` class, and they fall into -three categories: - -* <> -* <> -* <> - -[[hooks-dropping]] -=== Dropping Hooks - -Dropping hooks are invoked when the source of an operator does not comply with the -Reactive Streams specification. These kind of errors are outside of the normal execution -path (that is, they cannot be propagated through `onError`). - -Typically, a `Publisher` calls `onNext` on the operator despite having already called -`onCompleted` on it previously. In that case, the `onNext` value is dropped. The same -is true for an extraneous `onError` signal. - -The corresponding hooks, `onNextDropped` and `onErrorDropped`, let you provide a global -`Consumer` for these drops. For example, you can use it to log the drop and clean up -resources associated with a value if needed (as it never makes it to the rest of the -reactive chain). - -Setting the hooks twice in a row is additive: every consumer you provide is invoked. The -hooks can be fully reset to their defaults by using the `Hooks.resetOn*Dropped()` methods. - -[[hooks-internal]] -=== Internal Error Hook - -One hook, `onOperatorError`, is invoked by operators when an unexpected `Exception` is -thrown during the execution of their `onNext`, `onError`, and `onComplete` methods. - -Unlike the previous category, this is still within the normal execution path. A typical -example is the `map` operator with a map function that throws an `Exception` (such as -division by zero). It is still possible at this point to go through the usual channel of -`onError`, and that is what the operator does. - -First, it passes the `Exception` through `onOperatorError`. The hook lets you inspect the -error (and the incriminating value, if relevant) and change the `Exception`. 
Of course, -you can also do something on the side, such as log and return the original `Exception`. - -Note that you can set the `onOperatorError` hook multiple times. You can provide a -`String` identifier for a particular `BiFunction` and subsequent calls with different -keys concatenates the functions, which are all executed. On the other hand, reusing the -same key twice lets you replace a function you previously set. - -As a consequence, the default hook behavior can be both fully reset (by using -`Hooks.resetOnOperatorError()`) or partially reset for a specific `key` only (by using -`Hooks.resetOnOperatorError(String)`). - -[[hooks-assembly]] -=== Assembly Hooks - -These hooks tie in the lifecycle of operators. They are invoked when a chain of operators -is assembled (that is, instantiated). `onEachOperator` lets you dynamically change each -operator as it is assembled in the chain, by returning a different `Publisher`. -`onLastOperator` is similar, except that it is invoked only on the last operator in the -chain before the `subscribe` call. - -If you want to decorate all operators with a cross-cutting `Subscriber` implementation, -you can look into the `Operators#lift*` methods to help you deal with the various -types of Reactor `Publishers` out there (`Flux`, `Mono`, `ParallelFlux`, `GroupedFlux`, and `ConnectableFlux`), -as well as their `Fuseable` versions. - -Like `onOperatorError`, these hooks are cumulative and can be identified with a key. They -can also be reset partially or totally. - -=== Hook Presets - -The `Hooks` utility class provides two preset hooks. These are alternatives to -the default behaviors that you can use by calling their corresponding method, rather than -coming up with the hook yourself: - -* `onNextDroppedFail()`: `onNextDropped` used to throw a `Exceptions.failWithCancel()` -exception. It now defaults to logging the dropped value at the DEBUG level. To go back to -the old default behavior of throwing, use `onNextDroppedFail()`. 
- -* `onOperatorDebug()`: This method activates <>. It ties into -the `onOperatorError` hook, so calling `resetOnOperatorError()` also resets it. You can -independently reset it by using `resetOnOperatorDebug()`, as it uses a specific key internally. - - -[[context]] -== Adding a Context to a Reactive Sequence - -One of the big technical challenges encountered when switching from an imperative -programming perspective to a reactive programming mindset lies in how you deal with -threading. - -Contrary to what you might be used to, in reactive programming, you can use a `Thread` -to process several asynchronous sequences that run at roughly the same time (actually, in -non-blocking locksteps). The execution can also easily and often jump from one thread to -another. - -This arrangement is especially hard for developers that use features dependent on the -threading model being more "`stable,`" such as `ThreadLocal`. As it lets you associate -data with a thread, it becomes tricky to use in a reactive context. As a result, -libraries that rely on `ThreadLocal` at least introduce new challenges when used with -Reactor. At worst, they work badly or even fail. Using the MDC of Logback to store and -log correlation IDs is a prime example of such a situation. - -The usual workaround for `ThreadLocal` usage is to move the contextual data, `C`, along -your business data, `T`, in the sequence, by using (for instance) `Tuple2`. This does -not look good and leaks an orthogonal concern (the contextual data) into your method and -`Flux` signatures. - -Since version `3.1.0`, Reactor comes with an advanced feature that is somewhat comparable -to `ThreadLocal` but can be applied to a `Flux` or a `Mono` instead of a `Thread`. -This feature is called `Context`. 
- -As an illustration of what it looks like, the following example both reads from and -writes to `Context`: - -==== -[source,java] ----- -String key = "message"; -Mono r = Mono.just("Hello") - .flatMap(s -> Mono.deferContextual(ctx -> - Mono.just(s + " " + ctx.get(key)))) - .contextWrite(ctx -> ctx.put(key, "World")); - -StepVerifier.create(r) - .expectNext("Hello World") - .verifyComplete(); ----- -==== - -In the following sections, we cover `Context` and how to use it, so that you -can eventually understand the preceding example. - -IMPORTANT: This is an advanced feature that is more targeted at library developers. It -requires good understanding of https://github.com/reactive-streams/reactive-streams-jvm/blob/master/README.md#3-subscription-code[the lifecycle of a `Subscription`] and is intended for -libraries that are responsible for the subscriptions. - -[[context.api]] -=== The `Context` API - -`Context` is an interface reminiscent of `Map`. It stores key-value pairs and lets you -fetch a value you stored by its key. It has a simplified version that only exposes read -methods, the `ContextView`. More specifically: - -* Both key and values are of type `Object`, so a `Context` (and `ContextView`) instance can contain any number of -highly divergent values from different libraries and sources. -* A `Context` is immutable. It exposes write methods like `put` and `putAll` but they produce a new instance. -* For a read-only API that doesn't even expose such write methods, there's the `ContextView` superinterface since 3.4.0 -* You can check whether the key is present with `hasKey(Object key)`. -* Use `getOrDefault(Object key, T defaultValue)` to retrieve a value (cast to a `T`) or -fall back to a default one if the `Context` instance does not have that key. -* Use `getOrEmpty(Object key)` to get an `Optional` (the `Context` instance attempts to cast the -stored value to `T`). 
-* Use `put(Object key, Object value)` to store a key-value pair, returning a new -`Context` instance. You can also merge two contexts into a new one by using -`putAll(ContextView)`. -* Use `delete(Object key)` to remove the value associated to a key, returning a new -`Context`. - -[TIP] -==== -When you create a `Context`, you can create pre-valued `Context` instances with up to five -key-value pairs by using the static `Context.of` methods. They take 2, 4, 6, 8 or 10 -`Object` instances, each couple of `Object` instances being a key-value pair to add to -the `Context`. - -Alternatively you can also create an empty `Context` by using `Context.empty()`. -==== - -[[context.write]] -=== Tying a `Context` to a `Flux` and Writing - -To make a `Context` be useful, it must be tied to a specific sequence and be accessible by -each operator in a chain. Note that the operator must be a Reactor-native operator, as -`Context` is specific to Reactor. - -Actually, a `Context` is tied to each `Subscriber` in a chain. It uses the `Subscription` -propagation mechanism to make itself available to each operator, starting with the final -`subscribe` and moving up the chain. - -In order to populate the `Context`, which can only be done at subscription time, you need -to use the `contextWrite` operator. - -`contextWrite(ContextView)` merges the `ContextView` you provide and the -`Context` from downstream (remember, the `Context` is propagated from the bottom of the -chain towards the top). This is done through a call to `putAll`, resulting in a NEW -`Context` for upstream. - -TIP: You can also use the more advanced `contextWrite(Function)`. -It receives a copy of the `Context` from downstream, lets you put or delete values -as you see fit, and returns the new `Context` to use. You can even decide to return a -completely different instance, although it is really not recommended (doing so might -impact third-party libraries that depend on the `Context`). 
- -[[context.read]] -=== Reading a `Context`, through the `ContextView` - -Once you have populated a `Context`, you may want to peek into it at runtime. -Most of the time, the responsibility of putting information into the `Context` -is on the end user's side, while exploiting that information is on the third-party library's side, -as such libraries are usually upstream of the client code. - -The read oriented operators allow to obtain data from the `Context` in a chain of operators by exposing -its `ContextView`: - - - to access the context from a source-like operator, use `deferContextual` factory method - - to access the context from the middle of an operator chain, use `transformDeferredContextual(BiFunction)` - - alternatively, when dealing with an inner sequence (like inside a `flatMap`), the `ContextView` - can be materialized using `Mono.deferContextual(Mono::just)`. Usually though, you might want - to perform meaningful work directly within the defer's lambda, eg. `Mono.deferContextual(ctx -> doSomethingAsyncWithContextData(v, ctx.get(key)))` - where `v` is the value being flatMapped. - -TIP: In order to read from the `Context` without misleading users into thinking one can write to it -while data is running through the pipeline, only the `ContextView` is exposed by the operators above. -In case one needs to use one of the remaining APIs that still require a `Context`, one can use `Context.of(contextView)` for conversion. - -=== Simple `Context` Examples - -The examples in this section are meant as ways to better understand some of the caveats of -using a `Context`. 
- -We first look back at our simple example from the introduction in a bit more detail, as -the following example shows: - -==== -[source,java] ----- -String key = "message"; -Mono r = Mono.just("Hello") - .flatMap(s -> Mono.deferContextual(ctx -> - Mono.just(s + " " + ctx.get(key)))) //<2> - .contextWrite(ctx -> ctx.put(key, "World")); //<1> - -StepVerifier.create(r) - .expectNext("Hello World") //<3> - .verifyComplete(); ----- -<1> The chain of operators ends with a call to `contextWrite(Function)` that puts -`"World"` into the `Context` under a key of `"message"`. -<2> We `flatMap` on the source element, materializing the `ContextView` with `Mono.deferContextual()` -and directly extract the data associated to `"message"` and concatenate that with the original word. -<3> The resulting `Mono` emits `"Hello World"`. -==== - -IMPORTANT: The numbering above versus the actual line order is not a mistake. It represents -the execution order. Even though `contextWrite` is the last piece of the chain, it is -the one that gets executed first (due to its subscription-time nature and the fact that -the subscription signal flows from bottom to top). - -IMPORTANT: In your chain of operators, the relative positions of where you write to the -`Context` and where you read from it matters. The `Context` -is immutable and its content can only be seen by operators above it, as demonstrated in -the following example: - -==== -[source,java] ----- -String key = "message"; -Mono r = Mono.just("Hello") - .contextWrite(ctx -> ctx.put(key, "World")) //<1> - .flatMap( s -> Mono.deferContextual(ctx -> - Mono.just(s + " " + ctx.getOrDefault(key, "Stranger")))); //<2> - -StepVerifier.create(r) - .expectNext("Hello Stranger") //<3> - .verifyComplete(); ----- -<1> The `Context` is written to too high in the chain. -<2> As a result, in the `flatMap`, there is no value associated with our key. A default value -is used instead. -<3> The resulting `Mono` thus emits `"Hello Stranger"`. 
-==== - -Similarly, in the case of several attempts to write the same key to the `Context`, the -relative order of the writes matters, too. Operators that read the `Context` see -the value that was set closest to being under them, as demonstrated in the following example: - -==== -[source,java] ----- -String key = "message"; -Mono r = Mono - .deferContextual(ctx -> Mono.just("Hello " + ctx.get(key))) - .contextWrite(ctx -> ctx.put(key, "Reactor")) //<1> - .contextWrite(ctx -> ctx.put(key, "World")); //<2> - -StepVerifier.create(r) - .expectNext("Hello Reactor") //<3> - .verifyComplete(); ----- -<1> A write attempt on key `"message"`. -<2> Another write attempt on key `"message"`. -<3> The `deferContextual` only saw the value set closest to it (and below it): `"Reactor"`. -==== - -In the preceding example, the `Context` is populated with `"World"` during subscription. -Then the subscription signal moves upstream and another write happens. This produces a -second immutable `Context` with a value of `"Reactor"`. After that, data starts flowing. -The `deferContextual` sees the `Context` closest to it, which is our second `Context` with the -`"Reactor"` value (exposed to the user as a `ContextView`). - -You might wonder if the `Context` is propagated along with the data signal. If that was -the case, putting another `flatMap` between these two writes would use the value from -the top `Context`. But this is not the case, as demonstrated by the following example: - -==== -[source,java] ----- -String key = "message"; -Mono r = Mono - .deferContextual(ctx -> Mono.just("Hello " + ctx.get(key))) //<3> - .contextWrite(ctx -> ctx.put(key, "Reactor")) //<2> - .flatMap( s -> Mono.deferContextual(ctx -> - Mono.just(s + " " + ctx.get(key)))) //<4> - .contextWrite(ctx -> ctx.put(key, "World")); //<1> - -StepVerifier.create(r) - .expectNext("Hello Reactor World") //<5> - .verifyComplete(); ----- -<1> This is the first write to happen. -<2> This is the second write to happen. 
-<3> The top context read sees second write. -<4> The `flatMap` concatenates the result from initial read with the value from the first write. -<5> The `Mono` emits `"Hello Reactor World"`. -==== - -The reason is that the `Context` is associated to the `Subscriber` and each operator -accesses the `Context` by requesting it from its downstream `Subscriber`. - -One last interesting propagation case is the one where the `Context` is also written to -inside a `flatMap`, as in the following example: - -==== -[source,java] ----- -String key = "message"; -Mono r = Mono.just("Hello") - .flatMap( s -> Mono - .deferContextual(ctxView -> Mono.just(s + " " + ctxView.get(key))) - ) - .flatMap( s -> Mono - .deferContextual(ctxView -> Mono.just(s + " " + ctxView.get(key))) - .contextWrite(ctx -> ctx.put(key, "Reactor")) //<1> - ) - .contextWrite(ctx -> ctx.put(key, "World")); // <2> - -StepVerifier.create(r) - .expectNext("Hello World Reactor") - .verifyComplete(); ----- -<1> This `contextWrite` does not impact anything outside of its `flatMap`. -<2> This `contextWrite` impacts the main sequence's `Context`. -==== - -In the preceding example, the final emitted value is `"Hello World Reactor"` and not "Hello -Reactor World", because the `contextWrite` that writes `"Reactor"` does so as part of -the inner sequence of the second `flatMap`. As a consequence, it is not visible or propagated -through the main sequence and the first `flatMap` does not see it. Propagation and immutability -isolate the `Context` in operators that create intermediate inner sequences such as `flatMap`. - -=== Full Example - -Now we can consider a more real life example of a library reading information from the `Context`: -a reactive HTTP client that takes a `Mono` as the source of data for a `PUT` but -also looks for a particular Context key to add a correlation ID to the request's headers. 
- -From the user perspective, it is called as follows: - -==== -[source,java] ----- -doPut("www.example.com", Mono.just("Walter")) ----- -==== - -In order to propagate a correlation ID, it would be called as follows: - -==== -[source,java] ----- -doPut("www.example.com", Mono.just("Walter")) - .contextWrite(Context.of(HTTP_CORRELATION_ID, "2-j3r9afaf92j-afkaf")) ----- -==== - -As the preceding snippets show, the user code uses `contextWrite` to populate -a `Context` with an `HTTP_CORRELATION_ID` key-value pair. The upstream of the operator is -a `Mono>` (a simplistic representation of an HTTP response) -returned by the HTTP client library. So it effectively passes information from the -user code to the library code. - -The following example shows mock code from the library's perspective that reads the -context and "`augments the request`" if it can find the correlation ID: - -==== -[source,java] ----- -static final String HTTP_CORRELATION_ID = "reactive.http.library.correlationId"; - -Mono> doPut(String url, Mono data) { - Mono>> dataAndContext = - data.zipWith(Mono.deferContextual(c -> // <1> - Mono.just(c.getOrEmpty(HTTP_CORRELATION_ID))) // <2> - ); - - return dataAndContext.handle((dac, sink) -> { - if (dac.getT2().isPresent()) { // <3> - sink.next("PUT <" + dac.getT1() + "> sent to " + url + - " with header X-Correlation-ID = " + dac.getT2().get()); - } - else { - sink.next("PUT <" + dac.getT1() + "> sent to " + url); - } - sink.complete(); - }) - .map(msg -> Tuples.of(200, msg)); -} ----- -<1> Materialize the `ContextView` through `Mono.deferContextual` and... -<2> within the defer, extract a value for the correlation ID key, as an `Optional`. -<3> If the key was present in the context, use the correlation ID as a header. -==== - -The library snippet zips the data `Mono` with `Mono.deferContextual(Mono::just)`. 
-This gives the library a `Tuple2`, and that -context contains the `HTTP_CORRELATION_ID` entry from downstream (as it is on the direct -path to the subscriber). - -The library code then uses `map` to extract an `Optional` for that key, and, if -the entry is present, it uses the passed correlation ID as a `X-Correlation-ID` header. -That last part is simulated by the `handle`. - -The whole test that validates the library code used the correlation ID can be written as -follows: - -==== -[source,java] ----- -@Test -public void contextForLibraryReactivePut() { - Mono put = doPut("www.example.com", Mono.just("Walter")) - .contextWrite(Context.of(HTTP_CORRELATION_ID, "2-j3r9afaf92j-afkaf")) - .filter(t -> t.getT1() < 300) - .map(Tuple2::getT2); - - StepVerifier.create(put) - .expectNext("PUT sent to www.example.com" + - " with header X-Correlation-ID = 2-j3r9afaf92j-afkaf") - .verifyComplete(); -} ----- -==== - -include::advanced-contextPropagation.adoc[leveloffset=2] - -[[cleanup]] -== Dealing with Objects that Need Cleanup - -In very specific cases, your application may deal with types that necessitate some form of cleanup once they are no longer in use. -This is an advanced scenario -- for, example when you have reference-counted objects or when you deal with off-heap objects. -Netty's `ByteBuf` is a prime example of both. - -In order to ensure proper cleanup of such objects, you need to account for it on a `Flux`-by-`Flux` basis, as well as in several of the global hooks (see <>): - - * The `doOnDiscard` `Flux`/`Mono` operator - * The `onOperatorError` hook - * The `onNextDropped` hook - * Operator-specific handlers - -This is needed because each hook is made with a specific subset of cleanup in mind, and users might want (for example) to implement specific error-handling logic in addition to cleanup logic within `onOperatorError`. - -Note that some operators are less adapted to dealing with objects that need cleanup. 
-For example, `bufferWhen` can introduce overlapping buffers, and that means that the discard "`local hook`" we used earlier might see a first buffer as being discarded and cleanup an element in it that is in a second buffer, where it is still valid. - -IMPORTANT: For the purpose of cleaning up, *all these hooks MUST be IDEMPOTENT*. -They might on some occasions get applied several times to the same object. -Unlike the `doOnDiscard` operator, which performs a class-level `instanceOf` check, the global hooks are also dealing with instances that can be any `Object`. It is up to the user's implementation to distinguish between which instances need cleanup and which do not. - - -=== The `doOnDiscard` Operator or Local Hook - -This hook has been specifically put in place for cleanup of objects that would otherwise never be exposed to user code. -It is intended as a cleanup hook for flows that operate under normal circumstances (not malformed sources that push too many items, which is covered by `onNextDropped`). - -It is local, in the sense that it is activated through an operator and applies only to a given `Flux` or `Mono`. - -Obvious cases include operators that filter elements from upstream. -These elements never reach the next operator (or final subscriber), but this is part of the normal path of execution. -As such, they are passed to the `doOnDiscard` hook. -Examples of when you might use the `doOnDiscard` hook include the following: - -* `filter`: Items that do not match the filter are considered to be "`discarded.`" -* `skip`: Skipped items are discarded. -* `buffer(maxSize, skip)` with `maxSize < skip`: A "`dropping buffer`" -- items in between buffers are discarded. - -But `doOnDiscard` is not limited to filtering operators, and is also used by operators that internally queue data for backpressure purposes. -More specifically, most of the time, this is important during cancellation. 
An operator that prefetches data from its source and later drains to its subscriber upon demand could have un-emitted data when it gets cancelled. -Such operators use the `doOnDiscard` hook during cancellation to clear up their internal backpressure `Queue`. - -WARNING: Each call to `doOnDiscard(Class, Consumer)` is additive with the others, to the extent that it is visible and used by only operators upstream of it. - -=== The `onOperatorError` hook - -The `onOperatorError` hook is intended to modify errors in a transverse manner (similar to an AOP catch-and-rethrow). - -When the error happens during the processing of an `onNext` signal, the element that was being emitted is passed to `onOperatorError`. - -If that type of element needs cleanup, you need to implement it in the `onOperatorError` hook, possibly on top of error-rewriting code. - -=== The `onNextDropped` Hook - -With malformed `Publishers`, there could be cases where an operator receives an element when it expected none (typically, after having received the `onError` or `onComplete` signals). -In such cases, the unexpected element is "`dropped`" -- that is, passed to the `onNextDropped` hook. -If you have types that need cleanup, you must detect these in the `onNextDropped` hook and implement cleanup code there as well. - -=== Operator-specific Handlers - -Some operators that deal with buffers or collect values as part of their operations have specific handlers for cases where collected data is not propagated downstream. -If you use such operators with the type(s) that need cleanup, you need to perform cleanup in these handlers. - -For example, `distinct` has such a callback that is invoked when the operator terminates (or is cancelled) in order to clear the collection it uses to judge whether an element is distinct or not. -By default, the collection is a `HashSet`, and the cleanup callback is a `HashSet::clear`. 
-However, if you deal with reference-counted objects, you might want to change that to a more involved handler that would `release` each element in the set before calling `clear()` on it. - - -[[null-safety]] -== Null Safety - -Although Java does not allow expressing null-safety with its type system, Reactor -now provides annotations to declare nullability of APIs, similar to those provided by -Spring Framework 5. - -Reactor uses these annotations, but they can also be used in any Reactor-based -Java project to declare null-safe APIs. Nullability of the types used inside method bodies -is outside of the scope of this feature. - -These annotations are meta-annotated with https://jcp.org/en/jsr/detail?id=305[JSR 305] -annotations (a dormant JSR that is supported by tools such as IntelliJ IDEA) to provide -useful warnings to Java developers related to null-safety in order to avoid -`NullPointerException` at runtime. JSR 305 meta-annotations let tooling vendors -provide null safety support in a generic way, without having to hard-code support for Reactor annotations. - -NOTE: It is not necessary nor recommended with Kotlin 1.1.5+ to have a dependency on JSR 305 in -your project classpath. - -They are also used by Kotlin, which natively supports -https://kotlinlang.org/docs/reference/null-safety.html[null safety]. See -<> for more details. - -The following annotations are provided in the `reactor.util.annotation` package: - -* https://projectreactor.io/docs/core/release/api/reactor/util/annotation/NonNull.html[`@NonNull`]: -Indicates that a specific parameter, return value, or field cannot be `null`. -(It is not needed on parameters and return values where `@NonNullApi` applies) . -* https://projectreactor.io/docs/core/release/api/reactor/util/annotation/Nullable.html[`@Nullable`]: -Indicates that a parameter, return value, or field can be `null`. 
-* https://projectreactor.io/docs/core/release/api/reactor/util/annotation/NonNullApi.html[`@NonNullApi`]: -Package-level annotation that indicates non-null is the default behavior for -parameters and return values. - -NOTE: Nullability for generic type arguments, variable arguments, and array elements is not yet supported. -See https://github.com/reactor/reactor-core/issues/878[issue #878] for up-to-date -information. diff --git a/docs/modules/ROOT/pages/advancedFeatures/advanced-broadcast-multiple-subscribers-connectableflux.adoc b/docs/modules/ROOT/pages/advancedFeatures/advanced-broadcast-multiple-subscribers-connectableflux.adoc new file mode 100644 index 0000000000..28d8563822 --- /dev/null +++ b/docs/modules/ROOT/pages/advancedFeatures/advanced-broadcast-multiple-subscribers-connectableflux.adoc @@ -0,0 +1,97 @@ +[[advanced-broadcast-multiple-subscribers-connectableflux]] += Broadcasting to Multiple Subscribers with `ConnectableFlux` + +Sometimes, you may want to not defer only some processing to the subscription time of one +subscriber, but you might actually want for several of them to rendezvous and then +trigger the subscription and data generation. + +This is what `ConnectableFlux` is made for. Two main patterns are covered in the `Flux` +API that return a `ConnectableFlux`: `publish` and `replay`. + +* `publish` dynamically tries to respect the demand from its various subscribers, in +terms of backpressure, by forwarding these requests to the source. Most notably, if any +subscriber has a pending demand of `0`, publish pauses its requesting to the source. +* `replay` buffers data seen through the first subscription, up to configurable limits +(in time and buffer size). It replays the data to subsequent subscribers. + +A `ConnectableFlux` offers additional methods to manage subscriptions downstream +versus subscriptions to the original source. 
These additional methods include the +following: + +* `connect()` can be called manually once you reach enough subscriptions to the `Flux`. That +triggers the subscription to the upstream source. +* `autoConnect(n)` can do the same job automatically once `n` subscriptions have been +made. +* `refCount(n)` not only automatically tracks incoming subscriptions but also detects +when these subscriptions are cancelled. If not enough subscribers are tracked, the source +is "`disconnected`", causing a new subscription to the source later if additional +subscribers appear. +* `refCount(int, Duration)` adds a "`grace period.`" Once the number of tracked subscribers +becomes too low, it waits for the `Duration` before disconnecting the source, potentially +allowing for enough new subscribers to come in and cross the connection threshold again. + +Consider the following example: + +[source,java] +[%unbreakable] +---- +Flux source = Flux.range(1, 3) + .doOnSubscribe(s -> System.out.println("subscribed to source")); + +ConnectableFlux co = source.publish(); + +co.subscribe(System.out::println, e -> {}, () -> {}); +co.subscribe(System.out::println, e -> {}, () -> {}); + +System.out.println("done subscribing"); +Thread.sleep(500); +System.out.println("will now connect"); + +co.connect(); +---- + +The preceding code produces the following output: + +---- +done subscribing +will now connect +subscribed to source +1 +1 +2 +2 +3 +3 +---- + +The following code uses `autoConnect`: + +[source,java] +[%unbreakable] +---- +Flux source = Flux.range(1, 3) + .doOnSubscribe(s -> System.out.println("subscribed to source")); + +Flux autoCo = source.publish().autoConnect(2); + +autoCo.subscribe(System.out::println, e -> {}, () -> {}); +System.out.println("subscribed first"); +Thread.sleep(500); +System.out.println("subscribing second"); +autoCo.subscribe(System.out::println, e -> {}, () -> {}); +---- + +The preceding code produces the following output: + +---- +subscribed first +subscribing 
second +subscribed to source +1 +1 +2 +2 +3 +3 +---- + diff --git a/docs/modules/ROOT/pages/advancedFeatures/advanced-mutualizing-operator-usage.adoc b/docs/modules/ROOT/pages/advancedFeatures/advanced-mutualizing-operator-usage.adoc new file mode 100644 index 0000000000..81cb880c93 --- /dev/null +++ b/docs/modules/ROOT/pages/advancedFeatures/advanced-mutualizing-operator-usage.adoc @@ -0,0 +1,101 @@ +[[advanced-mutualizing-operator-usage]] += Mutualizing Operator Usage + +From a clean-code perspective, code reuse is generally a good thing. Reactor offers a few +patterns that can help you reuse and mutualize code, notably for operators or combinations +of operators that you might want to apply regularly in your codebase. If you think of a +chain of operators as a recipe, you can create a "`cookbook`" of operator recipes. + +[[using-the-transform-operator]] +== Using the `transform` Operator + +The `transform` operator lets you encapsulate a piece of an operator chain into a +function. That function is applied to an original operator chain at assembly time to +augment it with the encapsulated operators. Doing so applies the same operations to all +the subscribers of a sequence and is basically equivalent to chaining the operators +directly. 
The following code shows an example: + +[source,java] +[%unbreakable] +---- +Function, Flux> filterAndMap = +f -> f.filter(color -> !color.equals("orange")) + .map(String::toUpperCase); + +Flux.fromIterable(Arrays.asList("blue", "green", "orange", "purple")) + .doOnNext(System.out::println) + .transform(filterAndMap) + .subscribe(d -> System.out.println("Subscriber to Transformed MapAndFilter: "+d)); +---- + +The following image shows how the `transform` operator encapsulates flows: + +image::gs-transform.png[Transform Operator : encapsulate flows] + +The preceding example produces the following output: + +---- +blue +Subscriber to Transformed MapAndFilter: BLUE +green +Subscriber to Transformed MapAndFilter: GREEN +orange +purple +Subscriber to Transformed MapAndFilter: PURPLE +---- + +[[using-the-transformdeferred-operator]] +== Using the `transformDeferred` Operator + +The `transformDeferred` operator is similar to `transform` and also lets you encapsulate operators +in a function. The major difference is that this function is applied to the original +sequence _on a per-subscriber basis_. It means that the function can actually produce a +different operator chain for each subscription (by maintaining some state). 
The +following code shows an example: + +[source,java] +[%unbreakable] +---- +AtomicInteger ai = new AtomicInteger(); +Function, Flux> filterAndMap = f -> { + if (ai.incrementAndGet() == 1) { +return f.filter(color -> !color.equals("orange")) + .map(String::toUpperCase); + } + return f.filter(color -> !color.equals("purple")) + .map(String::toUpperCase); +}; + +Flux composedFlux = +Flux.fromIterable(Arrays.asList("blue", "green", "orange", "purple")) + .doOnNext(System.out::println) + .transformDeferred(filterAndMap); + +composedFlux.subscribe(d -> System.out.println("Subscriber 1 to Composed MapAndFilter :"+d)); +composedFlux.subscribe(d -> System.out.println("Subscriber 2 to Composed MapAndFilter: "+d)); +---- + +The following image shows how the `transformDeferred` operator works with per-subscriber transformations: + +image::gs-compose.png[Compose Operator : Per Subscriber transformation] + +The preceding example produces the following output: + +[%unbreakable] +---- +blue +Subscriber 1 to Composed MapAndFilter :BLUE +green +Subscriber 1 to Composed MapAndFilter :GREEN +orange +purple +Subscriber 1 to Composed MapAndFilter :PURPLE +blue +Subscriber 2 to Composed MapAndFilter: BLUE +green +Subscriber 2 to Composed MapAndFilter: GREEN +orange +Subscriber 2 to Composed MapAndFilter: ORANGE +purple +---- + diff --git a/docs/modules/ROOT/pages/advancedFeatures/advanced-parallelizing-parralelflux.adoc b/docs/modules/ROOT/pages/advancedFeatures/advanced-parallelizing-parralelflux.adoc new file mode 100644 index 0000000000..16156306c3 --- /dev/null +++ b/docs/modules/ROOT/pages/advancedFeatures/advanced-parallelizing-parralelflux.adoc @@ -0,0 +1,80 @@ +[[advanced-parallelizing-parralelflux]] += Parallelizing Work with `ParallelFlux` + +With multi-core architectures being a commodity nowadays, being able to easily +parallelize work is important. 
Reactor helps with that by providing a special type, +`ParallelFlux`, that exposes operators that are optimized for parallelized work. + +To obtain a `ParallelFlux`, you can use the `parallel()` operator on any `Flux`. +By itself, this method does not parallelize the work. Rather, it divides +the workload into "`rails`" (by default, as many rails as there are CPU cores). + +In order to tell the resulting `ParallelFlux` where to run each rail (and, by +extension, to run rails in parallel) you have to use `runOn(Scheduler)`. Note that +there is a recommended dedicated `Scheduler` for parallel work: `Schedulers.parallel()`. + +Compare the next two examples: + +[source,java] +[%unbreakable] +---- +Flux.range(1, 10) + .parallel(2) //<1> + .subscribe(i -> System.out.println(Thread.currentThread().getName() + " -> " + i)); +---- +<1> We force a number of rails instead of relying on the number of CPU cores. + +[source,java] +[%unbreakable] +---- +Flux.range(1, 10) + .parallel(2) + .runOn(Schedulers.parallel()) + .subscribe(i -> System.out.println(Thread.currentThread().getName() + " -> " + i)); +---- + +The first example produces the following output: + +---- +main -> 1 +main -> 2 +main -> 3 +main -> 4 +main -> 5 +main -> 6 +main -> 7 +main -> 8 +main -> 9 +main -> 10 +---- + +The second correctly parallelizes on two threads, as shown in the following output: + +---- +parallel-1 -> 1 +parallel-2 -> 2 +parallel-1 -> 3 +parallel-2 -> 4 +parallel-1 -> 5 +parallel-2 -> 6 +parallel-1 -> 7 +parallel-1 -> 9 +parallel-2 -> 8 +parallel-2 -> 10 +---- + +If, once you process your sequence in parallel, you want to revert back to a "`normal`" +`Flux` and apply the rest of the operator chain in a sequential manner, you can use the +`sequential()` method on `ParallelFlux`. + +Note that `sequential()` is implicitly applied if you `subscribe` to the `ParallelFlux` +with a `Subscriber` but not when using the lambda-based variants of `subscribe`. 
+ +Note also that `subscribe(Subscriber)` merges all the rails, while +`subscribe(Consumer)` runs all the rails. If the `subscribe()` method has a lambda, +each lambda is executed as many times as there are rails. + +You can also access individual rails or "`groups`" as a `Flux>` through the +`groups()` method and apply additional operators to them through the `composeGroup()` +method. + diff --git a/docs/modules/ROOT/pages/advancedFeatures/advanced-three-sorts-batching.adoc b/docs/modules/ROOT/pages/advancedFeatures/advanced-three-sorts-batching.adoc new file mode 100644 index 0000000000..82b4d43aa4 --- /dev/null +++ b/docs/modules/ROOT/pages/advancedFeatures/advanced-three-sorts-batching.adoc @@ -0,0 +1,155 @@ +[[advanced-three-sorts-batching]] += Three Sorts of Batching + +When you have lots of elements and you want to separate them into batches, you have three +broad solutions in Reactor: grouping, windowing, and buffering. These three are +conceptually close, because they redistribute a `Flux` into an aggregate. Grouping and +windowing create a `Flux>`, while buffering aggregates into a `Collection`. + +[[grouping-with-flux-groupedflux]] +== Grouping with `Flux>` + +Grouping is the act of splitting the source `Flux` into multiple batches, each of which +matches a key. + +The associated operator is `groupBy`. + +Each group is represented as a `GroupedFlux`, which lets you retrieve the key by calling its +`key()` method. + +There is no necessary continuity in the content of the groups. Once a source element +produces a new key, the group for this key is opened and elements that match the key end +up in the group (several groups could be open at the same time). + +This means that groups: + + 1. Are always disjoint (a source element belongs to one and only one group). + 2. Can contain elements from different places in the original sequence. + 3. Are never empty. 
+ +The following example groups values by whether they are even or odd: + +[source,java] +[%unbreakable] +---- +StepVerifier.create( + Flux.just(1, 3, 5, 2, 4, 6, 11, 12, 13) + .groupBy(i -> i % 2 == 0 ? "even" : "odd") + .concatMap(g -> g.defaultIfEmpty(-1) //if empty groups, show them + .map(String::valueOf) //map to string + .startWith(g.key())) //start with the group's key + ) + .expectNext("odd", "1", "3", "5", "11", "13") + .expectNext("even", "2", "4", "6", "12") + .verifyComplete(); +---- + +WARNING: Grouping is best suited for when you have a medium to low number of groups. The +groups must also imperatively be consumed (such as by a `flatMap`) so that `groupBy` +continues fetching data from upstream and feeding more groups. Sometimes, these two +constraints multiply and lead to hangs, such as when you have a high cardinality and the +concurrency of the `flatMap` consuming the groups is too low. + +[[windowing-with-flux-flux]] +== Windowing with `Flux>` + +Windowing is the act of splitting the source `Flux` into _windows_, by criteria of +size, time, boundary-defining predicates, or boundary-defining `Publisher`. + +The associated operators are `window`, `windowTimeout`, `windowUntil`, `windowWhile`, and +`windowWhen`. + +Contrary to `groupBy`, which randomly overlaps according to incoming keys, +windows are (most of the time) opened sequentially. + +Some variants can still overlap, though. For instance, in `window(int maxSize, int skip)` +the `maxSize` parameter is the number of elements after which a window +closes, and the `skip` parameter is the number of elements in the source after which a +new window is opened. So if `maxSize > skip`, a new window opens before the previous one +closes and the two windows overlap. 
 + +The following example shows overlapping windows: + +[source,java] +[%unbreakable] +---- +StepVerifier.create( + Flux.range(1, 10) + .window(5, 3) //overlapping windows + .concatMap(g -> g.defaultIfEmpty(-1)) //show empty windows as -1 + ) + .expectNext(1, 2, 3, 4, 5) + .expectNext(4, 5, 6, 7, 8) + .expectNext(7, 8, 9, 10) + .expectNext(10) + .verifyComplete(); +---- + +NOTE: With the reverse configuration (`maxSize` < `skip`), some elements from +the source are dropped and are not part of any window. + +In the case of predicate-based windowing through `windowUntil` and `windowWhile`, +having subsequent source elements that do not match the predicate can also lead +to empty windows, as demonstrated in the following example: + +[source,java] +[%unbreakable] +---- +StepVerifier.create( + Flux.just(1, 3, 5, 2, 4, 6, 11, 12, 13) + .windowWhile(i -> i % 2 == 0) + .concatMap(g -> g.defaultIfEmpty(-1)) + ) + .expectNext(-1, -1, -1) //respectively triggered by odd 1 3 5 + .expectNext(2, 4, 6) // triggered by 11 + .expectNext(12) // triggered by 13 + // however, no empty completion window is emitted (would contain extra matching elements) + .verifyComplete(); +---- + +[[buffering-with-flux-list]] +== Buffering with `Flux>` + +Buffering is similar to windowing, with the following twist: Instead of emitting +_windows_ (each of which is a `Flux`), it emits _buffers_ (which are `Collection` +-- by default, `List`). + +The operators for buffering mirror those for windowing: `buffer`, `bufferTimeout`, +`bufferUntil`, `bufferWhile`, and `bufferWhen`. + +Where the corresponding windowing operator opens a window, a buffering operator creates a +new collection and starts adding elements to it. Where a window closes, the buffering +operator emits the collection. 
 + +Buffering can also lead to dropping source elements or having overlapping buffers, as +the following example shows: + +[source,java] +[%unbreakable] +---- +StepVerifier.create( + Flux.range(1, 10) + .buffer(5, 3) //overlapping buffers + ) + .expectNext(Arrays.asList(1, 2, 3, 4, 5)) + .expectNext(Arrays.asList(4, 5, 6, 7, 8)) + .expectNext(Arrays.asList(7, 8, 9, 10)) + .expectNext(Collections.singletonList(10)) + .verifyComplete(); +---- + +Unlike in windowing, `bufferUntil` and `bufferWhile` do not emit an empty buffer, as +the following example shows: + +[source,java] +[%unbreakable] +---- +StepVerifier.create( + Flux.just(1, 3, 5, 2, 4, 6, 11, 12, 13) + .bufferWhile(i -> i % 2 == 0) + ) + .expectNext(Arrays.asList(2, 4, 6)) // triggered by 11 + .expectNext(Collections.singletonList(12)) // triggered by 13 + .verifyComplete(); +---- + diff --git a/docs/modules/ROOT/pages/advancedFeatures/cleanup.adoc b/docs/modules/ROOT/pages/advancedFeatures/cleanup.adoc new file mode 100644 index 0000000000..9d4cf50f7f --- /dev/null +++ b/docs/modules/ROOT/pages/advancedFeatures/cleanup.adoc @@ -0,0 +1,74 @@ +[[cleanup]] += Dealing with Objects that Need Cleanup + +In very specific cases, your application may deal with types that necessitate some form of cleanup once they are no longer in use. +This is an advanced scenario -- for example, when you have reference-counted objects or when you deal with off-heap objects. +Netty's `ByteBuf` is a prime example of both. 
 + +In order to ensure proper cleanup of such objects, you need to account for it on a `Flux`-by-`Flux` basis, as well as in several of the global hooks (see xref:advancedFeatures/hooks.adoc[Using Global Hooks]): + + * The `doOnDiscard` `Flux`/`Mono` operator + * The `onOperatorError` hook + * The `onNextDropped` hook + * Operator-specific handlers + +This is needed because each hook is made with a specific subset of cleanup in mind, and users might want (for example) to implement specific error-handling logic in addition to cleanup logic within `onOperatorError`. + +Note that some operators are less adapted to dealing with objects that need cleanup. +For example, `bufferWhen` can introduce overlapping buffers, and that means that the discard "`local hook`" we used earlier might see a first buffer as being discarded and clean up an element in it that is in a second buffer, where it is still valid. + +IMPORTANT: For the purpose of cleaning up, *all these hooks MUST be IDEMPOTENT*. +They might on some occasions get applied several times to the same object. +Unlike the `doOnDiscard` operator, which performs a class-level `instanceOf` check, the global hooks are also dealing with instances that can be any `Object`. It is up to the user's implementation to distinguish between which instances need cleanup and which do not. + + +[[the-doondiscard-operator-or-local-hook]] +== The `doOnDiscard` Operator or Local Hook + +This hook has been specifically put in place for cleanup of objects that would otherwise never be exposed to user code. +It is intended as a cleanup hook for flows that operate under normal circumstances (not malformed sources that push too many items, which is covered by `onNextDropped`). + +It is local, in the sense that it is activated through an operator and applies only to a given `Flux` or `Mono`. + +Obvious cases include operators that filter elements from upstream. 
+These elements never reach the next operator (or final subscriber), but this is part of the normal path of execution. +As such, they are passed to the `doOnDiscard` hook. +Examples of when you might use the `doOnDiscard` hook include the following: + +* `filter`: Items that do not match the filter are considered to be "`discarded.`" +* `skip`: Skipped items are discarded. +* `buffer(maxSize, skip)` with `maxSize < skip`: A "`dropping buffer`" -- items in between buffers are discarded. + +But `doOnDiscard` is not limited to filtering operators, and is also used by operators that internally queue data for backpressure purposes. +More specifically, most of the time, this is important during cancellation. An operator that prefetches data from its source and later drains to its subscriber upon demand could have un-emitted data when it gets cancelled. +Such operators use the `doOnDiscard` hook during cancellation to clear up their internal backpressure `Queue`. + +WARNING: Each call to `doOnDiscard(Class, Consumer)` is additive with the others, to the extent that it is visible and used by only operators upstream of it. + +[[the-onoperatorerror-hook]] +== The `onOperatorError` hook + +The `onOperatorError` hook is intended to modify errors in a transverse manner (similar to an AOP catch-and-rethrow). + +When the error happens during the processing of an `onNext` signal, the element that was being emitted is passed to `onOperatorError`. + +If that type of element needs cleanup, you need to implement it in the `onOperatorError` hook, possibly on top of error-rewriting code. + +[[the-onnextdropped-hook]] +== The `onNextDropped` Hook + +With malformed `Publishers`, there could be cases where an operator receives an element when it expected none (typically, after having received the `onError` or `onComplete` signals). +In such cases, the unexpected element is "`dropped`" -- that is, passed to the `onNextDropped` hook. 
+If you have types that need cleanup, you must detect these in the `onNextDropped` hook and implement cleanup code there as well. + +[[operator-specific-handlers]] +== Operator-specific Handlers + +Some operators that deal with buffers or collect values as part of their operations have specific handlers for cases where collected data is not propagated downstream. +If you use such operators with the type(s) that need cleanup, you need to perform cleanup in these handlers. + +For example, `distinct` has such a callback that is invoked when the operator terminates (or is cancelled) in order to clear the collection it uses to judge whether an element is distinct or not. +By default, the collection is a `HashSet`, and the cleanup callback is a `HashSet::clear`. +However, if you deal with reference-counted objects, you might want to change that to a more involved handler that would `release` each element in the set before calling `clear()` on it. + + diff --git a/docs/modules/ROOT/pages/advancedFeatures/context.adoc b/docs/modules/ROOT/pages/advancedFeatures/context.adoc new file mode 100644 index 0000000000..b27f5610a3 --- /dev/null +++ b/docs/modules/ROOT/pages/advancedFeatures/context.adoc @@ -0,0 +1,371 @@ +[[context]] += Adding a Context to a Reactive Sequence + +One of the big technical challenges encountered when switching from an imperative +programming perspective to a reactive programming mindset lies in how you deal with +threading. + +Contrary to what you might be used to, in reactive programming, you can use a `Thread` +to process several asynchronous sequences that run at roughly the same time (actually, in +non-blocking locksteps). The execution can also easily and often jump from one thread to +another. + +This arrangement is especially hard for developers that use features dependent on the +threading model being more "`stable,`" such as `ThreadLocal`. As it lets you associate +data with a thread, it becomes tricky to use in a reactive context. 
As a result, +libraries that rely on `ThreadLocal` at least introduce new challenges when used with +Reactor. At worst, they work badly or even fail. Using the MDC of Logback to store and +log correlation IDs is a prime example of such a situation. + +The usual workaround for `ThreadLocal` usage is to move the contextual data, `C`, along +your business data, `T`, in the sequence, by using (for instance) `Tuple2`. This does +not look good and leaks an orthogonal concern (the contextual data) into your method and +`Flux` signatures. + +Since version `3.1.0`, Reactor comes with an advanced feature that is somewhat comparable +to `ThreadLocal` but can be applied to a `Flux` or a `Mono` instead of a `Thread`. +This feature is called `Context`. + +As an illustration of what it looks like, the following example both reads from and +writes to `Context`: + +[source,java] +[%unbreakable] +---- +String key = "message"; +Mono r = Mono.just("Hello") + .flatMap(s -> Mono.deferContextual(ctx -> + Mono.just(s + " " + ctx.get(key)))) + .contextWrite(ctx -> ctx.put(key, "World")); + +StepVerifier.create(r) + .expectNext("Hello World") + .verifyComplete(); +---- + +In the following sections, we cover `Context` and how to use it, so that you +can eventually understand the preceding example. + +IMPORTANT: This is an advanced feature that is more targeted at library developers. It +requires good understanding of https://github.com/reactive-streams/reactive-streams-jvm/blob/master/README.md#3-subscription-code[the lifecycle of a `Subscription`] and is intended for +libraries that are responsible for the subscriptions. + +[[context.api]] +== The `Context` API + +`Context` is an interface reminiscent of `Map`. It stores key-value pairs and lets you +fetch a value you stored by its key. It has a simplified version that only exposes read +methods, the `ContextView`. 
More specifically: + +* Both key and values are of type `Object`, so a `Context` (and `ContextView`) instance can contain any number of +highly divergent values from different libraries and sources. +* A `Context` is immutable. It exposes write methods like `put` and `putAll` but they produce a new instance. +* For a read-only API that doesn't even expose such write methods, there's the `ContextView` superinterface since 3.4.0 +* You can check whether the key is present with `hasKey(Object key)`. +* Use `getOrDefault(Object key, T defaultValue)` to retrieve a value (cast to a `T`) or +fall back to a default one if the `Context` instance does not have that key. +* Use `getOrEmpty(Object key)` to get an `Optional` (the `Context` instance attempts to cast the +stored value to `T`). +* Use `put(Object key, Object value)` to store a key-value pair, returning a new +`Context` instance. You can also merge two contexts into a new one by using +`putAll(ContextView)`. +* Use `delete(Object key)` to remove the value associated to a key, returning a new +`Context`. + +[TIP] +==== +When you create a `Context`, you can create pre-valued `Context` instances with up to five +key-value pairs by using the static `Context.of` methods. They take 2, 4, 6, 8 or 10 +`Object` instances, each couple of `Object` instances being a key-value pair to add to +the `Context`. + +Alternatively you can also create an empty `Context` by using `Context.empty()`. +==== + +[[context.write]] +== Tying a `Context` to a `Flux` and Writing + +To make a `Context` be useful, it must be tied to a specific sequence and be accessible by +each operator in a chain. Note that the operator must be a Reactor-native operator, as +`Context` is specific to Reactor. + +Actually, a `Context` is tied to each `Subscriber` in a chain. It uses the `Subscription` +propagation mechanism to make itself available to each operator, starting with the final +`subscribe` and moving up the chain. 
+ +In order to populate the `Context`, which can only be done at subscription time, you need +to use the `contextWrite` operator. + +`contextWrite(ContextView)` merges the `ContextView` you provide and the +`Context` from downstream (remember, the `Context` is propagated from the bottom of the +chain towards the top). This is done through a call to `putAll`, resulting in a NEW +`Context` for upstream. + +TIP: You can also use the more advanced `contextWrite(Function)`. +It receives a copy of the `Context` from downstream, lets you put or delete values +as you see fit, and returns the new `Context` to use. You can even decide to return a +completely different instance, although it is really not recommended (doing so might +impact third-party libraries that depend on the `Context`). + +[[context.read]] +== Reading a `Context`, through the `ContextView` + +Once you have populated a `Context`, you may want to peek into it at runtime. +Most of the time, the responsibility of putting information into the `Context` +is on the end user's side, while exploiting that information is on the third-party library's side, +as such libraries are usually upstream of the client code. + +The read oriented operators allow to obtain data from the `Context` in a chain of operators by exposing +its `ContextView`: + + - to access the context from a source-like operator, use `deferContextual` factory method + - to access the context from the middle of an operator chain, use `transformDeferredContextual(BiFunction)` + - alternatively, when dealing with an inner sequence (like inside a `flatMap`), the `ContextView` + can be materialized using `Mono.deferContextual(Mono::just)`. Usually though, you might want + to perform meaningful work directly within the defer's lambda, eg. `Mono.deferContextual(ctx -> doSomethingAsyncWithContextData(v, ctx.get(key)))` + where `v` is the value being flatMapped. 
+ +TIP: In order to read from the `Context` without misleading users into thinking one can write to it +while data is running through the pipeline, only the `ContextView` is exposed by the operators above. +In case one needs to use one of the remaining APIs that still require a `Context`, one can use `Context.of(contextView)` for conversion. + +[[simple-context-examples]] +== Simple `Context` Examples + +The examples in this section are meant as ways to better understand some of the caveats of +using a `Context`. + +We first look back at our simple example from the introduction in a bit more detail, as +the following example shows: + +[source,java] +[%unbreakable] +---- + +String key = "message"; +Mono r = Mono.just("Hello") + .flatMap(s -> Mono.deferContextual(ctx -> + Mono.just(s + " " + ctx.get(key)))) //<2> + .contextWrite(ctx -> ctx.put(key, "World")); //<1> + +StepVerifier.create(r) + .expectNext("Hello World") //<3> + .verifyComplete(); +---- +<1> The chain of operators ends with a call to `contextWrite(Function)` that puts +`"World"` into the `Context` under a key of `"message"`. +<2> We `flatMap` on the source element, materializing the `ContextView` with `Mono.deferContextual()` +and directly extract the data associated to `"message"` and concatenate that with the original word. +<3> The resulting `Mono` emits `"Hello World"`. + +IMPORTANT: The numbering above versus the actual line order is not a mistake. It represents +the execution order. Even though `contextWrite` is the last piece of the chain, it is +the one that gets executed first (due to its subscription-time nature and the fact that +the subscription signal flows from bottom to top). + +IMPORTANT: In your chain of operators, the relative positions of where you write to the +`Context` and where you read from it matters. 
The `Context` +is immutable and its content can only be seen by operators above it, as demonstrated in +the following example: + +[source,java] +[%unbreakable] +---- + +String key = "message"; +Mono r = Mono.just("Hello") + .contextWrite(ctx -> ctx.put(key, "World")) //<1> + .flatMap( s -> Mono.deferContextual(ctx -> + Mono.just(s + " " + ctx.getOrDefault(key, "Stranger")))); //<2> + +StepVerifier.create(r) + .expectNext("Hello Stranger") //<3> + .verifyComplete(); +---- +<1> The `Context` is written to too high in the chain. +<2> As a result, in the `flatMap`, there is no value associated with our key. A default value +is used instead. +<3> The resulting `Mono` thus emits `"Hello Stranger"`. + +Similarly, in the case of several attempts to write the same key to the `Context`, the +relative order of the writes matters, too. Operators that read the `Context` see +the value that was set closest to being under them, as demonstrated in the following example: + +[source,java] +[%unbreakable] +---- + +String key = "message"; +Mono r = Mono + .deferContextual(ctx -> Mono.just("Hello " + ctx.get(key))) + .contextWrite(ctx -> ctx.put(key, "Reactor")) //<1> + .contextWrite(ctx -> ctx.put(key, "World")); //<2> + +StepVerifier.create(r) + .expectNext("Hello Reactor") //<3> + .verifyComplete(); +---- +<1> A write attempt on key `"message"`. +<2> Another write attempt on key `"message"`. +<3> The `deferContextual` only saw the value set closest to it (and below it): `"Reactor"`. + +In the preceding example, the `Context` is populated with `"World"` during subscription. +Then the subscription signal moves upstream and another write happens. This produces a +second immutable `Context` with a value of `"Reactor"`. After that, data starts flowing. +The `deferContextual` sees the `Context` closest to it, which is our second `Context` with the +`"Reactor"` value (exposed to the user as a `ContextView`). + +You might wonder if the `Context` is propagated along with the data signal. 
If that was +the case, putting another `flatMap` between these two writes would use the value from +the top `Context`. But this is not the case, as demonstrated by the following example: + +[source,java] +[%unbreakable] +---- + +String key = "message"; +Mono r = Mono + .deferContextual(ctx -> Mono.just("Hello " + ctx.get(key))) //<3> + .contextWrite(ctx -> ctx.put(key, "Reactor")) //<2> + .flatMap( s -> Mono.deferContextual(ctx -> + Mono.just(s + " " + ctx.get(key)))) //<4> + .contextWrite(ctx -> ctx.put(key, "World")); //<1> + +StepVerifier.create(r) + .expectNext("Hello Reactor World") //<5> + .verifyComplete(); +---- +<1> This is the first write to happen. +<2> This is the second write to happen. +<3> The top context read sees second write. +<4> The `flatMap` concatenates the result from initial read with the value from the first write. +<5> The `Mono` emits `"Hello Reactor World"`. + +The reason is that the `Context` is associated to the `Subscriber` and each operator +accesses the `Context` by requesting it from its downstream `Subscriber`. + +One last interesting propagation case is the one where the `Context` is also written to +inside a `flatMap`, as in the following example: + +[source,java] +[%unbreakable] +---- + +String key = "message"; +Mono r = Mono.just("Hello") + .flatMap( s -> Mono + .deferContextual(ctxView -> Mono.just(s + " " + ctxView.get(key))) + ) + .flatMap( s -> Mono + .deferContextual(ctxView -> Mono.just(s + " " + ctxView.get(key))) + .contextWrite(ctx -> ctx.put(key, "Reactor")) //<1> + ) + .contextWrite(ctx -> ctx.put(key, "World")); // <2> + +StepVerifier.create(r) + .expectNext("Hello World Reactor") + .verifyComplete(); +---- +<1> This `contextWrite` does not impact anything outside of its `flatMap`. +<2> This `contextWrite` impacts the main sequence's `Context`. 
+ +In the preceding example, the final emitted value is `"Hello World Reactor"` and not "Hello +Reactor World", because the `contextWrite` that writes `"Reactor"` does so as part of +the inner sequence of the second `flatMap`. As a consequence, it is not visible or propagated +through the main sequence and the first `flatMap` does not see it. Propagation and immutability +isolate the `Context` in operators that create intermediate inner sequences such as `flatMap`. + +[[full-example]] +== Full Example + +Now we can consider a more real life example of a library reading information from the `Context`: +a reactive HTTP client that takes a `Mono` as the source of data for a `PUT` but +also looks for a particular Context key to add a correlation ID to the request's headers. + +From the user perspective, it is called as follows: + +[source,java] +[%unbreakable] +---- + +doPut("www.example.com", Mono.just("Walter")) +---- + +In order to propagate a correlation ID, it would be called as follows: + +[source,java] +[%unbreakable] +---- + +doPut("www.example.com", Mono.just("Walter")) + .contextWrite(Context.of(HTTP_CORRELATION_ID, "2-j3r9afaf92j-afkaf")) +---- + +As the preceding snippets show, the user code uses `contextWrite` to populate +a `Context` with an `HTTP_CORRELATION_ID` key-value pair. The upstream of the operator is +a `Mono>` (a simplistic representation of an HTTP response) +returned by the HTTP client library. So it effectively passes information from the +user code to the library code. 
+ +The following example shows mock code from the library's perspective that reads the +context and "`augments the request`" if it can find the correlation ID: + +[source,java] +[%unbreakable] +---- + +static final String HTTP_CORRELATION_ID = "reactive.http.library.correlationId"; + +Mono> doPut(String url, Mono data) { + Mono>> dataAndContext = + data.zipWith(Mono.deferContextual(c -> // <1> + Mono.just(c.getOrEmpty(HTTP_CORRELATION_ID))) // <2> + ); + + return dataAndContext.handle((dac, sink) -> { + if (dac.getT2().isPresent()) { // <3> + sink.next("PUT <" + dac.getT1() + "> sent to " + url + + " with header X-Correlation-ID = " + dac.getT2().get()); + } + else { + sink.next("PUT <" + dac.getT1() + "> sent to " + url); + } + sink.complete(); + }) + .map(msg -> Tuples.of(200, msg)); +} +---- +<1> Materialize the `ContextView` through `Mono.deferContextual` and... +<2> within the defer, extract a value for the correlation ID key, as an `Optional`. +<3> If the key was present in the context, use the correlation ID as a header. + +The library snippet zips the data `Mono` with `Mono.deferContextual(Mono::just)`. +This gives the library a `Tuple2`, and that +context contains the `HTTP_CORRELATION_ID` entry from downstream (as it is on the direct +path to the subscriber). + +The library code then uses `map` to extract an `Optional` for that key, and, if +the entry is present, it uses the passed correlation ID as a `X-Correlation-ID` header. +That last part is simulated by the `handle`. 
+ +The whole test that validates the library code used the correlation ID can be written as +follows: + +[source,java] +[%unbreakable] +---- + +@Test +public void contextForLibraryReactivePut() { + Mono put = doPut("www.example.com", Mono.just("Walter")) + .contextWrite(Context.of(HTTP_CORRELATION_ID, "2-j3r9afaf92j-afkaf")) + .filter(t -> t.getT1() < 300) + .map(Tuple2::getT2); + + StepVerifier.create(put) + .expectNext("PUT sent to www.example.com" + " with header X-Correlation-ID = 2-j3r9afaf92j-afkaf") + .verifyComplete(); +} +---- + diff --git a/docs/modules/ROOT/pages/advancedFeatures/hooks.adoc b/docs/modules/ROOT/pages/advancedFeatures/hooks.adoc new file mode 100644 index 0000000000..0ec50d33f2 --- /dev/null +++ b/docs/modules/ROOT/pages/advancedFeatures/hooks.adoc @@ -0,0 +1,87 @@ +[[hooks]] += Using Global Hooks + +Reactor has another category of configurable callbacks that are invoked by Reactor +operators in various situations. They are all set in the `Hooks` class, and they fall into +three categories: + +* xref:advancedFeatures/hooks.adoc#hooks-dropping[Dropping Hooks] +* xref:advancedFeatures/hooks.adoc#hooks-internal[Internal Error Hook] +* xref:advancedFeatures/hooks.adoc#hooks-assembly[Assembly Hooks] + +[[hooks-dropping]] +== Dropping Hooks + +Dropping hooks are invoked when the source of an operator does not comply with the +Reactive Streams specification. These kinds of errors are outside of the normal execution +path (that is, they cannot be propagated through `onError`). + +Typically, a `Publisher` calls `onNext` on the operator despite having already called +`onComplete` on it previously. In that case, the `onNext` value is dropped. The same +is true for an extraneous `onError` signal. + +The corresponding hooks, `onNextDropped` and `onErrorDropped`, let you provide a global +`Consumer` for these drops. 
For example, you can use it to log the drop and clean up +resources associated with a value if needed (as it never makes it to the rest of the +reactive chain). + +Setting the hooks twice in a row is additive: every consumer you provide is invoked. The +hooks can be fully reset to their defaults by using the `Hooks.resetOn*Dropped()` methods. + +[[hooks-internal]] +== Internal Error Hook + +One hook, `onOperatorError`, is invoked by operators when an unexpected `Exception` is +thrown during the execution of their `onNext`, `onError`, and `onComplete` methods. + +Unlike the previous category, this is still within the normal execution path. A typical +example is the `map` operator with a map function that throws an `Exception` (such as +division by zero). It is still possible at this point to go through the usual channel of +`onError`, and that is what the operator does. + +First, it passes the `Exception` through `onOperatorError`. The hook lets you inspect the +error (and the incriminating value, if relevant) and change the `Exception`. Of course, +you can also do something on the side, such as log and return the original `Exception`. + +Note that you can set the `onOperatorError` hook multiple times. You can provide a +`String` identifier for a particular `BiFunction` and subsequent calls with different +keys concatenate the functions, which are all executed. On the other hand, reusing the +same key twice lets you replace a function you previously set. + +As a consequence, the default hook behavior can be either fully reset (by using +`Hooks.resetOnOperatorError()`) or partially reset for a specific `key` only (by using +`Hooks.resetOnOperatorError(String)`). + +[[hooks-assembly]] +== Assembly Hooks + +These hooks tie into the lifecycle of operators. They are invoked when a chain of operators +is assembled (that is, instantiated). `onEachOperator` lets you dynamically change each +operator as it is assembled in the chain, by returning a different `Publisher`. 
+`onLastOperator` is similar, except that it is invoked only on the last operator in the +chain before the `subscribe` call. + +If you want to decorate all operators with a cross-cutting `Subscriber` implementation, +you can look into the `Operators#lift*` methods to help you deal with the various +types of Reactor `Publishers` out there (`Flux`, `Mono`, `ParallelFlux`, `GroupedFlux`, and `ConnectableFlux`), +as well as their `Fuseable` versions. + +Like `onOperatorError`, these hooks are cumulative and can be identified with a key. They +can also be reset partially or totally. + +[[hook-presets]] +== Hook Presets + +The `Hooks` utility class provides two preset hooks. These are alternatives to +the default behaviors that you can use by calling their corresponding method, rather than +coming up with the hook yourself: + +* `onNextDroppedFail()`: `onNextDropped` used to throw an `Exceptions.failWithCancel()` +exception. It now defaults to logging the dropped value at the DEBUG level. To go back to +the old default behavior of throwing, use `onNextDroppedFail()`. + +* `onOperatorDebug()`: This method activates xref:debugging.adoc#debug-activate[debug mode]. It ties into +the `onOperatorError` hook, so calling `resetOnOperatorError()` also resets it. You can +independently reset it by using `resetOnOperatorDebug()`, as it uses a specific key internally. + + diff --git a/docs/modules/ROOT/pages/advancedFeatures/null-safety.adoc b/docs/modules/ROOT/pages/advancedFeatures/null-safety.adoc new file mode 100644 index 0000000000..9fd4cd3649 --- /dev/null +++ b/docs/modules/ROOT/pages/advancedFeatures/null-safety.adoc @@ -0,0 +1,38 @@ +[[null-safety]] += Null Safety + +Although Java does not allow expressing null-safety with its type system, Reactor +now provides annotations to declare nullability of APIs, similar to those provided by +Spring Framework 5. + +Reactor uses these annotations, but they can also be used in any Reactor-based +Java project to declare null-safe APIs. 
Nullability of the types used inside method bodies +is outside of the scope of this feature. + +These annotations are meta-annotated with https://jcp.org/en/jsr/detail?id=305[JSR 305] +annotations (a dormant JSR that is supported by tools such as IntelliJ IDEA) to provide +useful warnings to Java developers related to null-safety in order to avoid +`NullPointerException` at runtime. JSR 305 meta-annotations let tooling vendors +provide null safety support in a generic way, without having to hard-code support for Reactor annotations. + +NOTE: It is neither necessary nor recommended with Kotlin 1.1.5+ to have a dependency on JSR 305 in +your project classpath. + +They are also used by Kotlin, which natively supports +https://kotlinlang.org/docs/reference/null-safety.html[null safety]. See +xref:kotlin.adoc#kotlin-null-safety[this dedicated section] for more details. + +The following annotations are provided in the `reactor.util.annotation` package: + +* {javadoc}/reactor/util/annotation/NonNull.html[`@NonNull`]: +Indicates that a specific parameter, return value, or field cannot be `null`. +(It is not needed on parameters and return values where `@NonNullApi` applies). +* {javadoc}/reactor/util/annotation/Nullable.html[`@Nullable`]: +Indicates that a parameter, return value, or field can be `null`. +* {javadoc}/reactor/util/annotation/NonNullApi.html[`@NonNullApi`]: +Package-level annotation that indicates non-null is the default behavior for +parameters and return values. + +NOTE: Nullability for generic type arguments, variable arguments, and array elements is not yet supported. +See https://github.com/reactor/reactor-core/issues/878[issue #878] for up-to-date +information. 
diff --git a/docs/modules/ROOT/pages/advancedFeatures/reactor-hotCold.adoc b/docs/modules/ROOT/pages/advancedFeatures/reactor-hotCold.adoc new file mode 100644 index 0000000000..49f302f4c4 --- /dev/null +++ b/docs/modules/ROOT/pages/advancedFeatures/reactor-hotCold.adoc @@ -0,0 +1,114 @@ +[[reactor.hotCold]] += Hot Versus Cold + +So far, we have considered that all `Flux` (and `Mono`) are the same: They all represent +an asynchronous sequence of data, and nothing happens before you subscribe. + +Really, though, there are two broad families of publishers: hot and cold. + +The earlier description applies to the cold family of publishers. They generate data anew +for each subscription. If no subscription is created, data never gets generated. + +Think of an HTTP request: Each new subscriber triggers an HTTP call, but no call is +made if no one is interested in the result. + +Hot publishers, on the other hand, do not depend on any number of subscribers. They +might start publishing data right away and would continue doing so whenever a new +`Subscriber` comes in (in which case, the subscriber would see only new elements emitted +_after_ it subscribed). For hot publishers, _something_ does indeed happen before you +subscribe. + +One example of the few hot operators in Reactor is `just`: It directly captures the value +at assembly time and replays it to anybody subscribing to it later. To re-use the HTTP +call analogy, if the captured data is the result of an HTTP call, then only one network +call is made, when instantiating `just`. + +To transform `just` into a cold publisher, you can use `defer`. It defers the HTTP +request in our example to subscription time (and would result in a separate network call +for each new subscription). + +On the opposite, `share()` and `replay(...)` can be used to turn a cold publisher into +a hot one (at least once a first subscription has happened). 
Both of these also have +`Sinks.Many` equivalents in the `Sinks` class, which allow programmatically +feeding the sequence. + +Consider two examples, one that demonstrates a cold Flux and the other that makes use of the +`Sinks` to simulate a hot Flux. The following code shows the first example: + +[source,java] +[%unbreakable] +---- + +Flux source = Flux.fromIterable(Arrays.asList("blue", "green", "orange", "purple")) + .map(String::toUpperCase); + +source.subscribe(d -> System.out.println("Subscriber 1: "+d)); +source.subscribe(d -> System.out.println("Subscriber 2: "+d)); +---- + +This first example produces the following output: + +---- +Subscriber 1: BLUE +Subscriber 1: GREEN +Subscriber 1: ORANGE +Subscriber 1: PURPLE +Subscriber 2: BLUE +Subscriber 2: GREEN +Subscriber 2: ORANGE +Subscriber 2: PURPLE +---- + +The following image shows the replay behavior: + +image::gs-cold.png[Replaying behavior] + +Both subscribers catch all four colors, because each subscriber causes the +process defined by the operators on the `Flux` to run. + +Compare the first example to the second example, shown in the following code: + +[source,java] +[%unbreakable] +---- + +Sinks.Many hotSource = Sinks.unsafe().many().multicast().directBestEffort(); + +Flux hotFlux = hotSource.asFlux().map(String::toUpperCase); + +hotFlux.subscribe(d -> System.out.println("Subscriber 1 to Hot Source: "+d)); + +hotSource.emitNext("blue", FAIL_FAST); // <1> +hotSource.tryEmitNext("green").orThrow(); // <2> + +hotFlux.subscribe(d -> System.out.println("Subscriber 2 to Hot Source: "+d)); + +hotSource.emitNext("orange", FAIL_FAST); +hotSource.emitNext("purple", FAIL_FAST); +hotSource.emitComplete(FAIL_FAST); +---- +<1> for more details about sinks, see xref:coreFeatures/sinks.adoc[Sinks] +<2> side note: `orThrow()` here is an alternative to `emitNext` + `Sinks.EmitFailureHandler.FAIL_FAST` +that is suitable for tests, since throwing there is acceptable (more so than in reactive +applications). 
+ +The second example produces the following output: + +---- +Subscriber 1 to Hot Source: BLUE +Subscriber 1 to Hot Source: GREEN +Subscriber 1 to Hot Source: ORANGE +Subscriber 2 to Hot Source: ORANGE +Subscriber 1 to Hot Source: PURPLE +Subscriber 2 to Hot Source: PURPLE +---- + +The following image shows how a subscription is broadcast: + +image::gs-hot.png[Broadcasting a subscription] + +Subscriber 1 catches all four colors. Subscriber 2, having been created after the first +two colors were produced, catches only the last two colors. This difference accounts for +the doubling of `ORANGE` and `PURPLE` in the output. The process described by the +operators on this Flux runs regardless of when subscriptions have been attached. + diff --git a/docs/modules/ROOT/pages/advancedFeatures/scheduler-factory.adoc b/docs/modules/ROOT/pages/advancedFeatures/scheduler-factory.adoc new file mode 100644 index 0000000000..c24cd8b253 --- /dev/null +++ b/docs/modules/ROOT/pages/advancedFeatures/scheduler-factory.adoc @@ -0,0 +1,41 @@ +[[scheduler-factory]] += Replacing Default `Schedulers` + +As we described in the xref:coreFeatures/schedulers.adoc#schedulers[Threading and Schedulers] section, Reactor Core comes with several +`Scheduler` implementations. While you can always create new instances through the `new*` +factory methods, each `Scheduler` flavor also has a default singleton instance that is +accessible through the direct factory method (such as `Schedulers.boundedElastic()` versus +`Schedulers.newBoundedElastic(...)`). + +These default instances are the ones used by operators that need a `Scheduler` to work +when you do not explicitly specify one. For example, `Flux#delayElements(Duration)` uses +the `Schedulers.parallel()` instance. + +In some cases, however, you might need to replace these default instances with something +else in a cross-cutting way, without having to make sure every single operator you call +has your specific `Scheduler` as a parameter. 
An example is measuring the time every +single scheduled task takes by wrapping the real schedulers, for instrumentation +purposes. In other words, you might want to change the default `Schedulers`. + +Changing the default schedulers is possible through the `Schedulers.Factory` class. By +default, a `Factory` creates all the standard `Scheduler` through similarly named +methods. You can override each of these with your custom implementation. + +Additionally, the factory exposes one additional customization method: +`decorateExecutorService`. It is invoked during the creation of every Reactor Core +`Scheduler` that is backed by a `ScheduledExecutorService` (even non-default instances, +such as those created by calls to `Schedulers.newParallel()`). + +This lets you tune the `ScheduledExecutorService` to be used: The default one is exposed +as a `Supplier` and, depending on the type of `Scheduler` being configured, you can choose +to entirely bypass that supplier and return your own instance or you can `get()` the +default instance and wrap it. + +IMPORTANT: Once you create a `Factory` that fits your needs, you must install it by calling +`Schedulers.setFactory(Factory)`. + +Finally, there is a last customizable hook in `Schedulers`: `onHandleError`. This hook is +invoked whenever a `Runnable` task submitted to a `Scheduler` throws an `Exception` (note +that if there is an `UncaughtExceptionHandler` set for the `Thread` that ran the task, +both the handler and the hook are invoked). + diff --git a/docs/modules/ROOT/pages/apdx-howtoReadMarbles.adoc b/docs/modules/ROOT/pages/apdx-howtoReadMarbles.adoc index 8df6148d60..c8b82708b3 100644 --- a/docs/modules/ROOT/pages/apdx-howtoReadMarbles.adoc +++ b/docs/modules/ROOT/pages/apdx-howtoReadMarbles.adoc @@ -1,5 +1,5 @@ [[howtoReadMarbles]] -= How to read marble diagrams? +== How to read marble diagrams? When we introduced `Flux` and `Mono`, we showed an example of a "marble diagram". 
These are found throughout the javadoc in order to explain the behavior of an operator in a more visual way. @@ -9,41 +9,41 @@ First, let's see how the most common patterns of operators are represented. Some operators are instance methods: their output is produced by calling a method on a source `Flux` instance (like `Flux output = source.fluxOperator()`): -image::images/legend-operator-method.svg[A common operator] +image::legend-operator-method.svg[A common operator] Other operators are static methods. They can still take a source as an input parameter, like in `Flux output = Flux.merge(sourceFlux1, sourcePublisher2)`. These are represented like below: -image::images/legend-operator-static.svg[A static operator] +image::legend-operator-static.svg[A static operator] Note that sometimes we represent multiple variants or behaviors depending on the operator's input, in which case there's a single operator "box", but the source and output variants are separated like below: -image::images/legend-operator-double-source.svg[An operator with two examples of input] +image::legend-operator-double-source.svg[An operator with two examples of input] These are the basic cases, yet some operators display slightly more advanced patterns. For instance, `ParallelFlux` creates multiple rails so they have multiple output `Flux`. These are represented one below the other, like in the following diagram: -image::images/legend-operator-parallel.svg[A parallel operator] +image::legend-operator-parallel.svg[A parallel operator] Windowing operators produce a `Flux>`: the main `Flux` notifies of each window opening, while inner `Flux` represent the windows content and termination. 
Windows are represented as branching out of the main `Flux`, like in the following diagram: -image::images/legend-operator-windowing.svg[The output of a windowing operator] +image::legend-operator-windowing.svg[The output of a windowing operator] Sometimes, operators take a "companion publisher" as input (a `Flux`, `Mono` or arbitrary Reactive Stream `Publisher`). Such companion publishers help to customize the operator's behavior, which will use some of the companion's signals as trigger for its own internal behavior. They are represented like in the following diagram: -image::images/legend-operator-companion.svg[An operator with a companion Publisher] +image::legend-operator-companion.svg[An operator with a companion Publisher] Now that we've seen the most common operator patterns, let's show the graphical representation of all the different signals, events and elements that can occur in a `Flux` or `Mono`: -image::images/legend-events.svg[All types of signals and events] +image::legend-events.svg[All types of signals and events] Finally, in the same vein we have the graphical representation of _side effects_, which occur alongside the Reactive Stream signals: -image::images/legend-sideEffects1.svg[Side effects: representation of doOn* handlers] -image::images/legend-sideEffects2.svg[Side effects: in a diagram] +image::legend-sideEffects1.svg[Side effects: representation of doOn* handlers] +image::legend-sideEffects2.svg[Side effects: in a diagram] diff --git a/docs/modules/ROOT/pages/apdx-implem.adoc b/docs/modules/ROOT/pages/apdx-implem.adoc index c95d7beee8..f2221737af 100644 --- a/docs/modules/ROOT/pages/apdx-implem.adoc +++ b/docs/modules/ROOT/pages/apdx-implem.adoc @@ -1 +1,2 @@ -== How is Reactor implemented? +[[how-is-reactor-implemented?]] += How is Reactor implemented? 
diff --git a/docs/modules/ROOT/pages/apdx-migrating.adoc b/docs/modules/ROOT/pages/apdx-migrating.adoc index 7a3d1f58ba..c4fb93ced6 100644 --- a/docs/modules/ROOT/pages/apdx-migrating.adoc +++ b/docs/modules/ROOT/pages/apdx-migrating.adoc @@ -1 +1,2 @@ -== Migrating from RxJava +[[migrating-from-rxjava]] += Migrating from RxJava diff --git a/docs/modules/ROOT/pages/apdx-operatorChoice.adoc b/docs/modules/ROOT/pages/apdx-operatorChoice.adoc index 464588fd8a..09a403039f 100644 --- a/docs/modules/ROOT/pages/apdx-operatorChoice.adoc +++ b/docs/modules/ROOT/pages/apdx-operatorChoice.adoc @@ -1,7 +1,7 @@ [[which-operator]] -= Which operator do I need? +== Which operator do I need? -TIP: In this section, if an operator is specific to https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html[Flux] or https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html[Mono], it is prefixed and linked accordingly, like this: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#fromArray-T:A-[Flux#fromArray]. Common operators have no prefix, and links to both implementations are provided, for example: `just` (https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#just-T%2E%2E%2E-[Flux]|link:https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#just-T-[Mono]). When a specific use case is covered by a combination of operators, it is presented as a method call, with a leading dot and parameters in parentheses, as follows: `.methodCall(parameter)`. +TIP: In this section, if an operator is specific to {javadoc}/reactor/core/publisher/Flux.html[Flux] or {javadoc}/reactor/core/publisher/Mono.html[Mono], it is prefixed and linked accordingly, like this: {javadoc}/reactor/core/publisher/Flux.html#fromArray-T:A-[Flux#fromArray]. 
Common operators have no prefix, and links to both implementations are provided, for example: `just` ({javadoc}/reactor/core/publisher/Flux.html#just-T%2E%2E%2E-[Flux]|link:{javadoc}/reactor/core/publisher/Mono.html#just-T-[Mono]). When a specific use case is covered by a combination of operators, it is presented as a method call, with a leading dot and parameters in parentheses, as follows: `.methodCall(parameter)`. //TODO flux: publishOn/subscribeOn/cancelOn //transformDeferred/transform, repeatWhen, sort, startWith @@ -9,103 +9,103 @@ TIP: In this section, if an operator is specific to https://projectreactor.io/do I want to deal with: -* <> +* xref:apdx-operatorChoice.adoc#which.create[Creating a New Sequence...] -* <> +* xref:apdx-operatorChoice.adoc#which.values[Transforming an Existing Sequence] -* <> +* xref:apdx-operatorChoice.adoc#which.filtering[Filtering a Sequence] -* <> +* xref:apdx-operatorChoice.adoc#which.peeking[Peeking into a Sequence] -* <> +* xref:apdx-operatorChoice.adoc#which.errors[Handling Errors] -* <> +* xref:apdx-operatorChoice.adoc#which.time[Working with Time] -* <> +* xref:apdx-operatorChoice.adoc#which.window[Splitting a {javadoc}/reactor/core/publisher/Flux.html[Flux]] -* <> +* xref:apdx-operatorChoice.adoc#which.blocking[Going Back to the Synchronous World] -* <> +* xref:apdx-operatorChoice.adoc#which.multicasting[Multicasting a {javadoc}/reactor/core/publisher/Flux.html[Flux] to several https://www.reactive-streams.org/reactive-streams-1.0.3-javadoc/org/reactivestreams/Subscriber.html?is-external=true[Subscribers]] [[which.create]] -== Creating a New Sequence... 
- -* that emits a `T`, and I already have: `just` (https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#just-T%2E%2E%2E-[Flux]|link:https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#just-T-[Mono]) -** ...from an https://docs.oracle.com/javase/8/docs/api/java/util/Optional.html[Optional]: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#justOrEmpty-java.util.Optional-[Mono#justOrEmpty(Optional)] -** ...from a potentially `null` T: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#justOrEmpty-T-[Mono#justOrEmpty(T)] -* that emits a `T` returned by a method: `just` (https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#just-T%2E%2E%2E-[Flux]|link:https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#just-T-[Mono]) as well -** ...but lazily captured: use https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#fromSupplier-java.util.function.Supplier-[Mono#fromSupplier] or wrap `just` (https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#just-T%2E%2E%2E-[Flux]|link:https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#just-T-[Mono]) inside `defer` (https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#defer-java.util.function.Supplier-[Flux]|link:https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#defer-java.util.function.Supplier-[Mono]) -* that emits several `T` I can explicitly enumerate: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#just-T%2E%2E%2E-[Flux#just(T...)] +=== Creating a New Sequence... 
+ +* that emits a `T`, and I already have: `just` ({javadoc}/reactor/core/publisher/Flux.html#just-T%2E%2E%2E-[Flux]|link:{javadoc}/reactor/core/publisher/Mono.html#just-T-[Mono]) +** ...from an https://docs.oracle.com/javase/8/docs/api/java/util/Optional.html[Optional]: {javadoc}/reactor/core/publisher/Mono.html#justOrEmpty-java.util.Optional-[Mono#justOrEmpty(Optional)] +** ...from a potentially `null` T: {javadoc}/reactor/core/publisher/Mono.html#justOrEmpty-T-[Mono#justOrEmpty(T)] +* that emits a `T` returned by a method: `just` ({javadoc}/reactor/core/publisher/Flux.html#just-T%2E%2E%2E-[Flux]|link:{javadoc}/reactor/core/publisher/Mono.html#just-T-[Mono]) as well +** ...but lazily captured: use {javadoc}/reactor/core/publisher/Mono.html#fromSupplier-java.util.function.Supplier-[Mono#fromSupplier] or wrap `just` ({javadoc}/reactor/core/publisher/Flux.html#just-T%2E%2E%2E-[Flux]|link:{javadoc}/reactor/core/publisher/Mono.html#just-T-[Mono]) inside `defer` ({javadoc}/reactor/core/publisher/Flux.html#defer-java.util.function.Supplier-[Flux]|link:{javadoc}/reactor/core/publisher/Mono.html#defer-java.util.function.Supplier-[Mono]) +* that emits several `T` I can explicitly enumerate: {javadoc}/reactor/core/publisher/Flux.html#just-T%2E%2E%2E-[Flux#just(T...)] * that iterates over: -** an array: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#fromArray-T:A-[Flux#fromArray] -** a collection or iterable: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#fromIterable-java.lang.Iterable-[Flux#fromIterable] -** a range of integers: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#range-int-int-[Flux#range] -** a https://docs.oracle.com/javase/8/docs/api/java/util/stream/Stream.html[Stream] supplied for each Subscription: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#fromStream-java.util.function.Supplier-[Flux#fromStream(Supplier)] +** an 
array: {javadoc}/reactor/core/publisher/Flux.html#fromArray-T:A-[Flux#fromArray] +** a collection or iterable: {javadoc}/reactor/core/publisher/Flux.html#fromIterable-java.lang.Iterable-[Flux#fromIterable] +** a range of integers: {javadoc}/reactor/core/publisher/Flux.html#range-int-int-[Flux#range] +** a https://docs.oracle.com/javase/8/docs/api/java/util/stream/Stream.html[Stream] supplied for each Subscription: {javadoc}/reactor/core/publisher/Flux.html#fromStream-java.util.function.Supplier-[Flux#fromStream(Supplier)] * that emits from various single-valued sources such as: -** a https://docs.oracle.com/javase/8/docs/api/java/util/function/Supplier.html[Supplier]: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#fromSupplier-java.util.function.Supplier-[Mono#fromSupplier] -** a task: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#fromCallable-java.util.concurrent.Callable-[Mono#fromCallable], https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#fromRunnable-java.lang.Runnable-[Mono#fromRunnable] -** a https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html[CompletableFuture]: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#fromFuture-java.util.concurrent.CompletableFuture-[Mono#fromFuture] -* that completes: `empty` (https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#empty--[Flux]|link:https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#empty--[Mono]) -* that errors immediately: `error` (https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#error-java.lang.Throwable-[Flux]|link:https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#error-java.lang.Throwable-[Mono]) -** ...but lazily build the https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html[Throwable]: `error(Supplier)` 
(https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#error-java.util.function.Supplier-[Flux]|link:https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#error-java.util.function.Supplier-[Mono]) -* that never does anything: `never` (https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#never--[Flux]|link:https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#never--[Mono]) -* that is decided at subscription: `defer` (https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#defer-java.util.function.Supplier-[Flux]|link:https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#defer-java.util.function.Supplier-[Mono]) -* that depends on a disposable resource: `using` (https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#using-java.util.concurrent.Callable-java.util.function.Function-java.util.function.Consumer-[Flux]|link:https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#using-java.util.concurrent.Callable-java.util.function.Function-java.util.function.Consumer-[Mono]) +** a https://docs.oracle.com/javase/8/docs/api/java/util/function/Supplier.html[Supplier]: {javadoc}/reactor/core/publisher/Mono.html#fromSupplier-java.util.function.Supplier-[Mono#fromSupplier] +** a task: {javadoc}/reactor/core/publisher/Mono.html#fromCallable-java.util.concurrent.Callable-[Mono#fromCallable], {javadoc}/reactor/core/publisher/Mono.html#fromRunnable-java.lang.Runnable-[Mono#fromRunnable] +** a https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html[CompletableFuture]: {javadoc}/reactor/core/publisher/Mono.html#fromFuture-java.util.concurrent.CompletableFuture-[Mono#fromFuture] +* that completes: `empty` ({javadoc}/reactor/core/publisher/Flux.html#empty--[Flux]|link:{javadoc}/reactor/core/publisher/Mono.html#empty--[Mono]) +* that errors 
immediately: `error` ({javadoc}/reactor/core/publisher/Flux.html#error-java.lang.Throwable-[Flux]|link:{javadoc}/reactor/core/publisher/Mono.html#error-java.lang.Throwable-[Mono]) +** ...but lazily build the https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html[Throwable]: `error(Supplier)` ({javadoc}/reactor/core/publisher/Flux.html#error-java.util.function.Supplier-[Flux]|link:{javadoc}/reactor/core/publisher/Mono.html#error-java.util.function.Supplier-[Mono]) +* that never does anything: `never` ({javadoc}/reactor/core/publisher/Flux.html#never--[Flux]|link:{javadoc}/reactor/core/publisher/Mono.html#never--[Mono]) +* that is decided at subscription: `defer` ({javadoc}/reactor/core/publisher/Flux.html#defer-java.util.function.Supplier-[Flux]|link:{javadoc}/reactor/core/publisher/Mono.html#defer-java.util.function.Supplier-[Mono]) +* that depends on a disposable resource: `using` ({javadoc}/reactor/core/publisher/Flux.html#using-java.util.concurrent.Callable-java.util.function.Function-java.util.function.Consumer-[Flux]|link:{javadoc}/reactor/core/publisher/Mono.html#using-java.util.concurrent.Callable-java.util.function.Function-java.util.function.Consumer-[Mono]) * that generates events programmatically (can use state): -** synchronously and one-by-one: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#generate-java.util.concurrent.Callable-java.util.function.BiFunction-[Flux#generate] -** asynchronously (can also be sync), multiple emissions possible in one pass: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#create-java.util.function.Consumer-[Flux#create] -(https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#create-java.util.function.Consumer-[Mono#create] as well, without the multiple emission aspect) +** synchronously and one-by-one: 
{javadoc}/reactor/core/publisher/Flux.html#generate-java.util.concurrent.Callable-java.util.function.BiFunction-[Flux#generate] +** asynchronously (can also be sync), multiple emissions possible in one pass: {javadoc}/reactor/core/publisher/Flux.html#create-java.util.function.Consumer-[Flux#create] +({javadoc}/reactor/core/publisher/Mono.html#create-java.util.function.Consumer-[Mono#create] as well, without the multiple emission aspect) [[which.values]] -== Transforming an Existing Sequence +=== Transforming an Existing Sequence * I want to transform existing data: -** on a 1-to-1 basis (eg. strings to their length): `map` (https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#map-java.util.function.Function-[Flux]|link:https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#map-java.util.function.Function-[Mono]) -*** ...by just casting it: `cast` (https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#cast-java.lang.Class-[Flux]|link:https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#cast-java.lang.Class-[Mono]) -*** ...in order to materialize each source value's index: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#index--[Flux#index] -** on a 1-to-n basis (eg. 
strings to their characters): `flatMap` (https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#flatMap-java.util.function.Function-[Flux]|link:https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#flatMap-java.util.function.Function-[Mono]) + use a factory method -** on a 1-to-n basis with programmatic behavior for each source element and/or state: `handle` (https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#handle-java.util.function.BiConsumer-[Flux]|link:https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#handle-java.util.function.BiConsumer-[Mono]) -** running an asynchronous task for each source item (eg. urls to http request): `flatMap` (https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#flatMap-java.util.function.Function-[Flux]|link:https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#flatMap-java.util.function.Function-[Mono]) + an async https://www.reactive-streams.org/reactive-streams-1.0.3-javadoc/org/reactivestreams/Publisher.html?is-external=true[Publisher]-returning method -*** ...ignoring some data: conditionally return a https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#empty--[Mono.empty()] in the flatMap lambda -*** ...retaining the original sequence order: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#flatMapSequential-java.util.function.Function-[Flux#flatMapSequential] (this triggers the async processes immediately but reorders the results) -*** ...where the async task can return multiple values, from a https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html[Mono] source: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#flatMapMany-java.util.function.Function-[Mono#flatMapMany] +** on a 1-to-1 basis (eg. 
strings to their length): `map` ({javadoc}/reactor/core/publisher/Flux.html#map-java.util.function.Function-[Flux]|link:{javadoc}/reactor/core/publisher/Mono.html#map-java.util.function.Function-[Mono]) +*** ...by just casting it: `cast` ({javadoc}/reactor/core/publisher/Flux.html#cast-java.lang.Class-[Flux]|link:{javadoc}/reactor/core/publisher/Mono.html#cast-java.lang.Class-[Mono]) +*** ...in order to materialize each source value's index: {javadoc}/reactor/core/publisher/Flux.html#index--[Flux#index] +** on a 1-to-n basis (eg. strings to their characters): `flatMap` ({javadoc}/reactor/core/publisher/Flux.html#flatMap-java.util.function.Function-[Flux]|link:{javadoc}/reactor/core/publisher/Mono.html#flatMap-java.util.function.Function-[Mono]) + use a factory method +** on a 1-to-n basis with programmatic behavior for each source element and/or state: `handle` ({javadoc}/reactor/core/publisher/Flux.html#handle-java.util.function.BiConsumer-[Flux]|link:{javadoc}/reactor/core/publisher/Mono.html#handle-java.util.function.BiConsumer-[Mono]) +** running an asynchronous task for each source item (eg. 
urls to http request): `flatMap` ({javadoc}/reactor/core/publisher/Flux.html#flatMap-java.util.function.Function-[Flux]|link:{javadoc}/reactor/core/publisher/Mono.html#flatMap-java.util.function.Function-[Mono]) + an async https://www.reactive-streams.org/reactive-streams-1.0.3-javadoc/org/reactivestreams/Publisher.html?is-external=true[Publisher]-returning method +*** ...ignoring some data: conditionally return a {javadoc}/reactor/core/publisher/Mono.html#empty--[Mono.empty()] in the flatMap lambda +*** ...retaining the original sequence order: {javadoc}/reactor/core/publisher/Flux.html#flatMapSequential-java.util.function.Function-[Flux#flatMapSequential] (this triggers the async processes immediately but reorders the results) +*** ...where the async task can return multiple values, from a {javadoc}/reactor/core/publisher/Mono.html[Mono] source: {javadoc}/reactor/core/publisher/Mono.html#flatMapMany-java.util.function.Function-[Mono#flatMapMany] * I want to add pre-set elements to an existing sequence: -** at the start: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#startWith-T%2E%2E%2E-[Flux#startWith(T...)] -** at the end: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#concatWithValues-T%2E%2E%2E-[Flux#concatWithValues(T...)] - -* I want to aggregate a https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html[Flux]: (the `Flux#` prefix is assumed below) -** into a List: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#collectList--[collectList], https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#collectSortedList--[collectSortedList] -** into a Map: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#collectMap-java.util.function.Function-[collectMap], 
https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#collectMultimap-java.util.function.Function-[collectMultiMap] -** into an arbitrary container: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#collect-java.util.stream.Collector-[collect] -** into the size of the sequence: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#count--[count] -** by applying a function between each element (eg. running sum): https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#reduce-A-java.util.function.BiFunction-[reduce] -*** ...but emitting each intermediary value: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#scan-A-java.util.function.BiFunction-[scan] +** at the start: {javadoc}/reactor/core/publisher/Flux.html#startWith-T%2E%2E%2E-[Flux#startWith(T...)] +** at the end: {javadoc}/reactor/core/publisher/Flux.html#concatWithValues-T%2E%2E%2E-[Flux#concatWithValues(T...)] + +* I want to aggregate a {javadoc}/reactor/core/publisher/Flux.html[Flux]: (the `Flux#` prefix is assumed below) +** into a List: {javadoc}/reactor/core/publisher/Flux.html#collectList--[collectList], {javadoc}/reactor/core/publisher/Flux.html#collectSortedList--[collectSortedList] +** into a Map: {javadoc}/reactor/core/publisher/Flux.html#collectMap-java.util.function.Function-[collectMap], {javadoc}/reactor/core/publisher/Flux.html#collectMultimap-java.util.function.Function-[collectMultiMap] +** into an arbitrary container: {javadoc}/reactor/core/publisher/Flux.html#collect-java.util.stream.Collector-[collect] +** into the size of the sequence: {javadoc}/reactor/core/publisher/Flux.html#count--[count] +** by applying a function between each element (eg. 
running sum): {javadoc}/reactor/core/publisher/Flux.html#reduce-A-java.util.function.BiFunction-[reduce] +*** ...but emitting each intermediary value: {javadoc}/reactor/core/publisher/Flux.html#scan-A-java.util.function.BiFunction-[scan] ** into a boolean value from a predicate: -*** applied to all values (AND): https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#all-java.util.function.Predicate-[all] -*** applied to at least one value (OR): https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#any-java.util.function.Predicate-[any] -*** testing the presence of any value: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#hasElements--[hasElements] _(there is a https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html[Mono] equivalent in https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#hasElement--[hasElement])_ -*** testing the presence of a specific value: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#hasElement-T-[hasElement(T)] +*** applied to all values (AND): {javadoc}/reactor/core/publisher/Flux.html#all-java.util.function.Predicate-[all] +*** applied to at least one value (OR): {javadoc}/reactor/core/publisher/Flux.html#any-java.util.function.Predicate-[any] +*** testing the presence of any value: {javadoc}/reactor/core/publisher/Flux.html#hasElements--[hasElements] _(there is a {javadoc}/reactor/core/publisher/Mono.html[Mono] equivalent in {javadoc}/reactor/core/publisher/Mono.html#hasElement--[hasElement])_ +*** testing the presence of a specific value: {javadoc}/reactor/core/publisher/Flux.html#hasElement-T-[hasElement(T)] * I want to combine publishers... 
-** in sequential order: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#concat-org.reactivestreams.Publisher%2E%2E%2E-[Flux#concat] or `.concatWith(other)` (https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#concatWith-org.reactivestreams.Publisher-[Flux]|link:https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#concatWith-org.reactivestreams.Publisher-[Mono]) -*** ...but delaying any error until remaining publishers have been emitted: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#concatDelayError-org.reactivestreams.Publisher-[Flux#concatDelayError] -*** ...but eagerly subscribing to subsequent publishers: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#mergeSequential-int-org.reactivestreams.Publisher%2E%2E%2E-[Flux#mergeSequential] -** in emission order (combined items emitted as they come): https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#merge-int-org.reactivestreams.Publisher%2E%2E%2E-[Flux#merge] / `.mergeWith(other)` (https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#mergeWith-org.reactivestreams.Publisher-[Flux]|link:https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#mergeWith-org.reactivestreams.Publisher-[Mono]) -*** ...with different types (transforming merge): https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#zip-java.util.function.Function-org.reactivestreams.Publisher%2E%2E%2E-[Flux#zip] / https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#zipWith-org.reactivestreams.Publisher-[Flux#zipWith] +** in sequential order: {javadoc}/reactor/core/publisher/Flux.html#concat-org.reactivestreams.Publisher%2E%2E%2E-[Flux#concat] or `.concatWith(other)` 
({javadoc}/reactor/core/publisher/Flux.html#concatWith-org.reactivestreams.Publisher-[Flux]|link:{javadoc}/reactor/core/publisher/Mono.html#concatWith-org.reactivestreams.Publisher-[Mono]) +*** ...but delaying any error until remaining publishers have been emitted: {javadoc}/reactor/core/publisher/Flux.html#concatDelayError-org.reactivestreams.Publisher-[Flux#concatDelayError] +*** ...but eagerly subscribing to subsequent publishers: {javadoc}/reactor/core/publisher/Flux.html#mergeSequential-int-org.reactivestreams.Publisher%2E%2E%2E-[Flux#mergeSequential] +** in emission order (combined items emitted as they come): {javadoc}/reactor/core/publisher/Flux.html#merge-int-org.reactivestreams.Publisher%2E%2E%2E-[Flux#merge] / `.mergeWith(other)` ({javadoc}/reactor/core/publisher/Flux.html#mergeWith-org.reactivestreams.Publisher-[Flux]|link:{javadoc}/reactor/core/publisher/Mono.html#mergeWith-org.reactivestreams.Publisher-[Mono]) +*** ...with different types (transforming merge): {javadoc}/reactor/core/publisher/Flux.html#zip-java.util.function.Function-org.reactivestreams.Publisher%2E%2E%2E-[Flux#zip] / {javadoc}/reactor/core/publisher/Flux.html#zipWith-org.reactivestreams.Publisher-[Flux#zipWith] ** by pairing values: -*** from 2 Monos into a https://projectreactor.io/docs/core/release/api/reactor/util/function/Tuple2.html[Tuple2]: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#zipWith-reactor.core.publisher.Mono-[Mono#zipWith] -*** from n Monos when they all completed: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#zip-java.util.function.Function-reactor.core.publisher.Mono%2E%2E%2E-[Mono#zip] +*** from 2 Monos into a {javadoc}/reactor/util/function/Tuple2.html[Tuple2]: {javadoc}/reactor/core/publisher/Mono.html#zipWith-reactor.core.publisher.Mono-[Mono#zipWith] +*** from n Monos when they all completed: 
{javadoc}/reactor/core/publisher/Mono.html#zip-java.util.function.Function-reactor.core.publisher.Mono%2E%2E%2E-[Mono#zip] ** by coordinating their termination: -*** from 1 Mono and any source into a https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html[Mono]: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#and-org.reactivestreams.Publisher-[Mono#and] -*** from n sources when they all completed: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#when-java.lang.Iterable-[Mono#when] +*** from 1 Mono and any source into a {javadoc}/reactor/core/publisher/Mono.html[Mono]: {javadoc}/reactor/core/publisher/Mono.html#and-org.reactivestreams.Publisher-[Mono#and] +*** from n sources when they all completed: {javadoc}/reactor/core/publisher/Mono.html#when-java.lang.Iterable-[Mono#when] *** into an arbitrary container type: -**** each time all sides have emitted: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#zip-java.util.function.Function-org.reactivestreams.Publisher%2E%2E%2E-[Flux#zip] (up to the smallest cardinality) -**** each time a new value arrives at either side: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#combineLatest-java.util.function.Function-int-org.reactivestreams.Publisher%2E%2E%2E-[Flux#combineLatest] +**** each time all sides have emitted: {javadoc}/reactor/core/publisher/Flux.html#zip-java.util.function.Function-org.reactivestreams.Publisher%2E%2E%2E-[Flux#zip] (up to the smallest cardinality) +**** each time a new value arrives at either side: {javadoc}/reactor/core/publisher/Flux.html#combineLatest-java.util.function.Function-int-org.reactivestreams.Publisher%2E%2E%2E-[Flux#combineLatest] ** selecting the first publisher which... 
-*** produces a _value_ (`onNext`): `firstWithValue` (https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#firstWithValue-java.lang.Iterable-[Flux]|link:https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#firstWithValue-java.lang.Iterable-[Mono]) -*** produces _any signal_: `firstWithSignal` (https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#firstWithSignal-java.lang.Iterable-[Flux]|link:https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#firstWithSignal-java.lang.Iterable-[Mono]) +*** produces a _value_ (`onNext`): `firstWithValue` ({javadoc}/reactor/core/publisher/Flux.html#firstWithValue-java.lang.Iterable-[Flux]|link:{javadoc}/reactor/core/publisher/Mono.html#firstWithValue-java.lang.Iterable-[Mono]) +*** produces _any signal_: `firstWithSignal` ({javadoc}/reactor/core/publisher/Flux.html#firstWithSignal-java.lang.Iterable-[Flux]|link:{javadoc}/reactor/core/publisher/Mono.html#firstWithSignal-java.lang.Iterable-[Mono]) ** triggered by the elements in a source sequence: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#switchMap-java.util.function.Function-[switchMap] (each source element is mapped to a Publisher) ** triggered by the start of the next publisher in a sequence of publishers: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#switchOnNext-org.reactivestreams.Publisher-[switchOnNext] @@ -131,7 +131,7 @@ I want to deal with: ** ...expanding the graph depth first: `expandDeep(Function)` (https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#expandDeep-java.util.function.Function-[Flux]|link:https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#expandDeep-java.util.function.Function-[Mono]) [[which.peeking]] -== Peeking into a Sequence +=== Peeking into a Sequence * Without modifying the
final sequence, I want to: ** get notified of / execute additional behavior (sometimes referred to as "side-effects") on: @@ -159,7 +159,7 @@ I want to deal with: ** as a line in a log: `log` (https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#log--[Flux]|link:https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#log--[Mono]) [[which.filtering]] -== Filtering a Sequence +=== Filtering a Sequence * I want to filter a sequence: ** based on an arbitrary criteria: `filter` (https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#filter-java.util.function.Predicate-[Flux]|link:https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#filter-java.util.function.Predicate-[Mono]) @@ -204,7 +204,7 @@ I want to deal with: [[which.errors]] -== Handling Errors +=== Handling Errors * I want to create an erroring sequence: `error` (https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#error-java.lang.Throwable-[Flux]|link:https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#error-java.lang.Throwable-[Mono])... ** ...to replace the completion of a successful https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html[Flux]: `.concat(Flux.error(e))` @@ -240,7 +240,7 @@ I want to deal with: *** ...and applying a strategy when bounded buffer also overflows: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#onBackpressureBuffer-int-reactor.core.publisher.BufferOverflowStrategy-[Flux#onBackpressureBuffer] with a https://projectreactor.io/docs/core/release/api/reactor/core/publisher/BufferOverflowStrategy.html[BufferOverflowStrategy] [[which.time]] -== Working with Time +=== Working with Time * I want to associate emissions with a timing measured... 
** ...with best available precision and versatility of provided data: `timed` (https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#timed--[Flux]|link:https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#timed--[Mono]) @@ -264,7 +264,7 @@ I want to deal with: ** before the subscription happens: `delaySubscription` (https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#delaySubscription-java.time.Duration-[Flux]|link:https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#delaySubscription-java.time.Duration-[Mono]) [[which.window]] -== Splitting a https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html[Flux] +=== Splitting a https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html[Flux] * I want to split a https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html[Flux] into a `Flux>`, by a boundary criteria: ** of size: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#window-int-[window(int)] @@ -294,7 +294,7 @@ I want to deal with: TIP: Note that this returns a `Flux>`, each inner https://projectreactor.io/docs/core/release/api/reactor/core/publisher/GroupedFlux.html[GroupedFlux] shares the same `K` key accessible through https://projectreactor.io/docs/core/release/api/reactor/core/publisher/GroupedFlux.html#key--[key()]. 
[[which.blocking]] -== Going Back to the Synchronous World +=== Going Back to the Synchronous World Note: all of these methods except https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#toFuture--[Mono#toFuture] will throw an https://docs.oracle.com/javase/8/docs/api/java/lang/UnsupportedOperationException.html?is-external=true[UnsupportedOperatorException] if called from within a https://projectreactor.io/docs/core/release/api/reactor/core/scheduler/Scheduler.html[Scheduler] marked as "non-blocking only" (by default https://projectreactor.io/docs/core/release/api/reactor/core/scheduler/Schedulers.html#parallel--[parallel()] and https://projectreactor.io/docs/core/release/api/reactor/core/scheduler/Schedulers.html#single--[single()]). @@ -313,7 +313,7 @@ within a https://projectreactor.io/docs/core/release/api/reactor/core/scheduler/ ** a https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html[CompletableFuture]: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Mono.html#toFuture--[Mono#toFuture] [[which.multicasting]] -== Multicasting a https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html[Flux] to several https://www.reactive-streams.org/reactive-streams-1.0.3-javadoc/org/reactivestreams/Subscriber.html?is-external=true[Subscribers] +=== Multicasting a https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html[Flux] to several https://www.reactive-streams.org/reactive-streams-1.0.3-javadoc/org/reactivestreams/Subscriber.html?is-external=true[Subscribers] * I want to connect multiple https://www.reactive-streams.org/reactive-streams-1.0.3-javadoc/org/reactivestreams/Subscriber.html?is-external=true[Subscriber] to a https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html[Flux]: ** and decide when to trigger the source with 
https://projectreactor.io/docs/core/release/api/reactor/core/publisher/ConnectableFlux.html#connect--[connect()]: https://projectreactor.io/docs/core/release/api/reactor/core/publisher/Flux.html#publish--[publish()] (returns a https://projectreactor.io/docs/core/release/api/reactor/core/publisher/ConnectableFlux.html[ConnectableFlux]) diff --git a/docs/modules/ROOT/pages/apdx-optimizations.adoc b/docs/modules/ROOT/pages/apdx-optimizations.adoc index 5d2dbe3179..d68bd7b6dc 100644 --- a/docs/modules/ROOT/pages/apdx-optimizations.adoc +++ b/docs/modules/ROOT/pages/apdx-optimizations.adoc @@ -1,7 +1,8 @@ -== Automatic optimizations +[[automatic-optimizations]] += Automatic optimizations [[macrofusion]] -=== Macro-fusion +== Macro-fusion [[microfusion]] -=== Micro-fusion +== Micro-fusion diff --git a/docs/modules/ROOT/pages/apdx-reactorExtra.adoc b/docs/modules/ROOT/pages/apdx-reactorExtra.adoc index d5c26c1d69..3c94357e8c 100644 --- a/docs/modules/ROOT/pages/apdx-reactorExtra.adoc +++ b/docs/modules/ROOT/pages/apdx-reactorExtra.adoc @@ -1,5 +1,5 @@ [[reactor-extra]] -= Reactor-Extra +== Reactor-Extra The `reactor-extra` artifact contains additional operators and utilities that are for users of `reactor-core` with advanced needs, or incubating operators. @@ -7,7 +7,6 @@ users of `reactor-core` with advanced needs, or incubating operators. As this is a separate artifact, you need to explicitly add it to your build. The following example shows how to do so in Gradle: -==== [source,groovy] ---- dependencies { @@ -15,12 +14,11 @@ dependencies { compile 'io.projectreactor.addons:reactor-extra' <1> } ---- -<1> Add the reactor extra artifact in addition to core. See <> for details +<1> Add the reactor extra artifact in addition to core. See xref:gettingStarted.adoc#getting[Getting Reactor] for details about why you do not need to specify a version if you use the BOM, usage in Maven, and other details. 
-==== [[extra-tuples]] -== `TupleUtils` and Functional Interfaces +=== `TupleUtils` and Functional Interfaces The `reactor.function` package contains functional interfaces that complement the Java 8 `Function`, `Predicate`, and `Consumer` interfaces, for three to eight values. @@ -30,8 +28,8 @@ interfaces to a similar interface on the corresponding `Tuple`. This lets you easily work with independent parts of any `Tuple`, as the following example shows: -==== [source,java] +[%unbreakable] ---- .map(tuple -> { String firstName = tuple.getT1(); @@ -41,25 +39,23 @@ This lets you easily work with independent parts of any `Tuple`, as the followin return new Customer(firstName, lastName, address); }); ---- -==== You can rewrite the preceding example as follows: -==== [source,java] +[%unbreakable] ---- .map(TupleUtils.function(Customer::new)); // <1> ---- <1> (as `Customer` constructor conforms to `Function3` functional interface signature) -==== [[extra-math]] -== Math Operators With `MathFlux` +=== Math Operators With `MathFlux` The `reactor.math` package contains a `MathFlux` specialized version of `Flux` that offers mathematical operators, including `max`, `min`, `sumInt`, `averageDouble`, and others. [[extra-schedulers]] -== Schedulers +=== Schedulers Reactor-extra comes with the `ForkJoinPoolScheduler` (in the `reactor.scheduler.forkjoin` package): it uses the Java `ForkJoinPool` to execute tasks. 
diff --git a/docs/modules/ROOT/pages/apdx-writingOperator.adoc b/docs/modules/ROOT/pages/apdx-writingOperator.adoc index ae26eb1353..0756a37bac 100644 --- a/docs/modules/ROOT/pages/apdx-writingOperator.adoc +++ b/docs/modules/ROOT/pages/apdx-writingOperator.adoc @@ -1 +1,2 @@ -== A Primer on Writing an Operator +[[a-primer-on-writing-an-operator]] += A Primer on Writing an Operator diff --git a/docs/modules/ROOT/pages/appendices.adoc b/docs/modules/ROOT/pages/appendices.adoc new file mode 100644 index 0000000000..3918f10aa1 --- /dev/null +++ b/docs/modules/ROOT/pages/appendices.adoc @@ -0,0 +1,27 @@ += Appendices + +[appendix] +include::apdx-operatorChoice.adoc[] + +[appendix] +include::apdx-howtoReadMarbles.adoc[] + +[appendix] +include::faq.adoc[] + +[appendix] +include::apdx-reactorExtra.adoc[] + +//TODO later add appendices about internals, writing operators, fusion +//[appendix] +//include::apdx-implem.adoc[levelOffset=1] + +//[appendix] +//include::apdx-writingOperator.adoc[levelOffset=1] + +//[appendix] +//include::apdx-optimizations.adoc[levelOffset=1] + +//TODO later add appendix about migrating from RxJava? +//[appendix] +//include::apdx-migrating.adoc[levelOffset=1] diff --git a/docs/modules/ROOT/pages/coreFeatures.adoc b/docs/modules/ROOT/pages/coreFeatures.adoc index 295897ea87..6470415035 100644 --- a/docs/modules/ROOT/pages/coreFeatures.adoc +++ b/docs/modules/ROOT/pages/coreFeatures.adoc @@ -20,1143 +20,3 @@ Operators that change the maximum cardinality of the processing also switch to t relevant type. For instance, the `count` operator exists in `Flux`, but it returns a `Mono`. -[[flux]] -== `Flux`, an Asynchronous Sequence of 0-N Items - -The following image shows how a `Flux` transforms items: - -image::images/flux.svg[Flux] - -A `Flux` is a standard `Publisher` that represents an asynchronous sequence of 0 to N -emitted items, optionally terminated by either a completion signal or an error. 
-As in the Reactive Streams spec, these three types of signal translate to calls to a downstream -Subscriber's `onNext`, `onComplete`, and `onError` methods. - -With this large scope of possible signals, `Flux` is the general-purpose reactive type. -Note that all events, even terminating ones, are optional: no `onNext` event but an -`onComplete` event represents an _empty_ finite sequence, but remove the `onComplete` and -you have an _infinite_ empty sequence (not particularly useful, except for tests around cancellation). -Similarly, infinite sequences are not necessarily empty. For example, `Flux.interval(Duration)` -produces a `Flux` that is infinite and emits regular ticks from a clock. - -[[mono]] -== `Mono`, an Asynchronous 0-1 Result - -The following image shows how a `Mono` transforms an item: - -image::images/mono.svg[Mono] - -A `Mono` is a specialized `Publisher` that emits at most one item _via_ the -`onNext` signal then terminates with an `onComplete` signal (successful `Mono`, -with or without value), or only emits a single `onError` signal (failed `Mono`). - - -Most `Mono` implementations are expected to immediately call `onComplete` on their -`Subscriber` after having called `onNext`. `Mono.never()` is an outlier: it doesn't -emit any signal, which is not technically forbidden although not terribly useful outside -of tests. On the other hand, a combination of `onNext` and `onError` is explicitly forbidden. - -`Mono` offers only a subset of the operators that are available for a `Flux`, and -some operators (notably those that combine the `Mono` with another `Publisher`) -switch to a `Flux`. -For example, `Mono#concatWith(Publisher)` returns a `Flux` while `Mono#then(Mono)` -returns another `Mono`. - -Note that you can use a `Mono` to represent no-value asynchronous processes that only -have the concept of completion (similar to a `Runnable`). To create one, you can use an empty -`Mono`. 
- -== Simple Ways to Create a Flux or Mono and Subscribe to It - -The easiest way to get started with `Flux` and `Mono` is to use one of the numerous -factory methods found in their respective classes. - -For instance, to create a sequence of `String`, you can either enumerate them or put them -in a collection and create the Flux from it, as follows: - -==== -[source,java] ----- -Flux seq1 = Flux.just("foo", "bar", "foobar"); - -List iterable = Arrays.asList("foo", "bar", "foobar"); -Flux seq2 = Flux.fromIterable(iterable); ----- -==== - -Other examples of factory methods include the following: - -==== -[source,java] ----- -Mono noData = Mono.empty(); <1> - -Mono data = Mono.just("foo"); - -Flux numbersFromFiveToSeven = Flux.range(5, 3); <2> ----- -<1> Notice the factory method honors the generic type even though it has no value. -<2> The first parameter is the start of the range, while the second parameter is the -number of items to produce. -==== - -When it comes to subscribing, `Flux` and `Mono` make use of Java 8 lambdas. You -have a wide choice of `.subscribe()` variants that take lambdas for different -combinations of callbacks, as shown in the following method signatures: - -[[subscribeMethods]] -.Lambda-based subscribe variants for `Flux` -==== -[source,java] ----- -subscribe(); <1> - -subscribe(Consumer consumer); <2> - -subscribe(Consumer consumer, - Consumer errorConsumer); <3> - -subscribe(Consumer consumer, - Consumer errorConsumer, - Runnable completeConsumer); <4> - -subscribe(Consumer consumer, - Consumer errorConsumer, - Runnable completeConsumer, - Consumer subscriptionConsumer); <5> ----- -<1> Subscribe and trigger the sequence. -<2> Do something with each produced value. -<3> Deal with values but also react to an error. -<4> Deal with values and errors but also run some code when the sequence successfully -completes. 
-<5> Deal with values and errors and successful completion but also do something with the -`Subscription` produced by this `subscribe` call. -==== - -TIP: These variants return a reference to the subscription that you can use to cancel the -subscription when no more data is needed. Upon cancellation, the source should stop -producing values and clean up any resources it created. This cancel-and-clean-up behavior -is represented in Reactor by the general-purpose `Disposable` interface. - -include::subscribe-details.adoc[] - -include::subscribe-backpressure.adoc[] - -//the leveloffset seems to be absolute from root -include::producing.adoc[leveloffset=2] - -[[schedulers]] -== Threading and Schedulers - -Reactor, like RxJava, can be considered to be *concurrency-agnostic*. That is, it does not -enforce a concurrency model. Rather, it leaves you, the developer, in command. However, -that does not prevent the library from helping you with concurrency. - -Obtaining a `Flux` or a `Mono` does not necessarily mean that it runs in a dedicated -`Thread`. Instead, most operators continue working in the `Thread` on which the -previous operator executed. Unless specified, the topmost operator (the source) -itself runs on the `Thread` in which the `subscribe()` call was made. The following -example runs a `Mono` in a new thread: - -==== -[source,java] ----- -public static void main(String[] args) throws InterruptedException { - final Mono mono = Mono.just("hello "); //<1> - - Thread t = new Thread(() -> mono - .map(msg -> msg + "thread ") - .subscribe(v -> //<2> - System.out.println(v + Thread.currentThread().getName()) //<3> - ) - ) - t.start(); - t.join(); - -} ----- -<1> The `Mono` is assembled in thread `main`. -<2> However, it is subscribed to in thread `Thread-0`. 
-<3> As a consequence, both the `map` and the `onNext` callback actually run in `Thread-0` -==== - -The preceding code produces the following output: - -==== -[source] ----- -hello thread Thread-0 ----- -==== - -In Reactor, the execution model and where the execution happens is determined by the -`Scheduler` that is used. A -https://projectreactor.io/docs/core/release/api/reactor/core/scheduler/Scheduler.html[`Scheduler`] -has scheduling responsibilities similar to an `ExecutorService`, but having a -dedicated abstraction lets it do more, notably acting as a clock and enabling -a wider range of implementations (virtual time for tests, trampolining or -immediate scheduling, and so on). - -The https://projectreactor.io/docs/core/release/api/reactor/core/scheduler/Schedulers.html[`Schedulers`] -class has static methods that give access to the following execution contexts: - -* No execution context (`Schedulers.immediate()`): at processing time, the submitted `Runnable` -will be directly executed, effectively running them on the current `Thread` (can be seen as a "null object" or no-op `Scheduler`). -* A single, reusable thread (`Schedulers.single()`). Note that this method reuses the -same thread for all callers, until the Scheduler is disposed. If you want a per-call -dedicated thread, use `Schedulers.newSingle()` for each call. -* An unbounded elastic thread pool (`Schedulers.elastic()`). This one is no longer preferred -with the introduction of `Schedulers.boundedElastic()`, as it has a tendency to hide backpressure -problems and lead to too many threads (see below). -* A bounded elastic thread pool (`Schedulers.boundedElastic()`). This is a handy way to -give a blocking process its own thread so that it does not tie up other resources. This is a better choice for I/O blocking work. See -<>, but doesn't pressure the system too much with new threads. 
-Starting from 3.6.0 this can offer two different implementations depending on the setup: - - `ExecutorService`-based, which reuses platform threads between tasks. This -implementation, like its predecessor `elastic()`, creates new worker pools as needed -and reuses idle ones. Worker pools that stay idle for too long (the default is 60s) are -also disposed. Unlike its `elastic()` predecessor, it has a cap on the number of backing threads it can create (default is number of CPU cores x 10). -Up to 100 000 tasks submitted after the cap has been reached are enqueued and will be re-scheduled when a thread becomes available -(when scheduling with a delay, the delay starts when the thread becomes available). - - Thread-per-task-based, designed to run on `VirtualThread` instances. -To embrace that functionality, the application should run in Java 21+ environment and set the `reactor.schedulers.defaultBoundedElasticOnVirtualThreads` system property to `true`. -Once the above is set, the shared `Schedulers.boundedElastic()` return a specific implementation -of `BoundedElasticScheduler` tailored to run every task on a new instance of the -`VirtualThread` class. This implementation is similar in terms of the behavior to the -`ExecutorService`-based one but does not have idle pool and creates a new `VirtualThread` -for each task. -* A fixed pool of workers that is tuned for parallel work (`Schedulers.parallel()`). It -creates as many workers as you have CPU cores. - -Additionally, you can create a `Scheduler` out of any pre-existing `ExecutorService` by -using `Schedulers.fromExecutorService(ExecutorService)`. (You can also create one from an -`Executor`, although doing so is discouraged.) - -You can also create new instances of the various scheduler types by using the `newXXX` -methods. For example, `Schedulers.newParallel(yourScheduleName)` creates a new parallel -scheduler named `yourScheduleName`. 
- -[WARNING] -==== -While `boundedElastic` is made to help with legacy blocking code if it cannot be avoided, -`single` and `parallel` are not. As a consequence, the use of Reactor blocking APIs -(`block()`, `blockFirst()`, `blockLast()` (as well as iterating over `toIterable()` -or `toStream()`) inside the default single and parallel schedulers) results in -an `IllegalStateException` being thrown. - -Custom `Schedulers` can also be marked as "non blocking only" by creating instances of `Thread` -that implement the `NonBlocking` marker interface. -==== - -Some operators use a specific scheduler from `Schedulers` by default (and usually give -you the option of providing a different one). For instance, calling the -`Flux.interval(Duration.ofMillis(300))` factory method produces a `Flux` that ticks every 300ms. -By default, this is enabled by `Schedulers.parallel()`. The following line changes the -Scheduler to a new instance similar to `Schedulers.single()`: - -==== -[source,java] ----- -Flux.interval(Duration.ofMillis(300), Schedulers.newSingle("test")) ----- -==== - -Reactor offers two means of switching the execution context (or `Scheduler`) in a -reactive chain: `publishOn` and `subscribeOn`. Both take a `Scheduler` and let you switch -the execution context to that scheduler. But the placement of `publishOn` in the chain -matters, while the placement of `subscribeOn` does not. To understand that difference, -you first have to remember that <>. - -In Reactor, when you chain operators, you can wrap as many `Flux` and `Mono` -implementations inside one another as you need. Once you subscribe, a chain of -`Subscriber` objects is created, backward (up the chain) to the first -publisher. This is effectively hidden from you. All you can see is the outer layer of -`Flux` (or `Mono`) and `Subscription`, but these intermediate operator-specific -subscribers are where the real work happens. 
- -With that knowledge, we can have a closer look at the `publishOn` and `subscribeOn` -operators: - -=== The `publishOn` Method - -`publishOn` applies in the same way as any other operator, in the middle of the -subscriber chain. It takes signals from upstream and replays them downstream while -executing the callback on a worker from the associated `Scheduler`. Consequently, it -*affects where the subsequent operators execute* (until another `publishOn` is -chained in), as follows: - -* Changes the execution context to one `Thread` picked by the `Scheduler` -* as per the specification, `onNext` calls happen in sequence, so this uses up a single thread -* unless they work on a specific `Scheduler`, operators after `publishOn` continue execution on that same thread - -The following example uses the `publishOn` method: - -==== -[source,java] ----- -Scheduler s = Schedulers.newParallel("parallel-scheduler", 4); //<1> - -final Flux flux = Flux - .range(1, 2) - .map(i -> 10 + i) //<2> - .publishOn(s) //<3> - .map(i -> "value " + i); //<4> - -new Thread(() -> flux.subscribe(System.out::println)); //<5> ----- -<1> Creates a new `Scheduler` backed by four `Thread` instances. -<2> The first `map` runs on the anonymous thread in <5>. -<3> The `publishOn` switches the whole sequence on a `Thread` picked from <1>. -<4> The second `map` runs on the `Thread` from <1>. -<5> This anonymous `Thread` is the one where the _subscription_ happens. -The print happens on the latest execution context, which is the one from `publishOn`. -==== - -=== The `subscribeOn` Method - -`subscribeOn` applies to the subscription process, when the backward chain is being -constructed. It is usually recommended to place it immediately after the source of data, -as intermediate operators can affect the context of the execution. - -However, this does not affect the -behavior of subsequent calls to `publishOn` -- they still switch the execution context for -the part of the chain after them. 
- -* Changes the `Thread` from which the *whole chain* of operators subscribes -* Picks one thread from the `Scheduler` - -NOTE: Only the closest `subscribeOn` call in the downstream chain effectively - schedules subscription and request signals to the source or operators that can - intercept them (`doFirst`, `doOnRequest`). Using multiple `subscribeOn` calls will - introduce unnecessary Thread switches that have no value. - -The following example uses the `subscribeOn` method: - -==== -[source,java] ----- -Scheduler s = Schedulers.newParallel("parallel-scheduler", 4); //<1> - -final Flux flux = Flux - .range(1, 2) - .map(i -> 10 + i) //<2> - .subscribeOn(s) //<3> - .map(i -> "value " + i); //<4> - -new Thread(() -> flux.subscribe(System.out::println)); //<5> ----- -<1> Creates a new `Scheduler` backed by four `Thread`. -<2> The first `map` runs on one of these four threads... -<3> ...because `subscribeOn` switches the whole sequence right from subscription time (<5>). -<4> The second `map` also runs on same thread. -<5> This anonymous `Thread` is the one where the _subscription_ initially happens, but `subscribeOn` immediately shifts it to one of the four scheduler threads. -==== - -[[error.handling]] -== Handling Errors - -TIP: For a quick look at the available operators for error handling, see -<>. - -In Reactive Streams, errors are terminal events. As soon as an error occurs, it stops the -sequence and gets propagated down the chain of operators to the last step, the -`Subscriber` you defined and its `onError` method. - -Such errors should still be dealt with at the application level. For instance, you might -display an error notification in a UI or send a meaningful error payload in a REST -endpoint. For this reason, the subscriber's `onError` method should always be defined. - -WARNING: If not defined, `onError` throws an `UnsupportedOperationException`. You can -further detect and triage it with the `Exceptions.isErrorCallbackNotImplemented` method. 
- -Reactor also offers alternative means of dealing with errors in the middle of the chain, -as error-handling operators. The following example shows how to do so: - -==== -[source,java] ----- -Flux.just(1, 2, 0) - .map(i -> "100 / " + i + " = " + (100 / i)) //this triggers an error with 0 - .onErrorReturn("Divided by zero :("); // error handling example ----- -==== - -IMPORTANT: Before you learn about error-handling operators, you must keep in mind that -_any error in a reactive sequence is a terminal event_. Even if an error-handling -operator is used, it does not let the original sequence continue. Rather, it -converts the `onError` signal into the start of a new sequence (the fallback one). In -other words, it replaces the terminated sequence _upstream_ of it. - -Now we can consider each means of error handling one-by-one. When relevant, we make a -parallel with imperative programming's `try` patterns. - -=== Error Handling Operators - -You may be familiar with several ways of dealing with exceptions in a try-catch block. -Most notably, these include the following: - -* Catch and return a static default value. -* Catch and execute an alternative path with a fallback method. -* Catch and dynamically compute a fallback value. -* Catch, wrap to a `BusinessException`, and re-throw. -* Catch, log an error-specific message, and re-throw. -* Use the `finally` block to clean up resources or a Java 7 "`try-with-resource`" construct. - -All of these have equivalents in Reactor, in the form of error-handling operators. -Before looking into these operators, we first want to establish a parallel between a reactive -chain and a try-catch block. - -When subscribing, the `onError` callback at the end of the chain is akin to a `catch` -block. 
There, execution skips to the catch in case an `Exception` is thrown, as the -following example shows: - -==== -[source,java] ----- -Flux s = Flux.range(1, 10) - .map(v -> doSomethingDangerous(v)) // <1> - .map(v -> doSecondTransform(v)); // <2> -s.subscribe(value -> System.out.println("RECEIVED " + value), // <3> - error -> System.err.println("CAUGHT " + error) // <4> -); ----- -<1> A transformation that can throw an exception is performed. -<2> If everything went well, a second transformation is performed. -<3> Each successfully transformed value is printed out. -<4> In case of an error, the sequence terminates and an error message is displayed. -==== - -The preceding example is conceptually similar to the following try-catch block: - -==== -[source,java] ----- -try { - for (int i = 1; i < 11; i++) { - String v1 = doSomethingDangerous(i); // <1> - String v2 = doSecondTransform(v1); // <2> - System.out.println("RECEIVED " + v2); - } -} catch (Throwable t) { - System.err.println("CAUGHT " + t); // <3> -} ----- -<1> If an exception is thrown here... -<2> ...the rest of the loop is skipped... -<3> ... and the execution goes straight to here. -==== - -Now that we have established a parallel, we can look at the different error handling cases -and their equivalent operators. - -==== Static Fallback Value - -The equivalent of "`Catch and return a static default value`" is `onErrorReturn`. 
-The following example shows how to use it: - -==== -[source,java] ----- -try { - return doSomethingDangerous(10); -} -catch (Throwable error) { - return "RECOVERED"; -} ----- -==== - -The following example shows the Reactor equivalent: - -==== -[source,java] ----- -Flux.just(10) - .map(this::doSomethingDangerous) - .onErrorReturn("RECOVERED"); ----- -==== - -You also have the option of applying a `Predicate` on the exception to decide -whether or not to recover, as the following example shows: - -==== -[source,java] ----- -Flux.just(10) - .map(this::doSomethingDangerous) - .onErrorReturn(e -> e.getMessage().equals("boom10"), "recovered10"); //<1> ----- -<1> Recover only if the message of the exception is `"boom10"` -==== - -==== Catch and swallow the error - -If you don't even want to replace the exception with a fallback value, but instead to ignore it and -only propagate elements that have been produced so far, what you want is essentially replacing -the `onError` signal with an `onComplete` signal. This can be done by the `onErrorComplete` operator: - -==== -[source,java] ----- -Flux.just(10,20,30) - .map(this::doSomethingDangerousOn30) - .onErrorComplete(); //<1> ----- -<1> Recover by turning the `onError` into an `onComplete` -==== - -Like `onErrorReturn`, `onErrorComplete` has variants that let you filter which exceptions -to fall back on, based either on the exception's class or on a `Predicate`. - -==== Fallback Method - -If you want more than a single default value and you have an alternative (safer) way of -processing your data, you can use `onErrorResume`. This would be the equivalent of -"`Catch and execute an alternative path with a fallback method`". 
- -For example, if your nominal process is fetching data from an external and unreliable -service but you also keep a local cache of the same data that _can_ be a bit more out of -date but is more reliable, you could do the following: - -==== -[source,java] ----- -String v1; -try { - v1 = callExternalService("key1"); -} -catch (Throwable error) { - v1 = getFromCache("key1"); -} - -String v2; -try { - v2 = callExternalService("key2"); -} -catch (Throwable error) { - v2 = getFromCache("key2"); -} ----- -==== - -The following example shows the Reactor equivalent: - -==== -[source,java] ----- -Flux.just("key1", "key2") - .flatMap(k -> callExternalService(k) // <1> - .onErrorResume(e -> getFromCache(k)) // <2> - ); ----- -<1> For each key, asynchronously call the external service. -<2> If the external service call fails, fall back to the cache for that key. Note that -we always apply the same fallback, whatever the source error, `e`, is. -==== - -Like `onErrorReturn`, `onErrorResume` has variants that let you filter which exceptions -to fall back on, based either on the exception's class or on a `Predicate`. The fact that it -takes a `Function` also lets you choose a different fallback sequence to switch to, -depending on the error encountered. The following example shows how to do so: - -==== -[source,java] ----- -Flux.just("timeout1", "unknown", "key2") - .flatMap(k -> callExternalService(k) - .onErrorResume(error -> { // <1> - if (error instanceof TimeoutException) // <2> - return getFromCache(k); - else if (error instanceof UnknownKeyException) // <3> - return registerNewEntry(k, "DEFAULT"); - else - return Flux.error(error); // <4> - }) - ); ----- -<1> The function allows dynamically choosing how to continue. -<2> If the source times out, hit the local cache. -<3> If the source says the key is unknown, create a new entry. -<4> In all other cases, "`re-throw`". 
-==== - -==== Dynamic Fallback Value - -Even if you do not have an alternative (safer) way of processing your data, you might want -to compute a fallback value out of the exception you received. This would be the -equivalent of "`Catch and dynamically compute a fallback value`". - -For instance, if your return type (`MyWrapper`) has a variant dedicated to holding an exception (think -`Future.complete(T success)` versus `Future.completeExceptionally(Throwable error)`), you -could instantiate the error-holding variant and pass the exception. - -An imperative example would look like the following: - -==== -[source,java] ----- -try { - Value v = erroringMethod(); - return MyWrapper.fromValue(v); -} -catch (Throwable error) { - return MyWrapper.fromError(error); -} ----- -==== - -You can do this reactively in the same way as the fallback method solution, -by using `onErrorResume`, with a tiny bit of boilerplate, as follows: - -==== -[source,java] ----- -erroringFlux.onErrorResume(error -> Mono.just( // <1> - MyWrapper.fromError(error) // <2> -)); ----- -<1> Since you expect a `MyWrapper` representation of the error, you need to get a -`Mono` for `onErrorResume`. We use `Mono.just()` for that. -<2> We need to compute the value out of the exception. Here, we achieved that -by wrapping the exception with a relevant `MyWrapper` factory method. 
-==== - -==== Catch and Rethrow - -"Catch, wrap to a `BusinessException`, and re-throw" looks like the following in the -imperative world: - -==== -[source,java] ----- -try { - return callExternalService(k); -} -catch (Throwable error) { - throw new BusinessException("oops, SLA exceeded", error); -} ----- -==== - -In the "`fallback method`" example, the last line inside the `flatMap` gives us a hint -at achieving the same reactively, as follows: - -==== -[source,java] ----- -Flux.just("timeout1") - .flatMap(k -> callExternalService(k)) - .onErrorResume(original -> Flux.error( - new BusinessException("oops, SLA exceeded", original)) - ); ----- -==== - -However, there is a more straightforward way of achieving the same effect with `onErrorMap`: - -==== -[source,java] ----- -Flux.just("timeout1") - .flatMap(k -> callExternalService(k)) - .onErrorMap(original -> new BusinessException("oops, SLA exceeded", original)); ----- -==== - -==== Log or React on the Side - -For cases where you want the error to continue propagating but still want to react to -it without modifying the sequence (logging it, for instance), you can use the `doOnError` -operator. This is the equivalent of "`Catch, log an error-specific message, and re-throw`" -pattern, as the following example shows: - -==== -[source,java] ----- -try { - return callExternalService(k); -} -catch (RuntimeException error) { - //make a record of the error - log("uh oh, falling back, service failed for key " + k); - throw error; -} ----- -==== - -The `doOnError` operator, as well as all operators prefixed with `doOn` , are sometimes -referred to as having a "`side-effect`". They let you peek inside the sequence's events without -modifying them. 
- -Like the imperative example shown earlier, the following example still propagates the error yet -ensures that we at least log that the external service had a failure: - -==== -[source,java] ----- -LongAdder failureStat = new LongAdder(); -Flux flux = -Flux.just("unknown") - .flatMap(k -> callExternalService(k) // <1> - .doOnError(e -> { - failureStat.increment(); - log("uh oh, falling back, service failed for key " + k); // <2> - }) - // <3> - ); ----- -<1> The external service call that can fail... -<2> ...is decorated with a logging and stats side-effect... -<3> ...after which, it still terminates with an error, unless we use an error-recovery operator here. -==== - -We can also imagine we have statistic counters to increment as a second error side-effect. - -==== Using Resources and the Finally Block - -The last parallel to draw with imperative programming is the cleaning up that can be done -either by using a "`Use of the `finally` block to clean up resources`" or by using a -"`Java 7 try-with-resource construct`", both shown below: - -.Imperative use of finally -==== -[source,java] ----- -Stats stats = new Stats(); -stats.startTimer(); -try { - doSomethingDangerous(); -} -finally { - stats.stopTimerAndRecordTiming(); -} ----- -==== - -.Imperative use of try-with-resource -==== -[source,java] ----- -try (SomeAutoCloseable disposableInstance = new SomeAutoCloseable()) { - return disposableInstance.toString(); -} ----- -==== - -Both have their Reactor equivalents: `doFinally` and `using`. - -`doFinally` is about side-effects that you want to be executed whenever the -sequence terminates (with `onComplete` or `onError`) or is cancelled. -It gives you a hint as to what kind of termination triggered the side-effect. 
-The following example shows how to use `doFinally`: - -==== -[source,java] -.Reactive finally: `doFinally()` ----- -Stats stats = new Stats(); -LongAdder statsCancel = new LongAdder(); - -Flux flux = -Flux.just("foo", "bar") - .doOnSubscribe(s -> stats.startTimer()) - .doFinally(type -> { // <1> - stats.stopTimerAndRecordTiming();// <2> - if (type == SignalType.CANCEL) // <3> - statsCancel.increment(); - }) - .take(1); // <4> ----- -<1> `doFinally` consumes a `SignalType` for the type of termination. -<2> Similarly to `finally` blocks, we always record the timing. -<3> Here we also increment statistics in case of cancellation only. -<4> `take(1)` requests exactly 1 from upstream, and cancels after one item is emitted. -==== - -On the other hand, `using` handles the case where a `Flux` is derived from a -resource and that resource must be acted upon whenever processing is done. -In the following example, we replace the `AutoCloseable` interface of "`try-with-resource`" with a -`Disposable`: - -.The Disposable resource -==== -[source,java] ----- -AtomicBoolean isDisposed = new AtomicBoolean(); -Disposable disposableInstance = new Disposable() { - @Override - public void dispose() { - isDisposed.set(true); // <4> - } - - @Override - public String toString() { - return "DISPOSABLE"; - } -}; ----- -==== - -Now we can do the reactive equivalent of "`try-with-resource`" on it, which looks -like the following: - -.Reactive try-with-resource: `using()` -==== -[source,java] ----- -Flux flux = -Flux.using( - () -> disposableInstance, // <1> - disposable -> Flux.just(disposable.toString()), // <2> - Disposable::dispose // <3> -); ----- -<1> The first lambda generates the resource. Here, we return our mock `Disposable`. -<2> The second lambda processes the resource, returning a `Flux`. -<3> The third lambda is called when the `Flux` from <2> terminates or is cancelled, to -clean up resources. 
-<4> After subscription and execution of the sequence, the `isDisposed` atomic boolean -becomes `true`. -==== - -==== Demonstrating the Terminal Aspect of `onError` - -In order to demonstrate that all these operators cause the upstream original sequence to -terminate when an error happens, we can use a more visual example with a -`Flux.interval`. The `interval` operator ticks every x units of time with an increasing -`Long` value. The following example uses an `interval` operator: - -==== -[source,java] ----- -Flux flux = -Flux.interval(Duration.ofMillis(250)) - .map(input -> { - if (input < 3) return "tick " + input; - throw new RuntimeException("boom"); - }) - .onErrorReturn("Uh oh"); - -flux.subscribe(System.out::println); -Thread.sleep(2100); // <1> ----- -<1> Note that `interval` executes on a *timer* `Scheduler` by default. If we want -to run that example in a main class, we would need to add a `sleep` call here so that the -application does not exit immediately without any value being produced. -==== - -The preceding example prints out one line every 250ms, as follows: - -==== -[source] ----- -tick 0 -tick 1 -tick 2 -Uh oh ----- -==== - -Even with one extra second of runtime, no more tick comes in from the `interval`. The -sequence was indeed terminated by the error. - -==== Retrying - -There is another operator of interest with regards to error handling, and you might be -tempted to use it in the case described in the previous section. `retry`, as its name -indicates, lets you retry an error-producing sequence. - -The thing to keep in mind is that it works by *re-subscribing* to the upstream `Flux`. -This is really a different sequence, and the original one is still terminated. -To verify that, we can re-use the previous example and append a `retry(1)` to -retry once instead of using `onErrorReturn`. 
The following example shows how to do so: - -==== -[source,java] ----- -Flux.interval(Duration.ofMillis(250)) - .map(input -> { - if (input < 3) return "tick " + input; - throw new RuntimeException("boom"); - }) - .retry(1) - .elapsed() // <1> - .subscribe(System.out::println, System.err::println); // <2> - -Thread.sleep(2100); // <3> ----- -<1> `elapsed` associates each value with the duration since previous value was emitted. -<2> We also want to see when there is an `onError`. -<3> Ensure we have enough time for our 4x2 ticks. -==== - -The preceding example produces the following output: - -==== -[source] ----- -259,tick 0 -249,tick 1 -251,tick 2 -506,tick 0 <1> -248,tick 1 -253,tick 2 -java.lang.RuntimeException: boom ----- -<1> A new `interval` started, from tick 0. The additional 250ms duration is -coming from the 4th tick, the one that causes the exception and subsequent -retry. -==== - -As you can see from the preceding example, `retry(1)` merely re-subscribed to the original `interval` -once, restarting the tick from 0. The second time around, since the exception -still occurs, it gives up and propagates the error downstream. - -There is a more advanced version of `retry` (called `retryWhen`) that uses a "`companion`" -`Flux` to tell whether or not a particular failure should retry. This companion `Flux` is -created by the operator but decorated by the user, in order to customize the retry -condition. - -The companion `Flux` is a `Flux` that gets passed to a `Retry` strategy/function, -supplied as the sole parameter of `retryWhen`. As the user, you define that function and make it return a new -`Publisher`. The `Retry` class is an abstract class, but it offers a factory method if you -want to transform the companion with a simple lambda (`Retry.from(Function)`). - -Retry cycles go as follows: - -. Each time an error happens (giving potential for a retry), a `RetrySignal` is emitted into the -companion `Flux`, which has been decorated by your function. 
Having a `Flux` here -gives a bird eye's view of all the attempts so far. The `RetrySignal` gives access to the error -as well as metadata around it. -. If the companion `Flux` emits a value, a retry happens. -. If the companion `Flux` completes, the error is swallowed, the retry cycle stops, -and the resulting sequence completes, too. -. If the companion `Flux` produces an error (`e`), the retry cycle stops and the -resulting sequence errors with `e`. - -The distinction between the previous two cases is important. Simply completing the -companion would effectively swallow an error. Consider the following way of emulating -`retry(3)` by using `retryWhen`: - -==== -[source,java] ----- -Flux flux = Flux - .error(new IllegalArgumentException()) // <1> - .doOnError(System.out::println) // <2> - .retryWhen(Retry.from(companion -> // <3> - companion.take(3))); // <4> ----- -<1> This continuously produces errors, calling for retry attempts. -<2> `doOnError` before the retry lets us log and see all failures. -<3> The `Retry` is adapted from a very simple `Function` lambda -<4> Here, we consider the first three errors as retry-able (`take(3)`) and then give up. -==== - -In effect, the preceding example results in an empty `Flux`, but it completes successfully. Since -`retry(3)` on the same `Flux` would have terminated with the latest error, this -`retryWhen` example is not exactly the same as a `retry(3)`. - -Getting to the same behavior involves a few additional tricks: -include::snippetRetryWhenRetry.adoc[] - -TIP: One can use the builders exposed in `Retry` to achieve the same in a more fluent manner, as -well as more finely tuned retry strategies. For example: `errorFlux.retryWhen(Retry.max(3));`. - -TIP: You can use similar code to implement an "`exponential backoff and retry`" pattern, -as shown in the <>. 
- -The core-provided `Retry` helpers, `RetrySpec` and `RetryBackoffSpec`, both allow advanced customizations like: - -- setting the `filter(Predicate)` for the exceptions that can trigger a retry -- modifying such a previously set filter through `modifyErrorFilter(Function)` -- triggering a side effect like logging around the retry trigger (ie for backoff before and after the delay), provided the retry is validated (`doBeforeRetry()` and `doAfterRetry()` are additive) -- triggering an asynchronous `Mono` around the retry trigger, which allows to add asynchronous behavior on top of the base delay but thus further delay the trigger (`doBeforeRetryAsync` and `doAfterRetryAsync` are additive) -- customizing the exception in case the maximum number of attempts has been reached, through `onRetryExhaustedThrow(BiFunction)`. -By default, `Exceptions.retryExhausted(...)` is used, which can be distinguished with `Exceptions.isRetryExhausted(Throwable)` -- activating the handling of _transient errors_ (see below) - -===== Retrying with transient errors -Some long-lived sources may see sporadic bursts of errors followed by longer periods of time during which all is running smoothly. -This documentation refers to this pattern of errors as _transient errors_. - -In such cases, it would be desirable to deal with each burst in isolation, so that the next burst doesn't inherit the retry state from the previous one. -For instance, with an exponential backoff strategy each subsequent burst should delay retry attempts starting from the minimum backoff `Duration` instead of an ever-growing one. - -The `RetrySignal` interface, which represents `retryWhen` state, has a `totalRetriesInARow()` value which can be used for this. -Instead of the usual monotonically-increasing `totalRetries()` index, this secondary index is reset to 0 each time an error -is recovered from by the retry (ie. when a retry attempt results in an incoming `onNext` instead of an `onError` again). 
- -When setting the `transientErrors(boolean)` configuration parameter to `true` in the `RetrySpec` or `RetryBackoffSpec`, the resulting strategy makes use of that `totalRetriesInARow()` index, effectively dealing with _transient errors_. -These specs compute the retry pattern from the index, so in effect all other configuration parameters of the spec apply to each burst of error independently. - -==== -[source,java] ----- -AtomicInteger errorCount = new AtomicInteger(); // <1> -Flux transientFlux = httpRequest.get() // <2> - .doOnError(e -> errorCount.incrementAndGet()); - -transientFlux.retryWhen(Retry.max(2).transientErrors(true)) // <3> - .blockLast(); -assertThat(errorCount).hasValue(6); // <4> ----- -<1> We will count the number of errors in the retried sequence for illustration. -<2> We assume a http request source, eg. a streaming endpoint that will sometimes fail two times in a row, then recover. -<3> We use `retryWhen` on that source, configured for at most 2 retry attempts, but in `transientErrors` mode. -<4> At the end, a valid response is achieved and the `transientFlux` successfully completes after `6` attempts have been registered in `errorCount`. -==== - -Without the `transientErrors(true)`, the configured maximum attempt of `2` would be exceeded by the second burst and the whole sequence would have ultimately failed. - -[NOTE] -==== -If you want to locally try this without an actual http remote endpoint, you can implement a pseudo `httpRequest` method as a `Supplier`, as follows: - -===== -[source,java] ----- -final AtomicInteger transientHelper = new AtomicInteger(); -Supplier> httpRequest = () -> - Flux.generate(sink -> { // <1> - int i = transientHelper.getAndIncrement(); - if (i == 10) { // <2> - sink.next(i); - sink.complete(); - } - else if (i % 3 == 0) { // <3> - sink.next(i); - } - else { - sink.error(new IllegalStateException("Transient error at " + i)); // <4> - } - }); ----- -<1> We `generate` a source that has bursts of errors. 
-<2> It will successfully complete when the counter reaches 10. -<3> If the `transientHelper` atomic is at a multiple of `3`, we emit `onNext` and thus end the current burst. -<4> In other cases we emit an `onError`. That's 2 out of 3 times, so bursts of 2 `onError` interrupted by 1 `onNext`. -===== -==== - -=== Handling Exceptions in Operators or Functions - -In general, all operators can themselves contain code that potentially trigger an -exception or calls to a user-defined callback that can similarly fail, so they all -contain some form of error handling. - -As a rule of thumb, an unchecked exception is always propagated through `onError`. For -instance, throwing a `RuntimeException` inside a `map` function translates to an -`onError` event, as the following code shows: - -==== -[source,java] ----- -Flux.just("foo") - .map(s -> { throw new IllegalArgumentException(s); }) - .subscribe(v -> System.out.println("GOT VALUE"), - e -> System.out.println("ERROR: " + e)); ----- -==== - -The preceding code prints out the following: - -==== -[source] ----- -ERROR: java.lang.IllegalArgumentException: foo ----- -==== - -TIP: You can tune the `Exception` before it is passed to `onError`, through the use of a -<>. - -Reactor, however, defines a set of exceptions (such as `OutOfMemoryError`) that are -always deemed to be fatal. See the `Exceptions.throwIfFatal` method. These errors mean that -Reactor cannot keep operating and are thrown rather than propagated. - -NOTE: Internally, there are also cases where an unchecked exception still cannot be -propagated (most notably during the subscribe and request phases), due to concurrency -races that could lead to double `onError` or `onComplete` conditions. When these races -happen, the error that cannot be propagated is "`dropped`". These cases can still be -managed to some extent by using customizable hooks. See <>. 
- -You may ask: "`What about checked exceptions?`" - -If, for example, you need to call some method that declares it `throws` exceptions, you -still have to deal with those exceptions in a `try-catch` block. You have several -options, though: - -. Catch the exception and recover from it. The sequence continues normally. -. Catch the exception, wrap it into an _unchecked_ exception, and then throw it -(interrupting the sequence). The `Exceptions` utility class can help you with that (we -get to that next). -. If you need to return a `Flux` (for example, you are in a `flatMap`), wrap the -exception in an error-producing `Flux`, as follows: `return Flux.error(checkedException)`. (The -sequence also terminates.) - -Reactor has an `Exceptions` utility class that you can use to ensure that exceptions are -wrapped only if they are checked exceptions: - -* Use the `Exceptions.propagate` method to wrap exceptions, if necessary. It also calls -`throwIfFatal` first and does not wrap `RuntimeException`. -* Use the `Exceptions.unwrap` method to get the original unwrapped exception (going back -to the root cause of a hierarchy of reactor-specific exceptions). - -Consider the following example of a `map` that uses a conversion method that can throw an -`IOException`: - -==== -[source,java] ----- -public String convert(int i) throws IOException { - if (i > 3) { - throw new IOException("boom " + i); - } - return "OK " + i; -} ----- -==== - -Now imagine that you want to use that method in a `map`. You must now explicitly catch -the exception, and your map function cannot re-throw it. 
So you can propagate it to the -map's `onError` method as a `RuntimeException`, as follows: - -==== -[source,java] ----- -Flux converted = Flux - .range(1, 10) - .map(i -> { - try { return convert(i); } - catch (IOException e) { throw Exceptions.propagate(e); } - }); ----- -==== - -Later on, when subscribing to the preceding `Flux` and reacting to errors (such as in the -UI), you could revert back to the original exception if you want to do something -special for IOExceptions. The following example shows how to do so: - -==== -[source,java] ----- -converted.subscribe( - v -> System.out.println("RECEIVED: " + v), - e -> { - if (Exceptions.unwrap(e) instanceof IOException) { - System.out.println("Something bad happened with I/O"); - } else { - System.out.println("Something bad happened"); - } - } -); ----- -==== - -[[sinks]] -== Sinks -include::processors.adoc[leveloffset=3] diff --git a/docs/modules/ROOT/pages/coreFeatures/error-handling.adoc b/docs/modules/ROOT/pages/coreFeatures/error-handling.adoc new file mode 100644 index 0000000000..a71ebe56d1 --- /dev/null +++ b/docs/modules/ROOT/pages/coreFeatures/error-handling.adoc @@ -0,0 +1,787 @@ +[[error.handling]] += Handling Errors + +TIP: For a quick look at the available operators for error handling, see +xref:apdx-operatorChoice.adoc#which.errors[the relevant operator decision tree]. + +In Reactive Streams, errors are terminal events. As soon as an error occurs, it stops the +sequence and gets propagated down the chain of operators to the last step, the +`Subscriber` you defined and its `onError` method. + +Such errors should still be dealt with at the application level. For instance, you might +display an error notification in a UI or send a meaningful error payload in a REST +endpoint. For this reason, the subscriber's `onError` method should always be defined. + +WARNING: If not defined, `onError` throws an `UnsupportedOperationException`. 
You can +further detect and triage it with the `Exceptions.isErrorCallbackNotImplemented` method. + +Reactor also offers alternative means of dealing with errors in the middle of the chain, +as error-handling operators. The following example shows how to do so: + +[source,java] +[%unbreakable] +[%unbreakable] +---- +Flux.just(1, 2, 0) + .map(i -> "100 / " + i + " = " + (100 / i)) //this triggers an error with 0 + .onErrorReturn("Divided by zero :("); // error handling example +---- + +IMPORTANT: Before you learn about error-handling operators, you must keep in mind that +_any error in a reactive sequence is a terminal event_. Even if an error-handling +operator is used, it does not let the original sequence continue. Rather, it +converts the `onError` signal into the start of a new sequence (the fallback one). In +other words, it replaces the terminated sequence _upstream_ of it. + +Now we can consider each means of error handling one-by-one. When relevant, we make a +parallel with imperative programming's `try` patterns. + +[[error-handling-operators]] +== Error Handling Operators + +You may be familiar with several ways of dealing with exceptions in a try-catch block. +Most notably, these include the following: + +* Catch and return a static default value. +* Catch and execute an alternative path with a fallback method. +* Catch and dynamically compute a fallback value. +* Catch, wrap to a `BusinessException`, and re-throw. +* Catch, log an error-specific message, and re-throw. +* Use the `finally` block to clean up resources or a Java 7 "`try-with-resource`" construct. + +All of these have equivalents in Reactor, in the form of error-handling operators. +Before looking into these operators, we first want to establish a parallel between a reactive +chain and a try-catch block. + +When subscribing, the `onError` callback at the end of the chain is akin to a `catch` +block. 
There, execution skips to the catch in case an `Exception` is thrown, as the +following example shows: + +[source,java] +[%unbreakable] +---- +Flux s = Flux.range(1, 10) + .map(v -> doSomethingDangerous(v)) // <1> + .map(v -> doSecondTransform(v)); // <2> +s.subscribe(value -> System.out.println("RECEIVED " + value), // <3> + error -> System.err.println("CAUGHT " + error) // <4> +); +---- +<1> A transformation that can throw an exception is performed. +<2> If everything went well, a second transformation is performed. +<3> Each successfully transformed value is printed out. +<4> In case of an error, the sequence terminates and an error message is displayed. + +The preceding example is conceptually similar to the following try-catch block: + +[source,java] +[%unbreakable] +---- +try { + for (int i = 1; i < 11; i++) { + String v1 = doSomethingDangerous(i); // <1> + String v2 = doSecondTransform(v1); // <2> + System.out.println("RECEIVED " + v2); + } +} catch (Throwable t) { + System.err.println("CAUGHT " + t); // <3> +} +---- +<1> If an exception is thrown here... +<2> ...the rest of the loop is skipped... +<3> ... and the execution goes straight to here. + +Now that we have established a parallel, we can look at the different error handling cases +and their equivalent operators. + +[[static-fallback-value]] +=== Static Fallback Value + +The equivalent of "`Catch and return a static default value`" is `onErrorReturn`. 
+The following example shows how to use it: + +[source,java] +[%unbreakable] +---- +try { + return doSomethingDangerous(10); +} +catch (Throwable error) { + return "RECOVERED"; +} +---- + +The following example shows the Reactor equivalent: + +[source,java] +[%unbreakable] +---- +Flux.just(10) + .map(this::doSomethingDangerous) + .onErrorReturn("RECOVERED"); +---- + +You also have the option of applying a `Predicate` on the exception to decide +whether or not to recover, as the following example shows: + +[source,java] +[%unbreakable] +---- +Flux.just(10) + .map(this::doSomethingDangerous) + .onErrorReturn(e -> e.getMessage().equals("boom10"), "recovered10"); //<1> +---- +<1> Recover only if the message of the exception is `"boom10"` + +[[catch-and-swallow-the-error]] +=== Catch and swallow the error + +If you don't even want to replace the exception with a fallback value, but instead to ignore it and +only propagate elements that have been produced so far, what you want is essentially replacing +the `onError` signal with an `onComplete` signal. This can be done by the `onErrorComplete` operator: + +[source,java] +[%unbreakable] +---- +Flux.just(10,20,30) + .map(this::doSomethingDangerousOn30) + .onErrorComplete(); //<1> +---- +<1> Recover by turning the `onError` into an `onComplete` + +Like `onErrorReturn`, `onErrorComplete` has variants that let you filter which exceptions +to fall back on, based either on the exception's class or on a `Predicate`. + +[[fallback-method]] +=== Fallback Method + +If you want more than a single default value and you have an alternative (safer) way of +processing your data, you can use `onErrorResume`. This would be the equivalent of +"`Catch and execute an alternative path with a fallback method`". 
+ +For example, if your nominal process is fetching data from an external and unreliable +service but you also keep a local cache of the same data that _can_ be a bit more out of +date but is more reliable, you could do the following: + +[source,java] +[%unbreakable] +---- +String v1; +try { + v1 = callExternalService("key1"); +} +catch (Throwable error) { + v1 = getFromCache("key1"); +} + +String v2; +try { + v2 = callExternalService("key2"); +} +catch (Throwable error) { + v2 = getFromCache("key2"); +} +---- + +The following example shows the Reactor equivalent: + +[source,java] +[%unbreakable] +---- +Flux.just("key1", "key2") + .flatMap(k -> callExternalService(k) // <1> + .onErrorResume(e -> getFromCache(k)) // <2> + ); +---- +<1> For each key, asynchronously call the external service. +<2> If the external service call fails, fall back to the cache for that key. Note that +we always apply the same fallback, whatever the source error, `e`, is. + +Like `onErrorReturn`, `onErrorResume` has variants that let you filter which exceptions +to fall back on, based either on the exception's class or on a `Predicate`. The fact that it +takes a `Function` also lets you choose a different fallback sequence to switch to, +depending on the error encountered. The following example shows how to do so: + +[source,java] +[%unbreakable] +---- +Flux.just("timeout1", "unknown", "key2") + .flatMap(k -> callExternalService(k) + .onErrorResume(error -> { // <1> + if (error instanceof TimeoutException) // <2> + return getFromCache(k); + else if (error instanceof UnknownKeyException) // <3> + return registerNewEntry(k, "DEFAULT"); + else + return Flux.error(error); // <4> + }) + ); +---- +<1> The function allows dynamically choosing how to continue. +<2> If the source times out, hit the local cache. +<3> If the source says the key is unknown, create a new entry. +<4> In all other cases, "`re-throw`". 
+ +[[dynamic-fallback-value]] +=== Dynamic Fallback Value + +Even if you do not have an alternative (safer) way of processing your data, you might want +to compute a fallback value out of the exception you received. This would be the +equivalent of "`Catch and dynamically compute a fallback value`". + +For instance, if your return type (`MyWrapper`) has a variant dedicated to holding an exception (think +`Future.complete(T success)` versus `Future.completeExceptionally(Throwable error)`), you +could instantiate the error-holding variant and pass the exception. + +An imperative example would look like the following: + +[source,java] +[%unbreakable] +---- +try { + Value v = erroringMethod(); + return MyWrapper.fromValue(v); +} +catch (Throwable error) { + return MyWrapper.fromError(error); +} +---- + +You can do this reactively in the same way as the fallback method solution, +by using `onErrorResume`, with a tiny bit of boilerplate, as follows: + +[source,java] +[%unbreakable] +---- +erroringFlux.onErrorResume(error -> Mono.just( // <1> + MyWrapper.fromError(error) // <2> +)); +---- +<1> Since you expect a `MyWrapper` representation of the error, you need to get a +`Mono` for `onErrorResume`. We use `Mono.just()` for that. +<2> We need to compute the value out of the exception. Here, we achieved that +by wrapping the exception with a relevant `MyWrapper` factory method. 
+ +[[catch-and-rethrow]] +=== Catch and Rethrow + +"Catch, wrap to a `BusinessException`, and re-throw" looks like the following in the +imperative world: + +[source,java] +[%unbreakable] +---- +try { + return callExternalService(k); +} +catch (Throwable error) { + throw new BusinessException("oops, SLA exceeded", error); +} +---- + +In the "`fallback method`" example, the last line inside the `flatMap` gives us a hint +at achieving the same reactively, as follows: + +[source,java] +[%unbreakable] +---- +Flux.just("timeout1") + .flatMap(k -> callExternalService(k)) + .onErrorResume(original -> Flux.error( + new BusinessException("oops, SLA exceeded", original)) + ); +---- + +However, there is a more straightforward way of achieving the same effect with `onErrorMap`: + +[source,java] +[%unbreakable] +---- +Flux.just("timeout1") + .flatMap(k -> callExternalService(k)) + .onErrorMap(original -> new BusinessException("oops, SLA exceeded", original)); +---- + +[[log-or-react-on-the-side]] +=== Log or React on the Side + +For cases where you want the error to continue propagating but still want to react to +it without modifying the sequence (logging it, for instance), you can use the `doOnError` +operator. This is the equivalent of "`Catch, log an error-specific message, and re-throw`" +pattern, as the following example shows: + +[source,java] +[%unbreakable] +---- +try { + return callExternalService(k); +} +catch (RuntimeException error) { + //make a record of the error + log("uh oh, falling back, service failed for key " + k); + throw error; +} +---- + +The `doOnError` operator, as well as all operators prefixed with `doOn` , are sometimes +referred to as having a "`side-effect`". They let you peek inside the sequence's events without +modifying them. 
+ +Like the imperative example shown earlier, the following example still propagates the error yet +ensures that we at least log that the external service had a failure: + +[source,java] +[%unbreakable] +---- +LongAdder failureStat = new LongAdder(); +Flux flux = +Flux.just("unknown") + .flatMap(k -> callExternalService(k) // <1> + .doOnError(e -> { + failureStat.increment(); + log("uh oh, falling back, service failed for key " + k); // <2> + }) + // <3> + ); +---- +<1> The external service call that can fail... +<2> ...is decorated with a logging and stats side-effect... +<3> ...after which, it still terminates with an error, unless we use an error-recovery operator here. + +We can also imagine we have statistic counters to increment as a second error side-effect. + +[[using-resources-and-the-finally-block]] +=== Using Resources and the Finally Block + +The last parallel to draw with imperative programming is the cleaning up that can be done +either by using a "`Use of the `finally` block to clean up resources`" or by using a +"`Java 7 try-with-resource construct`", both shown below: + +.Imperative use of finally +[source,java] +[%unbreakable] +---- +Stats stats = new Stats(); +stats.startTimer(); +try { + doSomethingDangerous(); +} +finally { + stats.stopTimerAndRecordTiming(); +} +---- + +.Imperative use of try-with-resource +[source,java] +[%unbreakable] +---- +try (SomeAutoCloseable disposableInstance = new SomeAutoCloseable()) { + return disposableInstance.toString(); +} +---- + +Both have their Reactor equivalents: `doFinally` and `using`. + +`doFinally` is about side-effects that you want to be executed whenever the +sequence terminates (with `onComplete` or `onError`) or is cancelled. +It gives you a hint as to what kind of termination triggered the side-effect. 
+The following example shows how to use `doFinally`: + +[source,java] +[%unbreakable] +.Reactive finally: `doFinally()` +---- +Stats stats = new Stats(); +LongAdder statsCancel = new LongAdder(); + +Flux flux = +Flux.just("foo", "bar") + .doOnSubscribe(s -> stats.startTimer()) + .doFinally(type -> { // <1> + stats.stopTimerAndRecordTiming();// <2> + if (type == SignalType.CANCEL) // <3> + statsCancel.increment(); + }) + .take(1); // <4> +---- +<1> `doFinally` consumes a `SignalType` for the type of termination. +<2> Similarly to `finally` blocks, we always record the timing. +<3> Here we also increment statistics in case of cancellation only. +<4> `take(1)` requests exactly 1 from upstream, and cancels after one item is emitted. + +On the other hand, `using` handles the case where a `Flux` is derived from a +resource and that resource must be acted upon whenever processing is done. +In the following example, we replace the `AutoCloseable` interface of "`try-with-resource`" with a +`Disposable`: + +.The Disposable resource +[source,java] +[%unbreakable] +---- +AtomicBoolean isDisposed = new AtomicBoolean(); +Disposable disposableInstance = new Disposable() { + @Override + public void dispose() { + isDisposed.set(true); // <4> + } + + @Override + public String toString() { + return "DISPOSABLE"; + } +}; +---- + +Now we can do the reactive equivalent of "`try-with-resource`" on it, which looks +like the following: + +.Reactive try-with-resource: `using()` +[source,java] +[%unbreakable] +---- +Flux flux = +Flux.using( + () -> disposableInstance, // <1> + disposable -> Flux.just(disposable.toString()), // <2> + Disposable::dispose // <3> +); +---- +<1> The first lambda generates the resource. Here, we return our mock `Disposable`. +<2> The second lambda processes the resource, returning a `Flux`. +<3> The third lambda is called when the `Flux` from <2> terminates or is cancelled, to +clean up resources. 
+<4> After subscription and execution of the sequence, the `isDisposed` atomic boolean +becomes `true`. + +[[demonstrating-the-terminal-aspect-of-onerror]] +=== Demonstrating the Terminal Aspect of `onError` + +In order to demonstrate that all these operators cause the upstream original sequence to +terminate when an error happens, we can use a more visual example with a +`Flux.interval`. The `interval` operator ticks every x units of time with an increasing +`Long` value. The following example uses an `interval` operator: + +[source,java] +[%unbreakable] +---- +Flux flux = +Flux.interval(Duration.ofMillis(250)) + .map(input -> { + if (input < 3) return "tick " + input; + throw new RuntimeException("boom"); + }) + .onErrorReturn("Uh oh"); + +flux.subscribe(System.out::println); +Thread.sleep(2100); // <1> +---- +<1> Note that `interval` executes on a *timer* `Scheduler` by default. If we want +to run that example in a main class, we would need to add a `sleep` call here so that the +application does not exit immediately without any value being produced. + +The preceding example prints out one line every 250ms, as follows: + +[source] +[%unbreakable] +---- + +tick 0 +tick 1 +tick 2 +Uh oh +---- + +Even with one extra second of runtime, no more tick comes in from the `interval`. The +sequence was indeed terminated by the error. + +[[retrying]] +=== Retrying + +There is another operator of interest with regards to error handling, and you might be +tempted to use it in the case described in the previous section. `retry`, as its name +indicates, lets you retry an error-producing sequence. + +The thing to keep in mind is that it works by *re-subscribing* to the upstream `Flux`. +This is really a different sequence, and the original one is still terminated. +To verify that, we can re-use the previous example and append a `retry(1)` to +retry once instead of using `onErrorReturn`. 
The following example shows how to do so: + +[source,java] +[%unbreakable] +---- +Flux.interval(Duration.ofMillis(250)) + .map(input -> { + if (input < 3) return "tick " + input; + throw new RuntimeException("boom"); + }) + .retry(1) + .elapsed() // <1> + .subscribe(System.out::println, System.err::println); // <2> + +Thread.sleep(2100); // <3> +---- +<1> `elapsed` associates each value with the duration since previous value was emitted. +<2> We also want to see when there is an `onError`. +<3> Ensure we have enough time for our 4x2 ticks. + +The preceding example produces the following output: + +[source] +[%unbreakable] +---- + +259,tick 0 +249,tick 1 +251,tick 2 +506,tick 0 <1> +248,tick 1 +253,tick 2 +java.lang.RuntimeException: boom +---- +<1> A new `interval` started, from tick 0. The additional 250ms duration is +coming from the 4th tick, the one that causes the exception and subsequent +retry. + +As you can see from the preceding example, `retry(1)` merely re-subscribed to the original `interval` +once, restarting the tick from 0. The second time around, since the exception +still occurs, it gives up and propagates the error downstream. + +There is a more advanced version of `retry` (called `retryWhen`) that uses a "`companion`" +`Flux` to tell whether or not a particular failure should retry. This companion `Flux` is +created by the operator but decorated by the user, in order to customize the retry +condition. + +The companion `Flux` is a `Flux` that gets passed to a `Retry` strategy/function, +supplied as the sole parameter of `retryWhen`. As the user, you define that function and make it return a new +`Publisher`. The `Retry` class is an abstract class, but it offers a factory method if you +want to transform the companion with a simple lambda (`Retry.from(Function)`). + +Retry cycles go as follows: + +. 
Each time an error happens (giving potential for a retry), a `RetrySignal` is emitted into the +companion `Flux`, which has been decorated by your function. Having a `Flux` here +gives a bird eye's view of all the attempts so far. The `RetrySignal` gives access to the error +as well as metadata around it. +. If the companion `Flux` emits a value, a retry happens. +. If the companion `Flux` completes, the error is swallowed, the retry cycle stops, +and the resulting sequence completes, too. +. If the companion `Flux` produces an error (`e`), the retry cycle stops and the +resulting sequence errors with `e`. + +The distinction between the previous two cases is important. Simply completing the +companion would effectively swallow an error. Consider the following way of emulating +`retry(3)` by using `retryWhen`: + +[source,java] +[%unbreakable] +---- +Flux flux = Flux + .error(new IllegalArgumentException()) // <1> + .doOnError(System.out::println) // <2> + .retryWhen(Retry.from(companion -> // <3> + companion.take(3))); // <4> +---- +<1> This continuously produces errors, calling for retry attempts. +<2> `doOnError` before the retry lets us log and see all failures. +<3> The `Retry` is adapted from a very simple `Function` lambda +<4> Here, we consider the first three errors as retry-able (`take(3)`) and then give up. + +In effect, the preceding example results in an empty `Flux`, but it completes successfully. Since +`retry(3)` on the same `Flux` would have terminated with the latest error, this +`retryWhen` example is not exactly the same as a `retry(3)`. + +Getting to the same behavior involves a few additional tricks: +include::../snippetRetryWhenRetry.adoc[] + +TIP: One can use the builders exposed in `Retry` to achieve the same in a more fluent manner, as +well as more finely tuned retry strategies. For example: `errorFlux.retryWhen(Retry.max(3));`. 
+
+TIP: You can use similar code to implement an "`exponential backoff and retry`" pattern,
+as shown in the xref:faq.adoc#faq.exponentialBackoff[FAQ].
+
+The core-provided `Retry` helpers, `RetrySpec` and `RetryBackoffSpec`, both allow advanced customizations like:
+
+- setting the `filter(Predicate)` for the exceptions that can trigger a retry
+- modifying such a previously set filter through `modifyErrorFilter(Function)`
+- triggering a side effect like logging around the retry trigger (i.e. for backoff before and after the delay), provided the retry is validated (`doBeforeRetry()` and `doAfterRetry()` are additive)
+- triggering an asynchronous `Mono` around the retry trigger, which lets you add asynchronous behavior on top of the base delay but thus further delays the trigger (`doBeforeRetryAsync` and `doAfterRetryAsync` are additive)
+- customizing the exception in case the maximum number of attempts has been reached, through `onRetryExhaustedThrow(BiFunction)`.
+By default, `Exceptions.retryExhausted(...)` is used, which can be distinguished with `Exceptions.isRetryExhausted(Throwable)`.
+- activating the handling of _transient errors_ (see below)
+
+[[retrying-with-transient-errors]]
+==== Retrying with transient errors
+Some long-lived sources may see sporadic bursts of errors followed by longer periods of time during which all is running smoothly.
+This documentation refers to this pattern of errors as _transient errors_.
+
+In such cases, it would be desirable to deal with each burst in isolation, so that the next burst doesn't inherit the retry state from the previous one.
+For instance, with an exponential backoff strategy each subsequent burst should delay retry attempts starting from the minimum backoff `Duration` instead of an ever-growing one.
+
+The `RetrySignal` interface, which represents `retryWhen` state, has a `totalRetriesInARow()` value which can be used for this.
+Instead of the usual monotonically-increasing `totalRetries()` index, this secondary index is reset to 0 each time an error +is recovered from by the retry (ie. when a retry attempt results in an incoming `onNext` instead of an `onError` again). + +When setting the `transientErrors(boolean)` configuration parameter to `true` in the `RetrySpec` or `RetryBackoffSpec`, the resulting strategy makes use of that `totalRetriesInARow()` index, effectively dealing with _transient errors_. +These specs compute the retry pattern from the index, so in effect all other configuration parameters of the spec apply to each burst of error independently. + +[source,java] +[%unbreakable] +---- +AtomicInteger errorCount = new AtomicInteger(); // <1> +Flux transientFlux = httpRequest.get() // <2> + .doOnError(e -> errorCount.incrementAndGet()); + +transientFlux.retryWhen(Retry.max(2).transientErrors(true)) // <3> + .blockLast(); +assertThat(errorCount).hasValue(6); // <4> +---- +<1> We will count the number of errors in the retried sequence for illustration. +<2> We assume a http request source, eg. a streaming endpoint that will sometimes fail two times in a row, then recover. +<3> We use `retryWhen` on that source, configured for at most 2 retry attempts, but in `transientErrors` mode. +<4> At the end, a valid response is achieved and the `transientFlux` successfully completes after `6` attempts have been registered in `errorCount`. + +Without the `transientErrors(true)`, the configured maximum attempt of `2` would be exceeded by the second burst and the whole sequence would have ultimately failed. 
+ +[NOTE] +==== +If you want to locally try this without an actual http remote endpoint, you can implement a pseudo `httpRequest` method as a `Supplier`, as follows: + +===== +[source,java] +[%unbreakable] +---- +final AtomicInteger transientHelper = new AtomicInteger(); +Supplier<Flux<Integer>> httpRequest = () -> + Flux.generate(sink -> { // <1> + int i = transientHelper.getAndIncrement(); + if (i == 10) { // <2> + sink.next(i); + sink.complete(); + } + else if (i % 3 == 0) { // <3> + sink.next(i); + } + else { + sink.error(new IllegalStateException("Transient error at " + i)); // <4> + } + }); +---- +<1> We `generate` a source that has bursts of errors. +<2> It will successfully complete when the counter reaches 10. +<3> If the `transientHelper` atomic is at a multiple of `3`, we emit `onNext` and thus end the current burst. +<4> In other cases we emit an `onError`. That's 2 out of 3 times, so bursts of 2 `onError` interrupted by 1 `onNext`. +===== +==== + +[[handling-exceptions-in-operators-or-functions]] +== Handling Exceptions in Operators or Functions + +In general, all operators can themselves contain code that potentially trigger an +exception or calls to a user-defined callback that can similarly fail, so they all +contain some form of error handling. + +As a rule of thumb, an unchecked exception is always propagated through `onError`. For +instance, throwing a `RuntimeException` inside a `map` function translates to an +`onError` event, as the following code shows: + +[source,java] +[%unbreakable] +---- +Flux.just("foo") + .map(s -> { throw new IllegalArgumentException(s); }) + .subscribe(v -> System.out.println("GOT VALUE"), + e -> System.out.println("ERROR: " + e)); +---- + +The preceding code prints out the following: + +[source] +[%unbreakable] +---- + +ERROR: java.lang.IllegalArgumentException: foo +---- + +TIP: You can tune the `Exception` before it is passed to `onError`, through the use of a +xref:advancedFeatures/hooks.adoc#hooks-internal[hook].
+ +Reactor, however, defines a set of exceptions (such as `OutOfMemoryError`) that are +always deemed to be fatal. See the `Exceptions.throwIfFatal` method. These errors mean that +Reactor cannot keep operating and are thrown rather than propagated. + +NOTE: Internally, there are also cases where an unchecked exception still cannot be +propagated (most notably during the subscribe and request phases), due to concurrency +races that could lead to double `onError` or `onComplete` conditions. When these races +happen, the error that cannot be propagated is "`dropped`". These cases can still be +managed to some extent by using customizable hooks. See xref:advancedFeatures/hooks.adoc#hooks-dropping[Dropping Hooks]. + +You may ask: "`What about checked exceptions?`" + +If, for example, you need to call some method that declares it `throws` exceptions, you +still have to deal with those exceptions in a `try-catch` block. You have several +options, though: + +. Catch the exception and recover from it. The sequence continues normally. +. Catch the exception, wrap it into an _unchecked_ exception, and then throw it +(interrupting the sequence). The `Exceptions` utility class can help you with that (we +get to that next). +. If you need to return a `Flux` (for example, you are in a `flatMap`), wrap the +exception in an error-producing `Flux`, as follows: `return Flux.error(checkedException)`. (The +sequence also terminates.) + +Reactor has an `Exceptions` utility class that you can use to ensure that exceptions are +wrapped only if they are checked exceptions: + +* Use the `Exceptions.propagate` method to wrap exceptions, if necessary. It also calls +`throwIfFatal` first and does not wrap `RuntimeException`. +* Use the `Exceptions.unwrap` method to get the original unwrapped exception (going back +to the root cause of a hierarchy of reactor-specific exceptions). 
+ +Consider the following example of a `map` that uses a conversion method that can throw an +`IOException`: + +[source,java] +[%unbreakable] +---- +public String convert(int i) throws IOException { + if (i > 3) { + throw new IOException("boom " + i); + } + return "OK " + i; +} +---- + +Now imagine that you want to use that method in a `map`. You must now explicitly catch +the exception, and your map function cannot re-throw it. So you can propagate it to the +map's `onError` method as a `RuntimeException`, as follows: + +[source,java] +[%unbreakable] +---- +Flux<String> converted = Flux + .range(1, 10) + .map(i -> { + try { return convert(i); } + catch (IOException e) { throw Exceptions.propagate(e); } + }); +---- + +Later on, when subscribing to the preceding `Flux` and reacting to errors (such as in the +UI), you could revert back to the original exception if you want to do something +special for IOExceptions. The following example shows how to do so: + +[source,java] +[%unbreakable] +---- +converted.subscribe( + v -> System.out.println("RECEIVED: " + v), + e -> { + if (Exceptions.unwrap(e) instanceof IOException) { + System.out.println("Something bad happened with I/O"); + } else { + System.out.println("Something bad happened"); + } + } +); + diff --git a/docs/modules/ROOT/pages/coreFeatures/flux.adoc b/docs/modules/ROOT/pages/coreFeatures/flux.adoc new file mode 100644 index 0000000000..e3b372628b --- /dev/null +++ b/docs/modules/ROOT/pages/coreFeatures/flux.adoc @@ -0,0 +1,19 @@ +[[flux]] += Flux, an Asynchronous Sequence of 0-N Items + +The following image shows how a `Flux` transforms items: + +image::flux.svg[Flux] + +A `Flux<T>` is a standard `Publisher<T>` that represents an asynchronous sequence of 0 to N +emitted items, optionally terminated by either a completion signal or an error. +As in the Reactive Streams spec, these three types of signal translate to calls to a downstream +Subscriber's `onNext`, `onComplete`, and `onError` methods.
+ +With this large scope of possible signals, `Flux` is the general-purpose reactive type. +Note that all events, even terminating ones, are optional: no `onNext` event but an +`onComplete` event represents an _empty_ finite sequence, but remove the `onComplete` and +you have an _infinite_ empty sequence (not particularly useful, except for tests around cancellation). +Similarly, infinite sequences are not necessarily empty. For example, `Flux.interval(Duration)` +produces a `Flux<Long>` that is infinite and emits regular ticks from a clock. + diff --git a/docs/modules/ROOT/pages/coreFeatures/mono.adoc b/docs/modules/ROOT/pages/coreFeatures/mono.adoc new file mode 100644 index 0000000000..0c8c92a925 --- /dev/null +++ b/docs/modules/ROOT/pages/coreFeatures/mono.adoc @@ -0,0 +1,27 @@ +[[mono]] += Mono, an Asynchronous 0-1 Result + +The following image shows how a `Mono` transforms an item: + +image::mono.svg[Mono] + +A `Mono<T>` is a specialized `Publisher<T>` that emits at most one item _via_ the +`onNext` signal then terminates with an `onComplete` signal (successful `Mono`, +with or without value), or only emits a single `onError` signal (failed `Mono`). + + +Most `Mono` implementations are expected to immediately call `onComplete` on their +`Subscriber` after having called `onNext`. `Mono.never()` is an outlier: it doesn't +emit any signal, which is not technically forbidden although not terribly useful outside +of tests. On the other hand, a combination of `onNext` and `onError` is explicitly forbidden. + +`Mono` offers only a subset of the operators that are available for a `Flux`, and +some operators (notably those that combine the `Mono` with another `Publisher`) +switch to a `Flux`. +For example, `Mono#concatWith(Publisher)` returns a `Flux` while `Mono#then(Mono)` +returns another `Mono`. + +Note that you can use a `Mono` to represent no-value asynchronous processes that only +have the concept of completion (similar to a `Runnable`).
To create one, you can use an empty +`Mono<Void>`. + diff --git a/docs/modules/ROOT/pages/coreFeatures/programmatically-creating-sequence.adoc b/docs/modules/ROOT/pages/coreFeatures/programmatically-creating-sequence.adoc new file mode 100644 index 0000000000..d34cf4adc4 --- /dev/null +++ b/docs/modules/ROOT/pages/coreFeatures/programmatically-creating-sequence.adoc @@ -0,0 +1 @@ +include::../producing.adoc[] diff --git a/docs/modules/ROOT/pages/coreFeatures/schedulers.adoc b/docs/modules/ROOT/pages/coreFeatures/schedulers.adoc new file mode 100644 index 0000000000..f279ec13b1 --- /dev/null +++ b/docs/modules/ROOT/pages/coreFeatures/schedulers.adoc @@ -0,0 +1,206 @@ +[[schedulers]] += Threading and Schedulers + +Reactor, like RxJava, can be considered to be *concurrency-agnostic*. That is, it does not +enforce a concurrency model. Rather, it leaves you, the developer, in command. However, +that does not prevent the library from helping you with concurrency. + +Obtaining a `Flux` or a `Mono` does not necessarily mean that it runs in a dedicated +`Thread`. Instead, most operators continue working in the `Thread` on which the +previous operator executed. Unless specified, the topmost operator (the source) +itself runs on the `Thread` in which the `subscribe()` call was made. The following +example runs a `Mono` in a new thread: + +[source,java] +[%unbreakable] +---- +public static void main(String[] args) throws InterruptedException { + final Mono<String> mono = Mono.just("hello "); //<1> + + Thread t = new Thread(() -> mono + .map(msg -> msg + "thread ") + .subscribe(v -> //<2> + System.out.println(v + Thread.currentThread().getName()) //<3> + ) + ); + t.start(); + t.join(); + +} +---- +<1> The `Mono` is assembled in thread `main`. +<2> However, it is subscribed to in thread `Thread-0`.
+<3> As a consequence, both the `map` and the `onNext` callback actually run in `Thread-0` + +The preceding code produces the following output: + +[source] +[%unbreakable] +---- + +hello thread Thread-0 +---- + +In Reactor, the execution model and where the execution happens is determined by the +`Scheduler` that is used. A +{javadoc}/reactor/core/scheduler/Scheduler.html[`Scheduler`] +has scheduling responsibilities similar to an `ExecutorService`, but having a +dedicated abstraction lets it do more, notably acting as a clock and enabling +a wider range of implementations (virtual time for tests, trampolining or +immediate scheduling, and so on). + +The {javadoc}/reactor/core/scheduler/Schedulers.html[`Schedulers`] +class has static methods that give access to the following execution contexts: + +* No execution context (`Schedulers.immediate()`): at processing time, the submitted `Runnable` +will be directly executed, effectively running them on the current `Thread` (can be seen as a "null object" or no-op `Scheduler`). +* A single, reusable thread (`Schedulers.single()`). Note that this method reuses the +same thread for all callers, until the Scheduler is disposed. If you want a per-call +dedicated thread, use `Schedulers.newSingle()` for each call. +* An unbounded elastic thread pool (`Schedulers.elastic()`). This one is no longer preferred +with the introduction of `Schedulers.boundedElastic()`, as it has a tendency to hide backpressure +problems and lead to too many threads (see below). +* A bounded elastic thread pool (`Schedulers.boundedElastic()`). This is a handy way to +give a blocking process its own thread so that it does not tie up other resources. This is a better choice for I/O blocking work. See +xref:faq.adoc#faq.wrap-blocking[How Do I Wrap a Synchronous, Blocking Call?], but doesn't pressure the system too much with new threads. 
+Starting from 3.6.0 this can offer two different implementations depending on the setup: + - `ExecutorService`-based, which reuses platform threads between tasks. This +implementation, like its predecessor `elastic()`, creates new worker pools as needed +and reuses idle ones. Worker pools that stay idle for too long (the default is 60s) are +also disposed. Unlike its `elastic()` predecessor, it has a cap on the number of backing threads it can create (default is number of CPU cores x 10). +Up to 100 000 tasks submitted after the cap has been reached are enqueued and will be re-scheduled when a thread becomes available +(when scheduling with a delay, the delay starts when the thread becomes available). + - Thread-per-task-based, designed to run on `VirtualThread` instances. +To embrace that functionality, the application should run in Java 21+ environment and set the `reactor.schedulers.defaultBoundedElasticOnVirtualThreads` system property to `true`. +Once the above is set, the shared `Schedulers.boundedElastic()` return a specific implementation +of `BoundedElasticScheduler` tailored to run every task on a new instance of the +`VirtualThread` class. This implementation is similar in terms of the behavior to the +`ExecutorService`-based one but does not have idle pool and creates a new `VirtualThread` +for each task. +* A fixed pool of workers that is tuned for parallel work (`Schedulers.parallel()`). It +creates as many workers as you have CPU cores. + +Additionally, you can create a `Scheduler` out of any pre-existing `ExecutorService` by +using `Schedulers.fromExecutorService(ExecutorService)`. (You can also create one from an +`Executor`, although doing so is discouraged.) + +You can also create new instances of the various scheduler types by using the `newXXX` +methods. For example, `Schedulers.newParallel(yourScheduleName)` creates a new parallel +scheduler named `yourScheduleName`. 
+ +[WARNING] +==== +While `boundedElastic` is made to help with legacy blocking code if it cannot be avoided, +`single` and `parallel` are not. As a consequence, the use of Reactor blocking APIs +(`block()`, `blockFirst()`, `blockLast()` (as well as iterating over `toIterable()` +or `toStream()`) inside the default single and parallel schedulers) results in +an `IllegalStateException` being thrown. + +Custom `Schedulers` can also be marked as "non blocking only" by creating instances of `Thread` +that implement the `NonBlocking` marker interface. +==== + +Some operators use a specific scheduler from `Schedulers` by default (and usually give +you the option of providing a different one). For instance, calling the +`Flux.interval(Duration.ofMillis(300))` factory method produces a `Flux` that ticks every 300ms. +By default, this is enabled by `Schedulers.parallel()`. The following line changes the +Scheduler to a new instance similar to `Schedulers.single()`: + +[source,java] +[%unbreakable] +---- +Flux.interval(Duration.ofMillis(300), Schedulers.newSingle("test")) +---- + +Reactor offers two means of switching the execution context (or `Scheduler`) in a +reactive chain: `publishOn` and `subscribeOn`. Both take a `Scheduler` and let you switch +the execution context to that scheduler. But the placement of `publishOn` in the chain +matters, while the placement of `subscribeOn` does not. To understand that difference, +you first have to remember that xref:reactiveProgramming.adoc#reactive.subscribe[nothing happens until you subscribe] +. + +In Reactor, when you chain operators, you can wrap as many `Flux` and `Mono` +implementations inside one another as you need. Once you subscribe, a chain of +`Subscriber` objects is created, backward (up the chain) to the first +publisher. This is effectively hidden from you. 
All you can see is the outer layer of +`Flux` (or `Mono`) and `Subscription`, but these intermediate operator-specific +subscribers are where the real work happens. + +With that knowledge, we can have a closer look at the `publishOn` and `subscribeOn` +operators: + +[[the-publishon-method]] +== The `publishOn` Method + +`publishOn` applies in the same way as any other operator, in the middle of the +subscriber chain. It takes signals from upstream and replays them downstream while +executing the callback on a worker from the associated `Scheduler`. Consequently, it +*affects where the subsequent operators execute* (until another `publishOn` is +chained in), as follows: + +* Changes the execution context to one `Thread` picked by the `Scheduler` +* as per the specification, `onNext` calls happen in sequence, so this uses up a single thread +* unless they work on a specific `Scheduler`, operators after `publishOn` continue execution on that same thread + +The following example uses the `publishOn` method: + +[source,java] +[%unbreakable] +---- +Scheduler s = Schedulers.newParallel("parallel-scheduler", 4); //<1> + +final Flux<String> flux = Flux + .range(1, 2) + .map(i -> 10 + i) //<2> + .publishOn(s) //<3> + .map(i -> "value " + i); //<4> + +new Thread(() -> flux.subscribe(System.out::println)); //<5> +---- +<1> Creates a new `Scheduler` backed by four `Thread` instances. +<2> The first `map` runs on the anonymous thread in <5>. +<3> The `publishOn` switches the whole sequence on a `Thread` picked from <1>. +<4> The second `map` runs on the `Thread` from <1>. +<5> This anonymous `Thread` is the one where the _subscription_ happens. +The print happens on the latest execution context, which is the one from `publishOn`. + +[[the-subscribeon-method]] +== The `subscribeOn` Method + +`subscribeOn` applies to the subscription process, when the backward chain is being +constructed.
It is usually recommended to place it immediately after the source of data, +as intermediate operators can affect the context of the execution. + +However, this does not affect the +behavior of subsequent calls to `publishOn` -- they still switch the execution context for +the part of the chain after them. + +* Changes the `Thread` from which the *whole chain* of operators subscribes +* Picks one thread from the `Scheduler` + +NOTE: Only the closest `subscribeOn` call in the downstream chain effectively + schedules subscription and request signals to the source or operators that can + intercept them (`doFirst`, `doOnRequest`). Using multiple `subscribeOn` calls will + introduce unnecessary Thread switches that have no value. + +The following example uses the `subscribeOn` method: + +[source,java] +[%unbreakable] +---- +Scheduler s = Schedulers.newParallel("parallel-scheduler", 4); //<1> + +final Flux<String> flux = Flux + .range(1, 2) + .map(i -> 10 + i) //<2> + .subscribeOn(s) //<3> + .map(i -> "value " + i); //<4> + +new Thread(() -> flux.subscribe(System.out::println)); //<5> +---- +<1> Creates a new `Scheduler` backed by four `Thread`. +<2> The first `map` runs on one of these four threads... +<3> ...because `subscribeOn` switches the whole sequence right from subscription time (<5>). +<4> The second `map` also runs on same thread. +<5> This anonymous `Thread` is the one where the _subscription_ initially happens, but `subscribeOn` immediately shifts it to one of the four scheduler threads.
+ diff --git a/docs/modules/ROOT/pages/coreFeatures/simple-ways-to-create-a-flux-or-mono-and-subscribe-to-it.adoc b/docs/modules/ROOT/pages/coreFeatures/simple-ways-to-create-a-flux-or-mono-and-subscribe-to-it.adoc new file mode 100644 index 0000000000..a228d4150c --- /dev/null +++ b/docs/modules/ROOT/pages/coreFeatures/simple-ways-to-create-a-flux-or-mono-and-subscribe-to-it.adoc @@ -0,0 +1,75 @@ +[[simple-ways-to-create-a-flux-or-mono-and-subscribe-to-it]] += Simple Ways to Create a Flux or Mono and Subscribe to It + +The easiest way to get started with `Flux` and `Mono` is to use one of the numerous +factory methods found in their respective classes. + +For instance, to create a sequence of `String`, you can either enumerate them or put them +in a collection and create the Flux from it, as follows: + +[source,java] +[%unbreakable] +---- +Flux<String> seq1 = Flux.just("foo", "bar", "foobar"); + +List<String> iterable = Arrays.asList("foo", "bar", "foobar"); +Flux<String> seq2 = Flux.fromIterable(iterable); +---- + +Other examples of factory methods include the following: + +[source,java] +[%unbreakable] +---- +Mono<String> noData = Mono.empty(); <1> + +Mono<String> data = Mono.just("foo"); + +Flux<Integer> numbersFromFiveToSeven = Flux.range(5, 3); <2> +---- +<1> Notice the factory method honors the generic type even though it has no value. +<2> The first parameter is the start of the range, while the second parameter is the +number of items to produce. + +When it comes to subscribing, `Flux` and `Mono` make use of Java 8 lambdas.
You +have a wide choice of `.subscribe()` variants that take lambdas for different +combinations of callbacks, as shown in the following method signatures: + +[[subscribeMethods]] +.Lambda-based subscribe variants for `Flux` +[source,java] +[%unbreakable] +---- +subscribe(); <1> + +subscribe(Consumer consumer); <2> + +subscribe(Consumer consumer, + Consumer errorConsumer); <3> + +subscribe(Consumer consumer, + Consumer errorConsumer, + Runnable completeConsumer); <4> + +subscribe(Consumer consumer, + Consumer errorConsumer, + Runnable completeConsumer, + Consumer subscriptionConsumer); <5> +---- +<1> Subscribe and trigger the sequence. +<2> Do something with each produced value. +<3> Deal with values but also react to an error. +<4> Deal with values and errors but also run some code when the sequence successfully +completes. +<5> Deal with values and errors and successful completion but also do something with the +`Subscription` produced by this `subscribe` call. + +TIP: These variants return a reference to the subscription that you can use to cancel the +subscription when no more data is needed. Upon cancellation, the source should stop +producing values and clean up any resources it created. This cancel-and-clean-up behavior +is represented in Reactor by the general-purpose `Disposable` interface. + +include::../subscribe-details.adoc[] + +include::../subscribe-backpressure.adoc[] + diff --git a/docs/modules/ROOT/pages/coreFeatures/sinks.adoc b/docs/modules/ROOT/pages/coreFeatures/sinks.adoc new file mode 100644 index 0000000000..887e3ca78c --- /dev/null +++ b/docs/modules/ROOT/pages/coreFeatures/sinks.adoc @@ -0,0 +1 @@ +include::../processors.adoc[] diff --git a/docs/modules/ROOT/pages/debugging.adoc b/docs/modules/ROOT/pages/debugging.adoc index 4df7297d41..3a3454f345 100644 --- a/docs/modules/ROOT/pages/debugging.adoc +++ b/docs/modules/ROOT/pages/debugging.adoc @@ -11,6 +11,7 @@ of your code? Did the failure occur in some library code? 
If so, what part of yo called the library, potentially passing in improper parameters that ultimately caused the failure? +[[the-typical-reactor-stack-trace]] == The Typical Reactor Stack Trace When you shift to asynchronous code, things can get much more complicated. @@ -20,9 +21,10 @@ When you shift to asynchronous code, things can get much more complicated. Consider the following stack trace: .A typical Reactor stack trace -==== [source] +[%unbreakable] ---- + java.lang.IndexOutOfBoundsException: Source emitted more than one item at reactor.core.publisher.MonoSingle$SingleSubscriber.onNext(MonoSingle.java:129) at reactor.core.publisher.FluxFlatMap$FlatMapMain.tryEmitScalar(FluxFlatMap.java:445) @@ -44,7 +46,6 @@ java.lang.IndexOutOfBoundsException: Source emitted more than one item at reactor.core.publisher.Mono.subscribe(Mono.java:3029) at reactor.guide.GuideTests.debuggingCommonStacktrace(GuideTests.java:995) ---- -==== There is a lot going on there. We get an `IndexOutOfBoundsException`, which tells us that a `source emitted more than one item`. @@ -73,24 +74,22 @@ line refers to some of our code. Finally, we are getting close. Hold on, though. When we go to the source file, all we see is that a pre-existing `Flux` is subscribed to, as follows: -==== [source,java] +[%unbreakable] ---- toDebug .subscribeOn(Schedulers.immediate()) .subscribe(System.out::println, Throwable::printStackTrace); ---- -==== All of this happened at subscription time, but the `Flux` itself was not declared there. Worse, when we go to where the variable is declared, we see the following: -==== [source,java] +[%unbreakable] ---- public Mono toDebug; //please overlook the public class attribute ---- -==== The variable is not instantiated where it is declared. We must assume a worst-case scenario where we find out that there could be a few different code paths that set it in @@ -108,8 +107,8 @@ the `Flux`. 
WARNING: this section describes the easiest but also the slowest way to enable the debugging capabilities due to the fact that it captures the stacktrace on every operator. -See <> for a more fine grained way of debugging, -and <> for a more advanced and performant global option. +See xref:debugging.adoc#checkpoint-alternative[The `checkpoint()` Alternative] for a more fine grained way of debugging, +and xref:debugging.adoc#reactor-tools-debug[Production-ready Global Debugging] for a more advanced and performant global option. Even though the stacktrace was still able to convey some information for someone with a bit of experience, we can see that it is not ideal by itself in more advanced cases. @@ -119,12 +118,11 @@ Fortunately, Reactor comes with assembly-time instrumentation that is designed This is done by activating a global debug mode via the `Hooks.onOperatorDebug()` method at application start (or at least before the incriminated `Flux` or `Mono` can be instantiated), as follows: -==== [source,java] +[%unbreakable] ---- Hooks.onOperatorDebug(); ---- -==== This starts instrumenting the calls to Reactor operator methods (where they are assembled into the chain) by wrapping the construction of the operator and @@ -141,6 +139,7 @@ exceptions by Reactor in general) a *traceback*. In the next section, we see how the stack trace differs and how to interpret that new information. +[[reading-a-stack-trace-in-debug-mode]] == Reading a Stack Trace in Debug Mode When we reuse our initial example but activate the `operatorStacktrace` debug feature, @@ -156,8 +155,8 @@ several things happen: The full stack trace, once printed, is as follows: -==== [source] +[%unbreakable] ---- java.lang.IndexOutOfBoundsException: Source emitted more than one item at reactor.core.publisher.MonoSingle$SingleSubscriber.onNext(MonoSingle.java:127) <1> @@ -190,7 +189,6 @@ was used. Here we have a "root". <6> Here we have a simple part of the chain. 
<7> The rest of the stack trace is moved at the end... <8> ...showing a bit of the operator's internals (so we removed a bit of the snippet here). -==== The captured stack trace is appended to the original error as a suppressed `OnAssemblyException`. There are three parts to it, but the first section is the @@ -201,8 +199,8 @@ exception. Here, it shows that the `single` that caused our issue was actually c Now that we are armed with enough information to find the culprit, we can have a meaningful look at that `scatterAndGather` method: -==== [source,java] +[%unbreakable] ---- private Mono scatterAndGather(Flux urls) { return urls.flatMap(url -> doRequest(url)) @@ -210,7 +208,6 @@ private Mono scatterAndGather(Flux urls) { } ---- <1> Sure enough, here is our `single`. -==== Now we can see what the root cause of the error was a `flatMap` that performs several HTTP calls to a few URLs but that is chained with `single`, which is too @@ -221,34 +218,34 @@ We have solved our problem. Now consider the following section in the stack trace: -==== [source] +[%unbreakable] ---- + Error has been observed at the following site(s): ---- -==== That second part of the traceback was not necessarily interesting in this particular example, because the error was actually happening in the last operator in the chain (the one closest to `subscribe`). Considering another example might make it more clear: -==== [source,java] +[%unbreakable] ---- FakeRepository.findAllUserByName(Flux.just("pedro", "simon", "stephane")) .transform(FakeUtils1.applyFilters) .transform(FakeUtils2.enrichUser) .blockLast(); ---- -==== Now imagine that, inside `findAllUserByName`, there is a `map` that fails. 
Here, we would see the following in the second part of the traceback: -==== [source] +[%unbreakable] ---- + Error has been observed at the following site(s): *________Flux.map ⇢ at reactor.guide.FakeRepository.findAllUserByName(FakeRepository.java:27) |_ Flux.map ⇢ at reactor.guide.FakeRepository.findAllUserByName(FakeRepository.java:28) @@ -257,7 +254,6 @@ Error has been observed at the following site(s): |_ Flux.elapsed ⇢ at reactor.guide.FakeUtils2.lambda$static$0(FakeUtils2.java:30) |_ Flux.transform ⇢ at reactor.guide.GuideDebuggingExtraTests.debuggingActivatedWithDeepTraceback(GuideDebuggingExtraTests.java:40) ---- -==== This corresponds to the section of the chain(s) of operators that gets notified of the error: @@ -277,8 +273,8 @@ If a site is seen several time, there will be an `(observed x times)` after the For instance, let us consider the following snippet: -==== [source,java] +[%unbreakable] ---- public class MyClass { public void myMethod() { @@ -290,14 +286,14 @@ public class MyClass { } } ---- -==== In the code above, error propagates to the `when`, going through two separate chains `chain1` and `chain2`. It would lead to a traceback containing the following: -==== [source] +[%unbreakable] ---- + Error has been observed at the following site(s): *_____Flux.error ⇢ at myClass.myMethod(MyClass.java:3) (observed 2 times) |_ Flux.map ⇢ at myClass.myMethod(MyClass.java:4) @@ -307,7 +303,6 @@ Error has been observed at the following site(s): |_ Flux.distinct ⇢ at myClass.myMethod(MyClass.java:5) *______Mono.when ⇢ at myClass.myMethod(MyClass.java:7) ---- -==== We see that: @@ -369,9 +364,10 @@ behavior by using the `checkpoint("description", true)` version. 
We are now back initial message for the traceback, augmented with a `description`, as shown in the following example: -==== [source] +[%unbreakable] ---- + Assembly trace from producer [reactor.core.publisher.ParallelSource], described as [descriptionCorrelation1234] : <1> reactor.core.publisher.ParallelFlux.checkpoint(ParallelFlux.java:215) reactor.core.publisher.FluxOnAssemblyTest.parallelFluxCheckpointDescriptionAndForceStack(FluxOnAssemblyTest.java:225) @@ -379,7 +375,6 @@ Error has been observed at the following site(s): |_ ParallelFlux.checkpoint ⇢ reactor.core.publisher.FluxOnAssemblyTest.parallelFluxCheckpointDescriptionAndForceStack(FluxOnAssemblyTest.java:225) ---- <1> `descriptionCorrelation1234` is the description provided in the `checkpoint`. -==== The description could be a static identifier or user-readable description or a wider correlation ID (for instance, coming from a header in the case of an HTTP request). @@ -393,14 +388,13 @@ As a result, the name of heavy checkpoints is not visible in this case. == Production-ready Global Debugging Project Reactor comes with a separate Java Agent that instruments your code and adds debugging info without paying the cost of capturing the stacktrace on every operator call. -The behaviour is very similar to <>, but without the runtime performance overhead. +The behaviour is very similar to xref:debugging.adoc#debug-activate[Activating Debug Mode - aka tracebacks], but without the runtime performance overhead. To use it in your app, you must add it as a dependency. The following example shows how to add `reactor-tools` as a dependency in Maven: .reactor-tools in Maven, in `` -==== [source,xml] ---- @@ -409,69 +403,64 @@ The following example shows how to add `reactor-tools` as a dependency in Maven: <1> ---- -<1> If you use the <>, you do not need to specify a ``. -==== +<1> If you use the xref:gettingStarted.adoc#getting[BOM], you do not need to specify a ``. 
The following example shows how to add `reactor-tools` as a dependency in Gradle: .reactor-tools in Gradle, amend the `dependencies` block -==== [source,groovy] ---- dependencies { compile 'io.projectreactor:reactor-tools' } ---- -==== It also needs to be explicitly initialized with: -==== [source,java] +[%unbreakable] ---- ReactorDebugAgent.init(); ---- -==== TIP: Since the implementation will instrument your classes when they are loaded, the best place to put it is before everything else in your main(String[]) method: -==== [source,java] +[%unbreakable] ---- public static void main(String[] args) { ReactorDebugAgent.init(); SpringApplication.run(Application.class, args); } ---- -==== You may also re-process existing classes with `processExistingClasses()` if you cannot run the init eagerly. For example, in https://junit.org/junit5/docs/current/user-guide/#launcher-api-listeners-custom[JUnit5 tests from a `TestExecutionListener`] or even in the class `static` initializer block: -==== [source,java] +[%unbreakable] ---- ReactorDebugAgent.init(); ReactorDebugAgent.processExistingClasses(); ---- -==== WARNING: Be aware that the re-processing takes a couple of seconds due to the need to iterate over all loaded classes and apply the transformation. Use it only if you see that some call-sites are not instrumented. +[[limitations]] === Limitations `ReactorDebugAgent` is implemented as a Java Agent and uses https://bytebuddy.net/#/[ByteBuddy] to perform the self-attach. Self-attach may not work on some JVMs, please refer to ByteBuddy's documentation for more details. 
+[[running-reactordebugagent-as-a-java-agent]] === Running ReactorDebugAgent as a Java Agent If your environment does not support ByteBuddy's self-attachment, you can run `reactor-tools` as a Java Agent: -==== [source,shell] ---- java -javaagent reactor-tools.jar -jar app.jar ---- -==== +[[running-reactordebugagent-at-build-time]] === Running ReactorDebugAgent at build time It is also possible to run `reactor-tools` at build time. To do so, you need to apply it as a plugin for ByteBuddy's build instrumentation. @@ -479,7 +468,6 @@ plugin for ByteBuddy's build instrumentation. WARNING: The transformation will only be applied to your project's classes. The classpath libraries will not be instrumented. .reactor-tools with https://github.com/raphw/byte-buddy/tree/byte-buddy-1.10.9/byte-buddy-maven-plugin[ByteBuddy's Maven plugin] -==== [source,xml] ---- @@ -508,12 +496,10 @@ WARNING: The transformation will only be applied to your project's classes. The ---- -<1> If you use the <>, you do not need to specify a ``. +<1> If you use the xref:gettingStarted.adoc#getting[BOM], you do not need to specify a ``. <2> `classifier` here is important. -==== .reactor-tools with https://github.com/raphw/byte-buddy/tree/byte-buddy-1.10.9/byte-buddy-gradle-plugin[ByteBuddy's Gradle plugin] -==== [source,groovy] ---- plugins { @@ -540,11 +526,11 @@ byteBuddy { } } ---- -<1> If you use the <>, you do not need to specify a `version`. +<1> If you use the xref:gettingStarted.adoc#getting[BOM], you do not need to specify a `version`. <2> `classifier` here is important. 
-==== +[[logging-a-sequence]] == Logging a Sequence In addition to stack trace debugging and analysis, another powerful tool to have in your @@ -576,19 +562,17 @@ For instance, suppose we have Logback activated and configured and a chain like insight into how it works and what kind of events it propagates upstream to the range, as the following example shows: -==== [source,java] +[%unbreakable] ---- Flux flux = Flux.range(1, 10) .log() .take(3); flux.subscribe(); ---- -==== This prints out the following (through the logger's console appender): -==== ---- 10:45:20.200 [main] INFO reactor.Flux.Range.1 - | onSubscribe([Synchronous Fuseable] FluxRange.RangeSubscription) <1> 10:45:20.205 [main] INFO reactor.Flux.Range.1 - | request(3) <2> @@ -614,7 +598,6 @@ synchronous or asynchronous fusion. <2> On the second line, we can see that take limited the request to upstream to 3. <3> Then the range sends three values in a row. <4> On the last line, we see `cancel()`. -==== The second (2) and last lines (4) are the most interesting. We can see the `take` in action there. It leverages backpressure in order to ask the source for exactly the expected amount of elements. diff --git a/docs/modules/ROOT/pages/faq.adoc b/docs/modules/ROOT/pages/faq.adoc index d8b177e726..bf57d58a88 100644 --- a/docs/modules/ROOT/pages/faq.adoc +++ b/docs/modules/ROOT/pages/faq.adoc @@ -1,25 +1,25 @@ [[faq]] -= FAQ, Best Practices, and "How do I...?" +== FAQ, Best Practices, and "How do I...?" This section covers the following content: -* <> -* <> -* <> -* <> -* <> -* <> -* <> +* xref:faq.adoc#faq.wrap-blocking[How Do I Wrap a Synchronous, Blocking Call?] +* xref:faq.adoc#faq.chain[I Used an Operator on my `Flux` but it Doesn't Seem to Apply. What Gives?] +* xref:faq.adoc#faq.monoThen[My `Mono` `zipWith` or `zipWhen` is never called] +* xref:faq.adoc#faq.retryWhen[How to Use `retryWhen` to Emulate `retry(3)`?] +* xref:faq.adoc#faq.exponentialBackoff[How can I use `retryWhen` for Exponential Backoff?] 
+* xref:faq.adoc#faq.thread-affinity-publishon[How Do I Ensure Thread Affinity when I Use `publishOn()`?] +* xref:faq.adoc#faq.mdc[What Is a Good Pattern for Contextual Logging? (MDC)] [[faq.wrap-blocking]] -== How Do I Wrap a Synchronous, Blocking Call? +=== How Do I Wrap a Synchronous, Blocking Call? It is often the case that a source of information is synchronous and blocking. To deal with such sources in your Reactor applications, apply the following pattern: -==== [source,java] +[%unbreakable] ---- Mono blockingWrapper = Mono.fromCallable(() -> { <1> return /* make a remote synchronous call */ <2> @@ -30,7 +30,6 @@ blockingWrapper = blockingWrapper.subscribeOn(Schedulers.boundedElastic()); <3> <2> Return the asynchronous, blocking resource. <3> Ensure each subscription happens on a dedicated worker from `Schedulers.boundedElastic()`. -==== You should use a `Mono`, because the source returns one value. You should use `Schedulers.boundedElastic`, because it creates a dedicated thread to wait for the @@ -45,7 +44,7 @@ Also, note that `subscribeOn` operator should immediately follow the source and further operators are defined after the `subscribeOn` wrapper. [[faq.chain]] -== I Used an Operator on my `Flux` but it Doesn't Seem to Apply. What Gives? +=== I Used an Operator on my `Flux` but it Doesn't Seem to Apply. What Gives? Make sure that the variable you `.subscribe()` to has been affected by the operators you think should have been applied to it. @@ -58,71 +57,67 @@ Compare the following two examples: .without chaining (incorrect) -==== [source,java] +[%unbreakable] ---- Flux flux = Flux.just("something", "chain"); flux.map(secret -> secret.replaceAll(".", "*")); <1> flux.subscribe(next -> System.out.println("Received: " + next)); ---- <1> The mistake is here. The result is not attached to the `flux` variable. 
-==== .without chaining (correct) -==== [source,java] +[%unbreakable] ---- Flux flux = Flux.just("something", "chain"); flux = flux.map(secret -> secret.replaceAll(".", "*")); flux.subscribe(next -> System.out.println("Received: " + next)); ---- -==== The following sample is even better (because it is simpler): .with chaining (best) -==== [source,java] +[%unbreakable] ---- Flux.just("something", "chain") .map(secret -> secret.replaceAll(".", "*")) .subscribe(next -> System.out.println("Received: " + next)); ---- -==== The first version outputs the following: -==== [source] +[%unbreakable] ---- + Received: something Received: chain ---- -==== The two other versions output the expected values, as follows: -==== [source] +[%unbreakable] ---- + Received: ********* Received: ***** ---- -==== [[faq.monoThen]] -== My `Mono` `zipWith` or `zipWhen` is never called +=== My `Mono` `zipWith` or `zipWhen` is never called Consider the following example: -==== [source,java] +[%unbreakable] ---- myMethod.process("a") // this method returns Mono .zipWith(myMethod.process("b"), combinator) //this is never called .subscribe(); ---- -==== If the source `Mono` is either `empty` or a `Mono` (a `Mono` is empty for all intents and purposes), some combinations are never called. @@ -147,25 +142,24 @@ which is still guaranteed to be empty. 
The following example uses `defaultIfEmpt .use `defaultIfEmpty` before `zipWhen` -==== [source,java] +[%unbreakable] ---- myMethod.emptySequenceForKey("a") // this method returns empty Mono .defaultIfEmpty("") // this converts empty sequence to just the empty String .zipWhen(aString -> myMethod.process("b")) //this is called with the empty String .subscribe(); ---- -==== [[faq.monoZipEmptyCompletion]] -== Using `zip` along with empty-completed publishers +=== Using `zip` along with empty-completed publishers When using the `zip` operator along with empty-completed publishers (i.e., publishers completing without emitting an item), it is important to be aware of the following behavior. Consider the following test case: -==== [source,java] +[%unbreakable] ---- @Test public void testZipEmptyCompletionAllSubscribed() { @@ -183,12 +177,11 @@ Consider the following test case: assertEquals(2, cnt.get()); } ---- -==== While in this case the resulting `zippedMono` subscribes to both `mono1` and `mono2`, such behaviour is not guaranteed for all cases. For instance, consider the following test case: -==== [source,java] +[%unbreakable] ---- @Test public void testZipEmptyCompletionOneSubscribed() { @@ -210,15 +203,14 @@ While in this case the resulting `zippedMono` subscribes to both `mono1` and `mo assertEquals(1, cnt.get()); } ---- -==== In this case upon empty completion of `mono1`, `zippedMono` completes immediately and does not subscribe to `mono2` and `mono3`. Therefore, in cases where `zip` operator is used to combine empty-completed publishers, it is not guaranteed that the resulting publisher will subscribe to all the empty-completed publishers. 
If it is necessary to keep the semantics as shown in the second test case and to ensure subscription to all the publishers to be zipped, consider using `singleOptional` operator, as demonstrated in the test case below: -==== [source,java] +[%unbreakable] ---- @Test @@ -245,10 +237,9 @@ public void testZipOptionalAllSubscribed() { assertEquals(3, cnt.get()); } ---- -==== [[faq.retryWhen]] -== How to Use `retryWhen` to Emulate `retry(3)`? +=== How to Use `retryWhen` to Emulate `retry(3)`? The `retryWhen` operator can be quite complex. Hopefully the following snippet of code can help you understand how it works by attempting to emulate a simpler @@ -257,7 +248,7 @@ can help you understand how it works by attempting to emulate a simpler include::snippetRetryWhenRetry.adoc[] [[faq.exponentialBackoff]] -== How can I use `retryWhen` for Exponential Backoff? +=== How can I use `retryWhen` for Exponential Backoff? Exponential backoff produces retry attempts with a growing delay between each of the attempts, so as not to overload the source systems and risk an all-out @@ -273,8 +264,8 @@ and after the retry attempt delays. It delays retries and increases the delay between each attempt (pseudocode: delay = 100ms * 2^attempt_number_starting_at_zero): -==== [source,java] +[%unbreakable] ---- AtomicInteger errorCount = new AtomicInteger(); Flux flux = @@ -294,11 +285,9 @@ Flux.error(new IllegalStateException("boom")) <3> We also log the time at which the retry happens, and the retry attempt number (starting from 0). <4> By default, an `Exceptions.retryExhausted` exception would be thrown, with the last `failure()` as a cause. Here we customize that to directly emit the cause as `onError`. 
-==== When subscribed to, this fails and terminates after printing out the following: -==== ---- java.lang.IllegalStateException: boom at 00:00:00.0 retried at 00:00:00.101, attempt 0 <1> @@ -312,20 +301,19 @@ java.lang.IllegalStateException: boom at 00:00:00.702 <1> First retry after about 100ms <2> Second retry after about 200ms <3> Third retry after about 400ms -==== [[faq.thread-affinity-publishon]] -== How Do I Ensure Thread Affinity when I Use `publishOn()`? +=== How Do I Ensure Thread Affinity when I Use `publishOn()`? -As described in <>, `publishOn()` can be used to switch +As described in xref:apdx-reactorExtra.adoc#extra-schedulers[Schedulers], `publishOn()` can be used to switch execution contexts. The `publishOn` operator influences the threading context where the rest of the operators in the chain below it run, up to a new occurrence of `publishOn`. So the placement of `publishOn` is significant. Consider the following example: -==== [source,java] +[%unbreakable] ---- Sinks.Many dataSinks = Sinks.many().unicast().onBackpressureBuffer(); Flux source = dataSinks.asFlux(); @@ -335,7 +323,6 @@ source.publishOn(scheduler1) .doOnNext(i -> processNext(i)) .subscribe(); ---- -==== The `transform` function in `map()` is run on a worker of `scheduler1`, and the `processNext` method in @@ -348,7 +335,7 @@ chain or for different subscribers. [[faq.mdc]] -== What Is a Good Pattern for Contextual Logging? (MDC) +=== What Is a Good Pattern for Contextual Logging? (MDC) Most logging frameworks allow contextual logging, letting users store variables that are reflected in the logging pattern, generally by way of a `Map` called the MDC ("Mapped Diagnostic Context"). This is one of the most recurring use of `ThreadLocal` in Java, and as a consequence this pattern assumes that the code being logged is tied in a one-to-one relationship with a `Thread`. 
@@ -371,15 +358,15 @@ So instead of using `ThreadLocal`, Reactor offers this map-like object that is t Now that we've established that MDC "just working" is not the best assumption to make in a declarative API, how can we perform contextualized log statements in relation to events in a Reactive Stream (`onNext`, `onError`, and `onComplete`)? This entry of the FAQ offers a possible intermediate solution when one wants to log in relation to these signals in a straightforward and explicit manner. -Make sure to read the <> section beforehand, and especially how a write must happen towards the bottom of the operator chain for operators above it to see it. +Make sure to read the xref:advancedFeatures/context.adoc[Adding a Context to a Reactive Sequence] section beforehand, and especially how a write must happen towards the bottom of the operator chain for operators above it to see it. To get contextual information from the `Context` to the MDC, the simplest way is to wrap logging statements in a `doOnEach` operator with a little bit of boilerplate code. This boilerplate depends on both the logging framework/abstraction of your choice and the information you want to put in the MDC, so it has to be in your codebase. The following is an example of such a helper function around a single MDC variable and focused on logging `onNext` events, using Java 9 enhanced `Optional` API: -==== [source,java] +[%unbreakable] ---- public static Consumer> logOnNext(Consumer logStatement) { return signal -> { @@ -396,11 +383,10 @@ public static Consumer> logOnNext(Consumer logStatement) { } ---- <1> `doOnEach` signals include `onComplete` and `onError`. 
In this example we're only interested in logging `onNext` -<2> We will extract one interesting value from the Reactor `Context` (see the <> section) +<2> We will extract one interesting value from the Reactor `Context` (see the xref:advancedFeatures/context.adoc#context.api[The `Context` API] section) <3> We use the `MDCCloseable` from SLF4J 2 in this example, allowing try-with-resource syntax for automatic cleanup of the MDC after the log statement is executed <4> Proper log statement is provided by the caller as a `Consumer` (consumer of the onNext value) <5> In case the expected key wasn't set in the `Context` we use the alternative path where nothing is put in the MDC -==== Using this boilerplate code ensures that we are good citizens with the MDC: we set a key right before we execute a logging statement and remove it immediately after. There is no risk of polluting the MDC for subsequent logging statements. @@ -410,8 +396,8 @@ You might want to create additional helper methods for these cases or craft a si In any case, the usage of the preceding helper method could look like the following reactive web controller: -==== [source,java] +[%unbreakable] ---- @GetMapping("/byPrice") public Flux byPrice(@RequestParam Double maxPrice, @RequestHeader(required = false, name = "X-UserId") String userId) { @@ -426,14 +412,13 @@ public Flux byPrice(@RequestParam Double maxPrice, @RequestHeader(re <1> We need to get the contextual information from the request header to put it in the `Context` <2> Here we apply our helper method to the `Flux`, using `doOnEach`. Remember: operators see `Context` values defined below them. <3> We write the value from the header to the `Context` using the chosen key `CONTEXT_KEY`. -==== In this configuration, the `restaurantService` can emit its data on a shared thread, yet the logs will still reference the correct `X-UserId` for each request. 
For completeness, we can also see what an error-logging helper could look like: -==== [source,java] +[%unbreakable] ---- public static Consumer> logOnError(Consumer errorLogStatement) { return signal -> { @@ -449,14 +434,13 @@ public static Consumer> logOnError(Consumer errorLogStateme }; } ---- -==== Nothing much has changed, except for the fact that we check that the `Signal` is effectively an `onError`, and that we provide said error (a `Throwable`) to the log statement lambda. Applying this helper in the controller is very similar to what we've done before: -==== [source,java] +[%unbreakable] ---- @GetMapping("/byPrice") public Flux byPrice(@RequestParam Double maxPrice, @RequestHeader(required = false, name = "X-UserId") String userId) { @@ -469,6 +453,5 @@ public Flux byPrice(@RequestParam Double maxPrice, @RequestHeader(re } ---- <1> In case the `restaurantService` emits an error, it will be logged with MDC context here -==== //TODO reactive gems diff --git a/docs/modules/ROOT/pages/gettingStarted.adoc b/docs/modules/ROOT/pages/gettingStarted.adoc index 0b4d0fccbe..ee09c50435 100644 --- a/docs/modules/ROOT/pages/gettingStarted.adoc +++ b/docs/modules/ROOT/pages/gettingStarted.adoc @@ -4,10 +4,10 @@ This section contains information that should help you get going with Reactor. 
It includes the following sections: -* <> -* <> -* <> -* <> +* xref:gettingStarted.adoc#getting-started-introducing-reactor[Introducing Reactor] +* xref:gettingStarted.adoc#prerequisites[Prerequisites] +* xref:gettingStarted.adoc#getting-started-understanding-bom[Understanding the BOM and versioning scheme] +* xref:gettingStarted.adoc#getting[Getting Reactor] [[getting-started-introducing-reactor]] == Introducing Reactor @@ -90,7 +90,7 @@ like YYYY.0.X-SNAPSHOT so we get 1 snapshot per PATCH) [[getting]] == Getting Reactor -As <>, the easiest way to use Reactor in your core is to use the BOM and +As xref:gettingStarted.adoc#getting-started-understanding-bom[mentioned earlier], the easiest way to use Reactor in your core is to use the BOM and add the relevant dependencies to your project. Note that, when you add such a dependency, you must omit the version so that the version gets picked up from the BOM. @@ -103,12 +103,12 @@ release train line is `{reactorReleaseTrain}`, which is what is used in snippets There might be newer versions since then (including snapshots, milestones and new release train lines), see https://projectreactor.io/docs for the latest artifacts and BOMs. +[[maven-installation]] === Maven Installation Maven natively supports the BOM concept. First, you need to import the BOM by adding the following snippet to your `pom.xml`: -==== [source,xml,subs=attributes+] ---- <1> @@ -125,14 +125,12 @@ adding the following snippet to your `pom.xml`: ---- <1> Notice the `dependencyManagement` tag. This is in addition to the regular `dependencies` section. -==== If the top section (`dependencyManagement`) already exists in your pom, add only the contents. Next, add your dependencies to the relevant reactor projects, as usual, except without a ``, as follows: -==== [source,xml] ---- @@ -151,8 +149,8 @@ Next, add your dependencies to the relevant reactor projects, as usual, except w <1> Dependency on the core library. <2> No version tag here. 
<3> `reactor-test` provides facilities to unit test reactive streams. -==== +[[gradle-installation]] === Gradle Installation Prior to version 5.0, Gradle has no core support for Maven BOMs, but you can use Spring's @@ -161,7 +159,6 @@ plugin. First, apply the plugin from the Gradle Plugin Portal, as follows: -==== [source,groovy] ---- plugins { @@ -170,11 +167,9 @@ plugins { ---- <1> as of this writing, 1.0.7.RELEASE is the latest version of the plugin. Check for updates. -==== Then use it to import the BOM, as follows: -==== [source,groovy,subs=attributes+] ---- dependencyManagement { @@ -183,11 +178,9 @@ dependencyManagement { } } ---- -==== Finally add a dependency to your project, without a version number, as follows: -==== [source,groovy] ---- dependencies { @@ -196,11 +189,9 @@ dependencies { ---- <1> There is no third `:` separated section for the version. It is taken from the BOM. -==== Since Gradle 5.0, you can use the native Gradle support for BOMs: -==== [source,groovy,subs=attributes+] ---- dependencies { @@ -210,9 +201,9 @@ dependencies { ---- <1> There is no third `:` separated section for the version. It is taken from the BOM. -==== +[[milestones-and-snapshots]] === Milestones and Snapshots Milestones and developer previews are distributed through the Spring Milestones @@ -220,7 +211,6 @@ repository rather than Maven Central. 
To add it to your build configuration file, use the following snippet: .Milestones in Maven -==== [source,xml] ---- @@ -231,12 +221,10 @@ file, use the following snippet: ---- -==== For Gradle, use the following snippet: .Milestones in Gradle -==== [source,groovy] ---- repositories { @@ -244,12 +232,10 @@ repositories { mavenCentral() } ---- -==== Similarly, snapshots are also available in a separate dedicated repository, as the following example show: .-SNAPSHOTs in Maven -==== [source,xml] ---- @@ -260,10 +246,8 @@ Similarly, snapshots are also available in a separate dedicated repository, as t ---- -==== .-SNAPSHOTs in Gradle -==== [source,groovy] ---- repositories { @@ -271,11 +255,10 @@ repositories { mavenCentral() } ---- -==== [[support]] == Support and policies The entries below are mirroring https://github.com/reactor/.github/blob/main/SUPPORT.adoc -include::https://raw.githubusercontent.com/reactor/.github/main/SUPPORT.adoc[leveloffset=3] +include::partial$SUPPORT.adoc[leveloffset=2] diff --git a/docs/modules/ROOT/pages/kotlin.adoc b/docs/modules/ROOT/pages/kotlin.adoc index 4c9552f11c..bb82b7e7c6 100644 --- a/docs/modules/ROOT/pages/kotlin.adoc +++ b/docs/modules/ROOT/pages/kotlin.adoc @@ -29,12 +29,10 @@ module with new package names that start with `reactor.kotlin` instead of simply As a consequence, Kotlin extensions in `reactor-core` module are deprecated. The new dependency's groupId and artifactId are: -==== [source,gradle] ---- io.projectreactor.kotlin:reactor-kotlin-extensions ---- -==== ===== Thanks to its great https://kotlinlang.org/docs/reference/java-interop.html[Java interoperability] @@ -52,7 +50,7 @@ provide a workaround for JVM https://docs.oracle.com/javase/tutorial/java/generi and Reactor provides some extensions to take advantage of this feature. 
The following table compares Reactor with Java against Reactor with Kotlin and extensions: - +[%unbreakable] |=== |*Java*|*Kotlin with extensions* |`Mono.just("foo")` @@ -82,8 +80,8 @@ declarations and expressive "`value or no value`" semantics without paying the c (Kotlin allows using functional constructs with nullable values. See this https://www.baeldung.com/kotlin-null-safety[comprehensive guide to Kotlin null-safety].) -Although Java does not let one express null safety in its type-system, Reactor <> of the whole Reactor API through tooling-friendly annotations declared +Although Java does not let one express null safety in its type-system, Reactor xref:kotlin.adoc#kotlin-null-safety[now provides null safety] + of the whole Reactor API through tooling-friendly annotations declared in the `reactor.util.annotation` package. By default, types from Java APIs used in Kotlin are recognized as https://kotlinlang.org/docs/reference/java-interop.html#null-safety-and-platform-types[platform types] diff --git a/docs/modules/ROOT/pages/metrics-details.adoc b/docs/modules/ROOT/pages/metrics-details.adoc index 5cde5febe3..5553996a15 100644 --- a/docs/modules/ROOT/pages/metrics-details.adoc +++ b/docs/modules/ROOT/pages/metrics-details.adoc @@ -1,10 +1,8 @@ -:root-target: ./../../build/documentedMetrics/ - -### Meters and tags for Reactor-Core-Micrometer module += Meters and tags for Reactor-Core-Micrometer module [[micrometer-details-metrics]] -#### `Micrometer.metrics()` +== `Micrometer.metrics()` Below is the list of meters used by the metrics tap listener feature, as exposed via `Micrometer.metrics(MeterRegistry meterRegistry)`. @@ -12,19 +10,19 @@ IMPORTANT: Please note that metrics below use a dynamic `%s` prefix. When applied on a `Flux` or `Mono` that uses the `name(String n)` operator, this is replaced with `n`. Otherwise, this is replaced by the default value of `"reactor"`. 
-include::{root-target}meterListener_metrics.adoc[leveloffset=4] +include::partial$meterListener_metrics.adoc[leveloffset=2] [[micrometer-details-timedScheduler]] -#### `Micrometer.timedScheduler()` +== `Micrometer.timedScheduler()` Below is the list of meters used by the TimedScheduler feature, as exposed via `Micrometer.timedScheduler(Scheduler original, MeterRegistry meterRegistry, String metricsPrefix)`. IMPORTANT: Please note that metrics below use a dynamic `%s` prefix. This is replaced with the provided `metricsPrefix` in practice. -include::{root-target}timedScheduler_metrics.adoc[leveloffset=4] +include::partial$timedScheduler_metrics.adoc[leveloffset=2] [[micrometer-details-observation]] -#### `Micrometer.observation()` +== `Micrometer.observation()` Below is the list of meters used by the observation tap listener feature, as exposed via `Micrometer.observation(ObservationRegistry registry)`. @@ -34,4 +32,4 @@ NOTE: You can also fully customize Micrometer's Observation via `Micrometer.observation(ObservationRegistry registry, Function observationSupplier)` with your own Observation supplier, allowing to configure its attributes (name, contextual name, low and high cardinality keys, ...). -include::{root-target}observation_metrics.adoc[leveloffset=4] \ No newline at end of file +include::partial$observation_metrics.adoc[leveloffset=2] diff --git a/docs/modules/ROOT/pages/metrics.adoc b/docs/modules/ROOT/pages/metrics.adoc index 083117c4d6..517f23638e 100644 --- a/docs/modules/ROOT/pages/metrics.adoc +++ b/docs/modules/ROOT/pages/metrics.adoc @@ -13,14 +13,15 @@ The `reactor-core-micrometer` APIs require the user to provide a form of _regist When applying instrumentation to classes that have a NATIVE notion of naming or tags, these APIs will attempt to discover such elements in the reactive chain. Otherwise, the API will expect that a _prefix_ for naming meters is provided alongside the registry. 
+[[scheduler-metrics]] == Scheduler metrics -Every async operation in Reactor is done via the Scheduler abstraction described in <>. +Every async operation in Reactor is done via the Scheduler abstraction described in xref:coreFeatures/schedulers#schedulers[Threading and Schedulers]. This is why it is important to monitor your schedulers, watch out for key metrics that start to look suspicious and react accordingly. The `reactor-core-micrometer` module offers a "timed" `Scheduler` wrapper that perform measurements around tasks submitted through it, which can be used as follows: -==== [source,java] +[%unbreakable] ---- Scheduler originalScheduler = Schedulers.newParallel("test", 4); @@ -31,7 +32,6 @@ Scheduler schedulerWithMetrics = Micrometer.timedScheduler( Tags.of(Tag.of("additionalTag", "yes")) // <4> ); ---- -==== <1> the `Scheduler` to wrap <2> the `MeterRegistry` in which to publish metrics <3> the prefix to use in naming meters. This would for example lead to a `testingMetrics.scheduler.tasks.completed` meter being created. @@ -40,11 +40,12 @@ Scheduler schedulerWithMetrics = Micrometer.timedScheduler( IMPORTANT: When wrapping a common `Scheduler` (eg. `Schedulers.single()`) or a `Scheduler` that is used in multiple places, only the `Runnable` tasks that are submitted through the wrapper instance returned by `Micrometer#timedScheduler` are going to be instrumented. -See <> for produced meters and associated default tags. +See xref:metrics-details.adoc#micrometer-details-timedScheduler[Micrometer.timedScheduler()] for produced meters and associated default tags. -// FIXME reactor-monitoring-demo won't be in sync with 3.5.0 anymore +FIXME reactor-monitoring-demo won't be in sync with 3.5.0 anymore //TIP: Grafana + Prometheus users can use https://raw.githubusercontent.com/reactor/reactor-monitoring-demo/master/dashboards/schedulers.json[a pre-built dashboard] which includes panels for threads, completed tasks, task queues and other handy metrics. 
+[[publisher-metrics]] == Publisher metrics Sometimes it is useful to be able to record metrics at some stage in your reactive pipeline. @@ -53,8 +54,8 @@ provided to the `tap` operator. An out-of-the-box implementation is actually provided by the `reactor-core-micrometer` module, via `Micrometer#metrics` APIs. Consider the following pipeline: -==== [source,java] +[%unbreakable] ---- listenToEvents() .doOnNext(event -> log.info("Received {}", event)) @@ -62,12 +63,11 @@ listenToEvents() .retry() .subscribe(); ---- -==== To enable the metrics for this source `Flux` (returned from `listenToEvents()`), we need to turn on the metrics collection: -==== [source,java] +[%unbreakable] ---- listenToEvents() .name("events") // <1> @@ -82,9 +82,8 @@ listenToEvents() <1> Every metric at this stage of the reactive pipeline will use "events" as a naming prefix (optional, defaults to `reactor` prefix). <2> We use the `tap` operator combined with a `SignalListener` implementation provided in `reactor-core-micrometer` for metrics collection. <3> As with other APIs in that module, the `MeterRegistry` into which to publish metrics needs to be explicitly provided. -==== -The detail of the exposed metrics is available in <>. +The detail of the exposed metrics is available in xref:metrics-details.adoc#micrometer-details-metrics[Micrometer.metrics()]. //TODO update and reintroduce tips for using the metrics //Want to know how many times your event processing has restarted due to some error? Read `[name].subscribed`, because `retry()` operator will re-subscribe to the source publisher on error. @@ -96,11 +95,12 @@ The detail of the exposed metrics is available in <> // //Please note that when giving a name to a sequence, this sequence could not be aggregated with others anymore. As a compromise if you want to identify your sequence but still make it possible to aggregate with other views, you can use a <> for the name by calling `(tag("flow", "events"))` for example. 
+[[tags]] === Tags -In addition to the common tags described in <>, users can add custom tags to their reactive chains via the `tag` operator: -==== +In addition to the common tags described in xref:metrics-details.adoc#micrometer-details-metrics[Micrometer.metrics()], users can add custom tags to their reactive chains via the `tag` operator: [source,java] +[%unbreakable] ---- listenToEvents() .name("events") // <1> @@ -114,18 +114,18 @@ listenToEvents() <1> Every metric at this stage will be identified with the "events" prefix. <2> Set a custom tag "source" to value "kafka". <3> All reported metrics will have `source=kafka` tag assigned in addition to the common tags. -==== Please note that depending on the monitoring system you're using, using a name can be considered mandatory when using tags, since it would otherwise result in a different set of tags between two default-named sequences. Some systems like Prometheus might also require to have the exact same set of tags for each metric with the same name. +[[observation]] === Observation In addition to full metrics, the `reactor-core-micrometer` module offers an alternative based on Micrometer's `Observation`. Depending on the configuration and runtime classpath, an `Observation` could translate to timers, spans, logging statements or any combination. A reactive chain can be observed via the `tap` operator and `Micrometer.observation` utility, as follows: -==== [source,java] +[%unbreakable] ---- listenToEvents() .name("events") // <1> @@ -139,15 +139,14 @@ listenToEvents() <1> The `Observation` for this pipeline will be identified with the "events" prefix. <2> We use the `tap` operator with the `observation` utility. <3> A registry must be provided into which to publish the observation results. Note this is an `ObservationRegistry`. -==== -The detail of the observation and its tags is provided in <>. 
+The detail of the observation and its tags is provided in xref:metrics-details.adoc#micrometer-details-observation[Micrometer.observation()]. You can also fully customize Micrometer's Observation via `Micrometer.observation(ObservationRegistry registry, Function observationSupplier)` with your own Observation supplier, as follows: -==== [source,java] +[%unbreakable] ---- listenToEvents() .name("events") // <1> @@ -168,6 +167,5 @@ listenToEvents() <4> We provide our own function to create the Observation <5> with a custom `ObservationConvention` <6> and a custom `Supplier`. -==== - +include::metrics-details.adoc[] \ No newline at end of file diff --git a/docs/modules/ROOT/pages/processors.adoc b/docs/modules/ROOT/pages/processors.adoc index b332f12d83..41af9f8d87 100644 --- a/docs/modules/ROOT/pages/processors.adoc +++ b/docs/modules/ROOT/pages/processors.adoc @@ -1,9 +1,12 @@ +[[sinks]] += Sinks + In Reactor a sink is a class that allows safe manual triggering of signals in a standalone fashion, creating a `Publisher`-like structure capable of dealing with multiple `Subscriber` (with the exception of `unicast()` flavors). Before `3.5.0`, there was also a set of `Processor` implementations which has been phased out. [[sinks-intro]] -= Safely Produce from Multiple Threads by Using `Sinks.One` and `Sinks.Many` +== Safely Produce from Multiple Threads by Using `Sinks.One` and `Sinks.Many` Default flavors of `Sinks` exposed by reactor-core ensure that multi-threaded usage is detected and cannot lead to spec violations or undefined behavior from the perspective of downstream @@ -35,17 +38,16 @@ Sinks are usually a better alternative. The `Sinks` builder provide a guided API to the main supported producer types. You will recognize some of the behavior found in `Flux` such as `onBackpressureBuffer`. 
-==== [source,java] +[%unbreakable] ---- Sinks.Many replaySink = Sinks.many().replay().all(); ---- -==== Multiple producer threads may concurrently generate data on the sink by doing the following: -==== [source,java] +[%unbreakable] ---- //thread1 replaySink.emitNext(1, EmitFailureHandler.FAIL_FAST); @@ -63,7 +65,6 @@ EmitResult result = replaySink.tryEmitNext(4); ---- -==== [NOTE] ==== @@ -74,8 +75,8 @@ Also, it is recommended to use a timeout above 100ms since smaller values don’ The `Sinks.Many` can be presented to downstream consumers as a `Flux`, like in the below example: -==== [source,java] +[%unbreakable] ---- Flux fluxView = replaySink.asFlux(); fluxView @@ -83,7 +84,6 @@ fluxView .log() .blockLast(); ---- -==== Similarly, the `Sinks.Empty` and `Sinks.One` flavors can be viewed as a `Mono` with the `asMono()` method. @@ -97,9 +97,10 @@ The `Sinks` categories are: . `empty()`: a sink that will play a terminal signal only to its subscribers (error or complete), but can still be viewed as a `Mono` (notice the generic type ``). [[sinks-overview]] -= Overview of Available Sinks +== Overview of Available Sinks -== Sinks.many().unicast().onBackpressureBuffer(args?) +[[sinks-many-unicast-onbackpressurebufferargs]] +=== Sinks.many().unicast().onBackpressureBuffer(args?) A unicast `Sinks.Many` can deal with backpressure by using an internal buffer. The trade-off is that it can have _at most one_ `Subscriber`. @@ -114,7 +115,8 @@ buffering in the `Sinks.many().unicast().onBackpressureBuffer(Queue)` factory me If that queue is bounded, the sink could reject the push of a value when the buffer is full and not enough requests from downstream have been received. -== Sinks.many().multicast().onBackpressureBuffer(args?) +[[sinks-many-multicast-onbackpressurebufferargs]] +=== Sinks.many().multicast().onBackpressureBuffer(args?) A multicast `Sinks.Many` can emit to several subscribers while honoring backpressure for each of its subscribers. 
Subscribers receive only the signals pushed through the sink after they have subscribed. @@ -126,7 +128,8 @@ un-subscribed), it clears its internal buffer and stops accepting new subscriber You can tune this by using the `autoCancel` parameter in the `multicast` static factory methods under `Sinks.many().multicast()`. -== Sinks.many().multicast().directAllOrNothing() +[[sinks-many-multicast-directallornothing]] +=== Sinks.many().multicast().directAllOrNothing() A multicast `Sinks.Many` with a simplistic handling of backpressure: if *any* of the subscribers is too slow (has zero demand), the `onNext` is dropped for *all* subscribers. @@ -138,7 +141,8 @@ Once the `Sinks.Many` has terminated (usually through its `emitError(Throwable)` `emitComplete()` methods being called), it lets more subscribers subscribe but replays the termination signal to them immediately. -== Sinks.many().multicast().directBestEffort() +[[sinks-many-multicast-directbesteffort]] +=== Sinks.many().multicast().directBestEffort() A multicast `Sinks.Many` with a best effort handling of backpressure: if a subscriber is too slow (has zero demand), the `onNext` is dropped for *this slow subscriber only*. @@ -150,7 +154,8 @@ Once the `Sinks.Many` has terminated (usually through its `emitError(Throwable)` `emitComplete()` methods being called), it lets more subscribers subscribe but replays the termination signal to them immediately. -== Sinks.many().replay() +[[sinks-many-replay]] +=== Sinks.many().replay() A replay `Sinks.Many` caches emitted elements and replays them to late subscribers. @@ -163,7 +168,8 @@ It can be created in multiple configurations: Additional overloads for fine tuning of the above can also be found under `Sinks.many().replay()`, as well as a variant that allows caching of a single element (`latest()` and `latestOrDefault(T)`). 
-== Sinks.unsafe().many() +[[sinks-unsafe-many]] +=== Sinks.unsafe().many() Advanced users and operators builders might want to consider using `Sinks.unsafe().many()` which will provide the same `Sinks.Many` factories _without_ the extra producer thread safety. @@ -173,7 +179,8 @@ Library developers should not expose unsafe sinks but can use them internally in calling environment where they can ensure external synchronization of the calls that lead to `onNext`, `onComplete` and `onError` signals, in respect of the Reactive Streams specification. -== Sinks.one() +[[sinks-one]] +=== Sinks.one() This method directly construct a simple instance of `Sinks.One`. This flavor of `Sinks` is viewable as a `Mono` (through its `asMono()` view method), and @@ -186,7 +193,8 @@ has slightly different `emit` methods to better convey this Mono-like semantics: `Sinks.one()` accepts _one_ call of any of these methods, effectively generating a `Mono` that either completed with a value, completed empty or failed. -== Sinks.empty() +[[sinks-empty]] +=== Sinks.empty() This method directly constructs a simple instance of `Sinks.Empty`. This flavor of `Sinks` is like `Sinks.One`, except it doesn't offer the `emitValue` method. diff --git a/docs/modules/ROOT/pages/producing.adoc b/docs/modules/ROOT/pages/producing.adoc index ae6cc7929f..67634cf613 100644 --- a/docs/modules/ROOT/pages/producing.adoc +++ b/docs/modules/ROOT/pages/producing.adoc @@ -25,9 +25,10 @@ state, and your generator function now returns a new state on each round. For instance, you could use an `int` as the state: .Example of state-based `generate` -==== [source,java] +[%unbreakable] ---- + Flux flux = Flux.generate( () -> 0, // <1> (state, sink) -> { @@ -42,11 +43,9 @@ of 3). <3> We also use it to choose when to stop. <4> We return a new state that we use in the next invocation (unless the sequence terminated in this one). 
-==== The preceding code generates the table of 3, as the following sequence: - -==== +[%unbreakable] ---- 3 x 0 = 0 3 x 1 = 3 @@ -60,15 +59,15 @@ The preceding code generates the table of 3, as the following sequence: 3 x 9 = 27 3 x 10 = 30 ---- -==== You can also use a mutable ``. The example above could for instance be rewritten using a single `AtomicLong` as the state, mutating it on each round: .Mutable state variant -==== [source,java] +[%unbreakable] ---- + Flux flux = Flux.generate( AtomicLong::new, // <1> (state, sink) -> { @@ -81,7 +80,6 @@ Flux flux = Flux.generate( <1> This time, we generate a mutable object as the state. <2> We mutate the state here. <3> We return the *same* instance as the new state. -==== TIP: If your state object needs to clean up some resources, use the `generate(Supplier, BiFunction, Consumer)` variant to clean up the last @@ -89,7 +87,6 @@ state instance. The following example uses the `generate` method that includes a `Consumer`: -==== [source, java] ---- Flux flux = Flux.generate( @@ -105,7 +102,6 @@ Flux flux = Flux.generate( <2> We mutate the state here. <3> We return the *same* instance as the new state. <4> We see the last state value (11) as the output of this `Consumer` lambda. -==== In the case of the state containing a database connection or other resource that needs to be handled at the end of the process, the `Consumer` lambda could @@ -138,21 +134,22 @@ Imagine that you use a listener-based API. 
It processes data by chunks and has two events: (1) a chunk of data is ready and (2) the processing is complete (terminal event), as represented in the `MyEventListener` interface: -==== [source,java] +[%unbreakable] ---- + interface MyEventListener { void onDataChunk(List chunk); void processComplete(); } ---- -==== You can use `create` to bridge this into a `Flux`: -==== [source,java] +[%unbreakable] ---- + Flux bridge = Flux.create(sink -> { myEventProcessor.register( // <4> new MyEventListener() { // <1> @@ -173,7 +170,6 @@ Flux bridge = Flux.create(sink -> { <2> Each element in a chunk becomes an element in the `Flux`. <3> The `processComplete` event is translated to `onComplete`. <4> All of this is done asynchronously whenever the `myEventProcessor` executes. -==== Additionally, since `create` can bridge asynchronous APIs and manages backpressure, you can refine how to behave backpressure-wise, by indicating an `OverflowStrategy`: @@ -191,6 +187,7 @@ This may yield `IllegalStateException` when queues get full downstream. NOTE: `Mono` also has a `create` generator. The `MonoSink` of Mono's create doesn't allow several emissions. It will drop all signals after the first one. +[[asynchronous-but-single-threaded:-push]] == Asynchronous but single-threaded: `push` `push` is a middle ground between `generate` and `create` which is suitable for processing events from a single producer. It is similar to `create` in the sense @@ -198,9 +195,10 @@ that it can also be asynchronous and can manage backpressure using any of the overflow strategies supported by `create`. However, **only one producing thread** may invoke `next`, `complete` or `error` at a time. -==== [source,java] +[%unbreakable] ---- + Flux bridge = Flux.push(sink -> { myEventProcessor.register( new SingleThreadEventListener() { // <1> @@ -225,8 +223,8 @@ Flux bridge = Flux.push(sink -> { <2> Events are pushed to the sink using `next` from a single listener thread. 
<3> `complete` event generated from the same listener thread. <4> `error` event also generated from the same listener thread. -==== +[[a-hybrid-push-pull-model]] === A hybrid push/pull model Most Reactor operators, like `create`, follow a hybrid **push/pull** model. What we mean by that is that despite most of the processing being asynchronous @@ -241,9 +239,10 @@ Note that `push()` and `create()` both allow to set up an `onRequest` consumer in order to manage the request amount and to ensure that data is pushed through the sink only when there is pending request. -==== [source,java] +[%unbreakable] ---- + Flux bridge = Flux.create(sink -> { myMessageProcessor.register( new MyMessageListener() { @@ -265,8 +264,8 @@ Flux bridge = Flux.create(sink -> { <1> Poll for messages when requests are made. <2> If messages are available immediately, push them to the sink. <3> The remaining messages that arrive asynchronously later are also delivered. -==== +[[cleaning-up-after-push-or-create]] === Cleaning up after `push()` or `create()` Two callbacks, `onDispose` and `onCancel`, perform any cleanup on cancellation @@ -274,9 +273,10 @@ or termination. `onDispose` can be used to perform cleanup when the `Flux` completes, errors out, or is cancelled. `onCancel` can be used to perform any action specific to cancellation prior to cleanup with `onDispose`. -==== [source,java] +[%unbreakable] ---- + Flux bridge = Flux.create(sink -> { sink.onRequest(n -> channel.poll(n)) .onCancel(() -> channel.cancel()) // <1> @@ -285,8 +285,8 @@ Flux bridge = Flux.create(sink -> { ---- <1> `onCancel` is invoked first, for cancel signal only. <2> `onDispose` is invoked for complete, error, or cancel signals. -==== +[[handle]] == Handle The `handle` method is a bit different: it is an instance method, meaning that it is chained on an existing source (as are the common operators). It is present @@ -298,12 +298,12 @@ arbitrary value out of each source element, possibly skipping some elements. 
In this way, it can serve as a combination of `map` and `filter`. The signature of handle is as follows: -==== [source,java] +[%unbreakable] ---- + Flux handle(BiConsumer>); ---- -==== Let's consider an example. The reactive streams specification disallows `null` values in a sequence. What if you want to perform a `map` but you want to use @@ -312,9 +312,10 @@ a preexisting method as the map function, and that method sometimes returns null For instance, the following method can be applied safely to a source of integers: -==== [source,java] +[%unbreakable] ---- + public String alphabet(int letterNumber) { if (letterNumber < 1 || letterNumber > 26) { return null; @@ -323,13 +324,12 @@ public String alphabet(int letterNumber) { return "" + (char) letterIndexAscii; } ---- -==== We can then use `handle` to remove any nulls: .Using `handle` for a "map and eliminate nulls" scenario -==== [source,java] +[%unbreakable] ---- Flux alphabet = Flux.just(-1, 30, 13, 9, 20) .handle((i, sink) -> { @@ -343,14 +343,11 @@ alphabet.subscribe(System.out::println); <1> Map to letters. <2> If the "map function" returns null.... <3> Filter it out by not calling `sink.next`. -==== Which will print out: -==== ---- M I T ---- -==== diff --git a/docs/modules/ROOT/pages/reactiveProgramming.adoc b/docs/modules/ROOT/pages/reactiveProgramming.adoc index fb0e298d89..a5a31b3c73 100644 --- a/docs/modules/ROOT/pages/reactiveProgramming.adoc +++ b/docs/modules/ROOT/pages/reactiveProgramming.adoc @@ -38,12 +38,12 @@ calling `onNext`) but can also signal an error (by calling `onError`) or complet calling `onComplete`). Both errors and completion terminate the sequence. This can be summed up as follows: -==== [source] +[%unbreakable] ---- + onNext x 0..N [onError | onComplete] ---- -==== This approach is very flexible. The pattern supports use cases where there is no value, one value, or n values (including an infinite sequence of values, such as the continuing @@ -51,6 +51,7 @@ ticks of a clock). 
But why do we need such an asynchronous reactive library in the first place? +[[blocking-can-be-wasteful]] == Blocking Can Be Wasteful Modern applications can reach huge numbers of concurrent users, and, even though the @@ -77,6 +78,7 @@ So the parallelization approach is not a silver bullet. It is necessary to access the full power of the hardware, but it is also complex to reason about and susceptible to resource wasting. +[[asynchronicity-to-the-rescue]] == Asynchronicity to the Rescue? The second approach mentioned earlier, seeking more efficiency, can be a solution @@ -108,8 +110,8 @@ the second fetches favorite details, and the third offers suggestions with detai follows: .Example of Callback Hell -==== [source,java] +[%unbreakable] ---- userService.getFavorites(userId, new Callback>() { //<1> public void onSuccess(List list) { //<2> @@ -164,15 +166,15 @@ go to the `favoriteService` to get detailed `Favorite` objects. Since we want on we first stream the list of IDs to limit it to five. <9> Once again, a callback. This time we get a fully-fledged `Favorite` object that we push to the UI inside the UI thread. -==== That is a lot of code, and it is a bit hard to follow and has repetitive parts. Consider its equivalent in Reactor: .Example of Reactor code equivalent to callback code -==== [source,java] +[%unbreakable] ---- + userService.getFavorites(userId) // <1> .flatMap(favoriteService::getDetails) // <2> .switchIfEmpty(suggestionService.getSuggestions()) // <3> @@ -189,16 +191,16 @@ userService.getFavorites(userId) // <1> <5> At the end, we want to process each piece of data in the UI thread. <6> We trigger the flow by describing what to do with the final form of the data (show it in a UI list) and what to do in case of an error (show a popup). -==== What if you want to ensure the favorite IDs are retrieved in less than 800ms or, if it takes longer, get them from a cache? In the callback-based code, that is a complicated task. 
In Reactor it becomes as easy as adding a `timeout` operator in the chain, as follows: .Example of Reactor code with timeout and fallback -==== [source,java] +[%unbreakable] ---- + userService.getFavorites(userId) .timeout(Duration.ofMillis(800)) // <1> .onErrorResume(cacheService.cachedFavoritesFor(userId)) // <2> @@ -211,7 +213,6 @@ userService.getFavorites(userId) <1> If the part above emits nothing for more than 800ms, propagate an error. <2> In case of an error, fall back to the `cacheService`. <3> The rest of the chain is similar to the previous example. -==== `Future` objects are a bit better than callbacks, but they still do not do well at composition, despite the improvements brought in Java 8 by `CompletableFuture`. Orchestrating multiple @@ -227,9 +228,10 @@ statistic and combine these pair-wise, all of it asynchronously. The following e does so with a list of type `CompletableFuture`: .Example of `CompletableFuture` combination -==== [source,java] +[%unbreakable] ---- + CompletableFuture> ids = ifhIds(); // <1> CompletableFuture> result = ids.thenComposeAsync(l -> { // <2> @@ -272,15 +274,15 @@ reiterate over the list of futures, collecting their results by using `join()` (which, here, does not block, since `allOf` ensures the futures are all done). <10> Once the whole asynchronous pipeline has been triggered, we wait for it to be processed and return the list of results that we can assert. -==== Since Reactor has more combination operators out of the box, this process can be simplified, as follows: .Example of Reactor code equivalent to future code -==== [source,java] +[%unbreakable] ---- + Flux ids = ifhrIds(); // <1> Flux combinations = @@ -316,11 +318,11 @@ combining it or subscribing to it. Most probably, we would return the `result` ` Since we are in a test, we instead block, waiting for the processing to finish, and then directly return the aggregated list of values. <8> Assert the result. 
-==== The perils of using callbacks and `Future` objects are similar and are what reactive programming addresses with the `Publisher-Subscriber` pair. +[[from-imperative-to-reactive-programming]] == From Imperative to Reactive Programming Reactive libraries, such as Reactor, aim to address these drawbacks of "`classic`" @@ -333,6 +335,7 @@ asynchronous approaches on the JVM while also focusing on a few additional aspec emission is too high_ * *High level* but *high value* abstraction that is _concurrency-agnostic_ +[[composability-and-readability]] === Composability and Readability By "`composability`", we mean the ability to orchestrate multiple asynchronous tasks, in @@ -353,6 +356,7 @@ Reactor offers rich composition options, wherein code mirrors the organization o abstract process, and everything is generally kept at the same level (nesting is minimized). +[[the-assembly-line-analogy]] === The Assembly Line Analogy You can think of data processed by a reactive application as moving through an assembly @@ -366,6 +370,7 @@ is a glitch or clogging at one point (perhaps boxing the products takes a disproportionately long time), the afflicted workstation can signal upstream to limit the flow of raw material. +[[operators]] === Operators In Reactor, operators are the workstations in our assembly analogy. Each operator adds @@ -377,7 +382,7 @@ as we will see shortly. TIP: Understanding that operators create new instances can help you avoid a common mistake that would lead you to believe that an operator you used in your chain is not -being applied. See this <> in the FAQ. +being applied. See this xref:faq.adoc#faq.chain[item] in the FAQ. While the Reactive Streams specification does not specify operators at all, one of the best added values of reactive libraries, such as Reactor, is the rich vocabulary of @@ -434,6 +439,6 @@ a general perspective, a hot sequence can even emit when no subscriber is listen exception to the "`nothing happens before you subscribe`" rule). 
For more information on hot vs cold in the context of Reactor, see -<>. +xref:advancedFeatures/reactor-hotCold.adoc[this reactor-specific section]. //TODO talk about being concurrency-agnostic? Elements of functional style? diff --git a/docs/modules/ROOT/pages/snippetRetryWhenRetry.adoc b/docs/modules/ROOT/pages/snippetRetryWhenRetry.adoc index 75aad1989e..b6b88f1b27 100644 --- a/docs/modules/ROOT/pages/snippetRetryWhenRetry.adoc +++ b/docs/modules/ROOT/pages/snippetRetryWhenRetry.adoc @@ -1,6 +1,7 @@ -==== [source,java] +[%unbreakable] ---- + AtomicInteger errorCount = new AtomicInteger(); Flux flux = Flux.error(new IllegalArgumentException()) @@ -17,4 +18,3 @@ Flux flux = <3> To allow for three retries, we consider indexes < 3 and return a value to emit (here we simply return the index). <4> In order to terminate the sequence in error, we throw the original exception after these three retries. -==== diff --git a/docs/modules/ROOT/pages/subscribe-backpressure.adoc b/docs/modules/ROOT/pages/subscribe-backpressure.adoc index ab29e49118..a75aeedf9d 100644 --- a/docs/modules/ROOT/pages/subscribe-backpressure.adoc +++ b/docs/modules/ROOT/pages/subscribe-backpressure.adoc @@ -1,4 +1,5 @@ -=== On Backpressure and Ways to Reshape Requests +[[on-backpressure-and-ways-to-reshape-requests]] +== On Backpressure and Ways to Reshape Requests When implementing backpressure in Reactor, the way consumer pressure is propagated back to the source is by sending a `request` to the upstream operator. The sum of current requests is sometimes referenced to as the current "`demand`", or "`pending request`". 
@@ -12,9 +13,10 @@ The first request comes from the final subscriber at subscription time, yet the The simplest way of customizing the original request is to `subscribe` with a `BaseSubscriber` with the `hookOnSubscribe` method overridden, as the following example shows: -==== [source,java] +[%unbreakable] ---- + Flux.range(1, 10) .doOnRequest(r -> System.out.println("request of " + r)) .subscribe(new BaseSubscriber() { @@ -31,24 +33,24 @@ Flux.range(1, 10) } }); ---- -==== The preceding snippet prints out the following: -==== [source] +[%unbreakable] ---- + request of 1 Cancelling after having received 1 ---- -==== WARNING: When manipulating a request, you must be careful to produce enough demand for the sequence to advance, or your Flux can get "`stuck`". That is why `BaseSubscriber` defaults to an unbounded request in `hookOnSubscribe`. When overriding this hook, you should usually call `request` at least once. -==== Operators that Change the Demand from Downstream +[[operators-that-change-the-demand-from-downstream]] +=== Operators that Change the Demand from Downstream One thing to keep in mind is that demand expressed at the subscribe level *can* be reshaped by each operator in the upstream chain. A textbook case is the `buffer(N)` operator: If it receives a `request(2)`, it is interpreted as a demand for *two full buffers*. diff --git a/docs/modules/ROOT/pages/subscribe-details.adoc b/docs/modules/ROOT/pages/subscribe-details.adoc index e0328f1f02..df449a3274 100644 --- a/docs/modules/ROOT/pages/subscribe-details.adoc +++ b/docs/modules/ROOT/pages/subscribe-details.adoc @@ -1,47 +1,47 @@ -=== `subscribe` Method Examples +[[subscribe-method-examples]] +== `subscribe` Method Examples This section contains minimal examples of each of the five signatures for the `subscribe` method. 
The following code shows an example of the basic method with no arguments: -==== [source,java] +[%unbreakable] ---- + Flux ints = Flux.range(1, 3); <1> ints.subscribe(); <2> ---- <1> Set up a `Flux` that produces three values when a subscriber attaches. <2> Subscribe in the simplest way. -==== The preceding code produces no visible output, but it does work. The `Flux` produces three values. If we provide a lambda, we can make the values visible. The next example for the `subscribe` method shows one way to make the values appear: -==== [source,java] +[%unbreakable] ---- + Flux ints = Flux.range(1, 3); <1> ints.subscribe(i -> System.out.println(i)); <2> ---- <1> Set up a `Flux` that produces three values when a subscriber attaches. <2> Subscribe with a subscriber that will print the values. -==== The preceding code produces the following output: -==== [source] +[%unbreakable] ---- + 1 2 3 ---- -==== To demonstrate the next signature, we intentionally introduce an error, as shown in the following example: -==== [source, java] ---- Flux ints = Flux.range(1, 4) <1> @@ -57,27 +57,27 @@ ints.subscribe(i -> System.out.println(i), <5> <3> For most values, return the value. <4> For one value, force an error. <5> Subscribe with a subscriber that includes an error handler. -==== We now have two lambda expressions: one for the content we expect and one for errors. The preceding code produces the following output: -==== [source] +[%unbreakable] ---- + 1 2 3 Error: java.lang.RuntimeException: Got to 4 ---- -==== The next signature of the `subscribe` method includes both an error handler and a handler for completion events, as shown in the following example: -==== [source,java] +[%unbreakable] ---- + Flux ints = Flux.range(1, 4); <1> ints.subscribe(i -> System.out.println(i), error -> System.err.println("Error " + error), @@ -85,7 +85,6 @@ ints.subscribe(i -> System.out.println(i), ---- <1> Set up a Flux that produces four values when a subscriber attaches. 
<2> Subscribe with a Subscriber that includes a handler for completion events. -==== Error signals and completion signals are both terminal events and are exclusive of one another (you never get both). To make the completion consumer work, we must take care not @@ -95,18 +94,19 @@ The completion callback has no input, as represented by an empty pair of parentheses: It matches the `run` method in the `Runnable` interface. The preceding code produces the following output: -==== [source] +[%unbreakable] ---- + 1 2 3 4 Done ---- -==== -=== Cancelling a `subscribe()` with Its `Disposable` +[[cancelling-a-subscribe-with-its-disposable]] +== Cancelling a `subscribe()` with Its `Disposable` All these lambda-based variants of `subscribe()` have a `Disposable` return type. In this case, the `Disposable` interface represents the fact that the subscription @@ -130,7 +130,8 @@ associated with a service call -- and dispose all of them at once later on. Once the composite's `dispose()` method has been called, any attempt to add another `Disposable` immediately disposes it. -=== An Alternative to Lambdas: `BaseSubscriber` +[[an-alternative-to-lambdas-basesubscriber]] +== An Alternative to Lambdas: `BaseSubscriber` There is an additional `subscribe` method that is more generic and takes a full-blown `Subscriber` rather than composing one out of lambdas. In order to help with writing @@ -147,21 +148,22 @@ the call to `Publisher#subscribe(Subscriber)`. Now we can implement one of these. We call it a `SampleSubscriber`. 
The following example shows how it would be attached to a `Flux`: -==== [source,java] +[%unbreakable] ---- + SampleSubscriber ss = new SampleSubscriber(); Flux ints = Flux.range(1, 4); ints.subscribe(ss); ---- -==== The following example shows what `SampleSubscriber` could look like, as a minimalistic implementation of a `BaseSubscriber`: -==== [source,java] +[%unbreakable] ---- + package io.projectreactor.samples; import org.reactivestreams.Subscription; @@ -183,7 +185,6 @@ public class SampleSubscriber extends BaseSubscriber { } } ---- -==== The `SampleSubscriber` class extends `BaseSubscriber`, which is the recommended abstract class for user-defined `Subscribers` in Reactor. The class offers hooks that can be @@ -199,16 +200,16 @@ at a time. The `SampleSubscriber` class produces the following output: -==== [source] +[%unbreakable] ---- + Subscribed 1 2 3 4 ---- -==== `BaseSubscriber` also offers a `requestUnbounded()` method to switch to unbounded mode (equivalent to `request(Long.MAX_VALUE)`), as well as a `cancel()` method. diff --git a/docs/modules/ROOT/pages/testing.adoc b/docs/modules/ROOT/pages/testing.adoc index d3d4ca731a..07b94dcacc 100644 --- a/docs/modules/ROOT/pages/testing.adoc +++ b/docs/modules/ROOT/pages/testing.adoc @@ -13,7 +13,6 @@ To use it in your tests, you must add it as a test dependency. The following example shows how to add `reactor-test` as a dependency in Maven: .reactor-test in Maven, in `` -==== [source,xml] ---- @@ -23,20 +22,17 @@ The following example shows how to add `reactor-test` as a dependency in Maven: <1> ---- -<1> If you use the <>, you do not need to specify a ``. -==== +<1> If you use the xref:gettingStarted.adoc#getting[BOM], you do not need to specify a ``. 
The following example shows how to add `reactor-test` as a dependency in Gradle: .reactor-test in Gradle, amend the `dependencies` block -==== [source,groovy] ---- dependencies { testCompile 'io.projectreactor:reactor-test' } ---- -==== The three main uses of `reactor-test` are as follows: @@ -46,6 +42,7 @@ operators) with `TestPublisher`. * In sequences that can go through several alternative `Publisher` (for example, a chain that uses `switchIfEmpty`, probing such a `Publisher` to ensure it was used (that is, subscribed to). +[[testing-a-scenario-with-stepverifier]] == Testing a Scenario with `StepVerifier` The most common case for testing a Reactor sequence is to have a `Flux` or a `Mono` defined @@ -65,14 +62,14 @@ You can express all of that through the `StepVerifier` API. For instance, you could have the following utility method in your codebase that decorates a `Flux`: -==== [source,java] +[%unbreakable] ---- + public Flux appendBoomError(Flux source) { return source.concatWith(Mono.error(new IllegalArgumentException("boom"))); } ---- -==== In order to test it, you want to verify the following scenario: @@ -81,9 +78,10 @@ error* with the message, `boom`. Subscribe and *verify* these expectations. In the `StepVerifier` API, this translates to the following test: -==== [source,java] +[%unbreakable] ---- + @Test public void testAppendBoomError() { Flux source = Flux.just("thing1", "thing2"); // <1> @@ -104,7 +102,6 @@ of `thing1`. <5> The last signal we expect to happen is a termination of the sequence with an `onError`. The exception should have `boom` as a message. <6> It is important to trigger the test by calling `verify()`. -==== The API is a builder. You start by creating a `StepVerifier` and passing the sequence to be tested. 
This offers a choice of methods that let you: @@ -145,6 +142,7 @@ TIP: By default, the `verify()` method and derived shortcut methods (`verifyThen `StepVerifier.setDefaultTimeout(Duration)` to globally set a timeout for these methods, or specify one on a per-call basis with `verify(Duration)`. +[[better-identifying-test-failures]] === Better Identifying Test Failures `StepVerifier` provides two options to better identify exactly which expectation step caused @@ -162,6 +160,7 @@ Note that, in both cases, the use of the description or name in messages is only manually or through an assertion library in `assertNext` does not add the description or name to the error's message). +[[manipulating-time]] == Manipulating Time You can use `StepVerifier` with time-based operators to avoid long run times for @@ -169,13 +168,13 @@ corresponding tests. You can do so through the `StepVerifier.withVirtualTime` bu It looks like the following example: -==== [source,java] +[%unbreakable] ---- + StepVerifier.withVirtualTime(() -> Mono.delay(Duration.ofDays(1))) //... continue expectations here ---- -==== This virtual time feature plugs in a custom `Scheduler` in Reactor's `Schedulers` factory. Since these timed operators usually use the default `Schedulers.parallel()` @@ -211,20 +210,20 @@ first step, it usually fails because the subscription signal is detected. Use In order to quickly evaluate the behavior of our `Mono.delay` above, we can finish writing our code as follows: -==== [source,java] +[%unbreakable] ---- + StepVerifier.withVirtualTime(() -> Mono.delay(Duration.ofDays(1))) .expectSubscription() // <1> .expectNoEvent(Duration.ofDays(1)) // <2> .expectNext(0L) // <3> .verifyComplete(); // <4> ---- -<1> See the preceding <>. +<1> See the preceding xref:testing.adoc#tip-expectNoEvent[tip]. <2> Expect nothing to happen for a full day. <3> Then expect a delay that emits `0`. <4> Then expect completion (and trigger the verification). 
-==== We could have used `thenAwait(Duration.ofDays(1))` above, but `expectNoEvent` has the benefit of guaranteeing that nothing happened earlier than it should have. @@ -240,6 +239,7 @@ produced by advancing time. In most cases, you need to advance the virtual clock sequences to emit. Virtual time also gets very limited with infinite sequences, which might hog the thread on which both the sequence and its verification run. +[[performing-post-execution-assertions-with-stepverifier]] == Performing Post-execution Assertions with `StepVerifier` After having described the final expectation of your scenario, you can switch to a @@ -250,11 +250,12 @@ complementary assertion API instead of triggering `verify()`. To do so, use assert a few elements of state once the whole scenario has played out successfully (because it also calls `verify()`). Typical (albeit advanced) usage is to capture elements that have been dropped by some operator and assert them (see the section on -<>). +xref:advancedFeatures/hooks.adoc[Hooks]). +[[testing-the-context]] == Testing the `Context` -For more information about the `Context`, see <>. +For more information about the `Context`, see xref:advancedFeatures/context.adoc[Adding a Context to a Reactive Sequence]. `StepVerifier` comes with a couple of expectations around the propagation of a `Context`: @@ -272,9 +273,10 @@ using `StepVerifierOptions` to create the verifier. These features are demonstrated in the following snippet: -==== [source,java] +[%unbreakable] ---- + StepVerifier.create(Mono.just(1).map(i -> i + 10), StepVerifierOptions.create().withInitialContext(Context.of("thing1", "thing2"))) // <1> .expectAccessibleContext() //<2> @@ -289,8 +291,8 @@ StepVerifier.create(Mono.just(1).map(i -> i + 10), <3> An example of a `Context`-specific expectation. It must contain value "thing2" for key "thing1". <4> We `then()` switch back to setting up normal expectations on the data. 
<5> Let us not forget to `verify()` the whole set of expectations. -==== +[[manually-emitting-with-testpublisher]] == Manually Emitting with `TestPublisher` For more advanced test cases, it might be useful to have complete mastery over the source @@ -330,6 +332,7 @@ be asserted through its various `assert*` methods. You can use it as a `Flux` or `Mono` by using the conversion methods, `flux()` and `mono()`. +[[checking-the-execution-path-with-publisherprobe]] == Checking the Execution Path with `PublisherProbe` When building complex chains of operators, you could come across cases where @@ -342,22 +345,23 @@ For instance, consider the following method, which builds a chain of operators f source and uses a `switchIfEmpty` to fall back to a particular alternative if the source is empty: -==== [source,java] +[%unbreakable] ---- + public Flux processOrFallback(Mono source, Publisher fallback) { return source .flatMapMany(phrase -> Flux.fromArray(phrase.split("\\s+"))) .switchIfEmpty(fallback); } ---- -==== You can test which logical branch of the switchIfEmpty was used, as follows: -==== [source,java] +[%unbreakable] ---- + @Test public void testSplitPathIsUsed() { StepVerifier.create(processOrFallback(Mono.just("just a phrase with tabs!"), @@ -373,16 +377,16 @@ public void testEmptyPathIsUsed() { .verifyComplete(); } ---- -==== However, think about an example where the method produces a `Mono` instead. It waits for the source to complete, performs an additional task, and completes. If the source is empty, a fallback `Runnable`-like task must be performed instead. The following example shows such a case: -==== [source,java] +[%unbreakable] ---- + private Mono executeCommand(String command) { return Mono.just(command + " DONE"); } @@ -395,7 +399,6 @@ public Mono processOrFallback(Mono commandSource, Mono doWhe ---- <1> `then()` forgets about the command result. It cares only that it was completed. <2> How to distinguish between two cases that are both empty sequences? 
-==== To verify that your `processOrFallback` method does indeed go through the `doWhenEmpty` path, you need to write a bit of boilerplate. Namely you need a `Mono` that: @@ -409,9 +412,10 @@ to evaluate. This could be a lot of boilerplate when having to apply this patter regularly. Fortunately, 3.1.0 introduced an alternative with `PublisherProbe`. The following example shows how to use it: -==== [source,java] +[%unbreakable] ---- + @Test public void testCommandEmptyPathIsUsed() { PublisherProbe probe = PublisherProbe.empty(); // <1> @@ -430,7 +434,6 @@ public void testCommandEmptyPathIsUsed() { can check that is was subscribed to... <4> ...as well as actually requested data... <5> ...and whether or not it was cancelled. -==== You can also use the probe in place of a `Flux` by calling `.flux()` instead of `.mono()`. For cases where you need to probe an execution path but also need the From 90e86ee64cc884bb0aec5ca71515cea1b3c0f61d Mon Sep 17 00:00:00 2001 From: Pierre De Rop Date: Thu, 2 May 2024 18:14:13 +0200 Subject: [PATCH 03/26] Add antora yml files. 
--- docs/antora-assembler.yml | 12 +++++++++ docs/antora-playbook.yml | 52 +++++++++++++++++++++++++++++++++++++++ docs/antora.yml | 24 ++++++++++++++++++ docs/pdf-theme.yml | 4 +++ 4 files changed, 92 insertions(+) create mode 100644 docs/antora-assembler.yml create mode 100644 docs/antora-playbook.yml create mode 100644 docs/antora.yml create mode 100644 docs/pdf-theme.yml diff --git a/docs/antora-assembler.yml b/docs/antora-assembler.yml new file mode 100644 index 0000000000..8942f7ac9e --- /dev/null +++ b/docs/antora-assembler.yml @@ -0,0 +1,12 @@ +root_level: 0 +#component_versions: '*' +build: + publish: false +asciidoc: + build: + command: bundle exec asciidoctor-pdf -n -d book + attributes: + source-highlighter: rouge + pdf-theme: ./pdf-theme.yml + + diff --git a/docs/antora-playbook.yml b/docs/antora-playbook.yml new file mode 100644 index 0000000000..4c8c2cdd6e --- /dev/null +++ b/docs/antora-playbook.yml @@ -0,0 +1,52 @@ +antora: + extensions: + - '@springio/antora-extensions/partial-build-extension' + # atlas-extension must be before latest-version-extension so latest versions are applied to imported versions + - '@antora/atlas-extension' + - require: '@springio/antora-extensions/latest-version-extension' + - require: '@springio/antora-extensions/inject-collector-cache-config-extension' + - '@antora/collector-extension' + - id: pdf-extension # pdf-extension is enabled from antora-docs.gradle + require: '@antora/pdf-extension' + enabled: false + - require: '@springio/antora-extensions/root-component-extension' + root_component_name: 'reactor' +site: + title: Reactor Core Reference Guide + url: https://projectreactor.io/docs/core/release/reference +content: + sources: + - url: ./.. 
+ branches: HEAD + start_path: docs + worktrees: true +asciidoc: + extensions: + - '@asciidoctor/tabs' + - '@springio/asciidoctor-extensions' + attributes: + page-stackoverflow-url: https://stackoverflow.com/tags/project-reactor + page-pagination: '' + hide-uri-scheme: '@' + tabs-sync-option: '@' + chomp: 'all' + # Adhust the following attributes (they comes from reactor-netty) + doctype: book + icons: font + toc2: true + sectnums: true + sectanchors: true + source-highlighter: highlightjs + highlightjsdir: modules/ROOT/assets/highlight + highlightjs-theme: railscasts + sourcemap: true +urls: + latest_version_segment: '' +runtime: + log: + failure_level: warn + format: pretty +ui: + bundle: + url: https://github.com/reactor/antora-ui-projectreactor/releases/download/latest/ui-bundle.zip + snapshot: true diff --git a/docs/antora.yml b/docs/antora.yml new file mode 100644 index 0000000000..41ddb35aeb --- /dev/null +++ b/docs/antora.yml @@ -0,0 +1,24 @@ +name: reactor +version: true +title: Reactor 3 Reference Guide +start_page: aboutDoc.adoc +nav: + - modules/ROOT/nav.adoc +ext: + collector: + run: + command: gradlew -q "-Dorg.gradle.jvmargs=-Xmx3g -XX:+HeapDumpOnOutOfMemoryError" :docs:generateAntoraResources + local: true + scan: + dir: ./build/generated-antora-resources + +asciidoc: + attributes: + attribute-missing: 'warn' + chomp: 'all' + source-language: java + reactor-github-repo: 'https://github.com/reactor/reactor-core/tree/main' + javadoc: 'https://projectreactor.io/docs/core/{project-version}/api' + author: + - Stephane Maldini + - Simon Baslé diff --git a/docs/pdf-theme.yml b/docs/pdf-theme.yml new file mode 100644 index 0000000000..11d6b180fe --- /dev/null +++ b/docs/pdf-theme.yml @@ -0,0 +1,4 @@ +extends: default +role: + red: + font-color: #FF0000 From 33be68a3b15becb7598a61a80dde79d361c9eca0 Mon Sep 17 00:00:00 2001 From: Pierre De Rop Date: Thu, 2 May 2024 18:18:32 +0200 Subject: [PATCH 04/26] Adapted gradle scripts to Antora. 
--- build.gradle | 8 +- docs/build.gradle | 214 ++++++++++++++++++++++++++++++++++++++ gradle/asciidoc.gradle | 160 ---------------------------- gradle/libs.versions.toml | 14 ++- gradle/setup.gradle | 26 ++++- settings.gradle | 6 +- 6 files changed, 254 insertions(+), 174 deletions(-) create mode 100644 docs/build.gradle delete mode 100644 gradle/asciidoc.gradle diff --git a/build.gradle b/build.gradle index e12f61ea50..85b29caf85 100644 --- a/build.gradle +++ b/build.gradle @@ -28,8 +28,6 @@ buildscript { plugins { alias(libs.plugins.artifactory) alias(libs.plugins.shadow) - alias(libs.plugins.asciidoctor.convert) apply false - alias(libs.plugins.asciidoctor.pdf) apply false alias(libs.plugins.japicmp) alias(libs.plugins.download) // note: build scan plugin now must be applied in settings.gradle @@ -42,7 +40,6 @@ plugins { } apply plugin: "io.reactor.gradle.detect-ci" -apply from: "gradle/asciidoc.gradle" // asciidoc (which is generated from root dir) apply from: "gradle/releaser.gradle" apply from: "gradle/dependencies.gradle" apply from: "gradle/toolchains.gradle" @@ -89,7 +86,8 @@ ext { } nohttp { - source.exclude "docs/asciidoc/highlight/**" + source.exclude "docs/modules/ROOT/assets/highlight/**" + source.exclude "docs/.gradle/**" source.exclude "**/build/reports/tests/**/*.html" allowlistFile = project.file('codequality/nohttp/allowlist.lines') } @@ -201,8 +199,6 @@ configure(subprojects) { p -> } } -assemble.dependsOn docsZip - configure(subprojects) { p -> // these apply once the above configure is done, but before project-specific build.gradle have applied apply plugin: "io.reactor.gradle.java-conventions" diff --git a/docs/build.gradle b/docs/build.gradle new file mode 100644 index 0000000000..7d4fc6dc0e --- /dev/null +++ b/docs/build.gradle @@ -0,0 +1,214 @@ +/* + * Copyright (c) 2024 VMware, Inc. or its affiliates, All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +plugins { + alias(libs.plugins.antora) + alias(libs.plugins.antora.yml) +} + +def isCommandAvailable(String command) { + def result = exec { + commandLine 'which', command + ignoreExitValue true + standardOutput = new ByteArrayOutputStream() + errorOutput = new ByteArrayOutputStream() + } + return result.exitValue == 0 +} + +antora { + //version = "$antoraVersion" + version = libs.versions.antora.version + playbook = findProperty('antora.playbook') ?: "antora-playbook.yml" + options = ['--clean', '--stacktrace'] + + def version = project.version + def forcePdf = project.hasProperty('forcePdf') + + if (!version.endsWith("-SNAPSHOT") || forcePdf) { + if (isCommandAvailable('asciidoctor-pdf')) { + logger.log(LogLevel.DEBUG, "enabling antora pdf-extension") + options.add('--extension=pdf-extension') + } else { + logger.lifecycle("PDF not generated, asciidoctor-pdf not found from the PATH.") + } + } + + environment = [ + 'ALGOLIA_API_KEY' : 'd4bf9918bfc7d63ae68fbf92d69c2f49', + 'ALGOLIA_APP_ID' : '82SNR5M8HE', + 'ALGOLIA_INDEX_NAME': 'projectreactor' + ] + + dependencies = [ + '@antora/atlas-extension' : "${libs.versions.antora.atlas.extension.get()}", + '@antora/pdf-extension' : "${libs.versions.antora.pdf.extension.get()}", + '@antora/collector-extension' : "${libs.versions.antora.collector.extension.get()}", + '@asciidoctor/tabs' : "${libs.versions.antora.tabs.extension.get()}", + 
'@springio/antora-extensions' : "${libs.versions.antora.springio.antora.extension.get()}", + '@springio/asciidoctor-extensions': "${libs.versions.antora.asciidoctor.extension.get()}" + ] +} + +jar { + enabled = false +} + +javadoc { + enabled = false +} + +tasks.withType(AbstractPublishToMaven).configureEach { + enabled = false +} + +configurations { + adoc +} + +dependencies { + adoc(libs.micrometer.docsGenerator) +} + +task generateObservabilityDocs(dependsOn: [ + "generateMeterListenerDocs", + "generateTimedSchedulerDocs", + "generateObservationDocs", + "polishGeneratedMetricsDocs"]) { + outputs.dir(project.layout.buildDirectory.dir("documentedMetrics/").get().asFile.absolutePath) +} + +task generateMeterListenerDocs(type: JavaExec) { + def outputDir = project.layout.buildDirectory.dir("generatedMetricsDocs/meterListener").get().asFile.absolutePath + outputs.dir(outputDir) + mainClass.set("io.micrometer.docs.DocsGeneratorCommand") + classpath configurations.adoc + args project.rootDir.getAbsolutePath(), + ".*MicrometerMeterListenerDocumentation.*.java", + outputDir +} + +task generateTimedSchedulerDocs(type: JavaExec) { + def outputDir = project.layout.buildDirectory.dir("generatedMetricsDocs/timedScheduler").get().asFile.absolutePath + outputs.dir(outputDir) + mainClass.set("io.micrometer.docs.DocsGeneratorCommand") + classpath configurations.adoc + args project.rootDir.getAbsolutePath(), ".*TimedSchedulerMeterDocumentation.*.java", + outputDir +} + +task generateObservationDocs(type: JavaExec) { + def outputDir = project.layout.buildDirectory.dir("generatedMetricsDocs/observation").get().asFile.absolutePath + outputs.dir(outputDir) + mainClass.set("io.micrometer.docs.DocsGeneratorCommand") + classpath configurations.adoc + args project.rootDir.getAbsolutePath(), + ".*MicrometerObservationListenerDocumentation.*.java", + outputDir +} + +task polishGeneratedMetricsDocs(type: Copy) { + mustRunAfter "generateMeterListenerDocs" + mustRunAfter 
"generateTimedSchedulerDocs" + mustRunAfter "generateObservationDocs" + outputs.dir(project.layout.buildDirectory.dir("documentedMetrics").get().asFile.absolutePath) + + from(project.layout.buildDirectory.get().asFile.toString() + "/generatedMetricsDocs/meterListener/") { + include "_*.adoc" + rename '_(.*).adoc', 'meterListener_$1.adoc' + } + from(project.layout.buildDirectory.get().asFile.toString() + "/generatedMetricsDocs/timedScheduler/") { + include "_*.adoc" + rename '_(.*).adoc', 'timedScheduler_$1.adoc' + } + from(project.layout.buildDirectory.get().asFile.toString() + "/generatedMetricsDocs/observation/") { + include "_*.adoc" + rename '_(.*).adoc', 'observation_$1.adoc' + } + into project.layout.buildDirectory.get().asFile.toString() + "/documentedMetrics" + filter { String line -> + line.startsWith('[[observability-metrics]]') || + line.startsWith('=== Observability - Metrics') || + line.startsWith('Below you can find a list of all ') || + line.startsWith("Fully qualified name of the enclosing class ") + ? null : line + } + filter { String line -> line.startsWith("====") ? 
line.replaceFirst("====", "=") : line } + doLast { + //since these are the files that get explicitly included in asciidoc, smoke test they exist + assert file(project.layout.buildDirectory.get().asFile.toString() + "/documentedMetrics/meterListener_metrics.adoc").exists() + assert file(project.layout.buildDirectory.get().asFile.toString() + "/documentedMetrics/timedScheduler_metrics.adoc").exists() + assert file(project.layout.buildDirectory.get().asFile.toString() + "/documentedMetrics/observation_metrics.adoc").exists() + } +} + +tasks.create(name: 'createAntoraPartials', type: Sync) { + from { project(":docs").tasks.generateObservabilityDocs.outputs } + + // Download and include SUPPORT.adoc + doLast { + def url = 'https://raw.githubusercontent.com/reactor/.github/main/SUPPORT.adoc' + def outputFile = file("${buildDir}/generated-antora-resources/modules/ROOT/partials/SUPPORT.adoc") + ant.get(src: url, dest: outputFile) + } + into layout.buildDirectory.dir('generated-antora-resources/modules/ROOT/partials') +} + +tasks.named("generateAntoraYml") { + asciidocAttributes = project.provider({ generateAttributes() }) + baseAntoraYmlFile = file("antora.yml") +} + +tasks.create('generateAntoraResources') { + dependsOn 'createAntoraPartials' + dependsOn 'generateAntoraYml' +} + +def generateAttributes() { + return ['is-snapshot-version': project.version.endsWith("-SNAPSHOT"), + 'project-version' : project.version, + 'reactorReleaseTrain': bomVersion + ] +} + +task docsZip(type: Zip, dependsOn: ':docs:antora') { + archiveBaseName.set("reactor-core") + archiveClassifier.set('docs') + + def isSnapshot = project.version.endsWith('-SNAPSHOT') + def version = isSnapshot ? 
project.version.takeWhile { it != '-' } : project.version + boolean forcePdf = project.hasProperty('forcePdf') + + from('build/site') { + into 'docs' + } + + if (!isSnapshot || forcePdf) { + def pdfFile = file("build/assembler/reactor/${version}/reactor-3-reference-guide.pdf") + logger.lifecycle("${pdfFile} will be included in docs zip") + from(pdfFile) { + rename { fileName -> + "docs/reactor-core-reference-guide-${project.version}.pdf" + } + } + } +} + +description = "Reactor 3 Antora Docs" + +assemble.dependsOn docsZip + +// docsZip is added to publication in gradle/setup.gradle, see publications -> mavenJava -> afterEvaluate diff --git a/gradle/asciidoc.gradle b/gradle/asciidoc.gradle deleted file mode 100644 index 148497df49..0000000000 --- a/gradle/asciidoc.gradle +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Copyright (c) 2011-2021 VMware Inc. or its affiliates, All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -configure(rootProject) { - apply plugin: 'org.asciidoctor.jvm.convert' - apply plugin: 'org.asciidoctor.jvm.pdf' - - repositories { - maven { url 'https://repo.spring.io/snapshot' } - maven { url 'https://repo.spring.io/milestone' } - } - - // This configuration applies both to the asciidoctor & asciidoctorPdf tasks - asciidoctorj { - options = [doctype: 'book'] - attributes 'allow-uri-read': '', - 'attribute-missing': 'warn', - 'project-version': "${project.version}", - 'reactorReleaseTrain': "${bomVersion}" - } - - asciidoctor { - dependsOn "generateObservabilityDocs" - inputs.dir(layout.buildDirectory.dir("generatedMetricsDocs/").get().asFile) // force the task to consider changes in this folder, making it not UP-TO-DATE - sourceDir "docs/asciidoc/" - sources { - include "index.asciidoc" - } - baseDirFollowsSourceDir() - resources { - from(sourceDir) { - include 'images/**' - include 'highlight/**/*' - } - - } - outputDir layout.buildDirectory.dir("docs/asciidoc/html").get().asFile - logDocuments = true - attributes stylesdir: "stylesheets/", - stylesheet: 'reactor.css', - 'source-highlighter': 'highlightjs', - 'highlightjsdir': "./highlight", - 'highlightjs-theme': 'railscasts', - 'reactorReleaseTrain': "$bomVersion" - } - - asciidoctorPdf { - onlyIf { isCiServer || !rootProject.version.toString().endsWith("-SNAPSHOT") || rootProject.hasProperty("forcePdf") } - dependsOn "generateObservabilityDocs" - sourceDir "docs/asciidoc/" - sources { - include "index.asciidoc" - } - baseDirFollowsSourceDir() - outputDir layout.buildDirectory.dir("docs/asciidoc/pdf").get().asFile - logDocuments = true - attributes 'source-highlighter': 'rouge' - } - - task asciidocs(dependsOn: [asciidoctor, asciidoctorPdf], group: "documentation") { } - - task docsZip(type: Zip, dependsOn: asciidocs) { - archiveBaseName.set("reactor-core") - archiveClassifier.set('docs') - afterEvaluate() { - //we configure the pdf copy late, when a potential customVersion has been applied to 
rootProject - from(asciidoctorPdf) { - into ("docs/") - rename("index.pdf", "reactor-core-reference-guide-${rootProject.version}.pdf") - } - } - from(asciidoctor) { into("docs/") } - } - - configurations { - adoc - } - - dependencies { - adoc libs.micrometer.docsGenerator - } - - task generateObservabilityDocs(dependsOn: [ - "generateMeterListenerDocs", - "generateTimedSchedulerDocs", - "generateObservationDocs", - "polishGeneratedMetricsDocs"]) { - } - - task generateMeterListenerDocs(type: JavaExec) { - mainClass.set("io.micrometer.docs.DocsGeneratorCommand") - classpath configurations.adoc - args project.rootDir.getAbsolutePath(), - ".*MicrometerMeterListenerDocumentation.*.java", - project.rootProject.layout.buildDirectory.dir("generatedMetricsDocs/meterListener").get().asFile.absolutePath - } - - task generateTimedSchedulerDocs(type: JavaExec) { - mainClass.set("io.micrometer.docs.DocsGeneratorCommand") - classpath configurations.adoc - args project.rootDir.getAbsolutePath(), ".*TimedSchedulerMeterDocumentation.*.java", - project.rootProject.layout.buildDirectory.dir("generatedMetricsDocs/timedScheduler").get().asFile.absolutePath - } - - task generateObservationDocs(type: JavaExec) { - mainClass.set("io.micrometer.docs.DocsGeneratorCommand") - classpath configurations.adoc - args project.rootDir.getAbsolutePath(), - ".*MicrometerObservationListenerDocumentation.*.java", - project.rootProject.layout.buildDirectory.dir("generatedMetricsDocs/observation").get().asFile.absolutePath - } - - task polishGeneratedMetricsDocs(type: Copy) { - mustRunAfter "generateMeterListenerDocs" - mustRunAfter "generateTimedSchedulerDocs" - mustRunAfter "generateObservationDocs" - from(project.rootProject.layout.buildDirectory.get().asFile.toString() + "/generatedMetricsDocs/meterListener/") { - include "_*.adoc" - rename '_(.*).adoc', 'meterListener_$1.adoc' - } - from(project.rootProject.layout.buildDirectory.get().asFile.toString() + "/generatedMetricsDocs/timedScheduler/") { - 
include "_*.adoc" - rename '_(.*).adoc', 'timedScheduler_$1.adoc' - } - from(project.rootProject.layout.buildDirectory.get().asFile.toString() + "/generatedMetricsDocs/observation/") { - include "_*.adoc" - rename '_(.*).adoc', 'observation_$1.adoc' - } - into project.rootProject.layout.buildDirectory.get().asFile.toString() + "/documentedMetrics" - filter { String line -> - line.startsWith('[[observability-metrics]]') || - line.startsWith('=== Observability - Metrics') || - line.startsWith('Below you can find a list of all ') || - line.startsWith("Fully qualified name of the enclosing class ") - ? null : line - } - filter { String line -> line.startsWith("====") ? line.replaceFirst("====", "=") : line } - doLast { - //since these are the files that get explicitly included in asciidoc, smoke test they exist - assert file(project.rootProject.layout.buildDirectory.get().asFile.toString() + "/documentedMetrics/meterListener_metrics.adoc").exists() - assert file(project.rootProject.layout.buildDirectory.get().asFile.toString() + "/documentedMetrics/timedScheduler_metrics.adoc").exists() - assert file(project.rootProject.layout.buildDirectory.get().asFile.toString() + "/documentedMetrics/observation_metrics.adoc").exists() - } - } - -} - diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index 42779a6489..d072810db7 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -12,7 +12,6 @@ baselinePerfCore = "3.6.5" baselinePerfExtra = "3.5.1" # Other shared versions -asciidoctor = "4.0.2" #note that some micrometer artifacts like context-propagation has a different version directly set in libraries below micrometer = "1.12.5" micrometerDocsGenerator = "1.0.2" @@ -20,6 +19,15 @@ micrometerTracingTest="1.2.5" contextPropagation="1.1.1" kotlin = "1.8.22" reactiveStreams = "1.0.4" +antora = "1.0.0" +antora-yml = "0.0.1" +antora-version = "3.2.0-alpha.4" +antora-atlas-extension = "1.0.0-alpha.1" +antora-pdf-extension = "1.0.0-alpha.7" 
+antora-collector-extension = "1.0.0-alpha.3" +antora-tabs-extension = "1.0.0-beta.6" +antora-springio-antora-extension = "1.8.2" +antora-asciidoctor-extension = "1.0.0-alpha.9" [libraries] jsr166backport = "io.projectreactor:jsr166:1.0.0.RELEASE" @@ -42,8 +50,6 @@ reactor-perfBaseline-extra = { module = "io.projectreactor.addons:reactor-extra" [plugins] artifactory = { id = "com.jfrog.artifactory", version = "4.31.0" } -asciidoctor-convert = { id = "org.asciidoctor.jvm.convert", version.ref = "asciidoctor" } -asciidoctor-pdf = { id = "org.asciidoctor.jvm.pdf", version.ref = "asciidoctor" } bnd = { id = "biz.aQute.bnd.builder", version = "6.4.0" } download = { id = "de.undercouch.download", version = "5.6.0" } japicmp = { id = "me.champeau.gradle.japicmp", version = "0.4.2" } @@ -52,3 +58,5 @@ nohttp = { id = "io.spring.nohttp", version = "0.0.11" } shadow = { id = "com.github.johnrengelman.shadow", version = "8.1.1" } spotless = { id = "com.diffplug.spotless", version = "6.13.0" } mrjar = { id = "me.champeau.mrjar", version = "0.1.1" } +antora = { id = "org.antora", version.ref = "antora" } +antora-yml = { id = "io.spring.antora.generate-antora-yml", version.ref = "antora-yml" } diff --git a/gradle/setup.gradle b/gradle/setup.gradle index 5b6f72584f..2f4311aa53 100644 --- a/gradle/setup.gradle +++ b/gradle/setup.gradle @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2021 VMware Inc. or its affiliates, All Rights Reserved. + * Copyright (c) 2011-2024 VMware Inc. or its affiliates, All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -112,10 +112,28 @@ publishing { artifact javadocJar //consider adding extra artifacts here, conditionally on submodule's name and perhaps in an afterEvaluate block afterEvaluate { - if (project.name == 'reactor-core') { - artifact rootProject.tasks.docsZip + // Find the docs project (it's not a java project, so we need to lookup it) + def docsProject = project.findProject(':docs') + if (docsProject) { + // Access the docsZip task from the docs project + def docsZipTask = docsProject.tasks.findByName('docsZip') + if (docsZipTask) { + // Add the docsZip task as an artifact + artifact docsZipTask + } + } + else { + // If the current JDK version is JDK8, the docs project is not loaded (see settings.gradle), so + // docsTask is not available. In this case, include the docsZip file path directly as an artifact, if it exists. + // (it may exist in case the docs have been previously built using a JDK17 compatible JDK). + def docsZipFile = file("${rootDir}/docs/build/distributions/reactor-core-${project.version}-docs.zip") + if (docsZipFile.exists()) { + artifact(docsZipFile) { + classifier 'docs' + } + } } - // note that reactor-tools has more involved stuff, so we kept it in the submodule's build: + // Note that reactor-tools has more involved stuff, so we kept it in the submodule's build: // (it replaces the original jar with shadow jar and adds the former as -original.jar) } diff --git a/settings.gradle b/settings.gradle index e9650f3a9d..370844064a 100644 --- a/settings.gradle +++ b/settings.gradle @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2023 VMware Inc. or its affiliates, All Rights Reserved. + * Copyright (c) 2011-2024 VMware Inc. or its affiliates, All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -21,6 +21,10 @@ rootProject.name = 'reactor' include 'benchmarks', 'reactor-core', 'reactor-test', 'reactor-tools', 'reactor-core-micrometer' +if (JavaVersion.current().isCompatibleWith(JavaVersion.VERSION_17)) { + include 'docs' +} + dependencyResolutionManagement { versionCatalogs { libs { From ad2a090b687ac6e868530b7977073697aa4427cd Mon Sep 17 00:00:00 2001 From: Pierre De Rop Date: Thu, 2 May 2024 19:46:12 +0200 Subject: [PATCH 05/26] Build with JDK21 as current JDK --- .github/workflows/publish.yml | 74 +++++++++++++++++++++++++++++++++-- 1 file changed, 71 insertions(+), 3 deletions(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 3fb36b4568..9e71d47b09 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -7,6 +7,11 @@ on: - main - 3.6.x permissions: read-all + +env: + DOCS_ZIP: docs-zip + DOCS_ZIP_PATH: docs/build/distributions + jobs: # General job notes: we DON'T want to cancel any previous runs, especially in the case of a "back to snapshots" build right after a release push # We specify the ubuntu version to minimize the chances we have to deal with a migration during a release @@ -63,11 +68,41 @@ jobs: uses: gradle/actions/setup-gradle@e24011a3b5db78bd5ab798036042d9312002f252 # tag=v3 with: arguments: ${{ matrix.test-type.arguments }} + + # Build the docs-zip antora doc for the current branch, and upload generated docs-zip to workflow run. + # JDK21 is used because the antora plugin requires a JDK17 compatible version. + # Each deploy jobs can then download the docs-zip into ./docs/build/distributions/ in order to let it be included in published artifacts. 
+ # (see gradle/setup.gradle publications which includes docs zip file, if found from docs/build/distributions directory) + build-docs-zip: + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4 + - name: Set up Ruby for asciidoctor-pdf + uses: ruby/setup-ruby@cacc9f1c0b3f4eb8a16a6bb0ed10897b43b9de49 # v1 + with: + ruby-version: 3.3.0 + - name: Install asciidoctor-pdf / rouge + run: gem install asciidoctor-pdf rouge + - name: Setup java 21 for antora + uses: actions/setup-java@99b8673ff64fbf99d8d325f52d9a5bdedb8483e9 # v4 + with: + distribution: 'temurin' + java-version: '21' + - name: Build antora docs zip distribution for the current branch + run: ./gradlew docs + - name: Upload docs/build to current workflow run + uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4 + with: + name: ${{ env.DOCS_ZIP }} + path: ${{ env.DOCS_ZIP_PATH }} + retention-days: 1 + if-no-files-found: error + #deploy the snapshot artifacts to Artifactory deploySnapshot: name: deploySnapshot runs-on: ubuntu-20.04 - needs: prepare + needs: [prepare, build-docs-zip] if: needs.prepare.outputs.versionType == 'SNAPSHOT' environment: snapshots steps: @@ -94,6 +129,11 @@ jobs: with: distribution: 'temurin' java-version: 8 + - name: Download antora docs-zip + uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4 + with: + name: ${{ env.DOCS_ZIP }} + path: ${{ env.DOCS_ZIP_PATH }} - name: deploy env: ORG_GRADLE_PROJECT_artifactory_publish_username: ${{secrets.ARTIFACTORY_SNAPSHOT_USERNAME}} @@ -105,7 +145,7 @@ jobs: deployMilestone: name: deployMilestone runs-on: ubuntu-20.04 - needs: prepare + needs: [prepare, build-docs-zip] if: needs.prepare.outputs.versionType == 'MILESTONE' environment: releases steps: @@ -132,6 +172,11 @@ jobs: with: distribution: 'temurin' java-version: 8 + - name: Download antora docs-zip + uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4 + with: + 
name: ${{ env.DOCS_ZIP }} + path: ${{ env.DOCS_ZIP_PATH }} - name: deploy env: ORG_GRADLE_PROJECT_artifactory_publish_username: ${{secrets.ARTIFACTORY_USERNAME}} @@ -145,7 +190,7 @@ deployRelease: name: deployRelease runs-on: ubuntu-20.04 - needs: prepare + needs: [prepare, build-docs-zip] if: needs.prepare.outputs.versionType == 'RELEASE' environment: releases steps: @@ -172,6 +217,11 @@ with: distribution: 'temurin' java-version: 8 + - name: Download antora docs/build + uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4 + with: + name: ${{ env.DOCS_ZIP }} + path: ${{ env.DOCS_ZIP_PATH }} - name: deploy env: ORG_GRADLE_PROJECT_artifactory_publish_username: ${{secrets.ARTIFACTORY_USERNAME}} @@ -213,5 +263,23 @@ git tag -m "Release version ${{ needs.prepare.outputs.fullVersion }}" v${{ needs.prepare.outputs.fullVersion }} ${{ github.sha }} git push --tags + cleanup: + name: Cleanup docs-zip artifact + needs: [ deploySnapshot, tagRelease, tagMilestone ] + if: always() # cleanup always run after all needed jobs, regardless of whether they were successful + runs-on: ubuntu-20.04 + permissions: + actions: write + steps: + - name: delete antora docs-zip artifact + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: |- + ARTIFACTS_URL="/repos/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}/artifacts" + ARTIFACT_ID=$(gh api -H 'Accept: application/vnd.github+json' -H 'X-GitHub-Api-Version: 2022-11-28' $ARTIFACTS_URL | jq -r '.artifacts[] | select(.name == "'$DOCS_ZIP'") | .id // ""') + if [ -n "$ARTIFACT_ID" ]; then + gh api --method DELETE -H 'Accept: application/vnd.github+json' -H 'X-GitHub-Api-Version: 2022-11-28' /repos/${{github.repository}}/actions/artifacts/$ARTIFACT_ID + fi + # For Gradle configuration of signing, see https://docs.gradle.org/current/userguide/signing_plugin.html#sec:in-memory-keys # publishMavenJavaPublicationToSonatypeRepository only sends to a staging repository From 
2dc05700aada8f7002061a434333af2aaeffa704 Mon Sep 17 00:00:00 2001 From: Pierre De Rop Date: Thu, 2 May 2024 20:12:24 +0200 Subject: [PATCH 06/26] Describe how to build the doc in README --- README.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/README.md b/README.md index 442bc70340..23e9c1085e 100644 --- a/README.md +++ b/README.md @@ -89,6 +89,18 @@ When the installations succeed, try to refresh the project and see that it build The manual Operation-system specific JDK installation is well explained in the [official docs](https://docs.oracle.com/en/java/javase/20/install/overview-jdk-installation.html) +### Building the doc + +The current active shell JDK version must be compatible with JDK17 or higher for Antora to build successfully. +So, just ensure that you have installed JDK 21, as previously described and make it as the current one. + +Then you can build the antora documentation like this: +```shell +./gradlew docs +``` + +The documentation is generated in `docs/build/site/index.html` and in `distribution docs/build/distributions/reactor-core- Date: Tue, 7 May 2024 11:52:20 +0200 Subject: [PATCH 07/26] Fixed comments for build-docs-zip --- .github/workflows/publish.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 9e71d47b09..feabadecce 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -71,7 +71,7 @@ jobs: # Build the docs-zip antora doc for the current branch, and upload generated docs-zip to workflow run. # JDK21 is used because the antora plugin requires a JDK17 compatible version. - # Each deploy jobs can then download the docs-zip into ./docs/build/distributions/ in order to let it be included in published artifacts. + # Each deploy job can then download the docs-zip to ./docs/build/distributions/ in order to let it be included in published artifacts. 
# (see gradle/setup.gradle publications which includes docs zip file, if found from docs/build/distributions directory) build-docs-zip: runs-on: ubuntu-20.04 From 8aff270d7287936d0607136c0ca2c12a8c21fff2 Mon Sep 17 00:00:00 2001 From: Pierre De Rop Date: Tue, 7 May 2024 11:55:18 +0200 Subject: [PATCH 08/26] Applied feedback in README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 23e9c1085e..2a3290f504 100644 --- a/README.md +++ b/README.md @@ -92,7 +92,7 @@ is well explained in the [official docs](https://docs.oracle.com/en/java/javase/ ### Building the doc The current active shell JDK version must be compatible with JDK17 or higher for Antora to build successfully. -So, just ensure that you have installed JDK 21, as previously described and make it as the current one. +So, just ensure that you have installed JDK 21, as described above and make it as the current one. Then you can build the antora documentation like this: ```shell From c6641fef6c6c991e6fbbc83514da9d619f8cda7e Mon Sep 17 00:00:00 2001 From: Pierre De Rop Date: Tue, 7 May 2024 13:54:56 +0200 Subject: [PATCH 09/26] Removed distributed confusing prefix in README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 2a3290f504..6de779a078 100644 --- a/README.md +++ b/README.md @@ -99,7 +99,7 @@ Then you can build the antora documentation like this: ./gradlew docs ``` -The documentation is generated in `docs/build/site/index.html` and in `distribution docs/build/distributions/reactor-core- Date: Tue, 7 May 2024 13:58:23 +0200 Subject: [PATCH 10/26] Removed commented component_version attribute from antora-assembler.yml --- docs/antora-assembler.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/antora-assembler.yml b/docs/antora-assembler.yml index 8942f7ac9e..845b3985ce 100644 --- a/docs/antora-assembler.yml +++ b/docs/antora-assembler.yml @@ -1,5 +1,4 @@ root_level: 0 
-#component_versions: '*' build: publish: false asciidoc: From 3f142067bc15097229f0bf6b15950e45a3a10bd7 Mon Sep 17 00:00:00 2001 From: Pierre De Rop Date: Tue, 7 May 2024 14:05:23 +0200 Subject: [PATCH 11/26] Removed redundant empty lines --- docs/antora-assembler.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/antora-assembler.yml b/docs/antora-assembler.yml index 845b3985ce..472bc1eb55 100644 --- a/docs/antora-assembler.yml +++ b/docs/antora-assembler.yml @@ -7,5 +7,3 @@ asciidoc: attributes: source-highlighter: rouge pdf-theme: ./pdf-theme.yml - - From 05ebe58c1756fd31520948de04657e196315d580 Mon Sep 17 00:00:00 2001 From: Pierre De Rop Date: Tue, 7 May 2024 14:07:07 +0200 Subject: [PATCH 12/26] Removed useless comment --- docs/antora-playbook.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/antora-playbook.yml b/docs/antora-playbook.yml index 4c8c2cdd6e..3ff4351033 100644 --- a/docs/antora-playbook.yml +++ b/docs/antora-playbook.yml @@ -30,7 +30,6 @@ asciidoc: hide-uri-scheme: '@' tabs-sync-option: '@' chomp: 'all' - # Adhust the following attributes (they comes from reactor-netty) doctype: book icons: font toc2: true From 5c8b21e4100260b9c0496918fa0aa699b281a60f Mon Sep 17 00:00:00 2001 From: Pierre De Rop Date: Tue, 7 May 2024 14:08:50 +0200 Subject: [PATCH 13/26] Removed useless comment in docs/build.gradle --- docs/build.gradle | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/build.gradle b/docs/build.gradle index 7d4fc6dc0e..f48b2862a0 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -30,7 +30,6 @@ def isCommandAvailable(String command) { } antora { - //version = "$antoraVersion" version = libs.versions.antora.version playbook = findProperty('antora.playbook') ?: "antora-playbook.yml" options = ['--clean', '--stacktrace'] From 34c854e6ff3b2234a3bb57e858e33b24be719b36 Mon Sep 17 00:00:00 2001 From: Pierre De Rop Date: Wed, 8 May 2024 10:02:19 +0200 Subject: [PATCH 14/26] Only publish docs zip in reactor-core artifact. 
Fixed and clarified javadoc. Added a warn when docsZip is not found. --- gradle/setup.gradle | 36 +++++++++++++++++++----------------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/gradle/setup.gradle b/gradle/setup.gradle index 2f4311aa53..091a6959fc 100644 --- a/gradle/setup.gradle +++ b/gradle/setup.gradle @@ -112,24 +112,26 @@ publishing { artifact javadocJar //consider adding extra artifacts here, conditionally on submodule's name and perhaps in an afterEvaluate block afterEvaluate { - // Find the docs project (it's not a java project, so we need to lookup it) - def docsProject = project.findProject(':docs') - if (docsProject) { - // Access the docsZip task from the docs project - def docsZipTask = docsProject.tasks.findByName('docsZip') - if (docsZipTask) { - // Add the docsZip task as an artifact - artifact docsZipTask + if (project.name == 'reactor-core') { + // Find the docs project; if found, include docsZip in published artifacts. + // The docs project isn't loaded if the current JDK version is below JDK17, + // as it requires a current JDK17+ compatibility for the Antora Gradle plugin (see settings.gradle) + def docsProject = project.findProject(':docs') + if (docsProject) { + artifact docsProject.tasks.docsZip } - } - else { - // If the current JDK version is JDK8, the docs project is not loaded (see settings.gradle), so - // docsTask is not available. In this case, include the docsZip file path directly as an artifact, if it exists. - // (it may exist in case the docs have been previously built using a JDK17 compatible JDK). - def docsZipFile = file("${rootDir}/docs/build/distributions/reactor-core-${project.version}-docs.zip") - if (docsZipFile.exists()) { - artifact(docsZipFile) { - classifier 'docs' + else { + // docs project not found, indicating a current JDK version below JDK17. 
+ // Assuming the docs zip was built with JDK17+ previously, manually check for the docsZip in + // docs/build/distributions/, and include it in the published artifact if found + def docsZipFile = file("${rootDir}/docs/build/distributions/reactor-core-${project.version}-docs.zip") + if (docsZipFile.exists()) { + artifact(docsZipFile) { + classifier 'docs' + } + } + else { + logger.warn("Antora docs zip not found from docs/ project. Use JDK 17+ to include docs in published artifacts.") } } } From 20f7d80c1a16bf81b5452cdbeb68fff2c2f54d28 Mon Sep 17 00:00:00 2001 From: Pierre De Rop Date: Wed, 8 May 2024 10:13:12 +0200 Subject: [PATCH 15/26] Exclude docs/build/** in nohttp part --- build.gradle | 1 + 1 file changed, 1 insertion(+) diff --git a/build.gradle b/build.gradle index 85b29caf85..1739a86d0e 100644 --- a/build.gradle +++ b/build.gradle @@ -88,6 +88,7 @@ ext { nohttp { source.exclude "docs/modules/ROOT/assets/highlight/**" source.exclude "docs/.gradle/**" + source.exclude "docs/build/**" source.exclude "**/build/reports/tests/**/*.html" allowlistFile = project.file('codequality/nohttp/allowlist.lines') } From d3762ab6a722495a575e867e127a0c32ca599ecb Mon Sep 17 00:00:00 2001 From: Pierre De Rop Date: Wed, 15 May 2024 16:02:19 +0200 Subject: [PATCH 16/26] Log a friendly message when antora related tasks are used and when JDK version is lower than 17 --- build.gradle | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/build.gradle b/build.gradle index 1739a86d0e..62ba36b22d 100644 --- a/build.gradle +++ b/build.gradle @@ -198,6 +198,23 @@ configure(subprojects) { p -> } } } + + if (!JavaVersion.current().isCompatibleWith(JavaVersion.VERSION_17)) { + // If the JDK version is less than 17, the 'docs' subproject is not loaded, and therefore, the 'antora' or 'docs' tasks are unavailable. 
+ // Display a friendly error message indicating that JDK 17 or higher is required + + task antora { + doLast { + throw new GradleException("antora task requires JDK 17 or higher.") + } + } + + task docs { + doLast { + throw new GradleException("docs task requires JDK 17 or higher") + } + } + } } configure(subprojects) { p -> From 75f03a8aba7c45a936b4eefe26ced8fcb250275b Mon Sep 17 00:00:00 2001 From: Pierre De Rop Date: Wed, 15 May 2024 16:10:31 +0200 Subject: [PATCH 17/26] Clarify TIP in aboutDocs.adoc --- docs/modules/ROOT/pages/aboutDoc.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/aboutDoc.adoc b/docs/modules/ROOT/pages/aboutDoc.adoc index 5ebe05b10e..e8958b00e2 100644 --- a/docs/modules/ROOT/pages/aboutDoc.adoc +++ b/docs/modules/ROOT/pages/aboutDoc.adoc @@ -34,7 +34,7 @@ ifeval::["{backend}" == "html5"] TIP: To facilitate documentation edits, you can edit the current page from the `Edit this Page` link located in the upper right corner sidebar. The link opens an edit `UI` directly on `GitHub` for the main source file for the current page. These links are only present in the `HTML5` version of this reference guide. They look like the following link: -link:https://github.com/reactor/reactor-core/edit/main/docs/modules/ROOT/pages/aboutDoc.adoc[Edit this Page^, role="fa fa-edit"] to xref:aboutDoc.adoc[About the Documentation]. +link:https://github.com/reactor/reactor-core/edit/main/docs/modules/ROOT/pages/aboutDoc.adoc[Edit this Page^, role="fa fa-edit"] to make changes to xref:aboutDoc.adoc[About the Documentation] page. 
endif::[] [[getting-help]] From 1ba2fbf6de12ba58e69b1fa3fbe95b6cdfbe8e98 Mon Sep 17 00:00:00 2001 From: Pierre De Rop Date: Wed, 15 May 2024 16:14:03 +0200 Subject: [PATCH 18/26] Programmatically creating a sequence section does not render correctly --- docs/modules/ROOT/pages/producing.adoc | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/modules/ROOT/pages/producing.adoc b/docs/modules/ROOT/pages/producing.adoc index 67634cf613..062ade5034 100644 --- a/docs/modules/ROOT/pages/producing.adoc +++ b/docs/modules/ROOT/pages/producing.adoc @@ -1,5 +1,6 @@ [[producing]] = Programmatically creating a sequence + In this section, we introduce the creation of a `Flux` or a `Mono` by programmatically defining its associated events (`onNext`, `onError`, and `onComplete`). All these methods share the fact that they expose an API to From b80c9d3be77658c74bf28e3e5d1ab05e330394a4 Mon Sep 17 00:00:00 2001 From: Pierre De Rop Date: Wed, 15 May 2024 19:35:54 +0200 Subject: [PATCH 19/26] Corrected link to advanced features / null-safety --- docs/modules/ROOT/pages/kotlin.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/kotlin.adoc b/docs/modules/ROOT/pages/kotlin.adoc index bb82b7e7c6..2be29f6745 100644 --- a/docs/modules/ROOT/pages/kotlin.adoc +++ b/docs/modules/ROOT/pages/kotlin.adoc @@ -80,7 +80,7 @@ declarations and expressive "`value or no value`" semantics without paying the c (Kotlin allows using functional constructs with nullable values. See this https://www.baeldung.com/kotlin-null-safety[comprehensive guide to Kotlin null-safety].) 
-Although Java does not let one express null safety in its type-system, Reactor xref:kotlin.adoc#kotlin-null-safety[now provides null safety] +Although Java does not let one express null safety in its type-system, Reactor xref:advancedFeatures/null-safety.adoc[now provides null safety] of the whole Reactor API through tooling-friendly annotations declared in the `reactor.util.annotation` package. By default, types from Java APIs used in Kotlin are recognized as From b2f925c2dac4632726f5452c615adc2fc108098d Mon Sep 17 00:00:00 2001 From: Pierre De Rop Date: Thu, 16 May 2024 15:27:35 +0200 Subject: [PATCH 20/26] Hide FIXME --- docs/modules/ROOT/pages/metrics.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/metrics.adoc b/docs/modules/ROOT/pages/metrics.adoc index 517f23638e..7be61e1857 100644 --- a/docs/modules/ROOT/pages/metrics.adoc +++ b/docs/modules/ROOT/pages/metrics.adoc @@ -42,7 +42,7 @@ submitted through the wrapper instance returned by `Micrometer#timedScheduler` a See xref:metrics-details.adoc#micrometer-details-timedScheduler[Micrometer.timedScheduler()] for produced meters and associated default tags. -FIXME reactor-monitoring-demo won't be in sync with 3.5.0 anymore +// FIXME reactor-monitoring-demo won't be in sync with 3.5.0 anymore //TIP: Grafana + Prometheus users can use https://raw.githubusercontent.com/reactor/reactor-monitoring-demo/master/dashboards/schedulers.json[a pre-built dashboard] which includes panels for threads, completed tasks, task queues and other handy metrics. 
[[publisher-metrics]] From fad1622c1da5ab72ea90732c09a8d49f5b421489 Mon Sep 17 00:00:00 2001 From: Pierre De Rop Date: Thu, 16 May 2024 16:29:12 +0200 Subject: [PATCH 21/26] Restructured levels in Exposing Reactor metrics chapter --- docs/modules/ROOT/pages/metrics-details.adoc | 15 +++++++-------- docs/modules/ROOT/pages/metrics.adoc | 2 +- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/docs/modules/ROOT/pages/metrics-details.adoc b/docs/modules/ROOT/pages/metrics-details.adoc index 5553996a15..0a4059d409 100644 --- a/docs/modules/ROOT/pages/metrics-details.adoc +++ b/docs/modules/ROOT/pages/metrics-details.adoc @@ -1,8 +1,7 @@ - -= Meters and tags for Reactor-Core-Micrometer module +== Meters and tags for Reactor-Core-Micrometer module [[micrometer-details-metrics]] -== `Micrometer.metrics()` +=== `Micrometer.metrics()` Below is the list of meters used by the metrics tap listener feature, as exposed via `Micrometer.metrics(MeterRegistry meterRegistry)`. @@ -10,19 +9,19 @@ IMPORTANT: Please note that metrics below use a dynamic `%s` prefix. When applied on a `Flux` or `Mono` that uses the `name(String n)` operator, this is replaced with `n`. Otherwise, this is replaced by the default value of `"reactor"`. -include::partial$meterListener_metrics.adoc[leveloffset=2] +include::partial$meterListener_metrics.adoc[leveloffset=3] [[micrometer-details-timedScheduler]] -== `Micrometer.timedScheduler()` +=== `Micrometer.timedScheduler()` Below is the list of meters used by the TimedScheduler feature, as exposed via `Micrometer.timedScheduler(Scheduler original, MeterRegistry meterRegistry, String metricsPrefix)`. IMPORTANT: Please note that metrics below use a dynamic `%s` prefix. This is replaced with the provided `metricsPrefix` in practice. 
-include::partial$timedScheduler_metrics.adoc[leveloffset=2] +include::partial$timedScheduler_metrics.adoc[leveloffset=3] [[micrometer-details-observation]] -== `Micrometer.observation()` +=== `Micrometer.observation()` Below is the list of meters used by the observation tap listener feature, as exposed via `Micrometer.observation(ObservationRegistry registry)`. @@ -32,4 +31,4 @@ NOTE: You can also fully customize Micrometer's Observation via `Micrometer.observation(ObservationRegistry registry, Function observationSupplier)` with your own Observation supplier, allowing to configure its attributes (name, contextual name, low and high cardinality keys, ...). -include::partial$observation_metrics.adoc[leveloffset=2] +include::partial$observation_metrics.adoc[leveloffset=3] diff --git a/docs/modules/ROOT/pages/metrics.adoc b/docs/modules/ROOT/pages/metrics.adoc index 7be61e1857..3fdb9bf48d 100644 --- a/docs/modules/ROOT/pages/metrics.adoc +++ b/docs/modules/ROOT/pages/metrics.adoc @@ -168,4 +168,4 @@ listenToEvents() <5> with a custom `ObservationConvention` <6> and a custom `Supplier`. 
-include::metrics-details.adoc[] \ No newline at end of file +include::metrics-details.adoc[levelOffset=1] \ No newline at end of file From 63d66615d18724a709dbc6841b763dbf22fd71b9 Mon Sep 17 00:00:00 2001 From: Pierre De Rop Date: Thu, 16 May 2024 20:43:23 +0200 Subject: [PATCH 22/26] Split appendices in separate documents --- docs/modules/ROOT/nav.adoc | 13 +++++++++++- docs/modules/ROOT/pages/appendices.adoc | 27 ------------------------- 2 files changed, 12 insertions(+), 28 deletions(-) delete mode 100644 docs/modules/ROOT/pages/appendices.adoc diff --git a/docs/modules/ROOT/nav.adoc b/docs/modules/ROOT/nav.adoc index 19f09ce25d..e852a69f5b 100644 --- a/docs/modules/ROOT/nav.adoc +++ b/docs/modules/ROOT/nav.adoc @@ -25,5 +25,16 @@ ** xref:advanced-contextPropagation.adoc[] ** xref:advancedFeatures/cleanup.adoc[] ** xref:advancedFeatures/null-safety.adoc[] -* xref:appendices.adoc[] +* Appendices +** xref:apdx-operatorChoice.adoc[Which operator do I need?] +** xref:apdx-howtoReadMarbles.adoc[How to read marble diagrams?] +** xref:faq.adoc[FAQ, Best Practices, and "How do I...?] +** xref:apdx-reactorExtra.adoc[Reactor-Extra] +//TODO later add appendices about internals, writing operators, fusion +//include::apdx-implem.adoc[levelOffset=1] +//include::apdx-writingOperator.adoc[levelOffset=1] +//include::apdx-optimizations.adoc[levelOffset=1] +//TODO later add appendix about migrating from RxJava? 
+//include::apdx-migrating.adoc[levelOffset=1] + diff --git a/docs/modules/ROOT/pages/appendices.adoc b/docs/modules/ROOT/pages/appendices.adoc deleted file mode 100644 index 3918f10aa1..0000000000 --- a/docs/modules/ROOT/pages/appendices.adoc +++ /dev/null @@ -1,27 +0,0 @@ -= Appendices - -[appendix] -include::apdx-operatorChoice.adoc[] - -[appendix] -include::apdx-howtoReadMarbles.adoc[] - -[appendix] -include::faq.adoc[] - -[appendix] -include::apdx-reactorExtra.adoc[] - -//TODO later add appendices about internals, writing operators, fusion -//[appendix] -//include::apdx-implem.adoc[levelOffset=1] - -//[appendix] -//include::apdx-writingOperator.adoc[levelOffset=1] - -//[appendix] -//include::apdx-optimizations.adoc[levelOffset=1] - -//TODO later add appendix about migrating from RxJava? -//[appendix] -//include::apdx-migrating.adoc[levelOffset=1] From 8ea88631a0d7bf520b77db3f31a5d44d8fab2be3 Mon Sep 17 00:00:00 2001 From: Pierre De Rop Date: Thu, 16 May 2024 20:52:45 +0200 Subject: [PATCH 23/26] Reorder links in apdx-operatorChoice.adoc --- docs/modules/ROOT/pages/apdx-operatorChoice.adoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/modules/ROOT/pages/apdx-operatorChoice.adoc b/docs/modules/ROOT/pages/apdx-operatorChoice.adoc index 09a403039f..8359ff952f 100644 --- a/docs/modules/ROOT/pages/apdx-operatorChoice.adoc +++ b/docs/modules/ROOT/pages/apdx-operatorChoice.adoc @@ -13,10 +13,10 @@ I want to deal with: * xref:apdx-operatorChoice.adoc#which.values[Transforming an Existing Sequence] -* xref:apdx-operatorChoice.adoc#which.filtering[Filtering a Sequence] - * xref:apdx-operatorChoice.adoc#which.peeking[Peeking into a Sequence] +* xref:apdx-operatorChoice.adoc#which.filtering[Filtering a Sequence] + * xref:apdx-operatorChoice.adoc#which.errors[Handling Errors] * xref:apdx-operatorChoice.adoc#which.time[Working with Time] From 9243ea5bd2f274cf4e514e064ad88014f0c09afb Mon Sep 17 00:00:00 2001 From: Pierre De Rop Date: Thu, 16 May 
2024 21:26:33 +0200 Subject: [PATCH 24/26] Do not include Flux javadoc in the link --- docs/modules/ROOT/pages/apdx-operatorChoice.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/apdx-operatorChoice.adoc b/docs/modules/ROOT/pages/apdx-operatorChoice.adoc index 8359ff952f..ff58d0005b 100644 --- a/docs/modules/ROOT/pages/apdx-operatorChoice.adoc +++ b/docs/modules/ROOT/pages/apdx-operatorChoice.adoc @@ -21,7 +21,7 @@ I want to deal with: * xref:apdx-operatorChoice.adoc#which.time[Working with Time] -* xref:apdx-operatorChoice.adoc#which.window[Splitting a {javadoc}/reactor/core/publisher/Flux.html[Flux]] +* xref:apdx-operatorChoice.adoc#which.window[Splitting a Flux] * xref:apdx-operatorChoice.adoc#which.blocking[Going Back to the Synchronous World] From bc98827e48210fa62a1bd0bdd9c44ab96c7d3d41 Mon Sep 17 00:00:00 2001 From: Pierre De Rop Date: Thu, 16 May 2024 21:30:24 +0200 Subject: [PATCH 25/26] Do not include Flux javadoc in the link --- docs/modules/ROOT/pages/apdx-operatorChoice.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/ROOT/pages/apdx-operatorChoice.adoc b/docs/modules/ROOT/pages/apdx-operatorChoice.adoc index ff58d0005b..d8316140d7 100644 --- a/docs/modules/ROOT/pages/apdx-operatorChoice.adoc +++ b/docs/modules/ROOT/pages/apdx-operatorChoice.adoc @@ -25,7 +25,7 @@ I want to deal with: * xref:apdx-operatorChoice.adoc#which.blocking[Going Back to the Synchronous World] -* xref:apdx-operatorChoice.adoc#which.multicasting[Multicasting a {javadoc}/reactor/core/publisher/Flux.html[Flux] to several https://www.reactive-streams.org/reactive-streams-1.0.3-javadoc/org/reactivestreams/Subscriber.html?is-external=true[Subscribers]] +* xref:apdx-operatorChoice.adoc#which.multicasting[Multicasting a Flux to several Subscribers] [[which.create]] === Creating a New Sequence... 
From 539a40486790b75db40d9781ebf99a576c85403a Mon Sep 17 00:00:00 2001 From: Pierre De Rop Date: Thu, 16 May 2024 21:37:29 +0200 Subject: [PATCH 26/26] Added missing link --- docs/modules/ROOT/pages/faq.adoc | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/modules/ROOT/pages/faq.adoc b/docs/modules/ROOT/pages/faq.adoc index bf57d58a88..adf46bd14a 100644 --- a/docs/modules/ROOT/pages/faq.adoc +++ b/docs/modules/ROOT/pages/faq.adoc @@ -6,6 +6,7 @@ This section covers the following content: * xref:faq.adoc#faq.wrap-blocking[How Do I Wrap a Synchronous, Blocking Call?] * xref:faq.adoc#faq.chain[I Used an Operator on my `Flux` but it Doesn't Seem to Apply. What Gives?] * xref:faq.adoc#faq.monoThen[My `Mono` `zipWith` or `zipWhen` is never called] +* xref:faq.adoc#faq.monoZipEmptyCompletion[Using `zip` along with empty-completed publishers] * xref:faq.adoc#faq.retryWhen[How to Use `retryWhen` to Emulate `retry(3)`?] * xref:faq.adoc#faq.exponentialBackoff[How can I use `retryWhen` for Exponential Backoff?] * xref:faq.adoc#faq.thread-affinity-publishon[How Do I Ensure Thread Affinity when I Use `publishOn()`?]