diff --git a/docs/src/adoc/index.adoc b/docs/src/adoc/index.adoc index 17cd4a5e4b..0e18a322ac 100644 --- a/docs/src/adoc/index.adoc +++ b/docs/src/adoc/index.adoc @@ -469,10 +469,10 @@ A link:{jdbidocs}/core/statement/Query.html[Query^] is a link:{jdbidocs}/core/result/ResultBearing.html[result-bearing^] SQL statement that returns a result set from the database. -//[source,java,indent=0] -//---- -//include::{exampledir}/StatementsTest.java[tags=query] -//---- +[source,java,indent=0] +---- +include::{exampledir}/StatementsTest.java[tags=query] +---- For single rows, you can use `findOnly()`, which expects exactly one row (or throws an exception): @@ -585,27 +585,6 @@ include::{exampledir}/ResultsTest.java[tags=rowMapper] This `RowMapper` is equivalent to the lambda mapper above but more explicit. -// TODO move the stream stuff to `ResultIterable`? - -*Stream* integration allows you to use a RowMapper to adapt a ResultSet into -the new Java 8 Streams framework. As long as your database supports streaming -results (for example, PostgreSQL will do it as long as you are in a transaction -and set a fetch size), the stream will lazily fetch rows from the database as -necessary. - -[source,java] ----- -handle.createQuery("SELECT id, name FROM user ORDER BY id ASC") - .map(new UserMapper()) - .useStream(stream -> { - Optional first = stream - .filter(u -> u.id > 2) - .map(u -> u.name) - .findFirst(); - assertThat(first).contains("Charlie"); - }); ----- - ===== RowMappers registry Row mappers can be registered for particular types. This simplifies usage, @@ -1298,24 +1277,45 @@ multiple rows are encountered, it will throw *IllegalStateException*. ===== Stream +*Stream* integration allows you to use a RowMapper to adapt a ResultSet into +the new Java 8 Streams framework. 
As long as your database supports streaming +results (for example, PostgreSQL will do it as long as you are in a transaction +and set a fetch size), the stream will lazily fetch rows from the database as +necessary. + +*#stream* returns a *Stream*. You should then process the stream and produce a result. This stream must be closed to release any database resources held, so -we recommend using a *try-with-resources* block to ensure that no resources are -leaked. +we recommend *useStream*, *withStream* or alternatively a *try-with-resources* block +to ensure that no resources are leaked. -TODO: try-with-resources example +[source,java] +---- +handle.createQuery("SELECT id, name FROM user ORDER BY id ASC") + .map(new UserMapper()) + .useStream(stream -> { + Optional<String> first = stream + .filter(u -> u.id > 2) + .map(u -> u.name) + .findFirst(); + assertThat(first).contains("Charlie"); + }); +---- *#withStream* and *#useStream* handle closing the stream for you. You provide a *StreamCallback* that produces a result or a *StreamConsumer* that produces no result, respectively. -TODO: useStream, withStream examples - ===== List *#list* emits a *List*. This necessarily buffers all results in memory. -TODO: example +[source,java] +---- +List<User> users = + handle.createQuery("SELECT id, name FROM user") + .map(new UserMapper()) + .list(); +---- ===== Collectors @@ -1345,7 +1345,7 @@ Map users = h.createQuery("select id, name from something") value and a *BiFunction* it will repeatedly combine *U*s until only a single remains, and then return that. -TODO: example +// TODO: example ===== ResultSetScanner @@ -1568,8 +1568,6 @@ certain patterns of data. Consider performance requirements when deciding whether to use high level mapping or more direct low level access with handwritten mappers. 
-TODO: move the following into the SQL Objects section - You can also use it with SqlObject: [source,java,indent=0] @@ -1755,16 +1753,11 @@ Both optionally allow you to specify the transaction isolation level. include::{exampledir}/TransactionTest.java[tags=simpleTransaction] ---- -Here, we (probably unnecessarily) guard a simple _SELECT_ statement with a -transaction. - -TODO: +Here, we (probably unnecessarily) guard a simple _SELECT_ statement with a transaction. -* Jdbi.useTransaction (shortcut for useHandle(h -> h.useTransaction(...)) -* Jdbi.inTransaction (shortcut for withHandle(h -> h.inTransaction(...)) -* Handle methods for transaction management: begin(), savepoint(), rollback(), - commit(), etc. Failing to explicitly commit or roll back a transaction will - roll back the transaction and throw an exception. +Additionally, Handle has a number of methods for direct transaction management: +begin(), savepoint(), rollback(), commit(), etc. Normally, you will not need to use these. +Failing to explicitly commit or roll back a transaction will roll back the transaction and throw an exception. ==== Serializable Transactions @@ -1773,8 +1766,6 @@ For more advanced queries, sometimes serializable transactions are required. abort due to serialization failures. It is important that your transaction does not have side effects as it may be executed multiple times. -TODO: convert this example to core API? - [source,java,indent=0] ---- include::{exampledir}/TransactionTest.java[tags=serializable] @@ -2746,12 +2737,14 @@ anything, in which case the isolation level of the outer method is used. include::{exampledir}/TransactionTest.java[tags=sqlObjectNestedTransaction] ---- +//// TODO: Demonstrate Transactional mixin * Call methods on the mixin to begin, checkpoint, rollback, and commit transactions. * Be careful using this mixin with on-demand SQL Objects. Only use inTransaction or useTransaction. None of the others will do what you expect. 
+//// === Using SQL Objects @@ -2811,6 +2804,7 @@ There is a performance penalty every time a connection is allocated and released. If you need to make successive calls to a SQL Object, consider using one of the above options for better performance, instead of on-demand. +//// TODO: * Explain how nested calls within e.g. a default method use the same handle @@ -2822,7 +2816,7 @@ TODO: `withHandle()` lambda. * `@CreateSqlObject` does not play nice with on-demand. * `Stream` or `Iterable` return types do not play nice with on-demand. - +//// === Additional Annotations @@ -2906,8 +2900,6 @@ instance: jdbi.installPlugin(new H2DatabasePlugin()); ---- -TODO: usage examples - === JodaTime This plugin adds support for using joda-time's `DateTime` type. @@ -2929,8 +2921,6 @@ Then install the plugin into your `Jdbi` instance: jdbi.installPlugin(new JodaTimePlugin()); ---- -TODO: usage example - === JPA Using the JPA plugin is a great way to trick your boss into letting you try @@ -2959,8 +2949,6 @@ Then install the plugin into your `Jdbi` instance: jdbi.installPlugin(new JpaPlugin()); ---- -TODO: usage example - Honestly though.. just tear off the bandage and switch to Jdbi proper. === Kotlin @@ -3087,7 +3075,7 @@ To use this feature, add a Maven dependency: Then, use the `OracleReturning` class with an `Update` or `PreparedBatch` to get the returned DML. -TODO: usage example +// TODO: usage example === PostgreSQL @@ -3618,18 +3606,20 @@ include::{exampledir}/ExampleConfig.java[tags=exampleConfig] === JdbiPlugin -TODO: +JdbiPlugin can be used to bundle bulk configuration. +Plugins may be installed explicitly via `Jdbi.installPlugin(JdbiPlugin)`, or +may be installed automagically from the classpath using the ServiceLoader mechanism +via `installPlugins()`. 
-* JdbiPlugin can be used to bundle bulk configuration -* Plugins may be installed explicitly via Jdbi.installPlugin(JdbiPlugin) -* Plugins may be installed automagically from the classpath, using the - ServiceLoader mechanism, if you provide a file in - `META-INF/services/org.jdbi.v3.core.spi.JdbiPlugin` containing the fully - qualified class name of your plugin. -** Example -** Call `Jdbi.installPlugins()` to automatically install all SPI plugins on the - classpath. +Jars may provide a file in `META-INF/services/org.jdbi.v3.core.spi.JdbiPlugin` +containing the fully qualified class name of your plugin. +[TIP] +The developers encourage you to install plugins explicitly. Code with declared dependencies +on the module it uses is more robust to refactoring and provides useful data +for static analysis tools about what code is or isn't used. + +//// === JdbiCollectors TODO: @@ -3639,6 +3629,7 @@ TODO: * JdbiCollectors registry * Use GenericTypes utility class to help with generics. * Last-registered factory which supporting a given container type wins. +//// === User-Defined Annotations @@ -4024,6 +4015,7 @@ public interface ContactDao { Decorator order is expressed from outermost to innermost. +//// === SQL Object internal plumbing TODO: @@ -4040,12 +4032,14 @@ TODO: * HandlerDecorators - Apply Handler decorations base on any criteria, not just annotations. ** SqlMethodAnnotatedHandlerDecorator +//// === Extensions Jdbi supports generic extensions. SQL Objects are just one implementation of that spec. If SQL Objects aren't doing it for you, you can create your own! +//// TODO: * Regular extensions @@ -4057,6 +4051,7 @@ TODO: *** Reentrant calls like one method of the interface calling another, reuse the already-open handle. * ExtensionFactory +//// === SQL interpolation @@ -4069,6 +4064,7 @@ Be careful using this feature! This can become a vector for SQL injection attacks if you fail to link:https://xkcd.com/327/[sanitize your database inputs^]. 
+//// TODO: * Add an angle-bracked marker to your SQL statement, e.g. ``. Whatever @@ -4080,6 +4076,7 @@ TODO: * usage examples: ** `insert into () values ()` ** `select from
where order by ` +//// === TemplateEngine @@ -4214,9 +4211,9 @@ Most users will not need to implement the *ResultProducer* interface. === Best Practices -TODO: - * Test your SQL Objects (DAOs) against real databases when possible. + Jdbi tries to be defensive and fail eagerly when you hold it wrong. + * Use the `-parameters` compiler flag to avoid all those `@Bind("foo") String foo` redundant qualifiers in SQL Object method parameters. @@ -4233,7 +4230,8 @@ TODO: === Related Projects -TODO: +link:https://github.com/opentable/otj-pg-embedded[Embedded Postgres^] +makes testing against a real database quick and easy. * arteam/dropwizard-jdbi3 * arteam/metrics-jdbi3 diff --git a/docs/src/test/java/jdbi/doc/TransactionTest.java b/docs/src/test/java/jdbi/doc/TransactionTest.java index d5964e8990..385821117a 100644 --- a/docs/src/test/java/jdbi/doc/TransactionTest.java +++ b/docs/src/test/java/jdbi/doc/TransactionTest.java @@ -22,9 +22,10 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; +import java.util.function.BiConsumer; +import java.util.function.Supplier; import org.jdbi.v3.core.Handle; -import org.jdbi.v3.core.HandleCallback; import org.jdbi.v3.core.Jdbi; import org.jdbi.v3.core.mapper.reflect.ConstructorMapper; import org.jdbi.v3.core.transaction.SerializableTransactionRunner; @@ -158,74 +159,46 @@ default void innerMethodWithADifferentLevel() {} // end::sqlObjectNestedTransaction[] } - // tag::serializable[] - public interface IntListDao { - @SqlUpdate("CREATE TABLE ints (value INTEGER)") - void create(); - - @SqlQuery("SELECT sum(value) FROM ints") - int sum(); - - @SqlUpdate("INSERT INTO ints(value) VALUES(:value)") - void insert(int value); - } - - static class SumAndInsert implements Callable, HandleCallback { - private final Jdbi db; - private final CountDownLatch latch; - - public SumAndInsert(CountDownLatch latch, Jdbi db) { - this.latch = latch; - this.db = db; - } - - @Override - 
public Integer withHandle(Handle handle) throws Exception { - IntListDao dao = handle.attach(IntListDao.class); - int sum = dao.sum(); - - // First time through, make sure neither transaction writes until both have read - latch.countDown(); - latch.await(); - - // Now do the write. - dao.insert(sum); - return sum; - } - - @Override - public Integer call() throws Exception { - // Get a connection and run the transaction - return db.inTransaction(TransactionIsolationLevel.SERIALIZABLE, this); - } - } - @Test public void serializableTransaction() throws Exception { + // tag::serializable[] // Automatically rerun transactions db.setTransactionHandler(new SerializableTransactionRunner()); // Set up some values - IntListDao dao = handle.attach(IntListDao.class); - dao.create(); - dao.insert(10); - dao.insert(20); + BiConsumer<Handle, Integer> insert = (h, i) -> h.execute("INSERT INTO ints(value) VALUES(?)", i); + handle.execute("CREATE TABLE ints (value INTEGER)"); + insert.accept(handle, 10); + insert.accept(handle, 20); + // Run the following twice in parallel, and synchronize ExecutorService executor = Executors.newCachedThreadPool(); CountDownLatch latch = new CountDownLatch(2); - // Both of these would calculate 10 + 20 = 30, but that violates serialization! - SumAndInsert txn1 = new SumAndInsert(latch, db); - SumAndInsert txn2 = new SumAndInsert(latch, db); + Supplier<Callable<Integer>> sumAndInsert = () -> () -> + db.inTransaction(TransactionIsolationLevel.SERIALIZABLE, h -> { + // Both read initial state of table + int sum = h.select("SELECT sum(value) FROM ints").mapTo(int.class).findOnly(); + + // First time through, make sure neither transaction writes until both have read + latch.countDown(); + latch.await(); - Future result1 = executor.submit(txn1); - Future result2 = executor.submit(txn2); + // Now do the write. + insert.accept(h, sum); + return sum; + }); + + // Both of these would calculate 10 + 20 = 30, but that violates serialization! 
+ Future<Integer> result1 = executor.submit(sumAndInsert.get()); + Future<Integer> result2 = executor.submit(sumAndInsert.get()); - // One of them gets 30, the other gets 10 + 20 + 30 = 60 + // One of the transactions gets 30, the other will abort and automatically rerun. + // On the second attempt it will compute 10 + 20 + 30 = 60, seeing the update from its sibling. // This assertion fails under any isolation level below SERIALIZABLE! assertThat(result1.get() + result2.get()).isEqualTo(30 + 60); executor.shutdown(); + // end::serializable[] } - // end::serializable[] }