diff --git a/README.adoc b/README.adoc index 88d5e59d1..a75990414 100644 --- a/README.adoc +++ b/README.adoc @@ -45,13 +45,13 @@ Maven (Java):: st.orm storm-java21 - 1.8.0 + 1.8.1 compile st.orm storm-core - 1.8.0 + 1.8.1 runtime ---- @@ -59,15 +59,15 @@ Gradle (Java):: + [source,groovy] ---- -implementation 'st.orm:storm-java21:1.8.0' -runtimeOnly 'st.orm:storm-core:1.8.0' +implementation 'st.orm:storm-java21:1.8.1' +runtimeOnly 'st.orm:storm-core:1.8.1' ---- Gradle (Kotlin):: + [source,groovy] ---- -implementation 'st.orm:storm-kotlin:1.8.0' -runtimeOnly 'st.orm:storm-core:1.8.0' +implementation 'st.orm:storm-kotlin:1.8.1' +runtimeOnly 'st.orm:storm-core:1.8.1' ---- ==== @@ -96,7 +96,7 @@ Java:: record City(@PK Integer id, String name, long population -) implements Entity {} +) implements Entity {} record User(@PK Integer id, String email, @@ -1192,7 +1192,7 @@ Maven:: st.orm storm-oracle - 1.8.0 + 1.8.1 runtime ---- @@ -1200,7 +1200,7 @@ Gradle:: + [source,groovy] ---- -runtimeOnly 'st.orm:storm-oracle:1.8.0' +runtimeOnly 'st.orm:storm-oracle:1.8.1' ---- ==== @@ -1222,7 +1222,7 @@ Maven:: st.orm storm-metamodel-processor - 1.8.0 + 1.8.1 provided ---- @@ -1230,7 +1230,7 @@ Gradle:: + [source,groovy] ---- -annotationProcessor 'st.orm:storm-metamodel-processor:1.8.0' +annotationProcessor 'st.orm:storm-metamodel-processor:1.8.1' ---- ==== @@ -1294,7 +1294,7 @@ Maven (Jackson):: st.orm storm-jackson - 1.8.0 + 1.8.1 compile ---- @@ -1302,13 +1302,13 @@ Gradle (Jackson):: + [source,groovy] ---- -implementation 'st.orm:storm-jackson:1.8.0' +implementation 'st.orm:storm-jackson:1.8.1' ---- Gradle (Kotlinx Serialization):: + [source,groovy] ---- -implementation 'st.orm:storm-kotlinx-serialization:1.8.0' +implementation 'st.orm:storm-kotlinx-serialization:1.8.1' ---- ==== @@ -1431,6 +1431,11 @@ EntityRepository, a DSL query, or a SQL template. This observed state is used as Dirty checking is only applied when updates are executed through an EntityRepository. 
Manual SQL updates, bulk statements, or custom queries bypass dirty checking entirely and may leave in-memory entities stale. +Unless configured otherwise, entity observation is automatically disabled for `READ_UNCOMMITTED` transactions. At this +isolation level, the application expects to see uncommitted changes from other transactions. Caching observed state +would mask these changes, contradicting the requested isolation semantics. When observation is disabled, dirty checking +treats all entities as dirty, resulting in full-row updates. + Dirty checking affects both whether an UPDATE is issued and how that UPDATE is constructed. This behavior is controlled by UpdateMode and by the dirty checking strategy. @@ -1598,7 +1603,7 @@ Maven (Java):: st.orm storm-spring - 1.8.0 + 1.8.1 compile ---- @@ -1606,7 +1611,7 @@ Gradle (Java):: + [source,groovy] ---- -implementation 'st.orm:storm-spring:1.8.0' +implementation 'st.orm:storm-spring:1.8.1' ---- Maven (Kotlin):: + @@ -1615,7 +1620,7 @@ Maven (Kotlin):: st.orm storm-kotlin-spring - 1.8.0 + 1.8.1 compile ---- @@ -1623,7 +1628,7 @@ Gradle (Kotlin):: + [source,groovy] ---- -implementation 'st.orm:storm-kotlin-spring:1.8.0' +implementation 'st.orm:storm-kotlin-spring:1.8.1' ---- ==== diff --git a/pom.xml b/pom.xml index c8c6abad4..b8d2d744e 100644 --- a/pom.xml +++ b/pom.xml @@ -16,7 +16,7 @@ st.orm storm-framework - 1.8.0 + 1.8.1 pom Storm Framework A SQL Template and ORM framework, focusing on modernizing and simplifying database programming. 
diff --git a/storm-core/pom.xml b/storm-core/pom.xml index b137da53e..f098eb3bc 100644 --- a/storm-core/pom.xml +++ b/storm-core/pom.xml @@ -6,7 +6,7 @@ st.orm storm-framework - 1.8.0 + 1.8.1 ../pom.xml storm-core diff --git a/storm-core/src/main/java/st/orm/core/repository/impl/EntityRepositoryImpl.java b/storm-core/src/main/java/st/orm/core/repository/impl/EntityRepositoryImpl.java index ae868a894..515dbb011 100644 --- a/storm-core/src/main/java/st/orm/core/repository/impl/EntityRepositoryImpl.java +++ b/storm-core/src/main/java/st/orm/core/repository/impl/EntityRepositoryImpl.java @@ -290,15 +290,14 @@ public E insertAndFetch(@Nonnull E entity) { } /** - * Returns the entity cache for the current transaction, or null. + * Returns the entity cache for the current transaction, if available. * - * @return the entity cache for the current transaction, or null. + * @return the entity cache for the current transaction, or empty if not available. * @since 1.7 */ protected Optional> entityCache() { //noinspection unchecked return TRANSACTION_TEMPLATE.currentContext() - .filter(ctx -> !ctx.isReadOnly()) .map(ctx -> (EntityCache) ctx.entityCache(model().type())); } diff --git a/storm-core/src/main/java/st/orm/core/spi/DefaultTransactionTemplateProviderImpl.java b/storm-core/src/main/java/st/orm/core/spi/DefaultTransactionTemplateProviderImpl.java index 19bc48c4a..77df356a4 100644 --- a/storm-core/src/main/java/st/orm/core/spi/DefaultTransactionTemplateProviderImpl.java +++ b/storm-core/src/main/java/st/orm/core/spi/DefaultTransactionTemplateProviderImpl.java @@ -22,6 +22,7 @@ import java.lang.reflect.Method; import java.lang.reflect.Proxy; +import java.sql.Connection; import java.util.HashMap; import java.util.Map; import java.util.Optional; @@ -35,6 +36,42 @@ public class DefaultTransactionTemplateProviderImpl implements TransactionTempla private static final Object SPRING_CTX_RESOURCE_KEY = DefaultTransactionTemplateProviderImpl.class.getName() + ".SPRING_TX_CONTEXT"; + 
/** + * Minimum transaction isolation level required for entity caching to be enabled. + * + *

Transactions with an isolation level below this threshold will not use entity caching, which means dirty + * checking will treat all entities as dirty (resulting in full-row updates). This prevents the entity cache from + * masking changes that the application expects to see at lower isolation levels.

+ * + *

The default value is {@link Connection#TRANSACTION_READ_COMMITTED}, meaning entity caching is disabled only + * for {@code READ_UNCOMMITTED} transactions. This can be overridden using the system property + * {@code storm.entityCache.minIsolationLevel}.

+ */ + private static final int MIN_ISOLATION_LEVEL_FOR_CACHE = parseMinIsolationLevel(); + + private static int parseMinIsolationLevel() { + String value = System.getProperty("storm.entityCache.minIsolationLevel"); + if (value == null || value.isBlank()) { + return Connection.TRANSACTION_READ_COMMITTED; + } + value = value.trim().toUpperCase(); + return switch (value) { + case "NONE", "0" -> Connection.TRANSACTION_NONE; + case "READ_UNCOMMITTED", "1" -> Connection.TRANSACTION_READ_UNCOMMITTED; + case "READ_COMMITTED", "2" -> Connection.TRANSACTION_READ_COMMITTED; + case "REPEATABLE_READ", "4" -> Connection.TRANSACTION_REPEATABLE_READ; + case "SERIALIZABLE", "8" -> Connection.TRANSACTION_SERIALIZABLE; + default -> { + try { + yield Integer.parseInt(value); + } catch (NumberFormatException e) { + throw new PersistenceException( + "Invalid value for storm.entityCache.minIsolationLevel: '%s'.".formatted(value)); + } + } + }; + } + @Override public TransactionTemplate getTransactionTemplate() { return new TransactionTemplate() { @@ -126,6 +163,13 @@ public boolean isReadOnly() { @Override public EntityCache, ?> entityCache(@Nonnull Class> entityType) { + // Check if entity caching is disabled for this isolation level. + Integer isolationLevel = springReflection.getCurrentTransactionIsolationLevel(); + // Spring returns null when no explicit isolation level is set (database default). + // In that case, we assume the database default (typically READ_COMMITTED or higher) and enable caching. + if (isolationLevel != null && isolationLevel < MIN_ISOLATION_LEVEL_FOR_CACHE) { + return null; + } // We use computeIfAbsent so the "get or create" is a single operation. 
// // Why: @@ -158,6 +202,7 @@ private static final class SpringReflection { private final Method isActualTransactionActive; private final Method isCurrentTransactionReadOnly; + private final Method getCurrentTransactionIsolationLevel; private final Method getResource; private final Method bindResource; private final Method registerSynchronization; @@ -167,6 +212,7 @@ private static final class SpringReflection { private SpringReflection( Method isActualTransactionActive, Method isCurrentTransactionReadOnly, + Method getCurrentTransactionIsolationLevel, Method getResource, Method bindResource, Method registerSynchronization, @@ -175,6 +221,7 @@ private SpringReflection( ) { this.isActualTransactionActive = isActualTransactionActive; this.isCurrentTransactionReadOnly = isCurrentTransactionReadOnly; + this.getCurrentTransactionIsolationLevel = getCurrentTransactionIsolationLevel; this.getResource = getResource; this.bindResource = bindResource; this.registerSynchronization = registerSynchronization; @@ -188,6 +235,7 @@ static SpringReflection tryLoad() { Class tsm = Class.forName(TSM_FQCN, false, classLoader); Method isActualTransactionActive = tsm.getMethod("isActualTransactionActive"); Method isCurrentTransactionReadOnly = tsm.getMethod("isCurrentTransactionReadOnly"); + Method getCurrentTransactionIsolationLevel = tsm.getMethod("getCurrentTransactionIsolationLevel"); Method getResource = tsm.getMethod("getResource", Object.class); Method bindResource = tsm.getMethod("bindResource", Object.class, Object.class); // Cleanup hooks (may not exist in very old Spring). 
@@ -204,6 +252,7 @@ static SpringReflection tryLoad() { return new SpringReflection( isActualTransactionActive, isCurrentTransactionReadOnly, + getCurrentTransactionIsolationLevel, getResource, bindResource, registerSynchronization, @@ -231,6 +280,14 @@ boolean isCurrentTransactionReadOnly() { } } + Integer getCurrentTransactionIsolationLevel() { + try { + return (Integer) getCurrentTransactionIsolationLevel.invoke(null); + } catch (Throwable t) { + return null; + } + } + Object getResource(Object key) { try { return getResource.invoke(null, key); diff --git a/storm-core/src/main/java/st/orm/core/spi/EntityCacheImpl.java b/storm-core/src/main/java/st/orm/core/spi/EntityCacheImpl.java index b44d86178..33e76a196 100644 --- a/storm-core/src/main/java/st/orm/core/spi/EntityCacheImpl.java +++ b/storm-core/src/main/java/st/orm/core/spi/EntityCacheImpl.java @@ -57,8 +57,11 @@ */ public final class EntityCacheImpl, ID> implements EntityCache { + /** Queue for tracking garbage-collected entities to enable lazy cleanup of {@link #map}. */ private final ReferenceQueue queue = new ReferenceQueue<>(); - private final Map> map = new HashMap<>(); + + /** Map from primary key to weakly-referenced entity. Keys are held strongly; values are weak references. */ + private final Map> map = new HashMap<>(); /** * Retrieves an entity from the cache by primary key, if available. 
@@ -76,7 +79,7 @@ public final class EntityCacheImpl, ID> implements EntityCa @Override public Optional get(@Nonnull ID pk) { drainQueue(); - PkWeakRef ref = map.get(pk); + PkWeakReference ref = map.get(pk); if (ref == null) { return Optional.empty(); } @@ -110,14 +113,14 @@ public Optional get(@Nonnull ID pk) { public E intern(@Nonnull E entity) { drainQueue(); ID pk = entity.id(); - PkWeakRef existingRef = map.get(pk); + PkWeakReference existingRef = map.get(pk); if (existingRef != null) { E existing = existingRef.get(); if (existing != null && existing.equals(entity)) { return existing; } } - map.put(pk, new PkWeakRef<>(pk, entity, queue)); + map.put(pk, new PkWeakReference<>(pk, entity, queue)); return entity; } @@ -135,7 +138,7 @@ public E intern(@Nonnull E entity) { public void set(@Nonnull E entity) { drainQueue(); ID pk = entity.id(); - map.put(pk, new PkWeakRef<>(pk, entity, queue)); + map.put(pk, new PkWeakReference<>(pk, entity, queue)); } /** @@ -153,7 +156,7 @@ public void set(@Nonnull Iterable entities) { drainQueue(); for (E entity : entities) { ID pk = entity.id(); - map.put(pk, new PkWeakRef<>(pk, entity, queue)); + map.put(pk, new PkWeakReference<>(pk, entity, queue)); } } @@ -239,18 +242,34 @@ public void clear() { drainQueue(); } + /** + * Removes stale entries from {@link #map} by polling the reference queue. + * + *

When an entity is garbage collected, its {@link PkWeakReference} is enqueued. This method polls the queue + * and removes the corresponding entries from the map. Uses a two-argument remove to ensure only the exact + * weak reference is removed, preventing removal of a newer entry that may have been added with the same key.

+ */ private void drainQueue() { - PkWeakRef ref; + PkWeakReference weakReference; //noinspection unchecked - while ((ref = (PkWeakRef) queue.poll()) != null) { - map.remove(ref.pk, ref); + while ((weakReference = (PkWeakReference) queue.poll()) != null) { + map.remove(weakReference.pk, weakReference); } } - private static final class PkWeakRef extends WeakReference { + /** + * A weak reference to an entity that retains the associated primary key for map cleanup. + * + *

When the entity is garbage collected, this reference is enqueued in the {@link ReferenceQueue}, allowing + * {@link #drainQueue()} to remove the corresponding entry from {@link #map} using the stored primary key.

+ * + * @param the primary key type. + * @param the entity type. + */ + private static final class PkWeakReference extends WeakReference { final ID pk; - PkWeakRef(ID pk, E referent, ReferenceQueue q) { + PkWeakReference(ID pk, E referent, ReferenceQueue q) { super(referent, q); this.pk = pk; } diff --git a/storm-core/src/main/java/st/orm/core/spi/TransactionContext.java b/storm-core/src/main/java/st/orm/core/spi/TransactionContext.java index acdf0ac67..601c1996b 100644 --- a/storm-core/src/main/java/st/orm/core/spi/TransactionContext.java +++ b/storm-core/src/main/java/st/orm/core/spi/TransactionContext.java @@ -16,6 +16,7 @@ package st.orm.core.spi; import jakarta.annotation.Nonnull; +import jakarta.annotation.Nullable; import st.orm.Entity; /** @@ -33,7 +34,19 @@ public interface TransactionContext { /** * Returns a transaction-local cache for entities of the given type, keyed by primary key. + * + *

Returns {@code null} if entity caching is disabled for this transaction. This can happen when the + * transaction's isolation level is below the configured minimum for entity caching. At low isolation levels + * (e.g., {@code READ_UNCOMMITTED}), entity caching is disabled to prevent the cache from masking changes + * that the application expects to see.

+ * + *

When {@code null} is returned, dirty checking will treat all entities as dirty, resulting in full-row + * updates.

+ * + * @param entityType the entity type for which to retrieve the cache. + * @return the entity cache, or {@code null} if caching is disabled for this transaction. */ + @Nullable EntityCache, ?> entityCache(@Nonnull Class> entityType); /** diff --git a/storm-core/src/main/java/st/orm/core/spi/WeakInterner.java b/storm-core/src/main/java/st/orm/core/spi/WeakInterner.java index 527641a28..d4071ef1a 100644 --- a/storm-core/src/main/java/st/orm/core/spi/WeakInterner.java +++ b/storm-core/src/main/java/st/orm/core/spi/WeakInterner.java @@ -16,64 +16,180 @@ package st.orm.core.spi; import jakarta.annotation.Nonnull; +import st.orm.Entity; +import st.orm.Ref; +import java.lang.ref.ReferenceQueue; import java.lang.ref.WeakReference; +import java.util.HashMap; import java.util.Map; import java.util.WeakHashMap; import static java.util.Objects.requireNonNull; /** - * A weak interner that allows fast lookups and retrieval of existing instances based on equality, while holding - * elements weakly to permit garbage collection. + * A weak interner that ensures canonical instances of objects while holding them weakly to permit garbage collection. + * + *

This class uses a dual-path interning strategy optimized for different object types:

+ *
    + *
  • Entities: Uses primary key-based lookup via {@link Ref} for efficient equality checks. Entities are + * stored in a separate map with {@link ReferenceQueue}-based cleanup to ensure stale entries are removed when + * entities are garbage collected.
  • + *
  • Non-entities: Uses object equality-based lookup via {@link WeakHashMap}, which provides automatic + * cleanup when objects are no longer strongly referenced.
  • + *
+ * + *

The primary key-based lookup for entities avoids potentially expensive deep equality checks on complex entity + * objects, while maintaining correct identity semantics (same primary key = same canonical instance).

+ * + *

This class is not thread-safe. A new instance is expected to be created for each result set processing call, + * ensuring that interning is scoped to a single query execution.

*/ public final class WeakInterner { + + /** Map for non-entity objects, using object equality for lookup. Keys are held weakly. */ private final Map> map; + /** Queue for tracking garbage-collected entities to enable lazy cleanup of {@link #entityMap}. */ + private final ReferenceQueue> queue; + + /** Map for entities, using {@link Ref} (primary key) for efficient lookup. Keys are held strongly. */ + private final Map, RefWeakReference> entityMap; + + /** + * Creates a new weak interner. + */ public WeakInterner() { map = new WeakHashMap<>(); + queue = new ReferenceQueue<>(); + entityMap = new HashMap<>(); } /** * Interns the given object, ensuring that only one canonical instance exists. If an equivalent object is already * present, returns the existing instance. Otherwise, adds the new object to the interner and returns it. * - * @param obj The object to intern. + *

For {@link Entity} instances, lookup is performed using the entity's primary key (via {@link Ref}) for + * efficiency. For all other objects, lookup is based on object equality.

+ * + * @param object the object to intern. + * @param the type of the object. * @return the canonical instance of the object. + * @throws NullPointerException if {@code object} is {@code null}. + */ + public T intern(@Nonnull T object) { + requireNonNull(object, "Cannot intern null object."); + if (object instanceof Entity entity) { + //noinspection unchecked + return (T) internEntity(entity); + } + return internObject(object); + } + + /** + * Retrieves a cached entity by its type and primary key, if available. + * + *

This method enables early cache lookups before constructing nested objects. If an entity with the given + * type and primary key was previously interned and is still reachable, it is returned.

+ * + * @param entityType the entity class. + * @param pk the primary key value. + * @param the entity type. + * @return the cached entity, or {@code null} if not found or already garbage collected. */ - public T intern(@Nonnull T obj) { - requireNonNull(obj, "Cannot intern null object."); - // Check if an equivalent object already exists. - WeakReference existing = map.get(obj); + public > E get(@Nonnull Class entityType, @Nonnull Object pk) { + drainQueue(); + Ref ref = Ref.of(entityType, pk); + WeakReference> existing = entityMap.get(ref); + if (existing != null) { + Entity result = existing.get(); + if (result != null) { + //noinspection unchecked + return (E) result; + } + } + return null; + } + + /** + * Interns an entity using its primary key (via {@link Ref}) for efficient lookup. + * + *

This avoids expensive deep equality checks on complex entity objects. The entity is stored with a weak + * reference, and cleanup is handled via {@link #drainQueue()} when entities are garbage collected.

+ * + * @param entity the entity to intern. + * @param the entity type. + * @return the canonical instance for the entity's primary key. + */ + private > E internEntity(@Nonnull E entity) { + drainQueue(); + Ref ref = Ref.of(entity); + WeakReference> existing = entityMap.get(ref); + if (existing != null) { + var result = existing.get(); + if (result != null) { + //noinspection unchecked + return (E) result; + } + } + entityMap.put(ref, new RefWeakReference(ref, entity, queue)); + return entity; + } + + /** + * Interns a non-entity object using object equality for lookup. + * + *

Uses {@link WeakHashMap} which automatically removes entries when keys are garbage collected.

+ * + * @param object the object to intern. + * @param the type of the object. + * @return the canonical instance. + * @throws IllegalArgumentException if an equivalent object of a different class is already interned. + */ + private T internObject(@Nonnull T object) { + WeakReference existing = map.get(object); if (existing != null) { // Equivalent object found; return existing instance var result = existing.get(); if (result != null) { - if (result.getClass() != obj.getClass()) { + if (result.getClass() != object.getClass()) { throw new IllegalArgumentException("Cannot intern objects of different classes."); } //noinspection unchecked return (T) result; } - return obj; + return object; } - map.put(obj, new WeakReference<>(obj)); - return obj; + map.put(object, new WeakReference<>(object)); + return object; } /** - * Returns the number of interned objects currently in the interner. + * Removes stale entries from {@link #entityMap} by polling the reference queue. * - * @return The number of interned objects. + *

When an entity is garbage collected, its {@link RefWeakReference} is enqueued. This method polls the queue + * and removes the corresponding entries from the map. Uses a two-argument remove to ensure only the exact + * weak reference is removed, preventing removal of a newer entry with the same key.

*/ - public int size() { - return map.size(); + private void drainQueue() { + RefWeakReference weakReference; + while ((weakReference = (RefWeakReference) queue.poll()) != null) { + entityMap.remove(weakReference.ref, weakReference); + } } /** - * Clears all interned objects from the interner. + * A weak reference to an entity that retains the associated {@link Ref} for map cleanup. + * + *

When the entity is garbage collected, this reference is enqueued in the {@link ReferenceQueue}, allowing + * {@link #drainQueue()} to remove the corresponding entry from {@link #entityMap} using the stored ref.

*/ - public void clear() { - map.clear(); + private static final class RefWeakReference extends WeakReference> { + final Ref ref; + + RefWeakReference(Ref ref, Entity referent, ReferenceQueue> q) { + super(referent, q); + this.ref = ref; + } } } \ No newline at end of file diff --git a/storm-core/src/main/java/st/orm/core/template/impl/PreparedStatementTemplateImpl.java b/storm-core/src/main/java/st/orm/core/template/impl/PreparedStatementTemplateImpl.java index feebf8191..2179df184 100644 --- a/storm-core/src/main/java/st/orm/core/template/impl/PreparedStatementTemplateImpl.java +++ b/storm-core/src/main/java/st/orm/core/template/impl/PreparedStatementTemplateImpl.java @@ -108,6 +108,7 @@ public PreparedStatementTemplateImpl(@Nonnull DataSource dataSource) { var transactionContext = transactionTemplate.currentContext().orElse(null); Connection connection = getConnection(dataSource, transactionContext); PreparedStatement preparedStatement = null; + boolean success = false; try { if (!generatedKeys.isEmpty()) { try { @@ -128,8 +129,14 @@ public PreparedStatementTemplateImpl(@Nonnull DataSource dataSource) { } else { bindVariables.setBatchListener(getBatchListener(preparedStatement, parameters)); } + success = true; } finally { - if (preparedStatement == null) { + if (!success) { + if (preparedStatement != null) { + try { + preparedStatement.close(); + } catch (SQLException ignore) {} + } releaseConnection(connection, dataSource, transactionContext); } } @@ -156,27 +163,37 @@ public PreparedStatementTemplateImpl(@Nonnull Connection connection) { var bindVariables = sql.bindVariables().orElse(null); var generatedKeys = sql.generatedKeys(); PreparedStatement preparedStatement = null; - if (!generatedKeys.isEmpty()) { - try { + boolean success = false; + try { + if (!generatedKeys.isEmpty()) { + try { + //noinspection SqlSourceToSinkFlow + preparedStatement = connection.prepareStatement(statement, generatedKeys.toArray(new String[0])); + } catch 
(SQLFeatureNotSupportedException ignore) {} + } + if (preparedStatement == null) { //noinspection SqlSourceToSinkFlow - preparedStatement = connection.prepareStatement(statement, generatedKeys.toArray(new String[0])); - } catch (SQLFeatureNotSupportedException ignore) {} - } - if (preparedStatement == null) { - //noinspection SqlSourceToSinkFlow - preparedStatement = connection.prepareStatement(statement); - } - var transactionContext = transactionTemplate.currentContext().orElse(null); - if (transactionContext != null) { - preparedStatement = transactionContext.getDecorator(PreparedStatement.class) - .decorate(preparedStatement); - } - if (bindVariables == null) { - setParameters(preparedStatement, parameters); - } else { - bindVariables.setBatchListener(getBatchListener(preparedStatement, parameters)); + preparedStatement = connection.prepareStatement(statement); + } + var transactionContext = transactionTemplate.currentContext().orElse(null); + if (transactionContext != null) { + preparedStatement = transactionContext.getDecorator(PreparedStatement.class) + .decorate(preparedStatement); + } + if (bindVariables == null) { + setParameters(preparedStatement, parameters); + } else { + bindVariables.setBatchListener(getBatchListener(preparedStatement, parameters)); + } + success = true; + return preparedStatement; + } finally { + if (!success && preparedStatement != null) { + try { + preparedStatement.close(); + } catch (SQLException ignore) {} + } } - return preparedStatement; }; this.modelBuilder = ModelBuilder.newInstance(); this.tableAliasResolver = TableAliasResolver.DEFAULT; diff --git a/storm-core/src/main/java/st/orm/core/template/impl/RecordMapper.java b/storm-core/src/main/java/st/orm/core/template/impl/RecordMapper.java index 0f56b8182..895d6ea10 100644 --- a/storm-core/src/main/java/st/orm/core/template/impl/RecordMapper.java +++ b/storm-core/src/main/java/st/orm/core/template/impl/RecordMapper.java @@ -46,13 +46,67 @@ import static st.orm.UpdateMode.OFF; 
import static st.orm.core.repository.impl.DirtySupport.getUpdateMode; import static st.orm.core.spi.Providers.getORMConverter; +import static st.orm.core.template.impl.RecordReflection.findPkField; import static st.orm.core.template.impl.RecordReflection.getRecordType; import static st.orm.core.template.impl.RecordReflection.getRefPkType; import static st.orm.core.template.impl.RecordReflection.getRefDataType; import static st.orm.core.template.impl.RecordReflection.isRecord; /** - * Factory for creating instances for record types. + * Factory for creating {@link ObjectMapper} instances that construct Java records from JDBC result set columns. + * + *

This class handles the complex mapping from flat JDBC column arrays to nested record structures, including:

+ *
    + *
  • Recursive expansion of nested records
  • + *
  • Custom type converters via {@code @Convert} annotation
  • + *
  • Enum mapping (by name or ordinal)
  • + *
  • {@link Ref} creation for entity references
  • + *
  • Nullable field handling
  • + *
+ * + *

Compilation and Caching

+ *

Record mapping plans are compiled once per record type and cached globally. The compilation produces:

+ *
    + *
  • An {@link ArgumentPlan} containing {@link Step} instances for each constructor parameter
  • + *
  • Expanded parameter types reflecting the flattened JDBC column structure
  • + *
+ * + *

Interning and Caching

+ *

To ensure object identity consistency and reduce memory usage, constructed records are interned:

+ *
    + *
  • Entities within a transaction: Interned via {@link EntityCache} (transaction-scoped)
  • + *
  • Other records and entities: Interned via {@link WeakInterner} (query-scoped)
  • + *
+ * + *

Entity Cache Scoping

+ *

The entity cache is transaction-scoped and available in both read-write and read-only transactions. This provides + * object identity consistency within a transaction: reading the same entity multiple times returns the same instance.

+ * + *

The entity cache serves two purposes:

+ *
    + *
  • Dirty tracking: The cached state serves as the baseline for detecting changes when updating entities + * (see {@link st.orm.DynamicUpdate})
  • + *
  • Construction optimization: Entities already in cache can be returned directly, skipping reconstruction
  • + *
+ * + *

The entity cache is not available when:

+ *
    + *
  • There is no active transaction (e.g., {@code NOT_SUPPORTED} propagation)
  • + *
  • The transaction isolation level is below the configured minimum for entity caching (the minimum defaults to {@code READ_COMMITTED}, so by default only {@code READ_UNCOMMITTED} transactions are affected)
  • + *
+ * + *

At {@code READ_UNCOMMITTED}, the entity cache is disabled because the application explicitly expects to see + * uncommitted changes from other transactions. Caching would mask these changes, contradicting the requested + * isolation semantics.

+ * + *

Early Cache Lookup Optimization

+ *

For both top-level and nested entities, the mapper extracts the primary key directly from the flat column array + * before constructing the entity or its nested objects. If a cached entity with that PK exists, construction + * is skipped entirely, improving performance for queries that return duplicate entity references.

+ * + * @see ObjectMapper + * @see EntityCache + * @see WeakInterner */ final class RecordMapper { @@ -74,25 +128,39 @@ static Optional> getFactory(int columnCount, @Nonnull RefFactory refFactory, @Nullable TransactionContext transactionContext) throws SqlTemplateException { if (getParameterCount(type) == columnCount) { - return Optional.of(wrapConstructor( - type, - refFactory, - transactionContext == null ? null : transactionContext.isReadOnly() ? null : transactionContext - )); + return Optional.of(wrapConstructor(type, refFactory, transactionContext)); } return empty(); } - private record Compiled(@Nonnull ArgumentPlan plan, @Nonnull Class[] parameterTypes) {} + /** + * Holds the compiled mapping plan and expanded parameter types for a record type. + * + * @param plan the compiled argument plan for adapting flat JDBC args to constructor args. + * @param parameterTypes the expanded JDBC column types (flattened from nested records). + */ + private record Compiled(@Nonnull ArgumentPlan plan, @Nonnull Class[] parameterTypes, @Nonnull PkInfo pkInfo) {} + /** Global cache of compiled plans, keyed by record class. Thread-safe for concurrent access. */ private static final ConcurrentMap, Compiled> COMPILED = new ConcurrentHashMap<>(); + /** + * Returns the compiled plan for the given record type, creating and caching it if necessary. + * + * @param type the record type to compile. + * @param refFactory the factory for resolving Ref parameter types. + * @return the compiled plan. + * @throws SqlTemplateException if compilation fails. + */ private static Compiled compiledFor(@Nonnull RecordType type, @Nonnull RefFactory refFactory) throws SqlTemplateException { try { return COMPILED.computeIfAbsent(type.type(), t -> { try { - return new Compiled(compilePlan(type), expandParameterTypes(type, refFactory)); + PkInfo pkInfo = Entity.class.isAssignableFrom(type.type()) + ? 
calculatePkInfo(type) + : PkInfo.NONE; + return new Compiled(compilePlan(type), expandParameterTypes(type, refFactory), pkInfo); } catch (SqlTemplateException e) { throw new RuntimeException(e); } @@ -140,13 +208,15 @@ private static ObjectMapper wrapConstructor(@Nonnull RecordType type, @Nonnull RefFactory refFactory, @Nullable TransactionContext transactionContext) throws SqlTemplateException { Compiled compiled = compiledFor(type, refFactory); + boolean isEntity = Entity.class.isAssignableFrom(type.type()); EntityCache, ?> entityCache; - if (transactionContext != null && Entity.class.isAssignableFrom(type.type()) && getUpdateMode(type) != OFF) { + if (transactionContext != null && isEntity && getUpdateMode(type) != OFF) { //noinspection unchecked entityCache = (EntityCache, ?>) transactionContext.entityCache((Class>) type.type()); } else { entityCache = null; } + PkInfo pkInfo = compiled.pkInfo(); var interner = new WeakInterner(); return new ObjectMapper<>() { @Override @@ -157,6 +227,19 @@ public Class[] getParameterTypes() { @SuppressWarnings("unchecked") @Override public T newInstance(@Nonnull Object[] args) throws SqlTemplateException { + // Early cache lookup optimization for top-level entities. + // If we can extract the PK early, check the cache before constructing nested objects. + if (entityCache != null && pkInfo.offset >= 0) { + Object pk = extractPk(args, pkInfo); + if (pk != null) { + //noinspection rawtypes + Optional> cached = ((EntityCache) entityCache).get(pk); + if (cached.isPresent()) { + // Cache hit - skip construction entirely. + return (T) cached.get(); + } + } + } Object[] adaptedArgs = compiled.plan() .adapt(args, 0, false, refFactory, interner, transactionContext) .constructorArgs(); @@ -168,6 +251,36 @@ var record = ObjectMapperFactory.construct((Constructor) type.constructor(), } return record; } + + /** + * Extracts the primary key from args at the configured offset. + * + * @param args the flat argument array. 
+ * @param pkInfo the PK offset and column count information. + * @return the PK value, or null if any PK column is null or PK cannot be extracted. + */ + private Object extractPk(@Nonnull Object[] args, @Nonnull PkInfo pkInfo) throws SqlTemplateException { + int pkStart = pkInfo.offset; + int pkColumnCount = pkInfo.columnCount; + if (pkColumnCount == 1) { + // Simple PK - just return the value. + return args[pkStart]; + } + // Composite PK - construct from columns. + if (pkInfo.constructor == null) { + // Cannot construct composite PK without constructor. + return null; + } + Object[] pkArgs = new Object[pkColumnCount]; + for (int i = 0; i < pkColumnCount; i++) { + Object arg = args[pkStart + i]; + if (arg == null) { + return null; // Null in composite PK means no valid PK. + } + pkArgs[i] = arg; + } + return ObjectMapperFactory.construct(pkInfo.constructor, pkArgs, pkStart); + } }; } @@ -204,6 +317,7 @@ private static Class[] expandParameterTypes(@Nonnull RecordType type, return expandedTypes.toArray(new Class[0]); } + /** Pattern for validating ordinal enum values. */ private static final Pattern INT_PATTERN = Pattern.compile("\\d+"); private static boolean isArgNull(@Nullable Object arg) { @@ -211,9 +325,26 @@ private static boolean isArgNull(@Nullable Object arg) { } /** - * Compiled, reusable plan for adapting flat JDBC args into constructor args for a specific record type. + * A compiled, reusable plan for adapting flat JDBC column values into constructor arguments. + * + *

An argument plan is compiled once per record type and cached. It transforms a flat array of JDBC + * column values (in declaration order) into properly nested constructor arguments, handling type + * conversion, nullable fields, and recursive record construction.

*/ private interface ArgumentPlan { + + /** + * Adapts flat JDBC column values into constructor arguments for a record type. + * + * @param flatArgs the flat array of JDBC column values. + * @param offset the starting offset into flatArgs. + * @param parentNullable whether the parent context allows null values. + * @param refFactory factory for creating {@link Ref} instances. + * @param interner interner for deduplicating records and entities. + * @param tx the transaction context, or null if not in a transaction. + * @return the result containing constructor args and updated offset. + * @throws SqlTemplateException if adaptation fails due to null constraint violations. + */ Result adapt(@Nonnull Object[] flatArgs, int offset, boolean parentNullable, @@ -221,11 +352,42 @@ Result adapt(@Nonnull Object[] flatArgs, @Nonnull WeakInterner interner, @Nullable TransactionContext tx) throws SqlTemplateException; + /** + * The result of adapting flat args. + * + * @param constructorArgs the constructor arguments ready for record instantiation. + * @param offset the updated offset into flatArgs after consuming this record's columns. + */ record Result(@Nonnull Object[] constructorArgs, int offset) {} } + /** + * A single step in the argument adaptation process, responsible for processing one constructor parameter. + * + *

Steps are composed into an {@link ArgumentPlan}. Each step type handles a specific kind of + * constructor parameter:

+ *
    + *
  • {@link PlainStep}: Simple pass-through for primitive/simple types
  • + *
  • {@link ConverterStep}: Custom type conversion via {@code @Convert}
  • + *
  • {@link EnumStep}: Enum mapping by name or ordinal
  • + *
  • {@link RefStep}: Creates {@link Ref} instances for entity references
  • + *
  • {@link RecordStep}: Recursive construction of nested records/entities
  • + *
+ */ private interface Step { + /** + * Applies this step to extract and transform a value from the flat args array. + * + * @param flatArgs the flat array of JDBC column values. + * @param offset mutable offset tracker into flatArgs. + * @param parentNullable whether the parent context allows null values. + * @param refFactory factory for creating {@link Ref} instances. + * @param interner interner for deduplicating records and entities. + * @param tx the transaction context, or null if not in a transaction. + * @return the processed value for this constructor parameter. + * @throws SqlTemplateException if processing fails. + */ Object apply(@Nonnull Object[] flatArgs, @Nonnull Offset offset, boolean parentNullable, @@ -234,7 +396,9 @@ Object apply(@Nonnull Object[] flatArgs, @Nullable TransactionContext tx) throws SqlTemplateException; /** - * Mutable offset holder to avoid allocating pairs/results per step. + * Mutable offset holder to track position in flatArgs across steps. + * + *

Using a mutable holder avoids allocating result pairs for each step.

*/ final class Offset { int i; @@ -242,6 +406,11 @@ final class Offset { } } + /** + * Default implementation of {@link ArgumentPlan} that applies a sequence of steps. + * + *

Each step corresponds to one constructor parameter of the target record type.

+ */ private static final class CompiledArgumentPlan implements ArgumentPlan { private final RecordType type; private final Step[] steps; @@ -276,6 +445,11 @@ public Result adapt(@Nonnull Object[] flatArgs, } } + /** + * Step that passes a single column value through unchanged. + * + *

Used for simple types (primitives, strings, etc.) that don't require conversion.

+ */ private static final class PlainStep implements Step { @Override public Object apply(@Nonnull Object[] flatArgs, @@ -288,8 +462,14 @@ public Object apply(@Nonnull Object[] flatArgs, } } + /** + * Step that applies a custom type converter to one or more columns. + * + *

Used for fields annotated with {@code @Convert}. The converter may consume multiple + * columns (e.g., for composite types) as specified by its parameter count.

+ */ private static final class ConverterStep implements Step { - private final Object converter; // keep the concrete type from Providers (compile-time type is whatever getORMConverter returns) + private final Object converter; private final int paramCount; private ConverterStep(Object converter, int paramCount) { @@ -311,8 +491,9 @@ public Object apply(@Nonnull Object[] flatArgs, } /** - * Small indirection so the compiled step can stay fast without reflection. - * We wrap the real converter in an invoker during compilation. + * Functional interface for invoking converters without reflection at runtime. + * + *

The actual converter is wrapped in this interface during compilation.

*/ @FunctionalInterface interface ConverterInvoker { @@ -320,6 +501,15 @@ interface ConverterInvoker { } } + /** + * Step that maps a column value to an enum constant. + * + *

Supports two mapping strategies via {@link DbEnum} annotation:

+ *
    + *
  • {@link EnumType#NAME}: Maps string values to enum constants by name (default)
  • + *
  • {@link EnumType#ORDINAL}: Maps integer values to enum constants by ordinal
  • + *
+ */ private static final class EnumStep implements Step { private final Class enumType; private final EnumType mapping; @@ -360,6 +550,13 @@ public Object apply(@Nonnull Object[] flatArgs, } } + /** + * Step that creates a {@link Ref} instance from a primary key column. + * + *

Refs are lazy references to entities or projections. The actual entity is not loaded + * until the ref is dereferenced. This step consumes a single PK column and delegates to + * {@link RefFactory} for ref creation and interning.

+ */ private static final class RefStep implements Step { private final Class dataType; @@ -384,6 +581,27 @@ public Object apply(@Nonnull Object[] flatArgs, } } + /** + * Step that recursively constructs a nested record or entity from multiple columns. + * + *

This step handles the most complex case: nested record types that may themselves contain + * further nested records. It delegates to a sub-{@link ArgumentPlan} for recursive construction.

+ * + *

Early Cache Lookup Optimization

+ *

For entity types, this step can extract the primary key directly from the flat column array + * before constructing the entity and its nested objects. If a cached entity with that PK + * exists (in {@link EntityCache} or {@link WeakInterner}), construction is skipped entirely.

+ * + *

This optimization is particularly valuable for queries that return duplicate entity references + * (e.g., joins that repeat the same entity across multiple rows).

+ * + *

Interning

+ *

After construction, entities are interned to ensure identity consistency:

+ *
    + *
  • Entities with dirty tracking: via {@link EntityCache} (transaction-scoped)
  • + *
  • Other entities and records: via {@link WeakInterner} (query-scoped)
  • + *
+ */ private static final class RecordStep implements Step { private final RecordField field; private final RecordType subType; @@ -391,12 +609,33 @@ private static final class RecordStep implements Step { private final boolean subIsEntity; private final boolean subNeedsCache; - private RecordStep(@Nonnull RecordField field, @Nonnull RecordType subType, @Nonnull ArgumentPlan subPlan) { + // Fields for early PK cache lookup optimization. + + /** Offset within this record's flatArgs where PK starts (-1 if not applicable). */ + private final int pkFlatOffset; + /** Number of columns the PK spans. */ + private final int pkColumnCount; + /** Total columns this record consumes (for skipping on cache hit). */ + private final int totalColumnCount; + /** Constructor for composite PKs (null for simple single-column PKs). */ + private final Constructor pkConstructor; + + private RecordStep(@Nonnull RecordField field, + @Nonnull RecordType subType, + @Nonnull ArgumentPlan subPlan, + int pkFlatOffset, + int pkColumnCount, + int totalColumnCount, + @Nullable Constructor pkConstructor) { this.field = field; this.subType = subType; this.subPlan = subPlan; this.subIsEntity = Entity.class.isAssignableFrom(subType.type()); this.subNeedsCache = getUpdateMode(subType) != OFF; + this.pkFlatOffset = pkFlatOffset; + this.pkColumnCount = pkColumnCount; + this.totalColumnCount = totalColumnCount; + this.pkConstructor = pkConstructor; } @Override @@ -408,6 +647,36 @@ public Object apply(@Nonnull Object[] flatArgs, @Nullable TransactionContext context) throws SqlTemplateException { boolean nullableHere = parentNullable || field.nullable(); int start = offset.i; + // Early cache lookup optimization for entities. + // If we can extract the PK early, check the cache before constructing nested objects. 
+ EntityCache, ?> entityCache = null; + if (context != null && subIsEntity && subNeedsCache) { + //noinspection unchecked + entityCache = (EntityCache, ?>) context.entityCache((Class>) subType.type()); + } + if (subIsEntity && pkFlatOffset >= 0) { + Object pk = extractPk(flatArgs, start + pkFlatOffset); + if (pk != null) { + // Try EntityCache first if available, otherwise use WeakInterner. + if (entityCache != null) { + //noinspection unchecked,rawtypes + Optional> cached = ((EntityCache) entityCache).get(pk); + if (cached.isPresent()) { + // Cache hit - skip construction entirely. + offset.i = start + totalColumnCount; + return cached.get(); + } + } else { + //noinspection unchecked + Entity cached = interner.get((Class>) subType.type(), pk); + if (cached != null) { + // Cache hit - skip construction entirely. + offset.i = start + totalColumnCount; + return cached; + } + } + } + } ArgumentPlan.Result r = subPlan.adapt(flatArgs, offset.i, nullableHere, refFactory, interner, context); offset.i = r.offset(); if (field.nullable()) { @@ -443,20 +712,60 @@ public Object apply(@Nonnull Object[] flatArgs, } // Construct nested record. Object record = ObjectMapperFactory.construct(subType.constructor(), childArgs, start); - EntityCache, ?> entityCache; - if (context != null && subIsEntity && subNeedsCache) { - //noinspection unchecked - entityCache = (EntityCache, ?>) context.entityCache((Class>) subType.type()); - } else { - entityCache = null; - } if (entityCache != null) { return entityCache.intern((Entity) record); } return interner.intern(record); } + + /** + * Extracts the primary key from flatArgs at the given offset. + * + * @param flatArgs the flat argument array. + * @param pkStart the starting offset for PK columns. + * @return the PK value, or null if any PK column is null or PK cannot be extracted. 
+ */ + private Object extractPk(@Nonnull Object[] flatArgs, int pkStart) throws SqlTemplateException { + if (pkColumnCount == 1) { + // Simple PK - just return the value. + return flatArgs[pkStart]; + } + // Composite PK - construct from columns. + if (pkConstructor == null) { + // Cannot construct composite PK without constructor. + return null; + } + Object[] pkArgs = new Object[pkColumnCount]; + for (int i = 0; i < pkColumnCount; i++) { + Object arg = flatArgs[pkStart + i]; + if (arg == null) { + return null; // Null in composite PK means no valid PK. + } + pkArgs[i] = arg; + } + return ObjectMapperFactory.construct(pkConstructor, pkArgs, pkStart); + } } + /** + * Compiles an argument plan for the given record type. + * + *

This method analyzes the record's constructor parameters and creates an appropriate + * {@link Step} for each one. The resulting plan can be reused across multiple row mappings.

+ * + *

Step selection is based on parameter type:

+ *
    + *
  • Fields with {@code @Convert}: {@link ConverterStep}
  • + *
  • Nested records: {@link RecordStep} (recursive)
  • + *
  • Enums: {@link EnumStep}
  • + *
  • {@link Ref} types: {@link RefStep}
  • + *
  • All other types: {@link PlainStep}
  • + *
+ * + * @param type the record type to compile a plan for. + * @return the compiled argument plan. + * @throws SqlTemplateException if compilation fails. + */ private static ArgumentPlan compilePlan(@Nonnull RecordType type) throws SqlTemplateException { Class[] paramTypes = type.constructor().getParameterTypes(); Step[] steps = new Step[paramTypes.length]; @@ -475,7 +784,11 @@ private static ArgumentPlan compilePlan(@Nonnull RecordType type) throws SqlTemp if (isRecord(p)) { RecordType sub = getRecordType(p); ArgumentPlan subPlan = compilePlan(sub); - steps[i] = new RecordStep(field, sub, subPlan); + // Calculate PK information for early cache lookup optimization. + PkInfo pkInfo = calculatePkInfo(sub); + int totalColumnCount = getParameterCount(sub); + steps[i] = new RecordStep(field, sub, subPlan, pkInfo.offset, pkInfo.columnCount, + totalColumnCount, pkInfo.constructor); } else if (p.isEnum()) { EnumType enumType = ofNullable(field.getAnnotation(DbEnum.class)).map(DbEnum::value).orElse(NAME); steps[i] = new EnumStep(p, enumType, type.type().getSimpleName(), field.name()); @@ -487,4 +800,81 @@ private static ArgumentPlan compilePlan(@Nonnull RecordType type) throws SqlTemp } return new CompiledArgumentPlan(type, steps); } + + /** + * Holds primary key location and construction information for an entity type. + * + *

This information enables the early cache lookup optimization in {@link RecordStep}.

+ * + * @param offset the offset into flatArgs where the PK columns start (-1 if not applicable). + * @param columnCount the number of columns the PK spans. + * @param constructor the constructor for composite PKs (null for simple single-column PKs). + */ + private record PkInfo(int offset, int columnCount, @Nullable Constructor constructor) { + /** Sentinel value indicating no PK information is available (non-entity types). */ + static final PkInfo NONE = new PkInfo(-1, 0, null); + } + + /** + * Calculates the primary key offset, column count, and constructor for the given record type. + * + *

This information enables early cache lookups by extracting the PK directly from flatArgs + * before constructing nested objects.

+ * + * @param type the record type to analyze. + * @return PkInfo containing offset, column count, and constructor (for composite PKs). + */ + private static PkInfo calculatePkInfo(@Nonnull RecordType type) throws SqlTemplateException { + // Only entities have PKs. + if (!Entity.class.isAssignableFrom(type.type())) { + return PkInfo.NONE; + } + // Find the PK field. + Optional pkFieldOpt = findPkField(type.type()); + if (pkFieldOpt.isEmpty()) { + return PkInfo.NONE; + } + RecordField pkField = pkFieldOpt.get(); + // Calculate the offset: sum of column counts for all fields before the PK field. + int offset = 0; + for (RecordField field : type.fields()) { + if (field.name().equals(pkField.name())) { + break; + } + offset += getFieldColumnCount(field); + } + // Calculate how many columns the PK spans. + int pkColumnCount = getFieldColumnCount(pkField); + // For composite PKs (record types), we need the constructor. + Constructor pkConstructor = null; + if (isRecord(pkField.type()) && pkColumnCount > 1) { + pkConstructor = getRecordType(pkField.type()).constructor(); + } + return new PkInfo(offset, pkColumnCount, pkConstructor); + } + + /** + * Returns the number of JDBC columns a field consumes in the flat args array. + * + *

This accounts for:

+ *
    + *
  • Custom converters that may consume multiple columns
  • + *
  • Nested records that expand to multiple columns recursively
  • + *
  • Simple fields that consume exactly one column
  • + *
+ * + * @param field the field to calculate column count for. + * @return the number of columns the field consumes. + * @throws SqlTemplateException if the field type cannot be analyzed. + */ + private static int getFieldColumnCount(@Nonnull RecordField field) throws SqlTemplateException { + var converter = getORMConverter(field); + if (converter.isPresent()) { + return converter.get().getParameterCount(); + } + if (isRecord(field.type())) { + return getParameterCount(getRecordType(field.type())); + } + return 1; + } } \ No newline at end of file diff --git a/storm-core/src/main/java/st/orm/core/template/impl/SqlTemplateImpl.java b/storm-core/src/main/java/st/orm/core/template/impl/SqlTemplateImpl.java index 98576a269..f02e717b3 100644 --- a/storm-core/src/main/java/st/orm/core/template/impl/SqlTemplateImpl.java +++ b/storm-core/src/main/java/st/orm/core/template/impl/SqlTemplateImpl.java @@ -33,8 +33,6 @@ import java.util.ArrayList; import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; import java.util.function.Function; import static java.lang.Integer.parseInt; @@ -52,7 +50,8 @@ public final class SqlTemplateImpl implements SqlTemplate { private static final TemplateMetrics TEMPLATE_METRICS = new TemplateMetrics(LoggerFactory.getLogger("st.orm.metrics")); - private static final Map> CACHE_MAP = new ConcurrentHashMap<>(); + private static final SegmentedLruCache> CACHE_MAP = + new SegmentedLruCache<>(64); private static final int TEMPLATE_CACHE_SIZE = Math.max(0, parseInt(System.getProperty("storm.templateCacheSize", "2048"))); @@ -101,7 +100,7 @@ public SqlTemplateImpl(boolean positionalOnly, this.cache = null; } else { var key = List.of(positionalOnly, expandCollection, supportRecords, new IdentityKey(modelBuilder), new IdentityKey(tableAliasResolver), dialect.name()); - this.cache = CACHE_MAP.computeIfAbsent(key, ignore -> new SegmentedLruCache<>(TEMPLATE_CACHE_SIZE)); + this.cache = CACHE_MAP.getOrCompute(key, () -> new 
SegmentedLruCache<>(TEMPLATE_CACHE_SIZE)); } } @@ -343,7 +342,10 @@ public Sql process(@Nonnull TemplateString template) throws SqlTemplateException preparedTemplate.processor().compile(preparedTemplate.context(), false); processor = preparedTemplate.processor(); if (compilationKey != null) { - cache.put(compilationKey, processor); + var existing = cache.putIfAbsent(compilationKey, processor); + if (existing != null) { + processor = existing; // Use the processor that won the race. + } } } else { request.hit(); diff --git a/storm-core/src/main/java/st/orm/core/template/impl/UncheckedSqlTemplateException.java b/storm-core/src/main/java/st/orm/core/template/impl/UncheckedSqlTemplateException.java index 80ae76365..70fb01200 100644 --- a/storm-core/src/main/java/st/orm/core/template/impl/UncheckedSqlTemplateException.java +++ b/storm-core/src/main/java/st/orm/core/template/impl/UncheckedSqlTemplateException.java @@ -15,8 +15,11 @@ */ package st.orm.core.template.impl; +import jakarta.annotation.Nonnull; import st.orm.core.template.SqlTemplateException; +import static java.util.Objects.requireNonNull; + /** * Runtime wrapper for {@link SqlTemplateException}. 
* @@ -37,8 +40,8 @@ final class UncheckedSqlTemplateException extends RuntimeException { * * @param e the checked exception to wrap */ - public UncheckedSqlTemplateException(SqlTemplateException e) { - super(e); + public UncheckedSqlTemplateException(@Nonnull SqlTemplateException e) { + super(requireNonNull(e)); this.cause = e; } diff --git a/storm-core/src/test/java/st/orm/core/RepositoryPreparedStatementIntegrationTest.java b/storm-core/src/test/java/st/orm/core/RepositoryPreparedStatementIntegrationTest.java index aa5b17175..e75cc0a0e 100644 --- a/storm-core/src/test/java/st/orm/core/RepositoryPreparedStatementIntegrationTest.java +++ b/storm-core/src/test/java/st/orm/core/RepositoryPreparedStatementIntegrationTest.java @@ -12,6 +12,7 @@ import org.springframework.test.context.ContextConfiguration; import org.springframework.test.context.junit.jupiter.SpringExtension; +import org.springframework.transaction.annotation.Transactional; import st.orm.Convert; import st.orm.Converter; import st.orm.Data; @@ -65,11 +66,13 @@ import java.sql.SQLIntegrityConstraintViolationException; import java.time.Instant; import java.time.LocalDate; -import java.util.Collections; import java.util.IdentityHashMap; import java.util.List; import java.util.concurrent.atomic.AtomicReference; +import static java.util.Collections.newSetFromMap; +import static java.util.stream.Collectors.toCollection; +import static java.util.stream.Collectors.toSet; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -77,6 +80,9 @@ import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.springframework.transaction.annotation.Isolation.READ_UNCOMMITTED; +import static 
org.springframework.transaction.annotation.Propagation.NOT_SUPPORTED; +import static org.springframework.transaction.annotation.Propagation.REQUIRED; import static st.orm.GenerationStrategy.NONE; import static st.orm.core.template.Templates.alias; import static st.orm.core.template.Templates.column; @@ -971,7 +977,7 @@ public void testSelectWithTwoPetRefsWithoutPathTemplate() { @Test public void testInternerRecord() { var pets = ORMTemplate.of(dataSource).entity(Pet.class).select().getResultList(); - var owners = Collections.newSetFromMap(new IdentityHashMap<>()); + var owners = newSetFromMap(new IdentityHashMap<>()); owners.addAll(pets.stream().map(Pet::owner).toList()); assertEquals(11, owners.size()); } @@ -979,7 +985,7 @@ public void testInternerRecord() { @Test public void testInternerRef() { var pets = ORMTemplate.of(dataSource).entity(PetOwnerRef.class).select().getResultList(); - var owners = Collections.newSetFromMap(new IdentityHashMap<>()); + var owners = newSetFromMap(new IdentityHashMap<>()); owners.addAll(pets.stream().map(PetOwnerRef::owner).toList()); assertEquals(11, owners.size()); } @@ -1603,4 +1609,98 @@ public void testSelectCompoundFKNestedDbColumns() { assertInstanceOf(JdbcSQLSyntaxErrorException.class, e.getCause()); }); } + + @Transactional(propagation = REQUIRED) + @Test + public void testInternerRegularTransaction() { + // Uses EntityCache. + var visits = ORMTemplate.of(dataSource).entity(Visit.class) + .select() + .getResultList(); + assertEquals(14, visits.size()); + var identitySet = visits.stream().map(it -> it.pet()).collect(toCollection(() -> newSetFromMap(new IdentityHashMap<>()))); + var pkSet = visits.stream().map(it -> it.pet().id()).collect(toSet()); + assertEquals(pkSet.size(), identitySet.size()); + } + + @Transactional(readOnly = true) + @Test + public void testInternerReadOnlyTransaction() { + // Uses WeakInterner. 
+ var visits = ORMTemplate.of(dataSource).entity(Visit.class) + .select() + .getResultList(); + assertEquals(14, visits.size()); + var identitySet = visits.stream().map(it -> it.pet()).collect(toCollection(() -> newSetFromMap(new IdentityHashMap<>()))); + var pkSet = visits.stream().map(it -> it.pet().id()).collect(toSet()); + assertEquals(pkSet.size(), identitySet.size()); + } + + @Transactional(propagation = NOT_SUPPORTED) + @Test + public void testInternerNoTransaction() { + // Uses WeakInterner. + var visits = ORMTemplate.of(dataSource).entity(Visit.class) + .select() + .getResultList(); + assertEquals(14, visits.size()); + var identitySet = visits.stream().map(it -> it.pet()).collect(toCollection(() -> newSetFromMap(new IdentityHashMap<>()))); + var pkSet = visits.stream().map(it -> it.pet().id()).collect(toSet()); + assertEquals(pkSet.size(), identitySet.size()); + } + + @Transactional(isolation = READ_UNCOMMITTED) + @Test + public void testInternerReadUncomitted() { + // Uses WeakInterner. + var visits = ORMTemplate.of(dataSource).entity(Visit.class) + .select() + .getResultList(); + assertEquals(14, visits.size()); + var identitySet = visits.stream().map(it -> it.pet()).collect(toCollection(() -> newSetFromMap(new IdentityHashMap<>()))); + var pkSet = visits.stream().map(it -> it.pet().id()).collect(toSet()); + assertEquals(pkSet.size(), identitySet.size()); + } + + @Transactional(propagation = REQUIRED) + @Test + public void testEntityCacheAcrossQueries() { + // With entity cache, the same entity should be returned across multiple queries. + var repository = ORMTemplate.of(dataSource).entity(Pet.class); + var pet1 = repository.getById(1); + var pet2 = repository.getById(1); + assertSame(pet1, pet2, "Entity cache should return the same instance across queries"); + } + + @Transactional(readOnly = true) + @Test + public void testEntityCacheInReadOnlyTransaction() { + // Read-only transactions should also use entity cache for optimization. 
+ var repository = ORMTemplate.of(dataSource).entity(Pet.class); + var pet1 = repository.getById(1); + var pet2 = repository.getById(1); + assertSame(pet1, pet2, "Entity cache should return the same instance in read-only transactions"); + } + + @Transactional(propagation = NOT_SUPPORTED) + @Test + public void testNoEntityCacheWithoutTransaction() { + // Without a transaction, there is no scope for entity cache. + var repository = ORMTemplate.of(dataSource).entity(Pet.class); + var pet1 = repository.getById(1); + var pet2 = repository.getById(1); + assertEquals(pet1, pet2, "Entities should be equal"); + assertTrue(pet1 != pet2, "Without transaction, different instances should be returned"); + } + + @Transactional(isolation = READ_UNCOMMITTED) + @Test + public void testNoEntityCacheAtReadUncommitted() { + // Without entity cache (READ_UNCOMMITTED), different instances should be returned. + var repository = ORMTemplate.of(dataSource).entity(Pet.class); + var pet1 = repository.getById(1); + var pet2 = repository.getById(1); + assertEquals(pet1, pet2, "Entities should be equal"); + assertTrue(pet1 != pet2, "Without entity cache, different instances should be returned"); + } } \ No newline at end of file diff --git a/storm-foundation/pom.xml b/storm-foundation/pom.xml index b75f00306..d5aa78e65 100644 --- a/storm-foundation/pom.xml +++ b/storm-foundation/pom.xml @@ -6,7 +6,7 @@ st.orm storm-framework - 1.8.0 + 1.8.1 ../pom.xml storm-foundation diff --git a/storm-foundation/src/main/java/st/orm/DynamicUpdate.java b/storm-foundation/src/main/java/st/orm/DynamicUpdate.java index f9854e146..44afcb958 100644 --- a/storm-foundation/src/main/java/st/orm/DynamicUpdate.java +++ b/storm-foundation/src/main/java/st/orm/DynamicUpdate.java @@ -64,6 +64,11 @@ *
  • Manual or bulk SQL updates bypass dirty checking and may leave cached entities stale.
  • *
  • Field-level updates are a performance optimization and may fall back to full-row updates to preserve batching * efficiency. This does not affect dirty detection.
  • + *
  • Unless configured otherwise, entity observation is automatically disabled for {@code READ_UNCOMMITTED} + * transactions. At this isolation level, the application expects to see uncommitted changes from other + * transactions. Caching observed state would mask these changes, contradicting the requested isolation + * semantics. When observation is disabled, all entities are treated as dirty, resulting in full-row + * updates.
  • * * * @since 1.7 diff --git a/storm-foundation/src/main/java/st/orm/Operator.java b/storm-foundation/src/main/java/st/orm/Operator.java index ab133e865..400f6c8da 100644 --- a/storm-foundation/src/main/java/st/orm/Operator.java +++ b/storm-foundation/src/main/java/st/orm/Operator.java @@ -20,6 +20,12 @@ /** * Represents a comparison operator in a SQL query. + * + *

    Caching contract: Operators may participate in template caching where the operator instance + * becomes part of the cache key for a compiled SQL shape. Custom operator implementations must therefore implement + * stable {@link Object#equals(Object)} and {@link Object#hashCode()} semantics. Two operators must be considered equal + * (and have the same hash code) if they produce the same SQL shape, so that equivalent templates map to the same cached + * compilation result.

    */ @SuppressWarnings({"SwitchStatementWithTooFewBranches", "unused"}) public interface Operator { diff --git a/storm-foundation/src/main/java/st/orm/Ref.java b/storm-foundation/src/main/java/st/orm/Ref.java index b10093912..934aeafd9 100644 --- a/storm-foundation/src/main/java/st/orm/Ref.java +++ b/storm-foundation/src/main/java/st/orm/Ref.java @@ -59,8 +59,9 @@ static > Ref of(@Nonnull E entity) { class DetachedEntity> extends AbstractRef { private final TE entity; - DetachedEntity(TE entity) { + DetachedEntity(@Nonnull TE entity) { requireNonNull(entity, "Entity cannot be null."); + requireNonNull(entity.id(), "Entity ID cannot be null."); this.entity = entity; } diff --git a/storm-jackson/pom.xml b/storm-jackson/pom.xml index f8cb1d618..3d315b08b 100644 --- a/storm-jackson/pom.xml +++ b/storm-jackson/pom.xml @@ -6,7 +6,7 @@ st.orm storm-framework - 1.8.0 + 1.8.1 ../pom.xml storm-jackson diff --git a/storm-jackson/src/main/java/st/orm/jackson/spi/JsonORMConverterImpl.java b/storm-jackson/src/main/java/st/orm/jackson/spi/JsonORMConverterImpl.java index 5430210e2..7f9a75e29 100644 --- a/storm-jackson/src/main/java/st/orm/jackson/spi/JsonORMConverterImpl.java +++ b/storm-jackson/src/main/java/st/orm/jackson/spi/JsonORMConverterImpl.java @@ -40,9 +40,8 @@ import java.lang.reflect.ParameterizedType; import java.lang.reflect.Type; import java.util.List; -import java.util.Map; import java.util.Optional; -import java.util.concurrent.ConcurrentHashMap; +import st.orm.core.template.impl.SegmentedLruCache; import static com.fasterxml.jackson.databind.DeserializationFeature.FAIL_ON_MISSING_CREATOR_PROPERTIES; import static com.fasterxml.jackson.databind.DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES; @@ -56,7 +55,7 @@ */ public final class JsonORMConverterImpl implements ORMConverter { private static final ORMReflection REFLECTION = Providers.getORMReflection(); - private static final Map OBJECT_MAPPER = new ConcurrentHashMap<>(); + private static final 
SegmentedLruCache OBJECT_MAPPER = new SegmentedLruCache<>(1024); private static final ThreadLocal REF_FACTORY = new ThreadLocal<>(); private final RecordField field; @@ -88,9 +87,9 @@ public JsonORMConverterImpl(@Nonnull RecordField field, deserializeAnnotation != null && deserializeAnnotation.using() != JsonDeserializer.None.class ? (Class>) deserializeAnnotation.using() : null; - this.mapper = OBJECT_MAPPER.computeIfAbsent( + this.mapper = OBJECT_MAPPER.getOrCompute( new CacheKey(requireNonNull(json, "json"), type, serializerClass, deserializerClass), - key -> { + () -> { var mapper = new ObjectMapper(); mapper.findAndRegisterModules(); if (!json.failOnUnknown()) { @@ -99,30 +98,30 @@ public JsonORMConverterImpl(@Nonnull RecordField field, if (!json.failOnMissing()) { mapper.disable(FAIL_ON_MISSING_CREATOR_PROPERTIES); } - if (key.sealedType != null) { - mapper.registerSubtypes(getPermittedSubtypes(key.sealedType)); + if (type != null) { + mapper.registerSubtypes(getPermittedSubtypes(type)); } // Register StormModule with supplier for dynamic RefFactory resolution. mapper.registerModule(new StormModule(REF_FACTORY::get)); // Register custom serializers/deserializers if specified. 
- if (key.serializer != null || key.deserializer != null) { + if (serializerClass != null || deserializerClass != null) { var customModule = new SimpleModule(); - if (key.serializer != null) { + if (serializerClass != null) { try { Class fieldType = getRawType(typeReference.getType()).orElse(Object.class); - JsonSerializer serializerInstance = key.serializer.getDeclaredConstructor().newInstance(); + JsonSerializer serializerInstance = serializerClass.getDeclaredConstructor().newInstance(); customModule.addSerializer(fieldType, serializerInstance); } catch (Exception e) { - throw new RuntimeException("Failed to instantiate custom serializer: " + key.serializer, e); + throw new RuntimeException("Failed to instantiate custom serializer: " + serializerClass, e); } } - if (key.deserializer != null) { + if (deserializerClass != null) { try { Class fieldType = getRawType(typeReference.getType()).orElse(Object.class); - JsonDeserializer deserializerInstance = key.deserializer.getDeclaredConstructor().newInstance(); + JsonDeserializer deserializerInstance = deserializerClass.getDeclaredConstructor().newInstance(); customModule.addDeserializer(fieldType, deserializerInstance); } catch (Exception e) { - throw new RuntimeException("Failed to instantiate custom deserializer: " + key.deserializer, e); + throw new RuntimeException("Failed to instantiate custom deserializer: " + deserializerClass, e); } } mapper.registerModule(customModule); diff --git a/storm-java21/pom.xml b/storm-java21/pom.xml index 66a226c4a..ee72c6070 100644 --- a/storm-java21/pom.xml +++ b/storm-java21/pom.xml @@ -6,7 +6,7 @@ st.orm storm-framework - 1.8.0 + 1.8.1 ../pom.xml storm-java21 diff --git a/storm-kotlin-spring/pom.xml b/storm-kotlin-spring/pom.xml index 764d0bfda..8e9738af0 100644 --- a/storm-kotlin-spring/pom.xml +++ b/storm-kotlin-spring/pom.xml @@ -6,7 +6,7 @@ st.orm storm-framework - 1.8.0 + 1.8.1 ../pom.xml storm-kotlin-spring diff --git 
a/storm-kotlin-spring/src/main/kotlin/st/orm/spring/impl/SpringTransactionContext.kt b/storm-kotlin-spring/src/main/kotlin/st/orm/spring/impl/SpringTransactionContext.kt index 2a210d311..475de2fc6 100644 --- a/storm-kotlin-spring/src/main/kotlin/st/orm/spring/impl/SpringTransactionContext.kt +++ b/storm-kotlin-spring/src/main/kotlin/st/orm/spring/impl/SpringTransactionContext.kt @@ -37,6 +37,34 @@ import javax.sql.DataSource import kotlin.math.min import kotlin.reflect.KClass +/** + * Minimum transaction isolation level required for entity caching to be enabled. + * + * Transactions with an isolation level below this threshold will not use entity caching, which means dirty checking + * will treat all entities as dirty (resulting in full-row updates). This prevents the entity cache from masking + * changes that the application expects to see at lower isolation levels. + * + * The default value is [TRANSACTION_READ_COMMITTED], meaning entity caching is disabled only for + * `READ_UNCOMMITTED` transactions. This can be overridden using the system property + * `storm.entityCache.minIsolationLevel`. + * + * Valid values: `NONE`, `READ_UNCOMMITTED`, `READ_COMMITTED`, `REPEATABLE_READ`, `SERIALIZABLE`, or the + * corresponding JDBC integer constants (0, 1, 2, 4, 8). 
+ */ +private val MIN_ISOLATION_LEVEL_FOR_CACHE: Int = run { + val value = System.getProperty("storm.entityCache.minIsolationLevel")?.trim()?.uppercase() + when { + value.isNullOrBlank() -> TRANSACTION_READ_COMMITTED + value == "NONE" || value == "0" -> TRANSACTION_NONE + value == "READ_UNCOMMITTED" || value == "1" -> TRANSACTION_READ_UNCOMMITTED + value == "READ_COMMITTED" || value == "2" -> TRANSACTION_READ_COMMITTED + value == "REPEATABLE_READ" || value == "4" -> TRANSACTION_REPEATABLE_READ + value == "SERIALIZABLE" || value == "8" -> TRANSACTION_SERIALIZABLE + else -> value.toIntOrNull() + ?: throw PersistenceException("Invalid value for storm.entityCache.minIsolationLevel: '$value'.") + } +} + /** * Internal implementation of transaction context management for Spring-managed transactions. * This class provides thread-local transaction context tracking and management capabilities. @@ -183,8 +211,18 @@ internal class SpringTransactionContext : TransactionContext { /** * Returns a transaction-local cache for entities of the given type, keyed by primary key. + * + * Returns `null` if the transaction's isolation level is below the configured minimum for entity caching + * (see [MIN_ISOLATION_LEVEL_FOR_CACHE]). At low isolation levels, entity caching is disabled to prevent + * the cache from masking changes that the application expects to see. */ - override fun entityCache(entityType: Class>): EntityCache, *> { + override fun entityCache(entityType: Class>): EntityCache, *>? { + val isolationLevel = currentState.transactionDefinition?.isolationLevel + // Spring uses ISOLATION_DEFAULT (-1) when no specific isolation level is set. + // In that case, we assume the database default (typically READ_COMMITTED or higher) and enable caching. 
+ if (isolationLevel != null && isolationLevel >= 0 && isolationLevel < MIN_ISOLATION_LEVEL_FOR_CACHE) { + return null + } @Suppress("UNCHECKED_CAST") return currentState.entityCacheMap.getOrPut(entityType.kotlin) { EntityCacheImpl, Any>() diff --git a/storm-kotlin-validator/pom.xml b/storm-kotlin-validator/pom.xml index 98a162675..4243d5a63 100644 --- a/storm-kotlin-validator/pom.xml +++ b/storm-kotlin-validator/pom.xml @@ -6,7 +6,7 @@ st.orm storm-framework - 1.8.0 + 1.8.1 ../pom.xml diff --git a/storm-kotlin/pom.xml b/storm-kotlin/pom.xml index ba96cb4c3..069288a73 100644 --- a/storm-kotlin/pom.xml +++ b/storm-kotlin/pom.xml @@ -6,7 +6,7 @@ st.orm storm-framework - 1.8.0 + 1.8.1 ../pom.xml storm-kotlin diff --git a/storm-kotlin/src/main/kotlin/st/orm/template/impl/JdbcTransactionContext.kt b/storm-kotlin/src/main/kotlin/st/orm/template/impl/JdbcTransactionContext.kt index 75eff8893..4ebcfd086 100644 --- a/storm-kotlin/src/main/kotlin/st/orm/template/impl/JdbcTransactionContext.kt +++ b/storm-kotlin/src/main/kotlin/st/orm/template/impl/JdbcTransactionContext.kt @@ -29,6 +29,34 @@ import java.util.* import javax.sql.DataSource import kotlin.reflect.KClass +/** + * Minimum transaction isolation level required for entity caching to be enabled. + * + * Transactions with an isolation level below this threshold will not use entity caching, which means dirty checking + * will treat all entities as dirty (resulting in full-row updates). This prevents the entity cache from masking + * changes that the application expects to see at lower isolation levels. + * + * The default value is [TRANSACTION_READ_COMMITTED], meaning entity caching is disabled only for + * `READ_UNCOMMITTED` transactions. This can be overridden using the system property + * `storm.entityCache.minIsolationLevel`. + * + * Valid values: `NONE`, `READ_UNCOMMITTED`, `READ_COMMITTED`, `REPEATABLE_READ`, `SERIALIZABLE`, or the + * corresponding JDBC integer constants (0, 1, 2, 4, 8). 
+ */ +private val MIN_ISOLATION_LEVEL_FOR_CACHE: Int = run { + val value = System.getProperty("storm.entityCache.minIsolationLevel")?.trim()?.uppercase() + when { + value.isNullOrBlank() -> TRANSACTION_READ_COMMITTED + value == "NONE" || value == "0" -> TRANSACTION_NONE + value == "READ_UNCOMMITTED" || value == "1" -> TRANSACTION_READ_UNCOMMITTED + value == "READ_COMMITTED" || value == "2" -> TRANSACTION_READ_COMMITTED + value == "REPEATABLE_READ" || value == "4" -> TRANSACTION_REPEATABLE_READ + value == "SERIALIZABLE" || value == "8" -> TRANSACTION_SERIALIZABLE + else -> value.toIntOrNull() + ?: throw PersistenceException("Invalid value for storm.entityCache.minIsolationLevel: '$value'.") + } +} + /** * A JDBC transaction context implementation that provides lightweight transaction management based on JDBC. * This supports various transaction propagation behaviors. @@ -156,8 +184,16 @@ internal class JdbcTransactionContext : TransactionContext { /** * Returns a transaction-local cache for entities of the given type, keyed by primary key. + * + * Returns `null` if the transaction's isolation level is below the configured minimum for entity caching + * (see [MIN_ISOLATION_LEVEL_FOR_CACHE]). At low isolation levels, entity caching is disabled to prevent + * the cache from masking changes that the application expects to see. */ - override fun entityCache(entityType: Class>): EntityCache, *> { + override fun entityCache(entityType: Class>): EntityCache, *>? 
{ + val isolationLevel = currentState.isolationLevel + if (isolationLevel != null && isolationLevel < MIN_ISOLATION_LEVEL_FOR_CACHE) { + return null + } @Suppress("UNCHECKED_CAST") return currentState.entityCacheMap.getOrPut(entityType.kotlin) { EntityCacheImpl, Any>() diff --git a/storm-kotlinx-serialization/pom.xml b/storm-kotlinx-serialization/pom.xml index fb7a7cafc..e44da40ac 100644 --- a/storm-kotlinx-serialization/pom.xml +++ b/storm-kotlinx-serialization/pom.xml @@ -6,7 +6,7 @@ st.orm storm-framework - 1.8.0 + 1.8.1 ../pom.xml storm-kotlinx-serialization diff --git a/storm-mariadb/pom.xml b/storm-mariadb/pom.xml index 3c279cad2..7ed22e156 100644 --- a/storm-mariadb/pom.xml +++ b/storm-mariadb/pom.xml @@ -6,7 +6,7 @@ st.orm storm-framework - 1.8.0 + 1.8.1 ../pom.xml storm-mariadb diff --git a/storm-metamodel-ksp/pom.xml b/storm-metamodel-ksp/pom.xml index b6603c866..397c3c5d7 100644 --- a/storm-metamodel-ksp/pom.xml +++ b/storm-metamodel-ksp/pom.xml @@ -5,7 +5,7 @@ st.orm storm-framework - 1.8.0 + 1.8.1 ../pom.xml storm-metamodel-ksp diff --git a/storm-metamodel-processor/pom.xml b/storm-metamodel-processor/pom.xml index 5badc6934..1aa230fd8 100644 --- a/storm-metamodel-processor/pom.xml +++ b/storm-metamodel-processor/pom.xml @@ -5,7 +5,7 @@ st.orm storm-framework - 1.8.0 + 1.8.1 ../pom.xml storm-metamodel-processor diff --git a/storm-mssqlserver/pom.xml b/storm-mssqlserver/pom.xml index fcc53921c..17b9930b0 100644 --- a/storm-mssqlserver/pom.xml +++ b/storm-mssqlserver/pom.xml @@ -6,7 +6,7 @@ st.orm storm-framework - 1.8.0 + 1.8.1 ../pom.xml storm-mssqlserver diff --git a/storm-mysql/pom.xml b/storm-mysql/pom.xml index e77004092..d3034a604 100644 --- a/storm-mysql/pom.xml +++ b/storm-mysql/pom.xml @@ -6,7 +6,7 @@ st.orm storm-framework - 1.8.0 + 1.8.1 ../pom.xml storm-mysql diff --git a/storm-oracle/pom.xml b/storm-oracle/pom.xml index 86deb271b..3a97985de 100644 --- a/storm-oracle/pom.xml +++ b/storm-oracle/pom.xml @@ -6,7 +6,7 @@ st.orm storm-framework - 
1.8.0 + 1.8.1 ../pom.xml storm-oracle diff --git a/storm-postgresql/pom.xml b/storm-postgresql/pom.xml index 0df6dfe1b..21c3db167 100644 --- a/storm-postgresql/pom.xml +++ b/storm-postgresql/pom.xml @@ -6,7 +6,7 @@ st.orm storm-framework - 1.8.0 + 1.8.1 ../pom.xml storm-postgresql diff --git a/storm-spring/pom.xml b/storm-spring/pom.xml index fb160f77b..2386b633a 100644 --- a/storm-spring/pom.xml +++ b/storm-spring/pom.xml @@ -6,7 +6,7 @@ st.orm storm-framework - 1.8.0 + 1.8.1 ../pom.xml storm-spring