From 02a5a30e401ee55ae2f6a43f2eb669116e36c7fc Mon Sep 17 00:00:00 2001 From: Chris Vest Date: Tue, 5 Jan 2016 16:04:36 +0100 Subject: [PATCH] Make the fast page cache tests pass - OptiLock.writeLock has been changed to tryWriteLock and no longer blocks. This means OptiLock is now entirely non-blocking and allocation free. - A bunch of data race bugs have been fixed. Particularly around page faulting and pinning. And eviction. And background flushing. - Pages are now always exclusively locked when they are in the freelist. They are locked by eviction and unlocked by page faulting. This solves a race where a page pin, an eviction and a page fault happen concurrently and interleave in a particular way. - RandomPageCacheTestHarness now also simulates entity locks, and ensures that records are locked when written to. - Flushing a paged file will now spin on exclusively locked pages in its translation table. This way, the flush won't skip pages that are being looked at by the eviction thread, or the background flush thread. This is important because the flushes might fail, in which case we don't want to miss dirty pages since this can lead to lost data. Especially if we are flushing the file because we want to unmap it. The slow page cache tests are still failing, and we still haven't allowed multiple open page cursors per thread. 
--- .../org/neo4j/io/pagecache/PagedFile.java | 7 +- .../io/pagecache/impl/muninn/MuninnPage.java | 19 +- .../impl/muninn/MuninnPageCache.java | 25 ++- .../impl/muninn/MuninnPageCursor.java | 61 ++++--- .../impl/muninn/MuninnPagedFile.java | 66 ++++--- .../impl/muninn/MuninnReadPageCursor.java | 3 +- .../impl/muninn/MuninnWritePageCursor.java | 11 +- .../io/pagecache/impl/muninn/OptiLock.java | 75 +++----- .../org/neo4j/io/pagecache/PageCacheTest.java | 163 ++---------------- .../impl/muninn/OptiLockStressTest.java | 30 +++- .../pagecache/impl/muninn/OptiLockTest.java | 134 +++++++++----- .../randomharness/CommandPrimer.java | 41 +++-- .../randomharness/StandardRecordFormat.java | 5 +- 13 files changed, 310 insertions(+), 330 deletions(-) diff --git a/community/io/src/main/java/org/neo4j/io/pagecache/PagedFile.java b/community/io/src/main/java/org/neo4j/io/pagecache/PagedFile.java index 4d6fd3649d294..1936460baa78a 100644 --- a/community/io/src/main/java/org/neo4j/io/pagecache/PagedFile.java +++ b/community/io/src/main/java/org/neo4j/io/pagecache/PagedFile.java @@ -36,7 +36,7 @@ public interface PagedFile extends AutoCloseable * * This cannot be combined with PF_EXCLUSIVE_LOCK. */ - int PF_SHARED_LOCK = 1; + int PF_SHARED_LOCK = 1; // TODO rename PF_SHARED_READ_LOCK /** * Pin the pages with an exclusive lock. * @@ -45,7 +45,7 @@ public interface PagedFile extends AutoCloseable * * This cannot be combined with PF_SHARED_LOCK. */ - int PF_EXCLUSIVE_LOCK = 1 << 1; + int PF_EXCLUSIVE_LOCK = 1 << 1; // TODO rename to PF_SHARED_WRITE_LOCK /** * Disallow pinning and navigating to pages outside the range of the * underlying file. @@ -160,6 +160,9 @@ public interface PagedFile extends AutoCloseable * * If this is the last handle to the file, it will be flushed and closed. * + * Note that this operation assumes that there are no write page cursors open on the paged file. 
If there are, then + * their writes may be lost, as they might miss the last flush that can happen on their data. + * * @see AutoCloseable#close() * @throws IOException instead of the Exception superclass as defined in AutoCloseable, if . */ diff --git a/community/io/src/main/java/org/neo4j/io/pagecache/impl/muninn/MuninnPage.java b/community/io/src/main/java/org/neo4j/io/pagecache/impl/muninn/MuninnPage.java index 42d5ce00a711d..12f5831861719 100644 --- a/community/io/src/main/java/org/neo4j/io/pagecache/impl/muninn/MuninnPage.java +++ b/community/io/src/main/java/org/neo4j/io/pagecache/impl/muninn/MuninnPage.java @@ -58,7 +58,7 @@ final class MuninnPage extends OptiLock implements Page public Object nextFree; private PageSwapper swapper; - private long filePageId = PageCursor.UNBOUND_PAGE_ID; + private volatile long filePageId = PageCursor.UNBOUND_PAGE_ID; public MuninnPage( int cachePageSize, MemoryManager memoryManager ) { @@ -106,7 +106,7 @@ public void markAsClean() public void incrementUsage() { // This is intentionally left benignly racy for performance. - byte usage = UnsafeUtil.getByteVolatile( this, usageStampOffset ); + byte usage = getUsageCounter(); if ( usage < 4 ) // avoid cache sloshing by not doing a write if counter is already maxed out { usage <<= 1; @@ -120,14 +120,19 @@ public void incrementUsage() public boolean decrementUsage() { // This is intentionally left benignly racy for performance. - byte usage = UnsafeUtil.getByteVolatile( this, usageStampOffset ); + byte usage = getUsageCounter(); usage >>>= 1; UnsafeUtil.putByteVolatile( this, usageStampOffset, usage ); return usage == 0; } + private byte getUsageCounter() + { + return UnsafeUtil.getByteVolatile( this, usageStampOffset ); + } + /** - * NOTE: This method must be called while holding a pessimistic lock on the page. + * NOTE: This method must be called while holding an exclusive lock on the page. 
*/ public void flush( FlushEventOpportunity flushOpportunity ) throws IOException { @@ -159,7 +164,7 @@ private void doFlush( } /** - * NOTE: This method MUST be called while holding the page write lock. + * NOTE: This method MUST be called while holding the exclusive page lock. */ public void fault( PageSwapper swapper, @@ -242,8 +247,8 @@ public long getFilePageId() @Override public String toString() { - return format( "MuninnPage@%x[%s -> %x, filePageId = %s%s, swapper = %s]%s", + return format( "MuninnPage@%x[%s -> %x, filePageId = %s%s, swapper = %s, usage counter = %s, %s]", hashCode(), getCachePageId(), pointer, filePageId, (isDirty() ? ", dirty" : ""), - swapper, super.toString() ); + swapper, getUsageCounter(), super.toString() ); } } diff --git a/community/io/src/main/java/org/neo4j/io/pagecache/impl/muninn/MuninnPageCache.java b/community/io/src/main/java/org/neo4j/io/pagecache/impl/muninn/MuninnPageCache.java index 65f5203c15dbb..6fef24664e2ea 100644 --- a/community/io/src/main/java/org/neo4j/io/pagecache/impl/muninn/MuninnPageCache.java +++ b/community/io/src/main/java/org/neo4j/io/pagecache/impl/muninn/MuninnPageCache.java @@ -246,6 +246,7 @@ public MuninnPageCache( while ( pageIndex --> 0 ) { MuninnPage page = new MuninnPage( cachePageSize, memoryManager ); + page.tryExclusiveLock(); // All pages in the free-list are exclusively locked, and unlocked by page fault. pages[pageIndex] = page; if ( pageList == null ) @@ -584,7 +585,7 @@ int getPageCacheId() return pageCacheId; } - MuninnPage grabFreePage( PageFaultEvent faultEvent ) throws IOException + MuninnPage grabFreeAndExclusivelyLockedPage( PageFaultEvent faultEvent ) throws IOException { // Review the comment on the freelist field before making changes to // this part of the code. 
@@ -672,7 +673,10 @@ private MuninnPage cooperativelyEvict( PageFaultEvent faultEvent ) throws IOExce } finally { - page.unlockExclusive(); + if ( !evicted ) + { + page.unlockExclusive(); + } } } } @@ -774,7 +778,8 @@ else if ( freelistHead.getClass() == FreePage.class ) int evictPages( int pageCountToEvict, int clockArm, EvictionRunEvent evictionRunEvent ) { - while ( pageCountToEvict > 0 && !closed ) { + while ( pageCountToEvict > 0 && !closed ) + { if ( clockArm == pages.length ) { clockArm = 0; @@ -808,10 +813,6 @@ int evictPages( int pageCountToEvict, int clockArm, EvictionRunEvent evictionRun { pageEvicted = page.isLoaded() && evictPage( page, evictionEvent ); } - finally - { - page.unlockExclusive(); - } if ( pageEvicted ) { @@ -826,8 +827,14 @@ int evictPages( int pageCountToEvict, int clockArm, EvictionRunEvent evictionRun freePage.setNext( (FreePage) current ); nextListHead = freePage; } - while ( !compareAndSetFreelistHead( - current, nextListHead ) ); + while ( !compareAndSetFreelistHead( current, nextListHead ) ); + } + else + { + // Pages we put into the free-list remain exclusively locked until a page fault unlocks them + // If we somehow failed to evict the page, then we need to make sure that we release the + // exclusive lock. + page.unlockExclusive(); } } } diff --git a/community/io/src/main/java/org/neo4j/io/pagecache/impl/muninn/MuninnPageCursor.java b/community/io/src/main/java/org/neo4j/io/pagecache/impl/muninn/MuninnPageCursor.java index 87313efde8b18..576f4243fcfc9 100644 --- a/community/io/src/main/java/org/neo4j/io/pagecache/impl/muninn/MuninnPageCursor.java +++ b/community/io/src/main/java/org/neo4j/io/pagecache/impl/muninn/MuninnPageCursor.java @@ -139,12 +139,12 @@ public final File getCurrentFile() /** * Pin the desired file page to this cursor, page faulting it into memory if it isn't there already. * @param filePageId The file page id we want to pin this cursor to. 
- * @param exclusive 'true' if we will be taking an exclusive lock on the page as part of the pin. + * @param writeLock 'true' if we will be taking a write lock on the page as part of the pin. * @throws IOException if anything goes wrong with the pin, most likely during a page fault. */ - protected void pin( long filePageId, boolean exclusive ) throws IOException + protected void pin( long filePageId, boolean writeLock ) throws IOException { - pinEvent = tracer.beginPin( exclusive, filePageId, swapper ); + pinEvent = tracer.beginPin( writeLock, filePageId, swapper ); int chunkId = MuninnPagedFile.computeChunkId( filePageId ); // The chunkOffset is the addressing offset into the chunk array object for the relevant array slot. Using // this, we can access the array slot with Unsafe. @@ -173,10 +173,16 @@ protected void pin( long filePageId, boolean exclusive ) throws IOException // been evicted, and possibly even page faulted into something else. In this case, we discard the // item and try again, as the eviction thread would have set the chunk array slot to null. MuninnPage page = (MuninnPage) item; - lockPage( page ); - if ( !page.isBoundTo( swapper, filePageId ) ) + if ( tryLockPage( page ) ) // TODO try simplifying this + { + if ( !page.isBoundTo( swapper, filePageId ) ) + { + unlockPage( page ); + item = null; + } + } + else { - unlockPage( page ); item = null; } } @@ -201,8 +207,7 @@ private Object[][] expandTranslationTableCapacity( int chunkId ) return pagedFile.expandCapacity( chunkId ); } - private Object initiatePageFault( long filePageId, long chunkOffset, Object[] chunk ) - throws IOException + private Object initiatePageFault( long filePageId, long chunkOffset, Object[] chunk ) throws IOException { BinaryLatch latch = new BinaryLatch(); Object item = null; @@ -237,27 +242,18 @@ private MuninnPage pageFault( try { // The grabFreePage method might throw. 
- page = pagedFile.grabFreePage( faultEvent ); + page = pagedFile.grabFreeAndExclusivelyLockedPage( faultEvent ); // We got a free page, and we know that we have race-free access to it. Well, it's not entirely race // free, because other paged files might have it in their translation tables (or rather, their reads of // their translation tables might race with eviction) and try to pin it. - // However, they will all fail because when they try to pin, the page will either be 1) free, 2) bound to - // our file, or 3) the page is write locked. - if ( !page.tryExclusiveLock() ) - { - throw new AssertionError( "Unable to take exclusive lock on free page" ); - } + // However, they will all fail because when they try to pin, because the page will be exclusively locked + // and possibly bound to our page. } catch ( Throwable throwable ) { // Make sure to unstuck the page fault latch. - UnsafeUtil.putObjectVolatile( chunk, chunkOffset, null ); - latch.release(); - faultEvent.done( throwable ); - pinEvent.done(); - // We don't need to worry about the 'stamp' here, because the writeLock call is uninterruptible, so it - // can't really fail. + abortPageFault( throwable, chunk, chunkOffset, latch, faultEvent ); throw throwable; } try @@ -275,19 +271,30 @@ private MuninnPage pageFault( // Make sure to unlock the page, so the eviction thread can pick up our trash. page.unlockExclusive(); // Make sure to unstuck the page fault latch. - UnsafeUtil.putObjectVolatile( chunk, chunkOffset, null ); - latch.release(); - faultEvent.done( throwable ); - pinEvent.done(); + abortPageFault( throwable, chunk, chunkOffset, latch, faultEvent ); throw throwable; } - convertPageFaultLock( page ); + // Put the page in the translation table before we undo the exclusive lock, as we could otherwise race with + // eviction, and the onEvict callback expects to find a MuninnPage object in the table. 
UnsafeUtil.putObjectVolatile( chunk, chunkOffset, page ); + // Once we page has been published to the translation table, we can convert our exclusive lock to whatever we + // need for the page cursor. + convertPageFaultLock( page ); latch.release(); faultEvent.done(); return page; } + private void abortPageFault( Throwable throwable, Object[] chunk, long chunkOffset, + BinaryLatch latch, + PageFaultEvent faultEvent ) throws IOException + { + UnsafeUtil.putObjectVolatile( chunk, chunkOffset, null ); + latch.release(); + faultEvent.done( throwable ); + pinEvent.done(); + } + protected long assertPagedFileStillMappedAndGetIdOfLastPage() { return pagedFile.getLastPageId(); @@ -299,7 +306,7 @@ protected long assertPagedFileStillMappedAndGetIdOfLastPage() protected abstract void pinCursorToPage( MuninnPage page, long filePageId, PageSwapper swapper ); - protected abstract void lockPage( MuninnPage page ); + protected abstract boolean tryLockPage( MuninnPage page ); protected abstract void unlockPage( MuninnPage page ); diff --git a/community/io/src/main/java/org/neo4j/io/pagecache/impl/muninn/MuninnPagedFile.java b/community/io/src/main/java/org/neo4j/io/pagecache/impl/muninn/MuninnPagedFile.java index 16b8fd2fa2e2d..c309c61c4d332 100644 --- a/community/io/src/main/java/org/neo4j/io/pagecache/impl/muninn/MuninnPagedFile.java +++ b/community/io/src/main/java/org/neo4j/io/pagecache/impl/muninn/MuninnPagedFile.java @@ -203,26 +203,38 @@ void flushAndForceInternal( FlushEventOpportunity flushOpportunity ) throws IOEx // TODO The clean pages in question must still be loaded, though. Otherwise we'll end up writing // TODO garbage to the file. 
int pagesGrabbed = 0; - for ( Object element : chunk ) + chunkLoop:for ( int i = 0; i < chunk.length; i++ ) { filePageId++; - if ( element instanceof MuninnPage ) + + long offset = computeChunkOffset( filePageId ); + // We might race with eviction, but we also mustn't miss a dirty page, so we loop until we succeed + // in getting a lock on all available pages. + for (;;) { - MuninnPage page = (MuninnPage) element; - page.writeLock(); - if ( page.isBoundTo( swapper, filePageId ) && page.isDirty() ) - { - // The page is still bound to the expected file and file page id after we locked it, - // so we didn't race with eviction and faulting, and the page is dirty. - // So we add it to our IO vector. - pages[pagesGrabbed] = page; - pagesGrabbed++; - continue; - } - else + Object element = UnsafeUtil.getObjectVolatile( chunk, offset ); + if ( element instanceof MuninnPage ) { - page.unlockWrite(); + MuninnPage page = (MuninnPage) element; + if ( !page.tryWriteLock() ) + { + continue; + } + if ( page.isBoundTo( swapper, filePageId ) && page.isDirty() ) + { + // The page is still bound to the expected file and file page id after we locked it, + // so we didn't race with eviction and faulting, and the page is dirty. + // So we add it to our IO vector. + pages[pagesGrabbed] = page; + pagesGrabbed++; + continue chunkLoop; + } + else + { + page.unlockWrite(); + } } + break; } if ( pagesGrabbed > 0 ) { @@ -252,6 +264,15 @@ private int vectoredFlush( MuninnPage[] pages, int pagesGrabbed, FlushEventOppor // Write the pages vector MuninnPage firstPage = pages[0]; long startFilePageId = firstPage.getFilePageId(); + + // Mark the flushed pages as clean before our flush, so concurrent page writes can mark it as dirty and + // we'll be able to write those changes out on the next flush. 
+ for ( int j = 0; j < pagesGrabbed; j++ ) + { + // If the flush fails, we'll undo this + pages[j].markAsClean(); + } + flush = flushOpportunity.beginFlush( startFilePageId, firstPage.getCachePageId(), swapper ); long bytesWritten = swapper.write( startFilePageId, pages, 0, pagesGrabbed ); @@ -260,17 +281,16 @@ private int vectoredFlush( MuninnPage[] pages, int pagesGrabbed, FlushEventOppor flush.addPagesFlushed( pagesGrabbed ); flush.done(); - // Mark the flushed pages as clean - for ( int j = 0; j < pagesGrabbed; j++ ) - { - pages[j].markAsClean(); - } - // There are now 0 'grabbed' pages return 0; } catch ( IOException ioe ) { + // Undo marking the pages as clean + for ( int j = 0; j < pagesGrabbed; j++ ) + { + pages[j].markAsDirty(); + } if ( flush != null ) { flush.done( ioe ); @@ -399,9 +419,9 @@ int getRefCount() * none are immediately available. * @param faultEvent The trace event for the current page fault. */ - MuninnPage grabFreePage( PageFaultEvent faultEvent ) throws IOException + MuninnPage grabFreeAndExclusivelyLockedPage( PageFaultEvent faultEvent ) throws IOException { - return pageCache.grabFreePage( faultEvent ); + return pageCache.grabFreeAndExclusivelyLockedPage( faultEvent ); } /** diff --git a/community/io/src/main/java/org/neo4j/io/pagecache/impl/muninn/MuninnReadPageCursor.java b/community/io/src/main/java/org/neo4j/io/pagecache/impl/muninn/MuninnReadPageCursor.java index 45a0f45f861f8..3e5de9832e202 100644 --- a/community/io/src/main/java/org/neo4j/io/pagecache/impl/muninn/MuninnReadPageCursor.java +++ b/community/io/src/main/java/org/neo4j/io/pagecache/impl/muninn/MuninnReadPageCursor.java @@ -56,9 +56,10 @@ public boolean next() throws IOException } @Override - protected void lockPage( MuninnPage page ) + protected boolean tryLockPage( MuninnPage page ) { lockStamp = page.tryOptimisticReadLock(); + return true; } @Override diff --git a/community/io/src/main/java/org/neo4j/io/pagecache/impl/muninn/MuninnWritePageCursor.java 
b/community/io/src/main/java/org/neo4j/io/pagecache/impl/muninn/MuninnWritePageCursor.java index 3fb1a4d7bc412..c49f49697b0ef 100644 --- a/community/io/src/main/java/org/neo4j/io/pagecache/impl/muninn/MuninnWritePageCursor.java +++ b/community/io/src/main/java/org/neo4j/io/pagecache/impl/muninn/MuninnWritePageCursor.java @@ -31,6 +31,9 @@ protected void unpinCurrentPage() { if ( page != null ) { + // Mark the page as dirty *after* our write access, to make sure it's dirty even if it was concurrently + // flushed + page.markAsDirty(); pinEvent.done(); unlockPage( page ); } @@ -60,9 +63,9 @@ public boolean next() throws IOException } @Override - protected void lockPage( MuninnPage page ) + protected boolean tryLockPage( MuninnPage page ) { - page.writeLock(); + return page.tryWriteLock(); } @Override @@ -82,14 +85,12 @@ protected void pinCursorToPage( MuninnPage page, long filePageId, PageSwapper sw // to get flushed. assertPagedFileStillMappedAndGetIdOfLastPage(); page.incrementUsage(); - page.markAsDirty(); } @Override protected void convertPageFaultLock( MuninnPage page ) { - page.unlockExclusive(); // TODO page.unlockExclusiveAndTakeWriteLock - page.writeLock(); + page.unlockExclusiveAndTakeWriteLock(); } @Override diff --git a/community/io/src/main/java/org/neo4j/io/pagecache/impl/muninn/OptiLock.java b/community/io/src/main/java/org/neo4j/io/pagecache/impl/muninn/OptiLock.java index d45b78277b853..f428e8d5fdfeb 100644 --- a/community/io/src/main/java/org/neo4j/io/pagecache/impl/muninn/OptiLock.java +++ b/community/io/src/main/java/org/neo4j/io/pagecache/impl/muninn/OptiLock.java @@ -19,8 +19,6 @@ */ package org.neo4j.io.pagecache.impl.muninn; -import org.neo4j.concurrent.BinaryLatch; -import org.neo4j.concurrent.jsr166e.StampedLock; import org.neo4j.unsafe.impl.internal.dragons.UnsafeUtil; /** @@ -44,21 +42,6 @@ */ public class OptiLock { - /** - * StampedLock methods used: - * - {@link StampedLock#writeLock()} - page faulting, write locking pages - * - {@link 
StampedLock#tryWriteLock()} - eviction - * - {@link StampedLock#readLock()} - flushing, pessimistic page read-locks - * - {@link StampedLock#tryReadLock()} - background flushing - * - {@link StampedLock#tryOptimisticRead()} - optimistic page read-locking - * - {@link StampedLock#validate(long stamp)} - optimistic page read-locking - * - {@link StampedLock#unlockRead(long stamp)} - flushing, pessimistic page read-locks - * - {@link StampedLock#unlockWrite(long stamp)} - eviction, page faulting, page write-locks - * - {@link StampedLock#tryConvertToReadLock(long stamp)} - page fault in page read cursor - * - {@link StampedLock#isWriteLocked()} - assertions - * - {@link StampedLock#isReadLocked()} - assertions - */ - private static final long CNT_BITS = 17; // Bits for counting concurrent write-locks private static final long SEQ_BITS = 64 - 1 - CNT_BITS; @@ -72,7 +55,6 @@ public class OptiLock @SuppressWarnings( "unused" ) // accessed via unsafe private volatile long state; - private volatile BinaryLatch exclusiveLatch; private long getState() { @@ -111,13 +93,14 @@ public boolean validateReadLock( long stamp ) } /** - * Take a concurrent write lock. Multiple write locks can be held at the same time. Write locks will invalidate any - * optimistic read lock that overlaps with them, and write locks will make any attempt at grabbing an exclusive - * lock fail. If an exclusive lock is currently held, then taking a write lock will block on the exclusive lock. + * Try taking a concurrent write lock. Multiple write locks can be held at the same time. Write locks will + * invalidate any optimistic read lock that overlaps with them, and write locks will make any attempt at grabbing + * an exclusive lock fail. If an exclusive lock is currently held, then the attempt to take a write lock will fail. *

* Write locks must be paired with a corresponding {@link #unlockWrite()}. + * @return {@code true} if the write lock was taken, {@code false} otherwise. */ - public void writeLock() + public boolean tryWriteLock() { long s, n; for (; ; ) @@ -125,8 +108,7 @@ public void writeLock() s = getState(); if ( (s & EXCL_MASK) == EXCL_MASK ) { - blockOnExclusiveLock(); - continue; + return false; } if ( (s & CNT_MASK) == CNT_MASK ) { @@ -135,27 +117,18 @@ public void writeLock() n = s + CNT_UNIT; if ( compareAndSetState( s, n ) ) { - return; + return true; } } } - private void blockOnExclusiveLock() - { - BinaryLatch latch = exclusiveLatch; - if ( latch != null ) - { - latch.await( this ); - } - } - private long throwWriteLockOverflow( long s ) { throw new IllegalMonitorStateException( "Write lock counter overflow: " + describeState( s ) ); } /** - * Release a write lock taking with {@link #writeLock()}. + * Release a write lock taking with {@link #tryWriteLock()}. */ public void unlockWrite() { @@ -194,12 +167,7 @@ private long nextSeq( long s ) public boolean tryExclusiveLock() { long s = getState(); - if ( ((s & CNT_MASK) == 0) & ((s & EXCL_MASK) == 0) && compareAndSetState( s, s + EXCL_MASK ) ) - { - exclusiveLatch = new BinaryLatch(); - return true; - } - return false; + return ((s & CNT_MASK) == 0) & ((s & EXCL_MASK) == 0) && compareAndSetState( s, s + EXCL_MASK ); } /** @@ -210,21 +178,30 @@ public boolean tryExclusiveLock() */ public long unlockExclusive() { - long s = getState(); - if ( (s & EXCL_MASK) != EXCL_MASK ) - { - throwUnmatchedUnlockExclusive( s ); - } - exclusiveLatch.release(); - exclusiveLatch = null; + long s = initiateExclusiveLockRelease(); long n = nextSeq( s ) - EXCL_MASK; compareAndSetState( s, n ); return n; } + /** + * Atomically unlock the currently held exclusive lock, and take a write lock. 
+ */ public void unlockExclusiveAndTakeWriteLock() { - // TODO + long s = initiateExclusiveLockRelease(); + long n = nextSeq( s ) - EXCL_MASK + CNT_UNIT; + compareAndSetState( s, n ); + } + + private long initiateExclusiveLockRelease() + { + long s = getState(); + if ( (s & EXCL_MASK) != EXCL_MASK ) + { + throwUnmatchedUnlockExclusive( s ); + } + return s; } private void throwUnmatchedUnlockExclusive( long s ) diff --git a/community/io/src/test/java/org/neo4j/io/pagecache/PageCacheTest.java b/community/io/src/test/java/org/neo4j/io/pagecache/PageCacheTest.java index e7f215106163c..99b8d91a50a0d 100644 --- a/community/io/src/test/java/org/neo4j/io/pagecache/PageCacheTest.java +++ b/community/io/src/test/java/org/neo4j/io/pagecache/PageCacheTest.java @@ -35,7 +35,6 @@ import java.nio.file.NoSuchFileException; import java.nio.file.OpenOption; import java.nio.file.StandardOpenOption; -import java.util.ArrayList; import java.util.LinkedList; import java.util.List; import java.util.Queue; @@ -43,6 +42,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.Future; import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -86,7 +86,6 @@ import static org.neo4j.io.pagecache.PagedFile.PF_NO_GROW; import static org.neo4j.io.pagecache.PagedFile.PF_SHARED_LOCK; import static org.neo4j.test.ByteArrayMatcher.byteArray; -import static org.neo4j.test.ThreadTestUtils.awaitThreadState; import static org.neo4j.test.ThreadTestUtils.fork; public abstract class PageCacheTest extends PageCacheTestSupport @@ -531,6 +530,7 @@ public void dirtyPagesMustBeFlushedWhenTheCacheIsClosed() throws IOException verifyRecordsInFile( file, recordCount ); } + @RepeatRule.Repeat( times = 100 ) @Test( timeout = SHORT_TIMEOUT_MILLIS ) public void flushingDuringPagedFileCloseMustRetryUntilItSucceeds() 
throws IOException { @@ -1452,136 +1452,18 @@ public void pagesAddedWithNextWithPageIdMustBeAccessibleWithNoGrowSpecified() th @Test( timeout = SEMI_LONG_TIMEOUT_MILLIS ) public void readsAndWritesMustBeMutuallyConsistent() throws Exception { - // The idea is this: have a range of pages and we set off a bunch of threads to - // do writes within a small region of the page set. The writes they'll perform - // is to fill a random page within the region, with the same random byte value. - // We then have our main thread scan through all the pages over and over, and - // check that all pages can be read consistently, such that all the bytes in a - // given page have the same value. We do this check many times, because the - // test is inherently about catching data races in the act. - - final int pageCount = 100; - int writerThreads = 8; - final CountDownLatch startLatch = new CountDownLatch( writerThreads ); - final CountDownLatch writersDoneLatch = new CountDownLatch( writerThreads ); - List> writers = new ArrayList<>(); - - getPageCache( fs, maxPages, pageCachePageSize, PageCacheTracer.NULL ); - final PagedFile pagedFile = pageCache.map( file( "a" ), filePageSize ); - - // zero-fill the file - try ( PageCursor cursor = pagedFile.io( 0, PF_EXCLUSIVE_LOCK ) ) - { - for ( int i = 0; i < pageCount; i++ ) - { - assertTrue( cursor.next() ); - } - } - - Runnable writer = () -> { - try - { - int pageRangeMin = pageCount / 2; - int pageRangeMax = pageRangeMin + 5; - ThreadLocalRandom rng = ThreadLocalRandom.current(); - int[] offsets = new int[filePageSize]; - for ( int i = 0; i < offsets.length; i++ ) - { - offsets[i] = i; - } - - startLatch.countDown(); - - while ( !Thread.interrupted() ) - { - byte value = (byte) rng.nextInt(); - int pageId = rng.nextInt( pageRangeMin, pageRangeMax ); - // shuffle offsets - for ( int i = 0; i < offsets.length; i++ ) - { - int j = rng.nextInt( i, offsets.length ); - int s = offsets[i]; - offsets[i] = offsets[j]; - offsets[j] = s; - } - // 
fill page - try ( PageCursor cursor = pagedFile.io( pageId, PF_EXCLUSIVE_LOCK ) ) - { - if ( cursor.next() ) - { - do - { - for ( int offset : offsets ) - { - cursor.setOffset( offset ); - cursor.putByte( value ); - } - } while ( cursor.shouldRetry() ); - } - } - } - } - catch ( IOException e ) - { - throw new RuntimeException( e ); - } - finally - { - writersDoneLatch.countDown(); - } - }; - - for ( int i = 0; i < writerThreads; i++ ) - { - writers.add( executor.submit( writer ) ); - } - - startLatch.await(); - - try - { - for ( int i = 0; i < 2000; i++ ) - { - int countedConsistentPageReads = 0; - try ( PageCursor cursor = pagedFile.io( 0, PF_SHARED_LOCK ) ) - { - while ( cursor.next() ) - { - boolean consistent; - do - { - consistent = true; - byte first = cursor.getByte(); - for ( int j = 1; j < filePageSize; j++ ) - { - byte b = cursor.getByte(); - consistent = consistent && b == first; - } - } while ( cursor.shouldRetry() ); - assertTrue( "checked consistency at itr " + i, consistent ); - countedConsistentPageReads++; - } - } - assertThat( countedConsistentPageReads, is( pageCount ) ); - } - - for ( Future future : writers ) - { - if ( future.isDone() ) - { - future.get(); - } - else - { - future.cancel( true ); - } - } - writersDoneLatch.await(); - } - finally - { - pagedFile.close(); - } + int filePageCount = 100; + RandomPageCacheTestHarness harness = new RandomPageCacheTestHarness(); + harness.disableCommands( Command.FlushCache, Command.FlushFile, Command.MapFile, Command.UnmapFile ); + harness.setCommandProbabilityFactor( Command.ReadRecord, 0.5 ); + harness.setCommandProbabilityFactor( Command.WriteRecord, 0.5 ); + harness.setConcurrencyLevel( 8 ); + harness.setFilePageCount( filePageCount ); + harness.setInitialMappedFiles( 1 ); + harness.setCachePageSize( pageCachePageSize ); + harness.setFilePageSize( pageCachePageSize ); + harness.setVerification( filesAreCorrectlyWrittenVerification( new StandardRecordFormat(), filePageCount ) ); + harness.run( 
SEMI_LONG_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS ); } @Test( timeout = SHORT_TIMEOUT_MILLIS ) @@ -2386,7 +2268,7 @@ public void readLockedPageCursorNextWithIdMustThrowIfFileIsUnmapped() throws IOE } @Test( timeout = SHORT_TIMEOUT_MILLIS ) - public void writeLockedPageMustBlockFileUnmapping() throws Exception + public void writeLockedPageMustNotBlockFileUnmapping() throws Exception { getPageCache( fs, maxPages, pageCachePageSize, PageCacheTracer.NULL ); @@ -2394,18 +2276,13 @@ public void writeLockedPageMustBlockFileUnmapping() throws Exception PageCursor cursor = pagedFile.io( 0, PF_EXCLUSIVE_LOCK ); assertTrue( cursor.next() ); - Thread unmapper = fork( $close( pagedFile ) ); - awaitThreadState( unmapper, 1000, - Thread.State.BLOCKED, Thread.State.WAITING, Thread.State.TIMED_WAITING ); + fork( $close( pagedFile ) ).join(); - assertFalse( cursor.shouldRetry() ); cursor.close(); - - unmapper.join(); } @Test( timeout = SHORT_TIMEOUT_MILLIS ) - public void pessimisticReadLockedPageMustNotBlockFileUnmapping() throws Exception + public void optimisticReadLockedPageMustNotBlockFileUnmapping() throws Exception { generateFileWithRecords( file( "a" ), 1, recordSize ); @@ -2413,11 +2290,10 @@ public void pessimisticReadLockedPageMustNotBlockFileUnmapping() throws Exceptio PagedFile pagedFile = pageCache.map( file( "a" ), filePageSize ); PageCursor cursor = pagedFile.io( 0, PF_SHARED_LOCK ); - assertTrue( cursor.next() ); // Got a pessimistic read lock + assertTrue( cursor.next() ); // Got a read lock fork( $close( pagedFile ) ).join(); - assertFalse( cursor.shouldRetry() ); cursor.close(); } @@ -2452,7 +2328,6 @@ public void advancingOptimisticReadLockingCursorAfterUnmappingMustThrow() throws fork( $close( pagedFile ) ).join(); - assertFalse( cursor.shouldRetry() ); try { cursor.next(); fail( "Advancing the cursor should have thrown" ); @@ -2481,7 +2356,7 @@ public void readingAndRetryingOnPageWithOptimisticReadLockingAfterUnmappingMustN pageCache = null; cursor.getByte(); - 
assertFalse( cursor.shouldRetry() ); + cursor.shouldRetry(); try { cursor.next(); fail( "Advancing the cursor should have thrown" ); diff --git a/community/io/src/test/java/org/neo4j/io/pagecache/impl/muninn/OptiLockStressTest.java b/community/io/src/test/java/org/neo4j/io/pagecache/impl/muninn/OptiLockStressTest.java index 7c6fa0d02cbb9..fc1a20e078dfa 100644 --- a/community/io/src/test/java/org/neo4j/io/pagecache/impl/muninn/OptiLockStressTest.java +++ b/community/io/src/test/java/org/neo4j/io/pagecache/impl/muninn/OptiLockStressTest.java @@ -49,7 +49,7 @@ public static void shutDownExecutor() private OptiLock lock = new OptiLock(); - @RepeatRule.Repeat( times = 300 ) + @RepeatRule.Repeat( times = 20 ) @Test public void stressTest() throws Exception { @@ -114,17 +114,19 @@ protected void doWork() while ( !stop.get() ) { - lock.writeLock(); - int[] record = data[id]; - for ( int i = 0; i < record.length; i++ ) + if ( lock.tryWriteLock() ) { - record[i] = counter; - for ( int j = 0; j < smallSpin; j++ ) + int[] record = data[id]; + for ( int i = 0; i < record.length; i++ ) { - unused = rng.nextLong(); + record[i] = counter; + for ( int j = 0; j < smallSpin; j++ ) + { + unused = rng.nextLong(); + } } + lock.unlockWrite(); } - lock.unlockWrite(); for ( int j = 0; j < bigSpin; j++ ) { @@ -204,4 +206,16 @@ protected void doWork() future.get(); } } + + @Test + public void thoroughlyEnsureAtomicityOfUnlockExclusiveAndTakeWriteLock() throws Exception + { + OptiLockTest test = new OptiLockTest(); + for ( int i = 0; i < 30000; i++ ) + { + test.unlockExclusiveAndTakeWriteLockMustBeAtomic(); + test.lock = new OptiLock(); + } + + } } diff --git a/community/io/src/test/java/org/neo4j/io/pagecache/impl/muninn/OptiLockTest.java b/community/io/src/test/java/org/neo4j/io/pagecache/impl/muninn/OptiLockTest.java index e2d5f1b0c25f5..d22dec8164eb2 100644 --- a/community/io/src/test/java/org/neo4j/io/pagecache/impl/muninn/OptiLockTest.java +++ 
b/community/io/src/test/java/org/neo4j/io/pagecache/impl/muninn/OptiLockTest.java @@ -22,22 +22,18 @@ import org.junit.AfterClass; import org.junit.Test; +import java.util.ArrayList; +import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicBoolean; -import org.neo4j.concurrent.BinaryLatch; - -import static java.lang.Thread.State.BLOCKED; -import static java.lang.Thread.State.TIMED_WAITING; -import static java.lang.Thread.State.WAITING; import static org.hamcrest.Matchers.is; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; -import static org.neo4j.test.ThreadTestUtils.awaitThreadState; public class OptiLockTest { @@ -50,7 +46,7 @@ public static void shutDownExecutor() executor.shutdown(); } - private OptiLock lock = new OptiLock(); + OptiLock lock = new OptiLock(); @Test public void uncontendedOptimisticLockMustValidate() throws Exception @@ -69,7 +65,7 @@ public void mustNotValidateRandomStamp() throws Exception public void writeLockMustInvalidateOptimisticLock() throws Exception { long r = lock.tryOptimisticReadLock(); - lock.writeLock(); + lock.tryWriteLock(); lock.unlockWrite(); assertFalse( lock.validateReadLock( r ) ); } @@ -78,14 +74,14 @@ public void writeLockMustInvalidateOptimisticLock() throws Exception public void takingWriteLockMustInvalidateOptimisticLock() throws Exception { long r = lock.tryOptimisticReadLock(); - lock.writeLock(); + lock.tryWriteLock(); assertFalse( lock.validateReadLock( r ) ); } @Test public void optimisticReadLockMustNotValidateUnderWriteLock() throws Exception { - lock.writeLock(); + lock.tryWriteLock(); long r = lock.tryOptimisticReadLock(); assertFalse( 
lock.validateReadLock( r ) ); } @@ -93,16 +89,22 @@ public void optimisticReadLockMustNotValidateUnderWriteLock() throws Exception @Test public void writeLockReleaseMustInvalidateOptimisticReadLock() throws Exception { - lock.writeLock(); + lock.tryWriteLock(); long r = lock.tryOptimisticReadLock(); lock.unlockWrite(); assertFalse( lock.validateReadLock( r ) ); } + @Test + public void uncontendedWriteLockMustBeAvailable() throws Exception + { + assertTrue( lock.tryWriteLock() ); + } + @Test public void uncontendedOptimisticReadLockMustValidateAfterWriteLockRelease() throws Exception { - lock.writeLock(); + lock.tryWriteLock(); lock.unlockWrite(); long r = lock.tryOptimisticReadLock(); assertTrue( lock.validateReadLock( r ) ); @@ -111,8 +113,8 @@ public void uncontendedOptimisticReadLockMustValidateAfterWriteLockRelease() thr @Test( timeout = TIMEOUT ) public void writeLocksMustNotBlockOtherWriteLocks() throws Exception { - lock.writeLock(); - lock.writeLock(); + assertTrue( lock.tryWriteLock() ); + assertTrue( lock.tryWriteLock() ); } @Test( timeout = TIMEOUT ) @@ -121,14 +123,19 @@ public void writeLocksMustNotBlockOtherWriteLocksInOtherThreads() throws Excepti int threads = 10; CountDownLatch end = new CountDownLatch( threads ); Runnable runnable = () -> { - lock.writeLock(); + assertTrue( lock.tryWriteLock() ); end.countDown(); }; + List> futures = new ArrayList<>(); for ( int i = 0; i < threads; i++ ) { - executor.submit( runnable ); + futures.add( executor.submit( runnable ) ); } end.await(); + for ( Future future : futures ) + { + future.get(); + } } @Test( expected = IllegalMonitorStateException.class ) @@ -140,10 +147,10 @@ public void unmatchedUnlockWriteLockMustThrow() throws Exception @Test( expected = IllegalMonitorStateException.class, timeout = TIMEOUT ) public void writeLockCountOverflowMustThrow() throws Exception { - // TODO its possible we might want to spin-yield instead of throwing, hoping someone will give us a lock + // TODO its possible we 
might want to spin-yield or fail instead of throwing, hoping someone will give us a lock for ( ;; ) { - lock.writeLock(); + assertTrue( lock.tryWriteLock() ); } } @@ -199,14 +206,14 @@ public void canTakeUncontendedExclusiveLocks() throws Exception @Test public void writeLocksMustFailExclusiveLocks() throws Exception { - lock.writeLock(); + lock.tryWriteLock(); assertFalse( lock.tryExclusiveLock() ); } @Test public void exclusiveLockMustBeAvailableAfterWriteLock() throws Exception { - lock.writeLock(); + lock.tryWriteLock(); lock.unlockWrite(); assertTrue( lock.tryExclusiveLock() ); } @@ -227,24 +234,10 @@ public void exclusiveLockMustBeAvailableAfterExclusiveLock() throws Exception } @Test( timeout = TIMEOUT ) - public void exclusiveLockMustBlockWriteLocks() throws Exception + public void exclusiveLockMustFailWriteLocks() throws Exception { - BinaryLatch start = new BinaryLatch(); - AtomicReference threadRef = new AtomicReference<>(); - AtomicInteger counter = new AtomicInteger(); lock.tryExclusiveLock(); - executor.submit( () -> { - threadRef.set( Thread.currentThread() ); - start.release(); - lock.writeLock(); - counter.incrementAndGet(); - } ); - - start.await(); - Thread thread = threadRef.get(); - awaitThreadState( thread, 2 * TIMEOUT, BLOCKED, WAITING, TIMED_WAITING ); - assertThat( counter.get(), is( 0 ) ); - lock.unlockExclusive(); + assertFalse( lock.tryWriteLock() ); } @Test( expected = IllegalMonitorStateException.class ) @@ -265,7 +258,7 @@ public void writeLockMustBeAvailableAfterExclusiveLock() throws Exception { lock.tryExclusiveLock(); lock.unlockExclusive(); - lock.writeLock(); + assertTrue( lock.tryWriteLock() ); lock.unlockWrite(); } @@ -277,12 +270,71 @@ public void unlockExclusiveMustReturnStampForOptimisticReadLock() throws Excepti assertTrue( lock.validateReadLock( r ) ); } + @Test + public void unlockExclusiveAndTakeWriteLockMustInvalidateOptimisticReadLocks() throws Exception + { + lock.tryExclusiveLock(); + 
lock.unlockExclusiveAndTakeWriteLock(); + long r = lock.tryOptimisticReadLock(); + assertFalse( lock.validateReadLock( r ) ); + } + + @Test + public void unlockExclusiveAndTakeWriteLockMustPreventExclusiveLocks() throws Exception + { + lock.tryExclusiveLock(); + lock.unlockExclusiveAndTakeWriteLock(); + assertFalse( lock.tryExclusiveLock() ); + } + + @Test( timeout = TIMEOUT ) + public void unlockExclusiveAndTakeWriteLockMustAllowConcurrentWriteLocks() throws Exception + { + lock.tryExclusiveLock(); + lock.unlockExclusiveAndTakeWriteLock(); + assertTrue( lock.tryWriteLock() ); + } + + @Test( timeout = TIMEOUT ) + public void unlockExclusiveAndTakeWriteLockMustBeAtomic() throws Exception + { + int threads = 12; + CountDownLatch start = new CountDownLatch( threads ); + AtomicBoolean stop = new AtomicBoolean(); + lock.tryExclusiveLock(); + Runnable runnable = () -> { + while ( !stop.get() ) + { + if ( lock.tryExclusiveLock() ) + { + lock.unlockExclusive(); + throw new RuntimeException( "I should not have gotten that lock" ); + } + start.countDown(); + } + }; + + List> futures = new ArrayList<>(); + for ( int i = 0; i < threads; i++ ) + { + futures.add( executor.submit( runnable ) ); + } + + start.await(); + lock.unlockExclusiveAndTakeWriteLock(); + stop.set( true ); + for ( Future future : futures ) + { + future.get(); // Assert that this does not throw + } + } + @Test public void stampFromUnlockExclusiveMustNotBeValidIfThereAreWriteLocks() throws Exception { lock.tryExclusiveLock(); long r = lock.unlockExclusive(); - lock.writeLock(); + assertTrue( lock.tryWriteLock() ); assertFalse( lock.validateReadLock( r ) ); } @@ -290,7 +342,7 @@ public void stampFromUnlockExclusiveMustNotBeValidIfThereAreWriteLocks() throws public void toStringMustDescribeState() throws Exception { assertThat( lock.toString(), is( "OptiLock[E:0, W:0:0, S:0:0]" ) ); - lock.writeLock(); + lock.tryWriteLock(); assertThat( lock.toString(), is( "OptiLock[E:0, W:1:1, S:0:0]" ) ); lock.unlockWrite(); 
assertThat( lock.toString(), is( "OptiLock[E:0, W:0:0, S:1:1]" ) ); diff --git a/community/io/src/test/java/org/neo4j/io/pagecache/randomharness/CommandPrimer.java b/community/io/src/test/java/org/neo4j/io/pagecache/randomharness/CommandPrimer.java index 126a2c39c92f4..69687a3c98e35 100644 --- a/community/io/src/test/java/org/neo4j/io/pagecache/randomharness/CommandPrimer.java +++ b/community/io/src/test/java/org/neo4j/io/pagecache/randomharness/CommandPrimer.java @@ -27,6 +27,7 @@ import java.util.Map; import java.util.Random; import java.util.Set; +import java.util.concurrent.locks.ReentrantLock; import org.neo4j.io.pagecache.PageCursor; import org.neo4j.io.pagecache.PagedFile; @@ -47,6 +48,10 @@ class CommandPrimer private final int filePageCount; private final int filePageSize; private final RecordFormat recordFormat; + private final int maxRecordCount; + private final int recordsPerPage; + // Entity-locks that protect the individual records, since page write locks are not exclusive. + private final ReentrantLock[] recordLocks; public CommandPrimer( Random rng, @@ -69,9 +74,16 @@ public CommandPrimer( filesTouched = new HashSet<>(); filesTouched.addAll( mappedFiles ); recordsWrittenTo = new HashMap<>(); + recordsPerPage = cache.pageSize() / recordFormat.getRecordSize(); + maxRecordCount = filePageCount * recordsPerPage; + recordLocks = new ReentrantLock[maxRecordCount]; + for ( int i = 0; i < maxRecordCount; i++ ) + { + recordLocks[i] = new ReentrantLock(); + } for ( File file : files ) { - recordsWrittenTo.put( file, new ArrayList() ); + recordsWrittenTo.put( file, new ArrayList<>() ); } } @@ -179,14 +191,12 @@ private Action readRecord() } final File file = mappedFiles.get( rng.nextInt( mappedFilesCount ) ); List recordsWritten = recordsWrittenTo.get( file ); - int recordSize = recordFormat.getRecordSize(); - int recordsPerPage = cache.pageSize() / recordSize; final int recordId = recordsWritten.isEmpty()? 
- rng.nextInt( filePageCount * recordsPerPage ) + rng.nextInt( maxRecordCount ) : recordsWritten.get( rng.nextInt( recordsWritten.size() ) ); final int pageId = recordId / recordsPerPage; - final int pageOffset = (recordId % recordsPerPage) * recordSize; + final int pageOffset = (recordId % recordsPerPage) * recordFormat.getRecordSize(); final Record expectedRecord = recordFormat.createRecord( file, recordId ); return new Action( Command.ReadRecord, "[file=%s, recordId=%s, pageId=%s, pageOffset=%s, expectedRecord=%s]", @@ -221,12 +231,10 @@ private Action writeRecord() } final File file = mappedFiles.get( rng.nextInt( mappedFilesCount ) ); filesTouched.add( file ); - int recordSize = 16; - int recordsPerPage = cache.pageSize() / recordSize; final int recordId = rng.nextInt( filePageCount * recordsPerPage ); recordsWrittenTo.get( file ).add( recordId ); final int pageId = recordId / recordsPerPage; - final int pageOffset = (recordId % recordsPerPage) * recordSize; + final int pageOffset = (recordId % recordsPerPage) * recordFormat.getRecordSize(); final Record record = recordFormat.createRecord( file, recordId ); return new Action( Command.WriteRecord, "[file=%s, recordId=%s, pageId=%s, pageOffset=%s, record=%s]", @@ -238,14 +246,23 @@ public void perform() throws Exception PagedFile pagedFile = fileMap.get( file ); if ( pagedFile != null ) { - try ( PageCursor cursor = pagedFile.io( pageId, PagedFile.PF_EXCLUSIVE_LOCK ) ) + ReentrantLock recordLock = recordLocks[recordId]; + recordLock.lock(); + try { - if ( cursor.next() ) + try ( PageCursor cursor = pagedFile.io( pageId, PagedFile.PF_EXCLUSIVE_LOCK ) ) { - cursor.setOffset( pageOffset ); - recordFormat.write( record, cursor ); + if ( cursor.next() ) + { + cursor.setOffset( pageOffset ); + recordFormat.write( record, cursor ); + } } } + finally + { + recordLock.unlock(); + } } } }; diff --git a/community/io/src/test/java/org/neo4j/io/pagecache/randomharness/StandardRecordFormat.java 
b/community/io/src/test/java/org/neo4j/io/pagecache/randomharness/StandardRecordFormat.java index 9b8ee67bed2c4..dbc4c5788e6dc 100644 --- a/community/io/src/test/java/org/neo4j/io/pagecache/randomharness/StandardRecordFormat.java +++ b/community/io/src/test/java/org/neo4j/io/pagecache/randomharness/StandardRecordFormat.java @@ -76,9 +76,10 @@ public Record zeroRecord() public void write( Record record, PageCursor cursor ) { StandardRecord r = (StandardRecord) record; - cursor.putByte( r.type ); byte[] pathBytes = r.file.getPath().getBytes( StandardCharsets.UTF_8 ); - cursor.putByte( pathBytes[pathBytes.length - 1] ); + byte fileByte = pathBytes[pathBytes.length - 1]; + cursor.putByte( r.type ); + cursor.putByte( fileByte ); cursor.putShort( r.fill1 ); cursor.putInt( r.recordId ); cursor.putLong( r.fill2 );