Rename PF_SHARED_LOCK and PF_EXCLUSIVE_LOCK to PF_SHARED_READ_LOCK and PF_SHARED_WRITE_LOCK, respectively.
chrisvest committed Jan 20, 2016
1 parent c857723 commit f2abd6c
Showing 29 changed files with 290 additions and 292 deletions.
14 changes: 7 additions & 7 deletions community/io/src/main/java/org/neo4j/io/pagecache/PagedFile.java
@@ -33,9 +33,9 @@ public interface PagedFile extends AutoCloseable
* pages under read locks cannot be safely written to anyway, so there's
* no point in trying to go beyond the end of the file.
* <p>
- * This cannot be combined with {@link #PF_EXCLUSIVE_LOCK}.
+ * This cannot be combined with {@link #PF_SHARED_WRITE_LOCK}.
*/
- int PF_SHARED_LOCK = 1; // TODO rename PF_SHARED_READ_LOCK
+ int PF_SHARED_READ_LOCK = 1;
/**
* Pin the pages with a shared write lock.
* <p>
@@ -44,9 +44,9 @@ public interface PagedFile extends AutoCloseable
* Note that write locks are <em>not</em> exclusive. You must use other means to coordinate access to the data on
* the pages. The write lock only means that the page will not be concurrently evicted.
* <p>
- * This cannot be combined with {@link #PF_SHARED_LOCK}.
+ * This cannot be combined with {@link #PF_SHARED_READ_LOCK}.
*/
- int PF_EXCLUSIVE_LOCK = 1 << 1; // TODO rename to PF_SHARED_WRITE_LOCK
+ int PF_SHARED_WRITE_LOCK = 1 << 1;
/**
* Disallow pinning and navigating to pages outside the range of the
* underlying file.
@@ -101,12 +101,12 @@ public interface PagedFile extends AutoCloseable
* <p>
* The {@code pf_flags} argument expresses the intent of the IO operation. It is a bitmap that combines various
* {@code PF_*} constants. You must always specify your desired locking behaviour, with either
- * {@link org.neo4j.io.pagecache.PagedFile#PF_EXCLUSIVE_LOCK} or
- * {@link org.neo4j.io.pagecache.PagedFile#PF_SHARED_LOCK}.
+ * {@link org.neo4j.io.pagecache.PagedFile#PF_SHARED_WRITE_LOCK} or
+ * {@link org.neo4j.io.pagecache.PagedFile#PF_SHARED_READ_LOCK}.
* <p>
* The two locking modes cannot be combined, but other intents can be combined with them. For instance, if you want
* to write to a page, but also make sure that you don't write beyond the end of the file, then you can express your
- * intent with {@code PF_EXCLUSIVE_LOCK | PF_NO_GROW} – note how the flags are combined with a bitwise-OR operator.
+ * intent with {@code PF_SHARED_WRITE_LOCK | PF_NO_GROW} – note how the flags are combined with a bitwise-OR operator.
* Arithmetic addition can also be used, but might not make it as clear that we are dealing with a bit-set.
*
* @param pageId The initial file-page-id, that the cursor will be bound to
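To make the flag combination described in the javadoc above concrete, here is a minimal sketch. It is not part of this commit; the file, the page size, and the putInt call are assumptions that mirror the map/getInt calls used in the tests further down.

import java.io.File;
import java.io.IOException;

import org.neo4j.io.pagecache.PageCache;
import org.neo4j.io.pagecache.PageCursor;
import org.neo4j.io.pagecache.PagedFile;

import static org.neo4j.io.pagecache.PagedFile.PF_NO_GROW;
import static org.neo4j.io.pagecache.PagedFile.PF_SHARED_WRITE_LOCK;

class PageFlagSketch
{
    // Hedged sketch: pin page 0 with the renamed write lock, combined with
    // PF_NO_GROW so the cursor refuses to move past the end of the file.
    static void writeFirstPage( PageCache pageCache, File file, int pageSize ) throws IOException
    {
        try ( PagedFile pagedFile = pageCache.map( file, pageSize );
              PageCursor cursor = pagedFile.io( 0, PF_SHARED_WRITE_LOCK | PF_NO_GROW ) )
        {
            // With PF_NO_GROW set, next() returns false instead of growing the
            // file when page 0 does not exist yet.
            if ( cursor.next() )
            {
                cursor.putInt( 42 ); // putInt is assumed to mirror the getInt used in the tests
            }
        }
    }
}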
@@ -133,19 +133,19 @@ public String toString()
@Override
public PageCursor io( long pageId, int pf_flags )
{
- int lockMask = PF_EXCLUSIVE_LOCK | PF_SHARED_LOCK;
+ int lockMask = PF_SHARED_WRITE_LOCK | PF_SHARED_READ_LOCK;
if ( (pf_flags & lockMask) == 0 )
{
throw new IllegalArgumentException(
"Must specify either PF_EXCLUSIVE_LOCK or PF_SHARED_LOCK" );
"Must specify either PF_SHARED_WRITE_LOCK or PF_SHARED_READ_LOCK" );
}
if ( (pf_flags & lockMask) == lockMask )
{
throw new IllegalArgumentException(
"Cannot specify both PF_EXCLUSIVE_LOCK and PF_SHARED_LOCK" );
"Cannot specify both PF_SHARED_WRITE_LOCK and PF_SHARED_READ_LOCK" );
}
MuninnPageCursor cursor;
- if ( (pf_flags & PF_SHARED_LOCK) == 0 )
+ if ( (pf_flags & PF_SHARED_READ_LOCK) == 0 )
{
cursor = cursorPool.takeWriteCursor();
}
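The validation above accepts exactly one of the two lock flags. As a hedged continuation of the sketch above (same imports, plus a static import of PF_SHARED_READ_LOCK), the two rejection paths look like this; the calls are assumptions, but the messages are quoted from the code shown here.

static void demonstrateFlagValidation( PagedFile pagedFile ) throws IOException
{
    try ( PageCursor cursor = pagedFile.io( 0, PF_NO_GROW ) ) // no lock flag at all
    {
        cursor.next();
    }
    catch ( IllegalArgumentException expected )
    {
        // "Must specify either PF_SHARED_WRITE_LOCK or PF_SHARED_READ_LOCK"
    }

    try ( PageCursor cursor = pagedFile.io( 0, PF_SHARED_WRITE_LOCK | PF_SHARED_READ_LOCK ) ) // both flags
    {
        cursor.next();
    }
    catch ( IllegalArgumentException expected )
    {
        // "Cannot specify both PF_SHARED_WRITE_LOCK and PF_SHARED_READ_LOCK"
    }
}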
@@ -89,7 +89,7 @@
* cursor will scan linearly through the file.
* <p>
* The {@code next} method returns {@code true} if it successfully bound to the next page in its sequence. This is
- * usually the case, but when {@link org.neo4j.io.pagecache.PagedFile#PF_SHARED_LOCK} or
+ * usually the case, but when {@link org.neo4j.io.pagecache.PagedFile#PF_SHARED_READ_LOCK} or
* {@link org.neo4j.io.pagecache.PagedFile#PF_NO_GROW} is specified, the {@code next} method will return {@code false}
* if the cursor would otherwise move beyond the end of the file.
* <p>
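The linear scan described in this javadoc is the pattern the tests below rely on: next() returns false once the cursor would pass the last page under PF_SHARED_READ_LOCK, and each optimistic read is repeated until shouldRetry() reports a consistent value. A hedged sketch of that shape (same imports as the first sketch, plus PF_SHARED_READ_LOCK; setOffset is assumed from the PageCursor API):

static long sumFirstIntOfEveryPage( PagedFile pagedFile ) throws IOException
{
    long sum = 0;
    try ( PageCursor cursor = pagedFile.io( 0, PF_SHARED_READ_LOCK ) )
    {
        while ( cursor.next() ) // false once the end of the file is reached
        {
            int value;
            do
            {
                cursor.setOffset( 0 );  // re-read from the start of the page on retry
                value = cursor.getInt();
            }
            while ( cursor.shouldRetry() ); // true if a concurrent writer raced with us
            sum += value;
        }
    }
    return sum;
}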
@@ -281,7 +281,7 @@ public EvictionRunEvent beginPageEvictions( int pageCountToEvict )
}

@Override
- public PinEvent beginPin( boolean exclusiveLock, long filePageId, PageSwapper swapper )
+ public PinEvent beginPin( boolean writeLock, long filePageId, PageSwapper swapper )
{
try
{
@@ -53,7 +53,7 @@ public EvictionRunEvent beginPageEvictions( int pageCountToEvict )
}

@Override
- public PinEvent beginPin( boolean exclusiveLock, long filePageId, PageSwapper swapper )
+ public PinEvent beginPin( boolean writeLock, long filePageId, PageSwapper swapper )
{
return PinEvent.NULL;
}
@@ -159,7 +159,7 @@ public String toString()
/**
* A page is to be pinned.
*/
- PinEvent beginPin( boolean exclusiveLock, long filePageId, PageSwapper swapper );
+ PinEvent beginPin( boolean writeLock, long filePageId, PageSwapper swapper );

/**
* A PagedFile wants to flush all its bound pages.
@@ -51,7 +51,7 @@ public PageCursor io( long pageId, int pf_flags ) throws IOException
{
adversary.injectFailure( IllegalStateException.class );
PageCursor pageCursor = delegate.io( pageId, pf_flags );
- if ( (pf_flags & PF_SHARED_LOCK) == PF_SHARED_LOCK )
+ if ( (pf_flags & PF_SHARED_READ_LOCK) == PF_SHARED_READ_LOCK )
{
return new AdversarialReadPageCursor( pageCursor, adversary );
}
@@ -38,7 +38,7 @@
* while loop with {@link PageCursor#shouldRetry()} as a condition.
* <p>
* Write operations will always throw an {@link IllegalStateException} because this is a read cursor.
- * See {@link org.neo4j.io.pagecache.PagedFile#PF_SHARED_LOCK} flag.
+ * See {@link org.neo4j.io.pagecache.PagedFile#PF_SHARED_READ_LOCK} flag.
*/
@SuppressWarnings( "unchecked" )
class AdversarialReadPageCursor implements PageCursor
@@ -34,8 +34,8 @@
* Depending on the adversary each read and write operation can throw either {@link RuntimeException} like
* {@link SecurityException} or {@link IOException} like {@link FileNotFoundException}.
* <p>
- * Read operations will always return a consistent value because the underlying page is exclusively write locked.
- * See {@link org.neo4j.io.pagecache.PagedFile#PF_EXCLUSIVE_LOCK} flag.
+ * Read operations will always return a consistent value because the underlying page is write locked.
+ * See {@link org.neo4j.io.pagecache.PagedFile#PF_SHARED_WRITE_LOCK} flag.
*/
@SuppressWarnings( "unchecked" )
class AdversarialWritePageCursor implements PageCursor
@@ -49,8 +49,8 @@
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
- import static org.neo4j.io.pagecache.PagedFile.PF_EXCLUSIVE_LOCK;
- import static org.neo4j.io.pagecache.PagedFile.PF_SHARED_LOCK;
+ import static org.neo4j.io.pagecache.PagedFile.PF_SHARED_WRITE_LOCK;
+ import static org.neo4j.io.pagecache.PagedFile.PF_SHARED_READ_LOCK;
import static org.neo4j.test.ByteArrayMatcher.byteArray;

public abstract class PageCacheSlowTest<T extends PageCache> extends PageCacheTestSupport<T>
@@ -90,7 +90,7 @@ public void mustNotLoseUpdates() throws Exception
final PagedFile pagedFile = pageCache.map( file( "a" ), pageSize );

// Ensure all the pages exist
- try ( PageCursor cursor = pagedFile.io( 0, PF_EXCLUSIVE_LOCK ) )
+ try ( PageCursor cursor = pagedFile.io( 0, PF_SHARED_WRITE_LOCK ) )
{
for ( int i = 0; i < filePages; i++ )
{
@@ -135,7 +135,7 @@ public Result call() throws Exception
int pageId = rng.nextInt( 0, filePages );
int offset = threadId * 4;
boolean updateCounter = rng.nextBoolean();
- int pf_flags = updateCounter? PF_EXCLUSIVE_LOCK : PF_SHARED_LOCK;
+ int pf_flags = updateCounter ? PF_SHARED_WRITE_LOCK : PF_SHARED_READ_LOCK;
try ( PageCursor cursor = pagedFile.io( pageId, pf_flags ) )
{
int counter;
@@ -148,7 +148,7 @@ public Result call() throws Exception
counter = cursor.getInt();
}
while ( cursor.shouldRetry() );
- String lockName = updateCounter ? "PF_EXCLUSIVE_LOCK" : "PF_SHARED_LOCK";
+ String lockName = updateCounter ? "PF_SHARED_WRITE_LOCK" : "PF_SHARED_READ_LOCK";
assertThat( "inconsistent page read from filePageId = " + pageId + ", with " + lockName +
", workerId = " + threadId + " [t:" + Thread.currentThread().getId() + "]",
counter, is( pageCounts[pageId] ) );
@@ -184,7 +184,7 @@ public Result call() throws Exception
for ( Future<Result> future : futures )
{
Result result = future.get();
- try ( PageCursor cursor = pagedFile.io( 0, PF_SHARED_LOCK ) )
+ try ( PageCursor cursor = pagedFile.io( 0, PF_SHARED_READ_LOCK ) )
{
for ( int i = 0; i < filePages; i++ )
{
@@ -232,7 +232,7 @@ public void writeLockingCursorMustThrowWhenLockingPageRacesWithUnmapping() throw
final CountDownLatch secondThreadGotLockLatch = new CountDownLatch( 1 );

executor.submit( () -> {
- try ( PageCursor cursor = pf.io( 0, PF_EXCLUSIVE_LOCK ) )
+ try ( PageCursor cursor = pf.io( 0, PF_SHARED_WRITE_LOCK ) )
{
cursor.next();
hasLockLatch.countDown();
@@ -241,10 +241,10 @@ public void writeLockingCursorMustThrowWhenLockingPageRacesWithUnmapping() throw
return null;
} );

- hasLockLatch.await(); // An exclusive lock is now held on page 0.
+ hasLockLatch.await(); // A write lock is now held on page 0.

Future<Object> takeLockFuture = executor.submit( () -> {
- try ( PageCursor cursor = pf.io( 0, PF_EXCLUSIVE_LOCK ) )
+ try ( PageCursor cursor = pf.io( 0, PF_SHARED_WRITE_LOCK ) )
{
cursor.next();
secondThreadGotLockLatch.await();
@@ -264,11 +264,11 @@ public void writeLockingCursorMustThrowWhenLockingPageRacesWithUnmapping() throw
}
catch ( TimeoutException e )
{
- // As expected, the close cannot not complete while an exclusive
+ // As expected, the close cannot complete while a write
// lock is held
}

- // Now, both the close action and a grab for an exclusive page lock is
+ // Now, both the close action and a grab for a write page lock are
// waiting for our first thread.
// When we release that lock, we should see that either close completes
// and our second thread, the one blocked on the write lock, gets an
@@ -331,7 +331,7 @@ public void pageCacheMustRemainInternallyConsistentWhenGettingRandomFailures() t
long maxPageId = pagedFile.getLastPageId();
boolean performingRead = rng.nextBoolean() && maxPageId != -1;
long startingPage = maxPageId < 0? 0 : rng.nextLong( maxPageId + 1 );
- int pf_flags = performingRead ? PF_SHARED_LOCK : PF_EXCLUSIVE_LOCK;
+ int pf_flags = performingRead ? PF_SHARED_READ_LOCK : PF_SHARED_WRITE_LOCK;
int pageSize = pagedFile.pageSize();

try ( PageCursor cursor = pagedFile.io( startingPage, pf_flags ) )
@@ -349,7 +349,7 @@ public void pageCacheMustRemainInternallyConsistentWhenGettingRandomFailures() t
{
// Capture any exception that might have hit the eviction thread.
adversary.setProbabilityFactor( 0.0 );
- try ( PageCursor cursor = pagedFile.io( 0, PF_EXCLUSIVE_LOCK ) )
+ try ( PageCursor cursor = pagedFile.io( 0, PF_SHARED_WRITE_LOCK ) )
{
for ( int j = 0; j < 100; j++ )
{
@@ -438,7 +438,7 @@ private void performConsistentAdversarialWrite( PageCursor cursor, ThreadLocalRa

private void verifyAdversarialPagedContent( PagedFile pagedFile ) throws IOException
{
- try ( PageCursor cursor = pagedFile.io( 0, PF_SHARED_LOCK ) )
+ try ( PageCursor cursor = pagedFile.io( 0, PF_SHARED_READ_LOCK ) )
{
while ( cursor.next() )
{
