Skip to content

Commit

Permalink
pool: Reduce number of stat calls in Berkeley DB backend
Browse files Browse the repository at this point in the history
Motivation:

Java 7 introduced a new API for querying file attributes as a bulk
operation.

Modification:

Use Files.getFileAttributeView(...).readAttributes() to read all file
attributes in a single stat system call.

Restructure replica creation and replica loading to reduce duplicated
or unnecessary stat calls further.

Result:

No user visible changes.

Target: trunk
Require-notes: no
Require-book: no
Acked-by: Albert Rossi <arossi@fnal.gov>
Acked-by: Dmitry Litvintsev <litvinse@fnal.gov>

Reviewed at https://rb.dcache.org/r/9426/
  • Loading branch information
gbehrmann committed Jun 28, 2016
1 parent 40a06c1 commit 4a7a21d
Show file tree
Hide file tree
Showing 2 changed files with 30 additions and 32 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,9 @@
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.nio.file.attribute.BasicFileAttributeView;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.List;
import java.util.Map;
import java.util.Properties;
Expand Down Expand Up @@ -181,18 +184,23 @@ public MetaDataRecord get(PnfsId id) throws CacheException
{
try {
File file = _fileStore.get(id);
if (!file.isFile()) {
return null;
BasicFileAttributes attributes =
Files.getFileAttributeView(file.toPath(), BasicFileAttributeView.class).readAttributes();
if (!attributes.isRegularFile()) {
throw new DiskErrorCacheException("Not a regular file: " + file);
}

return CacheRepositoryEntryImpl.load(this, id);
return CacheRepositoryEntryImpl.load(this, id, attributes);
} catch (EnvironmentFailureException e) {
if (!isValid()) {
throw new DiskErrorCacheException("Meta data update failed and a pool restart is required: " + e.getMessage(), e);
throw new DiskErrorCacheException("Meta data lookup failed and a pool restart is required: " + e.getMessage(), e);
}
throw new CacheException("Meta data update failed: " + e.getMessage(), e);
throw new CacheException("Meta data lookup failed: " + e.getMessage(), e);
} catch (OperationFailureException e) {
throw new CacheException("Meta data update failed: " + e.getMessage(), e);
throw new CacheException("Meta data lookup failed: " + e.getMessage(), e);
} catch (NoSuchFileException | FileNotFoundException e) {
return null;
} catch (IOException e) {
throw new CacheException("Failed to read " + id + ": " + e.getMessage(), e);
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,9 +11,10 @@
import org.slf4j.LoggerFactory;

import java.io.File;
import java.io.IOException;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.function.Predicate;

import diskCacheV111.util.CacheException;
Expand Down Expand Up @@ -61,35 +62,24 @@ public class CacheRepositoryEntryImpl implements MetaDataRecord

private long _size;

public CacheRepositoryEntryImpl(BerkeleyDBMetaDataRepository repository,
PnfsId pnfsId)
public CacheRepositoryEntryImpl(BerkeleyDBMetaDataRepository repository, PnfsId pnfsId)
{
_repository = repository;
_pnfsId = pnfsId;
_sticky = ImmutableList.of();
_state = EntryState.NEW;
File file = getDataFile();
_lastAccess = file.lastModified();
_size = file.length();
if (_lastAccess == 0) {
_lastAccess = _creationTime;
}
_sticky = ImmutableList.of();
_lastAccess = _creationTime;
}

private CacheRepositoryEntryImpl(BerkeleyDBMetaDataRepository repository,
PnfsId pnfsId,
CacheRepositoryEntryState state)
public CacheRepositoryEntryImpl(BerkeleyDBMetaDataRepository repository, PnfsId pnfsId, EntryState state,
Collection<StickyRecord> sticky, BasicFileAttributes attributes)
{
_repository = repository;
_pnfsId = pnfsId;
_state = state.getState();
setStickyRecords(state.stickyRecords());
File file = getDataFile();
_lastAccess = file.lastModified();
_size = file.length();
if (_lastAccess == 0) {
_lastAccess = _creationTime;
}
_state = state;
setStickyRecords(sticky);
_lastAccess = attributes.lastModifiedTime().toMillis();
_size = attributes.size();
}

private void setStickyRecords(Iterable<StickyRecord> records)
Expand Down Expand Up @@ -305,13 +295,14 @@ private synchronized void storeState() throws CacheException
}
}

static CacheRepositoryEntryImpl load(BerkeleyDBMetaDataRepository repository, PnfsId pnfsId)
static CacheRepositoryEntryImpl load(BerkeleyDBMetaDataRepository repository, PnfsId pnfsId,
BasicFileAttributes attributes) throws IOException
{
try {
String id = pnfsId.toString();
CacheRepositoryEntryState state = repository.getStateMap().get(id);
if (state != null) {
return new CacheRepositoryEntryImpl(repository, pnfsId, state);
return new CacheRepositoryEntryImpl(repository, pnfsId, state.getState(), state.stickyRecords(), attributes);
}
} catch (ClassCastException e) {
_log.warn(e.toString());
Expand All @@ -326,8 +317,7 @@ static CacheRepositoryEntryImpl load(BerkeleyDBMetaDataRepository repository, Pn
}
_log.warn(e.toString());
}

return new CacheRepositoryEntryImpl(repository, pnfsId);
return new CacheRepositoryEntryImpl(repository, pnfsId, EntryState.BROKEN, ImmutableList.of(), attributes);
}

private class UpdatableRecordImpl implements UpdatableRecord
Expand Down

0 comments on commit 4a7a21d

Please sign in to comment.