Skip to content

Commit

Permalink
added more logging to EcoFS
Browse files Browse the repository at this point in the history
git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@4661 6c8d7289-2bf4-0310-a012-ef5d649a1542
  • Loading branch information
orbiter committed Apr 7, 2008
1 parent fb541f9 commit 6e36c15
Show file tree
Hide file tree
Showing 2 changed files with 16 additions and 5 deletions.
17 changes: 14 additions & 3 deletions source/de/anomic/kelondro/kelondroEcoFS.java
Expand Up @@ -34,6 +34,8 @@
import java.io.RandomAccessFile;
import java.util.Iterator;

import de.anomic.server.logging.serverLog;

/**
* The EcoFS is a flat file with records of fixed length. The file does not contain
* any meta information and the first record starts right at file position 0
Expand Down Expand Up @@ -528,8 +530,10 @@ private synchronized void cleanLast0() throws IOException {

public static class ChunkIterator implements Iterator<byte[]> {

private int recordsize, chunksize;
private int recordsize, chunksize, chunkcounter;
private DataInputStream stream;
private serverLog log;
private File file;

/**
* create a ChunkIterator
Expand All @@ -540,12 +544,15 @@ public static class ChunkIterator implements Iterator<byte[]> {
* @param chunksize: the size of the chunks that are returned by next(). remaining bytes until the length of recordsize are skipped
* @throws FileNotFoundException
*/
public ChunkIterator(File file, int recordsize, int chunksize) throws FileNotFoundException {
public ChunkIterator(File file, int recordsize, int chunksize, serverLog log) throws FileNotFoundException {
assert (file.exists());
assert file.length() % recordsize == 0;
this.recordsize = recordsize;
this.chunksize = chunksize;
this.chunkcounter = 0; // only for logging
this.stream = new DataInputStream(new BufferedInputStream(new FileInputStream(file), 64 * 1024));
this.log = log;
this.file = file;
}

public boolean hasNext() {
Expand All @@ -570,7 +577,11 @@ public byte[] next() {
}
return chunk;
} catch (IOException e) {
e.printStackTrace();
if (log == null) {
serverLog.logWarning("kelondroEcoFS", "ChunkIterator for file " + file.toString() + " ended with " + e.getCause().getMessage() + " at chunk " + this.chunkcounter, e);
} else {
log.logWarning("ChunkIterator for file " + file.toString() + " ended with " + e.getCause().getMessage() + " at chunk " + this.chunkcounter, e);
}
this.stream = null;
return null;
}
Expand Down
4 changes: 2 additions & 2 deletions source/de/anomic/kelondro/kelondroEcoTable.java
Expand Up @@ -139,7 +139,7 @@ public kelondroEcoTable(File tablefile, kelondroRow rowdef, int useTailCache, in
} else {
byte[] record;
key = new byte[rowdef.primaryKeyLength];
Iterator<byte[]> ri = new kelondroEcoFS.ChunkIterator(tablefile, rowdef.objectsize, rowdef.objectsize);
Iterator<byte[]> ri = new kelondroEcoFS.ChunkIterator(tablefile, rowdef.objectsize, rowdef.objectsize, null);
while (ri.hasNext()) {
record = ri.next();
assert record != null;
Expand Down Expand Up @@ -217,7 +217,7 @@ assert record != null;
*/
public Iterator<byte[]> keyIterator(File file, kelondroRow rowdef) throws FileNotFoundException {
    // Key iteration only works when the primary key is the leading column of each record.
    assert rowdef.primaryKeyIndex == 0;
    // Read full records (objectsize) but hand back only the leading primaryKeyLength bytes;
    // no logger is available here, so pass null and let ChunkIterator use its fallback channel.
    final int record = rowdef.objectsize;
    final int keylen = rowdef.primaryKeyLength;
    return new kelondroEcoFS.ChunkIterator(file, record, keylen, null);
}

public static long tableSize(File tablefile, int recordsize) {
Expand Down

0 comments on commit 6e36c15

Please sign in to comment.