Skip to content

Commit

Permalink
some security additions, keep maximum byte[] size to 2^27
Browse files Browse the repository at this point in the history
git-svn-id: https://svn.berlios.de/svnroot/repos/yacy/trunk@4350 6c8d7289-2bf4-0310-a012-ef5d649a1542
  • Loading branch information
orbiter committed Jan 20, 2008
1 parent 2f3b2f3 commit f945ee2
Show file tree
Hide file tree
Showing 3 changed files with 50 additions and 46 deletions.
32 changes: 16 additions & 16 deletions source/de/anomic/kelondro/kelondroBufferedEcoFS.java
Expand Up @@ -34,7 +34,7 @@ public class kelondroBufferedEcoFS {

private kelondroEcoFS efs;
private int maxEntries;
private TreeMap<Integer, byte[]> buffer;
private TreeMap<Long, byte[]> buffer;

/*
* The kelondroBufferedEcoFS extends the IO reduction to EcoFS by providing a
Expand All @@ -45,20 +45,20 @@ public class kelondroBufferedEcoFS {
public kelondroBufferedEcoFS(kelondroEcoFS efs, int maxEntries) throws IOException {
this.efs = efs;
this.maxEntries = maxEntries;
this.buffer = new TreeMap<Integer, byte[]>();
this.buffer = new TreeMap<Long, byte[]>();
}

private void flushBuffer() throws IOException {
Iterator<Map.Entry<Integer, byte[]>> i = buffer.entrySet().iterator();
Map.Entry<Integer, byte[]> entry;
Iterator<Map.Entry<Long, byte[]>> i = buffer.entrySet().iterator();
Map.Entry<Long, byte[]> entry;
while (i.hasNext()) {
entry = i.next();
efs.put(entry.getKey().intValue(), entry.getValue(), 0);
}
buffer.clear();
}

public synchronized int size() throws IOException {
public synchronized long size() throws IOException {
return efs.size();
}

Expand All @@ -80,26 +80,26 @@ public synchronized void finalize() {
if (this.efs != null) this.close();
}

public synchronized void get(int index, byte[] b, int start) throws IOException {
public synchronized void get(long index, byte[] b, int start) throws IOException {
assert b.length - start >= efs.recordsize;
if (index >= size()) throw new IndexOutOfBoundsException("kelondroBufferedEcoFS.get(" + index + ") outside bounds (" + this.size() + ")");
byte[] bb = buffer.get(new Integer(index));
byte[] bb = buffer.get(new Long(index));
if (bb == null) {
efs.get(index, b, start);
} else {
System.arraycopy(bb, 0, b, start, efs.recordsize);
}
}

public synchronized void put(int index, byte[] b, int start) throws IOException {
public synchronized void put(long index, byte[] b, int start) throws IOException {
assert b.length - start >= efs.recordsize;
if (index > size()) throw new IndexOutOfBoundsException("kelondroEcoFS.put(" + index + ") outside bounds (" + this.size() + ")");
if (index == efs.size()) {
efs.put(index, b, start);
} else {
byte[] bb = new byte[efs.recordsize];
System.arraycopy(b, start, bb, 0, efs.recordsize);
buffer.put(new Integer(index), bb);
buffer.put(new Long(index), bb);
if (buffer.size() > this.maxEntries) flushBuffer();
}
}
Expand All @@ -108,28 +108,28 @@ public synchronized void add(byte[] b, int start) throws IOException {
put(size(), b, start);
}

public synchronized void clean(int index, byte[] b, int start) throws IOException {
public synchronized void clean(long index, byte[] b, int start) throws IOException {
assert b.length - start >= efs.recordsize;
if (index >= size()) throw new IndexOutOfBoundsException("kelondroBufferedEcoFS.clean(" + index + ") outside bounds (" + this.size() + ")");
byte[] bb = buffer.get(new Integer(index));
byte[] bb = buffer.get(new Long(index));
if (bb == null) {
efs.clean(index, b, start);
} else {
System.arraycopy(bb, 0, b, start, efs.recordsize);
buffer.remove(new Integer(index));
buffer.remove(new Long(index));
efs.clean(index);
}
}

public synchronized void clean(int index) throws IOException {
public synchronized void clean(long index) throws IOException {
if (index >= size()) throw new IndexOutOfBoundsException("kelondroBufferedEcoFS.clean(" + index + ") outside bounds (" + this.size() + ")");
buffer.remove(new Integer(index));
buffer.remove(new Long(index));
efs.clean(index);
}

public synchronized void cleanLast(byte[] b, int start) throws IOException {
assert b.length - start >= efs.recordsize;
Integer i = new Integer(size() - 1);
Long i = new Long(size() - 1);
byte[] bb = buffer.get(i);
if (bb == null) {
efs.clean(i.intValue(), b, start);
Expand All @@ -141,7 +141,7 @@ public synchronized void cleanLast(byte[] b, int start) throws IOException {
}

public synchronized void cleanLast() throws IOException {
Integer i = new Integer(size() - 1);
Long i = new Long(size() - 1);
buffer.remove(i);
efs.clean(i.intValue());
}
Expand Down
51 changes: 26 additions & 25 deletions source/de/anomic/kelondro/kelondroEcoFS.java
Expand Up @@ -53,7 +53,8 @@ public class kelondroEcoFS {
private RandomAccessFile raf;
private File tablefile;
protected int recordsize; // number of bytes in one record
private int cacheindex, cachecount, buffercount; // number of entries in buffer
private long cacheindex;
private int cachecount, buffercount; // number of entries in buffer
private byte[] cache, buffer, zero;

private static final int maxBuffer = 4 * 1024; // stay below hard disc cache (is that necessary?)
Expand Down Expand Up @@ -98,49 +99,49 @@ public kelondroEcoFS(File tablefile, int recordsize) throws IOException {
fillCache(0);
}

public static long tableSize(File tablefile, int recordsize) {
public static long tableSize(File tablefile, long recordsize) {
// returns number of records in table
if (!tablefile.exists()) return 0;
long size = tablefile.length();
assert size % recordsize == 0;
return size / recordsize;
return size / (long) recordsize;
}

public synchronized int size() throws IOException {
public synchronized long size() throws IOException {
// return the number of records in file plus number of records in buffer
return filesize() + this.buffercount;
return filesize() + (long) this.buffercount;
}

public File filename() {
return this.tablefile;
}

private int filesize() throws IOException {
return (int) (raf.length() / recordsize);
private long filesize() throws IOException {
return raf.length() / (long) recordsize;
}

private int inCache(int index) {
private int inCache(long index) {
// checks if the index is inside the cache and returns the index offset inside
// the cache if the index is inside the cache
// returns -1 if the index is not in the cache
if ((index >= this.cacheindex) && (index < this.cacheindex + this.cachecount)) {
return index - this.cacheindex;
return (int) (index - this.cacheindex);
}
return -1;
}

private int inBuffer(int index) throws IOException {
private int inBuffer(long index) throws IOException {
// checks if the index is inside the buffer and returns the index offset inside
// the buffer if the index is inside the buffer
// returns -1 if the index is not in the buffer
int fs = filesize();
long fs = filesize();
if ((index >= fs) && (index < fs + this.buffercount)) {
return index - fs;
return (int) (index - fs);
}
return -1;
}

private void fillCache(int index) throws IOException {
private void fillCache(long index) throws IOException {
// load cache with copy of disc content; start with record at index
// if the record would overlap with the write buffer,
// its start is shifted forward until it fits
Expand All @@ -150,14 +151,14 @@ private void fillCache(int index) throws IOException {
if (inCache(index) >= 0) return;

// calculate new start position
int fs = this.filesize();
long fs = this.filesize();
if (index + this.cache.length / this.recordsize > fs) {
index = fs - this.cache.length / this.recordsize;
}
if (index < 0) index = 0;

// calculate number of records that shall be stored in the cache
this.cachecount = Math.min(this.cache.length / this.recordsize, this.filesize() - index);
this.cachecount = (int) Math.min(this.cache.length / this.recordsize, this.filesize() - index);
assert this.cachecount >= 0;

// check if we need to read 0 bytes from the file
Expand Down Expand Up @@ -195,7 +196,7 @@ public synchronized void close() {
cache = null;
}

public synchronized void get(int index, byte[] b, int start) throws IOException {
public synchronized void get(long index, byte[] b, int start) throws IOException {
assert b.length - start >= this.recordsize;
if (index >= size()) throw new IndexOutOfBoundsException("kelondroEcoFS.get(" + index + ") outside bounds (" + this.size() + ")");
// check if index is inside of cache
Expand All @@ -220,7 +221,7 @@ public synchronized void get(int index, byte[] b, int start) throws IOException
assert false;
}

public synchronized void put(int index, byte[] b, int start) throws IOException {
public synchronized void put(long index, byte[] b, int start) throws IOException {
assert b.length - start >= this.recordsize;
if (index > size()) throw new IndexOutOfBoundsException("kelondroEcoFS.put(" + index + ") outside bounds (" + this.size() + ")");
// check if this is an empty entry
Expand All @@ -247,7 +248,7 @@ public synchronized void put(int index, byte[] b, int start) throws IOException
// append the record to the end of the file;

// look if there is space in the buffer
int bufferpos = index - filesize();
int bufferpos = (int) (index - filesize());
if (bufferpos >= this.buffer.length / this.recordsize) {
assert this.buffercount == this.buffer.length / this.recordsize;
// the record does not fit in current buffer
Expand Down Expand Up @@ -281,7 +282,7 @@ private boolean isClean(byte[] b, int offset, int length) {
return true;
}

private boolean isClean(int index) throws IOException {
private boolean isClean(long index) throws IOException {
assert index < size();
// check if index is inside of cache
int p = inCache(index);
Expand All @@ -304,7 +305,7 @@ private boolean isClean(int index) throws IOException {
return false;
}

public synchronized void clean(int index, byte[] b, int start) throws IOException {
public synchronized void clean(long index, byte[] b, int start) throws IOException {
// removes an entry by cleaning (writing zero bytes to the file)
// the entry that had been at the specific place before is copied to the given array b
// if the last entry in the file was cleaned, the file shrinks by the given record
Expand Down Expand Up @@ -349,7 +350,7 @@ public synchronized void clean(int index, byte[] b, int start) throws IOExceptio
assert false;
}

public synchronized void clean(int index) throws IOException {
public synchronized void clean(long index) throws IOException {
if (index >= size()) throw new IndexOutOfBoundsException("kelondroEcoFS.clean(" + index + ") outside bounds (" + this.size() + ")");
if (index == size() - 1) {
cleanLast();
Expand Down Expand Up @@ -378,7 +379,7 @@ public synchronized void clean(int index) throws IOException {

public synchronized void cleanLast(byte[] b, int start) throws IOException {
cleanLast0(b, start);
int i;
long i;
while (((i = size()) > 0) && (isClean(i - 1))) {
//System.out.println("Extra clean/1: before size = " + size());
cleanLast0();
Expand Down Expand Up @@ -422,7 +423,7 @@ private synchronized void cleanLast0(byte[] b, int start) throws IOException {

public synchronized void cleanLast() throws IOException {
cleanLast0();
int i;
long i;
while (((i = size()) > 0) && (isClean(i - 1))) {
//System.out.println("Extra clean/0: before size = " + size());
cleanLast0();
Expand All @@ -433,8 +434,8 @@ public synchronized void cleanLast() throws IOException {
private synchronized void cleanLast0() throws IOException {

// check if index is inside of cache
int p = inCache(this.size() - 1);
int q = (p >= 0) ? -1 : inBuffer(this.size() - 1);
long p = inCache(this.size() - 1);
long q = (p >= 0) ? -1 : inBuffer(this.size() - 1);
if (p >= 0) {
// shrink cache and file
assert this.buffercount == 0;
Expand Down
13 changes: 8 additions & 5 deletions source/de/anomic/kelondro/kelondroEcoTable.java
Expand Up @@ -59,6 +59,8 @@ public class kelondroEcoTable implements kelondroIndex {
public static final int tailCacheForceUsage = 1;
public static final int tailCacheUsageAuto = 2;

public static final long maxarraylength = 134217727; // that may be the maximum size of array length in some JVMs

private kelondroRowSet table;
private kelondroBytesIntMap index;
private kelondroBufferedEcoFS file;
Expand Down Expand Up @@ -94,18 +96,19 @@ public kelondroEcoTable(File tablefile, kelondroRow rowdef, int useTailCache, in
this.file = new kelondroBufferedEcoFS(new kelondroEcoFS(tablefile, rowdef.objectsize), this.buffersize);

// initialize index and copy table
int records = Math.max(file.size(), initialSpace);
int records = (int) Math.max(file.size(), initialSpace);
long neededRAM4table = 10 * 1024 * 1024 + records * (rowdef.objectsize + 4) * 3 / 2;
table = ((useTailCache == tailCacheForceUsage) ||
((useTailCache == tailCacheUsageAuto) && (serverMemory.request(neededRAM4table, true)))) ?
table = ((neededRAM4table < maxarraylength) &&
((useTailCache == tailCacheForceUsage) ||
((useTailCache == tailCacheUsageAuto) && (serverMemory.request(neededRAM4table, true))))) ?
new kelondroRowSet(taildef, records) : null;
index = new kelondroBytesIntMap(rowdef.primaryKeyLength, rowdef.objectOrder, records);
System.out.println("*** DEBUG: EcoTable " + tablefile.toString() + " has table copy " + ((table == null) ? "DISABLED" : "ENABLED"));

// read all elements from the file into the copy table
byte[] record = new byte[rowdef.objectsize];
byte[] key = new byte[rowdef.primaryKeyLength];
int fs = file.size();
int fs = (int) file.size();
for (int i = 0; i < fs; i++) {
// read entry
file.get(i, record, 0);
Expand Down Expand Up @@ -177,7 +180,7 @@ public static int staticRAMIndexNeed(File f, kelondroRow rowdef) {
public synchronized void addUnique(Entry row) throws IOException {
assert file.size() == index.size() : "file.size() = " + file.size() + ", index.size() = " + index.size();
assert ((table == null) || (table.size() == index.size()));
int i = file.size();
int i = (int) file.size();
index.addi(row.getPrimaryKeyBytes(), i);
if (table != null) {
assert table.size() == i;
Expand Down

0 comments on commit f945ee2

Please sign in to comment.