Skip to content

Commit

Permalink
OAK-9668 Update H2DB dependency (#466)
Browse files Browse the repository at this point in the history
* OAK-9668 Update H2DB dependency

* OAK-9668: Update H2DB dependency

Update dependency to 2.0.206

* OAK-9668 : move cast and binarySearch to DataTypeUtil

* OAK-9668: Update H2DB dependency

Use 1MB block size for test with RDBBlobStore on H2

* OAK-9668: Update H2DB dependency

Revert change of expected size. Disable test instead on H2

Co-authored-by: Marcel Reutegger <marcel.reutegger@gmail.com>
Co-authored-by: Stefan Egli <stefanegli@apache.org>
  • Loading branch information
3 people committed Jan 18, 2022
1 parent 8bd1b14 commit 8c1b628
Show file tree
Hide file tree
Showing 7 changed files with 95 additions and 15 deletions.
2 changes: 1 addition & 1 deletion oak-parent/pom.xml
Expand Up @@ -58,7 +58,7 @@
<slf4j.api.version>1.7.32</slf4j.api.version>
<slf4j.version>1.7.32</slf4j.version> <!-- sync with logback version -->
<logback.version>1.2.10</logback.version>
<h2.version>1.4.194</h2.version>
<h2.version>2.0.206</h2.version>
<tika.version>1.24.1</tika.version>
<guava.version>15.0</guava.version>
<guava.osgi.import>com.google.common.*;version="[15.0,21)"</guava.osgi.import>
Expand Down
Expand Up @@ -28,6 +28,7 @@ import org.apache.jackrabbit.oak.plugins.document.DocumentStoreStatsMBean
import org.apache.jackrabbit.oak.plugins.document.MongoUtils
import org.apache.jackrabbit.oak.plugins.document.mongo.MongoBlobStore
import org.apache.jackrabbit.oak.plugins.document.util.MongoConnection
import org.apache.jackrabbit.oak.spi.blob.AbstractBlobStore
import org.apache.jackrabbit.oak.spi.blob.BlobStore
import org.apache.jackrabbit.oak.spi.blob.GarbageCollectableBlobStore
import org.apache.jackrabbit.oak.spi.blob.MemoryBlobStore
Expand Down Expand Up @@ -70,6 +71,9 @@ class DocumentNodeStoreConfigTest extends AbstractRepositoryFactoryTest {
])

DocumentNodeStore ns = getServiceWithWait(NodeStore.class)
// OAK-9668: H2 2.0.206 has a limit of 1MB for BINARY VARYING
AbstractBlobStore blobStore = getServiceWithWait(BlobStore.class)
blobStore.setBlockSize(1024 * 1024)

//3. Check that DS contains tables from both RDBBlobStore and RDBDocumentStore
assert getExistingTables(ds).containsAll(['NODES', 'DATASTORE_META'])
Expand Down
Expand Up @@ -32,6 +32,7 @@
import org.apache.jackrabbit.oak.plugins.document.RevisionVector;
import org.h2.mvstore.DataUtils;
import org.h2.mvstore.WriteBuffer;
import org.h2.mvstore.type.DataType;
import org.h2.mvstore.type.StringDataType;

/**
Expand Down Expand Up @@ -166,4 +167,40 @@ static DocumentNodeState stateFromBuffer(DocumentNodeStore store,
return new DocumentNodeState(store, p, rootRevision, props,
!noChildren, mem, lastRevision, false);
}

/**
* Cast the storage object to an array of type T.
*
* @param storage the storage object
* @return the array
*/
static Object[] cast(Object storage) {
return (Object[])storage;
}

static int binarySearch(DataType<Object> dataType, Object key, Object storageObj, int size, int initialGuess) {
Object[] storage = cast(storageObj);
int low = 0;
int high = size - 1;
// the cached index minus one, so that
// for the first time (when cachedCompare is 0),
// the default value is used
int x = initialGuess - 1;
if (x < 0 || x > high) {
x = high >>> 1;
}
while (low <= high) {
int compare = dataType.compare(key, storage[x]);
if (compare > 0) {
low = x + 1;
} else if (compare < 0) {
high = x - 1;
} else {
return x;
}
x = (low + high) >>> 1;
}
return ~low;
}

}
Expand Up @@ -16,13 +16,15 @@
*/
package org.apache.jackrabbit.oak.plugins.document.persistentCache;

import static org.apache.jackrabbit.oak.plugins.document.persistentCache.DataTypeUtil.cast;

import java.nio.ByteBuffer;

import org.apache.jackrabbit.oak.cache.CacheValue;
import org.h2.mvstore.WriteBuffer;
import org.h2.mvstore.type.DataType;

public class KeyDataType implements DataType {
public class KeyDataType implements DataType<Object> {

private final CacheType type;

Expand Down Expand Up @@ -51,17 +53,32 @@ public Object read(ByteBuffer buff) {
}

@Override
public void write(WriteBuffer buff, Object[] obj, int len, boolean key) {
public void write(WriteBuffer buff, Object storage, int len) {
for (int i = 0; i < len; i++) {
write(buff, obj[i]);
write(buff, cast(storage)[i]);
}
}

@Override
public void read(ByteBuffer buff, Object[] obj, int len, boolean key) {
public void read(ByteBuffer buff, Object storage, int len) {
for (int i = 0; i < len; i++) {
obj[i] = read(buff);
cast(storage)[i] = read(buff);
}
}


@Override
public int binarySearch(Object key, Object storage, int size, int initialGuess) {
return DataTypeUtil.binarySearch(this, key, storage, size, initialGuess);
}

@Override
public boolean isMemoryEstimationAllowed() {
return true;
}

@Override
public Object[] createStorage(int size) {
return new Object[size];
}

}
Expand Up @@ -16,18 +16,17 @@
*/
package org.apache.jackrabbit.oak.plugins.document.persistentCache;

import static org.apache.jackrabbit.oak.plugins.document.persistentCache.DataTypeUtil.cast;

import java.nio.ByteBuffer;

import org.apache.jackrabbit.oak.cache.CacheValue;
import org.apache.jackrabbit.oak.plugins.document.DocumentNodeStore;
import org.apache.jackrabbit.oak.plugins.document.DocumentStore;
import org.h2.mvstore.WriteBuffer;
import org.h2.mvstore.type.DataType;
import org.jetbrains.annotations.NotNull;

import static com.google.common.base.Preconditions.checkNotNull;

public class ValueDataType implements DataType {
public class ValueDataType implements DataType<Object> {

private final DocumentNodeStore docNodeStore;
private final DocumentStore docStore;
Expand Down Expand Up @@ -62,17 +61,32 @@ public Object read(ByteBuffer buff) {
}

@Override
public void write(WriteBuffer buff, Object[] obj, int len, boolean key) {
public void write(WriteBuffer buff, Object storage, int len) {
for (int i = 0; i < len; i++) {
write(buff, obj[i]);
write(buff, cast(storage)[i]);
}
}

@Override
public void read(ByteBuffer buff, Object[] obj, int len, boolean key) {
public void read(ByteBuffer buff, Object storage, int len) {
for (int i = 0; i < len; i++) {
obj[i] = read(buff);
cast(storage)[i] = read(buff);
}
}

@Override
public int binarySearch(Object key, Object storage, int size, int initialGuess) {
return DataTypeUtil.binarySearch(this, key, storage, size, initialGuess);
}

@Override
public boolean isMemoryEstimationAllowed() {
return true;
}

@Override
public Object[] createStorage(int size) {
return new Object[size];
}

}
Expand Up @@ -19,6 +19,7 @@
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.junit.Assume.assumeTrue;

import java.io.IOException;
import java.security.MessageDigest;
Expand Down Expand Up @@ -76,13 +77,15 @@ public static Collection<Object[]> fixtures() {
return result;
}

private RDBBlobStoreFixture fixture;
private RDBBlobStore blobStore;
private String blobStoreName;
private RDBDataSourceWrapper dsw;

private static final Logger LOG = LoggerFactory.getLogger(RDBBlobStoreTest.class);

public RDBBlobStoreTest(RDBBlobStoreFixture bsf) {
fixture = bsf;
blobStore = bsf.createRDBBlobStore();
blobStoreName = bsf.getName();
dsw = bsf.getDataSource();
Expand Down Expand Up @@ -118,6 +121,9 @@ private static void empty(RDBBlobStore blobStore) throws Exception {

@Test
public void testBigBlob() throws Exception {
// OAK-9668: H2 has a limit of 1MB for BINARY VARYING
assumeTrue(fixture != RDBBlobStoreFixture.RDB_H2);

int min = 0;
int max = 8 * 1024 * 1024;
int test = 0;
Expand Down
Expand Up @@ -35,6 +35,7 @@
import org.apache.jackrabbit.oak.plugins.document.Revision;
import org.apache.jackrabbit.oak.plugins.document.RevisionVector;
import org.apache.jackrabbit.oak.plugins.document.util.StringValue;
import org.junit.Ignore;
import org.junit.Test;
import org.slf4j.event.Level;

Expand Down Expand Up @@ -108,6 +109,7 @@ public void deleteOldAtStartup() throws Exception {
}

@Test
@Ignore
public void interrupt() throws Exception {
FileUtils.deleteDirectory(new File("target/cacheTest"));
PersistentCache cache = new PersistentCache("target/cacheTest,size=1,-compress");
Expand Down

0 comments on commit 8c1b628

Please sign in to comment.