/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.ccsmap;

import java.nio.ByteBuffer;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.hadoop.hbase.classification.InterfaceAudience;

/**
 * Skeleton {@link HeapChunk} backed by a {@link ByteBuffer}.
 * Space is handed out by a lock-free bump-the-pointer allocator; every request is
 * rounded up to an 8-byte boundary. Chunk identity (equals/hashCode) is defined
 * solely by the chunk ID, never by content.
 */
@InterfaceAudience.Private
public abstract class AbstractHeapChunk implements HeapChunk {

  private final long chunkID;
  private final int capacity;
  private final boolean isPooled;

  // Accumulates (requested - aligned) per allocation, i.e. a non-positive running
  // total of the padding bytes introduced by alignment.
  private final AtomicInteger alignOccupancy = new AtomicInteger(0);

  // Offset of the next free byte; advanced only via CAS in allocate().
  protected final AtomicInteger nextFreeOffset = new AtomicInteger(0);
  protected ByteBuffer chunk;

  protected AbstractHeapChunk(long chunkID, int capacity, boolean isPooled) {
    this.chunkID = chunkID;
    this.capacity = capacity;
    this.isPooled = isPooled;
  }

  @Override
  public long getChunkID() {
    return chunkID;
  }

  @Override
  public int getPosition() {
    return nextFreeOffset.get();
  }

  /**
   * Claim {@code len} bytes (rounded up to an 8-byte boundary) from this chunk.
   *
   * @param len requested length in bytes
   * @return start offset of the claimed region, or -1 when the chunk cannot fit it
   */
  @Override
  public int allocate(int len) {
    int requestedLen = len;
    //TODO reuse the removed node's space.
    //TODO add config for support unalign
    int alignedLen = align(requestedLen);

    for (;;) {
      int claimOffset = nextFreeOffset.get();
      if (claimOffset + alignedLen > getLimit()) {
        // Allocation does not fit in the remaining space.
        return -1;
      }
      // Winning the CAS gives us [claimOffset, claimOffset + alignedLen).
      if (nextFreeOffset.compareAndSet(claimOffset, claimOffset + alignedLen)) {
        alignOccupancy.addAndGet(requestedLen - alignedLen);
        return claimOffset;
      }
    }
  }

  // Round len up to the next multiple of 8; a no-op when already aligned.
  private int align(int len) {
    if (len % 8 == 0) {
      return len;
    }
    return (len / 8 + 1) * 8;
  }

  @Override
  public int getLimit() {
    return capacity;
  }

  @Override
  public ByteBuffer getByteBuffer() {
    return chunk;
  }

  @Override
  public boolean isPooledChunk() {
    return isPooled;
  }

  /**
   * View [offset, offset + len) of the backing buffer as an independent ByteBuffer.
   * The slice shares content with the chunk buffer but has its own position/limit.
   */
  @Override
  public ByteBuffer asSubByteBuffer(int offset, int len) {
    ByteBuffer view = chunk.duplicate();
    view.limit(offset + len);
    view.position(offset);
    return view.slice();
  }

  public abstract HeapMode getHeapMode();

  @Override
  public int occupancy() {
    return getLimit() - getPosition();
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (!(obj instanceof AbstractHeapChunk)) {
      return false;
    }
    return getChunkID() == ((AbstractHeapChunk) obj).getChunkID();
  }

  @Override
  public int hashCode() {
    return (int) (getChunkID() & CCSMapUtils.FOUR_BYTES_MARK);
  }

}
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.ccsmap; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; + +@InterfaceAudience.Private +public final class CCSMapUtils { + + private CCSMapUtils() {} + + static final long FOUR_BYTES_MARK = 0xFFFFFFFF; + + static final String CHUNK_CAPACITY_KEY = "hbase.regionserver.memstore.ccsmap.capacity"; + + static final String CHUNK_SIZE_KEY = "hbase.regionserver.memstore.ccsmap.chunksize"; + + static final String INITIAL_CHUNK_COUNT_KEY = "hbase.regionserver.memstore.ccsmap.chunk.initial"; + + static final String USE_OFFHEAP = "hbase.regionserver.memstore.ccsmap.useoffheap"; + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ccsmap/ChunkAllocator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ccsmap/ChunkAllocator.java new file mode 100644 index 000000000000..c994a10a29ae --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ccsmap/ChunkAllocator.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.ccsmap;

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import java.util.concurrent.atomic.AtomicLong;

/**
 * Creates chunks and assigns them IDs. Pooled IDs count up from 0 and stay below
 * maxChunkCount; unpooled IDs count up from maxChunkCount, so the ranges never collide.
 */
@InterfaceAudience.Private
public class ChunkAllocator {

  private final HeapMode heapMode;
  // Unpooled chunk IDs occupy [maxChunkCount, +inf).
  private final AtomicLong unpooledChunkIDGenerator;
  // Pooled chunk IDs occupy [0, maxChunkCount); seeded at -1 so the first
  // incrementAndGet yields 0.
  private final AtomicLong pooledChunkIDGenerator = new AtomicLong(-1);

  public ChunkAllocator(HeapMode heapMode, long maxChunkCount) {
    this.heapMode = heapMode;
    this.unpooledChunkIDGenerator = new AtomicLong(maxChunkCount);
  }

  /**
   * Allocate a pooled chunk of the given size, on- or off-heap according to this
   * allocator's heap mode.
   * @param size size of the chunk in bytes
   * @return the newly created chunk
   */
  AbstractHeapChunk allocatePooledChunk(int size) {
    long id = pooledChunkIDGenerator.incrementAndGet();
    if (heapMode == HeapMode.ON_HEAP) {
      return new OnHeapChunk(id, size);
    }
    return new OffHeapChunk(id, size);
  }

  /**
   * Allocate an unpooled chunk of the given size; unpooled chunks are always on-heap.
   * @param size size of the chunk in bytes
   * @return the newly created chunk
   */
  AbstractHeapChunk allocateUnpooledChunk(int size) {
    return new OnHeapChunk(unpooledChunkIDGenerator.getAndIncrement(), size, false);
  }

}
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.ccsmap;
+
+import java.util.Map;
+import java.util.Queue;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.atomic.AtomicLong;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * It mainly focuses on managing reusable chunks, to be GC friendly.
+ * Also, if the situation requires, this class keeps track of non-reusable chunks.
+ */
+@InterfaceAudience.Private
+public class ChunkPool {
+
+ private static final Log LOG = LogFactory.getLog(ChunkPool.class);
+ private static final Object initLock = new Object();
+ private static volatile ChunkPool globalInstance;
+
+ private final HeapMode heapMode;
+ private final int maxChunkCount;
+ private final int chunkSize;
+ private final ChunkAllocator chunkAllocator;
+ private final AbstractHeapChunk[] chunkArray;
+ private final Queue
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.ccsmap;
+
+import java.util.Map;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.ccsmap.ChunkPool.ChunkPoolParameters;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestChunkPool {
+
+ private Configuration conf = HBaseConfiguration.create();
+
+ @Test
+ public void testNormal() {
+ conf.setLong(CCSMapUtils.CHUNK_CAPACITY_KEY, 8 * 1024 * 1024);
+ conf.setInt(CCSMapUtils.CHUNK_SIZE_KEY, 4 * 1024);
+ conf.setInt(CCSMapUtils.INITIAL_CHUNK_COUNT_KEY, Integer.MAX_VALUE);
+ conf.setBoolean(CCSMapUtils.USE_OFFHEAP, true);
+ ChunkPoolParameters parameters = new ChunkPoolParameters(conf);
+ ChunkPool chunkPool = new ChunkPool(parameters);
+
+ int numberOfChunk = chunkPool.getChunkQueue().size();
+ Assert.assertEquals(2 * 1024, numberOfChunk);
+ Assert.assertEquals(numberOfChunk, chunkPool.getChunkArray().length);
+ Assert.assertEquals(2 * 1024, chunkPool.getCurrentChunkCounter());
+ Assert.assertEquals(2 * 1024, chunkPool.getMaxChunkCount());
+
+ AbstractHeapChunk chunk = chunkPool.allocate(4 * 1024 - 1);
+ Assert.assertTrue(chunk.isPooledChunk());
+ Assert.assertEquals(HeapMode.OFF_HEAP, chunk.getHeapMode());
+ Assert.assertEquals(4 * 1024, chunk.getLimit());
+ Assert.assertEquals(0, chunk.getChunkID());
+ Assert.assertEquals(0, chunk.getPosition());
+ Assert.assertEquals(2 * 1024 - 1, chunkPool.getChunkQueue().size());
+
+ for (int i = 0; i < chunkPool.getChunkArray().length; i++) {
+ Assert.assertEquals(i, chunkPool.getChunkArray()[i].getChunkID());
+ }
+
+ Assert.assertEquals(2 * 1024, chunkPool.getCurrentChunkCounter());
+ Assert.assertEquals(0, chunkPool.getUnpooledChunkUsed());
+
+ int unpooledSize = 4 * 1024 + 1;
+ AbstractHeapChunk unpooledChunk = chunkPool.allocate(unpooledSize);
+ Assert.assertTrue(unpooledChunk instanceof OnHeapChunk);
+ Assert.assertFalse(unpooledChunk.isPooledChunk());
+ Assert.assertEquals(HeapMode.ON_HEAP, unpooledChunk.getHeapMode());
+ Assert.assertEquals(4 * 1024 + 1, unpooledChunk.getLimit());
+ long maxChunkCount = chunkPool.getMaxChunkCount();
+ Assert.assertEquals(maxChunkCount, unpooledChunk.getChunkID());
+ // Nothing changed in chunks pool
+ Assert.assertEquals(2 * 1024 - 1, chunkPool.getChunkQueue().size());
+ Assert.assertEquals(2 * 1024, chunkPool.getChunkArray().length);
+ Assert.assertEquals(2 * 1024, chunkPool.getCurrentChunkCounter());
+ Assert.assertEquals(unpooledSize, chunkPool.getUnpooledChunkUsed());
+ Map
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.ccsmap;
+
+import java.nio.ByteBuffer;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestHeapChunk {
+
+ @Test
+ public void testOnHeapNormal() {
+ int len = 4 * 1024 * 1024; // 4MB
+ long chunkID = 1234;
+ OnHeapChunk chunk1 = new OnHeapChunk(chunkID, len);
+ Assert.assertEquals(1234, chunk1.getChunkID());
+ Assert.assertEquals(0, chunk1.getPosition());
+ Assert.assertEquals(len, chunk1.getLimit());
+ Assert.assertTrue(chunk1.isPooledChunk());
+ Assert.assertEquals(len, chunk1.occupancy());
+ Assert.assertEquals(HeapMode.ON_HEAP, chunk1.getHeapMode());
+ Assert.assertEquals(chunkID, chunk1.hashCode());
+ Assert.assertNotNull(chunk1.getByteBuffer());
+
+ int bytes1 = 1023;
+ int startPosition1 = chunk1.allocate(bytes1);
+ Assert.assertEquals(0, startPosition1);
+ // Since alignment happened, it should start from 1024
+ Assert.assertEquals(1024, chunk1.getPosition());
+ Assert.assertEquals(len - 1024, chunk1.occupancy());
+
+ int bytes2 = 1025;
+ int startPosistion2 = chunk1.allocate(bytes2);
+ Assert.assertEquals(1024, startPosistion2);
+ // Since alignment happened, it should start from 1024 + (1024 + 8)
+ Assert.assertEquals(1024 + 1032, chunk1.getPosition());
+ Assert.assertEquals(len - 1024 - 1032, chunk1.occupancy());
+
+ ByteBuffer bb = chunk1.getByteBuffer();
+ Assert.assertEquals(len, bb.limit());
+ Assert.assertEquals(len, bb.capacity());
+ Assert.assertEquals(0, bb.position());
+
+ int len2 = 4096;
+ ByteBuffer bb2 = chunk1.asSubByteBuffer(100, len2);
+ Assert.assertEquals(len2, bb2.limit());
+ Assert.assertEquals(len2, bb2.capacity());
+ Assert.assertEquals(0, bb2.position());
+
+ OnHeapChunk chunk2 = new OnHeapChunk(1234, len);
+ //As long as chunkID is same, Chunk is the same
+ Assert.assertEquals(chunk1, chunk2);
+
+ OnHeapChunk chunk3 = new OnHeapChunk(1235, len, false);
+ Assert.assertFalse(chunk3.isPooledChunk());
+ }
+
+ @Test
+ public void testOffHeapNormal() {
+ int len = 4 * 1024 * 1024; // 4MB
+ long chunkID = 1234;
+ OffHeapChunk chunk1 = new OffHeapChunk(chunkID, len);
+ Assert.assertEquals(1234, chunk1.getChunkID());
+ Assert.assertEquals(0, chunk1.getPosition());
+ Assert.assertEquals(len, chunk1.getLimit());
+ Assert.assertTrue(chunk1.isPooledChunk());
+ Assert.assertEquals(len, chunk1.occupancy());
+ Assert.assertEquals(HeapMode.OFF_HEAP, chunk1.getHeapMode());
+ Assert.assertEquals(chunkID, chunk1.hashCode());
+ Assert.assertNotNull(chunk1.getByteBuffer());
+
+ int bytes1 = 1023;
+ int startPosition1 = chunk1.allocate(bytes1);
+ Assert.assertEquals(0, startPosition1);
+ // Since alignment happened, it should start from 1024
+ Assert.assertEquals(1024, chunk1.getPosition());
+ Assert.assertEquals(len - 1024, chunk1.occupancy());
+
+ int bytes2 = 1025;
+ int startPosistion2 = chunk1.allocate(bytes2);
+ Assert.assertEquals(1024, startPosistion2);
+ // Since alignment happened, it should start from 1024 + (1024 + 8)
+ Assert.assertEquals(1024 + 1032, chunk1.getPosition());
+ Assert.assertEquals(len - 1024 - 1032, chunk1.occupancy());
+
+ ByteBuffer bb = chunk1.getByteBuffer();
+ Assert.assertEquals(len, bb.limit());
+ Assert.assertEquals(len, bb.capacity());
+ Assert.assertEquals(0, bb.position());
+
+ int len2 = 4096;
+ ByteBuffer bb2 = chunk1.asSubByteBuffer(100, len2);
+ Assert.assertEquals(len2, bb2.limit());
+ Assert.assertEquals(len2, bb2.capacity());
+ Assert.assertEquals(0, bb2.position());
+
+ OffHeapChunk chunk2 = new OffHeapChunk(1234, len);
+ //As long as chunkID is same, Chunk is the same
+ Assert.assertEquals(chunk1, chunk2);
+
+ OffHeapChunk chunk3 = new OffHeapChunk(1235, len, false);
+ Assert.assertFalse(chunk3.isPooledChunk());
+ }
+
+ @Test
+ public void testConcurrentWriteOnHeap() throws Exception {
+ int len = 4 * 1024 * 1024;
+ OnHeapChunk chunk = new OnHeapChunk(1234, len);
+
+ int concurrent = 50;
+ final ByteBuffer[] bbArray = new ByteBuffer[concurrent];
+
+ for (int i = 0; i < concurrent; i++) {
+ bbArray[i] = chunk.asSubByteBuffer(i * 2049, 1023);
+ }
+
+ final AtomicBoolean hasError = new AtomicBoolean(false);
+ Thread[] ths = new Thread[concurrent];
+
+ for (int i = 0; i < concurrent; i++) {
+ final int thid = i;
+ ths[i] = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ ByteBuffer bb = ByteBuffer.allocate(13);
+ bb.put((byte) thid);
+ bb.putInt(thid);
+ bb.putLong(thid);
+ bb.flip();
+ try {
+ Assert.assertEquals(0, bbArray[thid].position());
+ Thread.sleep(100);
+ bbArray[thid].put((byte) thid);
+ Assert.assertEquals(1, bbArray[thid].position());
+ Thread.sleep(100);
+ bbArray[thid].putInt(thid);
+ Assert.assertEquals(1 + 4, bbArray[thid].position());
+ Thread.sleep(100);
+ bbArray[thid].putLong(thid);
+ Assert.assertEquals(1 + 4 + 8, bbArray[thid].position());
+ Thread.sleep(100);
+ bbArray[thid].put(bb);
+ Assert.assertEquals(1 + 4 + 8 + 13, bbArray[thid].position());
+ } catch (Throwable e) {
+ e.printStackTrace();
+ hasError.set(true);
+ }
+ }
+ });
+ }
+
+ for (int j = 0; j < concurrent; j++) {
+ ths[j].start();
+ }
+
+ for (int j = 0; j < concurrent; j++) {
+ ths[j].join();
+ }
+
+ Assert.assertFalse(hasError.get());
+
+ for (int j = 0; j < concurrent; j++) {
+ bbArray[j].rewind();
+ Assert.assertEquals(0, bbArray[j].position());
+ Assert.assertEquals(j, bbArray[j].get());
+ Assert.assertEquals(1, bbArray[j].position());
+ Assert.assertEquals(j, bbArray[j].getInt());
+ Assert.assertEquals(1 + 4, bbArray[j].position());
+ Assert.assertEquals(j, bbArray[j].getLong());
+ Assert.assertEquals(1 + 4 + 8, bbArray[j].position());
+ Assert.assertEquals(j, bbArray[j].get());
+ Assert.assertEquals(1 + 4 + 8 + 1, bbArray[j].position());
+ Assert.assertEquals(j, bbArray[j].getInt());
+ Assert.assertEquals(1 + 4 + 8 + 1 + 4, bbArray[j].position());
+ Assert.assertEquals(j, bbArray[j].getLong());
+ Assert.assertEquals(1 + 4 + 8 + 1 + 4 + 8, bbArray[j].position());
+ }
+
+ ByteBuffer bb = chunk.getByteBuffer();
+ bb.rewind();
+ for (int j = 0; j < concurrent; j++) {
+ bb.position(j * 2049);
+ Assert.assertEquals(j, bb.get());
+ Assert.assertEquals(j, bb.getInt());
+ Assert.assertEquals(j, bb.getLong());
+ Assert.assertEquals(j, bb.get());
+ Assert.assertEquals(j, bb.getInt());
+ Assert.assertEquals(j, bb.getLong());
+ }
+ }
+
+ @Test
+ public void testConcurrentWriteOffHeap() throws Exception {
+ int len = 4 * 1024 * 1024; // 4MB
+ OffHeapChunk chunk = new OffHeapChunk(1234, len);
+
+ int concurrent = 50;
+ final ByteBuffer[] bbArray = new ByteBuffer[concurrent];
+
+ for (int i = 0; i < concurrent; i++) {
+ bbArray[i] = chunk.asSubByteBuffer(i * 2049, 1023);
+ }
+
+ final AtomicBoolean hasError = new AtomicBoolean(false);
+ Thread[] ths = new Thread[concurrent];
+
+ for (int i = 0; i < concurrent; i++) {
+ final int thid = i;
+ ths[i] = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ ByteBuffer bb = ByteBuffer.allocate(13);
+ bb.put((byte) thid);
+ bb.putInt(thid);
+ bb.putLong(thid);
+ bb.flip();
+ try {
+ Assert.assertEquals(0, bbArray[thid].position());
+ Thread.sleep(1000);
+ bbArray[thid].put((byte) thid);
+ Assert.assertEquals(1, bbArray[thid].position());
+ Thread.sleep(1000);
+ bbArray[thid].putInt(thid);
+ Assert.assertEquals(1 + 4, bbArray[thid].position());
+ Thread.sleep(1000);
+ bbArray[thid].putLong(thid);
+ Assert.assertEquals(1 + 4 + 8, bbArray[thid].position());
+ Thread.sleep(1000);
+ bbArray[thid].put(bb);
+ Assert.assertEquals(1 + 4 + 8 + 13, bbArray[thid].position());
+ } catch (Throwable e) {
+ e.printStackTrace();
+ hasError.set(true);
+ }
+ }
+ });
+ }
+
+ for (int j = 0; j < concurrent; j++) {
+ ths[j].start();
+ }
+
+ for (int j = 0; j < concurrent; j++) {
+ ths[j].join();
+ }
+
+ Assert.assertFalse(hasError.get());
+
+ for (int j = 0; j < concurrent; j++) {
+ bbArray[j].rewind();
+ Assert.assertEquals(0, bbArray[j].position());
+ Assert.assertEquals(j, bbArray[j].get());
+ Assert.assertEquals(1, bbArray[j].position());
+ Assert.assertEquals(j, bbArray[j].getInt());
+ Assert.assertEquals(1 + 4, bbArray[j].position());
+ Assert.assertEquals(j, bbArray[j].getLong());
+ Assert.assertEquals(1 + 4 + 8, bbArray[j].position());
+ Assert.assertEquals(j, bbArray[j].get());
+ Assert.assertEquals(1 + 4 + 8 + 1, bbArray[j].position());
+ Assert.assertEquals(j, bbArray[j].getInt());
+ Assert.assertEquals(1 + 4 + 8 + 1 + 4, bbArray[j].position());
+ Assert.assertEquals(j, bbArray[j].getLong());
+ Assert.assertEquals(1 + 4 + 8 + 1 + 4 + 8, bbArray[j].position());
+ }
+
+ ByteBuffer bb = chunk.getByteBuffer();
+ bb.rewind();
+ for (int j = 0; j < concurrent; j++) {
+ bb.position(j * 2049);
+ Assert.assertEquals(j, bb.get());
+ Assert.assertEquals(j, bb.getInt());
+ Assert.assertEquals(j, bb.getLong());
+ Assert.assertEquals(j, bb.get());
+ Assert.assertEquals(j, bb.getInt());
+ Assert.assertEquals(j, bb.getLong());
+ }
+ }
+
+}