Fix edge cases in getting readable and writable ByteBuffers from PagedBuffer.
Topher committed Sep 30, 2012
1 parent b37463a commit 6f69b9a
Showing 3 changed files with 105 additions and 37 deletions.
project/CpsBuild.scala: 2 changes (1 addition, 1 deletion)
```diff
@@ -35,7 +35,7 @@ object CpsBuild extends Build {
     Seq (
       organization := "com.treode",
       name := "cps",
-      version := "0.2.0",
+      version := "0.2.1-SNAPSHOT",
       scalaVersion := "2.9.2",
 
       addCompilerPlugin ("org.scala-lang.plugins" % "continuations" % "2.9.2"),
```
src/main/scala/com/treode/cps/buffer/PagedBuffer.scala: 47 changes (31 additions, 16 deletions)
```diff
@@ -99,25 +99,40 @@ private final class PagedBuffer private [buffer] (
   }
 
   def readableByteBuffers: Array [ByteBuffer] = {
-    val m = pageOf (_readAt)
-    val n = pageOf (_writeAt)
-    val bs = new Array [ByteBuffer] (n - m + 1)
-    for (i <- m to n)
-      bs (i-m) = ByteBuffer.wrap (pages (i))
-    bs (0) .position (pageIndexOf (_readAt))
-    bs (n-m) .limit (pageIndexOf (_writeAt))
-    bs
-  }
+    if (_writeAt == _readAt) {
+      val m = pageOf (_readAt)
+      if (m >= pages.length) {
+        new Array [ByteBuffer] (0)
+      } else {
+        val bs = new Array [ByteBuffer] (1)
+        bs (0) = ByteBuffer.wrap (pages (m))
+        bs (0) .position (pageIndexOf (_readAt))
+        bs (0) .limit (pageIndexOf (_readAt))
+        bs
+      }
+    } else {
+      val m = pageOf (_readAt)
+      val n = pageOf (_writeAt-1)
+      val bs = new Array [ByteBuffer] (n - m + 1)
+      for (i <- m to n)
+        bs (i-m) = ByteBuffer.wrap (pages (i))
+      bs (0) .position (pageIndexOf (_readAt))
+      bs (n-m) .limit (pageIndexOf (_writeAt-1) + 1)
+      bs
+    }}
 
   def writableByteBuffers: Array [ByteBuffer] = {
     val m = pageOf (_writeAt)
-    val n = pages.length - 1
-    val bs = new Array [ByteBuffer] (n - m + 1)
-    for (i <- m to n)
-      bs (i-m) = ByteBuffer.wrap (pages (i))
-    bs (0) .position (pageIndexOf (_writeAt))
-    bs
-  }
+    if (m >= pages.length) {
+      new Array [ByteBuffer] (0)
+    } else {
+      val n = pages.length - 1
+      val bs = new Array [ByteBuffer] (n - m + 1)
+      for (i <- m to n)
+        bs (i-m) = ByteBuffer.wrap (pages (i))
+      bs (0) .position (pageIndexOf (_writeAt))
+      bs
+    }}
 
   def findByte (start: Int, end: Int, byte: Byte): Int = {
     // In a simple microbenchmark, this was about 30% slower than Netty's HeapBuffer.indexOf. This
```
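A note on the boundary case these changes address: with the write pointer sitting exactly on a page boundary, the old arithmetic could address one page past the last allocated one and set a zero limit on the final ByteBuffer. The sketch below is not the library's code; `pageOf`, `pageIndexOf`, and the 8192-byte page size are assumptions that mirror the helpers used above. It only illustrates why the new code keys the readable range off `_writeAt - 1`.

```scala
import java.nio.ByteBuffer

// Standalone sketch of the page arithmetic; pageOf, pageIndexOf, and the
// 8192-byte page size are assumptions that mirror PagedBuffer's helpers.
object PageBoundarySketch {
  val pageSize = 8192
  def pageOf (pos: Int): Int = pos / pageSize
  def pageIndexOf (pos: Int): Int = pos % pageSize

  def main (args: Array [String]) {
    val pages = Array (new Array [Byte] (pageSize))   // one allocated page
    val writeAt = pageSize                            // write pointer exactly on the boundary

    // Old scheme: pageOf (writeAt) points one past the last allocated page,
    // and pageIndexOf (writeAt) would set a limit of 0 on the final buffer.
    println (pageOf (writeAt))                        // 1, but pages.length == 1
    println (pageIndexOf (writeAt))                   // 0

    // New scheme: address the page holding the last readable byte instead.
    val n = pageOf (writeAt - 1)                      // 0
    val buf = ByteBuffer.wrap (pages (n))
    buf.limit (pageIndexOf (writeAt - 1) + 1)         // 8192, the full page
    println (buf.limit)                               // 8192
  }
}
```

The `m >= pages.length` guard added to both methods covers the same boundary when nothing is readable or writable yet, returning an empty array rather than wrapping a page that does not exist.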
src/test/scala/com/treode/cps/buffer/PagedBufferSpec.scala: 93 changes (73 additions, 20 deletions)
```diff
@@ -223,48 +223,101 @@ class PagedBufferSpec extends FlatSpec {
   }
 
   it should "provide readable ByteBuffers" in {
 
     def checkBuffers (discard: Int, readAt: Int, writeAt: Int, pos: Int, limit: Int, len: Int) {
       val b = PagedBuffer ()
       b.capacity (writeAt)
       b.writeAt = writeAt
       b.readAt = readAt
       b.discard (discard)
       val bs = b.readableByteBuffers
-      expectResult (pos) (bs (0) .position)
+      expectResult (len) (bs.length)
+      if (bs.length > 0) {
+        expectResult (pos) (bs (0) .position)
+        expectResult (limit) (bs (bs.length-1) .limit)
+      }
       if (bs.length > 1) {
         expectResult (pageSize) (bs (0) .limit)
         expectResult (0) (bs (bs.length-1) .position)
-      }
-      expectResult (limit) (bs (bs.length-1) .limit)
-      expectResult (len) (bs.length)
-    }
+      }}
 
+    checkBuffers (0, 0, 0, 0, 0, 1)
+    checkBuffers (0, 0, 7890, 0, 7890, 1)
+    checkBuffers (0, 0, pageSize, 0, pageSize, 1)
     checkBuffers (0, 1234, 7890, 1234, 7890, 1)
-    checkBuffers (0, 1234, pageSize+7890, 1234, 7890, 2)
-    checkBuffers (0, 1234, pageSize*2+7890, 1234, 7890, 3)
+    checkBuffers (0, 1234, pageSize, 1234, pageSize, 1)
+    checkBuffers (0, 7890, 7890, 7890, 7890, 1)
+    checkBuffers (0, pageSize, pageSize, -1, -1, 0)
+
+    checkBuffers (123, 123, 123, 123, 123, 1)
+    checkBuffers (123, 123, 7890, 123, 7890, 1)
+    checkBuffers (123, 123, pageSize, 123, pageSize, 1)
     checkBuffers (123, 1234, 7890, 1234, 7890, 1)
-    checkBuffers (123, 1234, pageSize+7890, 1234, 7890, 2)
-    checkBuffers (1234, 1234, 7890, 1234, 7890, 1)
-    checkBuffers (1234, 1234, pageSize+7890, 1234, 7890, 2)
+    checkBuffers (123, 1234, pageSize, 1234, pageSize, 1)
+    checkBuffers (123, 7890, 7890, 7890, 7890, 1)
+    checkBuffers (123, pageSize, pageSize, -1, -1, 0)
+
+    checkBuffers (0, 0, pageSize+7890, 0, 7890, 2)
+    checkBuffers (0, 0, pageSize*2, 0, pageSize, 2)
+    checkBuffers (0, 1234, pageSize+7890, 1234, 7890, 2)
+    checkBuffers (0, 1234, pageSize*2, 1234, pageSize, 2)
+    checkBuffers (0, pageSize+7890, pageSize+7890, 7890, 7890, 1)
+    checkBuffers (0, pageSize*2, pageSize*2, -1, -1, 0)
+
+    checkBuffers (0, pageSize, pageSize+7890, 0, 7890, 1)
+    checkBuffers (0, pageSize, pageSize*2, 0, pageSize, 1)
+    checkBuffers (0, pageSize+1234, pageSize+7890, 1234, 7890, 1)
+    checkBuffers (0, pageSize+1234, pageSize*2, 1234, pageSize, 1)
+
+    checkBuffers (pageSize, pageSize, pageSize+7890, 0, 7890, 1)
+    checkBuffers (pageSize, pageSize, pageSize*2, 0, pageSize, 1)
+    checkBuffers (pageSize, pageSize+1234, pageSize+7890, 1234, 7890, 1)
+    checkBuffers (pageSize, pageSize+1234, pageSize*2, 1234, pageSize, 1)
   }
 
   it should "provide writable ByteBuffers" in {
-    def checkBuffers (discard: Int, writeAt: Int, pos: Int, limit: Int, len: Int) {
+    def checkBuffers (discard: Int, writeAt: Int, capacity: Int, pos: Int, limit: Int, len: Int) {
       val b = PagedBuffer ()
-      b.capacity (writeAt)
+      b.capacity (capacity)
       b.writeAt = writeAt
       b.readAt = discard
       b.discard (discard)
       val bs = b.writableByteBuffers
-      expectResult (pos) (bs (0) .position)
+      expectResult (len) (bs.length)
+      if (bs.length > 0) {
+        expectResult (pos) (bs (0) .position)
+        expectResult (limit) (bs (bs.length-1) .limit)
+      }
       if (bs.length > 1) {
         expectResult (pageSize) (bs (0) .limit)
         expectResult (0) (bs (bs.length-1) .position)
-      }
-      expectResult (limit) (bs (bs.length-1) .limit)
-      expectResult (len) (bs.length)
-    }
+      }}
 
-    checkBuffers (0, 1234, 1234, 8192, 1)
-    checkBuffers (123, 1234, 1234, 8192, 1)
+    checkBuffers (0, 0, 0, 0, pageSize, 1)
+    checkBuffers (0, 0, 7890, 0, pageSize, 1)
+    checkBuffers (0, 0, pageSize, 0, pageSize, 1)
+    checkBuffers (0, 1234, 7890, 1234, pageSize, 1)
+    checkBuffers (0, 1234, pageSize, 1234, pageSize, 1)
+    checkBuffers (0, 7890, 7890, 7890, pageSize, 1)
+    checkBuffers (0, pageSize, pageSize, -1, -1, 0)
+
+    checkBuffers (123, 123, 123, 123, pageSize, 1)
+    checkBuffers (123, 123, 7890, 123, pageSize, 1)
+    checkBuffers (123, 123, pageSize, 123, pageSize, 1)
+    checkBuffers (123, 1234, 7890, 1234, pageSize, 1)
+    checkBuffers (123, 1234, pageSize, 1234, pageSize, 1)
+    checkBuffers (123, 7890, 7890, 7890, pageSize, 1)
+    checkBuffers (123, pageSize, pageSize, -1, -1, 0)
+
+    checkBuffers (0, 0, pageSize+7890, 0, pageSize, 2)
+    checkBuffers (0, 0, pageSize*2, 0, pageSize, 2)
+    checkBuffers (0, 1234, pageSize+7890, 1234, pageSize, 2)
+    checkBuffers (0, 1234, pageSize*2, 1234, pageSize, 2)
+    checkBuffers (0, pageSize+7890, pageSize+7890, 7890, pageSize, 1)
+    checkBuffers (0, pageSize*2, pageSize*2, -1, -1, 0)
+
+    checkBuffers (pageSize, pageSize, pageSize+7890, 0, pageSize, 1)
+    checkBuffers (pageSize, pageSize, pageSize*2, 0, pageSize, 1)
+    checkBuffers (pageSize, pageSize+1234, pageSize+7890, 1234, pageSize, 1)
+    checkBuffers (pageSize, pageSize+1234, pageSize*2, 1234, pageSize, 1)
   }}
```
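As a usage note, arrays of ByteBuffers like those returned above are typically handed to scatter/gather I/O. The following is a hypothetical caller sketch, not part of this commit or the library's I/O path; `writeReadable` and the use of `GatheringByteChannel` are assumptions for illustration. It shows why returning a zero-length array in the new edge cases is convenient: a gathering write over no buffers simply transfers nothing.

```scala
import java.nio.channels.GatheringByteChannel
import com.treode.cps.buffer.PagedBuffer

// Hypothetical caller, not part of the library: hand the readable pages to a
// gathering write. A real caller would also advance the buffer's read
// position by the count returned.
object WriteSketch {
  def writeReadable (channel: GatheringByteChannel, buffer: PagedBuffer): Long = {
    val bs = buffer.readableByteBuffers
    // With the fix, bs may be a zero-length array (nothing readable at a page
    // boundary); channel.write (bs) then simply returns 0.
    channel.write (bs)
  }
}
```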
