
Merge branch 'master' into site

commit 4105f0becb145042921be22af42ef88eb0d95291 (2 parents: 6a5d083 + 9cf4d58), committed by Robey Pointer on Jan 13, 2012
@@ -1,5 +1,18 @@
+
+2.1.5
+-----
+release: 12 January 2012
+
+- Only sync to disk when there's data to be written. [Stephan Zuercher]
+- Track latency measurements better, and track the percentage of java heap
+ reserved for in-memory queues. [Stephan Zuercher]
+- Fix the startup script to work in "dash". [Matt Parlane]
+- Send the correct response back for the memcached protocol for delete. [Matt
+ Erkkila]
+
2.1.4
-----
+release: 21 November 2011
- Separate timers for journal fsync operations from those used for request
timeouts and queue expiry
@@ -9,7 +22,6 @@
bugs)
- Creating a queue with an illegal name causes an error
-
2.1.3
-----
release: 13 October 2011
@@ -21,7 +21,7 @@ new KestrelConfig {
default.defaultJournalSize = 16.megabytes
default.maxMemorySize = 128.megabytes
default.maxJournalSize = 1.gigabyte
- default.syncJournal = 20.milliseconds
+ default.syncJournal = 100.milliseconds
admin.httpPort = 2223
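Aside (not part of the diff): syncJournal is a com.twitter.util.Duration, and the PeriodicSyncFile scaladoc further down describes the two extremes of the setting. A small hedged sketch of the three regimes as plain Duration values, for reference:

    import com.twitter.conversions.time._
    import com.twitter.util.Duration

    // The three journal-sync regimes described by the PeriodicSyncFile scaladoc below.
    val syncEveryWrite: Duration = 0.seconds         // fsync after every write
    val syncNever: Duration = Duration.MaxValue      // never fsync explicitly
    val syncPeriodic: Duration = 100.milliseconds    // the new documented example value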
@@ -1,9 +1,9 @@
#Project properties
-#Mon Nov 21 15:08:49 PST 2011
+#Thu Jan 12 16:30:20 PST 2012
project.organization=net.lag
project.name=kestrel
sbt.version=0.7.4
-project.version=2.1.5-SNAPSHOT
+project.version=2.1.6-SNAPSHOT
def.scala.version=2.7.7
build.scala.versions=2.8.1
project.initialize=false
@@ -58,7 +58,24 @@ class KestrelProject(info: ProjectInfo) extends StandardServiceProject(info) wit
lazy val packageLoadTests = packageLoadTestsAction
override def packageDistTask = packageLoadTestsAction && super.packageDistTask
-// override def fork = forkRun(List("-Xmx1024m", "-verbosegc", "-XX:+PrintGCDetails"))
+ // generate a distribution zip for release.
+ def releaseDistTask = task {
+ val releaseDistPath = "dist-release" / distName ##
+
+ releaseDistPath.asFile.mkdirs()
+ (releaseDistPath / "libs").asFile.mkdirs()
+ (releaseDistPath / "config").asFile.mkdirs()
+
+ FileUtilities.copyFlat(List(jarPath), releaseDistPath, log).left.toOption orElse
+ FileUtilities.copyFlat(List(outputPath / loadTestJarFilename), releaseDistPath, log).left.toOption orElse
+ FileUtilities.copyFlat(dependentJars.get, releaseDistPath / "libs", log).left.toOption orElse
+ FileUtilities.copy(((configPath ***) --- (configPath ** "*.class")).get, releaseDistPath / "config", log).left.toOption orElse
+ FileUtilities.copy((scriptsOutputPath ***).get, releaseDistPath, log).left.toOption orElse
+ FileUtilities.zip((("dist-release" ##) / distName).get, "dist-release" / (distName + ".zip"), true, log)
+ }
+ val ReleaseDistDescription = "Creates a deployable zip file with dependencies, config, and scripts."
+ lazy val releaseDist = releaseDistTask.dependsOn(`package`, makePom, copyScripts).describedAs(ReleaseDistDescription)
+
lazy val putMany = task { args =>
runTask(Some("net.lag.kestrel.load.PutMany"), testClasspath, args).dependsOn(testCompile)
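Aside: the orElse chain in releaseDistTask appears to lean on the sbt 0.7 convention that FileUtilities.copy/copyFlat report an error message on the Left, and that a task body yields None on success or Some(error). A tiny standalone illustration of that short-circuiting idiom, with invented step names:

    // Each step reports failure on the Left, like sbt 0.7's FileUtilities helpers.
    def step(name: String, ok: Boolean): Either[String, Unit] =
      if (ok) Right(()) else Left(name + " failed")

    val firstError: Option[String] =
      step("copy jar", true).left.toOption orElse
      step("copy libs", true).left.toOption orElse
      step("zip dist", false).left.toOption

    println(firstError)   // Some("zip dist failed"); had an earlier step failed, later ones would not run

The new task would most likely be invoked from the sbt 0.7 shell as release-dist (camelCase task names are hyphenated), though that is inferred rather than shown in this commit.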
@@ -1,4 +1,4 @@
#Automatically generated by ReleaseManagement
-#Mon Nov 21 15:08:49 PST 2011
-version=2.1.4
-sha1=10d84a9dee3c23813c8fd76b55ce95d958154423
+#Thu Jan 12 16:30:20 PST 2012
+version=2.1.5
+sha1=459ec824963e31e0da78dbc425be0c7084a3e298
@@ -20,14 +20,14 @@ package net.lag.kestrel
import java.io._
import java.nio.{ByteBuffer, ByteOrder}
import java.nio.channels.FileChannel
-import java.util.concurrent.LinkedBlockingQueue
+import java.util.concurrent.{LinkedBlockingQueue, ScheduledExecutorService}
import java.util.concurrent.atomic.AtomicInteger
import scala.annotation.tailrec
import com.twitter.conversions.storage._
import com.twitter.conversions.time._
import com.twitter.logging.Logger
import com.twitter.ostrich.admin.BackgroundProcess
-import com.twitter.util.{Future, Duration, Timer, Time}
+import com.twitter.util.{Future, Duration, Time}
case class BrokenItemException(lastValidPosition: Long, cause: Throwable) extends IOException(cause)
@@ -51,7 +51,7 @@ object JournalItem {
/**
* Codes for working with the journal file for a PersistentQueue.
*/
-class Journal(queuePath: File, queueName: String, syncTimer: Timer, syncJournal: Duration) {
+class Journal(queuePath: File, queueName: String, syncScheduler: ScheduledExecutorService, syncJournal: Duration) {
import Journal._
private val log = Logger.get(getClass)
@@ -97,7 +97,7 @@ class Journal(queuePath: File, queueName: String, syncTimer: Timer, syncJournal:
def this(fullPath: String) = this(fullPath, Duration.MaxValue)
private def open(file: File) {
- writer = new PeriodicSyncFile(file, syncTimer, syncJournal)
+ writer = new PeriodicSyncFile(file, syncScheduler, syncJournal)
}
def open() {
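The primary Journal constructor now takes a java.util.concurrent.ScheduledExecutorService in place of a com.twitter.util.Timer. A minimal construction sketch against the new signature shown above; the path, queue name, and period are made-up values, and a real server shares one scheduler across all journals (as Kestrel.scala does in the next file):

    import java.io.File
    import java.util.concurrent.{Executors, ScheduledExecutorService}
    import com.twitter.conversions.time._
    import net.lag.kestrel.Journal

    // Hypothetical standalone setup, not Kestrel's own wiring.
    val syncScheduler: ScheduledExecutorService = Executors.newScheduledThreadPool(1)
    val journal = new Journal(new File("/tmp/kestrel-test"), "test_queue", syncScheduler, 100.milliseconds)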
@@ -18,9 +18,10 @@
package net.lag.kestrel
import java.net.InetSocketAddress
-import java.util.concurrent.{Executors, ExecutorService, TimeUnit}
+import java.util.concurrent._
import java.util.concurrent.atomic.AtomicInteger
import scala.collection.{immutable, mutable}
+import com.twitter.concurrent.NamedPoolThreadFactory
import com.twitter.conversions.time._
import com.twitter.logging.Logger
import com.twitter.naggati.codec.MemcacheCodec
@@ -72,7 +73,7 @@ class Kestrel(defaultQueueConfig: QueueConfig, builders: List[QueueBuilder],
var queueCollection: QueueCollection = null
var timer: Timer = null
- var journalSyncTimer: Timer = null
+ var journalSyncScheduler: ScheduledExecutorService = null
var executor: ExecutorService = null
var channelFactory: ChannelFactory = null
var memcacheAcceptor: Option[Channel] = None
@@ -93,15 +94,25 @@ class Kestrel(defaultQueueConfig: QueueConfig, builders: List[QueueBuilder],
listenAddress, memcacheListenPort, textListenPort, queuePath, protocol,
expirationTimerFrequency, clientTimeout, maxOpenTransactions)
- // this means no timeout will be at better granularity than N ms.
- journalSyncTimer = new HashedWheelTimer(10, TimeUnit.MILLISECONDS)
+ // this means no timeout will be at better granularity than 100 ms.
timer = new HashedWheelTimer(100, TimeUnit.MILLISECONDS)
- queueCollection = new QueueCollection(queuePath, new NettyTimer(timer), new NettyTimer(journalSyncTimer), defaultQueueConfig, builders)
+ journalSyncScheduler =
+ new ScheduledThreadPoolExecutor(
+ Runtime.getRuntime.availableProcessors,
+ new NamedPoolThreadFactory("journal-sync", true),
+ new RejectedExecutionHandler {
+ override def rejectedExecution(r: Runnable, executor: ThreadPoolExecutor) {
+ log.warning("Rejected journal fsync")
+ }
+ })
+
+ queueCollection = new QueueCollection(queuePath, new NettyTimer(timer), journalSyncScheduler, defaultQueueConfig, builders)
queueCollection.loadQueues()
Stats.addGauge("items") { queueCollection.currentItems.toDouble }
Stats.addGauge("bytes") { queueCollection.currentBytes.toDouble }
+ Stats.addGauge("reserved_memory_ratio") { queueCollection.reservedMemoryRatio }
// netty setup:
executor = Executors.newCachedThreadPool()
@@ -161,8 +172,9 @@ class Kestrel(defaultQueueConfig: QueueConfig, builders: List[QueueBuilder],
executor.awaitTermination(5, TimeUnit.SECONDS)
timer.stop()
timer = null
- journalSyncTimer.stop()
- journalSyncTimer = null
+ journalSyncScheduler.shutdown()
+ journalSyncScheduler.awaitTermination(5, TimeUnit.SECONDS)
+ journalSyncScheduler = null
log.info("Goodbye.")
}
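Journal fsyncs now run on a ScheduledThreadPoolExecutor sized to the CPU count, built from daemon "journal-sync" threads, with rejected fsyncs logged rather than thrown, and the pool is drained for up to five seconds at shutdown. A self-contained distillation of that lifecycle, a sketch rather than Kestrel's exact code:

    import java.util.concurrent.{RejectedExecutionHandler, ScheduledThreadPoolExecutor, ThreadPoolExecutor, TimeUnit}
    import com.twitter.concurrent.NamedPoolThreadFactory
    import com.twitter.logging.Logger

    val log = Logger.get("journal-sync-example")

    // One shared pool of daemon threads for every queue's journal fsyncs.
    val journalSyncScheduler = new ScheduledThreadPoolExecutor(
      Runtime.getRuntime.availableProcessors,
      new NamedPoolThreadFactory("journal-sync", true),
      new RejectedExecutionHandler {
        override def rejectedExecution(r: Runnable, executor: ThreadPoolExecutor) {
          log.warning("Rejected journal fsync")
        }
      })

    // ... hand it to QueueCollection / Journal / PeriodicSyncFile ...

    // Orderly shutdown: stop accepting work, then wait for in-flight fsyncs.
    journalSyncScheduler.shutdown()
    journalSyncScheduler.awaitTermination(5, TimeUnit.SECONDS)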
@@ -194,21 +194,6 @@ abstract class KestrelHandler(val queues: QueueCollection, val maxOpenTransactio
waitingFor = Some(future)
future.map { itemOption =>
waitingFor = None
- timeout match {
- case None => {
- val usec = (Time.now - startTime).inMicroseconds.toInt
- val statName = if (itemOption.isDefined) "get_hit_latency_usec" else "get_miss_latency_usec"
- Stats.addMetric(statName, usec)
- Stats.addMetric("q/" + key + "/" + statName, usec)
- }
- case Some(_) => {
- if (!itemOption.isDefined) {
- val msec = (Time.now - startTime).inMilliseconds.toInt
- Stats.addMetric("get_timeout_msec", msec)
- Stats.addMetric("q/" + key + "/get_timeout_msec", msec)
- }
- }
- }
itemOption.foreach { item =>
log.debug("get <- %s", item)
if (opening) pendingTransactions.add(key, item.xid)
@@ -85,7 +85,7 @@ extends NettyHandler[MemcacheRequest](channelGroup, queueCollection, maxOpenTran
dumpStats(request.line.drop(1))
case "delete" =>
delete(request.line(1))
- channel.write(new MemcacheResponse("END"))
+ channel.write(new MemcacheResponse("DELETED"))
case "flush_expired" =>
channel.write(new MemcacheResponse(flushExpired(request.line(1)).toString))
case "flush_all_expired" =>
@@ -189,6 +189,7 @@ extends NettyHandler[MemcacheRequest](channelGroup, queueCollection, maxOpenTran
report += (("curr_items", queues.currentItems.toString))
report += (("total_items", Stats.getCounter("total_items")().toString))
report += (("bytes", queues.currentBytes.toString))
+ report += (("reserved_memory_ratio", "%.3f".format(queues.reservedMemoryRatio)))
report += (("curr_connections", Kestrel.sessions.get().toString))
report += (("total_connections", Stats.getCounter("total_connections")().toString))
report += (("cmd_get", Stats.getCounter("cmd_get")().toString))
@@ -1,47 +1,86 @@
package net.lag.kestrel
-import java.nio.ByteBuffer
-import java.util.concurrent.ConcurrentLinkedQueue
import com.twitter.conversions.time._
+import com.twitter.ostrich.stats.Stats
import com.twitter.util._
import java.io.{IOException, FileOutputStream, File}
+import java.nio.ByteBuffer
+import java.util.concurrent.{ConcurrentLinkedQueue, ScheduledExecutorService, ScheduledFuture, TimeUnit}
+
+abstract class PeriodicSyncTask(val scheduler: ScheduledExecutorService, initialDelay: Duration, period: Duration)
+extends Runnable {
+ @volatile private[this] var scheduledFsync: Option[ScheduledFuture[_]] = None
+
+ def start() {
+ synchronized {
+ if (scheduledFsync.isEmpty && period > 0.seconds) {
+ val handle = scheduler.scheduleWithFixedDelay(this, initialDelay.inMilliseconds, period.inMilliseconds,
+ TimeUnit.MILLISECONDS)
+ scheduledFsync = Some(handle)
+ }
+ }
+ }
+
+ def stop() {
+ synchronized { _stop() }
+ }
+
+ def stopIf(f: => Boolean) {
+ synchronized {
+ if (f) _stop()
+ }
+ }
+
+ private[this] def _stop() {
+ scheduledFsync.foreach { _.cancel(false) }
+ scheduledFsync = None
+ }
+}
/**
* Open a file for writing, and fsync it on a schedule. The period may be 0 to force an fsync
* after every write, or `Duration.MaxValue` to never fsync.
*/
-class PeriodicSyncFile(file: File, timer: Timer, period: Duration) {
+class PeriodicSyncFile(file: File, scheduler: ScheduledExecutorService, period: Duration) {
// pre-completed future for writers who are behaving synchronously.
private final val DONE = Future(())
- val writer = new FileOutputStream(file, true).getChannel
- val promises = new ConcurrentLinkedQueue[Promise[Unit]]()
-
- @volatile var closed = false
+ case class TimestampedPromise(val promise: Promise[Unit], val time: Time)
- if (period > 0.seconds && period < Duration.MaxValue) {
- timer.schedule(Time.now, period) {
+ val writer = new FileOutputStream(file, true).getChannel
+ val promises = new ConcurrentLinkedQueue[TimestampedPromise]()
+ val periodicSyncTask = new PeriodicSyncTask(scheduler, period, period) {
+ override def run() {
if (!closed && !promises.isEmpty) fsync()
}
}
+ @volatile var closed = false
+
private def fsync() {
synchronized {
// race: we could underestimate the number of completed writes. that's okay.
val completed = promises.size
+ val fsyncStart = Time.now
try {
writer.force(false)
} catch {
case e: IOException =>
for (i <- 0 until completed) {
- promises.poll().setException(e)
+ promises.poll().promise.setException(e)
}
return;
}
for (i <- 0 until completed) {
- promises.poll().setValue(())
+ val timestampedPromise = promises.poll()
+ timestampedPromise.promise.setValue(())
+ val delaySinceWrite = fsyncStart - timestampedPromise.time
+ val durationBehind = if (delaySinceWrite > period) delaySinceWrite - period else 0.seconds
+ Stats.addMetric("fsync_delay_usec", durationBehind.inMicroseconds.toInt)
}
+
+ periodicSyncTask.stopIf { promises.isEmpty }
}
}
@@ -62,7 +101,8 @@ class PeriodicSyncFile(file: File, timer: Timer, period: Duration) {
DONE
} else {
val promise = new Promise[Unit]()
- promises.add(promise)
+ promises.add(TimestampedPromise(promise, Time.now))
+ periodicSyncTask.start()
promise
}
}
@@ -73,6 +113,7 @@ class PeriodicSyncFile(file: File, timer: Timer, period: Duration) {
*/
def close() {
closed = true
+ periodicSyncTask.stop()
fsync()
writer.close()
}
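PeriodicSyncTask wraps scheduleWithFixedDelay behind an idempotent start()/stop() pair plus stopIf, which is how PeriodicSyncFile now schedules fsyncs only while promises are pending (the "only sync to disk when there's data to be written" item in the ChangeLog). A small usage sketch; the AtomicInteger is a hypothetical stand-in for the pending-promise queue:

    import java.util.concurrent.{Executors, ScheduledExecutorService}
    import java.util.concurrent.atomic.AtomicInteger
    import com.twitter.conversions.time._
    import net.lag.kestrel.PeriodicSyncTask

    val scheduler: ScheduledExecutorService = Executors.newScheduledThreadPool(1)
    val pending = new AtomicInteger(3)   // pretend three writes are waiting for an fsync

    val syncTask = new PeriodicSyncTask(scheduler, 100.milliseconds, 100.milliseconds) {
      def run() {
        // do one round of work, then cancel the schedule once nothing is left
        stopIf { pending.decrementAndGet() <= 0 }
      }
    }

    syncTask.start()   // schedules with scheduleWithFixedDelay; a no-op if already scheduled or if period <= 0
    // ... later, e.g. on close():
    syncTask.stop()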