Skip to content

Commit

Permalink
fix multithreaded compaction deadlock
Browse files Browse the repository at this point in the history
patch by Carl Yeksigian; reviewed by jbellis for CASSANDRA-4492
  • Loading branch information
jbellis committed Dec 18, 2012
1 parent 4885bfc commit 134e8c7
Show file tree
Hide file tree
Showing 6 changed files with 18 additions and 11 deletions.
4 changes: 4 additions & 0 deletions CHANGES.txt
@@ -1,3 +1,7 @@
1.1.9
* fix multithreaded compaction deadlock (CASSANDRA-4492)


1.1.8
* reset getRangeSlice filter after finishing a row for get_paged_slice
(CASSANDRA-4919)
Expand Down
15 changes: 6 additions & 9 deletions conf/cassandra.yaml
Expand Up @@ -381,15 +381,12 @@ in_memory_compaction_limit_in_mb: 64
# Uncomment to make compaction mono-threaded, the pre-0.8 default.
#concurrent_compactors: 1

# multithreaded_compaction: false. When enabled, each compaction will
# use up to one thread per core, plus one thread per sstable being
# merged. This is usually only useful for SSD-based hardware:
# otherwise, your concern is usually to get compaction to do LESS I/O
# (see: compaction_throughput_mb_per_sec), not more.
#
# WARNING: this setting has caused compaction deadlocks for multiple
# users (see CASSANDRA-4492). It is recommended to leave this off
# unless you are prepared to help troubleshoot.
# Multi-threaded compaction. When enabled, each compaction will use
# up to one thread per core, plus one thread per sstable being merged.
# This is usually only useful for SSD-based hardware: otherwise,
# your concern is usually to get compaction to do LESS I/O (see:
# compaction_throughput_mb_per_sec), not more.
multithreaded_compaction: false

# Throttles compaction to the given total throughput across the entire
# system. The faster you insert data, the faster you need to compact in
Expand Down
Expand Up @@ -21,6 +21,7 @@
*/


import java.io.Closeable;
import java.io.DataOutput;
import java.io.IOException;
import java.security.MessageDigest;
Expand All @@ -32,7 +33,7 @@
* and can write a compacted version of those rows to an output stream. It does
* NOT necessarily require creating a merged CF object in memory.
*/
public abstract class AbstractCompactedRow
public abstract class AbstractCompactedRow implements Closeable
{
public final DecoratedKey<?> key;

Expand Down
Expand Up @@ -154,7 +154,10 @@ public int execute(CompactionExecutorStatsCollector collector) throws IOExceptio

AbstractCompactedRow row = nni.next();
if (row.isEmpty())
{
row.close();
continue;
}

long position = writer.append(row);
totalkeysWritten++;
Expand Down
Expand Up @@ -193,7 +193,7 @@ public long maxTimestamp()
return maxTimestamp;
}

private void close()
public void close()
{
for (IColumnIterator row : rows)
{
Expand Down
Expand Up @@ -185,4 +185,6 @@ public ColumnFamily getFullColumnFamily() throws IOException
{
return compactedCf;
}

public void close() { }
}

0 comments on commit 134e8c7

Please sign in to comment.