Compress PagedBytesAtomicFieldData's termOrdToBytesOffset.
Using MonotonicAppendingLongBuffer instead of a GrowableWriter should help
save several bits per value, especially when the bytes to store have similar
lengths.

Closes #3186
jpountz committed Jun 15, 2013
1 parent b079c0f commit 5e31430
Showing 2 changed files with 17 additions and 27 deletions.
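
For readers unfamiliar with the two Lucene structures being swapped, here is a minimal, self-contained sketch of how a term-ordinal-to-bytes-offset table is filled before and after this change. It is not part of the commit; the class name and the offset values are invented for illustration, and it assumes the Lucene 4.x packed-ints APIs referenced in the diff.

import org.apache.lucene.util.packed.GrowableWriter;
import org.apache.lucene.util.packed.MonotonicAppendingLongBuffer;
import org.apache.lucene.util.packed.PackedInts;

// Illustrative only: class name and offsets are made up for this sketch.
public class TermOrdToOffsetSketch {

    public static void main(String[] args) {
        // Offsets into a PagedBytes block grow by roughly the term length each time;
        // slot 0 stays reserved for "no value".
        long[] offsets = {0, 11, 23, 34, 46};

        // Before this commit: a random-access GrowableWriter. Every slot ends up
        // wide enough for the largest offset ever written.
        GrowableWriter writer = new GrowableWriter(1, offsets.length, PackedInts.FAST);
        for (int i = 0; i < offsets.length; i++) {
            writer.set(i, offsets[i]);
        }

        // After this commit: an append-only MonotonicAppendingLongBuffer. Values must
        // be added in non-decreasing order, and only the small deviation from a
        // per-block linear trend is stored.
        MonotonicAppendingLongBuffer buffer = new MonotonicAppendingLongBuffer();
        for (long offset : offsets) {
            buffer.add(offset);
        }

        // Both structures answer the same lookup.
        System.out.println(writer.get(2) + " == " + buffer.get(2)); // 23 == 23
    }
}

Both structures serve the same reads; the difference is that GrowableWriter spends roughly bitsRequired(maxOffset) bits on every slot, while the monotonic buffer encodes each offset relative to a linear trend, which is why similar term lengths compress so well.
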
PagedBytesAtomicFieldData.java
@@ -22,8 +22,7 @@
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.PagedBytes;
 import org.apache.lucene.util.PagedBytes.Reader;
-import org.apache.lucene.util.packed.GrowableWriter;
-import org.apache.lucene.util.packed.PackedInts;
+import org.apache.lucene.util.packed.MonotonicAppendingLongBuffer;
 import org.elasticsearch.index.fielddata.AtomicFieldData;
 import org.elasticsearch.index.fielddata.ScriptDocValues;
 import org.elasticsearch.index.fielddata.ordinals.EmptyOrdinals;
@@ -40,14 +39,14 @@ public static PagedBytesAtomicFieldData empty(int numDocs) {

    // 0 ordinal in values means no value (its null)
    private final PagedBytes.Reader bytes;
-   private final PackedInts.Reader termOrdToBytesOffset;
+   private final MonotonicAppendingLongBuffer termOrdToBytesOffset;
    protected final Ordinals ordinals;

    private volatile int[] hashes;
    private long size = -1;
    private final long readerBytesSize;

-   public PagedBytesAtomicFieldData(PagedBytes.Reader bytes, long readerBytesSize, PackedInts.Reader termOrdToBytesOffset, Ordinals ordinals) {
+   public PagedBytesAtomicFieldData(PagedBytes.Reader bytes, long readerBytesSize, MonotonicAppendingLongBuffer termOrdToBytesOffset, Ordinals ordinals) {
        this.bytes = bytes;
        this.termOrdToBytesOffset = termOrdToBytesOffset;
        this.ordinals = ordinals;
@@ -88,7 +87,7 @@ public long getMemorySizeInBytes() {

    private final int[] getHashes() {
        if (hashes == null) {
-           int numberOfValues = termOrdToBytesOffset.size();
+           int numberOfValues = (int) termOrdToBytesOffset.size();
            int[] hashes = new int[numberOfValues];
            BytesRef scratch = new BytesRef();
            for (int i = 0; i < numberOfValues; i++) {
@@ -121,12 +120,12 @@ public ScriptDocValues.Strings getScriptValues() {
    static abstract class BytesValues extends org.elasticsearch.index.fielddata.BytesValues.WithOrdinals {

        protected final PagedBytes.Reader bytes;
-       protected final PackedInts.Reader termOrdToBytesOffset;
+       protected final MonotonicAppendingLongBuffer termOrdToBytesOffset;
        protected final Ordinals.Docs ordinals;

        protected final BytesRef scratch = new BytesRef();

-       BytesValues(PagedBytes.Reader bytes, PackedInts.Reader termOrdToBytesOffset, Ordinals.Docs ordinals) {
+       BytesValues(PagedBytes.Reader bytes, MonotonicAppendingLongBuffer termOrdToBytesOffset, Ordinals.Docs ordinals) {
            super(ordinals);
            this.bytes = bytes;
            this.termOrdToBytesOffset = termOrdToBytesOffset;
@@ -156,7 +155,7 @@ static class Single extends BytesValues {

        private final Iter.Single iter;

-       Single(PagedBytes.Reader bytes, PackedInts.Reader termOrdToBytesOffset, Ordinals.Docs ordinals) {
+       Single(PagedBytes.Reader bytes, MonotonicAppendingLongBuffer termOrdToBytesOffset, Ordinals.Docs ordinals) {
            super(bytes, termOrdToBytesOffset, ordinals);
            assert !ordinals.isMultiValued();
            iter = newSingleIter();
@@ -175,7 +174,7 @@ public Iter getIter(int docId) {
    static final class SingleHashed extends Single {
        private final int[] hashes;

-       SingleHashed(int[] hashes, Reader bytes, org.apache.lucene.util.packed.PackedInts.Reader termOrdToBytesOffset, Docs ordinals) {
+       SingleHashed(int[] hashes, Reader bytes, MonotonicAppendingLongBuffer termOrdToBytesOffset, Docs ordinals) {
            super(bytes, termOrdToBytesOffset, ordinals);
            this.hashes = hashes;
        }
@@ -203,7 +202,7 @@ static class Multi extends BytesValues {

        private final Iter.Multi iter;

-       Multi(PagedBytes.Reader bytes, PackedInts.Reader termOrdToBytesOffset, Ordinals.Docs ordinals) {
+       Multi(PagedBytes.Reader bytes, MonotonicAppendingLongBuffer termOrdToBytesOffset, Ordinals.Docs ordinals) {
            super(bytes, termOrdToBytesOffset, ordinals);
            assert ordinals.isMultiValued();
            this.iter = newMultiIter();
@@ -219,7 +218,7 @@ static final class MultiHashed extends Multi {

        private final int[] hashes;

-       MultiHashed(int[] hashes, Reader bytes, org.apache.lucene.util.packed.PackedInts.Reader termOrdToBytesOffset, Docs ordinals) {
+       MultiHashed(int[] hashes, Reader bytes, MonotonicAppendingLongBuffer termOrdToBytesOffset, Docs ordinals) {
            super(bytes, termOrdToBytesOffset, ordinals);
            this.hashes = hashes;
        }
@@ -246,7 +245,7 @@ public int getValueHashed(int docId, BytesRef ret) {
    static class Empty extends PagedBytesAtomicFieldData {

        Empty(int numDocs) {
-           super(emptyBytes(), 0, new GrowableWriter(1, 2, PackedInts.FASTEST).getMutable(), new EmptyOrdinals(numDocs));
+           super(emptyBytes(), 0, new MonotonicAppendingLongBuffer(), new EmptyOrdinals(numDocs));
        }

        static PagedBytes.Reader emptyBytes() {
PagedBytesIndexFieldData.java
@@ -20,10 +20,9 @@
 package org.elasticsearch.index.fielddata.plain;

 import org.apache.lucene.index.*;
-import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.PagedBytes;
-import org.apache.lucene.util.packed.GrowableWriter;
+import org.apache.lucene.util.packed.MonotonicAppendingLongBuffer;
 import org.apache.lucene.util.packed.PackedInts;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.Index;
@@ -92,10 +91,8 @@ public PagedBytesAtomicFieldData loadDirect(AtomicReaderContext context) throws
            startNumUniqueTerms = 1;
        }

-       // TODO: expose this as an option..., have a nice parser for it...
-       float acceptableOverheadRatio = PackedInts.FAST;
-
-       GrowableWriter termOrdToBytesOffset = new GrowableWriter(startBytesBPV, 1 + startNumUniqueTerms, acceptableOverheadRatio);
+       final MonotonicAppendingLongBuffer termOrdToBytesOffset = new MonotonicAppendingLongBuffer();
+       termOrdToBytesOffset.add(0); // first ord is reserved for missing values
        boolean preDefineBitsRequired = regex == null && frequency == null;
        OrdinalsBuilder builder = new OrdinalsBuilder(terms, preDefineBitsRequired, reader.maxDoc());
        try {
@@ -105,24 +102,18 @@ public PagedBytesAtomicFieldData loadDirect(AtomicReaderContext context) throws
            DocsEnum docsEnum = null;
            for (BytesRef term = termsEnum.next(); term != null; term = termsEnum.next()) {
                final int termOrd = builder.nextOrdinal();
-               if (termOrd == termOrdToBytesOffset.size()) {
-                   // NOTE: this code only runs if the incoming
-                   // reader impl doesn't implement
-                   // size (which should be uncommon)
-                   termOrdToBytesOffset = termOrdToBytesOffset.resize(ArrayUtil.oversize(1 + termOrd, 1));
-               }
-               termOrdToBytesOffset.set(termOrd, bytes.copyUsingLengthPrefix(term));
+               assert termOrd == termOrdToBytesOffset.size();
+               termOrdToBytesOffset.add(bytes.copyUsingLengthPrefix(term));
                docsEnum = termsEnum.docs(reader.getLiveDocs(), docsEnum, DocsEnum.FLAG_NONE);
                for (int docId = docsEnum.nextDoc(); docId != DocsEnum.NO_MORE_DOCS; docId = docsEnum.nextDoc()) {
                    builder.addDoc(docId);
                }
            }
            final long sizePointer = bytes.getPointer();
            PagedBytes.Reader bytesReader = bytes.freeze(true);
-           PackedInts.Reader termOrdToBytesOffsetReader = termOrdToBytesOffset.getMutable();
            final Ordinals ordinals = builder.build(fieldDataType.getSettings());

-           return new PagedBytesAtomicFieldData(bytesReader, sizePointer, termOrdToBytesOffsetReader, ordinals);
+           return new PagedBytesAtomicFieldData(bytesReader, sizePointer, termOrdToBytesOffset, ordinals);
        } finally {
            builder.close();
        }
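
One behavioral consequence worth noting (my reading of the diff, not stated in the commit message): GrowableWriter is random-access, which is why the old loader chose a starting bits-per-value and an acceptableOverheadRatio up front and occasionally had to resize via ArrayUtil.oversize when the number of unique terms was not known in advance. MonotonicAppendingLongBuffer is append-only, so the new loader pre-adds offset 0 for the reserved "missing value" ordinal and then asserts that each new term ordinal equals the buffer's current size before appending its offset. As a rough illustration of the space claim in the commit message: a segment with one million terms of about ten bytes each (plus the length prefix written by copyUsingLengthPrefix) pushes offsets toward roughly 11,000,000, which costs about 24 bits per slot in a flat packed array, while the deviations from the buffer's average per-entry growth typically fit in only a few bits.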
