Store the reason of noop in its document tombstone #30570

Merged: 4 commits merged on May 15, 2018
@@ -380,8 +380,9 @@ public interface TombstoneDocSupplier {

/**
* Creates a tombstone document for a noop operation.
* @param reason the reason of the noop
*/
ParsedDocument newNoopTombstoneDoc();
ParsedDocument newNoopTombstoneDoc(String reason);
}

public TombstoneDocSupplier getTombstoneDocSupplier() {
@@ -1349,7 +1349,7 @@ private NoOpResult innerNoOp(final NoOp noOp) throws IOException {
Exception failure = null;
if (softDeleteEnabled) {
try {
final ParsedDocument tombstone = engineConfig.getTombstoneDocSupplier().newNoopTombstoneDoc();
final ParsedDocument tombstone = engineConfig.getTombstoneDocSupplier().newNoopTombstoneDoc(noOp.reason());
tombstone.updateSeqID(noOp.seqNo(), noOp.primaryTerm());
// A noop tombstone does not require a _version, but it is added to keep the docvalues for the version field fully dense.
// 1L is selected to optimize the compression because it is probably the most common value in the version field.
@@ -186,7 +186,7 @@ private Translog.Operation readDocAsOp(int docID) throws IOException {
final Translog.Operation op;
final boolean isTombstone = docValues[leaf.ord].isTombstone(segmentDocID);
if (isTombstone && fields.uid() == null) {
op = new Translog.NoOp(seqNo, primaryTerm, ""); // TODO: store reason in ignored fields?
op = new Translog.NoOp(seqNo, primaryTerm, fields.source().utf8ToString());
assert version == 1L : "Noop tombstone should have version 1L; actual version [" + version + "]";
assert assertDocSoftDeleted(leaf.reader(), segmentDocID) : "Noop but soft_deletes field is not set [" + op + "]";
} else {
@@ -19,10 +19,12 @@

package org.elasticsearch.index.mapper;

import org.apache.lucene.document.StoredField;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.compress.CompressedXContent;
@@ -262,10 +264,14 @@ public ParsedDocument createDeleteTombstoneDoc(String index, String type, String
return documentParser.parseDocument(emptySource, deleteTombstoneMetadataFieldMappers).toTombstone();
}

public ParsedDocument createNoopTombstoneDoc(String index) throws MapperParsingException {
public ParsedDocument createNoopTombstoneDoc(String index, String reason) throws MapperParsingException {
final String id = ""; // _id won't be used.
final SourceToParse emptySource = SourceToParse.source(index, type, id, new BytesArray("{}"), XContentType.JSON);
return documentParser.parseDocument(emptySource, noopTombstoneMetadataFieldMappers).toTombstone();
final SourceToParse sourceToParse = SourceToParse.source(index, type, id, new BytesArray("{}"), XContentType.JSON);
final ParsedDocument parsedDoc = documentParser.parseDocument(sourceToParse, noopTombstoneMetadataFieldMappers).toTombstone();
// Store the reason of a noop as a raw string in the _source field
final BytesRef byteRef = new BytesRef(reason);
Contributor:
I'm having doubts about this - should we always make it valid JSON: { "_reason": "bla" }? I think it might be surprising to find non-JSON in the source field.
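A minimal sketch of that suggestion in plain Java; the "_reason" field name and the hand-rolled escaping are illustrative assumptions, not part of this change:

final class NoopReasonJson {
    private NoopReasonJson() {}

    // Wraps the noop reason in a JSON object, e.g. {"_reason":"filling gaps"},
    // so that whatever is stored in the _source field is always valid JSON.
    static String wrap(String reason) {
        StringBuilder sb = new StringBuilder("{\"_reason\":\"");
        for (int i = 0; i < reason.length(); i++) {
            char c = reason.charAt(i);
            if (c == '"' || c == '\\') {
                sb.append('\\').append(c); // escape quotes and backslashes
            } else if (c < 0x20) {
                sb.append(String.format("\\u%04x", (int) c)); // escape control characters
            } else {
                sb.append(c);
            }
        }
        return sb.append("\"}").toString();
    }
}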

parsedDoc.rootDoc().add(new StoredField(SourceFieldMapper.NAME, byteRef.bytes, byteRef.offset, byteRef.length));
return parsedDoc;
}

/**
@@ -2614,8 +2614,8 @@ public ParsedDocument newDeleteTombstoneDoc(String type, String id) {
return docMapper(type).getDocumentMapper().createDeleteTombstoneDoc(shardId.getIndexName(), type, id);
}
@Override
public ParsedDocument newNoopTombstoneDoc() {
return noopDocumentMapper.createNoopTombstoneDoc(shardId.getIndexName());
public ParsedDocument newNoopTombstoneDoc(String reason) {
return noopDocumentMapper.createNoopTombstoneDoc(shardId.getIndexName(), reason);
}
};
}
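For context, a standalone sketch of the stored-field round trip the tombstone relies on: the reason is written as a binary stored field (as createNoopTombstoneDoc does above) and read back as a UTF-8 string (as readDocAsOp does). The "_source" field name and the temporary directory here are illustrative only.

import java.nio.file.Files;

import org.apache.lucene.document.Document;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.BytesRef;

public class NoopReasonRoundTrip {
    public static void main(String[] args) throws Exception {
        try (Directory dir = FSDirectory.open(Files.createTempDirectory("noop-tombstone"))) {
            BytesRef reason = new BytesRef("filling gaps");
            try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig())) {
                Document doc = new Document();
                // Store the reason the same way the tombstone does: raw UTF-8 bytes in a stored field.
                doc.add(new StoredField("_source", reason.bytes, reason.offset, reason.length));
                writer.addDocument(doc);
            }
            try (DirectoryReader reader = DirectoryReader.open(dir)) {
                // Read the stored bytes back and decode them, mirroring fields.source().utf8ToString().
                BytesRef stored = reader.document(0).getBinaryValue("_source");
                System.out.println(stored.utf8ToString()); // prints: filling gaps
            }
        }
    }
}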
@@ -3711,7 +3711,7 @@ protected long doGenerateSeqNoForOperation(Operation operation) {
};
noOpEngine.recoverFromTranslog();
final int gapsFilled = noOpEngine.fillSeqNoGaps(primaryTerm.get());
final String reason = randomAlphaOfLength(16);
final String reason = "filling gaps";
noOpEngine.noOp(new Engine.NoOp(maxSeqNo + 1, primaryTerm.get(), LOCAL_TRANSLOG_RECOVERY, System.nanoTime(), reason));
assertThat(noOpEngine.getLocalCheckpointTracker().getCheckpoint(), equalTo((long) (maxSeqNo + 1)));
assertThat(noOpEngine.getTranslog().stats().getUncommittedOperations(), equalTo(gapsFilled));
@@ -3737,7 +3737,7 @@ protected long doGenerateSeqNoForOperation(Operation operation) {
List<Translog.Operation> operationsFromLucene = readAllOperationsInLucene(noOpEngine, mapperService);
assertThat(operationsFromLucene, hasSize(maxSeqNo + 2 - localCheckpoint)); // the filled gaps plus 2 manual noops.
for (int i = 0; i < operationsFromLucene.size(); i++) {
assertThat(operationsFromLucene.get(i), equalTo(new Translog.NoOp(localCheckpoint + 1 + i, primaryTerm.get(), "")));
assertThat(operationsFromLucene.get(i), equalTo(new Translog.NoOp(localCheckpoint + 1 + i, primaryTerm.get(), "filling gaps")));
}
assertConsistentHistoryBetweenTranslogAndLuceneIndex(noOpEngine, mapperService);
} finally {
@@ -29,6 +29,7 @@
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Constants;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
@@ -77,7 +78,6 @@
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineException;
import org.elasticsearch.index.engine.EngineTestCase;
import org.elasticsearch.index.engine.InternalEngine;
import org.elasticsearch.index.engine.InternalEngineFactory;
import org.elasticsearch.index.engine.Segment;
@@ -88,10 +88,10 @@
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapping;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
import org.elasticsearch.index.mapper.SourceFieldMapper;
import org.elasticsearch.index.mapper.SourceToParse;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.VersionFieldMapper;
@@ -3109,13 +3109,15 @@ public void testSupplyTombstoneDoc() throws Exception {
assertThat(deleteDoc.getField(IdFieldMapper.NAME).binaryValue(), equalTo(Uid.encodeId(id)));
assertThat(deleteDoc.getField(SeqNoFieldMapper.TOMBSTONE_NAME).numericValue().longValue(), equalTo(1L));

ParsedDocument noopTombstone = shard.getEngine().config().getTombstoneDocSupplier().newNoopTombstoneDoc();
final String reason = randomUnicodeOfLength(200);
ParsedDocument noopTombstone = shard.getEngine().config().getTombstoneDocSupplier().newNoopTombstoneDoc(reason);
assertThat(noopTombstone.docs(), hasSize(1));
ParseContext.Document noopDoc = noopTombstone.docs().get(0);
assertThat(noopDoc.getFields().stream().map(IndexableField::name).collect(Collectors.toList()),
containsInAnyOrder(VersionFieldMapper.NAME, SeqNoFieldMapper.TOMBSTONE_NAME,
containsInAnyOrder(VersionFieldMapper.NAME, SourceFieldMapper.NAME, SeqNoFieldMapper.TOMBSTONE_NAME,
SeqNoFieldMapper.NAME, SeqNoFieldMapper.NAME, SeqNoFieldMapper.PRIMARY_TERM_NAME));
assertThat(noopDoc.getField(SeqNoFieldMapper.TOMBSTONE_NAME).numericValue().longValue(), equalTo(1L));
assertThat(noopDoc.getField(SourceFieldMapper.NAME).binaryValue(), equalTo(new BytesRef(reason)));

closeShards(shard);
}
@@ -306,7 +306,7 @@ public ParsedDocument newDeleteTombstoneDoc(String type, String id) {
}

@Override
public ParsedDocument newNoopTombstoneDoc() {
public ParsedDocument newNoopTombstoneDoc(String reason) {
final ParseContext.Document doc = new ParseContext.Document();
SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
doc.add(seqID.seqNo);
@@ -316,6 +316,8 @@ public ParsedDocument newNoopTombstoneDoc() {
doc.add(seqID.tombstoneField);
Field versionField = new NumericDocValuesField(VersionFieldMapper.NAME, 0);
doc.add(versionField);
BytesRef byteRef = new BytesRef(reason);
doc.add(new StoredField(SourceFieldMapper.NAME, byteRef.bytes, byteRef.offset, byteRef.length));
return new ParsedDocument(versionField, seqID, null, null, null,
Collections.singletonList(doc), null, XContentType.JSON, null);
}