From f3f7db288efdf729b82bc43f4300f1cb83c83d6a Mon Sep 17 00:00:00 2001
From: Paul Elschot
Date: Sat, 28 Jan 2017 12:55:37 +0100
Subject: [PATCH 1/2] Rename Terms to IndexedField and some related renamings,
LUCENE-7633
---
.../query/QueryAutoStopWordAnalyzer.java | 6 +--
.../sinks/TestTeeSinkTokenFilter.java | 6 +--
.../apache/lucene/index/FixBrokenOffsets.java | 12 ++---
.../legacy/LegacyNumericRangeQuery.java | 6 +--
.../lucene/legacy/LegacyNumericUtils.java | 26 +++++-----
.../lucene54/TestLucene54DocValuesFormat.java | 10 ++--
.../index/TestBackwardsCompatibility.java | 14 ++---
.../apache/lucene/legacy/TestLegacyTerms.java | 16 +++---
.../tasks/SearchTravRetHighlightTask.java | 4 +-
.../quality/utils/QualityQueriesFinder.java | 6 +--
.../benchmark/byTask/TestPerfTasksLogic.java | 12 ++---
.../BooleanPerceptronClassifier.java | 14 ++---
.../CachingNaiveBayesClassifier.java | 10 ++--
.../SimpleNaiveBayesClassifier.java | 10 ++--
.../SimpleNaiveBayesDocumentClassifier.java | 8 +--
.../classification/utils/DatasetSplitter.java | 2 +-
.../utils/DocToDoubleVectorUtils.java | 14 ++---
.../CachingNaiveBayesClassifierTest.java | 6 +--
.../KNearestNeighborClassifierTest.java | 6 +--
.../SimpleNaiveBayesClassifierTest.java | 6 +--
.../utils/DocToDoubleVectorUtilsTest.java | 8 +--
.../codecs/blockterms/BlockTermsReader.java | 8 +--
.../codecs/blockterms/BlockTermsWriter.java | 10 ++--
.../OrdsBlockTreeTermsReader.java | 4 +-
.../OrdsBlockTreeTermsWriter.java | 10 ++--
.../codecs/blocktreeords/OrdsFieldReader.java | 8 +--
.../bloom/BloomFilteringPostingsFormat.java | 32 ++++++------
.../codecs/memory/DirectPostingsFormat.java | 22 ++++----
.../codecs/memory/FSTOrdTermsReader.java | 8 +--
.../codecs/memory/FSTOrdTermsWriter.java | 10 ++--
.../lucene/codecs/memory/FSTTermsReader.java | 8 +--
.../lucene/codecs/memory/FSTTermsWriter.java | 10 ++--
.../codecs/memory/MemoryPostingsFormat.java | 16 +++---
.../simpletext/SimpleTextFieldsReader.java | 8 +--
.../simpletext/SimpleTextFieldsWriter.java | 12 ++---
.../SimpleTextTermVectorsReader.java | 14 ++---
.../blocktreeords/TestOrdsBlockTree.java | 14 ++---
.../apache/lucene/codecs/FieldsConsumer.java | 18 +++----
.../apache/lucene/codecs/FieldsProducer.java | 4 +-
.../lucene/codecs/TermVectorsReader.java | 4 +-
.../lucene/codecs/TermVectorsWriter.java | 18 +++----
.../blocktree/BlockTreeTermsReader.java | 8 +--
.../blocktree/BlockTreeTermsWriter.java | 12 ++---
.../lucene/codecs/blocktree/FieldReader.java | 8 +--
.../codecs/blocktree/IntersectTermsEnum.java | 4 +-
.../CompressingTermVectorsReader.java | 14 ++---
.../CompressingTermVectorsWriter.java | 4 +-
.../codecs/perfield/PerFieldMergeState.java | 6 +--
.../perfield/PerFieldPostingsFormat.java | 14 ++---
.../lucene/index/BaseCompositeReader.java | 2 +-
.../lucene/index/BufferedUpdatesStream.java | 12 ++---
.../org/apache/lucene/index/CheckIndex.java | 52 +++++++++----------
.../org/apache/lucene/index/CodecReader.java | 4 +-
.../lucene/index/ExitableDirectoryReader.java | 24 ++++-----
.../apache/lucene/index/FilterLeafReader.java | 42 +++++++--------
.../apache/lucene/index/FreqProxFields.java | 10 ++--
.../lucene/index/FreqProxTermsWriter.java | 8 +--
.../org/apache/lucene/index/IndexReader.java | 18 +++----
.../index/{Terms.java => IndexedField.java} | 22 ++++----
.../index/{Fields.java => IndexedFields.java} | 16 +++---
.../org/apache/lucene/index/LeafReader.java | 26 +++++-----
.../lucene/index/MappedMultiFields.java | 20 +++----
.../lucene/index/MergeReaderWrapper.java | 4 +-
.../apache/lucene/index/MultiDocValues.java | 2 +-
.../{MultiTerms.java => MultiField.java} | 30 +++++------
.../org/apache/lucene/index/MultiFields.java | 40 +++++++-------
.../lucene/index/ParallelLeafReader.java | 20 +++----
.../index/SimpleMergedSegmentWarmer.java | 2 +-
.../lucene/index/SlowCodecReaderWrapper.java | 8 +--
.../lucene/index/SortingLeafReader.java | 18 +++----
.../index/SortingTermVectorsConsumer.java | 14 ++---
.../org/apache/lucene/index/TermContext.java | 4 +-
.../org/apache/lucene/index/package-info.java | 34 ++++++------
.../apache/lucene/search/AutomatonQuery.java | 4 +-
.../apache/lucene/search/BoostAttribute.java | 4 +-
.../lucene/search/CollectionStatistics.java | 8 +--
.../lucene/search/DocValuesRewriteMethod.java | 6 +--
.../org/apache/lucene/search/FuzzyQuery.java | 6 +--
.../apache/lucene/search/FuzzyTermsEnum.java | 6 +--
.../apache/lucene/search/IndexSearcher.java | 4 +-
.../MaxNonCompetitiveBoostAttribute.java | 6 +--
.../lucene/search/MultiPhraseQuery.java | 8 +--
.../apache/lucene/search/MultiTermQuery.java | 14 ++---
.../MultiTermQueryConstantScoreWrapper.java | 6 +--
.../org/apache/lucene/search/PhraseQuery.java | 6 +--
.../apache/lucene/search/SynonymQuery.java | 2 +-
.../lucene/search/TermCollectingRewrite.java | 4 +-
.../apache/lucene/search/TermInSetQuery.java | 10 ++--
.../org/apache/lucene/search/TermQuery.java | 8 +--
.../search/similarities/BasicStats.java | 6 +--
.../lucene/search/spans/SpanNearQuery.java | 4 +-
.../lucene/search/spans/SpanTermQuery.java | 6 +--
.../apache/lucene/util/DocIdSetBuilder.java | 6 +--
.../util/automaton/CompiledAutomaton.java | 12 ++---
.../lucene50/TestBlockPostingsFormat.java | 2 +-
.../lucene50/TestBlockPostingsFormat3.java | 32 ++++++------
.../lucene70/TestLucene70DocValuesFormat.java | 10 ++--
.../perfield/TestPerFieldPostingsFormat2.java | 4 +-
.../org/apache/lucene/index/Test2BDocs.java | 4 +-
.../org/apache/lucene/index/Test2BTerms.java | 4 +-
.../lucene/index/TestBagOfPositions.java | 4 +-
.../lucene/index/TestBagOfPostings.java | 4 +-
.../org/apache/lucene/index/TestCodecs.java | 18 +++----
.../lucene/index/TestDirectoryReader.java | 28 +++++-----
.../test/org/apache/lucene/index/TestDoc.java | 6 +--
.../org/apache/lucene/index/TestDocCount.java | 8 +--
.../lucene/index/TestDocsAndPositions.java | 8 +--
.../index/TestExitableDirectoryReader.java | 18 +++----
.../lucene/index/TestFilterLeafReader.java | 20 +++----
.../org/apache/lucene/index/TestFlex.java | 4 +-
.../apache/lucene/index/TestIndexWriter.java | 8 +--
.../lucene/index/TestIndexWriterUnicode.java | 2 +-
.../lucene/index/TestIndexableField.java | 8 +--
.../lucene/index/TestMultiTermsEnum.java | 22 ++++----
.../index/TestParallelCompositeReader.java | 30 +++++------
.../lucene/index/TestParallelLeafReader.java | 24 ++++-----
.../lucene/index/TestParallelTermEnum.java | 12 ++---
.../org/apache/lucene/index/TestPayloads.java | 4 +-
.../lucene/index/TestPayloadsOnVectors.java | 8 +--
.../lucene/index/TestPerSegmentDeletes.java | 6 +--
.../lucene/index/TestPostingsOffsets.java | 2 +-
.../lucene/index/TestSegmentMerger.java | 4 +-
.../lucene/index/TestSegmentReader.java | 12 ++---
.../lucene/index/TestSegmentTermDocs.java | 2 +-
.../lucene/index/TestSegmentTermEnum.java | 4 +-
.../lucene/index/TestStressAdvance.java | 2 +-
.../lucene/index/TestStressIndexing2.java | 48 ++++++++---------
.../apache/lucene/index/TestSumDocFreq.java | 8 +--
.../apache/lucene/index/TestTermVectors.java | 2 +-
.../lucene/index/TestTermVectorsReader.java | 20 +++----
.../lucene/index/TestTermVectorsWriter.java | 18 +++----
.../apache/lucene/index/TestTermdocPerf.java | 2 +-
.../org/apache/lucene/index/TestTerms.java | 4 +-
.../apache/lucene/index/TestTermsEnum.java | 28 +++++-----
.../apache/lucene/index/TestTermsEnum2.java | 6 +--
.../lucene/search/TermInSetQueryTest.java | 16 +++---
.../lucene/search/TestAutomatonQuery.java | 6 +--
.../lucene/search/TestMultiPhraseQuery.java | 2 +-
.../search/TestMultiTermQueryRewrites.java | 6 +--
.../search/TestMultiThreadTermVectors.java | 16 +++---
.../lucene/search/TestPhrasePrefixQuery.java | 2 +-
.../lucene/search/TestPrefixRandom.java | 6 +--
.../lucene/search/TestRegexpRandom2.java | 6 +--
.../search/TestSameScoresWithThreads.java | 8 +--
.../lucene/search/TestShardSearching.java | 2 +-
.../apache/lucene/search/TestTermQuery.java | 14 ++---
.../apache/lucene/search/TestWildcard.java | 4 +-
.../lucene/util/TestDocIdSetBuilder.java | 8 +--
.../org/apache/lucene/util/fst/TestFSTs.java | 8 +--
.../directory/DirectoryTaxonomyWriter.java | 14 ++---
.../highlight/TermVectorLeafReader.java | 18 +++----
.../lucene/search/highlight/TokenSources.java | 40 +++++++-------
.../highlight/TokenStreamFromTermVector.java | 12 ++---
.../highlight/WeightedSpanTermExtractor.java | 20 +++----
.../PostingsHighlighter.java | 6 +--
.../uhighlight/FieldOffsetStrategy.java | 10 ++--
.../search/uhighlight/PhraseHelper.java | 14 ++---
...PostingsWithTermVectorsOffsetStrategy.java | 4 +-
.../TermVectorFilteredLeafReader.java | 30 +++++------
.../uhighlight/TermVectorOffsetStrategy.java | 4 +-
.../search/uhighlight/UnifiedHighlighter.java | 6 +--
.../vectorhighlight/FieldTermStack.java | 10 ++--
.../search/vectorhighlight/package-info.java | 2 +-
.../search/highlight/TokenSourcesTest.java | 4 +-
.../TestUnifiedHighlighterTermVec.java | 2 +-
.../search/join/TermsIncludingScoreQuery.java | 10 ++--
.../apache/lucene/search/join/TermsQuery.java | 6 +--
.../lucene/search/join/TestJoinUtil.java | 6 +--
.../lucene/index/memory/MemoryIndex.java | 18 +++----
.../lucene/index/memory/TestMemoryIndex.java | 6 +--
.../memory/TestMemoryIndexAgainstRAMDir.java | 30 +++++------
.../org/apache/lucene/misc/HighFreqTerms.java | 14 ++---
.../index/TestMultiPassIndexSplitter.java | 8 +--
.../lucene/queries/CommonTermsQuery.java | 10 ++--
.../valuesource/JoinDocFreqValueSource.java | 6 +--
.../SumTotalTermFreqValueSource.java | 4 +-
.../function/valuesource/TFValueSource.java | 10 ++--
.../valuesource/TermFreqValueSource.java | 10 ++--
.../lucene/queries/mlt/MoreLikeThis.java | 14 ++---
.../payloads/SpanPayloadCheckQuery.java | 4 +-
.../lucene/queries/CommonTermsQueryTest.java | 6 +--
.../surround/query/SrndPrefixQuery.java | 6 +--
.../surround/query/SrndTermQuery.java | 6 +--
.../surround/query/SrndTruncQuery.java | 6 +--
.../VersionBlockTreeTermsReader.java | 4 +-
.../VersionBlockTreeTermsWriter.java | 10 ++--
.../codecs/idversion/VersionFieldReader.java | 8 +--
.../sandbox/queries/FuzzyLikeThisQuery.java | 8 +--
.../lucene/search/DocValuesTermsQuery.java | 6 +--
.../lucene/search/TermAutomatonQuery.java | 2 +-
.../TestIDVersionPostingsFormat.java | 3 +-
.../prefix/AbstractPrefixTreeQuery.java | 8 +--
.../spatial/util/ShapeFieldCacheProvider.java | 4 +-
.../search/GeoPointMultiTermQuery.java | 6 +--
...GeoPointTermQueryConstantScoreWrapper.java | 4 +-
.../search/spell/DirectSpellChecker.java | 4 +-
.../search/spell/HighFrequencyDictionary.java | 8 +--
.../lucene/search/spell/LuceneDictionary.java | 6 +--
.../lucene/search/spell/SpellChecker.java | 6 +--
.../analyzing/BlendedInfixSuggester.java | 6 +--
.../suggest/analyzing/FreeTextSuggester.java | 6 +--
.../document/CompletionFieldsConsumer.java | 14 ++---
.../document/CompletionFieldsProducer.java | 8 +--
.../suggest/document/CompletionQuery.java | 6 +--
.../suggest/document/CompletionTerms.java | 8 +--
.../suggest/document/CompletionWeight.java | 6 +--
.../document/TestPrefixCompletionQuery.java | 2 +-
.../asserting/AssertingPostingsFormat.java | 18 +++----
.../asserting/AssertingTermVectorsFormat.java | 6 +--
.../codecs/cranky/CrankyPostingsFormat.java | 4 +-
.../codecs/ramonly/RAMOnlyPostingsFormat.java | 16 +++---
.../lucene/index/AssertingLeafReader.java | 50 +++++++++---------
.../index/BasePostingsFormatTestCase.java | 50 +++++++++---------
.../index/BaseTermVectorsFormatTestCase.java | 36 ++++++-------
.../lucene/index/FieldFilterLeafReader.java | 14 ++---
.../lucene/index/PerThreadPKLookup.java | 4 +-
.../lucene/index/RandomPostingsTester.java | 30 +++++------
.../ThreadedIndexingAndSearchingTestCase.java | 6 +--
.../org/apache/lucene/search/QueryUtils.java | 12 ++---
.../apache/lucene/util/LuceneTestCase.java | 46 ++++++++--------
.../java/org/apache/lucene/util/TestUtil.java | 6 +--
.../lucene/analysis/TestMockAnalyzer.java | 10 ++--
.../TestCompressingTermVectorsFormat.java | 6 +--
.../handler/admin/LukeRequestHandler.java | 16 +++---
.../component/QueryElevationComponent.java | 14 ++---
.../component/TermVectorComponent.java | 14 ++---
.../handler/component/TermsComponent.java | 12 ++---
.../highlight/DefaultSolrHighlighter.java | 14 ++---
.../index/SlowCompositeReaderWrapper.java | 6 +--
.../org/apache/solr/query/SolrRangeQuery.java | 14 ++---
.../apache/solr/request/NumericFacets.java | 10 ++--
.../org/apache/solr/request/SimpleFacets.java | 12 ++---
.../org/apache/solr/search/DocSetUtil.java | 10 ++--
.../solr/search/GraphTermsQParserPlugin.java | 14 ++---
.../solr/search/IGainTermsQParserPlugin.java | 6 +--
.../apache/solr/search/JoinQParserPlugin.java | 16 +++---
.../apache/solr/search/SolrIndexSearcher.java | 8 +--
.../TextLogisticRegressionQParserPlugin.java | 6 +--
.../FacetFieldProcessorByEnumTermsStream.java | 10 ++--
.../solr/search/function/FileFloatSource.java | 2 +-
.../apache/solr/uninverting/DocTermOrds.java | 8 +--
.../apache/solr/uninverting/FieldCache.java | 26 +++++-----
.../solr/uninverting/FieldCacheImpl.java | 36 ++++++-------
.../apache/solr/update/SolrIndexSplitter.java | 10 ++--
.../org/apache/solr/update/VersionInfo.java | 6 +--
.../org/apache/solr/search/TestDocSet.java | 6 +--
.../org/apache/solr/search/TestRTGBase.java | 6 +--
.../solr/uninverting/TestDocTermOrds.java | 10 ++--
.../uninverting/TestLegacyFieldCache.java | 6 +--
249 files changed, 1387 insertions(+), 1386 deletions(-)
rename lucene/core/src/java/org/apache/lucene/index/{Terms.java => IndexedField.java} (93%)
rename lucene/core/src/java/org/apache/lucene/index/{Fields.java => IndexedFields.java} (77%)
rename lucene/core/src/java/org/apache/lucene/index/{MultiTerms.java => MultiField.java} (86%)
diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java
index 37f044aeb627..80b18c07a541 100644
--- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java
+++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java
@@ -26,7 +26,7 @@
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRefBuilder;
@@ -140,10 +140,10 @@ public QueryAutoStopWordAnalyzer(
for (String field : fields) {
Set stopWords = new HashSet<>();
- Terms terms = MultiFields.getTerms(indexReader, field);
+ IndexedField terms = MultiFields.getIndexedField(indexReader, field);
CharsRefBuilder spare = new CharsRefBuilder();
if (terms != null) {
- TermsEnum te = terms.iterator();
+ TermsEnum te = terms.getTermsEnum();
BytesRef text;
while ((text = te.next()) != null) {
if (te.docFreq() > maxDocFreq) {
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java
index c0127a330e3b..6e570eb15789 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java
@@ -41,7 +41,7 @@
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.PostingsEnum;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.Directory;
@@ -95,9 +95,9 @@ public void testEndOffsetPositionWithTeeSinkTokenFilter() throws Exception {
w.close();
IndexReader r = DirectoryReader.open(dir);
- Terms vector = r.getTermVectors(0).terms("field");
+ IndexedField vector = r.getTermVectors(0).indexedField("field");
assertEquals(1, vector.size());
- TermsEnum termsEnum = vector.iterator();
+ TermsEnum termsEnum = vector.getTermsEnum();
termsEnum.next();
assertEquals(2, termsEnum.totalTermFreq());
PostingsEnum positions = termsEnum.postings(null, PostingsEnum.ALL);
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/index/FixBrokenOffsets.java b/lucene/backward-codecs/src/java/org/apache/lucene/index/FixBrokenOffsets.java
index d4d6f85430bf..3a993b542291 100644
--- a/lucene/backward-codecs/src/java/org/apache/lucene/index/FixBrokenOffsets.java
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/index/FixBrokenOffsets.java
@@ -63,18 +63,18 @@ public static void main(String[] args) throws IOException {
for(int i=0;i newFloatRange(final String field,
}
@Override @SuppressWarnings("unchecked")
- protected TermsEnum getTermsEnum(final Terms terms, AttributeSource atts) throws IOException {
+ protected TermsEnum getTermsEnum(final IndexedField terms, AttributeSource atts) throws IOException {
// very strange: java.lang.Number itself is not Comparable, but all subclasses used here are
if (min != null && max != null && ((Comparable) min).compareTo(max) > 0) {
return TermsEnum.EMPTY;
}
- return new NumericRangeTermsEnum(terms.iterator());
+ return new NumericRangeTermsEnum(terms.getTermsEnum());
}
/** Returns true if the lower endpoint is inclusive */
diff --git a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericUtils.java b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericUtils.java
index e6659d7e1025..3c669885b560 100644
--- a/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericUtils.java
+++ b/lucene/backward-codecs/src/java/org/apache/lucene/legacy/LegacyNumericUtils.java
@@ -21,7 +21,7 @@
import org.apache.lucene.index.FilterLeafReader;
import org.apache.lucene.index.FilteredTermsEnum;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
@@ -447,20 +447,20 @@ public SeekStatus seekCeil(BytesRef term) throws IOException {
}
}
- private static Terms intTerms(Terms terms) {
- return new FilterLeafReader.FilterTerms(terms) {
+ private static IndexedField intTerms(IndexedField terms) {
+ return new FilterLeafReader.FilterField(terms) {
@Override
- public TermsEnum iterator() throws IOException {
- return filterPrefixCodedInts(in.iterator());
+ public TermsEnum getTermsEnum() throws IOException {
+ return filterPrefixCodedInts(in.getTermsEnum());
}
};
}
- private static Terms longTerms(Terms terms) {
- return new FilterLeafReader.FilterTerms(terms) {
+ private static IndexedField longTerms(IndexedField terms) {
+ return new FilterLeafReader.FilterField(terms) {
@Override
- public TermsEnum iterator() throws IOException {
- return filterPrefixCodedLongs(in.iterator());
+ public TermsEnum getTermsEnum() throws IOException {
+ return filterPrefixCodedLongs(in.getTermsEnum());
}
};
}
@@ -469,7 +469,7 @@ public TermsEnum iterator() throws IOException {
* Returns the minimum int value indexed into this
* numeric field or null if no terms exist.
*/
- public static Integer getMinInt(Terms terms) throws IOException {
+ public static Integer getMinInt(IndexedField terms) throws IOException {
// All shift=0 terms are sorted first, so we don't need
// to filter the incoming terms; we can just get the
// min:
@@ -481,7 +481,7 @@ public static Integer getMinInt(Terms terms) throws IOException {
* Returns the maximum int value indexed into this
* numeric field or null if no terms exist.
*/
- public static Integer getMaxInt(Terms terms) throws IOException {
+ public static Integer getMaxInt(IndexedField terms) throws IOException {
BytesRef max = intTerms(terms).getMax();
return (max != null) ? LegacyNumericUtils.prefixCodedToInt(max) : null;
}
@@ -490,7 +490,7 @@ public static Integer getMaxInt(Terms terms) throws IOException {
* Returns the minimum long value indexed into this
* numeric field or null if no terms exist.
*/
- public static Long getMinLong(Terms terms) throws IOException {
+ public static Long getMinLong(IndexedField terms) throws IOException {
// All shift=0 terms are sorted first, so we don't need
// to filter the incoming terms; we can just get the
// min:
@@ -502,7 +502,7 @@ public static Long getMinLong(Terms terms) throws IOException {
* Returns the maximum long value indexed into this
* numeric field or null if no terms exist.
*/
- public static Long getMaxLong(Terms terms) throws IOException {
+ public static Long getMaxLong(IndexedField terms) throws IOException {
BytesRef max = longTerms(terms).getMax();
return (max != null) ? LegacyNumericUtils.prefixCodedToLong(max) : null;
}
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/codecs/lucene54/TestLucene54DocValuesFormat.java b/lucene/backward-codecs/src/test/org/apache/lucene/codecs/lucene54/TestLucene54DocValuesFormat.java
index a761dfcec15e..24dad6e0f6a9 100644
--- a/lucene/backward-codecs/src/test/org/apache/lucene/codecs/lucene54/TestLucene54DocValuesFormat.java
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/codecs/lucene54/TestLucene54DocValuesFormat.java
@@ -59,7 +59,7 @@
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum.SeekStatus;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.index.TermsEnum;
@@ -333,11 +333,11 @@ public DocValuesFormat getDocValuesFormatForField(String field) {
DirectoryReader ir = writer.getReader();
for (LeafReaderContext context : ir.leaves()) {
LeafReader r = context.reader();
- Terms terms = r.terms("indexed");
+ IndexedField terms = r.indexedField("indexed");
if (terms != null) {
SortedSetDocValues ssdv = r.getSortedSetDocValues("dv");
assertEquals(terms.size(), ssdv.getValueCount());
- TermsEnum expected = terms.iterator();
+ TermsEnum expected = terms.getTermsEnum();
TermsEnum actual = r.getSortedSetDocValues("dv").termsEnum();
assertEquals(terms.size(), expected, actual);
@@ -351,10 +351,10 @@ public DocValuesFormat getDocValuesFormatForField(String field) {
// now compare again after the merge
ir = writer.getReader();
LeafReader ar = getOnlyLeafReader(ir);
- Terms terms = ar.terms("indexed");
+ IndexedField terms = ar.indexedField("indexed");
if (terms != null) {
assertEquals(terms.size(), ar.getSortedSetDocValues("dv").getValueCount());
- TermsEnum expected = terms.iterator();
+ TermsEnum expected = terms.getTermsEnum();
TermsEnum actual = ar.getSortedSetDocValues("dv").termsEnum();
assertEquals(terms.size(), expected, actual);
}
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java b/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
index 73b7271d78e7..3db7d7c62b3c 100644
--- a/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
@@ -805,9 +805,9 @@ public void searchIndex(Directory dir, String oldName) throws IOException {
assertEquals("field with non-ascii name", f.stringValue());
}
- Fields tfvFields = reader.getTermVectors(i);
+ IndexedFields tfvFields = reader.getTermVectors(i);
assertNotNull("i=" + i, tfvFields);
- Terms tfv = tfvFields.terms("utf8");
+ IndexedField tfv = tfvFields.indexedField("utf8");
assertNotNull("docID=" + i + " index=" + oldName, tfv);
} else {
// Only ID 7 is deleted
@@ -1148,7 +1148,7 @@ public void testNextIntoWrongField() throws Exception {
for (String name : oldNames) {
Directory dir = oldIndexDirs.get(name);
IndexReader r = DirectoryReader.open(dir);
- TermsEnum terms = MultiFields.getFields(r).terms("content").iterator();
+ TermsEnum terms = MultiFields.getFields(r).indexedField("content").getTermsEnum();
BytesRef t = terms.next();
assertNotNull(t);
@@ -1262,15 +1262,15 @@ public void testNumericFields() throws Exception {
assertEquals("wrong number of hits", 34, hits.length);
// check decoding of terms
- Terms terms = MultiFields.getTerms(searcher.getIndexReader(), "trieInt");
- TermsEnum termsEnum = LegacyNumericUtils.filterPrefixCodedInts(terms.iterator());
+ IndexedField terms = MultiFields.getIndexedField(searcher.getIndexReader(), "trieInt");
+ TermsEnum termsEnum = LegacyNumericUtils.filterPrefixCodedInts(terms.getTermsEnum());
while (termsEnum.next() != null) {
int val = LegacyNumericUtils.prefixCodedToInt(termsEnum.term());
assertTrue("value in id bounds", val >= 0 && val < 35);
}
- terms = MultiFields.getTerms(searcher.getIndexReader(), "trieLong");
- termsEnum = LegacyNumericUtils.filterPrefixCodedLongs(terms.iterator());
+ terms = MultiFields.getIndexedField(searcher.getIndexReader(), "trieLong");
+ termsEnum = LegacyNumericUtils.filterPrefixCodedLongs(terms.getTermsEnum());
while (termsEnum.next() != null) {
long val = LegacyNumericUtils.prefixCodedToLong(termsEnum.term());
assertTrue("value in id bounds", val >= 0L && val < 35L);
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyTerms.java b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyTerms.java
index 27fae15e9165..62678ee025c2 100644
--- a/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyTerms.java
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/legacy/TestLegacyTerms.java
@@ -21,7 +21,7 @@
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.legacy.LegacyDoubleField;
import org.apache.lucene.legacy.LegacyFloatField;
@@ -55,7 +55,7 @@ public void testIntFieldMinMax() throws Exception {
}
IndexReader r = w.getReader();
- Terms terms = MultiFields.getTerms(r, "field");
+ IndexedField terms = MultiFields.getIndexedField(r, "field");
assertEquals(new Integer(minValue), LegacyNumericUtils.getMinInt(terms));
assertEquals(new Integer(maxValue), LegacyNumericUtils.getMaxInt(terms));
@@ -86,7 +86,7 @@ public void testLongFieldMinMax() throws Exception {
IndexReader r = w.getReader();
- Terms terms = MultiFields.getTerms(r, "field");
+ IndexedField terms = MultiFields.getIndexedField(r, "field");
assertEquals(new Long(minValue), LegacyNumericUtils.getMinLong(terms));
assertEquals(new Long(maxValue), LegacyNumericUtils.getMaxLong(terms));
@@ -111,7 +111,7 @@ public void testFloatFieldMinMax() throws Exception {
}
IndexReader r = w.getReader();
- Terms terms = MultiFields.getTerms(r, "field");
+ IndexedField terms = MultiFields.getIndexedField(r, "field");
assertEquals(minValue, NumericUtils.sortableIntToFloat(LegacyNumericUtils.getMinInt(terms)), 0.0f);
assertEquals(maxValue, NumericUtils.sortableIntToFloat(LegacyNumericUtils.getMaxInt(terms)), 0.0f);
@@ -137,7 +137,7 @@ public void testDoubleFieldMinMax() throws Exception {
IndexReader r = w.getReader();
- Terms terms = MultiFields.getTerms(r, "field");
+ IndexedField terms = MultiFields.getIndexedField(r, "field");
assertEquals(minValue, NumericUtils.sortableLongToDouble(LegacyNumericUtils.getMinLong(terms)), 0.0);
assertEquals(maxValue, NumericUtils.sortableLongToDouble(LegacyNumericUtils.getMaxLong(terms)), 0.0);
@@ -148,10 +148,10 @@ public void testDoubleFieldMinMax() throws Exception {
}
/**
- * A complete empty Terms instance that has no terms in it and supports no optional statistics
+ * A complete empty IndexedField instance that has no terms in it and supports no optional statistics
*/
- private static Terms EMPTY_TERMS = new Terms() {
- public TermsEnum iterator() { return TermsEnum.EMPTY; }
+ private static IndexedField EMPTY_TERMS = new IndexedField() {
+ public TermsEnum getTermsEnum() { return TermsEnum.EMPTY; }
public long size() { return -1; }
public long getSumTotalTermFreq() { return -1; }
public long getSumDocFreq() { return -1; }
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetHighlightTask.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetHighlightTask.java
index f36854dd559e..8e21759369c7 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetHighlightTask.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/SearchTravRetHighlightTask.java
@@ -29,7 +29,7 @@
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.benchmark.byTask.PerfRunData;
import org.apache.lucene.document.Document;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.search.IndexSearcher;
@@ -174,7 +174,7 @@ public void withTopDocs(IndexSearcher searcher, Query q, TopDocs hits) throws Ex
// highlighter.setTextFragmenter(); unfortunately no sentence mechanism, not even regex. Default here is trivial
for (ScoreDoc scoreDoc : docIdOrder(hits.scoreDocs)) {
Document document = reader.document(scoreDoc.doc, hlFields);
- Fields tvFields = termVecs ? reader.getTermVectors(scoreDoc.doc) : null;
+ IndexedFields tvFields = termVecs ? reader.getTermVectors(scoreDoc.doc) : null;
for (IndexableField indexableField : document) {
TokenStream tokenStream;
if (termVecs) {
diff --git a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/QualityQueriesFinder.java b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/QualityQueriesFinder.java
index 15ba807e3e2e..43cd57f51ced 100644
--- a/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/QualityQueriesFinder.java
+++ b/lucene/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/QualityQueriesFinder.java
@@ -22,7 +22,7 @@
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
@@ -90,9 +90,9 @@ private static String formatQueryAsTrecTopic (int qnum, String title, String des
IndexReader ir = DirectoryReader.open(dir);
try {
int threshold = ir.maxDoc() / 10; // ignore words too common.
- Terms terms = MultiFields.getTerms(ir, field);
+ IndexedField terms = MultiFields.getIndexedField(ir, field);
if (terms != null) {
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
while (termsEnum.next() != null) {
int df = termsEnum.docFreq();
if (df 0);
reader.close();
diff --git a/lucene/classification/src/java/org/apache/lucene/classification/BooleanPerceptronClassifier.java b/lucene/classification/src/java/org/apache/lucene/classification/BooleanPerceptronClassifier.java
index 781a14ff6eec..d7c00600cf04 100644
--- a/lucene/classification/src/java/org/apache/lucene/classification/BooleanPerceptronClassifier.java
+++ b/lucene/classification/src/java/org/apache/lucene/classification/BooleanPerceptronClassifier.java
@@ -30,7 +30,7 @@
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
@@ -59,7 +59,7 @@
public class BooleanPerceptronClassifier implements Classifier {
private final Double threshold;
- private final Terms textTerms;
+ private final IndexedField textTerms;
private final Analyzer analyzer;
private final String textFieldName;
   private FST<Long> fst;
@@ -80,7 +80,7 @@ public class BooleanPerceptronClassifier implements Classifier {
*/
public BooleanPerceptronClassifier(IndexReader indexReader, Analyzer analyzer, Query query, Integer batchSize,
Double threshold, String classFieldName, String textFieldName) throws IOException {
- this.textTerms = MultiFields.getTerms(indexReader, textFieldName);
+ this.textTerms = MultiFields.getIndexedField(indexReader, textFieldName);
if (textTerms == null) {
throw new IOException("term vectors need to be available for field " + textFieldName);
@@ -106,7 +106,7 @@ public BooleanPerceptronClassifier(IndexReader indexReader, Analyzer analyzer, Q
// TODO : remove this map as soon as we have a writable FST
     SortedMap<String, Double> weights = new ConcurrentSkipListMap<>();
- TermsEnum termsEnum = textTerms.iterator();
+ TermsEnum termsEnum = textTerms.getTermsEnum();
BytesRef textTerm;
while ((textTerm = termsEnum.next()) != null) {
weights.put(textTerm.utf8ToString(), (double) termsEnum.totalTermFreq());
@@ -152,17 +152,17 @@ public BooleanPerceptronClassifier(IndexReader indexReader, Analyzer analyzer, Q
private void updateWeights(IndexReader indexReader,
int docId, Boolean assignedClass, SortedMap weights,
double modifier, boolean updateFST) throws IOException {
- TermsEnum cte = textTerms.iterator();
+ TermsEnum cte = textTerms.getTermsEnum();
// get the doc term vectors
- Terms terms = indexReader.getTermVector(docId, textFieldName);
+ IndexedField terms = indexReader.getTermVector(docId, textFieldName);
if (terms == null) {
throw new IOException("term vectors must be stored for field "
+ textFieldName);
}
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
BytesRef term;
diff --git a/lucene/classification/src/java/org/apache/lucene/classification/CachingNaiveBayesClassifier.java b/lucene/classification/src/java/org/apache/lucene/classification/CachingNaiveBayesClassifier.java
index 6fe683546d06..08b36889ca3c 100644
--- a/lucene/classification/src/java/org/apache/lucene/classification/CachingNaiveBayesClassifier.java
+++ b/lucene/classification/src/java/org/apache/lucene/classification/CachingNaiveBayesClassifier.java
@@ -27,7 +27,7 @@
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
@@ -210,7 +210,7 @@ public void reInitCache(int minTermOccurrenceInCache, boolean justCachedTerms) t
// build the cache for the word
     Map<String, Long> frequencyMap = new HashMap<>();
for (String textFieldName : textFieldNames) {
- TermsEnum termsEnum = MultiFields.getTerms(indexReader, textFieldName).iterator();
+ TermsEnum termsEnum = MultiFields.getIndexedField(indexReader, textFieldName).getTermsEnum();
while (termsEnum.next() != null) {
BytesRef term = termsEnum.term();
String termText = term.utf8ToString();
@@ -227,8 +227,8 @@ public void reInitCache(int minTermOccurrenceInCache, boolean justCachedTerms) t
}
// fill the class list
- Terms terms = MultiFields.getTerms(indexReader, classFieldName);
- TermsEnum termsEnum = terms.iterator();
+ IndexedField terms = MultiFields.getIndexedField(indexReader, classFieldName);
+ TermsEnum termsEnum = terms.getTermsEnum();
while ((termsEnum.next()) != null) {
cclasses.add(BytesRef.deepCopyOf(termsEnum.term()));
}
@@ -236,7 +236,7 @@ public void reInitCache(int minTermOccurrenceInCache, boolean justCachedTerms) t
for (BytesRef cclass : cclasses) {
double avgNumberOfUniqueTerms = 0;
for (String textFieldName : textFieldNames) {
- terms = MultiFields.getTerms(indexReader, textFieldName);
+ terms = MultiFields.getIndexedField(indexReader, textFieldName);
long numPostings = terms.getSumDocFreq(); // number of term/doc pairs
avgNumberOfUniqueTerms += numPostings / (double) terms.getDocCount();
}
diff --git a/lucene/classification/src/java/org/apache/lucene/classification/SimpleNaiveBayesClassifier.java b/lucene/classification/src/java/org/apache/lucene/classification/SimpleNaiveBayesClassifier.java
index 3509df585111..4f9204acd42a 100644
--- a/lucene/classification/src/java/org/apache/lucene/classification/SimpleNaiveBayesClassifier.java
+++ b/lucene/classification/src/java/org/apache/lucene/classification/SimpleNaiveBayesClassifier.java
@@ -29,7 +29,7 @@
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
@@ -144,9 +144,9 @@ public List<ClassificationResult<BytesRef>> getClasses(String text, int max) thr
   protected List<ClassificationResult<BytesRef>> assignClassNormalizedList(String inputDocument) throws IOException {
     List<ClassificationResult<BytesRef>> assignedClasses = new ArrayList<>();
- Terms classes = MultiFields.getTerms(indexReader, classFieldName);
+ IndexedField classes = MultiFields.getIndexedField(indexReader, classFieldName);
if (classes != null) {
- TermsEnum classesEnum = classes.iterator();
+ TermsEnum classesEnum = classes.getTermsEnum();
BytesRef next;
String[] tokenizedText = tokenize(inputDocument);
int docsWithClassSize = countDocsWithClass();
@@ -169,7 +169,7 @@ protected List<ClassificationResult<BytesRef>> assignClassNormalizedList(String
* @throws IOException if accessing to term vectors or search fails
*/
protected int countDocsWithClass() throws IOException {
- Terms terms = MultiFields.getTerms(this.indexReader, this.classFieldName);
+ IndexedField terms = MultiFields.getIndexedField(this.indexReader, this.classFieldName);
int docCount;
if (terms == null || terms.getDocCount() == -1) { // in case codec doesn't support getDocCount
TotalHitCountCollector classQueryCountCollector = new TotalHitCountCollector();
@@ -240,7 +240,7 @@ private double calculateLogLikelihood(String[] tokenizedText, Term term, int doc
private double getTextTermFreqForClass(Term term) throws IOException {
double avgNumberOfUniqueTerms = 0;
for (String textFieldName : textFieldNames) {
- Terms terms = MultiFields.getTerms(indexReader, textFieldName);
+ IndexedField terms = MultiFields.getIndexedField(indexReader, textFieldName);
long numPostings = terms.getSumDocFreq(); // number of term/doc pairs
avgNumberOfUniqueTerms += numPostings / (double) terms.getDocCount(); // avg # of unique terms per doc
}
diff --git a/lucene/classification/src/java/org/apache/lucene/classification/document/SimpleNaiveBayesDocumentClassifier.java b/lucene/classification/src/java/org/apache/lucene/classification/document/SimpleNaiveBayesDocumentClassifier.java
index 21ad7d134a47..e41c8b50fffe 100644
--- a/lucene/classification/src/java/org/apache/lucene/classification/document/SimpleNaiveBayesDocumentClassifier.java
+++ b/lucene/classification/src/java/org/apache/lucene/classification/document/SimpleNaiveBayesDocumentClassifier.java
@@ -36,7 +36,7 @@
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
@@ -112,8 +112,8 @@ private List<ClassificationResult<BytesRef>> assignNormClasses(Document inputDoc
     List<ClassificationResult<BytesRef>> assignedClasses = new ArrayList<>();
     Map<String, List<String[]>> fieldName2tokensArray = new LinkedHashMap<>();
     Map<String, Float> fieldName2boost = new LinkedHashMap<>();
- Terms classes = MultiFields.getTerms(indexReader, classFieldName);
- TermsEnum classesEnum = classes.iterator();
+ IndexedField classes = MultiFields.getIndexedField(indexReader, classFieldName);
+ TermsEnum classesEnum = classes.getTermsEnum();
BytesRef c;
analyzeSeedDocument(inputDocument, fieldName2tokensArray, fieldName2boost);
@@ -225,7 +225,7 @@ private double calculateLogLikelihood(String[] tokenizedText, String fieldName,
*/
private double getTextTermFreqForClass(Term term, String fieldName) throws IOException {
double avgNumberOfUniqueTerms;
- Terms terms = MultiFields.getTerms(indexReader, fieldName);
+ IndexedField terms = MultiFields.getIndexedField(indexReader, fieldName);
long numPostings = terms.getSumDocFreq(); // number of term/doc pairs
avgNumberOfUniqueTerms = numPostings / (double) terms.getDocCount(); // avg # of unique terms per doc
int docsWithC = indexReader.docFreq(term);
diff --git a/lucene/classification/src/java/org/apache/lucene/classification/utils/DatasetSplitter.java b/lucene/classification/src/java/org/apache/lucene/classification/utils/DatasetSplitter.java
index 7ab674eafdda..79be9f38e6b5 100644
--- a/lucene/classification/src/java/org/apache/lucene/classification/utils/DatasetSplitter.java
+++ b/lucene/classification/src/java/org/apache/lucene/classification/utils/DatasetSplitter.java
@@ -95,7 +95,7 @@ public void split(IndexReader originalIndex, Directory trainingIndex, Directory
}
if (classValues == null) {
// approximate with no. of terms
- noOfClasses += leave.reader().terms(classFieldName).size();
+ noOfClasses += leave.reader().indexedField(classFieldName).size();
}
noOfClasses += valueCount;
}
diff --git a/lucene/classification/src/java/org/apache/lucene/classification/utils/DocToDoubleVectorUtils.java b/lucene/classification/src/java/org/apache/lucene/classification/utils/DocToDoubleVectorUtils.java
index 288d8c180760..c5cd536c940f 100644
--- a/lucene/classification/src/java/org/apache/lucene/classification/utils/DocToDoubleVectorUtils.java
+++ b/lucene/classification/src/java/org/apache/lucene/classification/utils/DocToDoubleVectorUtils.java
@@ -18,7 +18,7 @@
import java.io.IOException;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;
@@ -39,18 +39,18 @@ private DocToDoubleVectorUtils() {
* @return a sparse vector of Doubles as an array
* @throws IOException in case accessing the underlying index fails
*/
- public static Double[] toSparseLocalFreqDoubleArray(Terms docTerms, Terms fieldTerms) throws IOException {
- TermsEnum fieldTermsEnum = fieldTerms.iterator();
+ public static Double[] toSparseLocalFreqDoubleArray(IndexedField docTerms, IndexedField fieldTerms) throws IOException {
+ TermsEnum fieldTermsEnum = fieldTerms.getTermsEnum();
Double[] freqVector = null;
if (docTerms != null && fieldTerms.size() > -1) {
freqVector = new Double[(int) fieldTerms.size()];
int i = 0;
- TermsEnum docTermsEnum = docTerms.iterator();
+ TermsEnum docTermsEnum = docTerms.getTermsEnum();
BytesRef term;
while ((term = fieldTermsEnum.next()) != null) {
TermsEnum.SeekStatus seekStatus = docTermsEnum.seekCeil(term);
if (seekStatus.equals(TermsEnum.SeekStatus.END)) {
- docTermsEnum = docTerms.iterator();
+ docTermsEnum = docTerms.getTermsEnum();
}
if (seekStatus.equals(TermsEnum.SeekStatus.FOUND)) {
long termFreqLocal = docTermsEnum.totalTermFreq(); // the total number of occurrences of this term in the given document
@@ -71,12 +71,12 @@ public static Double[] toSparseLocalFreqDoubleArray(Terms docTerms, Terms fieldT
* @return a dense vector of Doubles as an array
* @throws IOException in case accessing the underlying index fails
*/
- public static Double[] toDenseLocalFreqDoubleArray(Terms docTerms) throws IOException {
+ public static Double[] toDenseLocalFreqDoubleArray(IndexedField docTerms) throws IOException {
Double[] freqVector = null;
if (docTerms != null) {
freqVector = new Double[(int) docTerms.size()];
int i = 0;
- TermsEnum docTermsEnum = docTerms.iterator();
+ TermsEnum docTermsEnum = docTerms.getTermsEnum();
while (docTermsEnum.next() != null) {
long termFreqLocal = docTermsEnum.totalTermFreq(); // the total number of occurrences of this term in the given document
diff --git a/lucene/classification/src/test/org/apache/lucene/classification/CachingNaiveBayesClassifierTest.java b/lucene/classification/src/test/org/apache/lucene/classification/CachingNaiveBayesClassifierTest.java
index acbfe82be9a8..734d70308ecb 100644
--- a/lucene/classification/src/test/org/apache/lucene/classification/CachingNaiveBayesClassifierTest.java
+++ b/lucene/classification/src/test/org/apache/lucene/classification/CachingNaiveBayesClassifierTest.java
@@ -27,7 +27,7 @@
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.BytesRef;
@@ -123,8 +123,8 @@ public void testPerformance() throws Exception {
assertTrue(precision >= 0d);
assertTrue(precision <= 1d);
- Terms terms = MultiFields.getTerms(leafReader, categoryFieldName);
- TermsEnum iterator = terms.iterator();
+ IndexedField terms = MultiFields.getIndexedField(leafReader, categoryFieldName);
+ TermsEnum iterator = terms.getTermsEnum();
BytesRef term;
while ((term = iterator.next()) != null) {
String s = term.utf8ToString();
diff --git a/lucene/classification/src/test/org/apache/lucene/classification/KNearestNeighborClassifierTest.java b/lucene/classification/src/test/org/apache/lucene/classification/KNearestNeighborClassifierTest.java
index 5e3ce7bfff33..bbdc3d7e7591 100644
--- a/lucene/classification/src/test/org/apache/lucene/classification/KNearestNeighborClassifierTest.java
+++ b/lucene/classification/src/test/org/apache/lucene/classification/KNearestNeighborClassifierTest.java
@@ -25,7 +25,7 @@
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.similarities.BM25Similarity;
@@ -153,8 +153,8 @@ public void testPerformance() throws Exception {
assertTrue(precision >= 0d);
assertTrue(precision <= 1d);
- Terms terms = MultiFields.getTerms(leafReader, categoryFieldName);
- TermsEnum iterator = terms.iterator();
+ IndexedField terms = MultiFields.getIndexedField(leafReader, categoryFieldName);
+ TermsEnum iterator = terms.getTermsEnum();
BytesRef term;
while ((term = iterator.next()) != null) {
String s = term.utf8ToString();
diff --git a/lucene/classification/src/test/org/apache/lucene/classification/SimpleNaiveBayesClassifierTest.java b/lucene/classification/src/test/org/apache/lucene/classification/SimpleNaiveBayesClassifierTest.java
index 830ce2c251b7..9d536f4610e1 100644
--- a/lucene/classification/src/test/org/apache/lucene/classification/SimpleNaiveBayesClassifierTest.java
+++ b/lucene/classification/src/test/org/apache/lucene/classification/SimpleNaiveBayesClassifierTest.java
@@ -26,7 +26,7 @@
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.BytesRef;
@@ -124,8 +124,8 @@ public void testPerformance() throws Exception {
assertTrue(precision >= 0d);
assertTrue(precision <= 1d);
- Terms terms = MultiFields.getTerms(leafReader, categoryFieldName);
- TermsEnum iterator = terms.iterator();
+ IndexedField terms = MultiFields.getIndexedField(leafReader, categoryFieldName);
+ TermsEnum iterator = terms.getTermsEnum();
BytesRef term;
while ((term = iterator.next()) != null) {
String s = term.utf8ToString();
diff --git a/lucene/classification/src/test/org/apache/lucene/classification/utils/DocToDoubleVectorUtilsTest.java b/lucene/classification/src/test/org/apache/lucene/classification/utils/DocToDoubleVectorUtilsTest.java
index a90d1a50f5e8..42c7637717da 100644
--- a/lucene/classification/src/test/org/apache/lucene/classification/utils/DocToDoubleVectorUtilsTest.java
+++ b/lucene/classification/src/test/org/apache/lucene/classification/utils/DocToDoubleVectorUtilsTest.java
@@ -23,7 +23,7 @@
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.ScoreDoc;
@@ -80,7 +80,7 @@ public void tearDown() throws Exception {
public void testDenseFreqDoubleArrayConversion() throws Exception {
IndexSearcher indexSearcher = new IndexSearcher(index);
for (ScoreDoc scoreDoc : indexSearcher.search(new MatchAllDocsQuery(), Integer.MAX_VALUE).scoreDocs) {
- Terms docTerms = index.getTermVector(scoreDoc.doc, "text");
+ IndexedField docTerms = index.getTermVector(scoreDoc.doc, "text");
Double[] vector = DocToDoubleVectorUtils.toDenseLocalFreqDoubleArray(docTerms);
assertNotNull(vector);
assertTrue(vector.length > 0);
@@ -89,11 +89,11 @@ public void testDenseFreqDoubleArrayConversion() throws Exception {
@Test
public void testSparseFreqDoubleArrayConversion() throws Exception {
- Terms fieldTerms = MultiFields.getTerms(index, "text");
+ IndexedField fieldTerms = MultiFields.getIndexedField(index, "text");
if (fieldTerms != null && fieldTerms.size() != -1) {
IndexSearcher indexSearcher = new IndexSearcher(index);
for (ScoreDoc scoreDoc : indexSearcher.search(new MatchAllDocsQuery(), Integer.MAX_VALUE).scoreDocs) {
- Terms docTerms = index.getTermVector(scoreDoc.doc, "text");
+ IndexedField docTerms = index.getTermVector(scoreDoc.doc, "text");
Double[] vector = DocToDoubleVectorUtils.toSparseLocalFreqDoubleArray(docTerms, fieldTerms);
assertNotNull(vector);
assertTrue(vector.length > 0);
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsReader.java
index b8d23bdb3d62..667e8646fa90 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsReader.java
@@ -36,7 +36,7 @@
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.index.TermState;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.ByteArrayDataInput;
import org.apache.lucene.store.IndexInput;
@@ -202,7 +202,7 @@ public Iterator<String> iterator() {
}
@Override
- public Terms terms(String field) throws IOException {
+ public IndexedField indexedField(String field) throws IOException {
assert field != null;
return fields.get(field);
}
@@ -213,7 +213,7 @@ public int size() {
}
private static final long FIELD_READER_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(FieldReader.class);
- private class FieldReader extends Terms implements Accountable {
+ private class FieldReader extends IndexedField implements Accountable {
final long numTerms;
final FieldInfo fieldInfo;
final long termsStartPointer;
@@ -239,7 +239,7 @@ public long ramBytesUsed() {
}
@Override
- public TermsEnum iterator() throws IOException {
+ public TermsEnum getTermsEnum() throws IOException {
return new SegmentTermsEnum();
}
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsWriter.java
index f19cd2c052ae..5251db60d0bc 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsWriter.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsWriter.java
@@ -31,10 +31,10 @@
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.SegmentWriteState;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RAMOutputStream;
@@ -127,16 +127,16 @@ public BlockTermsWriter(TermsIndexWriterBase termsIndexWriter,
}
@Override
- public void write(Fields fields) throws IOException {
+ public void write(IndexedFields fields) throws IOException {
for(String field : fields) {
- Terms terms = fields.terms(field);
+ IndexedField terms = fields.indexedField(field);
if (terms == null) {
continue;
}
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
TermsWriter termsWriter = addField(fieldInfos.fieldInfo(field));
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsBlockTreeTermsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsBlockTreeTermsReader.java
index afdaf5aad17e..b9c20f195a7b 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsBlockTreeTermsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsBlockTreeTermsReader.java
@@ -34,7 +34,7 @@
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.SegmentReadState;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.Accountables;
@@ -199,7 +199,7 @@ public Iterator<String> iterator() {
}
@Override
- public Terms terms(String field) throws IOException {
+ public IndexedField indexedField(String field) throws IOException {
assert field != null;
return fields.get(field);
}
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsBlockTreeTermsWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsBlockTreeTermsWriter.java
index b16bb1566cba..d5ae3ce63fcd 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsBlockTreeTermsWriter.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsBlockTreeTermsWriter.java
@@ -29,11 +29,11 @@
import org.apache.lucene.codecs.blocktreeords.FSTOrdsOutputs.Output;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.SegmentWriteState;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RAMOutputStream;
@@ -213,19 +213,19 @@ public OrdsBlockTreeTermsWriter(
}
@Override
- public void write(Fields fields) throws IOException {
+ public void write(IndexedFields fields) throws IOException {
String lastField = null;
for(String field : fields) {
assert lastField == null || lastField.compareTo(field) < 0;
lastField = field;
- Terms terms = fields.terms(field);
+ IndexedField terms = fields.indexedField(field);
if (terms == null) {
continue;
}
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
TermsWriter termsWriter = new TermsWriter(fieldInfos.fieldInfo(field));
while (true) {
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsFieldReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsFieldReader.java
index 5d02258837d3..2877510991c5 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsFieldReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsFieldReader.java
@@ -24,7 +24,7 @@
import org.apache.lucene.codecs.blocktreeords.FSTOrdsOutputs.Output;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.IndexOptions;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.ByteArrayDataInput;
import org.apache.lucene.store.IndexInput;
@@ -34,8 +34,8 @@
import org.apache.lucene.util.automaton.CompiledAutomaton;
import org.apache.lucene.util.fst.FST;
-/** BlockTree's implementation of {@link Terms}. */
-final class OrdsFieldReader extends Terms implements Accountable {
+/** BlockTree's implementation of {@link IndexedField}. */
+final class OrdsFieldReader extends IndexedField implements Accountable {
final long numTerms;
final FieldInfo fieldInfo;
final long sumTotalTermFreq;
@@ -137,7 +137,7 @@ public boolean hasPayloads() {
}
@Override
- public TermsEnum iterator() throws IOException {
+ public TermsEnum getTermsEnum() throws IOException {
return new OrdsSegmentTermsEnum(this);
}
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java
index ffe9fa1002af..cb3e9530688f 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java
@@ -33,11 +33,11 @@
import org.apache.lucene.codecs.bloom.FuzzySet.ContainsResult;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.index.SegmentWriteState;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.ChecksumIndexInput;
import org.apache.lucene.store.DataOutput;
@@ -200,12 +200,12 @@ public void close() throws IOException {
}
@Override
- public Terms terms(String field) throws IOException {
+ public IndexedField indexedField(String field) throws IOException {
FuzzySet filter = bloomsByFieldName.get(field);
if (filter == null) {
- return delegateFieldsProducer.terms(field);
+ return delegateFieldsProducer.indexedField(field);
} else {
- Terms result = delegateFieldsProducer.terms(field);
+ IndexedField result = delegateFieldsProducer.indexedField(field);
if (result == null) {
return null;
}
@@ -218,11 +218,11 @@ public int size() {
return delegateFieldsProducer.size();
}
- class BloomFilteredTerms extends Terms {
- private Terms delegateTerms;
+ class BloomFilteredTerms extends IndexedField {
+ private IndexedField delegateTerms;
private FuzzySet filter;
- public BloomFilteredTerms(Terms terms, FuzzySet filter) {
+ public BloomFilteredTerms(IndexedField terms, FuzzySet filter) {
this.delegateTerms = terms;
this.filter = filter;
}
@@ -234,7 +234,7 @@ public TermsEnum intersect(CompiledAutomaton compiled,
}
@Override
- public TermsEnum iterator() throws IOException {
+ public TermsEnum getTermsEnum() throws IOException {
return new BloomFilteredTermsEnum(delegateTerms, filter);
}
@@ -290,16 +290,16 @@ public BytesRef getMax() throws IOException {
}
final class BloomFilteredTermsEnum extends TermsEnum {
- private Terms delegateTerms;
+ private IndexedField delegateTerms;
private TermsEnum delegateTermsEnum;
private final FuzzySet filter;
- public BloomFilteredTermsEnum(Terms delegateTerms, FuzzySet filter) throws IOException {
+ public BloomFilteredTermsEnum(IndexedField delegateTerms, FuzzySet filter) throws IOException {
this.delegateTerms = delegateTerms;
this.filter = filter;
}
- void reset(Terms delegateTerms) throws IOException {
+ void reset(IndexedField delegateTerms) throws IOException {
this.delegateTerms = delegateTerms;
this.delegateTermsEnum = null;
}
@@ -310,7 +310,7 @@ private TermsEnum delegate() throws IOException {
* this can be a relativly heavy operation depending on the
* delegate postings format and they underlying directory
* (clone IndexInput) */
- delegateTermsEnum = delegateTerms.iterator();
+ delegateTermsEnum = delegateTerms.getTermsEnum();
}
return delegateTermsEnum;
}
@@ -416,7 +416,7 @@ public BloomFilteredFieldsConsumer(FieldsConsumer fieldsConsumer,
}
@Override
- public void write(Fields fields) throws IOException {
+ public void write(IndexedFields fields) throws IOException {
// Delegate must write first: it may have opened files
// on creating the class
@@ -427,12 +427,12 @@ public void write(Fields fields) throws IOException {
delegateFieldsConsumer.write(fields);
for(String field : fields) {
- Terms terms = fields.terms(field);
+ IndexedField terms = fields.indexedField(field);
if (terms == null) {
continue;
}
FieldInfo fieldInfo = state.fieldInfos.fieldInfo(field);
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
FuzzySet bloomFilter = null;
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java
index 00f25cf189c3..117c0c44a38e 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java
@@ -28,14 +28,14 @@
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat;
import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.OrdTermState;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.index.SegmentWriteState;
import org.apache.lucene.index.TermState;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.RAMOutputStream;
@@ -88,7 +88,7 @@ public DirectPostingsFormat() {
}
/** minSkipCount is how many terms in a row must have the
- * same prefix before we put a skip pointer down. Terms
+ * same prefix before we put a skip pointer down. IndexedField
* with docFreq <= lowFreqCutoff will use a single int[]
* to hold all docs, freqs, position and offsets; terms
* with higher docFreq will use separate arrays. */
@@ -124,9 +124,9 @@ public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException
private static final class DirectFields extends FieldsProducer {
     private final Map<String,DirectField> fields = new TreeMap<>();
- public DirectFields(SegmentReadState state, Fields fields, int minSkipCount, int lowFreqCutoff) throws IOException {
+ public DirectFields(SegmentReadState state, IndexedFields fields, int minSkipCount, int lowFreqCutoff) throws IOException {
for (String field : fields) {
- this.fields.put(field, new DirectField(state, field, fields.terms(field), minSkipCount, lowFreqCutoff));
+ this.fields.put(field, new DirectField(state, field, fields.indexedField(field), minSkipCount, lowFreqCutoff));
}
}
@@ -136,7 +136,7 @@ public Iterator<String> iterator() {
}
@Override
- public Terms terms(String field) {
+ public IndexedField indexedField(String field) {
return fields.get(field);
}
@@ -176,7 +176,7 @@ public String toString() {
}
}
- private final static class DirectField extends Terms implements Accountable {
+ private final static class DirectField extends IndexedField implements Accountable {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(DirectField.class);
@@ -299,7 +299,7 @@ public int[] get() {
}
}
- public DirectField(SegmentReadState state, String field, Terms termsIn, int minSkipCount, int lowFreqCutoff) throws IOException {
+ public DirectField(SegmentReadState state, String field, IndexedField termsIn, int minSkipCount, int lowFreqCutoff) throws IOException {
final FieldInfo fieldInfo = state.fieldInfos.fieldInfo(field);
sumTotalTermFreq = termsIn.getSumTotalTermFreq();
@@ -308,7 +308,7 @@ public DirectField(SegmentReadState state, String field, Terms termsIn, int minS
final int numTerms = (int) termsIn.size();
if (numTerms == -1) {
- throw new IllegalArgumentException("codec does not provide Terms.size()");
+ throw new IllegalArgumentException("codec does not provide IndexedField.size()");
}
terms = new TermAndSkip[numTerms];
termOffsets = new int[1+numTerms];
@@ -325,7 +325,7 @@ public DirectField(SegmentReadState state, String field, Terms termsIn, int minS
BytesRef term;
PostingsEnum postingsEnum = null;
PostingsEnum docsAndPositionsEnum = null;
- final TermsEnum termsEnum = termsIn.iterator();
+ final TermsEnum termsEnum = termsIn.getTermsEnum();
int termOffset = 0;
final IntArrayWriter scratch = new IntArrayWriter();
@@ -653,7 +653,7 @@ private void saveSkip(int ord, int backCount) {
}
@Override
- public TermsEnum iterator() {
+ public TermsEnum getTermsEnum() {
return new DirectTermsEnum();
}
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java
index 97bbea3ddef1..c2b801bc5bd0 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java
@@ -40,7 +40,7 @@
import org.apache.lucene.index.SegmentInfo;
import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.index.TermState;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.ByteArrayDataInput;
import org.apache.lucene.store.ChecksumIndexInput;
@@ -160,7 +160,7 @@ public Iterator iterator() {
}
@Override
- public Terms terms(String field) throws IOException {
+ public IndexedField indexedField(String field) throws IOException {
assert field != null;
return fields.get(field);
}
@@ -179,7 +179,7 @@ public void close() throws IOException {
}
}
- final class TermsReader extends Terms implements Accountable {
+ final class TermsReader extends IndexedField implements Accountable {
final FieldInfo fieldInfo;
final long numTerms;
final long sumTotalTermFreq;
@@ -264,7 +264,7 @@ public int getDocCount() throws IOException {
}
@Override
- public TermsEnum iterator() throws IOException {
+ public TermsEnum getTermsEnum() throws IOException {
return new SegmentTermsEnum();
}
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsWriter.java
index cbe6583e892b..f829c7d79a2a 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsWriter.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsWriter.java
@@ -27,11 +27,11 @@
import org.apache.lucene.codecs.PostingsWriterBase;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.SegmentWriteState;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.DataOutput;
import org.apache.lucene.store.IndexOutput;
@@ -186,15 +186,15 @@ public FSTOrdTermsWriter(SegmentWriteState state, PostingsWriterBase postingsWri
}
@Override
- public void write(Fields fields) throws IOException {
+ public void write(IndexedFields fields) throws IOException {
for(String field : fields) {
- Terms terms = fields.terms(field);
+ IndexedField terms = fields.indexedField(field);
if (terms == null) {
continue;
}
FieldInfo fieldInfo = fieldInfos.fieldInfo(field);
boolean hasFreq = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) >= 0;
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
TermsWriter termsWriter = new TermsWriter(fieldInfo);
long sumTotalTermFreq = 0;
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java
index b120656688cf..a9f4aec19917 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java
@@ -39,7 +39,7 @@
import org.apache.lucene.index.SegmentInfo;
import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.index.TermState;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.ByteArrayDataInput;
import org.apache.lucene.store.IndexInput;
@@ -140,7 +140,7 @@ public Iterator iterator() {
}
@Override
- public Terms terms(String field) throws IOException {
+ public IndexedField indexedField(String field) throws IOException {
assert field != null;
return fields.get(field);
}
@@ -160,7 +160,7 @@ public void close() throws IOException {
}
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(TermsReader.class);
- final class TermsReader extends Terms implements Accountable {
+ final class TermsReader extends IndexedField implements Accountable {
final FieldInfo fieldInfo;
final long numTerms;
@@ -244,7 +244,7 @@ public int getDocCount() throws IOException {
}
@Override
- public TermsEnum iterator() throws IOException {
+ public TermsEnum getTermsEnum() throws IOException {
return new SegmentTermsEnum();
}
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsWriter.java
index 8284d7444f26..cbd61a70cd8e 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsWriter.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsWriter.java
@@ -28,10 +28,10 @@
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.SegmentWriteState;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.DataOutput;
import org.apache.lucene.store.IndexOutput;
@@ -158,15 +158,15 @@ private void writeTrailer(IndexOutput out, long dirStart) throws IOException {
}
@Override
- public void write(Fields fields) throws IOException {
+ public void write(IndexedFields fields) throws IOException {
for(String field : fields) {
- Terms terms = fields.terms(field);
+ IndexedField terms = fields.indexedField(field);
if (terms == null) {
continue;
}
FieldInfo fieldInfo = fieldInfos.fieldInfo(field);
boolean hasFreq = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) >= 0;
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
TermsWriter termsWriter = new TermsWriter(fieldInfo);
long sumTotalTermFreq = 0;
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
index 2f7176542cd8..dd387e2b2361 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
@@ -33,12 +33,12 @@
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.index.SegmentWriteState;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.ByteArrayDataInput;
import org.apache.lucene.store.ChecksumIndexInput;
@@ -287,15 +287,15 @@ private MemoryFieldsConsumer(SegmentWriteState state) throws IOException {
}
@Override
- public void write(Fields fields) throws IOException {
+ public void write(IndexedFields fields) throws IOException {
for(String field : fields) {
- Terms terms = fields.terms(field);
+ IndexedField terms = fields.indexedField(field);
if (terms == null) {
continue;
}
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
FieldInfo fieldInfo = state.fieldInfos.fieldInfo(field);
TermsWriter termsWriter = new TermsWriter(out, fieldInfo);
@@ -857,7 +857,7 @@ public long ord() {
}
}
- private final static class TermsReader extends Terms implements Accountable {
+ private final static class TermsReader extends IndexedField implements Accountable {
private final long sumTotalTermFreq;
private final long sumDocFreq;
@@ -905,7 +905,7 @@ public long size() {
}
@Override
- public TermsEnum iterator() {
+ public TermsEnum getTermsEnum() {
return new FSTTermsEnum(field, fst);
}
@@ -982,7 +982,7 @@ public Iterator iterator() {
}
@Override
- public Terms terms(String field) {
+ public IndexedField indexedField(String field) {
return fields.get(field);
}
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsReader.java
index faab7885f6c3..0b4db593ee49 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsReader.java
@@ -32,7 +32,7 @@
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.SegmentReadState;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.BufferedChecksumIndexInput;
import org.apache.lucene.store.ChecksumIndexInput;
@@ -521,7 +521,7 @@ public TermData(long docsStart, int docFreq) {
RamUsageEstimator.shallowSizeOfInstance(SimpleTextTerms.class)
+ RamUsageEstimator.shallowSizeOfInstance(BytesRef.class)
+ RamUsageEstimator.shallowSizeOfInstance(CharsRef.class);
- private class SimpleTextTerms extends Terms implements Accountable {
+ private class SimpleTextTerms extends IndexedField implements Accountable {
private final long termsStart;
private final FieldInfo fieldInfo;
private final int maxDoc;
@@ -622,7 +622,7 @@ public String toString() {
}
@Override
- public TermsEnum iterator() throws IOException {
+ public TermsEnum getTermsEnum() throws IOException {
if (fst != null) {
return new SimpleTextTermsEnum(fst, fieldInfo.getIndexOptions());
} else {
@@ -679,7 +679,7 @@ public Iterator iterator() {
private final Map termsCache = new HashMap<>();
@Override
- synchronized public Terms terms(String field) throws IOException {
+ synchronized public IndexedField indexedField(String field) throws IOException {
SimpleTextTerms terms = termsCache.get(field);
if (terms == null) {
Long fp = fields.get(field);
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsWriter.java
index 20235528dcad..229c1d4e1d8f 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsWriter.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsWriter.java
@@ -23,9 +23,9 @@
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.SegmentWriteState;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.util.BytesRef;
@@ -56,15 +56,15 @@ public SimpleTextFieldsWriter(SegmentWriteState writeState) throws IOException {
}
@Override
- public void write(Fields fields) throws IOException {
+ public void write(IndexedFields fields) throws IOException {
write(writeState.fieldInfos, fields);
}
- public void write(FieldInfos fieldInfos, Fields fields) throws IOException {
+ public void write(FieldInfos fieldInfos, IndexedFields fields) throws IOException {
// for each field
for(String field : fields) {
- Terms terms = fields.terms(field);
+ IndexedField terms = fields.indexedField(field);
if (terms == null) {
// Annoyingly, this can happen!
continue;
@@ -93,7 +93,7 @@ public void write(FieldInfos fieldInfos, Fields fields) throws IOException {
}
}
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
PostingsEnum postingsEnum = null;
// for each term in field
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java
index c1808bfb76c9..7a3b460cb88a 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java
@@ -26,10 +26,10 @@
import org.apache.lucene.codecs.TermVectorsReader;
import org.apache.lucene.index.PostingsEnum;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.SegmentInfo;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.BufferedChecksumIndexInput;
@@ -106,7 +106,7 @@ private void readIndex(int maxDoc) throws IOException {
}
@Override
- public Fields get(int doc) throws IOException {
+ public IndexedFields get(int doc) throws IOException {
SortedMap fields = new TreeMap<>();
in.seek(offsets[doc]);
readLine();
@@ -239,7 +239,7 @@ private String readString(int offset, BytesRefBuilder scratch) {
return scratchUTF16.toString();
}
- private class SimpleTVFields extends Fields {
+ private class SimpleTVFields extends IndexedFields {
private final SortedMap fields;
SimpleTVFields(SortedMap fields) {
@@ -252,7 +252,7 @@ public Iterator iterator() {
}
@Override
- public Terms terms(String field) throws IOException {
+ public IndexedField indexedField(String field) throws IOException {
return fields.get(field);
}
@@ -262,7 +262,7 @@ public int size() {
}
}
- private static class SimpleTVTerms extends Terms {
+ private static class SimpleTVTerms extends IndexedField {
final SortedMap terms;
final boolean hasOffsets;
final boolean hasPositions;
@@ -276,7 +276,7 @@ private static class SimpleTVTerms extends Terms {
}
@Override
- public TermsEnum iterator() throws IOException {
+ public TermsEnum getTermsEnum() throws IOException {
// TODO: reuse
return new SimpleTVTermsEnum(terms);
}
diff --git a/lucene/codecs/src/test/org/apache/lucene/codecs/blocktreeords/TestOrdsBlockTree.java b/lucene/codecs/src/test/org/apache/lucene/codecs/blocktreeords/TestOrdsBlockTree.java
index 773bbf518271..4b49eb400dfa 100644
--- a/lucene/codecs/src/test/org/apache/lucene/codecs/blocktreeords/TestOrdsBlockTree.java
+++ b/lucene/codecs/src/test/org/apache/lucene/codecs/blocktreeords/TestOrdsBlockTree.java
@@ -53,7 +53,7 @@ public void testBasic() throws Exception {
doc.add(newTextField("field", "a b c", Field.Store.NO));
w.addDocument(doc);
IndexReader r = w.getReader();
- TermsEnum te = MultiFields.getTerms(r, "field").iterator();
+ TermsEnum te = MultiFields.getIndexedField(r, "field").getTermsEnum();
// Test next()
assertEquals(new BytesRef("a"), te.next());
@@ -114,7 +114,7 @@ public void testTwoBlocks() throws Exception {
}
w.forceMerge(1);
IndexReader r = w.getReader();
- TermsEnum te = MultiFields.getTerms(r, "field").iterator();
+ TermsEnum te = MultiFields.getIndexedField(r, "field").getTermsEnum();
assertTrue(te.seekExact(new BytesRef("mo")));
assertEquals(27, te.ord());
@@ -190,7 +190,7 @@ public void testThreeBlocks() throws Exception {
}
w.forceMerge(1);
IndexReader r = w.getReader();
- TermsEnum te = MultiFields.getTerms(r, "field").iterator();
+ TermsEnum te = MultiFields.getIndexedField(r, "field").getTermsEnum();
if (VERBOSE) {
while (te.next() != null) {
@@ -250,7 +250,7 @@ public void testFloorBlocks() throws Exception {
}
w.forceMerge(1);
IndexReader r = DirectoryReader.open(w);
- TermsEnum te = MultiFields.getTerms(r, "field").iterator();
+ TermsEnum te = MultiFields.getIndexedField(r, "field").getTermsEnum();
if (VERBOSE) {
BytesRef term;
@@ -300,7 +300,7 @@ public void testNonRootFloorBlocks() throws Exception {
}
w.forceMerge(1);
IndexReader r = DirectoryReader.open(w);
- TermsEnum te = MultiFields.getTerms(r, "field").iterator();
+ TermsEnum te = MultiFields.getIndexedField(r, "field").getTermsEnum();
BytesRef term;
int ord = 0;
@@ -338,7 +338,7 @@ public void testSeveralNonRootBlocks() throws Exception {
}
w.forceMerge(1);
IndexReader r = DirectoryReader.open(w);
- TermsEnum te = MultiFields.getTerms(r, "body").iterator();
+ TermsEnum te = MultiFields.getIndexedField(r, "body").getTermsEnum();
for(int i=0;i<30;i++) {
for(int j=0;j<30;j++) {
@@ -379,7 +379,7 @@ public void testSeekCeilNotFound() throws Exception {
w.forceMerge(1);
IndexReader r = w.getReader();
- TermsEnum te = MultiFields.getTerms(r, "field").iterator();
+ TermsEnum te = MultiFields.getIndexedField(r, "field").getTermsEnum();
assertEquals(TermsEnum.SeekStatus.NOT_FOUND, te.seekCeil(new BytesRef(new byte[] {0x22})));
assertEquals("a", te.term().utf8ToString());
assertEquals(1L, te.ord());
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/FieldsConsumer.java b/lucene/core/src/java/org/apache/lucene/codecs/FieldsConsumer.java
index 28bae1d07c59..19365769ac8f 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/FieldsConsumer.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/FieldsConsumer.java
@@ -22,7 +22,7 @@
import java.util.ArrayList;
import java.util.List;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.MappedMultiFields;
import org.apache.lucene.index.MergeState;
import org.apache.lucene.index.MultiFields;
@@ -48,7 +48,7 @@ protected FieldsConsumer() {
// TODO: maybe we should factor out "limited" (only
// iterables, no counts/stats) base classes from
- // Fields/Terms/Docs/AndPositions?
+ // IndexedFields/IndexedField/Docs/AndPositions?
/** Write all fields, terms and postings. This the "pull"
* API, allowing you to iterate more than once over the
@@ -65,26 +65,26 @@ protected FieldsConsumer() {
*
*
You must skip terms that have no docs and
* fields that have no terms, even though the provided
- * Fields API will expose them; this typically
+ * IndexedFields API will expose them; this typically
* requires lazily writing the field or term until
* you've actually seen the first term or
* document.
*
- * <li> The provided Fields instance is limited: you
+ * <li> The provided IndexedFields instance is limited: you
* cannot call any methods that return
* statistics/counts; you cannot pass a non-null
* live docs when pulling docs/positions enums.
*
*/
- public abstract void write(Fields fields) throws IOException;
+ public abstract void write(IndexedFields fields) throws IOException;
/** Merges in the fields from the readers in
* mergeState. The default implementation skips
- * and maps around deleted documents, and calls {@link #write(Fields)}.
+ * and maps around deleted documents, and calls {@link #write(IndexedFields)}.
* Implementations can override this method for more sophisticated
* merging (bulk-byte copying, etc). */
public void merge(MergeState mergeState) throws IOException {
- final List<Fields> fields = new ArrayList<>();
+ final List<IndexedFields> fields = new ArrayList<>();
final List slices = new ArrayList<>();
int docBase = 0;
@@ -99,8 +99,8 @@ public void merge(MergeState mergeState) throws IOException {
docBase += maxDoc;
}
- Fields mergedFields = new MappedMultiFields(mergeState,
- new MultiFields(fields.toArray(Fields.EMPTY_ARRAY),
+ IndexedFields mergedFields = new MappedMultiFields(mergeState,
+ new MultiFields(fields.toArray(IndexedFields.EMPTY_ARRAY),
slices.toArray(ReaderSlice.EMPTY_ARRAY)));
write(mergedFields);
}
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/FieldsProducer.java b/lucene/core/src/java/org/apache/lucene/codecs/FieldsProducer.java
index cd6386c721b6..26ef8a337d74 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/FieldsProducer.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/FieldsProducer.java
@@ -20,7 +20,7 @@
import java.io.Closeable;
import java.io.IOException;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.util.Accountable;
/** Abstract API that produces terms, doc, freq, prox, offset and
@@ -29,7 +29,7 @@
* @lucene.experimental
*/
-public abstract class FieldsProducer extends Fields implements Closeable, Accountable {
+public abstract class FieldsProducer extends IndexedFields implements Closeable, Accountable {
/** Sole constructor. (For invocation by subclass
* constructors, typically implicit.) */
protected FieldsProducer() {
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/TermVectorsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/TermVectorsReader.java
index 71041369b561..40d43222f8e6 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/TermVectorsReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/TermVectorsReader.java
@@ -21,7 +21,7 @@
import java.io.IOException;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; // javadocs
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.util.Accountable;
/**
@@ -40,7 +40,7 @@ protected TermVectorsReader() {
* term vectors were not indexed. If offsets are
* available they are in an {@link OffsetAttribute}
* available from the {@link org.apache.lucene.index.PostingsEnum}. */
- public abstract Fields get(int doc) throws IOException;
+ public abstract IndexedFields get(int doc) throws IOException;
/**
* Checks consistency of this reader.
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/TermVectorsWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/TermVectorsWriter.java
index b84065af66ad..5265dc4d7463 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/TermVectorsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/TermVectorsWriter.java
@@ -25,10 +25,10 @@
import org.apache.lucene.index.DocIDMerger;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.MergeState;
import org.apache.lucene.index.PostingsEnum;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.DataInput;
@@ -216,7 +216,7 @@ public int merge(MergeState mergeState) throws IOException {
// NOTE: it's very important to first assign to vectors then pass it to
// termVectorsWriter.addAllDocVectors; see LUCENE-1282
- Fields vectors;
+ IndexedFields vectors;
if (sub.reader == null) {
vectors = null;
} else {
@@ -231,7 +231,7 @@ public int merge(MergeState mergeState) throws IOException {
/** Safe (but, slowish) default method to write every
* vector field in the document. */
- protected final void addAllDocVectors(Fields vectors, MergeState mergeState) throws IOException {
+ protected final void addAllDocVectors(IndexedFields vectors, MergeState mergeState) throws IOException {
if (vectors == null) {
startDocument(0);
finishDocument();
@@ -240,7 +240,7 @@ protected final void addAllDocVectors(Fields vectors, MergeState mergeState) thr
int numFields = vectors.size();
if (numFields == -1) {
- // count manually! TODO: Maybe enforce that Fields.size() returns something valid?
+ // count manually! TODO: Maybe enforce that IndexedFields.size() returns something valid?
numFields = 0;
for (final Iterator it = vectors.iterator(); it.hasNext(); ) {
it.next();
@@ -262,7 +262,7 @@ protected final void addAllDocVectors(Fields vectors, MergeState mergeState) thr
assert lastFieldName == null || fieldName.compareTo(lastFieldName) > 0: "lastFieldName=" + lastFieldName + " fieldName=" + fieldName;
lastFieldName = fieldName;
- final Terms terms = vectors.terms(fieldName);
+ final IndexedField terms = vectors.indexedField(fieldName);
if (terms == null) {
// FieldsEnum shouldn't lie...
continue;
@@ -275,16 +275,16 @@ protected final void addAllDocVectors(Fields vectors, MergeState mergeState) thr
int numTerms = (int) terms.size();
if (numTerms == -1) {
- // count manually. It is stupid, but needed, as Terms.size() is not a mandatory statistics function
+ // count manually. It is stupid, but needed, as IndexedField.size() is not a mandatory statistics function
numTerms = 0;
- termsEnum = terms.iterator();
+ termsEnum = terms.getTermsEnum();
while(termsEnum.next() != null) {
numTerms++;
}
}
startField(fieldInfo, numTerms, hasPositions, hasOffsets, hasPayloads);
- termsEnum = terms.iterator();
+ termsEnum = terms.getTermsEnum();
int termCount = 0;
while(termsEnum.next() != null) {
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsReader.java
index 6fc9a2490724..c586d92067cd 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsReader.java
@@ -33,7 +33,7 @@
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.SegmentReadState;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.search.PrefixQuery; // javadocs
import org.apache.lucene.search.TermRangeQuery; // javadocs
import org.apache.lucene.store.IndexInput;
@@ -60,12 +60,12 @@
* much memory the terms index uses.
*
* <p>If auto-prefix terms were indexed (see
- * {@link BlockTreeTermsWriter}), then the {@link Terms#intersect}
+ * {@link BlockTreeTermsWriter}), then the {@link IndexedField#intersect}
* implementation here will make use of these terms only if the
* automaton has a binary sink state, i.e. an accept state
* which has a transition to itself accepting all byte values.
* For example, both {@link PrefixQuery} and {@link TermRangeQuery}
- * pass such automata to {@link Terms#intersect}.
+ * pass such automata to {@link IndexedField#intersect}.
*
*
The data structure used by this implementation is very
* similar to a burst trie
@@ -287,7 +287,7 @@ public Iterator iterator() {
}
@Override
- public Terms terms(String field) throws IOException {
+ public IndexedField indexedField(String field) throws IOException {
assert field != null;
return fields.get(field);
}
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsWriter.java
index bdacc22325fa..cd1e95e0de51 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsWriter.java
@@ -27,11 +27,11 @@
import org.apache.lucene.codecs.PostingsWriterBase;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.SegmentWriteState;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.DataOutput;
import org.apache.lucene.store.IndexOutput;
@@ -315,7 +315,7 @@ public static void validateSettings(int minItemsInBlock, int maxItemsInBlock) {
}
@Override
- public void write(Fields fields) throws IOException {
+ public void write(IndexedFields fields) throws IOException {
//if (DEBUG) System.out.println("\nBTTW.write seg=" + segment);
String lastField = null;
@@ -324,12 +324,12 @@ public void write(Fields fields) throws IOException {
lastField = field;
//if (DEBUG) System.out.println("\nBTTW.write seg=" + segment + " field=" + field);
- Terms terms = fields.terms(field);
+ IndexedField terms = fields.indexedField(field);
if (terms == null) {
continue;
}
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
TermsWriter termsWriter = new TermsWriter(fieldInfos.fieldInfo(field));
while (true) {
BytesRef term = termsEnum.next();
@@ -750,7 +750,7 @@ private PendingBlock writeBlock(int prefixLength, boolean isFloor, int floorLead
// For non-leaf block we borrow 1 bit to record
// if entry is term or sub-block, and 1 bit to record if
- // it's a prefix term. Terms cannot be larger than ~32 KB
+ // it's a prefix term. Terms cannot be larger than ~32 KB
// so we won't run out of bits:
suffixWriter.writeVInt(suffix << 1);
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/FieldReader.java b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/FieldReader.java
index 4ee38262403d..3f72946e5e87 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/FieldReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/FieldReader.java
@@ -23,7 +23,7 @@
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.IndexOptions;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.ByteArrayDataInput;
import org.apache.lucene.store.IndexInput;
@@ -36,10 +36,10 @@
import org.apache.lucene.util.fst.FST;
/**
- * BlockTree's implementation of {@link Terms}.
+ * BlockTree's implementation of {@link IndexedField}.
* @lucene.internal
*/
-public final class FieldReader extends Terms implements Accountable {
+public final class FieldReader extends IndexedField implements Accountable {
// private final boolean DEBUG = BlockTreeTermsWriter.DEBUG;
@@ -152,7 +152,7 @@ public boolean hasPayloads() {
}
@Override
- public TermsEnum iterator() throws IOException {
+ public TermsEnum getTermsEnum() throws IOException {
return new SegmentTermsEnum(this);
}
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnum.java b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnum.java
index 19e56a40a04d..a4d53f08f1f5 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnum.java
@@ -21,7 +21,7 @@
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.TermState;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.util.ArrayUtil;
@@ -35,7 +35,7 @@
import org.apache.lucene.util.fst.FST;
import org.apache.lucene.util.fst.Outputs;
-/** This is used to implement efficient {@link Terms#intersect} for
+/** This is used to implement efficient {@link IndexedField#intersect} for
* block-tree. Note that it cannot seek, except for the initial term on
* init. It just "nexts" through the intersection of the automaton and
* the terms. It does not use the terms index at all: on init, it
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java
index f0d1640d85e7..ef1fa90cc7a1 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java
@@ -30,10 +30,10 @@
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.SegmentInfo;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.ByteArrayDataInput;
@@ -239,7 +239,7 @@ public TermVectorsReader clone() {
}
@Override
- public Fields get(int doc) throws IOException {
+ public IndexedFields get(int doc) throws IOException {
ensureOpen();
// seek to the right place
@@ -654,7 +654,7 @@ private int[][] readPositions(int skip, int numFields, PackedInts.Reader flags,
return positions;
}
- private class TVFields extends Fields {
+ private class TVFields extends IndexedFields {
private final int[] fieldNums, fieldFlags, fieldNumOffs, numTerms, fieldLengths;
private final int[][] prefixLengths, suffixLengths, termFreqs, positionIndex, positions, startOffsets, lengths, payloadIndex;
@@ -706,7 +706,7 @@ public void remove() {
}
@Override
- public Terms terms(String field) throws IOException {
+ public IndexedField indexedField(String field) throws IOException {
final FieldInfo fieldInfo = fieldInfos.fieldInfo(field);
if (fieldInfo == null) {
return null;
@@ -747,7 +747,7 @@ public int size() {
}
- private class TVTerms extends Terms {
+ private class TVTerms extends IndexedField {
private final int numTerms, flags;
private final int[] prefixLengths, suffixLengths, termFreqs, positionIndex, positions, startOffsets, lengths, payloadIndex;
@@ -772,7 +772,7 @@ private class TVTerms extends Terms {
}
@Override
- public TermsEnum iterator() throws IOException {
+ public TermsEnum getTermsEnum() throws IOException {
TVTermsEnum termsEnum = new TVTermsEnum();
termsEnum.reset(numTerms, flags, prefixLengths, suffixLengths, termFreqs, positionIndex, positions, startOffsets, lengths,
payloadIndex, payloadBytes,
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsWriter.java
index 9bd2483389e4..5001e442282d 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsWriter.java
@@ -31,7 +31,7 @@
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.MergeState;
import org.apache.lucene.index.SegmentInfo;
@@ -828,7 +828,7 @@ public int merge(MergeState mergeState) throws IOException {
if (liveDocs != null && liveDocs.get(i) == false) {
continue;
}
- Fields vectors;
+ IndexedFields vectors;
if (vectorsReader == null) {
vectors = null;
} else {
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldMergeState.java b/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldMergeState.java
index 991eedfe78e2..a68a2917a6d6 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldMergeState.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldMergeState.java
@@ -30,7 +30,7 @@
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.MergeState;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
/**
* Utility class to update the {@link MergeState} instance to be restricted to a set of fields.
@@ -248,12 +248,12 @@ public Iterator iterator() {
}
@Override
- public Terms terms(String field) throws IOException {
+ public IndexedField indexedField(String field) throws IOException {
if (!filtered.contains(field)) {
throw new IllegalArgumentException("The field named '" + field + "' is not accessible in the current " +
"merge context, available ones are: " + filtered);
}
- return in.terms(field);
+ return in.indexedField(field);
}
@Override
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldPostingsFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldPostingsFormat.java
index 281b08fe3748..b2522621de95 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldPostingsFormat.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldPostingsFormat.java
@@ -36,14 +36,14 @@
import org.apache.lucene.codecs.FieldsProducer;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.FilterLeafReader.FilterFields;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.MergeState;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.index.SegmentWriteState;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.Accountables;
import org.apache.lucene.util.IOUtils;
@@ -117,7 +117,7 @@ public FieldsWriter(SegmentWriteState writeState) {
}
@Override
- public void write(Fields fields) throws IOException {
+ public void write(IndexedFields fields) throws IOException {
Map formatToGroups = buildFieldsGroupMapping(fields);
// Write postings
@@ -128,7 +128,7 @@ public void write(Fields fields) throws IOException {
final FieldsGroup group = ent.getValue();
// Exposes only the fields from this group:
- Fields maskedFields = new FilterFields(fields) {
+ IndexedFields maskedFields = new FilterFields(fields) {
@Override
public Iterator iterator() {
return group.fields.iterator();
@@ -172,7 +172,7 @@ public void merge(MergeState mergeState) throws IOException {
}
}
- private Map buildFieldsGroupMapping(Fields fields) {
+ private Map buildFieldsGroupMapping(IndexedFields fields) {
// Maps a PostingsFormat instance to the suffix it
// should use
Map formatToGroups = new HashMap<>();
@@ -311,9 +311,9 @@ public Iterator iterator() {
}
@Override
- public Terms terms(String field) throws IOException {
+ public IndexedField indexedField(String field) throws IOException {
FieldsProducer fieldsProducer = fields.get(field);
- return fieldsProducer == null ? null : fieldsProducer.terms(field);
+ return fieldsProducer == null ? null : fieldsProducer.indexedField(field);
}
@Override
diff --git a/lucene/core/src/java/org/apache/lucene/index/BaseCompositeReader.java b/lucene/core/src/java/org/apache/lucene/index/BaseCompositeReader.java
index 3256107d426b..3701e6401484 100644
--- a/lucene/core/src/java/org/apache/lucene/index/BaseCompositeReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/BaseCompositeReader.java
@@ -93,7 +93,7 @@ protected BaseCompositeReader(R[] subReaders) throws IOException {
}
@Override
- public final Fields getTermVectors(int docID) throws IOException {
+ public final IndexedFields getTermVectors(int docID) throws IOException {
ensureOpen();
final int i = readerIndex(docID); // find subreader num
return subReaders[i].getTermVectors(docID - starts[i]); // dispatch to subreader
diff --git a/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java b/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java
index 9da1e09530de..42b17e4e54a5 100644
--- a/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java
+++ b/lucene/core/src/java/org/apache/lucene/index/BufferedUpdatesStream.java
@@ -502,10 +502,10 @@ private synchronized long applyTermDeletes(CoalescedUpdates updates, SegmentStat
long segTermCount = 0;
for(int i=0;i>
// DocValues updates
private synchronized void applyDocValuesUpdates(List updates,
SegmentState segState, DocValuesFieldUpdates.Container dvUpdatesContainer) throws IOException {
- Fields fields = segState.reader.fields();
+ IndexedFields fields = segState.reader.fields();
// TODO: we can process the updates per DV field, from last to first so that
// if multiple terms affect same document for the same field, we add an update
@@ -650,12 +650,12 @@ private synchronized void applyDocValuesUpdates(List updates,
// if we change the code to process updates in terms order, enable this assert
// assert currentField == null || currentField.compareTo(term.field()) < 0;
currentField = term.field();
- Terms terms = fields.terms(currentField);
+ IndexedField terms = fields.indexedField(currentField);
if (terms != null) {
- termsEnum = terms.iterator();
+ termsEnum = terms.getTermsEnum();
} else {
termsEnum = null;
- }
+ }
}
if (termsEnum == null) {
diff --git a/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java b/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
index f3bdfb0b24f0..e95b8cb7a156 100644
--- a/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
+++ b/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
@@ -744,7 +744,7 @@ public Status checkIndex(List onlySegments) throws IOException {
// Test the Term Index
segInfoStat.termIndexStatus = testPostings(reader, infoStream, verbose, failFast, version);
- // Test Stored Fields
+ // Test Stored Fields
segInfoStat.storedFieldStatus = testStoredFields(reader, infoStream, failFast);
// Test Term Vectors
@@ -1106,13 +1106,13 @@ private static long getDocsFromTermRange(String field, int maxDoc, TermsEnum ter
}
}
- /** Test Terms.intersect on this range, and validates that it returns the same doc ids as using non-intersect TermsEnum. Returns true if
+ /** Test IndexedField.intersect on this range, and validates that it returns the same doc ids as using non-intersect TermsEnum. Returns true if
* any fake terms were seen. */
- private static boolean checkSingleTermRange(String field, int maxDoc, Terms terms, BytesRef minTerm, BytesRef maxTerm, FixedBitSet normalDocs, FixedBitSet intersectDocs) throws IOException {
+ private static boolean checkSingleTermRange(String field, int maxDoc, IndexedField terms, BytesRef minTerm, BytesRef maxTerm, FixedBitSet normalDocs, FixedBitSet intersectDocs) throws IOException {
//System.out.println(" check minTerm=" + minTerm.utf8ToString() + " maxTerm=" + maxTerm.utf8ToString());
assert minTerm.compareTo(maxTerm) <= 0;
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
TermsEnum.SeekStatus status = termsEnum.seekCeil(minTerm);
if (status != TermsEnum.SeekStatus.FOUND) {
throw new RuntimeException("failed to seek to existing term field=" + field + " term=" + minTerm);
@@ -1140,7 +1140,7 @@ private static boolean checkSingleTermRange(String field, int maxDoc, Terms term
* interval of terms, at different boundaries, and then gradually decrease the interval. This is not guaranteed to hit all non-real
* terms (doing that in general is non-trivial), but it should hit many of them, and validate their postings against the postings for the
* real terms. */
- private static void checkTermRanges(String field, int maxDoc, Terms terms, long numTerms) throws IOException {
+ private static void checkTermRanges(String field, int maxDoc, IndexedField terms, long numTerms) throws IOException {
// We'll target this many terms in our interval for the current level:
double currentInterval = numTerms;
@@ -1154,7 +1154,7 @@ private static void checkTermRanges(String field, int maxDoc, Terms terms, long
//System.out.println(" cycle interval=" + currentInterval);
// We iterate this terms enum to locate min/max term for each sliding/overlapping interval we test at the current level:
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
long termCount = 0;
@@ -1204,10 +1204,10 @@ private static void checkTermRanges(String field, int maxDoc, Terms terms, long
}
/**
- * checks Fields api is consistent with itself.
+ * checks IndexedFields api is consistent with itself.
* searcher is optional, to verify with queries. Can be null.
*/
- private static Status.TermIndexStatus checkFields(Fields fields, Bits liveDocs, int maxDoc, FieldInfos fieldInfos, boolean doPrint, boolean isVectors, PrintStream infoStream, boolean verbose, Version version) throws IOException {
+ private static Status.TermIndexStatus checkFields(IndexedFields fields, Bits liveDocs, int maxDoc, FieldInfos fieldInfos, boolean doPrint, boolean isVectors, PrintStream infoStream, boolean verbose, Version version) throws IOException {
// TODO: we should probably return our own stats thing...?!
long startNS;
if (doPrint) {
@@ -1241,12 +1241,12 @@ private static Status.TermIndexStatus checkFields(Fields fields, Bits liveDocs,
}
// TODO: really the codec should not return a field
- // from FieldsEnum if it has no Terms... but we do
+ // from FieldsEnum if it has no IndexedField... but we do
// this today:
- // assert fields.terms(field) != null;
+ // assert fields.indexedField(field) != null;
computedFieldCount++;
- final Terms terms = fields.terms(field);
+ final IndexedField terms = fields.indexedField(field);
if (terms == null) {
continue;
}
@@ -1295,7 +1295,7 @@ private static Status.TermIndexStatus checkFields(Fields fields, Bits liveDocs,
if (hasFreqs == false) {
if (terms.getSumTotalTermFreq() != -1) {
- throw new RuntimeException("field \"" + field + "\" hasFreqs is false, but Terms.getSumTotalTermFreq()=" + terms.getSumTotalTermFreq() + " (should be -1)");
+ throw new RuntimeException("field \"" + field + "\" hasFreqs is false, but IndexedField.getSumTotalTermFreq()=" + terms.getSumTotalTermFreq() + " (should be -1)");
}
}
@@ -1316,7 +1316,7 @@ private static Status.TermIndexStatus checkFields(Fields fields, Bits liveDocs,
}
}
- final TermsEnum termsEnum = terms.iterator();
+ final TermsEnum termsEnum = terms.getTermsEnum();
boolean hasOrd = true;
final long termCountStart = status.delTermCount + status.termCount;
@@ -1416,7 +1416,7 @@ private static Status.TermIndexStatus checkFields(Fields fields, Bits liveDocs,
// consistently "lie" and pretend that freq was
// 1:
if (postings.freq() != 1) {
- throw new RuntimeException("term " + term + ": doc " + doc + ": freq " + freq + " != 1 when Terms.hasFreqs() is false");
+ throw new RuntimeException("term " + term + ": doc " + doc + ": freq " + freq + " != 1 when IndexedField.hasFreqs() is false");
}
}
if (liveDocs == null || liveDocs.get(doc)) {
@@ -1602,10 +1602,10 @@ private static Status.TermIndexStatus checkFields(Fields fields, Bits liveDocs,
throw new RuntimeException("field=\"" + field + "\": minTerm is non-null yet we saw no terms: " + minTerm);
}
- final Terms fieldTerms = fields.terms(field);
+ final IndexedField fieldTerms = fields.indexedField(field);
if (fieldTerms == null) {
// Unusual: the FieldsEnum returned a field but
- // the Terms for that field is null; this should
+ // the IndexedField for that field is null; this should
// only happen if it's a ghost field (field with
// no terms, eg there used to be terms but all
// docs got deleted and then merged away):
@@ -1628,14 +1628,14 @@ private static Status.TermIndexStatus checkFields(Fields fields, Bits liveDocs,
status.blockTreeStats.put(field, stats);
if (sumTotalTermFreq != 0) {
- final long v = fields.terms(field).getSumTotalTermFreq();
+ final long v = fields.indexedField(field).getSumTotalTermFreq();
if (v != -1 && sumTotalTermFreq != v) {
throw new RuntimeException("sumTotalTermFreq for field " + field + "=" + v + " != recomputed sumTotalTermFreq=" + sumTotalTermFreq);
}
}
if (sumDocFreq != 0) {
- final long v = fields.terms(field).getSumDocFreq();
+ final long v = fields.indexedField(field).getSumDocFreq();
if (v != -1 && sumDocFreq != v) {
throw new RuntimeException("sumDocFreq for field " + field + "=" + v + " != recomputed sumDocFreq=" + sumDocFreq);
}
@@ -1670,7 +1670,7 @@ private static Status.TermIndexStatus checkFields(Fields fields, Bits liveDocs,
long termCount = -1;
if (fieldTermCount > 0) {
- termCount = fields.terms(field).size();
+ termCount = fields.indexedField(field).size();
if (termCount != -1 && termCount != fieldTermCount) {
throw new RuntimeException("termCount mismatch " + termCount + " vs " + fieldTermCount);
@@ -1764,7 +1764,7 @@ public static Status.TermIndexStatus testPostings(CodecReader reader, PrintStrea
infoStream.print(" test: terms, freq, prox...");
}
- final Fields fields = reader.getPostingsReader().getMergeInstance();
+ final IndexedFields fields = reader.getPostingsReader().getMergeInstance();
final FieldInfos fieldInfos = reader.getFieldInfos();
status = checkFields(fields, reader.getLiveDocs(), maxDoc, fieldInfos, true, false, infoStream, verbose, version);
} catch (Throwable e) {
@@ -2400,7 +2400,7 @@ public static Status.TermVectorStatus testTermVectors(CodecReader reader, PrintS
final Bits liveDocs = reader.getLiveDocs();
- final Fields postingsFields;
+ final IndexedFields postingsFields;
// TODO: testTermsIndex
if (crossCheckTermVectors) {
postingsFields = reader.getPostingsReader().getMergeInstance();
@@ -2416,7 +2416,7 @@ public static Status.TermVectorStatus testTermVectors(CodecReader reader, PrintS
// Intentionally pull/visit (but don't count in
// stats) deleted documents to make sure they too
// are not corrupt:
- Fields tfv = vectorsReader.get(j);
+ IndexedFields tfv = vectorsReader.get(j);
// TODO: can we make a IS(FIR) that searches just
// this term vector... to pass for searcher?
@@ -2444,17 +2444,17 @@ public static Status.TermVectorStatus testTermVectors(CodecReader reader, PrintS
}
if (crossCheckTermVectors) {
- Terms terms = tfv.terms(field);
- TermsEnum termsEnum = terms.iterator();
+ IndexedField terms = tfv.indexedField(field);
+ TermsEnum termsEnum = terms.getTermsEnum();
final boolean postingsHasFreq = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) >= 0;
final boolean postingsHasPayload = fieldInfo.hasPayloads();
final boolean vectorsHasPayload = terms.hasPayloads();
- Terms postingsTerms = postingsFields.terms(field);
+ IndexedField postingsTerms = postingsFields.indexedField(field);
if (postingsTerms == null) {
throw new RuntimeException("vector field=" + field + " does not exist in postings; doc=" + j);
}
- TermsEnum postingsTermsEnum = postingsTerms.iterator();
+ TermsEnum postingsTermsEnum = postingsTerms.getTermsEnum();
final boolean hasProx = terms.hasOffsets() || terms.hasPositions();
BytesRef term = null;
diff --git a/lucene/core/src/java/org/apache/lucene/index/CodecReader.java b/lucene/core/src/java/org/apache/lucene/index/CodecReader.java
index 460a9d6cf343..38f011dee0d5 100644
--- a/lucene/core/src/java/org/apache/lucene/index/CodecReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/CodecReader.java
@@ -84,7 +84,7 @@ public final void document(int docID, StoredFieldVisitor visitor) throws IOExcep
}
@Override
- public final Fields getTermVectors(int docID) throws IOException {
+ public final IndexedFields getTermVectors(int docID) throws IOException {
TermVectorsReader termVectorsReader = getTermVectorsReader();
if (termVectorsReader == null) {
return null;
@@ -100,7 +100,7 @@ private void checkBounds(int docID) {
}
@Override
- public final Fields fields() {
+ public final IndexedFields fields() {
return getPostingsReader();
}
diff --git a/lucene/core/src/java/org/apache/lucene/index/ExitableDirectoryReader.java b/lucene/core/src/java/org/apache/lucene/index/ExitableDirectoryReader.java
index ee1c0ceda2ec..76d31070edd9 100644
--- a/lucene/core/src/java/org/apache/lucene/index/ExitableDirectoryReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/ExitableDirectoryReader.java
@@ -18,7 +18,7 @@
import org.apache.lucene.index.FilterLeafReader.FilterFields;
-import org.apache.lucene.index.FilterLeafReader.FilterTerms;
+import org.apache.lucene.index.FilterLeafReader.FilterField;
import org.apache.lucene.index.FilterLeafReader.FilterTermsEnum;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.automaton.CompiledAutomaton;
@@ -79,8 +79,8 @@ public ExitableFilterAtomicReader(LeafReader in, QueryTimeout queryTimeout) {
}
@Override
- public Fields fields() throws IOException {
- Fields fields = super.fields();
+ public IndexedFields fields() throws IOException {
+ IndexedFields fields = super.fields();
if (queryTimeout.isTimeoutEnabled()) {
return new ExitableFields(fields, queryTimeout);
}
@@ -102,21 +102,21 @@ public Object getCombinedCoreAndDeletesKey() {
}
/**
- * Wrapper class for another Fields implementation that is used by the ExitableFilterAtomicReader.
+ * Wrapper class for another IndexedFields implementation that is used by the ExitableFilterAtomicReader.
*/
public static class ExitableFields extends FilterFields {
private QueryTimeout queryTimeout;
/** Constructor **/
- public ExitableFields(Fields fields, QueryTimeout queryTimeout) {
+ public ExitableFields(IndexedFields fields, QueryTimeout queryTimeout) {
super(fields);
this.queryTimeout = queryTimeout;
}
@Override
- public Terms terms(String field) throws IOException {
- Terms terms = in.terms(field);
+ public IndexedField indexedField(String field) throws IOException {
+ IndexedField terms = in.indexedField(field);
if (terms == null) {
return null;
}
@@ -125,14 +125,14 @@ public Terms terms(String field) throws IOException {
}
/**
- * Wrapper class for another Terms implementation that is used by ExitableFields.
+ * Wrapper class for another IndexedField implementation that is used by ExitableFields.
*/
- public static class ExitableTerms extends FilterTerms {
+ public static class ExitableTerms extends FilterField {
private QueryTimeout queryTimeout;
/** Constructor **/
- public ExitableTerms(Terms terms, QueryTimeout queryTimeout) {
+ public ExitableTerms(IndexedField terms, QueryTimeout queryTimeout) {
super(terms);
this.queryTimeout = queryTimeout;
}
@@ -143,8 +143,8 @@ public TermsEnum intersect(CompiledAutomaton compiled, BytesRef startTerm) throw
}
@Override
- public TermsEnum iterator() throws IOException {
- return new ExitableTermsEnum(in.iterator(), queryTimeout);
+ public TermsEnum getTermsEnum() throws IOException {
+ return new ExitableTermsEnum(in.getTermsEnum(), queryTimeout);
}
}
diff --git a/lucene/core/src/java/org/apache/lucene/index/FilterLeafReader.java b/lucene/core/src/java/org/apache/lucene/index/FilterLeafReader.java
index 9ed62e70b560..d80ff13c0035 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FilterLeafReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FilterLeafReader.java
@@ -56,19 +56,19 @@ public static LeafReader unwrap(LeafReader reader) {
return reader;
}
- /** Base class for filtering {@link Fields}
+ /** Base class for filtering {@link IndexedFields}
* implementations. */
- public abstract static class FilterFields extends Fields {
- /** The underlying Fields instance. */
- protected final Fields in;
+ public abstract static class FilterFields extends IndexedFields {
+ /** The underlying IndexedFields instance. */
+ protected final IndexedFields in;
/**
* Creates a new FilterFields.
- * @param in the underlying Fields instance.
+ * @param in the underlying IndexedFields instance.
*/
- public FilterFields(Fields in) {
+ public FilterFields(IndexedFields in) {
if (in == null) {
- throw new NullPointerException("incoming Fields must not be null");
+ throw new NullPointerException("incoming IndexedFields must not be null");
}
this.in = in;
}
@@ -79,8 +79,8 @@ public Iterator iterator() {
}
@Override
- public Terms terms(String field) throws IOException {
- return in.terms(field);
+ public IndexedField indexedField(String field) throws IOException {
+ return in.indexedField(field);
}
@Override
@@ -89,29 +89,29 @@ public int size() {
}
}
- /** Base class for filtering {@link Terms} implementations.
+ /** Base class for filtering {@link IndexedField} implementations.
*
NOTE: If the order of terms and documents is not changed, and if
* these terms are going to be intersected with automata, you could consider
* overriding {@link #intersect} for better performance.
*/
- public abstract static class FilterTerms extends Terms {
- /** The underlying Terms instance. */
- protected final Terms in;
+ public abstract static class FilterField extends IndexedField {
+ /** The underlying IndexedField instance. */
+ protected final IndexedField in;
/**
- * Creates a new FilterTerms
- * @param in the underlying Terms instance.
+ * Creates a new FilterField
+ * @param in the underlying IndexedField instance.
*/
- public FilterTerms(Terms in) {
+ public FilterField(IndexedField in) {
if (in == null) {
- throw new NullPointerException("incoming Terms must not be null");
+ throw new NullPointerException("incoming IndexedField must not be null");
}
this.in = in;
}
@Override
- public TermsEnum iterator() throws IOException {
- return in.iterator();
+ public TermsEnum getTermsEnum() throws IOException {
+ return in.getTermsEnum();
}
@Override
@@ -387,7 +387,7 @@ public PointValues getPointValues(String field) throws IOException {
}
@Override
- public Fields getTermVectors(int docID)
+ public IndexedFields getTermVectors(int docID)
throws IOException {
ensureOpen();
return in.getTermVectors(docID);
@@ -417,7 +417,7 @@ protected void doClose() throws IOException {
}
@Override
- public Fields fields() throws IOException {
+ public IndexedFields fields() throws IOException {
ensureOpen();
return in.fields();
}
diff --git a/lucene/core/src/java/org/apache/lucene/index/FreqProxFields.java b/lucene/core/src/java/org/apache/lucene/index/FreqProxFields.java
index fb78a928279b..9af4b9910f7f 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FreqProxFields.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FreqProxFields.java
@@ -29,11 +29,11 @@
import org.apache.lucene.util.BytesRefBuilder;
/** Implements limited (iterators only, no stats) {@link
- * Fields} interface over the in-RAM buffered
+ * IndexedFields} interface over the in-RAM buffered
* fields/terms/postings, to flush postings through the
* PostingsFormat. */
-class FreqProxFields extends Fields {
+class FreqProxFields extends IndexedFields {
final Map fields = new LinkedHashMap<>();
public FreqProxFields(List fieldList) {
@@ -48,7 +48,7 @@ public Iterator iterator() {
}
@Override
- public Terms terms(String field) throws IOException {
+ public IndexedField indexedField(String field) throws IOException {
FreqProxTermsWriterPerField perField = fields.get(field);
return perField == null ? null : new FreqProxTerms(perField);
}
@@ -59,7 +59,7 @@ public int size() {
throw new UnsupportedOperationException();
}
- private static class FreqProxTerms extends Terms {
+ private static class FreqProxTerms extends IndexedField {
final FreqProxTermsWriterPerField terms;
public FreqProxTerms(FreqProxTermsWriterPerField terms) {
@@ -67,7 +67,7 @@ public FreqProxTerms(FreqProxTermsWriterPerField terms) {
}
@Override
- public TermsEnum iterator() {
+ public TermsEnum getTermsEnum() {
FreqProxTermsEnum termsEnum = new FreqProxTermsEnum(terms);
termsEnum.reset();
return termsEnum;
diff --git a/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriter.java b/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriter.java
index 1ca2830edcc7..1aca655423b2 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriter.java
@@ -33,7 +33,7 @@ public FreqProxTermsWriter(DocumentsWriterPerThread docWriter, TermsHash termVec
super(docWriter, true, termVectors);
}
- private void applyDeletes(SegmentWriteState state, Fields fields) throws IOException {
+ private void applyDeletes(SegmentWriteState state, IndexedFields fields) throws IOException {
// Process any pending Term deletes for this newly
// flushed segment:
@@ -47,9 +47,9 @@ private void applyDeletes(SegmentWriteState state, Fields fields) throws IOExcep
for(Term deleteTerm : deleteTerms) {
if (deleteTerm.field().equals(lastField) == false) {
lastField = deleteTerm.field();
- Terms terms = fields.terms(lastField);
+ IndexedField terms = fields.indexedField(lastField);
if (terms != null) {
- termsEnum = terms.iterator();
+ termsEnum = terms.getTermsEnum();
} else {
termsEnum = null;
}
@@ -97,7 +97,7 @@ public void flush(Map fieldsToFlush, final SegmentWrit
// Sort by field name
CollectionUtil.introSort(allFields);
- Fields fields = new FreqProxFields(allFields);
+ IndexedFields fields = new FreqProxFields(allFields);
applyDeletes(state, fields);
if (sortMap != null) {
fields = new SortingLeafReader.SortingFields(fields, state.fieldInfos, sortMap);
diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexReader.java b/lucene/core/src/java/org/apache/lucene/index/IndexReader.java
index 976f548317bd..50ee0943df78 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IndexReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexReader.java
@@ -301,23 +301,23 @@ public final int hashCode() {
}
/** Retrieve term vectors for this document, or null if
- * term vectors were not indexed. The returned Fields
+ * term vectors were not indexed. The returned IndexedFields
* instance acts like a single-document inverted index
* (the docID will be 0). */
- public abstract Fields getTermVectors(int docID)
+ public abstract IndexedFields getTermVectors(int docID)
throws IOException;
/** Retrieve term vector for this document and field, or
* null if term vectors were not indexed. The returned
- * Fields instance acts like a single-document inverted
+ * IndexedField instance acts like a single-document inverted
* index (the docID will be 0). */
- public final Terms getTermVector(int docID, String field)
+ public final IndexedField getTermVector(int docID, String field)
throws IOException {
- Fields vectors = getTermVectors(docID);
+ IndexedFields vectors = getTermVectors(docID);
if (vectors == null) {
return null;
}
- return vectors.terms(field);
+ return vectors.indexedField(field);
}
/** Returns the number of documents in this index. */
@@ -476,7 +476,7 @@ public Object getCombinedCoreAndDeletesKey() {
* or -1 if this measure isn't stored by the codec. Note that, just like other
* term measures, this measure does not take deleted documents into account.
*
- * @see Terms#getSumDocFreq()
+ * @see IndexedField#getSumDocFreq()
*/
public abstract long getSumDocFreq(String field) throws IOException;
@@ -485,7 +485,7 @@ public Object getCombinedCoreAndDeletesKey() {
* or -1 if this measure isn't stored by the codec. Note that, just like other
* term measures, this measure does not take deleted documents into account.
*
- * @see Terms#getDocCount()
+ * @see IndexedField#getDocCount()
*/
public abstract int getDocCount(String field) throws IOException;
@@ -495,7 +495,7 @@ public Object getCombinedCoreAndDeletesKey() {
* omits term freq and positions). Note that, just like other term measures,
* this measure does not take deleted documents into account.
*
- * @see Terms#getSumTotalTermFreq()
+ * @see IndexedField#getSumTotalTermFreq()
*/
public abstract long getSumTotalTermFreq(String field) throws IOException;
diff --git a/lucene/core/src/java/org/apache/lucene/index/Terms.java b/lucene/core/src/java/org/apache/lucene/index/IndexedField.java
similarity index 93%
rename from lucene/core/src/java/org/apache/lucene/index/Terms.java
rename to lucene/core/src/java/org/apache/lucene/index/IndexedField.java
index 7197e25e549c..abda0f3f1175 100644
--- a/lucene/core/src/java/org/apache/lucene/index/Terms.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexedField.java
@@ -25,20 +25,20 @@
import org.apache.lucene.util.automaton.CompiledAutomaton;
/**
- * Access to the terms in a specific field. See {@link Fields}.
+ * Access to the terms in a specific field. See {@link IndexedFields}.
* @lucene.experimental
*/
-public abstract class Terms {
+public abstract class IndexedField {
/** Sole constructor. (For invocation by subclass
* constructors, typically implicit.) */
- protected Terms() {
+ protected IndexedField() {
}
/** Returns an iterator that will step through all
* terms. This method will not return null. */
- public abstract TermsEnum iterator() throws IOException;
+ public abstract TermsEnum getTermsEnum() throws IOException;
/** Returns a TermsEnum that iterates over all terms and
* documents that are accepted by the provided {@link
@@ -73,7 +73,7 @@ public TermsEnum intersect(CompiledAutomaton compiled, final BytesRef startTerm)
// the returned enum, instead of only being able to seek
// at the start
- TermsEnum termsEnum = iterator();
+ TermsEnum termsEnum = getTermsEnum();
if (compiled.type != CompiledAutomaton.AUTOMATON_TYPE.NORMAL) {
throw new IllegalArgumentException("please use CompiledAutomaton.getTermsEnum instead");
@@ -135,15 +135,15 @@ protected BytesRef nextSeekTerm(BytesRef term) throws IOException {
/** Returns true if documents in this field store payloads. */
public abstract boolean hasPayloads();
- /** Zero-length array of {@link Terms}. */
- public final static Terms[] EMPTY_ARRAY = new Terms[0];
+ /** Zero-length array of {@link IndexedField}. */
+ public final static IndexedField[] EMPTY_ARRAY = new IndexedField[0];
/** Returns the smallest term (in lexicographic order) in the field.
* Note that, just like other term measures, this measure does not
* take deleted documents into account. This returns
* null when there are no terms. */
public BytesRef getMin() throws IOException {
- return iterator().next();
+ return getTermsEnum().next();
}
/** Returns the largest term (in lexicographic order) in the field.
@@ -160,7 +160,7 @@ public BytesRef getMax() throws IOException {
} else if (size >= 0) {
// try to seek-by-ord
try {
- TermsEnum iterator = iterator();
+ TermsEnum iterator = getTermsEnum();
iterator.seekExact(size - 1);
return iterator.term();
} catch (UnsupportedOperationException e) {
@@ -169,7 +169,7 @@ public BytesRef getMax() throws IOException {
}
// otherwise: binary search
- TermsEnum iterator = iterator();
+ TermsEnum iterator = getTermsEnum();
BytesRef v = iterator.next();
if (v == null) {
// empty: only possible from a FilteredTermsEnum...
@@ -214,7 +214,7 @@ public BytesRef getMax() throws IOException {
}
/**
- * Expert: returns additional information about this Terms instance
+ * Expert: returns additional information about this IndexedField instance
* for debugging purposes.
*/
public Object getStats() throws IOException {
diff --git a/lucene/core/src/java/org/apache/lucene/index/Fields.java b/lucene/core/src/java/org/apache/lucene/index/IndexedFields.java
similarity index 77%
rename from lucene/core/src/java/org/apache/lucene/index/Fields.java
rename to lucene/core/src/java/org/apache/lucene/index/IndexedFields.java
index c5794b611136..69732fba5a25 100644
--- a/lucene/core/src/java/org/apache/lucene/index/Fields.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IndexedFields.java
@@ -23,11 +23,11 @@
/** Flex API for access to fields and terms
* @lucene.experimental */
-public abstract class Fields implements Iterable {
+public abstract class IndexedFields implements Iterable {
- /** Sole constructor. (For invocation by subclass
+ /** Sole constructor. (For invocation by subclass
* constructors, typically implicit.) */
- protected Fields() {
+ protected IndexedFields() {
}
/** Returns an iterator that will step through all fields
@@ -35,15 +35,15 @@ protected Fields() {
@Override
public abstract Iterator iterator();
- /** Get the {@link Terms} for this field. This will return
+ /** Get the {@link IndexedField} for this field. This will return
* null if the field does not exist. */
- public abstract Terms terms(String field) throws IOException;
+ public abstract IndexedField indexedField(String field) throws IOException;
/** Returns the number of fields or -1 if the number of
* distinct field names is unknown. If >= 0,
* {@link #iterator} will return as many field names. */
public abstract int size();
-
- /** Zero-length {@code Fields} array. */
- public final static Fields[] EMPTY_ARRAY = new Fields[0];
+
+ /** Zero-length {@code IndexedFields} array. */
+ public final static IndexedFields[] EMPTY_ARRAY = new IndexedFields[0];
}
diff --git a/lucene/core/src/java/org/apache/lucene/index/LeafReader.java b/lucene/core/src/java/org/apache/lucene/index/LeafReader.java
index 73394f23670e..d730d2b5295e 100644
--- a/lucene/core/src/java/org/apache/lucene/index/LeafReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/LeafReader.java
@@ -137,18 +137,18 @@ protected static void removeCoreClosedListenerAsReaderClosedListener(IndexReader
public abstract void removeCoreClosedListener(CoreClosedListener listener);
/**
- * Returns {@link Fields} for this reader.
+ * Returns {@link IndexedFields} for this reader.
* This method will not return null.
*/
- public abstract Fields fields() throws IOException;
+ public abstract IndexedFields fields() throws IOException;
@Override
public final int docFreq(Term term) throws IOException {
- final Terms terms = terms(term.field());
+ final IndexedField terms = indexedField(term.field());
if (terms == null) {
return 0;
}
- final TermsEnum termsEnum = terms.iterator();
+ final TermsEnum termsEnum = terms.getTermsEnum();
if (termsEnum.seekExact(term.bytes())) {
return termsEnum.docFreq();
} else {
@@ -163,11 +163,11 @@ public final int docFreq(Term term) throws IOException {
* away. */
@Override
public final long totalTermFreq(Term term) throws IOException {
- final Terms terms = terms(term.field());
+ final IndexedField terms = indexedField(term.field());
if (terms == null) {
return 0;
}
- final TermsEnum termsEnum = terms.iterator();
+ final TermsEnum termsEnum = terms.getTermsEnum();
if (termsEnum.seekExact(term.bytes())) {
return termsEnum.totalTermFreq();
} else {
@@ -177,7 +177,7 @@ public final long totalTermFreq(Term term) throws IOException {
@Override
public final long getSumDocFreq(String field) throws IOException {
- final Terms terms = terms(field);
+ final IndexedField terms = indexedField(field);
if (terms == null) {
return 0;
}
@@ -186,7 +186,7 @@ public final long getSumDocFreq(String field) throws IOException {
@Override
public final int getDocCount(String field) throws IOException {
- final Terms terms = terms(field);
+ final IndexedField terms = indexedField(field);
if (terms == null) {
return 0;
}
@@ -195,7 +195,7 @@ public final int getDocCount(String field) throws IOException {
@Override
public final long getSumTotalTermFreq(String field) throws IOException {
- final Terms terms = terms(field);
+ final IndexedField terms = indexedField(field);
if (terms == null) {
return 0;
}
@@ -203,8 +203,8 @@ public final long getSumTotalTermFreq(String field) throws IOException {
}
/** This may return null if the field does not exist.*/
- public final Terms terms(String field) throws IOException {
- return fields().terms(field);
+ public final IndexedField indexedField(String field) throws IOException {
+ return fields().indexedField(field);
}
/** Returns {@link PostingsEnum} for the specified term.
@@ -215,9 +215,9 @@ public final Terms terms(String field) throws IOException {
public final PostingsEnum postings(Term term, int flags) throws IOException {
assert term.field() != null;
assert term.bytes() != null;
- final Terms terms = terms(term.field());
+ final IndexedField terms = indexedField(term.field());
if (terms != null) {
- final TermsEnum termsEnum = terms.iterator();
+ final TermsEnum termsEnum = terms.getTermsEnum();
if (termsEnum.seekExact(term.bytes())) {
return termsEnum.postings(null, flags);
}
diff --git a/lucene/core/src/java/org/apache/lucene/index/MappedMultiFields.java b/lucene/core/src/java/org/apache/lucene/index/MappedMultiFields.java
index 280b52b1c95d..5720dd30ca96 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MappedMultiFields.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MappedMultiFields.java
@@ -20,11 +20,11 @@
import java.io.IOException;
import static org.apache.lucene.index.FilterLeafReader.FilterFields;
-import static org.apache.lucene.index.FilterLeafReader.FilterTerms;
+import static org.apache.lucene.index.FilterLeafReader.FilterField;
import static org.apache.lucene.index.FilterLeafReader.FilterTermsEnum;
-/** A {@link Fields} implementation that merges multiple
- * Fields into one, and maps around deleted documents.
+/** An {@link IndexedFields} implementation that merges multiple
+ * IndexedFields into one, and maps around deleted documents.
* This is used for merging.
* @lucene.internal
*/
@@ -39,8 +39,8 @@ public MappedMultiFields(MergeState mergeState, MultiFields multiFields) {
}
@Override
- public Terms terms(String field) throws IOException {
- MultiTerms terms = (MultiTerms) in.terms(field);
+ public IndexedField indexedField(String field) throws IOException {
+ MultiField terms = (MultiField) in.indexedField(field);
if (terms == null) {
return null;
} else {
@@ -48,19 +48,19 @@ public Terms terms(String field) throws IOException {
}
}
- private static class MappedMultiTerms extends FilterTerms {
+ private static class MappedMultiTerms extends FilterField {
final MergeState mergeState;
final String field;
- public MappedMultiTerms(String field, MergeState mergeState, MultiTerms multiTerms) {
- super(multiTerms);
+ public MappedMultiTerms(String field, MergeState mergeState, MultiField multiField) {
+ super(multiField);
this.field = field;
this.mergeState = mergeState;
}
@Override
- public TermsEnum iterator() throws IOException {
- TermsEnum iterator = in.iterator();
+ public TermsEnum getTermsEnum() throws IOException {
+ TermsEnum iterator = in.getTermsEnum();
if (iterator == TermsEnum.EMPTY) {
// LUCENE-6826
return TermsEnum.EMPTY;
diff --git a/lucene/core/src/java/org/apache/lucene/index/MergeReaderWrapper.java b/lucene/core/src/java/org/apache/lucene/index/MergeReaderWrapper.java
index 7eb90dff7ce5..cdd8e19a5d4d 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MergeReaderWrapper.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MergeReaderWrapper.java
@@ -81,7 +81,7 @@ public void removeCoreClosedListener(CoreClosedListener listener) {
}
@Override
- public Fields fields() throws IOException {
+ public IndexedFields fields() throws IOException {
return fields;
}
@@ -187,7 +187,7 @@ public void checkIntegrity() throws IOException {
}
@Override
- public Fields getTermVectors(int docID) throws IOException {
+ public IndexedFields getTermVectors(int docID) throws IOException {
ensureOpen();
checkBounds(docID);
if (vectors == null) {
diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiDocValues.java b/lucene/core/src/java/org/apache/lucene/index/MultiDocValues.java
index 3970e0a6524e..10069a282659 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MultiDocValues.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MultiDocValues.java
@@ -646,7 +646,7 @@ public static SortedSetDocValues getSortedSetValues(final IndexReader r, final S
}
/** maps per-segment ordinals to/from global ordinal space */
- // TODO: we could also have a utility method to merge Terms[] and use size() as a weight when we need it
+ // TODO: we could also have a utility method to merge IndexedField[] and use size() as a weight when we need it
// TODO: use more efficient packed ints structures?
// TODO: pull this out? it's pretty generic (maps between N ord()-enabled TermsEnums)
public static class OrdinalMap implements Accountable {
diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiTerms.java b/lucene/core/src/java/org/apache/lucene/index/MultiField.java
similarity index 86%
rename from lucene/core/src/java/org/apache/lucene/index/MultiTerms.java
rename to lucene/core/src/java/org/apache/lucene/index/MultiField.java
index 79e11c45eee6..4f481d1f6b4a 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MultiTerms.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MultiField.java
@@ -32,8 +32,8 @@
* @lucene.experimental
*/
-public final class MultiTerms extends Terms {
- private final Terms[] subs;
+public final class MultiField extends IndexedField {
+ private final IndexedField[] subs;
private final ReaderSlice[] subSlices;
private final boolean hasFreqs;
private final boolean hasOffsets;
@@ -42,15 +42,15 @@ public final class MultiTerms extends Terms {
/** Sole constructor.
*
- * @param subs The {@link Terms} instances of all sub-readers.
+ * @param subs The {@link IndexedField} instances of all sub-readers.
* @param subSlices A parallel array (matching {@code
* subs}) describing the sub-reader slices.
*/
- public MultiTerms(Terms[] subs, ReaderSlice[] subSlices) throws IOException {
+ public MultiField(IndexedField[] subs, ReaderSlice[] subSlices) throws IOException {
this.subs = subs;
this.subSlices = subSlices;
- assert subs.length > 0 : "inefficient: don't use MultiTerms over one sub";
+ assert subs.length > 0 : "inefficient: don't use MultiField over one sub";
boolean _hasFreqs = true;
boolean _hasOffsets = true;
boolean _hasPositions = true;
@@ -68,12 +68,12 @@ public MultiTerms(Terms[] subs, ReaderSlice[] subSlices) throws IOException {
hasPayloads = hasPositions && _hasPayloads; // if all subs have pos, and at least one has payloads.
}
- /** Expert: returns the Terms being merged. */
- public Terms[] getSubTerms() {
+  /** Expert: returns the IndexedField instances being merged. */
+ public IndexedField[] getSubTerms() {
return subs;
}
- /** Expert: returns pointers to the sub-readers corresponding to the Terms being merged. */
+  /** Expert: returns pointers to the sub-readers corresponding to the IndexedField instances being merged. */
public ReaderSlice[] getSubSlices() {
return subSlices;
}
@@ -98,7 +98,7 @@ public TermsEnum intersect(CompiledAutomaton compiled, BytesRef startTerm) throw
@Override
public BytesRef getMin() throws IOException {
BytesRef minTerm = null;
- for(Terms terms : subs) {
+ for(IndexedField terms : subs) {
BytesRef term = terms.getMin();
if (minTerm == null || term.compareTo(minTerm) < 0) {
minTerm = term;
@@ -111,7 +111,7 @@ public BytesRef getMin() throws IOException {
@Override
public BytesRef getMax() throws IOException {
BytesRef maxTerm = null;
- for(Terms terms : subs) {
+ for(IndexedField terms : subs) {
BytesRef term = terms.getMax();
if (maxTerm == null || term.compareTo(maxTerm) > 0) {
maxTerm = term;
@@ -122,11 +122,11 @@ public BytesRef getMax() throws IOException {
}
@Override
- public TermsEnum iterator() throws IOException {
+ public TermsEnum getTermsEnum() throws IOException {
final List termsEnums = new ArrayList<>();
for(int i=0;i terms = new ConcurrentHashMap<>();
+ private final Map terms = new ConcurrentHashMap<>();
- /** Returns a single {@link Fields} instance for this
+ /** Returns a single {@link IndexedFields} instance for this
* reader, merging fields/terms/docs/positions on the
* fly. This method will return null if the reader
* has no postings.
@@ -59,25 +59,25 @@ public final class MultiFields extends Fields {
*
NOTE: this is a slow way to access postings.
* It's better to get the sub-readers and iterate through them
* yourself. */
- public static Fields getFields(IndexReader reader) throws IOException {
+ public static IndexedFields getFields(IndexReader reader) throws IOException {
final List leaves = reader.leaves();
switch (leaves.size()) {
case 1:
// already an atomic reader / reader with one leave
return leaves.get(0).reader().fields();
default:
- final List fields = new ArrayList<>(leaves.size());
+ final List fields = new ArrayList<>(leaves.size());
final List slices = new ArrayList<>(leaves.size());
for (final LeafReaderContext ctx : leaves) {
final LeafReader r = ctx.reader();
- final Fields f = r.fields();
+ final IndexedFields f = r.fields();
fields.add(f);
slices.add(new ReaderSlice(ctx.docBase, r.maxDoc(), fields.size()-1));
}
if (fields.size() == 1) {
return fields.get(0);
} else {
- return new MultiFields(fields.toArray(Fields.EMPTY_ARRAY),
+ return new MultiFields(fields.toArray(IndexedFields.EMPTY_ARRAY),
slices.toArray(ReaderSlice.EMPTY_ARRAY));
}
}
@@ -116,8 +116,8 @@ public static Bits getLiveDocs(IndexReader reader) {
}
/** This method may return null if the field does not exist.*/
- public static Terms getTerms(IndexReader r, String field) throws IOException {
- return getFields(r).terms(field);
+ public static IndexedField getIndexedField(IndexReader r, String field) throws IOException {
+ return getFields(r).indexedField(field);
}
/** Returns {@link PostingsEnum} for the specified field and
@@ -136,9 +136,9 @@ public static PostingsEnum getTermDocsEnum(IndexReader r, String field, BytesRef
public static PostingsEnum getTermDocsEnum(IndexReader r, String field, BytesRef term, int flags) throws IOException {
assert field != null;
assert term != null;
- final Terms terms = getTerms(r, field);
+ final IndexedField terms = getIndexedField(r, field);
if (terms != null) {
- final TermsEnum termsEnum = terms.iterator();
+ final TermsEnum termsEnum = terms.getTermsEnum();
if (termsEnum.seekExact(term)) {
return termsEnum.postings(null, flags);
}
@@ -163,9 +163,9 @@ public static PostingsEnum getTermPositionsEnum(IndexReader r, String field, Byt
public static PostingsEnum getTermPositionsEnum(IndexReader r, String field, BytesRef term, int flags) throws IOException {
assert field != null;
assert term != null;
- final Terms terms = getTerms(r, field);
+ final IndexedField terms = getIndexedField(r, field);
if (terms != null) {
- final TermsEnum termsEnum = terms.iterator();
+ final TermsEnum termsEnum = terms.getTermsEnum();
if (termsEnum.seekExact(term)) {
return termsEnum.postings(null, flags);
}
@@ -178,7 +178,7 @@ public static PostingsEnum getTermPositionsEnum(IndexReader r, String field, Byt
* @lucene.internal
*/
// TODO: why is this public?
- public MultiFields(Fields[] subs, ReaderSlice[] subSlices) {
+ public MultiFields(IndexedFields[] subs, ReaderSlice[] subSlices) {
this.subs = subs;
this.subSlices = subSlices;
}
@@ -194,20 +194,20 @@ public Iterator iterator() {
}
@Override
- public Terms terms(String field) throws IOException {
- Terms result = terms.get(field);
+ public IndexedField indexedField(String field) throws IOException {
+ IndexedField result = terms.get(field);
if (result != null)
return result;
// Lazy init: first time this field is requested, we
// create & add to terms:
- final List subs2 = new ArrayList<>();
+ final List subs2 = new ArrayList<>();
final List slices2 = new ArrayList<>();
// Gather all sub-readers that share this field
for(int i=0;i fields = new TreeMap<>();
+ private final class ParallelFields extends IndexedFields {
+ final Map fields = new TreeMap<>();
ParallelFields() {
}
- void addField(String fieldName, Terms terms) {
+ void addField(String fieldName, IndexedField terms) {
fields.put(fieldName, terms);
}
@@ -186,7 +186,7 @@ public Iterator iterator() {
}
@Override
- public Terms terms(String field) {
+ public IndexedField indexedField(String field) {
return fields.get(field);
}
@@ -216,7 +216,7 @@ public Bits getLiveDocs() {
}
@Override
- public Fields fields() {
+ public IndexedFields fields() {
ensureOpen();
return fields;
}
@@ -242,12 +242,12 @@ public void document(int docID, StoredFieldVisitor visitor) throws IOException {
}
@Override
- public Fields getTermVectors(int docID) throws IOException {
+ public IndexedFields getTermVectors(int docID) throws IOException {
ensureOpen();
ParallelFields fields = null;
for (Map.Entry ent : tvFieldToReader.entrySet()) {
String fieldName = ent.getKey();
- Terms vector = ent.getValue().getTermVector(docID, fieldName);
+ IndexedField vector = ent.getValue().getTermVector(docID, fieldName);
if (vector != null) {
if (fields == null) {
fields = new ParallelFields();
diff --git a/lucene/core/src/java/org/apache/lucene/index/SimpleMergedSegmentWarmer.java b/lucene/core/src/java/org/apache/lucene/index/SimpleMergedSegmentWarmer.java
index cf092ac57ed8..7d7e2158e0fe 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SimpleMergedSegmentWarmer.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SimpleMergedSegmentWarmer.java
@@ -45,7 +45,7 @@ public void warm(LeafReader reader) throws IOException {
int normsCount = 0;
for (FieldInfo info : reader.getFieldInfos()) {
if (info.getIndexOptions() != IndexOptions.NONE) {
- reader.terms(info.name);
+ reader.indexedField(info.name);
indexedCount++;
if (info.hasNorms()) {
diff --git a/lucene/core/src/java/org/apache/lucene/index/SlowCodecReaderWrapper.java b/lucene/core/src/java/org/apache/lucene/index/SlowCodecReaderWrapper.java
index d5b5c3358850..166346b2513d 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SlowCodecReaderWrapper.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SlowCodecReaderWrapper.java
@@ -259,7 +259,7 @@ public long ramBytesUsed() {
private static TermVectorsReader readerToTermVectorsReader(final LeafReader reader) {
return new TermVectorsReader() {
@Override
- public Fields get(int docID) throws IOException {
+ public IndexedFields get(int docID) throws IOException {
return reader.getTermVectors(docID);
}
@@ -285,7 +285,7 @@ public long ramBytesUsed() {
}
private static FieldsProducer readerToFieldsProducer(final LeafReader reader) throws IOException {
- final Fields fields = reader.fields();
+ final IndexedFields fields = reader.fields();
return new FieldsProducer() {
@Override
public Iterator iterator() {
@@ -293,8 +293,8 @@ public Iterator iterator() {
}
@Override
- public Terms terms(String field) throws IOException {
- return fields.terms(field);
+ public IndexedField indexedField(String field) throws IOException {
+ return fields.indexedField(field);
}
@Override
diff --git a/lucene/core/src/java/org/apache/lucene/index/SortingLeafReader.java b/lucene/core/src/java/org/apache/lucene/index/SortingLeafReader.java
index f24a4d0728e3..43984071db81 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SortingLeafReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SortingLeafReader.java
@@ -54,15 +54,15 @@ static class SortingFields extends FilterFields {
private final Sorter.DocMap docMap;
private final FieldInfos infos;
- public SortingFields(final Fields in, FieldInfos infos, Sorter.DocMap docMap) {
+ public SortingFields(final IndexedFields in, FieldInfos infos, Sorter.DocMap docMap) {
super(in);
this.docMap = docMap;
this.infos = infos;
}
@Override
- public Terms terms(final String field) throws IOException {
- Terms terms = in.terms(field);
+ public IndexedField indexedField(final String field) throws IOException {
+ IndexedField terms = in.indexedField(field);
if (terms == null) {
return null;
} else {
@@ -72,20 +72,20 @@ public Terms terms(final String field) throws IOException {
}
- private static class SortingTerms extends FilterTerms {
+ private static class SortingTerms extends FilterField {
private final Sorter.DocMap docMap;
private final IndexOptions indexOptions;
- public SortingTerms(final Terms in, IndexOptions indexOptions, final Sorter.DocMap docMap) {
+ public SortingTerms(final IndexedField in, IndexOptions indexOptions, final Sorter.DocMap docMap) {
super(in);
this.docMap = docMap;
this.indexOptions = indexOptions;
}
@Override
- public TermsEnum iterator() throws IOException {
- return new SortingTermsEnum(in.iterator(), docMap, indexOptions, hasPositions());
+ public TermsEnum getTermsEnum() throws IOException {
+ return new SortingTermsEnum(in.getTermsEnum(), docMap, indexOptions, hasPositions());
}
@Override
@@ -1042,7 +1042,7 @@ public void document(final int docID, final StoredFieldVisitor visitor) throws I
}
@Override
- public Fields fields() throws IOException {
+ public IndexedFields fields() throws IOException {
return new SortingFields(in.fields(), in.getFieldInfos(), docMap);
}
@@ -1238,7 +1238,7 @@ public SortedSetDocValues getSortedSetDocValues(String field) throws IOException
}
@Override
- public Fields getTermVectors(final int docID) throws IOException {
+ public IndexedFields getTermVectors(final int docID) throws IOException {
return in.getTermVectors(docMap.newToOld(docID));
}
diff --git a/lucene/core/src/java/org/apache/lucene/index/SortingTermVectorsConsumer.java b/lucene/core/src/java/org/apache/lucene/index/SortingTermVectorsConsumer.java
index dff808ee9657..8d6d432162ef 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SortingTermVectorsConsumer.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SortingTermVectorsConsumer.java
@@ -55,7 +55,7 @@ void flush(Map fieldsToFlush, final SegmentWriteState
try {
reader.checkIntegrity();
for (int docID = 0; docID < state.segmentInfo.maxDoc(); docID++) {
- Fields vectors = mergeReader.get(sortMap.newToOld(docID));
+ IndexedFields vectors = mergeReader.get(sortMap.newToOld(docID));
writeTermVectors(writer, vectors, state.fieldInfos);
}
writer.finish(state.fieldInfos, state.segmentInfo.maxDoc());
@@ -88,7 +88,7 @@ public void abort() {
}
/** Safe (but, slowish) default method to copy every vector field in the provided {@link TermVectorsWriter}. */
- private static void writeTermVectors(TermVectorsWriter writer, Fields vectors, FieldInfos fieldInfos) throws IOException {
+ private static void writeTermVectors(TermVectorsWriter writer, IndexedFields vectors, FieldInfos fieldInfos) throws IOException {
if (vectors == null) {
writer.startDocument(0);
writer.finishDocument();
@@ -97,7 +97,7 @@ private static void writeTermVectors(TermVectorsWriter writer, Fields vectors, F
int numFields = vectors.size();
if (numFields == -1) {
- // count manually! TODO: Maybe enforce that Fields.size() returns something valid?
+ // count manually! TODO: Maybe enforce that IndexedFields.size() returns something valid?
numFields = 0;
for (final Iterator it = vectors.iterator(); it.hasNext(); ) {
it.next();
@@ -119,7 +119,7 @@ private static void writeTermVectors(TermVectorsWriter writer, Fields vectors, F
assert lastFieldName == null || fieldName.compareTo(lastFieldName) > 0: "lastFieldName=" + lastFieldName + " fieldName=" + fieldName;
lastFieldName = fieldName;
- final Terms terms = vectors.terms(fieldName);
+ final IndexedField terms = vectors.indexedField(fieldName);
if (terms == null) {
// FieldsEnum shouldn't lie...
continue;
@@ -132,16 +132,16 @@ private static void writeTermVectors(TermVectorsWriter writer, Fields vectors, F
int numTerms = (int) terms.size();
if (numTerms == -1) {
- // count manually. It is stupid, but needed, as Terms.size() is not a mandatory statistics function
+ // count manually. It is stupid, but needed, as IndexedField.size() is not a mandatory statistics function
numTerms = 0;
- termsEnum = terms.iterator();
+ termsEnum = terms.getTermsEnum();
while(termsEnum.next() != null) {
numTerms++;
}
}
writer.startField(fieldInfo, numTerms, hasPositions, hasOffsets, hasPayloads);
- termsEnum = terms.iterator();
+ termsEnum = terms.getTermsEnum();
int termCount = 0;
while(termsEnum.next() != null) {
diff --git a/lucene/core/src/java/org/apache/lucene/index/TermContext.java b/lucene/core/src/java/org/apache/lucene/index/TermContext.java
index ed25564e4612..9ebbe02e2008 100644
--- a/lucene/core/src/java/org/apache/lucene/index/TermContext.java
+++ b/lucene/core/src/java/org/apache/lucene/index/TermContext.java
@@ -93,9 +93,9 @@ public static TermContext build(IndexReaderContext context, Term term)
//if (DEBUG) System.out.println("prts.build term=" + term);
for (final LeafReaderContext ctx : context.leaves()) {
//if (DEBUG) System.out.println(" r=" + leaves[i].reader);
- final Terms terms = ctx.reader().terms(field);
+ final IndexedField terms = ctx.reader().indexedField(field);
if (terms != null) {
- final TermsEnum termsEnum = terms.iterator();
+ final TermsEnum termsEnum = terms.getTermsEnum();
if (termsEnum.seekExact(bytes)) {
final TermState termState = termsEnum.termState();
//if (DEBUG) System.out.println(" found");
diff --git a/lucene/core/src/java/org/apache/lucene/index/package-info.java b/lucene/core/src/java/org/apache/lucene/index/package-info.java
index f5a86d1cb37d..d8d74700e9f0 100644
--- a/lucene/core/src/java/org/apache/lucene/index/package-info.java
+++ b/lucene/core/src/java/org/apache/lucene/index/package-info.java
@@ -22,8 +22,8 @@
*
*
- * {@link org.apache.lucene.index.Fields} is the initial entry point into the
+ * {@link org.apache.lucene.index.IndexedFields} is the initial entry point into the
* postings APIs, this can be obtained in several ways:
*
* // access indexed fields for an index segment
- * Fields fields = reader.fields();
+ * IndexedFields fields = reader.fields();
* // access term vector fields for a specified document
- * Fields fields = reader.getTermVectors(docid);
+ * IndexedFields fields = reader.getTermVectors(docid);
*
- * Fields implements Java's Iterable interface, so it's easy to enumerate the
+ * IndexedFields implements Java's Iterable interface, so it's easy to enumerate the
* list of fields:
*
* // enumerate list of fields
* for (String field : fields) {
* // access the terms for this field
- * Terms terms = fields.terms(field);
+ * IndexedField terms = fields.indexedField(field);
* }
*
*
*
- * Terms
+ * IndexedField
*
*
- * {@link org.apache.lucene.index.Terms} represents the collection of terms
+ * {@link org.apache.lucene.index.IndexedField} represents the collection of terms
* within a field, exposes some metadata and statistics,
* and an API for enumeration.
*
@@ -160,23 +160,23 @@
* Field statistics
*
*
- *
{@link org.apache.lucene.index.Terms#size}: Returns the number of
+ *
{@link org.apache.lucene.index.IndexedField#size}: Returns the number of
* unique terms in the field. This statistic may be unavailable
- * (returns -1) for some Terms implementations such as
- * {@link org.apache.lucene.index.MultiTerms}, where it cannot be efficiently
+ * (returns -1) for some IndexedField implementations such as
+ * {@link org.apache.lucene.index.MultiField}, where it cannot be efficiently
* computed. Note that this count also includes terms that appear only
* in deleted documents: when segments are merged such terms are also merged
* away and the statistic is then updated.
- *
{@link org.apache.lucene.index.Terms#getDocCount}: Returns the number of
+ *
{@link org.apache.lucene.index.IndexedField#getDocCount}: Returns the number of
* documents that contain at least one occurrence of any term for this field.
* This can be thought of as a Field-level docFreq(). Like docFreq() it will
* also count deleted documents.
- *
{@link org.apache.lucene.index.Terms#getSumDocFreq}: Returns the number of
+ *
{@link org.apache.lucene.index.IndexedField#getSumDocFreq}: Returns the number of
* postings (term-document mappings in the inverted index) for the field. This
* can be thought of as the sum of {@link org.apache.lucene.index.TermsEnum#docFreq}
* across all terms in the field, and like docFreq() it will also count postings
* that appear in deleted documents.
- *
{@link org.apache.lucene.index.Terms#getSumTotalTermFreq}: Returns the number
+ *
{@link org.apache.lucene.index.IndexedField#getSumTotalTermFreq}: Returns the number
* of tokens for the field. This can be thought of as the sum of
* {@link org.apache.lucene.index.TermsEnum#totalTermFreq} across all terms in the
* field, and like totalTermFreq() it will also count occurrences that appear in
@@ -196,7 +196,7 @@
* of live documents (excluding deleted documents) in the index.
*
{@link org.apache.lucene.index.IndexReader#numDeletedDocs}: Returns the
* number of deleted documents in the index.
- *
{@link org.apache.lucene.index.Fields#size}: Returns the number of indexed
+ *
{@link org.apache.lucene.index.IndexedFields#size}: Returns the number of indexed
* fields.
*
*
diff --git a/lucene/core/src/java/org/apache/lucene/search/AutomatonQuery.java b/lucene/core/src/java/org/apache/lucene/search/AutomatonQuery.java
index 3b113a28a313..ea8b749a0f56 100644
--- a/lucene/core/src/java/org/apache/lucene/search/AutomatonQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/AutomatonQuery.java
@@ -20,7 +20,7 @@
import java.io.IOException;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.automaton.Automaton;
@@ -103,7 +103,7 @@ public AutomatonQuery(final Term term, Automaton automaton, int maxDeterminizedS
}
@Override
- protected TermsEnum getTermsEnum(Terms terms, AttributeSource atts) throws IOException {
+ protected TermsEnum getTermsEnum(IndexedField terms, AttributeSource atts) throws IOException {
return compiled.getTermsEnum(terms);
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/BoostAttribute.java b/lucene/core/src/java/org/apache/lucene/search/BoostAttribute.java
index 2a99a0828ad7..6823df507e28 100644
--- a/lucene/core/src/java/org/apache/lucene/search/BoostAttribute.java
+++ b/lucene/core/src/java/org/apache/lucene/search/BoostAttribute.java
@@ -20,9 +20,9 @@
import org.apache.lucene.util.Attribute;
import org.apache.lucene.util.AttributeSource; // javadocs only
import org.apache.lucene.index.TermsEnum; // javadocs only
-import org.apache.lucene.index.Terms; // javadocs only
+import org.apache.lucene.index.IndexedField; // javadocs only
-/** Add this {@link Attribute} to a {@link TermsEnum} returned by {@link MultiTermQuery#getTermsEnum(Terms,AttributeSource)}
+/** Add this {@link Attribute} to a {@link TermsEnum} returned by {@link MultiTermQuery#getTermsEnum(IndexedField,AttributeSource)}
* and update the boost on each returned term. This enables to control the boost factor
* for each matching term in {@link MultiTermQuery#SCORING_BOOLEAN_REWRITE} or
* {@link TopTermsRewrite} mode.
diff --git a/lucene/core/src/java/org/apache/lucene/search/CollectionStatistics.java b/lucene/core/src/java/org/apache/lucene/search/CollectionStatistics.java
index e0aafa84ab2a..83eeba497ffe 100644
--- a/lucene/core/src/java/org/apache/lucene/search/CollectionStatistics.java
+++ b/lucene/core/src/java/org/apache/lucene/search/CollectionStatistics.java
@@ -17,7 +17,7 @@
package org.apache.lucene.search;
import org.apache.lucene.index.IndexReader; // javadocs
-import org.apache.lucene.index.Terms; // javadocs
+import org.apache.lucene.index.IndexedField; // javadocs
/**
@@ -57,19 +57,19 @@ public final long maxDoc() {
/** returns the total number of documents that
* have at least one term for this field.
- * @see Terms#getDocCount() */
+ * @see IndexedField#getDocCount() */
public final long docCount() {
return docCount;
}
/** returns the total number of tokens for this field
- * @see Terms#getSumTotalTermFreq() */
+ * @see IndexedField#getSumTotalTermFreq() */
public final long sumTotalTermFreq() {
return sumTotalTermFreq;
}
/** returns the total number of postings for this field
- * @see Terms#getSumDocFreq() */
+ * @see IndexedField#getSumDocFreq() */
public final long sumDocFreq() {
return sumDocFreq;
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/DocValuesRewriteMethod.java b/lucene/core/src/java/org/apache/lucene/search/DocValuesRewriteMethod.java
index 20266781d035..e70fde627d31 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DocValuesRewriteMethod.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DocValuesRewriteMethod.java
@@ -23,7 +23,7 @@
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SortedSetDocValues;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.LongBitSet;
@@ -77,10 +77,10 @@ public Weight createWeight(IndexSearcher searcher, boolean needsScores, float bo
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
final SortedSetDocValues fcsi = DocValues.getSortedSet(context.reader(), query.field);
- TermsEnum termsEnum = query.getTermsEnum(new Terms() {
+ TermsEnum termsEnum = query.getTermsEnum(new IndexedField() {
@Override
- public TermsEnum iterator() throws IOException {
+ public TermsEnum getTermsEnum() throws IOException {
return fcsi.termsEnum();
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/FuzzyQuery.java b/lucene/core/src/java/org/apache/lucene/search/FuzzyQuery.java
index 3c1eacd80ee9..515addc243d6 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FuzzyQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FuzzyQuery.java
@@ -21,7 +21,7 @@
import org.apache.lucene.index.SingleTermsEnum;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.automaton.LevenshteinAutomata;
@@ -147,9 +147,9 @@ public boolean getTranspositions() {
}
@Override
- protected TermsEnum getTermsEnum(Terms terms, AttributeSource atts) throws IOException {
+ protected TermsEnum getTermsEnum(IndexedField terms, AttributeSource atts) throws IOException {
if (maxEdits == 0 || prefixLength >= term.text().length()) { // can only match if it's exact
- return new SingleTermsEnum(terms.iterator(), term.bytes());
+ return new SingleTermsEnum(terms.getTermsEnum(), term.bytes());
}
return new FuzzyTermsEnum(terms, atts, getTerm(), maxEdits, prefixLength, transpositions);
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/FuzzyTermsEnum.java b/lucene/core/src/java/org/apache/lucene/search/FuzzyTermsEnum.java
index 881c5dd62b1c..d2011d8f0628 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FuzzyTermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FuzzyTermsEnum.java
@@ -20,7 +20,7 @@
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermState;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.Attribute;
import org.apache.lucene.util.AttributeImpl;
@@ -72,7 +72,7 @@ public final class FuzzyTermsEnum extends TermsEnum {
// collected terms are ed=1:
private int maxEdits;
- final Terms terms;
+ final IndexedField terms;
final Term term;
final int termText[];
final int realPrefixLength;
@@ -96,7 +96,7 @@ public final class FuzzyTermsEnum extends TermsEnum {
* @param prefixLength Length of required common prefix. Default value is 0.
* @throws IOException if there is a low-level IO error
*/
- public FuzzyTermsEnum(Terms terms, AttributeSource atts, Term term,
+ public FuzzyTermsEnum(IndexedField terms, AttributeSource atts, Term term,
final int maxEdits, final int prefixLength, boolean transpositions) throws IOException {
if (maxEdits < 0 || maxEdits > LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE) {
throw new IllegalArgumentException("max edits must be 0.." + LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE + ", inclusive; got: " + maxEdits);
diff --git a/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java b/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java
index 5cae1222cc05..0c0275c6f94e 100644
--- a/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java
+++ b/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java
@@ -42,7 +42,7 @@
import org.apache.lucene.index.StoredFieldVisitor;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.search.similarities.BM25Similarity;
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.store.NIOFSDirectory; // javadoc
@@ -792,7 +792,7 @@ public CollectionStatistics collectionStatistics(String field) throws IOExceptio
assert field != null;
- Terms terms = MultiFields.getTerms(reader, field);
+ IndexedField terms = MultiFields.getIndexedField(reader, field);
if (terms == null) {
docCount = 0;
sumTotalTermFreq = 0;
diff --git a/lucene/core/src/java/org/apache/lucene/search/MaxNonCompetitiveBoostAttribute.java b/lucene/core/src/java/org/apache/lucene/search/MaxNonCompetitiveBoostAttribute.java
index 59670b4885e4..e7db82a2bba2 100644
--- a/lucene/core/src/java/org/apache/lucene/search/MaxNonCompetitiveBoostAttribute.java
+++ b/lucene/core/src/java/org/apache/lucene/search/MaxNonCompetitiveBoostAttribute.java
@@ -20,16 +20,16 @@
import org.apache.lucene.util.Attribute;
import org.apache.lucene.util.AttributeSource; // javadocs only
import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.index.Terms; // javadocs only
+import org.apache.lucene.index.IndexedField; // javadocs only
/** Add this {@link Attribute} to a fresh {@link AttributeSource} before calling
- * {@link MultiTermQuery#getTermsEnum(Terms,AttributeSource)}.
+ * {@link MultiTermQuery#getTermsEnum(IndexedField,AttributeSource)}.
* {@link FuzzyQuery} is using this to control its internal behaviour
* to only return competitive terms.
*
Please note: This attribute is intended to be added by the {@link MultiTermQuery.RewriteMethod}
* to an empty {@link AttributeSource} that is shared for all segments
* during query rewrite. This attribute source is passed to all segment enums
- * on {@link MultiTermQuery#getTermsEnum(Terms,AttributeSource)}.
+ * on {@link MultiTermQuery#getTermsEnum(IndexedField,AttributeSource)}.
* {@link TopTermsRewrite} uses this attribute to
* inform all enums about the current boost, that is not competitive.
* @lucene.internal
diff --git a/lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java b/lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java
index afb6fc754cb6..1e286c34777b 100644
--- a/lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java
@@ -28,7 +28,7 @@
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.index.TermState;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.similarities.Similarity.SimScorer;
import org.apache.lucene.search.similarities.Similarity;
@@ -41,7 +41,7 @@
* adding more than one term at the same position that are treated as a disjunction (OR).
* To use this class to search for the phrase "Microsoft app*" first create a Builder and use
* {@link Builder#add(Term)} on the term "microsoft" (assuming lowercase analysis), then
- * find all terms that have "app" as prefix using {@link LeafReader#terms(String)},
+ * find all terms that have "app" as prefix using {@link LeafReader#indexedField(String)},
* seeking to "app" then iterating and collecting terms until there is no longer
* that prefix, and finally use {@link Builder#add(Term[])} to add them.
* {@link Builder#build()} returns the fully constructed (and immutable) MultiPhraseQuery.
@@ -226,7 +226,7 @@ public Scorer scorer(LeafReaderContext context) throws IOException {
PhraseQuery.PostingsAndFreq[] postingsFreqs = new PhraseQuery.PostingsAndFreq[termArrays.length];
- final Terms fieldTerms = reader.terms(field);
+ final IndexedField fieldTerms = reader.indexedField(field);
if (fieldTerms == null) {
return null;
}
@@ -238,7 +238,7 @@ public Scorer scorer(LeafReaderContext context) throws IOException {
}
// Reuse single TermsEnum below:
- final TermsEnum termsEnum = fieldTerms.iterator();
+ final TermsEnum termsEnum = fieldTerms.getTermsEnum();
float totalMatchCost = 0;
for (int pos=0; posThis query cannot be used directly; you must subclass
- * it and define {@link #getTermsEnum(Terms,AttributeSource)} to provide a {@link
+ * it and define {@link #getTermsEnum(IndexedField,AttributeSource)} to provide a {@link
* FilteredTermsEnum} that iterates through the terms to be
* matched.
*
@@ -72,9 +72,9 @@ public static abstract class RewriteMethod {
public abstract Query rewrite(IndexReader reader, MultiTermQuery query) throws IOException;
/**
* Returns the {@link MultiTermQuery}s {@link TermsEnum}
- * @see MultiTermQuery#getTermsEnum(Terms, AttributeSource)
+ * @see MultiTermQuery#getTermsEnum(IndexedField, AttributeSource)
*/
- protected TermsEnum getTermsEnum(MultiTermQuery query, Terms terms, AttributeSource atts) throws IOException {
+ protected TermsEnum getTermsEnum(MultiTermQuery query, IndexedField terms, AttributeSource atts) throws IOException {
return query.getTermsEnum(terms, atts); // allow RewriteMethod subclasses to pull a TermsEnum from the MTQ
}
}
@@ -290,19 +290,19 @@ public MultiTermQuery(final String field) {
* provide attributes, the rewrite method uses to inform about e.g. maximum competitive boosts.
* This is currently only used by {@link TopTermsRewrite}
*/
- protected abstract TermsEnum getTermsEnum(Terms terms, AttributeSource atts) throws IOException;
+ protected abstract TermsEnum getTermsEnum(IndexedField terms, AttributeSource atts) throws IOException;
/** Convenience method, if no attributes are needed:
* This simply passes empty attributes and is equal to:
* getTermsEnum(terms, new AttributeSource())
*/
- protected final TermsEnum getTermsEnum(Terms terms) throws IOException {
+ protected final TermsEnum getTermsEnum(IndexedField terms) throws IOException {
return getTermsEnum(terms, new AttributeSource());
}
/**
* To rewrite to a simpler form, instead return a simpler
- * enum from {@link #getTermsEnum(Terms, AttributeSource)}. For example,
+ * enum from {@link #getTermsEnum(IndexedField, AttributeSource)}. For example,
* to rewrite to a single term, return a {@link SingleTermsEnum}
*/
@Override
diff --git a/lucene/core/src/java/org/apache/lucene/search/MultiTermQueryConstantScoreWrapper.java b/lucene/core/src/java/org/apache/lucene/search/MultiTermQueryConstantScoreWrapper.java
index 54c6d484a0ff..d4f9833b9cba 100644
--- a/lucene/core/src/java/org/apache/lucene/search/MultiTermQueryConstantScoreWrapper.java
+++ b/lucene/core/src/java/org/apache/lucene/search/MultiTermQueryConstantScoreWrapper.java
@@ -27,7 +27,7 @@
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.index.TermState;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.util.BytesRef;
@@ -132,7 +132,7 @@ private boolean collectTerms(LeafReaderContext context, TermsEnum termsEnum, Lis
* there are few terms, or build a bitset containing matching docs.
*/
private WeightOrDocIdSet rewrite(LeafReaderContext context) throws IOException {
- final Terms terms = context.reader().terms(query.field);
+ final IndexedField terms = context.reader().indexedField(query.field);
if (terms == null) {
// field does not exist
return new WeightOrDocIdSet((DocIdSet) null);
@@ -160,7 +160,7 @@ private WeightOrDocIdSet rewrite(LeafReaderContext context) throws IOException {
// Too many terms: go back to the terms we already collected and start building the bit set
DocIdSetBuilder builder = new DocIdSetBuilder(context.reader().maxDoc(), terms);
if (collectedTerms.isEmpty() == false) {
- TermsEnum termsEnum2 = terms.iterator();
+ TermsEnum termsEnum2 = terms.getTermsEnum();
for (TermAndState t : collectedTerms) {
termsEnum2.seekExact(t.term, t.state);
docs = termsEnum2.postings(docs, PostingsEnum.NONE);
diff --git a/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java b/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java
index d0bf8285cc9f..4cf9663540b6 100644
--- a/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java
@@ -34,7 +34,7 @@
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.index.TermState;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.search.similarities.Similarity.SimScorer;
@@ -392,7 +392,7 @@ public Scorer scorer(LeafReaderContext context) throws IOException {
final LeafReader reader = context.reader();
PostingsAndFreq[] postingsFreqs = new PostingsAndFreq[terms.length];
- final Terms fieldTerms = reader.terms(field);
+ final IndexedField fieldTerms = reader.indexedField(field);
if (fieldTerms == null) {
return null;
}
@@ -402,7 +402,7 @@ public Scorer scorer(LeafReaderContext context) throws IOException {
}
// Reuse single TermsEnum below:
- final TermsEnum te = fieldTerms.iterator();
+ final TermsEnum te = fieldTerms.getTermsEnum();
float totalMatchCost = 0;
for (int i = 0; i < terms.length; i++) {
diff --git a/lucene/core/src/java/org/apache/lucene/search/SynonymQuery.java b/lucene/core/src/java/org/apache/lucene/search/SynonymQuery.java
index c718dc9ed761..e3710e4a54e2 100644
--- a/lucene/core/src/java/org/apache/lucene/search/SynonymQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/SynonymQuery.java
@@ -191,7 +191,7 @@ public Scorer scorer(LeafReaderContext context) throws IOException {
for (int i = 0; i < terms.length; i++) {
TermState state = termContexts[i].get(context.ord);
if (state != null) {
- TermsEnum termsEnum = context.reader().terms(terms[i].field()).iterator();
+ TermsEnum termsEnum = context.reader().indexedField(terms[i].field()).getTermsEnum();
termsEnum.seekExact(terms[i].bytes(), state);
PostingsEnum postings = termsEnum.postings(null, PostingsEnum.FREQS);
subScorers.add(new TermScorer(this, postings, simScorer));
diff --git a/lucene/core/src/java/org/apache/lucene/search/TermCollectingRewrite.java b/lucene/core/src/java/org/apache/lucene/search/TermCollectingRewrite.java
index fffa5a84fca3..0b01160489b1 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TermCollectingRewrite.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TermCollectingRewrite.java
@@ -24,7 +24,7 @@
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.BytesRef;
@@ -49,7 +49,7 @@ protected final void addClause(B topLevel, Term term, int docCount, float boost)
final void collectTerms(IndexReader reader, MultiTermQuery query, TermCollector collector) throws IOException {
IndexReaderContext topReaderContext = reader.getContext();
for (LeafReaderContext context : topReaderContext.leaves()) {
- final Terms terms = context.reader().terms(query.field);
+ final IndexedField terms = context.reader().indexedField(query.field);
if (terms == null) {
// field does not exist
continue;
diff --git a/lucene/core/src/java/org/apache/lucene/search/TermInSetQuery.java b/lucene/core/src/java/org/apache/lucene/search/TermInSetQuery.java
index 08fe3c3a485f..3c1704e8b3f4 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TermInSetQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TermInSetQuery.java
@@ -26,7 +26,6 @@
import java.util.Set;
import java.util.SortedSet;
-import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
@@ -36,7 +35,8 @@
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.index.TermState;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedFields;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.util.Accountable;
@@ -224,12 +224,12 @@ public void extractTerms(Set terms) {
private WeightOrDocIdSet rewrite(LeafReaderContext context) throws IOException {
final LeafReader reader = context.reader();
- final Fields fields = reader.fields();
- Terms terms = fields.terms(field);
+ final IndexedFields fields = reader.fields();
+ IndexedField terms = fields.indexedField(field);
if (terms == null) {
return null;
}
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
PostingsEnum docs = null;
TermIterator iterator = termData.iterator();
diff --git a/lucene/core/src/java/org/apache/lucene/search/TermQuery.java b/lucene/core/src/java/org/apache/lucene/search/TermQuery.java
index e3e299f6bb51..fb045e901ea3 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TermQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TermQuery.java
@@ -29,7 +29,7 @@
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.index.TermState;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.search.similarities.Similarity.SimScorer;
@@ -109,16 +109,16 @@ private TermsEnum getTermsEnum(LeafReaderContext context) throws IOException {
assert termNotInReader(context.reader(), term) : "no termstate found but term exists in reader term=" + term;
return null;
}
- final TermsEnum termsEnum = context.reader().terms(term.field()).iterator();
+ final TermsEnum termsEnum = context.reader().indexedField(term.field()).getTermsEnum();
termsEnum.seekExact(term.bytes(), state);
return termsEnum;
} else {
// TermQuery used as a filter, so the term states have not been built up front
- Terms terms = context.reader().terms(term.field());
+ IndexedField terms = context.reader().indexedField(term.field());
if (terms == null) {
return null;
}
- final TermsEnum termsEnum = terms.iterator();
+ final TermsEnum termsEnum = terms.getTermsEnum();
if (termsEnum.seekExact(term.bytes())) {
return termsEnum;
} else {
diff --git a/lucene/core/src/java/org/apache/lucene/search/similarities/BasicStats.java b/lucene/core/src/java/org/apache/lucene/search/similarities/BasicStats.java
index a08fe2fdbc15..0da58b039893 100644
--- a/lucene/core/src/java/org/apache/lucene/search/similarities/BasicStats.java
+++ b/lucene/core/src/java/org/apache/lucene/search/similarities/BasicStats.java
@@ -17,7 +17,7 @@
package org.apache.lucene.search.similarities;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
/**
* Stores all statistics commonly used ranking methods.
@@ -61,7 +61,7 @@ public void setNumberOfDocuments(long numberOfDocuments) {
/**
* Returns the total number of tokens in the field.
- * @see Terms#getSumTotalTermFreq()
+ * @see IndexedField#getSumTotalTermFreq()
*/
public long getNumberOfFieldTokens() {
return numberOfFieldTokens;
@@ -69,7 +69,7 @@ public long getNumberOfFieldTokens() {
/**
* Sets the total number of tokens in the field.
- * @see Terms#getSumTotalTermFreq()
+ * @see IndexedField#getSumTotalTermFreq()
*/
public void setNumberOfFieldTokens(long numberOfFieldTokens) {
this.numberOfFieldTokens = numberOfFieldTokens;
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanNearQuery.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanNearQuery.java
index 7958f4758b0b..7343afe149ec 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanNearQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanNearQuery.java
@@ -30,7 +30,7 @@
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
@@ -203,7 +203,7 @@ public void extractTermContexts(Map contexts) {
@Override
public Spans getSpans(final LeafReaderContext context, Postings requiredPostings) throws IOException {
- Terms terms = context.reader().terms(field);
+ IndexedField terms = context.reader().indexedField(field);
if (terms == null) {
return null; // field does not exist
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanTermQuery.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanTermQuery.java
index 3e13be7ecb11..a26eea500f28 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanTermQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanTermQuery.java
@@ -30,7 +30,7 @@
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.index.TermState;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.IndexSearcher;
@@ -107,13 +107,13 @@ public Spans getSpans(final LeafReaderContext context, Postings requiredPostings
return null;
}
- final Terms terms = context.reader().terms(term.field());
+ final IndexedField terms = context.reader().indexedField(term.field());
if (terms == null)
return null;
if (terms.hasPositions() == false)
throw new IllegalStateException("field \"" + term.field() + "\" was indexed without position data; cannot run SpanTermQuery (term=" + term.text() + ")");
- final TermsEnum termsEnum = terms.iterator();
+ final TermsEnum termsEnum = terms.getTermsEnum();
termsEnum.seekExact(term.bytes(), state);
final PostingsEnum postings = termsEnum.postings(null, requiredPostings.getRequiredPostings());
diff --git a/lucene/core/src/java/org/apache/lucene/util/DocIdSetBuilder.java b/lucene/core/src/java/org/apache/lucene/util/DocIdSetBuilder.java
index 954614b3ad59..61d1b879d156 100644
--- a/lucene/core/src/java/org/apache/lucene/util/DocIdSetBuilder.java
+++ b/lucene/core/src/java/org/apache/lucene/util/DocIdSetBuilder.java
@@ -22,7 +22,7 @@
import java.util.List;
import org.apache.lucene.index.PointValues;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.packed.PackedInts;
@@ -108,8 +108,8 @@ public DocIdSetBuilder(int maxDoc) {
}
/** Create a {@link DocIdSetBuilder} instance that is optimized for
- * accumulating docs that match the given {@link Terms}. */
- public DocIdSetBuilder(int maxDoc, Terms terms) throws IOException {
+ * accumulating docs that match the given {@link IndexedField}. */
+ public DocIdSetBuilder(int maxDoc, IndexedField terms) throws IOException {
this(maxDoc, terms.getDocCount(), terms.getSumDocFreq());
}
diff --git a/lucene/core/src/java/org/apache/lucene/util/automaton/CompiledAutomaton.java b/lucene/core/src/java/org/apache/lucene/util/automaton/CompiledAutomaton.java
index bd00a709afb1..64c40553763e 100644
--- a/lucene/core/src/java/org/apache/lucene/util/automaton/CompiledAutomaton.java
+++ b/lucene/core/src/java/org/apache/lucene/util/automaton/CompiledAutomaton.java
@@ -22,7 +22,7 @@
import java.util.List;
import org.apache.lucene.index.SingleTermsEnum;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
@@ -320,18 +320,18 @@ private BytesRef addTail(int state, BytesRefBuilder term, int idx, int leadLabel
}
// TODO: should this take startTerm too? This way
- // Terms.intersect could forward to this method if type !=
+ // IndexedField.intersect could forward to this method if type !=
// NORMAL:
- /** Return a {@link TermsEnum} intersecting the provided {@link Terms}
+ /** Return a {@link TermsEnum} intersecting the provided {@link IndexedField}
* with the terms accepted by this automaton. */
- public TermsEnum getTermsEnum(Terms terms) throws IOException {
+ public TermsEnum getTermsEnum(IndexedField terms) throws IOException {
switch(type) {
case NONE:
return TermsEnum.EMPTY;
case ALL:
- return terms.iterator();
+ return terms.getTermsEnum();
case SINGLE:
- return new SingleTermsEnum(terms.iterator(), term);
+ return new SingleTermsEnum(terms.getTermsEnum(), term);
case NORMAL:
return terms.intersect(this, null);
default:
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat.java
index 1a2ab627c720..dbe0182e803a 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat.java
@@ -55,7 +55,7 @@ public void testFinalBlock() throws Exception {
DirectoryReader r = DirectoryReader.open(w);
assertEquals(1, r.leaves().size());
- FieldReader field = (FieldReader) r.leaves().get(0).reader().fields().terms("field");
+ FieldReader field = (FieldReader) r.leaves().get(0).reader().fields().indexedField("field");
// We should see exactly two blocks: one root block (prefix empty string) and one block for z* terms (prefix z):
Stats stats = field.getStats();
assertEquals(0, stats.floorBlockCount);
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat3.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat3.java
index 1b3b9affdf3b..6c42a7f117c7 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat3.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat3.java
@@ -41,7 +41,7 @@
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum.SeekStatus;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
@@ -150,19 +150,19 @@ private void verify(Directory dir) throws Exception {
DirectoryReader ir = DirectoryReader.open(dir);
for (LeafReaderContext leaf : ir.leaves()) {
LeafReader leafReader = leaf.reader();
- assertTerms(leafReader.terms("field1docs"), leafReader.terms("field2freqs"), true);
- assertTerms(leafReader.terms("field3positions"), leafReader.terms("field4offsets"), true);
- assertTerms(leafReader.terms("field4offsets"), leafReader.terms("field5payloadsFixed"), true);
- assertTerms(leafReader.terms("field5payloadsFixed"), leafReader.terms("field6payloadsVariable"), true);
- assertTerms(leafReader.terms("field6payloadsVariable"), leafReader.terms("field7payloadsFixedOffsets"), true);
- assertTerms(leafReader.terms("field7payloadsFixedOffsets"), leafReader.terms("field8payloadsVariableOffsets"), true);
+ assertTerms(leafReader.indexedField("field1docs"), leafReader.indexedField("field2freqs"), true);
+ assertTerms(leafReader.indexedField("field3positions"), leafReader.indexedField("field4offsets"), true);
+ assertTerms(leafReader.indexedField("field4offsets"), leafReader.indexedField("field5payloadsFixed"), true);
+ assertTerms(leafReader.indexedField("field5payloadsFixed"), leafReader.indexedField("field6payloadsVariable"), true);
+ assertTerms(leafReader.indexedField("field6payloadsVariable"), leafReader.indexedField("field7payloadsFixedOffsets"), true);
+ assertTerms(leafReader.indexedField("field7payloadsFixedOffsets"), leafReader.indexedField("field8payloadsVariableOffsets"), true);
}
ir.close();
}
// following code is almost an exact dup of code from TestDuelingCodecs: sorry!
- public void assertTerms(Terms leftTerms, Terms rightTerms, boolean deep) throws Exception {
+ public void assertTerms(IndexedField leftTerms, IndexedField rightTerms, boolean deep) throws Exception {
if (leftTerms == null || rightTerms == null) {
assertNull(leftTerms);
assertNull(rightTerms);
@@ -173,8 +173,8 @@ public void assertTerms(Terms leftTerms, Terms rightTerms, boolean deep) throws
// NOTE: we don't assert hasOffsets/hasPositions/hasPayloads because they are allowed to be different
boolean bothHavePositions = leftTerms.hasPositions() && rightTerms.hasPositions();
- TermsEnum leftTermsEnum = leftTerms.iterator();
- TermsEnum rightTermsEnum = rightTerms.iterator();
+ TermsEnum leftTermsEnum = leftTerms.getTermsEnum();
+ TermsEnum rightTermsEnum = rightTerms.getTermsEnum();
assertTermsEnum(leftTermsEnum, rightTermsEnum, true, bothHavePositions);
assertTermsSeeking(leftTerms, rightTerms);
@@ -194,7 +194,7 @@ public void assertTerms(Terms leftTerms, Terms rightTerms, boolean deep) throws
}
}
- private void assertTermsSeeking(Terms leftTerms, Terms rightTerms) throws Exception {
+ private void assertTermsSeeking(IndexedField leftTerms, IndexedField rightTerms) throws Exception {
TermsEnum leftEnum = null;
TermsEnum rightEnum = null;
@@ -206,7 +206,7 @@ private void assertTermsSeeking(Terms leftTerms, Terms rightTerms) throws Except
HashSet<BytesRef> tests = new HashSet<>();
int numPasses = 0;
while (numPasses < 10 && tests.size() < numTests) {
- leftEnum = leftTerms.iterator();
+ leftEnum = leftTerms.getTermsEnum();
BytesRef term = null;
while ((term = leftEnum.next()) != null) {
int code = random.nextInt(10);
@@ -234,8 +234,8 @@ private void assertTermsSeeking(Terms leftTerms, Terms rightTerms) throws Except
Collections.shuffle(shuffledTests, random);
for (BytesRef b : shuffledTests) {
- leftEnum = leftTerms.iterator();
- rightEnum = rightTerms.iterator();
+ leftEnum = leftTerms.getTermsEnum();
+ rightEnum = rightTerms.getTermsEnum();
assertEquals(leftEnum.seekExact(b), rightEnum.seekExact(b));
assertEquals(leftEnum.seekExact(b), rightEnum.seekExact(b));
@@ -260,9 +260,9 @@ private void assertTermsSeeking(Terms leftTerms, Terms rightTerms) throws Except
}
/**
- * checks collection-level statistics on Terms
+ * checks collection-level statistics on IndexedField
*/
- public void assertTermsStatistics(Terms leftTerms, Terms rightTerms) throws Exception {
+ public void assertTermsStatistics(IndexedField leftTerms, IndexedField rightTerms) throws Exception {
if (leftTerms.getDocCount() != -1 && rightTerms.getDocCount() != -1) {
assertEquals(leftTerms.getDocCount(), rightTerms.getDocCount());
}
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene70/TestLucene70DocValuesFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene70/TestLucene70DocValuesFormat.java
index 6cca55e3a48a..4bb6908e2fa7 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/lucene70/TestLucene70DocValuesFormat.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene70/TestLucene70DocValuesFormat.java
@@ -60,7 +60,7 @@
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum.SeekStatus;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.index.TermsEnum;
@@ -345,11 +345,11 @@ public DocValuesFormat getDocValuesFormatForField(String field) {
DirectoryReader ir = writer.getReader();
for (LeafReaderContext context : ir.leaves()) {
LeafReader r = context.reader();
- Terms terms = r.terms("indexed");
+ IndexedField terms = r.indexedField("indexed");
if (terms != null) {
SortedSetDocValues ssdv = r.getSortedSetDocValues("dv");
assertEquals(terms.size(), ssdv.getValueCount());
- TermsEnum expected = terms.iterator();
+ TermsEnum expected = terms.getTermsEnum();
TermsEnum actual = r.getSortedSetDocValues("dv").termsEnum();
assertEquals(terms.size(), expected, actual);
@@ -363,10 +363,10 @@ public DocValuesFormat getDocValuesFormatForField(String field) {
// now compare again after the merge
ir = writer.getReader();
LeafReader ar = getOnlyLeafReader(ir);
- Terms terms = ar.terms("indexed");
+ IndexedField terms = ar.indexedField("indexed");
if (terms != null) {
assertEquals(terms.size(), ar.getSortedSetDocValues("dv").getValueCount());
- TermsEnum expected = terms.iterator();
+ TermsEnum expected = terms.getTermsEnum();
TermsEnum actual = ar.getSortedSetDocValues("dv").termsEnum();
assertEquals(terms.size(), expected, actual);
}
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat2.java b/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat2.java
index 804f50785598..c822edac6d5b 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat2.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat2.java
@@ -41,7 +41,7 @@
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
@@ -407,7 +407,7 @@ public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException
final FieldsConsumer consumer = delegate.fieldsConsumer(state);
return new FieldsConsumer() {
@Override
- public void write(Fields fields) throws IOException {
+ public void write(IndexedFields fields) throws IOException {
consumer.write(fields);
}
diff --git a/lucene/core/src/test/org/apache/lucene/index/Test2BDocs.java b/lucene/core/src/test/org/apache/lucene/index/Test2BDocs.java
index 4fab45a1a472..1d08875009e5 100644
--- a/lucene/core/src/test/org/apache/lucene/index/Test2BDocs.java
+++ b/lucene/core/src/test/org/apache/lucene/index/Test2BDocs.java
@@ -91,9 +91,9 @@ public void test2BDocs() throws Exception {
LeafReader reader = context.reader();
int lim = context.reader().maxDoc();
- Terms terms = reader.fields().terms("f1");
+ IndexedField terms = reader.fields().indexedField("f1");
for (int i=0; i<10000; i++) {
- TermsEnum te = terms.iterator();
+ TermsEnum te = terms.getTermsEnum();
assertTrue( te.seekExact(term) );
PostingsEnum docs = te.postings(null);
diff --git a/lucene/core/src/test/org/apache/lucene/index/Test2BTerms.java b/lucene/core/src/test/org/apache/lucene/index/Test2BTerms.java
index 22d12346d4e9..351bdbe7d28e 100644
--- a/lucene/core/src/test/org/apache/lucene/index/Test2BTerms.java
+++ b/lucene/core/src/test/org/apache/lucene/index/Test2BTerms.java
@@ -237,7 +237,7 @@ public void test2BTerms() throws IOException {
private List<BytesRef> findTerms(IndexReader r) throws IOException {
System.out.println("TEST: findTerms");
- final TermsEnum termsEnum = MultiFields.getTerms(r, "field").iterator();
+ final TermsEnum termsEnum = MultiFields.getIndexedField(r, "field").getTermsEnum();
final List<BytesRef> savedTerms = new ArrayList<>();
int nextSave = TestUtil.nextInt(random(), 500000, 1000000);
BytesRef term;
@@ -255,7 +255,7 @@ private void testSavedTerms(IndexReader r, List terms) throws IOExcept
System.out.println("TEST: run " + terms.size() + " terms on reader=" + r);
IndexSearcher s = newSearcher(r);
Collections.shuffle(terms, random());
- TermsEnum termsEnum = MultiFields.getTerms(r, "field").iterator();
+ TermsEnum termsEnum = MultiFields.getIndexedField(r, "field").getTermsEnum();
boolean failed = false;
for(int iter=0;iter<10*terms.size();iter++) {
final BytesRef term = terms.get(random().nextInt(terms.size()));
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestBagOfPositions.java b/lucene/core/src/test/org/apache/lucene/index/TestBagOfPositions.java
index 6214e4cde1da..eaa61deb261a 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestBagOfPositions.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestBagOfPositions.java
@@ -134,10 +134,10 @@ public void run() {
DirectoryReader ir = iw.getReader();
assertEquals(1, ir.leaves().size());
LeafReader air = ir.leaves().get(0).reader();
- Terms terms = air.terms("field");
+ IndexedField terms = air.indexedField("field");
// numTerms-1 because there cannot be a term 0 with 0 postings:
assertEquals(numTerms-1, terms.size());
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
BytesRef term;
while ((term = termsEnum.next()) != null) {
int value = Integer.parseInt(term.utf8ToString());
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestBagOfPostings.java b/lucene/core/src/test/org/apache/lucene/index/TestBagOfPostings.java
index 8cd255195670..617c59fe1ff4 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestBagOfPostings.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestBagOfPostings.java
@@ -126,10 +126,10 @@ public void run() {
DirectoryReader ir = iw.getReader();
assertEquals(1, ir.leaves().size());
LeafReader air = ir.leaves().get(0).reader();
- Terms terms = air.terms("field");
+ IndexedField terms = air.indexedField("field");
// numTerms-1 because there cannot be a term 0 with 0 postings:
assertEquals(numTerms-1, terms.size());
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
BytesRef term;
while ((term = termsEnum.next()) != null) {
int value = Integer.parseInt(term.utf8ToString());
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java b/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
index 79783b500ca7..b23c15dfe32e 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
@@ -225,10 +225,10 @@ public void testFixedPostings() throws Throwable {
final Iterator<String> fieldsEnum = reader.iterator();
String fieldName = fieldsEnum.next();
assertNotNull(fieldName);
- final Terms terms2 = reader.terms(fieldName);
+ final IndexedField terms2 = reader.indexedField(fieldName);
assertNotNull(terms2);
- final TermsEnum termsEnum = terms2.iterator();
+ final TermsEnum termsEnum = terms2.getTermsEnum();
PostingsEnum postingsEnum = null;
for(int i=0;i fenum2 = fields2.iterator();
for (String field1 : fields1) {
assertEquals("Different fields", field1, fenum2.next());
- Terms terms1 = fields1.terms(field1);
+ IndexedField terms1 = fields1.indexedField(field1);
if (terms1 == null) {
- assertNull(fields2.terms(field1));
+ assertNull(fields2.indexedField(field1));
continue;
}
- TermsEnum enum1 = terms1.iterator();
+ TermsEnum enum1 = terms1.getTermsEnum();
- Terms terms2 = fields2.terms(field1);
+ IndexedField terms2 = fields2.indexedField(field1);
assertNotNull(terms2);
- TermsEnum enum2 = terms2.iterator();
+ TermsEnum enum2 = terms2.getTermsEnum();
while(enum1.next() != null) {
assertEquals("Different terms", enum1.term(), enum2.next());
@@ -768,8 +768,8 @@ public void testUniqueTermCount() throws Exception {
DirectoryReader r = DirectoryReader.open(dir);
LeafReader r1 = getOnlyLeafReader(r);
- assertEquals(26, r1.terms("field").size());
- assertEquals(10, r1.terms("number").size());
+ assertEquals(26, r1.indexedField("field").size());
+ assertEquals(10, r1.indexedField("number").size());
writer.addDocument(doc);
writer.commit();
DirectoryReader r2 = DirectoryReader.openIfChanged(r);
@@ -777,8 +777,8 @@ public void testUniqueTermCount() throws Exception {
r.close();
for(LeafReaderContext s : r2.leaves()) {
- assertEquals(26, s.reader().terms("field").size());
- assertEquals(10, s.reader().terms("number").size());
+ assertEquals(26, s.reader().indexedField("field").size());
+ assertEquals(10, s.reader().indexedField("number").size());
}
r2.close();
writer.close();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDoc.java b/lucene/core/src/test/org/apache/lucene/index/TestDoc.java
index 8b24b4d7bc51..e4b1aa53dc52 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDoc.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDoc.java
@@ -249,11 +249,11 @@ private void printSegment(PrintWriter out, SegmentCommitInfo si)
for (int i = 0; i < reader.numDocs(); i++)
out.println(reader.document(i));
- Fields fields = reader.fields();
+ IndexedFields fields = reader.fields();
for (String field : fields) {
- Terms terms = fields.terms(field);
+ IndexedField terms = fields.indexedField(field);
assertNotNull(terms);
- TermsEnum tis = terms.iterator();
+ TermsEnum tis = terms.getTermsEnum();
while(tis.next() != null) {
out.print(" term=" + field + ":" + tis.term());
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocCount.java b/lucene/core/src/test/org/apache/lucene/index/TestDocCount.java
index 0f221e32d279..4230a480831f 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDocCount.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDocCount.java
@@ -26,7 +26,7 @@
import org.apache.lucene.util.TestUtil;
/**
- * Tests the Terms.docCount statistic
+ * Tests the IndexedField.docCount statistic
*/
public class TestDocCount extends LuceneTestCase {
public void testSimple() throws Exception {
@@ -57,15 +57,15 @@ private Document doc() {
}
private void verifyCount(IndexReader ir) throws Exception {
- Fields fields = MultiFields.getFields(ir);
+ IndexedFields fields = MultiFields.getFields(ir);
for (String field : fields) {
- Terms terms = fields.terms(field);
+ IndexedField terms = fields.indexedField(field);
if (terms == null) {
continue;
}
int docCount = terms.getDocCount();
FixedBitSet visited = new FixedBitSet(ir.maxDoc());
- TermsEnum te = terms.iterator();
+ TermsEnum te = terms.getTermsEnum();
while (te.next() != null) {
PostingsEnum de = TestUtil.docs(random(), te, null, PostingsEnum.NONE);
while (de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocsAndPositions.java b/lucene/core/src/test/org/apache/lucene/index/TestDocsAndPositions.java
index cc1f2be04b9c..031aa454b4a5 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDocsAndPositions.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDocsAndPositions.java
@@ -91,9 +91,9 @@ public void testPositionsSimple() throws IOException {
public PostingsEnum getDocsAndPositions(LeafReader reader,
BytesRef bytes) throws IOException {
- Terms terms = reader.terms(fieldName);
+ IndexedField terms = reader.indexedField(fieldName);
if (terms != null) {
- TermsEnum te = terms.iterator();
+ TermsEnum te = terms.getTermsEnum();
if (te.seekExact(bytes)) {
return te.postings(null, PostingsEnum.ALL);
}
@@ -341,7 +341,7 @@ public void testDocsEnumStart() throws Exception {
assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
// now reuse and check again
- TermsEnum te = r.terms("foo").iterator();
+ TermsEnum te = r.indexedField("foo").getTermsEnum();
assertTrue(te.seekExact(new BytesRef("bar")));
disi = TestUtil.docs(random(), te, disi, PostingsEnum.NONE);
docid = disi.docID();
@@ -366,7 +366,7 @@ public void testDocsAndPositionsEnumStart() throws Exception {
assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
// now reuse and check again
- TermsEnum te = r.terms("foo").iterator();
+ TermsEnum te = r.indexedField("foo").getTermsEnum();
assertTrue(te.seekExact(new BytesRef("bar")));
disi = te.postings(disi, PostingsEnum.ALL);
docid = disi.docID();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestExitableDirectoryReader.java b/lucene/core/src/test/org/apache/lucene/index/TestExitableDirectoryReader.java
index 71406c8759f9..bc97c60bdd3b 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestExitableDirectoryReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestExitableDirectoryReader.java
@@ -32,30 +32,30 @@
/**
* Test that uses a default/lucene Implementation of {@link QueryTimeout}
- * to exit out long running queries that take too long to iterate over Terms.
+ * to exit out long running queries that take too long to iterate over IndexedField.
*/
public class TestExitableDirectoryReader extends LuceneTestCase {
private static class TestReader extends FilterLeafReader {
private static class TestFields extends FilterFields {
- TestFields(Fields in) {
+ TestFields(IndexedFields in) {
super(in);
}
@Override
- public Terms terms(String field) throws IOException {
- return new TestTerms(super.terms(field));
+ public IndexedField indexedField(String field) throws IOException {
+ return new TestTerms(super.indexedField(field));
}
}
- private static class TestTerms extends FilterTerms {
- TestTerms(Terms in) {
+ private static class TestTerms extends FilterField {
+ TestTerms(IndexedField in) {
super(in);
}
@Override
- public TermsEnum iterator() throws IOException {
- return new TestTermsEnum(super.iterator());
+ public TermsEnum getTermsEnum() throws IOException {
+ return new TestTermsEnum(super.getTermsEnum());
}
}
@@ -83,7 +83,7 @@ public TestReader(LeafReader reader) throws IOException {
}
@Override
- public Fields fields() throws IOException {
+ public IndexedFields fields() throws IOException {
return new TestFields(super.fields());
}
}
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestFilterLeafReader.java b/lucene/core/src/test/org/apache/lucene/index/TestFilterLeafReader.java
index e9f6fe28854e..d6108d3be61d 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestFilterLeafReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestFilterLeafReader.java
@@ -37,24 +37,24 @@ private static class TestReader extends FilterLeafReader {
/** Filter that only permits terms containing 'e'.*/
private static class TestFields extends FilterFields {
- TestFields(Fields in) {
+ TestFields(IndexedFields in) {
super(in);
}
@Override
- public Terms terms(String field) throws IOException {
- return new TestTerms(super.terms(field));
+ public IndexedField indexedField(String field) throws IOException {
+ return new TestTerms(super.indexedField(field));
}
}
- private static class TestTerms extends FilterTerms {
- TestTerms(Terms in) {
+ private static class TestTerms extends FilterField {
+ TestTerms(IndexedField in) {
super(in);
}
@Override
- public TermsEnum iterator() throws IOException {
- return new TestTermsEnum(super.iterator());
+ public TermsEnum getTermsEnum() throws IOException {
+ return new TestTermsEnum(super.getTermsEnum());
}
}
@@ -103,7 +103,7 @@ public TestReader(LeafReader reader) throws IOException {
}
@Override
- public Fields fields() throws IOException {
+ public IndexedFields fields() throws IOException {
return new TestFields(super.fields());
}
}
@@ -143,7 +143,7 @@ public void testFilterIndexReader() throws Exception {
writer.close();
IndexReader reader = DirectoryReader.open(target);
- TermsEnum terms = MultiFields.getTerms(reader, "default").iterator();
+ TermsEnum terms = MultiFields.getIndexedField(reader, "default").getTermsEnum();
while (terms.next() != null) {
assertTrue(terms.term().utf8ToString().indexOf('e') != -1);
}
@@ -185,7 +185,7 @@ private static void checkOverrideMethods(Class> clazz) throws NoSuchMethodExce
public void testOverrideMethods() throws Exception {
checkOverrideMethods(FilterLeafReader.class);
checkOverrideMethods(FilterLeafReader.FilterFields.class);
- checkOverrideMethods(FilterLeafReader.FilterTerms.class);
+ checkOverrideMethods(FilterLeafReader.FilterField.class);
checkOverrideMethods(FilterLeafReader.FilterTermsEnum.class);
checkOverrideMethods(FilterLeafReader.FilterPostingsEnum.class);
}
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestFlex.java b/lucene/core/src/test/org/apache/lucene/index/TestFlex.java
index d91301fa6acd..219afe4a579a 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestFlex.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestFlex.java
@@ -52,7 +52,7 @@ public void testNonFlex() throws Exception {
IndexReader r = w.getReader();
- TermsEnum terms = MultiFields.getTerms(r, "field3").iterator();
+ TermsEnum terms = MultiFields.getIndexedField(r, "field3").getTermsEnum();
assertEquals(TermsEnum.SeekStatus.END, terms.seekCeil(new BytesRef("abc")));
r.close();
}
@@ -70,7 +70,7 @@ public void testTermOrd() throws Exception {
w.addDocument(doc);
w.forceMerge(1);
DirectoryReader r = w.getReader();
- TermsEnum terms = getOnlyLeafReader(r).fields().terms("f").iterator();
+ TermsEnum terms = getOnlyLeafReader(r).fields().indexedField("f").getTermsEnum();
assertTrue(terms.next() != null);
try {
assertEquals(0, terms.ord());
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
index e4f0ab0d55d1..7df7cc02bb76 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
@@ -693,7 +693,7 @@ public void testEmptyFieldNameTerms() throws IOException {
writer.close();
DirectoryReader reader = DirectoryReader.open(dir);
LeafReader subreader = getOnlyLeafReader(reader);
- TermsEnum te = subreader.fields().terms("").iterator();
+ TermsEnum te = subreader.fields().indexedField("").getTermsEnum();
assertEquals(new BytesRef("a"), te.next());
assertEquals(new BytesRef("b"), te.next());
assertEquals(new BytesRef("c"), te.next());
@@ -714,7 +714,7 @@ public void testEmptyFieldNameWithEmptyTerm() throws IOException {
writer.close();
DirectoryReader reader = DirectoryReader.open(dir);
LeafReader subreader = getOnlyLeafReader(reader);
- TermsEnum te = subreader.fields().terms("").iterator();
+ TermsEnum te = subreader.fields().indexedField("").getTermsEnum();
assertEquals(new BytesRef(""), te.next());
assertEquals(new BytesRef("a"), te.next());
assertEquals(new BytesRef("b"), te.next());
@@ -823,8 +823,8 @@ public void testPositionIncrementGapEmptyField() throws Exception {
w.close();
IndexReader r = DirectoryReader.open(dir);
- Terms tpv = r.getTermVectors(0).terms("field");
- TermsEnum termsEnum = tpv.iterator();
+ IndexedField tpv = r.getTermVectors(0).indexedField("field");
+ TermsEnum termsEnum = tpv.getTermsEnum();
assertNotNull(termsEnum.next());
PostingsEnum dpEnum = termsEnum.postings(null, PostingsEnum.ALL);
assertNotNull(dpEnum);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java
index f45cf210dba8..795ba7072067 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java
@@ -136,7 +136,7 @@ private final String termDesc(String s) {
}
private void checkTermsOrder(IndexReader r, Set<String> allTerms, boolean isTop) throws IOException {
- TermsEnum terms = MultiFields.getFields(r).terms("f").iterator();
+ TermsEnum terms = MultiFields.getFields(r).indexedField("f").getTermsEnum();
BytesRefBuilder last = new BytesRefBuilder();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java
index 67edab90437c..1c2fa25f5c14 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java
@@ -273,9 +273,9 @@ public void remove() {
if (indexed) {
final boolean tv = counter % 2 == 1 && fieldID != 9;
if (tv) {
- final Terms tfv = r.getTermVectors(docID).terms(name);
+ final IndexedField tfv = r.getTermVectors(docID).indexedField(name);
assertNotNull(tfv);
- TermsEnum termsEnum = tfv.iterator();
+ TermsEnum termsEnum = tfv.getTermsEnum();
assertEquals(new BytesRef(""+counter), termsEnum.next());
assertEquals(1, termsEnum.totalTermFreq());
PostingsEnum dpEnum = termsEnum.postings(null, PostingsEnum.ALL);
@@ -295,8 +295,8 @@ public void remove() {
// TODO: offsets
} else {
- Fields vectors = r.getTermVectors(docID);
- assertTrue(vectors == null || vectors.terms(name) == null);
+ IndexedFields vectors = r.getTermVectors(docID);
+ assertTrue(vectors == null || vectors.indexedField(name) == null);
}
BooleanQuery.Builder bq = new BooleanQuery.Builder();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestMultiTermsEnum.java b/lucene/core/src/test/org/apache/lucene/index/TestMultiTermsEnum.java
index ac352c1318c7..a18bcd89cec6 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestMultiTermsEnum.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestMultiTermsEnum.java
@@ -36,7 +36,7 @@
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
@@ -89,15 +89,15 @@ private static class MigratingFieldsProducer extends BaseMigratingFieldsProducer
}
@Override
- public Terms terms(String field) throws IOException {
+ public IndexedField indexedField(String field) throws IOException {
if ("deleted".equals(field)) {
- Terms deletedTerms = super.terms("deleted");
+ IndexedField deletedTerms = super.indexedField("deleted");
if (deletedTerms != null) {
return new ValueFilteredTerms(deletedTerms, new BytesRef("1"));
}
return null;
} else {
- return super.terms(field);
+ return super.indexedField(field);
}
}
@@ -106,19 +106,19 @@ protected FieldsProducer create(FieldsProducer delegate, FieldInfos newFieldInfo
return new MigratingFieldsProducer(delegate, newFieldInfo);
}
- private static class ValueFilteredTerms extends Terms {
+ private static class ValueFilteredTerms extends IndexedField {
- private final Terms delegate;
+ private final IndexedField delegate;
private final BytesRef value;
- public ValueFilteredTerms(Terms delegate, BytesRef value) {
+ public ValueFilteredTerms(IndexedField delegate, BytesRef value) {
this.delegate = delegate;
this.value = value;
}
@Override
- public TermsEnum iterator() throws IOException {
- return new FilteredTermsEnum(delegate.iterator()) {
+ public TermsEnum getTermsEnum() throws IOException {
+ return new FilteredTermsEnum(delegate.getTermsEnum()) {
@Override
protected AcceptStatus accept(BytesRef term) {
@@ -232,8 +232,8 @@ public int size() {
}
@Override
- public Terms terms(String field) throws IOException {
- return delegate.terms(field);
+ public IndexedField indexedField(String field) throws IOException {
+ return delegate.indexedField(field);
}
@Override
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java b/lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java
index 3efdc8b3d723..1f71b25b9322 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java
@@ -291,11 +291,11 @@ public void testIgnoreStoredFields() throws IOException {
assertNull(pr.document(0).get("f3"));
assertNull(pr.document(0).get("f4"));
// check that fields are there
- Fields slow = MultiFields.getFields(pr);
- assertNotNull(slow.terms("f1"));
- assertNotNull(slow.terms("f2"));
- assertNotNull(slow.terms("f3"));
- assertNotNull(slow.terms("f4"));
+ IndexedFields slow = MultiFields.getFields(pr);
+ assertNotNull(slow.indexedField("f1"));
+ assertNotNull(slow.indexedField("f2"));
+ assertNotNull(slow.indexedField("f3"));
+ assertNotNull(slow.indexedField("f4"));
pr.close();
// no stored fields at all
@@ -308,10 +308,10 @@ public void testIgnoreStoredFields() throws IOException {
assertNull(pr.document(0).get("f4"));
// check that fields are there
slow = MultiFields.getFields(pr);
- assertNull(slow.terms("f1"));
- assertNull(slow.terms("f2"));
- assertNotNull(slow.terms("f3"));
- assertNotNull(slow.terms("f4"));
+ assertNull(slow.indexedField("f1"));
+ assertNull(slow.indexedField("f2"));
+ assertNotNull(slow.indexedField("f3"));
+ assertNotNull(slow.indexedField("f4"));
pr.close();
// without overlapping
@@ -324,10 +324,10 @@ public void testIgnoreStoredFields() throws IOException {
assertNull(pr.document(0).get("f4"));
// check that fields are there
slow = MultiFields.getFields(pr);
- assertNull(slow.terms("f1"));
- assertNull(slow.terms("f2"));
- assertNotNull(slow.terms("f3"));
- assertNotNull(slow.terms("f4"));
+ assertNull(slow.indexedField("f1"));
+ assertNull(slow.indexedField("f2"));
+ assertNotNull(slow.indexedField("f3"));
+ assertNotNull(slow.indexedField("f4"));
pr.close();
// no main readers
@@ -380,7 +380,7 @@ private void queryTest(Query query) throws IOException {
}
}
- // Fields 1-4 indexed together:
+ // IndexedFields 1-4 indexed together:
private IndexSearcher single(Random random, boolean compositeComposite) throws IOException {
dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random)));
@@ -419,7 +419,7 @@ private IndexSearcher single(Random random, boolean compositeComposite) throws I
return newSearcher(ir);
}
- // Fields 1 & 2 in one index, 3 & 4 in other, with ParallelReader:
+ // IndexedFields 1 & 2 in one index, 3 & 4 in other, with ParallelReader:
private IndexSearcher parallel(Random random, boolean compositeComposite) throws IOException {
dir1 = getDir1(random);
dir2 = getDir2(random);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestParallelLeafReader.java b/lucene/core/src/test/org/apache/lucene/index/TestParallelLeafReader.java
index 35523f352a76..39935d4fcee7 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestParallelLeafReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestParallelLeafReader.java
@@ -188,10 +188,10 @@ public void testIgnoreStoredFields() throws IOException {
assertNull(pr.document(0).get("f3"));
assertNull(pr.document(0).get("f4"));
// check that fields are there
- assertNotNull(pr.terms("f1"));
- assertNotNull(pr.terms("f2"));
- assertNotNull(pr.terms("f3"));
- assertNotNull(pr.terms("f4"));
+ assertNotNull(pr.indexedField("f1"));
+ assertNotNull(pr.indexedField("f2"));
+ assertNotNull(pr.indexedField("f3"));
+ assertNotNull(pr.indexedField("f4"));
pr.close();
// no stored fields at all
@@ -203,10 +203,10 @@ public void testIgnoreStoredFields() throws IOException {
assertNull(pr.document(0).get("f3"));
assertNull(pr.document(0).get("f4"));
// check that fields are there
- assertNull(pr.terms("f1"));
- assertNull(pr.terms("f2"));
- assertNotNull(pr.terms("f3"));
- assertNotNull(pr.terms("f4"));
+ assertNull(pr.indexedField("f1"));
+ assertNull(pr.indexedField("f2"));
+ assertNotNull(pr.indexedField("f3"));
+ assertNotNull(pr.indexedField("f4"));
pr.close();
// without overlapping
@@ -218,10 +218,10 @@ public void testIgnoreStoredFields() throws IOException {
assertNull(pr.document(0).get("f3"));
assertNull(pr.document(0).get("f4"));
// check that fields are there
- assertNull(pr.terms("f1"));
- assertNull(pr.terms("f2"));
- assertNotNull(pr.terms("f3"));
- assertNotNull(pr.terms("f4"));
+ assertNull(pr.indexedField("f1"));
+ assertNull(pr.indexedField("f2"));
+ assertNotNull(pr.indexedField("f3"));
+ assertNotNull(pr.indexedField("f4"));
pr.close();
// no main readers
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java b/lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java
index 25d05ca155be..a719c78acaff 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java
@@ -71,9 +71,9 @@ public void tearDown() throws Exception {
super.tearDown();
}
- private void checkTerms(Terms terms, String... termsList) throws IOException {
+ private void checkTerms(IndexedField terms, String... termsList) throws IOException {
assertNotNull(terms);
- final TermsEnum te = terms.iterator();
+ final TermsEnum te = terms.getTermsEnum();
for (String t : termsList) {
BytesRef b = te.next();
@@ -90,20 +90,20 @@ private void checkTerms(Terms terms, String... termsList) throws IOException {
public void test1() throws IOException {
ParallelLeafReader pr = new ParallelLeafReader(ir1, ir2);
- Fields fields = pr.fields();
+ IndexedFields fields = pr.fields();
Iterator fe = fields.iterator();
String f = fe.next();
assertEquals("field1", f);
- checkTerms(fields.terms(f), "brown", "fox", "jumps", "quick", "the");
+ checkTerms(fields.indexedField(f), "brown", "fox", "jumps", "quick", "the");
f = fe.next();
assertEquals("field2", f);
- checkTerms(fields.terms(f), "brown", "fox", "jumps", "quick", "the");
+ checkTerms(fields.indexedField(f), "brown", "fox", "jumps", "quick", "the");
f = fe.next();
assertEquals("field3", f);
- checkTerms(fields.terms(f), "dog", "fox", "jumps", "lazy", "over", "the");
+ checkTerms(fields.indexedField(f), "dog", "fox", "jumps", "lazy", "over", "the");
assertFalse(fe.hasNext());
}
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java b/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java
index e893d8786a1b..6cbf9872ec11 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java
@@ -479,7 +479,7 @@ public void run() {
}
writer.close();
IndexReader reader = DirectoryReader.open(dir);
- TermsEnum terms = MultiFields.getFields(reader).terms(field).iterator();
+ TermsEnum terms = MultiFields.getFields(reader).indexedField(field).getTermsEnum();
PostingsEnum tp = null;
while (terms.next() != null) {
String termText = terms.term().utf8ToString();
@@ -602,7 +602,7 @@ public void testMixupDocs() throws Exception {
field.setTokenStream(ts);
writer.addDocument(doc);
DirectoryReader reader = writer.getReader();
- TermsEnum te = MultiFields.getFields(reader).terms("field").iterator();
+ TermsEnum te = MultiFields.getFields(reader).indexedField("field").getTermsEnum();
assertTrue(te.seekExact(new BytesRef("withPayload")));
PostingsEnum de = te.postings(null, PostingsEnum.PAYLOADS);
de.nextDoc();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPayloadsOnVectors.java b/lucene/core/src/test/org/apache/lucene/index/TestPayloadsOnVectors.java
index a90a5d25bbc2..b06e9d156a2d 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestPayloadsOnVectors.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestPayloadsOnVectors.java
@@ -68,9 +68,9 @@ public void testMixupDocs() throws Exception {
writer.addDocument(doc);
DirectoryReader reader = writer.getReader();
- Terms terms = reader.getTermVector(1, "field");
+ IndexedField terms = reader.getTermVector(1, "field");
assert terms != null;
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
assertTrue(termsEnum.seekExact(new BytesRef("withPayload")));
PostingsEnum de = termsEnum.postings(null, PostingsEnum.ALL);
assertEquals(0, de.nextDoc());
@@ -110,9 +110,9 @@ public void testMixupMultiValued() throws Exception {
doc.add(field3);
writer.addDocument(doc);
DirectoryReader reader = writer.getReader();
- Terms terms = reader.getTermVector(0, "field");
+ IndexedField terms = reader.getTermVector(0, "field");
assert terms != null;
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
assertTrue(termsEnum.seekExact(new BytesRef("withPayload")));
PostingsEnum de = termsEnum.postings(null, PostingsEnum.ALL);
assertEquals(0, de.nextDoc());
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java b/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
index 34d8afbd19c8..0db51f578f31 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
@@ -221,9 +221,9 @@ public static void printDelDocs(Bits bits) {
public int[] toDocsArray(Term term, Bits bits, IndexReader reader)
throws IOException {
- Fields fields = MultiFields.getFields(reader);
- Terms cterms = fields.terms(term.field);
- TermsEnum ctermsEnum = cterms.iterator();
+ IndexedFields fields = MultiFields.getFields(reader);
+ IndexedField cterms = fields.indexedField(term.field);
+ TermsEnum ctermsEnum = cterms.getTermsEnum();
if (ctermsEnum.seekExact(new BytesRef(term.text()))) {
PostingsEnum postingsEnum = TestUtil.docs(random(), ctermsEnum, null, PostingsEnum.NONE);
return toArray(postingsEnum);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java b/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java
index c265d9286654..494e6fbfccdc 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java
@@ -291,7 +291,7 @@ public void testRandom() throws Exception {
// TODO: improve this
LeafReader sub = ctx.reader();
//System.out.println("\nsub=" + sub);
- final TermsEnum termsEnum = sub.fields().terms("content").iterator();
+ final TermsEnum termsEnum = sub.fields().indexedField("content").getTermsEnum();
PostingsEnum docs = null;
PostingsEnum docsAndPositions = null;
PostingsEnum docsAndPositionsAndOffsets = null;
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java b/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java
index 1ef37c0892f1..95e27baf05b8 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java
@@ -126,10 +126,10 @@ public void testMerge() throws IOException {
//System.out.println("stored size: " + stored.size());
assertEquals("We do not have 3 fields that were indexed with term vector", 3, tvCount);
- Terms vector = mergedReader.getTermVectors(0).terms(DocHelper.TEXT_FIELD_2_KEY);
+ IndexedField vector = mergedReader.getTermVectors(0).indexedField(DocHelper.TEXT_FIELD_2_KEY);
assertNotNull(vector);
assertEquals(3, vector.size());
- TermsEnum termsEnum = vector.iterator();
+ TermsEnum termsEnum = vector.getTermsEnum();
int i = 0;
while (termsEnum.next() != null) {
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java b/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java
index f008afe7a798..ea313971cc7e 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java
@@ -114,11 +114,11 @@ public void testGetFieldNameVariations() {
}
public void testTerms() throws IOException {
- Fields fields = MultiFields.getFields(reader);
+ IndexedFields fields = MultiFields.getFields(reader);
for (String field : fields) {
- Terms terms = fields.terms(field);
+ IndexedField terms = fields.indexedField(field);
assertNotNull(terms);
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
while(termsEnum.next() != null) {
BytesRef term = termsEnum.term();
assertTrue(term != null);
@@ -186,10 +186,10 @@ public static void checkNorms(LeafReader reader) throws IOException {
}
public void testTermVectors() throws IOException {
- Terms result = reader.getTermVectors(0).terms(DocHelper.TEXT_FIELD_2_KEY);
+ IndexedField result = reader.getTermVectors(0).indexedField(DocHelper.TEXT_FIELD_2_KEY);
assertNotNull(result);
assertEquals(3, result.size());
- TermsEnum termsEnum = result.iterator();
+ TermsEnum termsEnum = result.getTermsEnum();
while(termsEnum.next() != null) {
String term = termsEnum.term().utf8ToString();
int freq = (int) termsEnum.totalTermFreq();
@@ -197,7 +197,7 @@ public void testTermVectors() throws IOException {
assertTrue(freq > 0);
}
- Fields results = reader.getTermVectors(0);
+ IndexedFields results = reader.getTermVectors(0);
assertTrue(results != null);
assertEquals("We do not have 3 term freq vectors", 3, results.size());
}
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java b/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java
index 7acf3e449734..e9b0b74170c6 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java
@@ -56,7 +56,7 @@ public void testTermDocs() throws IOException {
SegmentReader reader = new SegmentReader(info, newIOContext(random()));
assertTrue(reader != null);
- TermsEnum terms = reader.fields().terms(DocHelper.TEXT_FIELD_2_KEY).iterator();
+ TermsEnum terms = reader.fields().indexedField(DocHelper.TEXT_FIELD_2_KEY).getTermsEnum();
terms.seekCeil(new BytesRef("field"));
PostingsEnum termDocs = TestUtil.docs(random(), terms, null, PostingsEnum.FREQS);
if (termDocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermEnum.java b/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermEnum.java
index 1e85e14c9de2..f275fe184e8c 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermEnum.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermEnum.java
@@ -80,7 +80,7 @@ public void testPrevTermAtEnd() throws IOException
addDoc(writer, "aaa bbb");
writer.close();
LeafReader reader = getOnlyLeafReader(DirectoryReader.open(dir));
- TermsEnum terms = reader.fields().terms("content").iterator();
+ TermsEnum terms = reader.fields().indexedField("content").getTermsEnum();
assertNotNull(terms.next());
assertEquals("aaa", terms.term().utf8ToString());
assertNotNull(terms.next());
@@ -104,7 +104,7 @@ private void verifyDocFreq()
throws IOException
{
IndexReader reader = DirectoryReader.open(dir);
- TermsEnum termEnum = MultiFields.getTerms(reader, "content").iterator();
+ TermsEnum termEnum = MultiFields.getIndexedField(reader, "content").getTermsEnum();
// create enumeration of all terms
// go to the first term (aaa)
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java b/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java
index 606a11aff65a..5e75d537353a 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java
@@ -74,7 +74,7 @@ public void testStressAdvance() throws Exception {
bDocIDs.add(docID);
}
}
- final TermsEnum te = getOnlyLeafReader(r).fields().terms("field").iterator();
+ final TermsEnum te = getOnlyLeafReader(r).fields().indexedField("field").getTermsEnum();
PostingsEnum de = null;
for(int iter2=0;iter2<10;iter2++) {
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java b/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java
index 3cd9a6a6c0a6..5881af3cba97 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java
@@ -310,25 +310,25 @@ public void verifyEquals(DirectoryReader r1, DirectoryReader r2, String idField)
int[] r2r1 = new int[r2.maxDoc()]; // r2 id to r1 id mapping
// create mapping from id2 space to id2 based on idField
- final Fields f1 = MultiFields.getFields(r1);
+ final IndexedFields f1 = MultiFields.getFields(r1);
if (f1 == null) {
// make sure r2 is empty
assertNull(MultiFields.getFields(r2));
return;
}
- final Terms terms1 = f1.terms(idField);
+ final IndexedField terms1 = f1.indexedField(idField);
if (terms1 == null) {
assertTrue(MultiFields.getFields(r2) == null ||
- MultiFields.getFields(r2).terms(idField) == null);
+ MultiFields.getFields(r2).indexedField(idField) == null);
return;
}
- final TermsEnum termsEnum = terms1.iterator();
+ final TermsEnum termsEnum = terms1.getTermsEnum();
final Bits liveDocs1 = MultiFields.getLiveDocs(r1);
final Bits liveDocs2 = MultiFields.getLiveDocs(r2);
- Fields fields = MultiFields.getFields(r2);
- Terms terms2 = fields.terms(idField);
+ IndexedFields fields = MultiFields.getFields(r2);
+ IndexedField terms2 = fields.indexedField(idField);
if (fields.size() == 0 || terms2 == null) {
// make sure r1 is in fact empty (eg has only all
// deleted docs):
@@ -342,7 +342,7 @@ public void verifyEquals(DirectoryReader r1, DirectoryReader r2, String idField)
}
return;
}
- TermsEnum termsEnum2 = terms2.iterator();
+ TermsEnum termsEnum2 = terms2.getTermsEnum();
PostingsEnum termDocs1 = null;
PostingsEnum termDocs2 = null;
@@ -391,16 +391,16 @@ public void verifyEquals(DirectoryReader r1, DirectoryReader r2, String idField)
verifyEquals(r1.getTermVectors(id1), r2.getTermVectors(id2));
} catch (Throwable e) {
System.out.println("FAILED id=" + term + " id1=" + id1 + " id2=" + id2);
- Fields tv1 = r1.getTermVectors(id1);
+ IndexedFields tv1 = r1.getTermVectors(id1);
System.out.println(" d1=" + tv1);
if (tv1 != null) {
PostingsEnum dpEnum = null;
PostingsEnum dEnum = null;
for (String field : tv1) {
System.out.println(" " + field + ":");
- Terms terms3 = tv1.terms(field);
+ IndexedField terms3 = tv1.indexedField(field);
assertNotNull(terms3);
- TermsEnum termsEnum3 = terms3.iterator();
+ TermsEnum termsEnum3 = terms3.getTermsEnum();
BytesRef term2;
while((term2 = termsEnum3.next()) != null) {
System.out.println(" " + term2.utf8ToString() + ": freq=" + termsEnum3.totalTermFreq());
@@ -423,16 +423,16 @@ public void verifyEquals(DirectoryReader r1, DirectoryReader r2, String idField)
}
}
- Fields tv2 = r2.getTermVectors(id2);
+ IndexedFields tv2 = r2.getTermVectors(id2);
System.out.println(" d2=" + tv2);
if (tv2 != null) {
PostingsEnum dpEnum = null;
PostingsEnum dEnum = null;
for (String field : tv2) {
System.out.println(" " + field + ":");
- Terms terms3 = tv2.terms(field);
+ IndexedField terms3 = tv2.indexedField(field);
assertNotNull(terms3);
- TermsEnum termsEnum3 = terms3.iterator();
+ TermsEnum termsEnum3 = terms3.getTermsEnum();
BytesRef term2;
while((term2 = termsEnum3.next()) != null) {
System.out.println(" " + term2.utf8ToString() + ": freq=" + termsEnum3.totalTermFreq());
@@ -463,9 +463,9 @@ public void verifyEquals(DirectoryReader r1, DirectoryReader r2, String idField)
// Verify postings
//System.out.println("TEST: create te1");
- final Fields fields1 = MultiFields.getFields(r1);
+ final IndexedFields fields1 = MultiFields.getFields(r1);
final Iterator fields1Enum = fields1.iterator();
- final Fields fields2 = MultiFields.getFields(r2);
+ final IndexedFields fields2 = MultiFields.getFields(r2);
final Iterator fields2Enum = fields2.iterator();
@@ -490,11 +490,11 @@ public void verifyEquals(DirectoryReader r1, DirectoryReader r2, String idField)
break;
}
field1 = fields1Enum.next();
- Terms terms = fields1.terms(field1);
+ IndexedField terms = fields1.indexedField(field1);
if (terms == null) {
continue;
}
- termsEnum1 = terms.iterator();
+ termsEnum1 = terms.getTermsEnum();
}
term1 = termsEnum1.next();
if (term1 == null) {
@@ -526,11 +526,11 @@ public void verifyEquals(DirectoryReader r1, DirectoryReader r2, String idField)
break;
}
field2 = fields2Enum.next();
- Terms terms = fields2.terms(field2);
+ IndexedField terms = fields2.indexedField(field2);
if (terms == null) {
continue;
}
- termsEnum2 = terms.iterator();
+ termsEnum2 = terms.getTermsEnum();
}
term2 = termsEnum2.next();
if (term2 == null) {
@@ -599,7 +599,7 @@ public static void verifyEquals(Document d1, Document d2) {
}
}
- public static void verifyEquals(Fields d1, Fields d2) throws IOException {
+ public static void verifyEquals(IndexedFields d1, IndexedFields d2) throws IOException {
if (d1 == null) {
assertTrue(d2 == null || d2.size() == 0);
return;
@@ -612,13 +612,13 @@ public static void verifyEquals(Fields d1, Fields d2) throws IOException {
String field2 = fieldsEnum2.next();
assertEquals(field1, field2);
- Terms terms1 = d1.terms(field1);
+ IndexedField terms1 = d1.indexedField(field1);
assertNotNull(terms1);
- TermsEnum termsEnum1 = terms1.iterator();
+ TermsEnum termsEnum1 = terms1.getTermsEnum();
- Terms terms2 = d2.terms(field2);
+ IndexedField terms2 = d2.indexedField(field2);
assertNotNull(terms2);
- TermsEnum termsEnum2 = terms2.iterator();
+ TermsEnum termsEnum2 = terms2.getTermsEnum();
PostingsEnum dpEnum1 = null;
PostingsEnum dpEnum2 = null;
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSumDocFreq.java b/lucene/core/src/test/org/apache/lucene/index/TestSumDocFreq.java
index 67063f661de3..4c1738d8e17a 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestSumDocFreq.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSumDocFreq.java
@@ -24,7 +24,7 @@
import org.apache.lucene.util.TestUtil;
/**
- * Tests {@link Terms#getSumDocFreq()}
+ * Tests {@link IndexedField#getSumDocFreq()}
* @lucene.experimental
*/
public class TestSumDocFreq extends LuceneTestCase {
@@ -73,10 +73,10 @@ public void testSumDocFreq() throws Exception {
private void assertSumDocFreq(IndexReader ir) throws Exception {
// compute sumDocFreq across all fields
- Fields fields = MultiFields.getFields(ir);
+ IndexedFields fields = MultiFields.getFields(ir);
for (String f : fields) {
- Terms terms = fields.terms(f);
+ IndexedField terms = fields.indexedField(f);
long sumDocFreq = terms.getSumDocFreq();
if (sumDocFreq == -1) {
if (VERBOSE) {
@@ -86,7 +86,7 @@ private void assertSumDocFreq(IndexReader ir) throws Exception {
}
long computedSumDocFreq = 0;
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
while (termsEnum.next() != null) {
computedSumDocFreq += termsEnum.docFreq();
}
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermVectors.java b/lucene/core/src/test/org/apache/lucene/index/TestTermVectors.java
index dd8616aed718..dd43054c5989 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTermVectors.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTermVectors.java
@@ -107,7 +107,7 @@ private void verifyIndex(Directory dir) throws IOException {
IndexReader r = DirectoryReader.open(dir);
int numDocs = r.numDocs();
for (int i = 0; i < numDocs; i++) {
- assertNotNull("term vectors should not have been null for document " + i, r.getTermVectors(i).terms("c"));
+ assertNotNull("term vectors should not have been null for document " + i, r.getTermVectors(i).indexedField("c"));
}
r.close();
}
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java b/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java
index 4b2a06095a65..77ac930e7491 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java
@@ -199,10 +199,10 @@ public void test() throws IOException {
public void testReader() throws IOException {
TermVectorsReader reader = Codec.getDefault().termVectorsFormat().vectorsReader(dir, seg.info, fieldInfos, newIOContext(random()));
for (int j = 0; j < 5; j++) {
- Terms vector = reader.get(j).terms(testFields[0]);
+ IndexedField vector = reader.get(j).indexedField(testFields[0]);
assertNotNull(vector);
assertEquals(testTerms.length, vector.size());
- TermsEnum termsEnum = vector.iterator();
+ TermsEnum termsEnum = vector.getTermsEnum();
for (int i = 0; i < testTerms.length; i++) {
final BytesRef text = termsEnum.next();
assertNotNull(text);
@@ -218,10 +218,10 @@ public void testReader() throws IOException {
public void testDocsEnum() throws IOException {
TermVectorsReader reader = Codec.getDefault().termVectorsFormat().vectorsReader(dir, seg.info, fieldInfos, newIOContext(random()));
for (int j = 0; j < 5; j++) {
- Terms vector = reader.get(j).terms(testFields[0]);
+ IndexedField vector = reader.get(j).indexedField(testFields[0]);
assertNotNull(vector);
assertEquals(testTerms.length, vector.size());
- TermsEnum termsEnum = vector.iterator();
+ TermsEnum termsEnum = vector.getTermsEnum();
PostingsEnum postingsEnum = null;
for (int i = 0; i < testTerms.length; i++) {
final BytesRef text = termsEnum.next();
@@ -245,10 +245,10 @@ public void testDocsEnum() throws IOException {
public void testPositionReader() throws IOException {
TermVectorsReader reader = Codec.getDefault().termVectorsFormat().vectorsReader(dir, seg.info, fieldInfos, newIOContext(random()));
BytesRef[] terms;
- Terms vector = reader.get(0).terms(testFields[0]);
+ IndexedField vector = reader.get(0).indexedField(testFields[0]);
assertNotNull(vector);
assertEquals(testTerms.length, vector.size());
- TermsEnum termsEnum = vector.iterator();
+ TermsEnum termsEnum = vector.getTermsEnum();
PostingsEnum dpEnum = null;
for (int i = 0; i < testTerms.length; i++) {
final BytesRef text = termsEnum.next();
@@ -282,10 +282,10 @@ public void testPositionReader() throws IOException {
assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc());
}
- Terms freqVector = reader.get(0).terms(testFields[1]); //no pos, no offset
+ IndexedField freqVector = reader.get(0).indexedField(testFields[1]); //no pos, no offset
assertNotNull(freqVector);
assertEquals(testTerms.length, freqVector.size());
- termsEnum = freqVector.iterator();
+ termsEnum = freqVector.getTermsEnum();
assertNotNull(termsEnum);
for (int i = 0; i < testTerms.length; i++) {
final BytesRef text = termsEnum.next();
@@ -301,9 +301,9 @@ public void testPositionReader() throws IOException {
public void testOffsetReader() throws IOException {
TermVectorsReader reader = Codec.getDefault().termVectorsFormat().vectorsReader(dir, seg.info, fieldInfos, newIOContext(random()));
- Terms vector = reader.get(0).terms(testFields[0]);
+ IndexedField vector = reader.get(0).indexedField(testFields[0]);
assertNotNull(vector);
- TermsEnum termsEnum = vector.iterator();
+ TermsEnum termsEnum = vector.getTermsEnum();
assertNotNull(termsEnum);
assertEquals(testTerms.length, vector.size());
PostingsEnum dpEnum = null;
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java
index 11f1503405b7..fd1d3fe0c400 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java
@@ -59,9 +59,9 @@ public void testDoubleOffsetCounting() throws Exception {
w.close();
IndexReader r = DirectoryReader.open(dir);
- Terms vector = r.getTermVectors(0).terms("field");
+ IndexedField vector = r.getTermVectors(0).indexedField("field");
assertNotNull(vector);
- TermsEnum termsEnum = vector.iterator();
+ TermsEnum termsEnum = vector.getTermsEnum();
assertNotNull(termsEnum.next());
assertEquals("", termsEnum.term().utf8ToString());
@@ -115,7 +115,7 @@ public void testDoubleOffsetCounting2() throws Exception {
w.close();
IndexReader r = DirectoryReader.open(dir);
- TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator();
+ TermsEnum termsEnum = r.getTermVectors(0).indexedField("field").getTermsEnum();
assertNotNull(termsEnum.next());
PostingsEnum dpEnum = termsEnum.postings(null, PostingsEnum.ALL);
assertEquals(2, termsEnum.totalTermFreq());
@@ -150,7 +150,7 @@ public void testEndOffsetPositionCharAnalyzer() throws Exception {
w.close();
IndexReader r = DirectoryReader.open(dir);
- TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator();
+ TermsEnum termsEnum = r.getTermVectors(0).indexedField("field").getTermsEnum();
assertNotNull(termsEnum.next());
PostingsEnum dpEnum = termsEnum.postings(null, PostingsEnum.ALL);
assertEquals(2, termsEnum.totalTermFreq());
@@ -188,7 +188,7 @@ public void testEndOffsetPositionWithCachingTokenFilter() throws Exception {
w.close();
IndexReader r = DirectoryReader.open(dir);
- TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator();
+ TermsEnum termsEnum = r.getTermVectors(0).indexedField("field").getTermsEnum();
assertNotNull(termsEnum.next());
PostingsEnum dpEnum = termsEnum.postings(null, PostingsEnum.ALL);
assertEquals(2, termsEnum.totalTermFreq());
@@ -223,7 +223,7 @@ public void testEndOffsetPositionStopFilter() throws Exception {
w.close();
IndexReader r = DirectoryReader.open(dir);
- TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator();
+ TermsEnum termsEnum = r.getTermVectors(0).indexedField("field").getTermsEnum();
assertNotNull(termsEnum.next());
PostingsEnum dpEnum = termsEnum.postings(null, PostingsEnum.ALL);
assertEquals(2, termsEnum.totalTermFreq());
@@ -259,7 +259,7 @@ public void testEndOffsetPositionStandard() throws Exception {
w.close();
IndexReader r = DirectoryReader.open(dir);
- TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator();
+ TermsEnum termsEnum = r.getTermVectors(0).indexedField("field").getTermsEnum();
assertNotNull(termsEnum.next());
PostingsEnum dpEnum = termsEnum.postings(null, PostingsEnum.ALL);
@@ -303,7 +303,7 @@ public void testEndOffsetPositionStandardEmptyField() throws Exception {
w.close();
IndexReader r = DirectoryReader.open(dir);
- TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator();
+ TermsEnum termsEnum = r.getTermVectors(0).indexedField("field").getTermsEnum();
assertNotNull(termsEnum.next());
PostingsEnum dpEnum = termsEnum.postings(null, PostingsEnum.ALL);
@@ -345,7 +345,7 @@ public void testEndOffsetPositionStandardEmptyField2() throws Exception {
w.close();
IndexReader r = DirectoryReader.open(dir);
- TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator();
+ TermsEnum termsEnum = r.getTermVectors(0).indexedField("field").getTermsEnum();
assertNotNull(termsEnum.next());
PostingsEnum dpEnum = termsEnum.postings(null, PostingsEnum.ALL);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java b/lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java
index 2060353dff81..b61d8b950806 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java
@@ -113,7 +113,7 @@ public int doTest(int iter, int ndocs, int maxTF, float percentDocs) throws IOEx
IndexReader reader = DirectoryReader.open(dir);
- TermsEnum tenum = MultiFields.getTerms(reader, "foo").iterator();
+ TermsEnum tenum = MultiFields.getIndexedField(reader, "foo").getTermsEnum();
start = System.currentTimeMillis();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTerms.java b/lucene/core/src/test/org/apache/lucene/index/TestTerms.java
index e8871dcb7699..6630affc9f2f 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTerms.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTerms.java
@@ -34,7 +34,7 @@ public void testTermMinMaxBasic() throws Exception {
doc.add(newTextField("field", "a b c cc ddd", Field.Store.NO));
w.addDocument(doc);
IndexReader r = w.getReader();
- Terms terms = MultiFields.getTerms(r, "field");
+ IndexedField terms = MultiFields.getIndexedField(r, "field");
assertEquals(new BytesRef("a"), terms.getMin());
assertEquals(new BytesRef("ddd"), terms.getMax());
r.close();
@@ -74,7 +74,7 @@ public void testTermMinMaxRandom() throws Exception {
}
IndexReader r = w.getReader();
- Terms terms = MultiFields.getTerms(r, "field");
+ IndexedField terms = MultiFields.getIndexedField(r, "field");
assertEquals(minTerm, terms.getMin());
assertEquals(maxTerm, terms.getMax());
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java b/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java
index d2df59ff4fd8..b98f03077bfa 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum.java
@@ -54,7 +54,7 @@ public void test() throws Exception {
w.close();
final List terms = new ArrayList<>();
- final TermsEnum termsEnum = MultiFields.getTerms(r, "body").iterator();
+ final TermsEnum termsEnum = MultiFields.getIndexedField(r, "body").getTermsEnum();
BytesRef term;
while((term = termsEnum.next()) != null) {
terms.add(BytesRef.deepCopyOf(term));
@@ -178,7 +178,7 @@ private boolean accepts(CompiledAutomaton c, BytesRef b) {
return c.runAutomaton.isAccept(state);
}
- // Tests Terms.intersect
+ // Tests IndexedField.intersect
public void testIntersectRandom() throws IOException {
final Directory dir = newDirectory();
final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
@@ -302,7 +302,7 @@ public void testIntersectRandom() throws IOException {
}
}
- final TermsEnum te = MultiFields.getTerms(r, "f").intersect(c, startTerm);
+ final TermsEnum te = MultiFields.getIndexedField(r, "f").intersect(c, startTerm);
int loc;
if (startTerm == null) {
@@ -481,7 +481,7 @@ public void testFloorBlocks() throws Exception {
assertEquals(1, docFreq(r, "xx"));
assertEquals(1, docFreq(r, "aa4"));
- final TermsEnum te = MultiFields.getTerms(r, FIELD).iterator();
+ final TermsEnum te = MultiFields.getIndexedField(r, FIELD).getTermsEnum();
while(te.next() != null) {
//System.out.println("TEST: next term=" + te.term().utf8ToString());
}
@@ -511,9 +511,9 @@ public void testZeroTerms() throws Exception {
w.close();
assertEquals(1, r.numDocs());
assertEquals(1, r.maxDoc());
- Terms terms = MultiFields.getTerms(r, "field");
+ IndexedField terms = MultiFields.getIndexedField(r, "field");
if (terms != null) {
- assertNull(terms.iterator().next());
+ assertNull(terms.getTermsEnum().next());
}
r.close();
d.close();
@@ -613,7 +613,7 @@ private void testRandomSeeks(IndexReader r, String... validTermStrings) throws I
System.out.println(" " + t.utf8ToString() + " " + t);
}
}
- final TermsEnum te = MultiFields.getTerms(r, FIELD).iterator();
+ final TermsEnum te = MultiFields.getIndexedField(r, FIELD).getTermsEnum();
final int END_LOC = -validTerms.length-1;
@@ -738,7 +738,7 @@ public void testIntersectBasic() throws Exception {
DirectoryReader r = w.getReader();
w.close();
LeafReader sub = getOnlyLeafReader(r);
- Terms terms = sub.fields().terms("field");
+ IndexedField terms = sub.fields().indexedField("field");
Automaton automaton = new RegExp(".*", RegExp.NONE).toAutomaton();
CompiledAutomaton ca = new CompiledAutomaton(automaton, false, false);
TermsEnum te = terms.intersect(ca, null);
@@ -792,7 +792,7 @@ public void testIntersectStartTerm() throws Exception {
DirectoryReader r = w.getReader();
w.close();
LeafReader sub = getOnlyLeafReader(r);
- Terms terms = sub.fields().terms("field");
+ IndexedField terms = sub.fields().indexedField("field");
Automaton automaton = new RegExp(".*d", RegExp.NONE).toAutomaton();
CompiledAutomaton ca = new CompiledAutomaton(automaton, false, false);
@@ -846,7 +846,7 @@ public void testIntersectEmptyString() throws Exception {
DirectoryReader r = w.getReader();
w.close();
LeafReader sub = getOnlyLeafReader(r);
- Terms terms = sub.fields().terms("field");
+ IndexedField terms = sub.fields().indexedField("field");
Automaton automaton = new RegExp(".*", RegExp.NONE).toAutomaton(); // accept ALL
CompiledAutomaton ca = new CompiledAutomaton(automaton, false, false);
@@ -905,7 +905,7 @@ public void testCommonPrefixTerms() throws Exception {
System.out.println("\nTEST: reader=" + r);
}
- TermsEnum termsEnum = MultiFields.getTerms(r, "id").iterator();
+ TermsEnum termsEnum = MultiFields.getIndexedField(r, "id").getTermsEnum();
PostingsEnum postingsEnum = null;
PerThreadPKLookup pkLookup = new PerThreadPKLookup(r, "id");
@@ -986,7 +986,7 @@ public void testVaryingTermsPerSegment() throws Exception {
w.addDocument(doc);
IndexReader r = w.getReader();
assertEquals(1, r.leaves().size());
- TermsEnum te = r.leaves().get(0).reader().fields().terms("field").iterator();
+ TermsEnum te = r.leaves().get(0).reader().fields().indexedField("field").getTermsEnum();
for(int i=0;i<=termCount;i++) {
assertTrue("term '" + termsList.get(i).utf8ToString() + "' should exist but doesn't", te.seekExact(termsList.get(i)));
}
@@ -1007,9 +1007,9 @@ public void testIntersectRegexp() throws Exception {
doc.add(newStringField("field", "foobar", Field.Store.NO));
w.addDocument(doc);
IndexReader r = w.getReader();
- Fields fields = MultiFields.getFields(r);
+ IndexedFields fields = MultiFields.getFields(r);
CompiledAutomaton automaton = new CompiledAutomaton(new RegExp("do_not_match_anything").toAutomaton());
- Terms terms = fields.terms("field");
+ IndexedField terms = fields.indexedField("field");
String message = expectThrows(IllegalArgumentException.class, () -> {terms.intersect(automaton, null);}).getMessage();
assertEquals("please use CompiledAutomaton.getTermsEnum instead", message);
r.close();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum2.java b/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum2.java
index 00181d545125..cd13d6381059 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum2.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTermsEnum2.java
@@ -116,7 +116,7 @@ public void testSeeking() throws Exception {
String reg = AutomatonTestUtil.randomRegexp(random());
Automaton automaton = Operations.determinize(new RegExp(reg, RegExp.NONE).toAutomaton(),
DEFAULT_MAX_DETERMINIZED_STATES);
- TermsEnum te = MultiFields.getTerms(reader, "field").iterator();
+ TermsEnum te = MultiFields.getIndexedField(reader, "field").getTermsEnum();
ArrayList unsortedTerms = new ArrayList<>(terms);
Collections.shuffle(unsortedTerms, random());
@@ -139,7 +139,7 @@ public void testSeeking() throws Exception {
/** mixes up seek and next for all terms */
public void testSeekingAndNexting() throws Exception {
for (int i = 0; i < numIterations; i++) {
- TermsEnum te = MultiFields.getTerms(reader, "field").iterator();
+ TermsEnum te = MultiFields.getIndexedField(reader, "field").getTermsEnum();
for (BytesRef term : terms) {
int c = random().nextInt(3);
@@ -161,7 +161,7 @@ public void testIntersect() throws Exception {
String reg = AutomatonTestUtil.randomRegexp(random());
Automaton automaton = new RegExp(reg, RegExp.NONE).toAutomaton();
CompiledAutomaton ca = new CompiledAutomaton(automaton, Operations.isFinite(automaton), false);
- TermsEnum te = MultiFields.getTerms(reader, "field").intersect(ca, null);
+ TermsEnum te = MultiFields.getIndexedField(reader, "field").intersect(ca, null);
Automaton expected = Operations.determinize(Operations.intersection(termsAutomaton, automaton),
DEFAULT_MAX_DETERMINIZED_STATES);
TreeSet found = new TreeSet<>();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TermInSetQueryTest.java b/lucene/core/src/test/org/apache/lucene/search/TermInSetQueryTest.java
index 3878d59abb09..0dc68bad582c 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TermInSetQueryTest.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TermInSetQueryTest.java
@@ -29,14 +29,14 @@
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.FilterDirectoryReader;
import org.apache.lucene.index.FilterLeafReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.store.Directory;
@@ -219,19 +219,19 @@ public TermsCountingLeafReaderWrapper(LeafReader in, AtomicInteger counter) {
}
@Override
- public Fields fields() throws IOException {
+ public IndexedFields fields() throws IOException {
return new FilterFields(in.fields()) {
@Override
- public Terms terms(String field) throws IOException {
- final Terms in = this.in.terms(field);
+ public IndexedField indexedField(String field) throws IOException {
+ final IndexedField in = this.in.indexedField(field);
if (in == null) {
return null;
}
- return new FilterTerms(in) {
+ return new FilterField(in) {
@Override
- public TermsEnum iterator() throws IOException {
+ public TermsEnum getTermsEnum() throws IOException {
counter.incrementAndGet();
- return super.iterator();
+ return super.getTermsEnum();
}
};
}
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQuery.java
index d4c865fac010..06c2aec685e6 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestAutomatonQuery.java
@@ -30,7 +30,7 @@
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.SingleTermsEnum;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
@@ -181,7 +181,7 @@ public void testEquals() {
*/
public void testRewriteSingleTerm() throws IOException {
AutomatonQuery aq = new AutomatonQuery(newTerm("bogus"), Automata.makeString("piece"));
- Terms terms = MultiFields.getTerms(searcher.getIndexReader(), FN);
+ IndexedField terms = MultiFields.getIndexedField(searcher.getIndexReader(), FN);
assertTrue(aq.getTermsEnum(terms) instanceof SingleTermsEnum);
assertEquals(1, automatonQueryNrHits(aq));
}
@@ -204,7 +204,7 @@ public void testEmptyOptimization() throws IOException {
AutomatonQuery aq = new AutomatonQuery(newTerm("bogus"), Automata.makeEmpty());
// not yet available: assertTrue(aq.getEnum(searcher.getIndexReader())
// instanceof EmptyTermEnum);
- Terms terms = MultiFields.getTerms(searcher.getIndexReader(), FN);
+ IndexedField terms = MultiFields.getIndexedField(searcher.getIndexReader(), FN);
assertSame(TermsEnum.EMPTY, aq.getTermsEnum(terms));
assertEquals(0, automatonQueryNrHits(aq));
}
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java
index 262cbf339d4e..f990d69a00e0 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java
@@ -71,7 +71,7 @@ public void testPhrasePrefix() throws IOException {
// this TermEnum gives "piccadilly", "pie" and "pizza".
String prefix = "pi";
- TermsEnum te = MultiFields.getFields(reader).terms("body").iterator();
+ TermsEnum te = MultiFields.getFields(reader).indexedField("body").getTermsEnum();
te.seekCeil(new BytesRef(prefix));
do {
String s = te.term().utf8ToString();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMultiTermQueryRewrites.java b/lucene/core/src/test/org/apache/lucene/search/TestMultiTermQueryRewrites.java
index 44f8d8bd2c75..96ee040746b2 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestMultiTermQueryRewrites.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestMultiTermQueryRewrites.java
@@ -28,7 +28,7 @@
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.AttributeSource;
@@ -153,8 +153,8 @@ private void checkBooleanQueryBoosts(BooleanQuery bq) {
private void checkBoosts(MultiTermQuery.RewriteMethod method) throws Exception {
final MultiTermQuery mtq = new MultiTermQuery("data") {
@Override
- protected TermsEnum getTermsEnum(Terms terms, AttributeSource atts) throws IOException {
- return new FilteredTermsEnum(terms.iterator()) {
+ protected TermsEnum getTermsEnum(IndexedField terms, AttributeSource atts) throws IOException {
+ return new FilteredTermsEnum(terms.getTermsEnum()) {
final BoostAttribute boostAtt =
attributes().addAttribute(BoostAttribute.class);
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java b/lucene/core/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java
index d143bf74baae..f1faed269fa0 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java
@@ -22,10 +22,10 @@
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.*;
import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.English;
@@ -164,25 +164,25 @@ private void testTermVectors() throws Exception {
long start = 0L;
for (int docId = 0; docId < numDocs; docId++) {
start = System.currentTimeMillis();
- Fields vectors = reader.getTermVectors(docId);
+ IndexedFields vectors = reader.getTermVectors(docId);
timeElapsed += System.currentTimeMillis()-start;
// verify vectors result
verifyVectors(vectors, docId);
start = System.currentTimeMillis();
- Terms vector = reader.getTermVectors(docId).terms("field");
+ IndexedField vector = reader.getTermVectors(docId).indexedField("field");
timeElapsed += System.currentTimeMillis()-start;
- verifyVector(vector.iterator(), docId);
+ verifyVector(vector.getTermsEnum(), docId);
}
}
- private void verifyVectors(Fields vectors, int num) throws IOException {
+ private void verifyVectors(IndexedFields vectors, int num) throws IOException {
for (String field : vectors) {
- Terms terms = vectors.terms(field);
+ IndexedField terms = vectors.indexedField(field);
assert terms != null;
- verifyVector(terms.iterator(), num);
+ verifyVector(terms.getTermsEnum(), num);
}
}
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java
index 81511509377c..a9271b59efda 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java
@@ -73,7 +73,7 @@ public void testPhrasePrefix() throws IOException {
// this TermEnum gives "piccadilly", "pie" and "pizza".
String prefix = "pi";
- TermsEnum te = MultiFields.getFields(reader).terms("body").iterator();
+ TermsEnum te = MultiFields.getFields(reader).indexedField("body").getTermsEnum();
te.seekCeil(new BytesRef(prefix));
do {
String s = te.term().utf8ToString();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPrefixRandom.java b/lucene/core/src/test/org/apache/lucene/search/TestPrefixRandom.java
index 72fdc7aa689b..6f3952b165c4 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestPrefixRandom.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestPrefixRandom.java
@@ -27,7 +27,7 @@
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.AttributeSource;
@@ -84,8 +84,8 @@ private class DumbPrefixQuery extends MultiTermQuery {
}
@Override
- protected TermsEnum getTermsEnum(Terms terms, AttributeSource atts) throws IOException {
- return new SimplePrefixTermsEnum(terms.iterator(), prefix);
+ protected TermsEnum getTermsEnum(IndexedField terms, AttributeSource atts) throws IOException {
+ return new SimplePrefixTermsEnum(terms.getTermsEnum(), prefix);
}
private class SimplePrefixTermsEnum extends FilteredTermsEnum {
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestRegexpRandom2.java b/lucene/core/src/test/org/apache/lucene/search/TestRegexpRandom2.java
index a704d5479e1b..70708565e349 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestRegexpRandom2.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestRegexpRandom2.java
@@ -31,7 +31,7 @@
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.AttributeSource;
@@ -112,8 +112,8 @@ private class DumbRegexpQuery extends MultiTermQuery {
}
@Override
- protected TermsEnum getTermsEnum(Terms terms, AttributeSource atts) throws IOException {
- return new SimpleAutomatonTermsEnum(terms.iterator());
+ protected TermsEnum getTermsEnum(IndexedField terms, AttributeSource atts) throws IOException {
+ return new SimpleAutomatonTermsEnum(terms.getTermsEnum());
}
private class SimpleAutomatonTermsEnum extends FilteredTermsEnum {
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSameScoresWithThreads.java b/lucene/core/src/test/org/apache/lucene/search/TestSameScoresWithThreads.java
index 83a11be8b7a1..6c593275f5c4 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSameScoresWithThreads.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSameScoresWithThreads.java
@@ -31,7 +31,7 @@
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
@@ -61,9 +61,9 @@ public void test() throws Exception {
w.close();
final IndexSearcher s = newSearcher(r);
- Terms terms = MultiFields.getFields(r).terms("body");
+ IndexedField terms = MultiFields.getFields(r).indexedField("body");
int termCount = 0;
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
while(termsEnum.next() != null) {
termCount++;
}
@@ -71,7 +71,7 @@ public void test() throws Exception {
// Target ~10 terms to search:
double chance = 10.0 / termCount;
- termsEnum = terms.iterator();
+ termsEnum = terms.getTermsEnum();
final Map answers = new HashMap<>();
while(termsEnum.next() != null) {
if (random().nextDouble() <= chance) {
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestShardSearching.java b/lucene/core/src/test/org/apache/lucene/search/TestShardSearching.java
index 768ee0e17139..fc75caedf7b1 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestShardSearching.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestShardSearching.java
@@ -174,7 +174,7 @@ public void testSimple() throws Exception {
if (terms == null && docCount > minDocsToMakeTerms) {
// TODO: try to "focus" on high freq terms sometimes too
// TODO: maybe also periodically reset the terms...?
- final TermsEnum termsEnum = MultiFields.getTerms(mockReader, "body").iterator();
+ final TermsEnum termsEnum = MultiFields.getIndexedField(mockReader, "body").getTermsEnum();
terms = new ArrayList<>();
while(termsEnum.next() != null) {
terms.add(BytesRef.deepCopyOf(termsEnum.term()));
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTermQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestTermQuery.java
index a99411898fad..03473c81eb1d 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestTermQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestTermQuery.java
@@ -22,7 +22,7 @@
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.FilterDirectoryReader;
import org.apache.lucene.index.FilterLeafReader;
import org.apache.lucene.index.LeafReader;
@@ -32,7 +32,7 @@
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.index.TermState;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
@@ -118,14 +118,14 @@ public NoSeekLeafReader(LeafReader in) {
}
@Override
- public Fields fields() throws IOException {
+ public IndexedFields fields() throws IOException {
return new FilterFields(super.fields()) {
@Override
- public Terms terms(String field) throws IOException {
- return new FilterTerms(super.terms(field)) {
+ public IndexedField indexedField(String field) throws IOException {
+ return new FilterField(super.indexedField(field)) {
@Override
- public TermsEnum iterator() throws IOException {
- return new FilterTermsEnum(super.iterator()) {
+ public TermsEnum getTermsEnum() throws IOException {
+ return new FilterTermsEnum(super.getTermsEnum()) {
@Override
public SeekStatus seekCeil(BytesRef text) throws IOException {
throw new AssertionError("no seek");
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestWildcard.java b/lucene/core/src/test/org/apache/lucene/search/TestWildcard.java
index 4255d7c64812..49bbb29a8ffc 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestWildcard.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestWildcard.java
@@ -27,7 +27,7 @@
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import java.io.IOException;
@@ -116,7 +116,7 @@ public void testPrefixTerm() throws IOException {
wq = new WildcardQuery(new Term("field", "*"));
assertMatches(searcher, wq, 2);
- Terms terms = MultiFields.getTerms(searcher.getIndexReader(), "field");
+ IndexedField terms = MultiFields.getIndexedField(searcher.getIndexReader(), "field");
assertFalse(wq.getTermsEnum(terms).getClass().getSimpleName().contains("AutomatonTermsEnum"));
reader.close();
indexStore.close();
diff --git a/lucene/core/src/test/org/apache/lucene/util/TestDocIdSetBuilder.java b/lucene/core/src/test/org/apache/lucene/util/TestDocIdSetBuilder.java
index f87a73af0c3c..68925785e67f 100644
--- a/lucene/core/src/test/org/apache/lucene/util/TestDocIdSetBuilder.java
+++ b/lucene/core/src/test/org/apache/lucene/util/TestDocIdSetBuilder.java
@@ -20,7 +20,7 @@
import java.io.IOException;
import org.apache.lucene.index.PointValues;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
@@ -204,7 +204,7 @@ public void testLeverageStats() throws IOException {
assertTrue(builder.multivalued);
// single-valued terms
- Terms terms = new DummyTerms(42, 42);
+ IndexedField terms = new DummyTerms(42, 42);
builder = new DocIdSetBuilder(100, terms);
assertEquals(1d, builder.numValuesPerDoc, 0d);
assertFalse(builder.multivalued);
@@ -239,7 +239,7 @@ public void testLeverageStats() throws IOException {
assertTrue(builder.multivalued);
}
- private static class DummyTerms extends Terms {
+ private static class DummyTerms extends IndexedField {
private final int docCount;
private final long numValues;
@@ -250,7 +250,7 @@ private static class DummyTerms extends Terms {
}
@Override
- public TermsEnum iterator() throws IOException {
+ public TermsEnum getTermsEnum() throws IOException {
throw new UnsupportedOperationException();
}
diff --git a/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java b/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java
index dcce285383b4..4d77c9483d2a 100644
--- a/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java
+++ b/lucene/core/src/test/org/apache/lucene/util/fst/TestFSTs.java
@@ -49,7 +49,7 @@
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
@@ -337,10 +337,10 @@ public void testRealTerms() throws Exception {
System.out.println("FST stores docFreq");
}
}
- Terms terms = MultiFields.getTerms(r, "body");
+ IndexedField terms = MultiFields.getIndexedField(r, "body");
if (terms != null) {
final IntsRefBuilder scratchIntsRef = new IntsRefBuilder();
- final TermsEnum termsEnum = terms.iterator();
+ final TermsEnum termsEnum = terms.getTermsEnum();
if (VERBOSE) {
System.out.println("TEST: got termsEnum=" + termsEnum);
}
@@ -917,7 +917,7 @@ public void testPrimaryKeys() throws Exception {
}
// Verify w/ MultiTermsEnum
- final TermsEnum termsEnum = MultiFields.getTerms(r, "id").iterator();
+ final TermsEnum termsEnum = MultiFields.getIndexedField(r, "id").getTermsEnum();
for(int iter=0;iter<2*NUM_IDS;iter++) {
final String id;
final String nextID;
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyWriter.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyWriter.java
index 632d74bff8f1..001069b3c2eb 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyWriter.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyWriter.java
@@ -54,7 +54,7 @@
import org.apache.lucene.index.LogByteSizeMergePolicy;
import org.apache.lucene.index.ReaderManager;
import org.apache.lucene.index.SegmentInfos;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.index.TieredMergePolicy;
import org.apache.lucene.store.AlreadyClosedException;
@@ -382,10 +382,10 @@ protected synchronized int findCategory(FacetLabel categoryPath) throws IOExcept
final BytesRef catTerm = new BytesRef(FacetsConfig.pathToString(categoryPath.components, categoryPath.length));
PostingsEnum docs = null; // reuse
for (LeafReaderContext ctx : reader.leaves()) {
- Terms terms = ctx.reader().terms(Consts.FULL);
+ IndexedField terms = ctx.reader().indexedField(Consts.FULL);
if (terms != null) {
// TODO: share per-segment TermsEnum here!
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
if (termsEnum.seekExact(catTerm)) {
// liveDocs=null because the taxonomy has no deletes
docs = termsEnum.postings(docs, 0 /* freqs not required */);
@@ -693,10 +693,10 @@ private synchronized void perhapsFillCache() throws IOException {
try {
PostingsEnum postingsEnum = null;
for (LeafReaderContext ctx : reader.leaves()) {
- Terms terms = ctx.reader().terms(Consts.FULL);
+ IndexedField terms = ctx.reader().indexedField(Consts.FULL);
if (terms != null) { // cannot really happen, but be on the safe side
// TODO: share per-segment TermsEnum here!
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
while (termsEnum.next() != null) {
if (!cache.isFull()) {
BytesRef t = termsEnum.term();
@@ -790,9 +790,9 @@ public void addTaxonomy(Directory taxoDir, OrdinalMap map) throws IOException {
PostingsEnum docs = null;
for (final LeafReaderContext ctx : r.leaves()) {
final LeafReader ar = ctx.reader();
- final Terms terms = ar.terms(Consts.FULL);
+ final IndexedField terms = ar.indexedField(Consts.FULL);
// TODO: share per-segment TermsEnum here!
- TermsEnum te = terms.iterator();
+ TermsEnum te = terms.getTermsEnum();
while (te.next() != null) {
FacetLabel cp = new FacetLabel(FacetsConfig.stringToPath(te.term().utf8ToString()));
final int ordinal = addCategory(cp);
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TermVectorLeafReader.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TermVectorLeafReader.java
index 608e3d47128e..f4583e9b8508 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TermVectorLeafReader.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TermVectorLeafReader.java
@@ -24,7 +24,7 @@
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.NumericDocValues;
@@ -33,29 +33,29 @@
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.index.StoredFieldVisitor;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.search.Sort;
import org.apache.lucene.util.Bits;
/**
- * Wraps a Terms with a {@link org.apache.lucene.index.LeafReader}, typically from term vectors.
+ * Wraps an IndexedField with a {@link org.apache.lucene.index.LeafReader}, typically from term vectors.
*
* @lucene.experimental
*/
public class TermVectorLeafReader extends LeafReader {
- private final Fields fields;
+ private final IndexedFields fields;
private final FieldInfos fieldInfos;
- public TermVectorLeafReader(String field, Terms terms) {
- fields = new Fields() {
+ public TermVectorLeafReader(String field, IndexedField terms) {
+ fields = new IndexedFields() {
@Override
public Iterator iterator() {
return Collections.singletonList(field).iterator();
}
@Override
- public Terms terms(String fld) throws IOException {
+ public IndexedField indexedField(String fld) throws IOException {
if (!field.equals(fld)) {
return null;
}
@@ -99,7 +99,7 @@ protected void doClose() throws IOException {
}
@Override
- public Fields fields() throws IOException {
+ public IndexedFields fields() throws IOException {
return fields;
}
@@ -153,7 +153,7 @@ public void checkIntegrity() throws IOException {
}
@Override
- public Fields getTermVectors(int docID) throws IOException {
+ public IndexedFields getTermVectors(int docID) throws IOException {
if (docID != 0) {
return null;
}
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java
index 2730f40409a8..3ac7e64fa10b 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java
@@ -25,9 +25,9 @@
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.LimitTokenOffsetFilter;
import org.apache.lucene.document.Document;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
/**
* Convenience methods for obtaining a {@link TokenStream} for use with the {@link Highlighter} - can obtain from
@@ -50,12 +50,12 @@ private TokenSources() {}
* be re-used for the same document (e.g. when highlighting multiple fields).
* @param text the text to analyze, failing term vector un-inversion
* @param analyzer the analyzer to analyze {@code text} with, failing term vector un-inversion
- * @param maxStartOffset Terms with a startOffset greater than this aren't returned. Use -1 for no limit.
+ * @param maxStartOffset Tokens with a startOffset greater than this aren't returned. Use -1 for no limit.
* Suggest using {@link Highlighter#getMaxDocCharsToAnalyze()} - 1.
*
* @return a token stream from either term vectors, or from analyzing the text. Never null.
*/
- public static TokenStream getTokenStream(String field, Fields tvFields, String text, Analyzer analyzer,
+ public static TokenStream getTokenStream(String field, IndexedFields tvFields, String text, Analyzer analyzer,
int maxStartOffset) throws IOException {
TokenStream tokenStream = getTermVectorTokenStreamOrNull(field, tvFields, maxStartOffset);
if (tokenStream != null) {
@@ -76,16 +76,16 @@ public static TokenStream getTokenStream(String field, Fields tvFields, String t
* @param field The field to get term vectors from.
* @param tvFields from {@link IndexReader#getTermVectors(int)}. Possibly null. For performance, this instance should
* be re-used for the same document (e.g. when highlighting multiple fields).
- * @param maxStartOffset Terms with a startOffset greater than this aren't returned. Use -1 for no limit.
+ * @param maxStartOffset Tokens with a startOffset greater than this aren't returned. Use -1 for no limit.
* Suggest using {@link Highlighter#getMaxDocCharsToAnalyze()} - 1
* @return a token stream from term vectors. Null if no term vectors with the right options.
*/
- public static TokenStream getTermVectorTokenStreamOrNull(String field, Fields tvFields, int maxStartOffset)
+ public static TokenStream getTermVectorTokenStreamOrNull(String field, IndexedFields tvFields, int maxStartOffset)
throws IOException {
if (tvFields == null) {
return null;
}
- final Terms tvTerms = tvFields.terms(field);
+ final IndexedField tvTerms = tvFields.indexedField(field);
if (tvTerms == null || !tvTerms.hasOffsets()) {
return null;
}
@@ -116,9 +116,9 @@ public static TokenStream getAnyTokenStream(IndexReader reader, int docId,
String field, Document document, Analyzer analyzer) throws IOException {
TokenStream ts = null;
- Fields vectors = reader.getTermVectors(docId);
+ IndexedFields vectors = reader.getTermVectors(docId);
if (vectors != null) {
- Terms vector = vectors.terms(field);
+ IndexedField vector = vectors.indexedField(field);
if (vector != null) {
ts = getTokenStream(vector);
}
@@ -145,9 +145,9 @@ public static TokenStream getAnyTokenStream(IndexReader reader, int docId,
String field, Analyzer analyzer) throws IOException {
TokenStream ts = null;
- Fields vectors = reader.getTermVectors(docId);
+ IndexedFields vectors = reader.getTermVectors(docId);
if (vectors != null) {
- Terms vector = vectors.terms(field);
+ IndexedField vector = vectors.indexedField(field);
if (vector != null) {
ts = getTokenStream(vector);
}
@@ -160,17 +160,17 @@ public static TokenStream getAnyTokenStream(IndexReader reader, int docId,
return ts;
}
- /** Simply calls {@link #getTokenStream(org.apache.lucene.index.Terms)} now. */
+ /** Simply calls {@link #getTokenStream(org.apache.lucene.index.IndexedField)} now. */
@Deprecated // maintenance reasons LUCENE-6445
- public static TokenStream getTokenStream(Terms vector,
+ public static TokenStream getTokenStream(IndexedField vector,
boolean tokenPositionsGuaranteedContiguous) throws IOException {
return getTokenStream(vector);
}
/**
- * Returns a token stream generated from a {@link Terms}. This
+ * Returns a token stream generated from an {@link IndexedField}. This
* can be used to feed the highlighter with a pre-parsed token
- * stream. The {@link Terms} must have offsets available. If there are no positions available,
+ * stream. The {@link IndexedField} must have offsets available. If there are no positions available,
* all tokens will have position increments reflecting adjacent tokens, or coincident when terms
* share a start offset. If there are stopwords filtered from the index, you probably want to ensure
* term vectors have positions so that phrase queries won't match across stopwords.
@@ -178,7 +178,7 @@ public static TokenStream getTokenStream(Terms vector,
* @throws IllegalArgumentException if no offsets are available
*/
@Deprecated // maintenance reasons LUCENE-6445
- public static TokenStream getTokenStream(final Terms tpv) throws IOException {
+ public static TokenStream getTokenStream(final IndexedField tpv) throws IOException {
if (!tpv.hasOffsets()) {
throw new IllegalArgumentException("Highlighting requires offsets from the TokenStream.");
@@ -192,7 +192,7 @@ public static TokenStream getTokenStream(final Terms tpv) throws IOException {
/**
* Returns a {@link TokenStream} with positions and offsets constructed from
* field termvectors. If the field has no termvectors or offsets
- * are not included in the termvector, return null. See {@link #getTokenStream(org.apache.lucene.index.Terms)}
+ * are not included in the termvector, return null. See {@link #getTokenStream(org.apache.lucene.index.IndexedField)}
* for an explanation of what happens when positions aren't present.
*
* @param reader the {@link IndexReader} to retrieve term vectors from
@@ -201,18 +201,18 @@ public static TokenStream getTokenStream(final Terms tpv) throws IOException {
* @return a {@link TokenStream}, or null if offsets are not available
* @throws IOException If there is a low-level I/O error
*
- * @see #getTokenStream(org.apache.lucene.index.Terms)
+ * @see #getTokenStream(org.apache.lucene.index.IndexedField)
*/
@Deprecated // maintenance reasons LUCENE-6445
public static TokenStream getTokenStreamWithOffsets(IndexReader reader, int docId,
String field) throws IOException {
- Fields vectors = reader.getTermVectors(docId);
+ IndexedFields vectors = reader.getTermVectors(docId);
if (vectors == null) {
return null;
}
- Terms vector = vectors.terms(field);
+ IndexedField vector = vectors.indexedField(field);
if (vector == null) {
return null;
}
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermVector.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermVector.java
index 346ecba013d7..b2ee3a57140d 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermVector.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermVector.java
@@ -25,7 +25,7 @@
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.index.PostingsEnum;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.AttributeFactory;
import org.apache.lucene.util.BytesRef;
@@ -57,7 +57,7 @@ public final class TokenStreamFromTermVector extends TokenStream {
AttributeFactory.getStaticImplementation(
AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY, PackedTokenAttributeImpl.class);
- private final Terms vector;
+ private final IndexedField vector;
private final CharTermAttribute termAttribute;
@@ -84,11 +84,11 @@ public final class TokenStreamFromTermVector extends TokenStream {
* Constructor. The uninversion doesn't happen here; it's delayed till the first call to
* {@link #incrementToken}.
*
- * @param vector Terms that contains the data for
+ * @param vector IndexedField that contains the data for
* creating the TokenStream. Must have positions and/or offsets.
* @param maxStartOffset if a token's start offset exceeds this then the token is not added. -1 disables the limit.
*/
- public TokenStreamFromTermVector(Terms vector, int maxStartOffset) throws IOException {
+ public TokenStreamFromTermVector(IndexedField vector, int maxStartOffset) throws IOException {
super(ATTRIBUTE_FACTORY);
this.maxStartOffset = maxStartOffset < 0 ? Integer.MAX_VALUE : maxStartOffset;
assert !hasAttribute(PayloadAttribute.class) : "AttributeFactory shouldn't have payloads *yet*";
@@ -101,7 +101,7 @@ public TokenStreamFromTermVector(Terms vector, int maxStartOffset) throws IOExce
positionIncrementAttribute = addAttribute(PositionIncrementAttribute.class);
}
- public Terms getTermVectorTerms() { return vector; }
+ public IndexedField getTermVectorTerms() { return vector; }
@Override
public void reset() throws IOException {
@@ -134,7 +134,7 @@ private void init() throws IOException {
int lastPosition = -1;
- final TermsEnum termsEnum = vector.iterator();
+ final TermsEnum termsEnum = vector.getTermsEnum();
BytesRef termBytesRef;
PostingsEnum dpEnum = null;
CharsRefBuilder tempCharsRefBuilder = new CharsRefBuilder();//only for UTF8->UTF16 call
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTermExtractor.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTermExtractor.java
index 0e0093b02e3b..f75f8355868a 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTermExtractor.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTermExtractor.java
@@ -30,7 +30,7 @@
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.FieldInfos;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.FilterLeafReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader;
@@ -38,7 +38,7 @@
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.queries.CommonTermsQuery;
import org.apache.lucene.queries.CustomScoreQuery;
@@ -98,7 +98,7 @@ public WeightedSpanTermExtractor(String defaultField) {
* Fills a Map with {@link WeightedSpanTerm}s using the terms from the supplied Query.
*
* @param query
- * Query to extract Terms from
+ * Query to extract terms from
* @param terms
* Map to place created WeightedSpanTerms in
* @throws IOException If there is a low-level I/O error
@@ -263,7 +263,7 @@ protected void extractUnknownQuery(Query query,
* @param terms
* Map to place created WeightedSpanTerms in
* @param spanQuery
- * SpanQuery to extract Terms from
+ * SpanQuery to extract terms from
* @throws IOException If there is a low-level I/O error
*/
protected void extractWeightedSpanTerms(Map terms, SpanQuery spanQuery, float boost) throws IOException {
@@ -355,7 +355,7 @@ protected void extractWeightedSpanTerms(Map terms, Span
* @param terms
* Map to place created WeightedSpanTerms in
* @param query
- * Query to extract Terms from
+ * Query to extract terms from
* @throws IOException If there is a low-level I/O error
*/
protected void extractWeightedTerms(Map terms, Query query, float boost) throws IOException {
@@ -385,10 +385,10 @@ protected LeafReaderContext getLeafContext() throws IOException {
if (internalReader == null) {
boolean cacheIt = wrapToCaching && !(tokenStream instanceof CachingTokenFilter);
- // If it's from term vectors, simply wrap the underlying Terms in a reader
+ // If it's from term vectors, simply wrap the underlying IndexedField in a reader
if (tokenStream instanceof TokenStreamFromTermVector) {
cacheIt = false;
- Terms termVectorTerms = ((TokenStreamFromTermVector) tokenStream).getTermVectorTerms();
+ IndexedField termVectorTerms = ((TokenStreamFromTermVector) tokenStream).getTermVectorTerms();
if (termVectorTerms.hasPositions() && termVectorTerms.hasOffsets()) {
internalReader = new TermVectorLeafReader(DelegatingLeafReader.FIELD_NAME, termVectorTerms);
}
@@ -436,11 +436,11 @@ public FieldInfos getFieldInfos() {
}
@Override
- public Fields fields() throws IOException {
+ public IndexedFields fields() throws IOException {
return new FilterFields(super.fields()) {
@Override
- public Terms terms(String field) throws IOException {
- return super.terms(DelegatingLeafReader.FIELD_NAME);
+ public IndexedField indexedField(String field) throws IOException {
+ return super.indexedField(DelegatingLeafReader.FIELD_NAME);
}
@Override
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java b/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java
index e4d366793aa3..6e25fd0f59b1 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java
@@ -42,7 +42,7 @@
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.index.StoredFieldVisitor;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
@@ -503,13 +503,13 @@ private Map highlightField(String field, String contents[], Brea
// if the segment has changed, we must initialize new enums.
if (leaf != lastLeaf) {
- Terms t = r.terms(field);
+ IndexedField t = r.indexedField(field);
if (t != null) {
if (!t.hasOffsets()) {
// no offsets available
throw new IllegalArgumentException("field '" + field + "' was indexed without offsets, cannot highlight");
}
- termsEnum = t.iterator();
+ termsEnum = t.getTermsEnum();
postings = new PostingsEnum[terms.length];
} else {
termsEnum = null;
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/FieldOffsetStrategy.java b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/FieldOffsetStrategy.java
index 155f0a76fb97..8b34312972b9 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/FieldOffsetStrategy.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/FieldOffsetStrategy.java
@@ -25,7 +25,7 @@
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.PostingsEnum;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.spans.Spans;
import org.apache.lucene.util.BytesRef;
@@ -65,7 +65,7 @@ public String getField() {
public abstract List getOffsetsEnums(IndexReader reader, int docId, String content) throws IOException;
protected List createOffsetsEnumsFromReader(LeafReader leafReader, int doc) throws IOException {
- final Terms termsIndex = leafReader.terms(field);
+ final IndexedField termsIndex = leafReader.indexedField(field);
if (termsIndex == null) {
return Collections.emptyList();
}
@@ -82,7 +82,7 @@ protected List createOffsetsEnumsFromReader(LeafReader leafReader,
// Handle sourceTerms:
if (!sourceTerms.isEmpty()) {
- TermsEnum termsEnum = termsIndex.iterator();//does not return null
+ TermsEnum termsEnum = termsIndex.getTermsEnum();//does not return null
for (BytesRef term : sourceTerms) {
if (termsEnum.seekExact(term)) {
PostingsEnum postingsEnum = termsEnum.postings(null, PostingsEnum.OFFSETS);
@@ -110,13 +110,13 @@ protected List createOffsetsEnumsFromReader(LeafReader leafReader,
return offsetsEnums;
}
- protected List createAutomataOffsetsFromTerms(Terms termsIndex, int doc) throws IOException {
+ protected List createAutomataOffsetsFromTerms(IndexedField termsIndex, int doc) throws IOException {
List> automataPostings = new ArrayList<>(automata.length);
for (int i = 0; i < automata.length; i++) {
automataPostings.add(new ArrayList<>());
}
- TermsEnum termsEnum = termsIndex.iterator();
+ TermsEnum termsEnum = termsIndex.getTermsEnum();
BytesRef term;
CharsRefBuilder refBuilder = new CharsRefBuilder();
while ((term = termsEnum.next()) != null) {
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/PhraseHelper.java b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/PhraseHelper.java
index d7e8671c4c2e..910856d24571 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/PhraseHelper.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/PhraseHelper.java
@@ -36,7 +36,7 @@
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.FieldInfos;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.FilterLeafReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
@@ -44,7 +44,7 @@
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
@@ -122,7 +122,7 @@ public PhraseHelper(Query query, String field, Predicate fieldMatcher, F
boolean[] mustRewriteHolder = {false}; // boolean wrapped in 1-ary array so it's mutable from inner class
- // For TermQueries or other position insensitive queries, collect the Terms.
+ // For TermQueries or other position insensitive queries, collect the terms.
// For other Query types, WSTE will convert to an equivalent SpanQuery. NOT extracting position spans here.
new WeightedSpanTermExtractor(field) {
//anonymous constructor
@@ -407,7 +407,7 @@ public int endOffset() throws IOException {
}
/**
- * Simple TreeSet that filters out Terms not matching the provided predicate on {@code add()}.
+ * Simple TreeSet that filters out terms not matching the provided predicate on {@code add()}.
*/
private class FieldFilteringTermSet extends TreeSet {
@Override
@@ -546,11 +546,11 @@ public FieldInfos getFieldInfos() {
}
@Override
- public Fields fields() throws IOException {
+ public IndexedFields fields() throws IOException {
return new FilterFields(super.fields()) {
@Override
- public Terms terms(String field) throws IOException {
- return super.terms(fieldName);
+ public IndexedField indexedField(String field) throws IOException {
+ return super.indexedField(fieldName);
}
@Override
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/PostingsWithTermVectorsOffsetStrategy.java b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/PostingsWithTermVectorsOffsetStrategy.java
index b9086a7400ab..c89f0bbe76de 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/PostingsWithTermVectorsOffsetStrategy.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/PostingsWithTermVectorsOffsetStrategy.java
@@ -24,7 +24,7 @@
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.automaton.CharacterRunAutomaton;
@@ -51,7 +51,7 @@ public List getOffsetsEnums(IndexReader reader, int docId, String c
docId -= LeafReaderContext.docBase; // adjust 'doc' to be within this atomic reader
}
- Terms docTerms = leafReader.getTermVector(docId, field);
+ IndexedField docTerms = leafReader.getTermVector(docId, field);
if (docTerms == null) {
return Collections.emptyList();
}
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/TermVectorFilteredLeafReader.java b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/TermVectorFilteredLeafReader.java
index 954024cbe066..65b86cd22210 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/TermVectorFilteredLeafReader.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/TermVectorFilteredLeafReader.java
@@ -18,11 +18,11 @@
import java.io.IOException;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.FilterLeafReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.PostingsEnum;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.automaton.CompiledAutomaton;
@@ -37,7 +37,7 @@
final class TermVectorFilteredLeafReader extends FilterLeafReader {
// NOTE: super ("in") is baseLeafReader
- private final Terms filterTerms;
+ private final IndexedField filterTerms;
/**
*
Construct a FilterLeafReader based on the specified base reader.
@@ -46,38 +46,38 @@ final class TermVectorFilteredLeafReader extends FilterLeafReader {
* @param baseLeafReader full/original reader.
* @param filterTerms set of terms to filter by -- probably from a TermVector or MemoryIndex.
*/
- TermVectorFilteredLeafReader(LeafReader baseLeafReader, Terms filterTerms) {
+ TermVectorFilteredLeafReader(LeafReader baseLeafReader, IndexedField filterTerms) {
super(baseLeafReader);
this.filterTerms = filterTerms;
}
@Override
- public Fields fields() throws IOException {
+ public IndexedFields fields() throws IOException {
return new TermVectorFilteredFields(in.fields(), filterTerms);
}
private static final class TermVectorFilteredFields extends FilterLeafReader.FilterFields {
// NOTE: super ("in") is baseFields
- private final Terms filterTerms;
+ private final IndexedField filterTerms;
- TermVectorFilteredFields(Fields baseFields, Terms filterTerms) {
+ TermVectorFilteredFields(IndexedFields baseFields, IndexedField filterTerms) {
super(baseFields);
this.filterTerms = filterTerms;
}
@Override
- public Terms terms(String field) throws IOException {
- return new TermsFilteredTerms(in.terms(field), filterTerms);
+ public IndexedField indexedField(String field) throws IOException {
+ return new TermsFilteredTerms(in.indexedField(field), filterTerms);
}
}
- private static final class TermsFilteredTerms extends FilterLeafReader.FilterTerms {
+ private static final class TermsFilteredTerms extends FilterLeafReader.FilterField {
// NOTE: super ("in") is the baseTerms
- private final Terms filterTerms;
+ private final IndexedField filterTerms;
- TermsFilteredTerms(Terms baseTerms, Terms filterTerms) {
+ TermsFilteredTerms(IndexedField baseTerms, IndexedField filterTerms) {
super(baseTerms);
this.filterTerms = filterTerms;
}
@@ -87,13 +87,13 @@ private static final class TermsFilteredTerms extends FilterLeafReader.FilterTer
//TODO delegate getMin, getMax to filterTerms
@Override
- public TermsEnum iterator() throws IOException {
- return new TermVectorFilteredTermsEnum(in.iterator(), filterTerms.iterator());
+ public TermsEnum getTermsEnum() throws IOException {
+ return new TermVectorFilteredTermsEnum(in.getTermsEnum(), filterTerms.getTermsEnum());
}
@Override
public TermsEnum intersect(CompiledAutomaton compiled, BytesRef startTerm) throws IOException {
- return new TermVectorFilteredTermsEnum(in.iterator(), filterTerms.intersect(compiled, startTerm));
+ return new TermVectorFilteredTermsEnum(in.getTermsEnum(), filterTerms.intersect(compiled, startTerm));
}
}
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/TermVectorOffsetStrategy.java b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/TermVectorOffsetStrategy.java
index f6eedc417661..43b42967663b 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/TermVectorOffsetStrategy.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/TermVectorOffsetStrategy.java
@@ -22,7 +22,7 @@
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.search.highlight.TermVectorLeafReader;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.automaton.CharacterRunAutomaton;
@@ -45,7 +45,7 @@ public UnifiedHighlighter.OffsetSource getOffsetSource() {
@Override
public List getOffsetsEnums(IndexReader reader, int docId, String content) throws IOException {
- Terms tvTerms = reader.getTermVector(docId, field);
+ IndexedField tvTerms = reader.getTermVector(docId, field);
if (tvTerms == null) {
return Collections.emptyList();
}
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/UnifiedHighlighter.java b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/UnifiedHighlighter.java
index bbcfd5b0203e..7f6757d021ef 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/UnifiedHighlighter.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/uhighlight/UnifiedHighlighter.java
@@ -40,7 +40,7 @@
import org.apache.lucene.index.BaseCompositeReader;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.FilterLeafReader;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexReader;
@@ -1051,14 +1051,14 @@ protected void doClose() throws IOException {
}
private int lastDocId = -1;
- private Fields tvFields;
+ private IndexedFields tvFields;
TermVectorReusingLeafReader(LeafReader in) {
super(in);
}
@Override
- public Fields getTermVectors(int docID) throws IOException {
+ public IndexedFields getTermVectors(int docID) throws IOException {
if (docID != lastDocId) {
lastDocId = docID;
tvFields = in.getTermVectors(docID);
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java
index f67ba8044344..352c78123972 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java
@@ -22,10 +22,10 @@
import java.util.Set;
import org.apache.lucene.index.PostingsEnum;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRefBuilder;
@@ -78,20 +78,20 @@ public FieldTermStack( IndexReader reader, int docId, String fieldName, final Fi
// just return to make null snippet if un-matched fieldName specified when fieldMatch == true
if( termSet == null ) return;
- final Fields vectors = reader.getTermVectors(docId);
+ final IndexedFields vectors = reader.getTermVectors(docId);
if (vectors == null) {
// null snippet
return;
}
- final Terms vector = vectors.terms(fieldName);
+ final IndexedField vector = vectors.indexedField(fieldName);
if (vector == null || vector.hasPositions() == false) {
// null snippet
return;
}
final CharsRefBuilder spare = new CharsRefBuilder();
- final TermsEnum termsEnum = vector.iterator();
+ final TermsEnum termsEnum = vector.getTermsEnum();
PostingsEnum dpEnum = null;
BytesRef text;
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/package-info.java b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/package-info.java
index 39d0f5d5441d..00a727268702 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/package-info.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/package-info.java
@@ -168,7 +168,7 @@
*
* query = das alte testament (The Old Testament)
*
- *
Terms in fragment
sum-of-distinct-weights
sum-of-boosts
+ *
Terms in fragment
sum-of-distinct-weights
sum-of-boosts
*
das alte testament
5.339621
3.0
*
das alte testament
5.339621
3.0
*
das testament alte
5.339621
3.0
diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java b/lucene/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java
index d49434a248e3..afb22c692520 100644
--- a/lucene/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java
+++ b/lucene/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java
@@ -36,7 +36,7 @@
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.BaseTermVectorsFormatTestCase;
import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.RandomIndexWriter;
@@ -461,7 +461,7 @@ public void testMaxStartOffsetConsistency() throws IOException {
reader = writer.getReader();
}
try {
- Fields tvFields = reader.getTermVectors(0);
+ IndexedFields tvFields = reader.getTermVectors(0);
for (int maxStartOffset = -1; maxStartOffset <= TEXT.length(); maxStartOffset++) {
TokenStream tvStream = TokenSources.getTokenStream("fld_tv", tvFields, TEXT, analyzer, maxStartOffset);
TokenStream anaStream = TokenSources.getTokenStream("fld_notv", tvFields, TEXT, analyzer, maxStartOffset);
diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterTermVec.java b/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterTermVec.java
index 57d398bd32a1..aaa1ba96facf 100644
--- a/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterTermVec.java
+++ b/lucene/highlighter/src/test/org/apache/lucene/search/uhighlight/TestUnifiedHighlighterTermVec.java
@@ -122,7 +122,7 @@ public LeafReader wrap(LeafReader reader) {
BitSet seenDocIDs = new BitSet();
@Override
- public Fields getTermVectors(int docID) throws IOException {
+ public IndexedFields getTermVectors(int docID) throws IOException {
// if we're invoked by ParallelLeafReader then we can't do our assertion. TODO see LUCENE-6868
if (calledBy(ParallelLeafReader.class) == false
&& calledBy(CheckIndex.class) == false) {
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/TermsIncludingScoreQuery.java b/lucene/join/src/java/org/apache/lucene/search/join/TermsIncludingScoreQuery.java
index 28d3044788cd..9c2280843d21 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/TermsIncludingScoreQuery.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/TermsIncludingScoreQuery.java
@@ -25,7 +25,7 @@
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Explanation;
@@ -113,9 +113,9 @@ public void extractTerms(Set terms) {}
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
- Terms terms = context.reader().terms(field);
+ IndexedField terms = context.reader().indexedField(field);
if (terms != null) {
- TermsEnum segmentTermsEnum = terms.iterator();
+ TermsEnum segmentTermsEnum = terms.getTermsEnum();
BytesRef spare = new BytesRef();
PostingsEnum postingsEnum = null;
for (int i = 0; i < TermsIncludingScoreQuery.this.terms.size(); i++) {
@@ -133,7 +133,7 @@ public Explanation explain(LeafReaderContext context, int doc) throws IOExceptio
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
- Terms terms = context.reader().terms(field);
+ IndexedField terms = context.reader().indexedField(field);
if (terms == null) {
return null;
}
@@ -141,7 +141,7 @@ public Scorer scorer(LeafReaderContext context) throws IOException {
// what is the runtime...seems ok?
final long cost = context.reader().maxDoc() * terms.size();
- TermsEnum segmentTermsEnum = terms.iterator();
+ TermsEnum segmentTermsEnum = terms.getTermsEnum();
if (multipleValuesPerDocument) {
return new MVInOrderScorer(this, segmentTermsEnum, context.reader().maxDoc(), cost);
} else {
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/TermsQuery.java b/lucene/join/src/java/org/apache/lucene/search/join/TermsQuery.java
index 63561c39e3ba..9a55f4c12338 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/TermsQuery.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/TermsQuery.java
@@ -17,7 +17,7 @@
package org.apache.lucene.search.join;
import org.apache.lucene.index.FilteredTermsEnum;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.Query;
@@ -51,12 +51,12 @@ class TermsQuery extends MultiTermQuery {
}
@Override
- protected TermsEnum getTermsEnum(Terms terms, AttributeSource atts) throws IOException {
+ protected TermsEnum getTermsEnum(IndexedField terms, AttributeSource atts) throws IOException {
if (this.terms.size() == 0) {
return TermsEnum.EMPTY;
}
- return new SeekingTermSetTermsEnum(terms.iterator(), this.terms, ords);
+ return new SeekingTermSetTermsEnum(terms.getTermsEnum(), this.terms, ords);
}
@Override
diff --git a/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java b/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java
index 6e20f232a2bd..5596dbdb2f05 100644
--- a/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java
+++ b/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java
@@ -64,7 +64,7 @@
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
@@ -1261,13 +1261,13 @@ public boolean needsScores() {
final Map docToJoinScore = new HashMap<>();
if (multipleValuesPerDocument) {
- Terms terms = MultiFields.getTerms(topLevelReader, toField);
+ IndexedField terms = MultiFields.getIndexedField(topLevelReader, toField);
if (terms != null) {
PostingsEnum postingsEnum = null;
SortedSet joinValues = new TreeSet<>();
joinValues.addAll(joinValueToJoinScores.keySet());
for (BytesRef joinValue : joinValues) {
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
if (termsEnum.seekExact(joinValue)) {
postingsEnum = termsEnum.postings(postingsEnum, PostingsEnum.NONE);
JoinScore joinScore = joinValueToJoinScores.get(joinValue);
diff --git a/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java b/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
index b1adf60130e2..01d19448a807 100644
--- a/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
+++ b/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
@@ -835,11 +835,11 @@ private final class Info {
* Term strings and their positions for this field: Map <String
* termText, ArrayIntList positions>
*/
- private BytesRefHash terms; // note unfortunate variable name class with Terms type
+ private BytesRefHash terms; // note unfortunate variable name clash with IndexedField type
private SliceByteStartArray sliceArray;
- /** Terms sorted ascending by term text; computed on demand */
+ /** Terms sorted ascending by term text; computed on demand */
private transient int[] sortedTerms;
/** Number of added tokens for this field */
@@ -1072,7 +1072,7 @@ private void prepareForUsage() {
*/
private final class MemoryIndexReader extends LeafReader {
- private Fields memoryFields = new MemoryFields(fields);
+ private IndexedFields memoryFields = new MemoryFields(fields);
private MemoryIndexReader() {
super(); // avoid as much superclass baggage as possible
@@ -1209,11 +1209,11 @@ public void checkIntegrity() throws IOException {
}
@Override
- public Fields fields() {
+ public IndexedFields fields() {
return memoryFields;
}
- private class MemoryFields extends Fields {
+ private class MemoryFields extends IndexedFields {
private final Map fields;
@@ -1230,15 +1230,15 @@ public Iterator iterator() {
}
@Override
- public Terms terms(final String field) {
+ public IndexedField indexedField(final String field) {
final Info info = fields.get(field);
if (info == null || info.numTokens <= 0) {
return null;
}
- return new Terms() {
+ return new IndexedField() {
@Override
- public TermsEnum iterator() {
+ public TermsEnum getTermsEnum() {
return new MemoryTermsEnum(info);
}
@@ -1564,7 +1564,7 @@ public int getDocCount() {
}
@Override
- public Fields getTermVectors(int docID) {
+ public IndexedFields getTermVectors(int docID) {
if (docID == 0) {
return fields();
} else {
diff --git a/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndex.java b/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndex.java
index ab4fe21bbebd..6ce3392fa6fc 100644
--- a/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndex.java
+++ b/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndex.java
@@ -125,7 +125,7 @@ public void testSeekByTermOrd() throws IOException {
mi.addField("field", "some terms be here", analyzer);
IndexSearcher searcher = mi.createSearcher();
LeafReader reader = (LeafReader) searcher.getIndexReader();
- TermsEnum terms = reader.fields().terms("field").iterator();
+ TermsEnum terms = reader.fields().indexedField("field").getTermsEnum();
terms.seekExact(0);
assertEquals("be", terms.term().utf8ToString());
TestUtil.checkReader(reader);
@@ -315,7 +315,7 @@ public void testDocValuesDoNotAffectBoostPositionsOrOffset() throws Exception {
doc.add(new TextField("text", "quick brown fox", Field.Store.NO));
MemoryIndex mi = MemoryIndex.fromDocument(doc, analyzer, true, true);
LeafReader leafReader = mi.createSearcher().getIndexReader().leaves().get(0).reader();
- TermsEnum tenum = leafReader.terms("text").iterator();
+ TermsEnum tenum = leafReader.indexedField("text").getTermsEnum();
assertEquals("brown", tenum.next().utf8ToString());
PostingsEnum penum = tenum.postings(null, PostingsEnum.OFFSETS);
@@ -412,7 +412,7 @@ public void testPointValuesDoNotAffectBoostPositionsOrOffset() throws Exception
mi.addField(new BinaryPoint("text", "quick".getBytes(StandardCharsets.UTF_8)), analyzer, 5f);
mi.addField(new BinaryPoint("text", "brown".getBytes(StandardCharsets.UTF_8)), analyzer, 5f);
LeafReader leafReader = mi.createSearcher().getIndexReader().leaves().get(0).reader();
- TermsEnum tenum = leafReader.terms("text").iterator();
+ TermsEnum tenum = leafReader.indexedField("text").getTermsEnum();
assertEquals("brown", tenum.next().utf8ToString());
PostingsEnum penum = tenum.postings(null, PostingsEnum.OFFSETS);
diff --git a/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstRAMDir.java b/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstRAMDir.java
index 03c17a502139..c04dbc234cd1 100644
--- a/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstRAMDir.java
+++ b/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstRAMDir.java
@@ -171,10 +171,10 @@ public void assertAgainstRAMDirectory(MemoryIndex memory) throws Exception {
private void duellReaders(CompositeReader other, LeafReader memIndexReader)
throws IOException {
- Fields memFields = memIndexReader.fields();
+ IndexedFields memFields = memIndexReader.fields();
for (String field : MultiFields.getFields(other)) {
- Terms memTerms = memFields.terms(field);
- Terms iwTerms = memIndexReader.terms(field);
+ IndexedField memTerms = memFields.indexedField(field);
+ IndexedField iwTerms = memIndexReader.indexedField(field);
if (iwTerms == null) {
assertNull(memTerms);
} else {
@@ -192,8 +192,8 @@ private void duellReaders(CompositeReader other, LeafReader memIndexReader)
assertEquals(iwTerms.getDocCount(), memTerms.getDocCount());
assertEquals(iwTerms.getSumDocFreq(), memTerms.getSumDocFreq());
assertEquals(iwTerms.getSumTotalTermFreq(), memTerms.getSumTotalTermFreq());
- TermsEnum iwTermsIter = iwTerms.iterator();
- TermsEnum memTermsIter = memTerms.iterator();
+ TermsEnum iwTermsIter = iwTerms.getTermsEnum();
+ TermsEnum memTermsIter = memTerms.getTermsEnum();
if (iwTerms.hasPositions()) {
final boolean offsets = iwTerms.hasOffsets() && memTerms.hasOffsets();
@@ -327,7 +327,7 @@ public void testDocsEnumStart() throws Exception {
assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
// now reuse and check again
- TermsEnum te = reader.terms("foo").iterator();
+ TermsEnum te = reader.indexedField("foo").getTermsEnum();
assertTrue(te.seekExact(new BytesRef("bar")));
disi = te.postings(disi, PostingsEnum.NONE);
docid = disi.docID();
@@ -356,7 +356,7 @@ public void testDocsAndPositionsEnumStart() throws Exception {
memory.addField("foo", "bar", analyzer);
LeafReader reader = (LeafReader) memory.createSearcher().getIndexReader();
TestUtil.checkReader(reader);
- assertEquals(1, reader.terms("foo").getSumTotalTermFreq());
+ assertEquals(1, reader.indexedField("foo").getSumTotalTermFreq());
PostingsEnum disi = reader.postings(new Term("foo", "bar"), PostingsEnum.ALL);
int docid = disi.docID();
assertEquals(-1, docid);
@@ -366,7 +366,7 @@ public void testDocsAndPositionsEnumStart() throws Exception {
assertEquals(3, disi.endOffset());
// now reuse and check again
- TermsEnum te = reader.terms("foo").iterator();
+ TermsEnum te = reader.indexedField("foo").getTermsEnum();
assertTrue(te.seekExact(new BytesRef("bar")));
disi = te.postings(disi);
docid = disi.docID();
@@ -410,7 +410,7 @@ public void testSameFieldAddedMultipleTimes() throws IOException {
mindex.addField("field", "jumps over the", mockAnalyzer);
LeafReader reader = (LeafReader) mindex.createSearcher().getIndexReader();
TestUtil.checkReader(reader);
- assertEquals(7, reader.terms("field").getSumTotalTermFreq());
+ assertEquals(7, reader.indexedField("field").getSumTotalTermFreq());
PhraseQuery query = new PhraseQuery("field", "fox", "jumps");
assertTrue(mindex.search(query) > 0.1);
mindex.reset();
@@ -433,7 +433,7 @@ public void testNonExistentField() throws IOException {
assertNull(reader.getNormValues("not-in-index"));
assertNull(reader.postings(new Term("not-in-index", "foo")));
assertNull(reader.postings(new Term("not-in-index", "foo"), PostingsEnum.ALL));
- assertNull(reader.terms("not-in-index"));
+ assertNull(reader.indexedField("not-in-index"));
}
public void testDocValuesMemoryIndexVsNormalIndex() throws Exception {
@@ -690,10 +690,10 @@ public void testDuelMemoryIndexCoreDirectoryWithArrayField() throws Exception {
memIndex.addField(field_name, "foo bar foo bar foo", mockAnalyzer);
//compare term vectors
- Terms ramTv = reader.getTermVector(0, field_name);
+ IndexedField ramTv = reader.getTermVector(0, field_name);
IndexReader memIndexReader = memIndex.createSearcher().getIndexReader();
TestUtil.checkReader(memIndexReader);
- Terms memTv = memIndexReader.getTermVector(0, field_name);
+ IndexedField memTv = memIndexReader.getTermVector(0, field_name);
compareTermVectors(ramTv, memTv, field_name);
memIndexReader.close();
@@ -702,10 +702,10 @@ public void testDuelMemoryIndexCoreDirectoryWithArrayField() throws Exception {
}
- protected void compareTermVectors(Terms terms, Terms memTerms, String field_name) throws IOException {
+ protected void compareTermVectors(IndexedField terms, IndexedField memTerms, String field_name) throws IOException {
- TermsEnum termEnum = terms.iterator();
- TermsEnum memTermEnum = memTerms.iterator();
+ TermsEnum termEnum = terms.getTermsEnum();
+ TermsEnum memTermEnum = memTerms.getTermsEnum();
while (termEnum.next() != null) {
assertNotNull(memTermEnum.next());
diff --git a/lucene/misc/src/java/org/apache/lucene/misc/HighFreqTerms.java b/lucene/misc/src/java/org/apache/lucene/misc/HighFreqTerms.java
index 01a2d9dcef36..3562dd514281 100644
--- a/lucene/misc/src/java/org/apache/lucene/misc/HighFreqTerms.java
+++ b/lucene/misc/src/java/org/apache/lucene/misc/HighFreqTerms.java
@@ -19,9 +19,9 @@
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.PriorityQueue;
@@ -98,24 +98,24 @@ public static TermStats[] getHighFreqTerms(IndexReader reader, int numTerms, Str
TermStatsQueue tiq = null;
if (field != null) {
- Terms terms = MultiFields.getTerms(reader, field);
+ IndexedField terms = MultiFields.getIndexedField(reader, field);
if (terms == null) {
throw new RuntimeException("field " + field + " not found");
}
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
tiq = new TermStatsQueue(numTerms, comparator);
tiq.fill(field, termsEnum);
} else {
- Fields fields = MultiFields.getFields(reader);
+ IndexedFields fields = MultiFields.getFields(reader);
if (fields.size() == 0) {
throw new RuntimeException("no fields found for this index");
}
tiq = new TermStatsQueue(numTerms, comparator);
for (String fieldName : fields) {
- Terms terms = fields.terms(fieldName);
+ IndexedField terms = fields.indexedField(fieldName);
if (terms != null) {
- tiq.fill(fieldName, terms.iterator());
+ tiq.fill(fieldName, terms.getTermsEnum());
}
}
}
diff --git a/lucene/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java b/lucene/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java
index 7c9745705165..87c2ce3e12cf 100644
--- a/lucene/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java
+++ b/lucene/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java
@@ -70,7 +70,7 @@ public void testSplitRR() throws Exception {
assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); // rounding error
Document doc = ir.document(0);
assertEquals("0", doc.get("id"));
- TermsEnum te = MultiFields.getTerms(ir, "id").iterator();
+ TermsEnum te = MultiFields.getIndexedField(ir, "id").getTermsEnum();
assertEquals(TermsEnum.SeekStatus.NOT_FOUND, te.seekCeil(new BytesRef("1")));
assertNotSame("1", te.term().utf8ToString());
ir.close();
@@ -78,7 +78,7 @@ public void testSplitRR() throws Exception {
assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1);
doc = ir.document(0);
assertEquals("1", doc.get("id"));
- te = MultiFields.getTerms(ir, "id").iterator();
+ te = MultiFields.getIndexedField(ir, "id").getTermsEnum();
assertEquals(TermsEnum.SeekStatus.NOT_FOUND, te.seekCeil(new BytesRef("0")));
assertNotSame("0", te.term().utf8ToString());
@@ -88,7 +88,7 @@ public void testSplitRR() throws Exception {
doc = ir.document(0);
assertEquals("2", doc.get("id"));
- te = MultiFields.getTerms(ir, "id").iterator();
+ te = MultiFields.getIndexedField(ir, "id").getTermsEnum();
assertEquals(TermsEnum.SeekStatus.NOT_FOUND, te.seekCeil(new BytesRef("1")));
assertNotSame("1", te.term());
@@ -128,7 +128,7 @@ public void testSplitSeq() throws Exception {
doc = ir.document(0);
assertEquals(start + "", doc.get("id"));
// make sure the deleted doc is not here
- TermsEnum te = MultiFields.getTerms(ir, "id").iterator();
+ TermsEnum te = MultiFields.getIndexedField(ir, "id").getTermsEnum();
Term t = new Term("id", (NUM_DOCS - 1) + "");
assertEquals(TermsEnum.SeekStatus.NOT_FOUND, te.seekCeil(new BytesRef(t.text())));
assertNotSame(t.text(), te.term().utf8ToString());
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/CommonTermsQuery.java b/lucene/queries/src/java/org/apache/lucene/queries/CommonTermsQuery.java
index 857f4d3fdbcc..51702aa9e35b 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/CommonTermsQuery.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/CommonTermsQuery.java
@@ -22,12 +22,12 @@
import java.util.List;
import java.util.Objects;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
@@ -214,16 +214,16 @@ public void collectTermContext(IndexReader reader,
Term[] queryTerms) throws IOException {
TermsEnum termsEnum = null;
for (LeafReaderContext context : leaves) {
- final Fields fields = context.reader().fields();
+ final IndexedFields fields = context.reader().fields();
for (int i = 0; i < queryTerms.length; i++) {
Term term = queryTerms[i];
TermContext termContext = contextArray[i];
- final Terms terms = fields.terms(term.field());
+ final IndexedField terms = fields.indexedField(term.field());
if (terms == null) {
// field does not exist
continue;
}
- termsEnum = terms.iterator();
+ termsEnum = terms.getTermsEnum();
assert termsEnum != null;
if (termsEnum == TermsEnum.EMPTY) continue;
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/JoinDocFreqValueSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/JoinDocFreqValueSource.java
index db936ba655c6..53ca1f0e4c68 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/JoinDocFreqValueSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/JoinDocFreqValueSource.java
@@ -25,7 +25,7 @@
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.ReaderUtil;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.docvalues.IntDocValues;
@@ -57,8 +57,8 @@ public FunctionValues getValues(Map context, LeafReaderContext readerContext) th
{
final BinaryDocValues terms = DocValues.getBinary(readerContext.reader(), field);
final IndexReader top = ReaderUtil.getTopLevelContext(readerContext).reader();
- Terms t = MultiFields.getTerms(top, qfield);
- final TermsEnum termsEnum = t == null ? TermsEnum.EMPTY : t.iterator();
+ IndexedField t = MultiFields.getIndexedField(top, qfield);
+ final TermsEnum termsEnum = t == null ? TermsEnum.EMPTY : t.getTermsEnum();
return new IntDocValues(this) {
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/SumTotalTermFreqValueSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/SumTotalTermFreqValueSource.java
index 746ad8e9d186..35b99ef3bbbe 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/SumTotalTermFreqValueSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/SumTotalTermFreqValueSource.java
@@ -17,7 +17,7 @@
package org.apache.lucene.queries.function.valuesource;
import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.docvalues.LongDocValues;
@@ -58,7 +58,7 @@ public FunctionValues getValues(Map context, LeafReaderContext readerContext) th
public void createWeight(Map context, IndexSearcher searcher) throws IOException {
long sumTotalTermFreq = 0;
for (LeafReaderContext readerContext : searcher.getTopReaderContext().leaves()) {
- Terms terms = readerContext.reader().terms(indexedField);
+ IndexedField terms = readerContext.reader().indexedField(indexedField);
if (terms == null) continue;
long v = terms.getSumTotalTermFreq();
if (v == -1) {
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TFValueSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TFValueSource.java
index f0e252c6980b..33efd921f7a0 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TFValueSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TFValueSource.java
@@ -20,9 +20,9 @@
import java.util.Map;
import org.apache.lucene.index.PostingsEnum;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.docvalues.FloatDocValues;
@@ -50,8 +50,8 @@ public String name() {
@Override
public FunctionValues getValues(Map context, LeafReaderContext readerContext) throws IOException {
- Fields fields = readerContext.reader().fields();
- final Terms terms = fields.terms(indexedField);
+ IndexedFields fields = readerContext.reader().fields();
+ final IndexedField terms = fields.indexedField(indexedField);
IndexSearcher searcher = (IndexSearcher)context.get("searcher");
final TFIDFSimilarity similarity = IDFValueSource.asTFIDF(searcher.getSimilarity(true), indexedField);
if (similarity == null) {
@@ -69,7 +69,7 @@ public void reset() throws IOException {
// no one should call us for deleted docs?
if (terms != null) {
- final TermsEnum termsEnum = terms.iterator();
+ final TermsEnum termsEnum = terms.getTermsEnum();
if (termsEnum.seekExact(indexedBytes)) {
docs = termsEnum.postings(null);
} else {
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TermFreqValueSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TermFreqValueSource.java
index 789e3e9e6e3a..a7cc3012c060 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TermFreqValueSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TermFreqValueSource.java
@@ -20,9 +20,9 @@
import java.util.Map;
import org.apache.lucene.index.PostingsEnum;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.docvalues.IntDocValues;
@@ -48,8 +48,8 @@ public String name() {
@Override
public FunctionValues getValues(Map context, LeafReaderContext readerContext) throws IOException {
- Fields fields = readerContext.reader().fields();
- final Terms terms = fields.terms(indexedField);
+ IndexedFields fields = readerContext.reader().fields();
+ final IndexedField terms = fields.indexedField(indexedField);
return new IntDocValues(this) {
PostingsEnum docs ;
@@ -62,7 +62,7 @@ public void reset() throws IOException {
// no one should call us for deleted docs?
if (terms != null) {
- final TermsEnum termsEnum = terms.iterator();
+ final TermsEnum termsEnum = terms.getTermsEnum();
if (termsEnum.seekExact(indexedBytes)) {
docs = termsEnum.postings(null);
} else {
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java b/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java
index ea02af3f8f43..5f9a3e3d21a7 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/mlt/MoreLikeThis.java
@@ -29,12 +29,12 @@
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.document.Document;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
@@ -730,10 +730,10 @@ public String describeParams() {
private PriorityQueue retrieveTerms(int docNum) throws IOException {
Map> field2termFreqMap = new HashMap<>();
for (String fieldName : fieldNames) {
- final Fields vectors = ir.getTermVectors(docNum);
- final Terms vector;
+ final IndexedFields vectors = ir.getTermVectors(docNum);
+ final IndexedField vector;
if (vectors != null) {
- vector = vectors.terms(fieldName);
+ vector = vectors.indexedField(fieldName);
} else {
vector = null;
}
@@ -781,13 +781,13 @@ private PriorityQueue retrieveTerms(Map> f
* @param field2termFreqMap a Map of terms and their frequencies per field
* @param vector List of terms and their frequencies for a doc/field
*/
- private void addTermFrequencies(Map> field2termFreqMap, Terms vector, String fieldName) throws IOException {
+ private void addTermFrequencies(Map> field2termFreqMap, IndexedField vector, String fieldName) throws IOException {
Map termFreqMap = field2termFreqMap.get(fieldName);
if (termFreqMap == null) {
termFreqMap = new HashMap<>();
field2termFreqMap.put(fieldName, termFreqMap);
}
- final TermsEnum termsEnum = vector.iterator();
+ final TermsEnum termsEnum = vector.getTermsEnum();
final CharsRefBuilder spare = new CharsRefBuilder();
BytesRef text;
while((text = termsEnum.next()) != null) {
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/payloads/SpanPayloadCheckQuery.java b/lucene/queries/src/java/org/apache/lucene/queries/payloads/SpanPayloadCheckQuery.java
index 57ba678e34d9..3ffc76bb6407 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/payloads/SpanPayloadCheckQuery.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/payloads/SpanPayloadCheckQuery.java
@@ -24,7 +24,7 @@
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.search.spans.FilterSpans;
@@ -105,7 +105,7 @@ public SpanScorer scorer(LeafReaderContext context) throws IOException {
if (field == null)
return null;
- Terms terms = context.reader().terms(field);
+ IndexedField terms = context.reader().indexedField(field);
if (terms != null && terms.hasPositions() == false) {
throw new IllegalStateException("field \"" + field + "\" was indexed without position data; cannot run SpanQuery (query=" + parentQuery + ")");
}
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/CommonTermsQueryTest.java b/lucene/queries/src/test/org/apache/lucene/queries/CommonTermsQueryTest.java
index 716e2fb19aa7..b1556f458f05 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/CommonTermsQueryTest.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/CommonTermsQueryTest.java
@@ -34,7 +34,7 @@
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanClause;
@@ -402,7 +402,7 @@ public void testRandomIndex() throws IOException {
DirectoryReader reader = w.getReader();
LeafReader wrapper = getOnlyLeafReader(reader);
String field = "body";
- Terms terms = wrapper.terms(field);
+ IndexedField terms = wrapper.indexedField(field);
PriorityQueue lowFreqQueue = new PriorityQueue(
5) {
@@ -422,7 +422,7 @@ protected boolean lessThan(TermAndFreq a, TermAndFreq b) {
};
try {
- TermsEnum iterator = terms.iterator();
+ TermsEnum iterator = terms.getTermsEnum();
while (iterator.next() != null) {
if (highFreqQueue.size() < 5) {
highFreqQueue.add(new TermAndFreq(
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndPrefixQuery.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndPrefixQuery.java
index 587c4fbe1cf7..e9e20682d77a 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndPrefixQuery.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndPrefixQuery.java
@@ -16,7 +16,7 @@
*/
package org.apache.lucene.queryparser.surround.query;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.StringHelper;
import org.apache.lucene.index.TermsEnum;
@@ -61,9 +61,9 @@ public void visitMatchingTerms(
MatchingTermVisitor mtv) throws IOException
{
/* inspired by PrefixQuery.rewrite(): */
- Terms terms = MultiFields.getTerms(reader, fieldName);
+ IndexedField terms = MultiFields.getIndexedField(reader, fieldName);
if (terms != null) {
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
boolean skip = false;
TermsEnum.SeekStatus status = termsEnum.seekCeil(new BytesRef(getPrefix()));
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndTermQuery.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndTermQuery.java
index ffa8c783890e..9469577cd6c7 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndTermQuery.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndTermQuery.java
@@ -20,7 +20,7 @@
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.util.BytesRef;
@@ -51,9 +51,9 @@ public void visitMatchingTerms(
MatchingTermVisitor mtv) throws IOException
{
/* check term presence in index here for symmetry with other SimpleTerm's */
- Terms terms = MultiFields.getTerms(reader, fieldName);
+ IndexedField terms = MultiFields.getIndexedField(reader, fieldName);
if (terms != null) {
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
TermsEnum.SeekStatus status = termsEnum.seekCeil(new BytesRef(getTermText()));
if (status == TermsEnum.SeekStatus.FOUND) {
diff --git a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndTruncQuery.java b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndTruncQuery.java
index b98792156736..f0cce5bc8a9b 100644
--- a/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndTruncQuery.java
+++ b/lucene/queryparser/src/java/org/apache/lucene/queryparser/surround/query/SrndTruncQuery.java
@@ -17,7 +17,7 @@
package org.apache.lucene.queryparser.surround.query;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.StringHelper;
import org.apache.lucene.index.IndexReader;
@@ -91,11 +91,11 @@ public void visitMatchingTerms(
MatchingTermVisitor mtv) throws IOException
{
int prefixLength = prefix.length();
- Terms terms = MultiFields.getTerms(reader, fieldName);
+ IndexedField terms = MultiFields.getIndexedField(reader, fieldName);
if (terms != null) {
Matcher matcher = pattern.matcher("");
try {
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
TermsEnum.SeekStatus status = termsEnum.seekCeil(prefixRef);
BytesRef text;
diff --git a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/VersionBlockTreeTermsReader.java b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/VersionBlockTreeTermsReader.java
index 167bb4808e4b..05709cd778b6 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/VersionBlockTreeTermsReader.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/VersionBlockTreeTermsReader.java
@@ -31,7 +31,7 @@
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.SegmentReadState;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.Accountables;
@@ -196,7 +196,7 @@ public Iterator iterator() {
}
@Override
- public Terms terms(String field) throws IOException {
+ public IndexedField indexedField(String field) throws IOException {
assert field != null;
return fields.get(field);
}
diff --git a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/VersionBlockTreeTermsWriter.java b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/VersionBlockTreeTermsWriter.java
index d83b9155e5b8..4de387f10254 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/VersionBlockTreeTermsWriter.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/VersionBlockTreeTermsWriter.java
@@ -27,11 +27,11 @@
import org.apache.lucene.codecs.blocktree.BlockTreeTermsWriter;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.SegmentWriteState;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RAMOutputStream;
@@ -220,19 +220,19 @@ private void writeIndexTrailer(IndexOutput indexOut, long dirStart) throws IOExc
}
@Override
- public void write(Fields fields) throws IOException {
+ public void write(IndexedFields fields) throws IOException {
String lastField = null;
for(String field : fields) {
assert lastField == null || lastField.compareTo(field) < 0;
lastField = field;
- Terms terms = fields.terms(field);
+ IndexedField terms = fields.indexedField(field);
if (terms == null) {
continue;
}
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
TermsWriter termsWriter = new TermsWriter(fieldInfos.fieldInfo(field));
while (true) {
diff --git a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/VersionFieldReader.java b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/VersionFieldReader.java
index 581201f9ea4c..4c188d0c4f35 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/VersionFieldReader.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/VersionFieldReader.java
@@ -22,7 +22,7 @@
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.IndexOptions;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.ByteArrayDataInput;
import org.apache.lucene.store.IndexInput;
@@ -32,9 +32,9 @@
import org.apache.lucene.util.fst.FST;
import org.apache.lucene.util.fst.PairOutputs.Pair;
-/** BlockTree's implementation of {@link Terms}. */
+/** BlockTree's implementation of {@link IndexedField}. */
// public for CheckIndex:
-final class VersionFieldReader extends Terms implements Accountable {
+final class VersionFieldReader extends IndexedField implements Accountable {
final long numTerms;
final FieldInfo fieldInfo;
final long sumTotalTermFreq;
@@ -133,7 +133,7 @@ public boolean hasPayloads() {
}
@Override
- public TermsEnum iterator() throws IOException {
+ public TermsEnum getTermsEnum() throws IOException {
return new IDVersionSegmentTermsEnum(this);
}
diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java
index 8bd7b894afa3..729548c3b80b 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/FuzzyLikeThisQuery.java
@@ -31,7 +31,7 @@
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
@@ -188,7 +188,7 @@ public void addTerms(String queryString, String fieldName,float minSimilarity, i
private void addTerms(IndexReader reader, FieldVals f, ScoreTermQueue q) throws IOException {
if (f.queryString == null) return;
- final Terms terms = MultiFields.getTerms(reader, f.fieldName);
+ final IndexedField terms = MultiFields.getIndexedField(reader, f.fieldName);
if (terms == null) {
return;
}
@@ -259,9 +259,9 @@ private Query newTermQuery(IndexReader reader, Term term) throws IOException {
// equal to 1
TermContext context = new TermContext(reader.getContext());
for (LeafReaderContext leafContext : reader.leaves()) {
- Terms terms = leafContext.reader().terms(term.field());
+ IndexedField terms = leafContext.reader().indexedField(term.field());
if (terms != null) {
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
if (termsEnum.seekExact(term.bytes())) {
int freq = 1 - context.docFreq(); // we want the total df and ttf to be 1
context.register(termsEnum.termState(), leafContext.ord, freq, freq);
diff --git a/lucene/sandbox/src/java/org/apache/lucene/search/DocValuesTermsQuery.java b/lucene/sandbox/src/java/org/apache/lucene/search/DocValuesTermsQuery.java
index 6e30baed9cd4..a9cfaad3f413 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/search/DocValuesTermsQuery.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/search/DocValuesTermsQuery.java
@@ -52,7 +52,7 @@
*
*
* With each search, this query translates the specified
- * set of Terms into a private {@link LongBitSet} keyed by
+ * set of IndexedField into a private {@link LongBitSet} keyed by
* term number per unique {@link IndexReader} (normally one
* reader per segment). Then, during matching, the term
* number for each docID is retrieved from the cache and
@@ -79,9 +79,9 @@
*
* Generally, this filter will be slower on the first
* invocation for a given field, but subsequent invocations,
- * even if you change the allowed set of Terms, should be
+ * even if you change the allowed set of IndexedField, should be
* faster than TermsQuery, especially as the number of
- * Terms being matched increases. If you are matching only
+ * IndexedField being matched increases. If you are matching only
* a very small number of terms, and those terms in turn
* match a very small number of documents, TermsQuery may
* perform faster.
diff --git a/lucene/sandbox/src/java/org/apache/lucene/search/TermAutomatonQuery.java b/lucene/sandbox/src/java/org/apache/lucene/search/TermAutomatonQuery.java
index 04c8736fda10..51530aba0ad1 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/search/TermAutomatonQuery.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/search/TermAutomatonQuery.java
@@ -382,7 +382,7 @@ public Scorer scorer(LeafReaderContext context) throws IOException {
BytesRef term = idToTerm.get(ent.getKey());
TermState state = termContext.get(context.ord);
if (state != null) {
- TermsEnum termsEnum = context.reader().terms(field).iterator();
+ TermsEnum termsEnum = context.reader().indexedField(field).getTermsEnum();
termsEnum.seekExact(term, state);
enums[ent.getKey()] = new EnumAndScorer(ent.getKey(), termsEnum.postings(null, PostingsEnum.POSITIONS));
any = true;
diff --git a/lucene/sandbox/src/test/org/apache/lucene/codecs/idversion/TestIDVersionPostingsFormat.java b/lucene/sandbox/src/test/org/apache/lucene/codecs/idversion/TestIDVersionPostingsFormat.java
index d173762a6365..82eced358539 100644
--- a/lucene/sandbox/src/test/org/apache/lucene/codecs/idversion/TestIDVersionPostingsFormat.java
+++ b/lucene/sandbox/src/test/org/apache/lucene/codecs/idversion/TestIDVersionPostingsFormat.java
@@ -75,7 +75,8 @@ public void testBasic() throws Exception {
doc.add(makeIDField("id1", 110));
w.addDocument(doc);
IndexReader r = w.getReader();
- IDVersionSegmentTermsEnum termsEnum = (IDVersionSegmentTermsEnum) r.leaves().get(0).reader().fields().terms("id").iterator();
+ IDVersionSegmentTermsEnum termsEnum = (IDVersionSegmentTermsEnum)
+ r.leaves().get(0).reader().fields().indexedField("id").getTermsEnum();
assertTrue(termsEnum.seekExact(new BytesRef("id0"), 50));
assertTrue(termsEnum.seekExact(new BytesRef("id0"), 100));
assertFalse(termsEnum.seekExact(new BytesRef("id0"), 101));
diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/AbstractPrefixTreeQuery.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/AbstractPrefixTreeQuery.java
index dbe643e4b995..60576e14561e 100644
--- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/AbstractPrefixTreeQuery.java
+++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/prefix/AbstractPrefixTreeQuery.java
@@ -21,7 +21,7 @@
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.PostingsEnum;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.ConstantScoreScorer;
import org.apache.lucene.search.ConstantScoreWeight;
@@ -103,7 +103,7 @@ public abstract class BaseTermsEnumTraverser {//TODO rename to LeafTermsEnumTrav
protected final LeafReaderContext context;
protected final int maxDoc;
- protected final Terms terms; // maybe null
+ protected final IndexedField terms; // maybe null
protected final TermsEnum termsEnum;//remember to check for null!
protected PostingsEnum postingsEnum;
@@ -111,9 +111,9 @@ public BaseTermsEnumTraverser(LeafReaderContext context) throws IOException {
this.context = context;
LeafReader reader = context.reader();
this.maxDoc = reader.maxDoc();
- terms = reader.terms(fieldName);
+ terms = reader.indexedField(fieldName);
if (terms != null) {
- this.termsEnum = terms.iterator();
+ this.termsEnum = terms.getTermsEnum();
} else {
this.termsEnum = null;
}
diff --git a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/ShapeFieldCacheProvider.java b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/ShapeFieldCacheProvider.java
index bca73ccf4dc1..5ab0d5b44ecc 100644
--- a/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/ShapeFieldCacheProvider.java
+++ b/lucene/spatial-extras/src/java/org/apache/lucene/spatial/util/ShapeFieldCacheProvider.java
@@ -61,9 +61,9 @@ public synchronized ShapeFieldCache getCache(LeafReader reader) throws IOExce
idx = new ShapeFieldCache<>(reader.maxDoc(),defaultSize);
int count = 0;
PostingsEnum docs = null;
- Terms terms = reader.terms(shapeField);
+ IndexedField terms = reader.indexedField(shapeField);
if (terms != null) {
- TermsEnum te = terms.iterator();
+ TermsEnum te = terms.getTermsEnum();
BytesRef term = te.next();
while (term != null) {
T shape = readShape(term);
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/geopoint/search/GeoPointMultiTermQuery.java b/lucene/spatial/src/java/org/apache/lucene/spatial/geopoint/search/GeoPointMultiTermQuery.java
index 511720606838..d114188898fb 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/geopoint/search/GeoPointMultiTermQuery.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/geopoint/search/GeoPointMultiTermQuery.java
@@ -20,7 +20,7 @@
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.PointValues.Relation;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.Query;
@@ -85,8 +85,8 @@ public Query rewrite(IndexReader reader, MultiTermQuery query) {
};
@Override @SuppressWarnings("unchecked")
- protected TermsEnum getTermsEnum(final Terms terms, AttributeSource atts) throws IOException {
- return new GeoPointTermsEnum(terms.iterator(), this);
+ protected TermsEnum getTermsEnum(final IndexedField terms, AttributeSource atts) throws IOException {
+ return new GeoPointTermsEnum(terms.getTermsEnum(), this);
}
/**
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/geopoint/search/GeoPointTermQueryConstantScoreWrapper.java b/lucene/spatial/src/java/org/apache/lucene/spatial/geopoint/search/GeoPointTermQueryConstantScoreWrapper.java
index a5344b7fbebd..bdf0c4954255 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/geopoint/search/GeoPointTermQueryConstantScoreWrapper.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/geopoint/search/GeoPointTermQueryConstantScoreWrapper.java
@@ -22,7 +22,7 @@
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.SortedNumericDocValues;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.search.ConstantScoreScorer;
import org.apache.lucene.search.ConstantScoreWeight;
import org.apache.lucene.search.DocIdSet;
@@ -80,7 +80,7 @@ public Weight createWeight(IndexSearcher searcher, boolean needsScores, float bo
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
- final Terms terms = context.reader().terms(query.getField());
+ final IndexedField terms = context.reader().indexedField(query.getField());
if (terms == null) {
return null;
}
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/DirectSpellChecker.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/DirectSpellChecker.java
index 0774718f4dbc..0860691dace8 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/spell/DirectSpellChecker.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/DirectSpellChecker.java
@@ -19,7 +19,7 @@
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.search.BoostAttribute;
import org.apache.lucene.search.FuzzyTermsEnum;
import org.apache.lucene.search.MaxNonCompetitiveBoostAttribute;
@@ -402,7 +402,7 @@ protected Collection suggestSimilar(Term term, int numSug, IndexReade
AttributeSource atts = new AttributeSource();
MaxNonCompetitiveBoostAttribute maxBoostAtt =
atts.addAttribute(MaxNonCompetitiveBoostAttribute.class);
- Terms terms = MultiFields.getTerms(ir, term.field());
+ IndexedField terms = MultiFields.getIndexedField(ir, term.field());
if (terms == null) {
return Collections.emptyList();
}
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/HighFrequencyDictionary.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/HighFrequencyDictionary.java
index 657684c587ae..2accddca185e 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/spell/HighFrequencyDictionary.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/HighFrequencyDictionary.java
@@ -21,7 +21,7 @@
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.search.suggest.InputIterator;
import org.apache.lucene.util.BytesRef;
@@ -46,7 +46,7 @@ public class HighFrequencyDictionary implements Dictionary {
* Creates a new Dictionary, pulling source terms from
* the specified field in the provided reader.
*
- * Terms appearing in less than thresh percentage of documents
+ * IndexedField appearing in less than thresh percentage of documents
* will be excluded.
*/
public HighFrequencyDictionary(IndexReader reader, String field, float thresh) {
@@ -67,9 +67,9 @@ final class HighFrequencyIterator implements InputIterator {
private long freq;
HighFrequencyIterator() throws IOException {
- Terms terms = MultiFields.getTerms(reader, field);
+ IndexedField terms = MultiFields.getIndexedField(reader, field);
if (terms != null) {
- termsEnum = terms.iterator();
+ termsEnum = terms.getTermsEnum();
} else {
termsEnum = null;
}
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/LuceneDictionary.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/LuceneDictionary.java
index b96af9bbd580..5d84db17a093 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/spell/LuceneDictionary.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/LuceneDictionary.java
@@ -18,7 +18,7 @@
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.suggest.InputIterator;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.MultiFields;
import java.io.*;
@@ -42,9 +42,9 @@ public LuceneDictionary(IndexReader reader, String field) {
@Override
public final InputIterator getEntryIterator() throws IOException {
- final Terms terms = MultiFields.getTerms(reader, field);
+ final IndexedField terms = MultiFields.getIndexedField(reader, field);
if (terms != null) {
- return new InputIterator.InputIteratorWrapper(terms.iterator());
+ return new InputIterator.InputIteratorWrapper(terms.getTermsEnum());
} else {
return InputIterator.EMPTY;
}
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java b/lucene/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java
index 1f11e27507cc..7d207f394486 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java
@@ -33,7 +33,7 @@
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
@@ -496,9 +496,9 @@ public final void indexDictionary(Dictionary dict, IndexWriterConfig config, boo
final IndexReader reader = searcher.getIndexReader();
if (reader.maxDoc() > 0) {
for (final LeafReaderContext ctx : reader.leaves()) {
- Terms terms = ctx.reader().terms(F_WORD);
+ IndexedField terms = ctx.reader().indexedField(F_WORD);
if (terms != null)
- termsEnums.add(terms.iterator());
+ termsEnums.add(terms.getTermsEnum());
}
}
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java
index 413d401b6a5b..50d13fbc1f34 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java
@@ -31,7 +31,7 @@
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.MultiDocValues;
import org.apache.lucene.index.PostingsEnum;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
@@ -271,8 +271,8 @@ private static void boundedTreeAdd(TreeSet results, Lookup.
*/
private double createCoefficient(IndexSearcher searcher, int doc, Set matchedTokens, String prefixToken) throws IOException {
- Terms tv = searcher.getIndexReader().getTermVector(doc, TEXT_FIELD_NAME);
- TermsEnum it = tv.iterator();
+ IndexedField tv = searcher.getIndexReader().getTermVector(doc, TEXT_FIELD_NAME);
+ TermsEnum it = tv.getTermsEnum();
Integer position = Integer.MAX_VALUE;
BytesRef term;
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FreeTextSuggester.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FreeTextSuggester.java
index 3e4845bf16b2..10bf1cdb4113 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FreeTextSuggester.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/FreeTextSuggester.java
@@ -50,7 +50,7 @@
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.MultiFields;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.suggest.InputIterator;
import org.apache.lucene.search.suggest.Lookup;
@@ -295,13 +295,13 @@ public void build(InputIterator iterator, double ramBufferSizeMB) throws IOExcep
}
reader = DirectoryReader.open(writer);
- Terms terms = MultiFields.getTerms(reader, "body");
+ IndexedField terms = MultiFields.getIndexedField(reader, "body");
if (terms == null) {
throw new IllegalArgumentException("need at least one suggestion");
}
// Move all ngrams into an FST:
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
Outputs outputs = PositiveIntOutputs.getSingleton();
Builder builder = new Builder<>(FST.INPUT_TYPE.BYTE1, outputs);
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionFieldsConsumer.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionFieldsConsumer.java
index 9df9d605197d..fb162cb5a34a 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionFieldsConsumer.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionFieldsConsumer.java
@@ -24,11 +24,11 @@
import org.apache.lucene.codecs.FieldsConsumer;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.SegmentWriteState;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.ByteArrayDataInput;
@@ -44,7 +44,7 @@
/**
*
- * Weighted FSTs for any indexed {@link SuggestField} is built on {@link #write(Fields)}.
+ * Weighted FSTs for any indexed {@link SuggestField} is built on {@link #write(IndexedFields)}.
* A weighted FST maps the analyzed forms of a field to its
* surface form and document id. FSTs are stored in the CompletionDictionary (.lkp).
*
@@ -80,17 +80,17 @@ final class CompletionFieldsConsumer extends FieldsConsumer {
}
@Override
- public void write(Fields fields) throws IOException {
+ public void write(IndexedFields fields) throws IOException {
delegateFieldsConsumer.write(fields);
for (String field : fields) {
CompletionTermWriter termWriter = new CompletionTermWriter();
- Terms terms = fields.terms(field);
+ IndexedField terms = fields.indexedField(field);
if (terms == null) {
- // this can happen from ghost fields, where the incoming Fields iterator claims a field exists but it does not
+ // this can happen from ghost fields, where the incoming IndexedFields iterator claims a field exists but it does not
continue;
}
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
// write terms
BytesRef term;
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionFieldsProducer.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionFieldsProducer.java
index 7a29b616ce73..e7bb2cb7ce49 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionFieldsProducer.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionFieldsProducer.java
@@ -31,7 +31,7 @@
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.SegmentReadState;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.store.ChecksumIndexInput;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.util.Accountable;
@@ -51,7 +51,7 @@
*
*
* Completion dictionary (.lkp) is opened at instantiation and a field's FST is loaded
- * into memory the first time it is requested via {@link #terms(String)}.
+ * into memory the first time it is requested via {@link #indexedField(String)}.
*
*
* NOTE: Only the footer is validated for Completion dictionary (.lkp) and not the checksum due
@@ -161,8 +161,8 @@ public Iterator iterator() {
}
@Override
- public Terms terms(String field) throws IOException {
- Terms terms = delegateFieldsProducer.terms(field) ;
+ public IndexedField indexedField(String field) throws IOException {
+ IndexedField terms = delegateFieldsProducer.indexedField(field) ;
if (terms == null) {
return null;
}
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionQuery.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionQuery.java
index 71ba15ae1205..73678f08e1ef 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionQuery.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionQuery.java
@@ -22,7 +22,7 @@
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.suggest.BitsProducer;
@@ -96,11 +96,11 @@ public Term getTerm() {
public Query rewrite(IndexReader reader) throws IOException {
byte type = 0;
boolean first = true;
- Terms terms;
+ IndexedField terms;
for (LeafReaderContext context : reader.leaves()) {
LeafReader leafReader = context.reader();
try {
- if ((terms = leafReader.terms(getField())) == null) {
+ if ((terms = leafReader.indexedField(getField())) == null) {
continue;
}
} catch (IOException e) {
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionTerms.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionTerms.java
index 0f86739515f7..fcdab262ec88 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionTerms.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionTerms.java
@@ -19,23 +19,23 @@
import java.io.IOException;
import org.apache.lucene.index.FilterLeafReader;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
/**
- * Wrapped {@link org.apache.lucene.index.Terms}
+ * Wrapped {@link org.apache.lucene.index.IndexedField}
* used by {@link SuggestField} and {@link ContextSuggestField}
* to access corresponding suggester and their attributes
*
* @lucene.experimental
*/
-public final class CompletionTerms extends FilterLeafReader.FilterTerms {
+public final class CompletionTerms extends FilterLeafReader.FilterField {
private final CompletionsTermsReader reader;
/**
* Creates a completionTerms based on {@link CompletionsTermsReader}
*/
- CompletionTerms(Terms in, CompletionsTermsReader reader) {
+ CompletionTerms(IndexedField in, CompletionsTermsReader reader) {
super(in);
this.reader = reader;
}
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionWeight.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionWeight.java
index d74e56f2ecfc..1ebc60185c51 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionWeight.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionWeight.java
@@ -22,7 +22,7 @@
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.search.BulkScorer;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.Scorer;
@@ -70,9 +70,9 @@ public Automaton getAutomaton() {
@Override
public BulkScorer bulkScorer(final LeafReaderContext context) throws IOException {
final LeafReader reader = context.reader();
- final Terms terms;
+ final IndexedField terms;
final NRTSuggester suggester;
- if ((terms = reader.terms(completionQuery.getField())) == null) {
+ if ((terms = reader.indexedField(completionQuery.getField())) == null) {
return null;
}
if (terms instanceof CompletionTerms) {
diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestPrefixCompletionQuery.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestPrefixCompletionQuery.java
index f5bacefc6384..af12edc33dbb 100644
--- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestPrefixCompletionQuery.java
+++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/document/TestPrefixCompletionQuery.java
@@ -320,7 +320,7 @@ public void testGhostField() throws Exception {
// first force merge is OK
iw.forceMerge(1);
- // second force merge causes MultiFields to include "suggest_field" in its iteration, yet a null Terms is returned (no documents have
+ // second force merge causes MultiFields to include "suggest_field" in its iteration, yet a null IndexedField is returned (no documents have
// this field anymore)
iw.addDocument(new Document());
iw.forceMerge(1);
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPostingsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPostingsFormat.java
index a89b5081ded3..9a2e3b42fc3d 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPostingsFormat.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPostingsFormat.java
@@ -25,13 +25,13 @@
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.index.AssertingLeafReader;
import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.index.SegmentWriteState;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.BytesRef;
@@ -83,9 +83,9 @@ public Iterator iterator() {
}
@Override
- public Terms terms(String field) throws IOException {
- Terms terms = in.terms(field);
- return terms == null ? null : new AssertingLeafReader.AssertingTerms(terms);
+ public IndexedField indexedField(String field) throws IOException {
+ IndexedField terms = in.indexedField(field);
+ return terms == null ? null : new AssertingLeafReader.AssertingField(terms);
}
@Override
@@ -133,13 +133,13 @@ static class AssertingFieldsConsumer extends FieldsConsumer {
}
@Override
- public void write(Fields fields) throws IOException {
+ public void write(IndexedFields fields) throws IOException {
in.write(fields);
// TODO: more asserts? can we somehow run a
// "limited" CheckIndex here??? Or ... can we improve
// AssertingFieldsProducer and us it also to wrap the
- // incoming Fields here?
+ // incoming IndexedFields here?
String lastField = null;
@@ -150,13 +150,13 @@ public void write(Fields fields) throws IOException {
assert lastField == null || lastField.compareTo(field) < 0;
lastField = field;
- Terms terms = fields.terms(field);
+ IndexedField terms = fields.indexedField(field);
if (terms == null) {
continue;
}
assert terms != null;
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
BytesRefBuilder lastTerm = null;
PostingsEnum postingsEnum = null;
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingTermVectorsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingTermVectorsFormat.java
index 000fd6f3ff8a..2f7d4b9fe251 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingTermVectorsFormat.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingTermVectorsFormat.java
@@ -25,7 +25,7 @@
import org.apache.lucene.index.AssertingLeafReader;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.SegmentInfo;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
@@ -67,8 +67,8 @@ public void close() throws IOException {
}
@Override
- public Fields get(int doc) throws IOException {
- Fields fields = in.get(doc);
+ public IndexedFields get(int doc) throws IOException {
+ IndexedFields fields = in.get(doc);
return fields == null ? null : new AssertingLeafReader.AssertingFields(fields);
}
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyPostingsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyPostingsFormat.java
index 2ca1bc75eeab..d3756a0e4d6b 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyPostingsFormat.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyPostingsFormat.java
@@ -22,7 +22,7 @@
import org.apache.lucene.codecs.FieldsConsumer;
import org.apache.lucene.codecs.FieldsProducer;
import org.apache.lucene.codecs.PostingsFormat;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.index.SegmentWriteState;
@@ -61,7 +61,7 @@ static class CrankyFieldsConsumer extends FieldsConsumer {
}
@Override
- public void write(Fields fields) throws IOException {
+ public void write(IndexedFields fields) throws IOException {
if (random.nextInt(100) == 0) {
throw new IOException("Fake IOException from FieldsConsumer.write()");
}
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java
index 4b85f130c4af..eb2b38f9aa03 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java
@@ -35,12 +35,12 @@
import org.apache.lucene.codecs.TermStats;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexedFields;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.index.SegmentWriteState;
-import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.IndexedField;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
@@ -68,7 +68,7 @@ static class RAMPostings extends FieldsProducer {
final Map fieldToTerms = new TreeMap<>();
@Override
- public Terms terms(String field) {
+ public IndexedField indexedField(String field) {
return fieldToTerms.get(field);
}
@@ -104,7 +104,7 @@ public Collection getChildResources() {
public void checkIntegrity() throws IOException {}
}
- static class RAMField extends Terms implements Accountable {
+ static class RAMField extends IndexedField implements Accountable {
final String field;
final SortedMap termToDocs = new TreeMap<>();
long sumTotalTermFreq;
@@ -147,7 +147,7 @@ public int getDocCount() throws IOException {
}
@Override
- public TermsEnum iterator() {
+ public TermsEnum getTermsEnum() {
return new RAMTermsEnum(RAMOnlyPostingsFormat.RAMField.this);
}
@@ -227,15 +227,15 @@ public RAMFieldsConsumer(SegmentWriteState writeState, RAMPostings postings) {
}
@Override
- public void write(Fields fields) throws IOException {
+ public void write(IndexedFields fields) throws IOException {
for(String field : fields) {
- Terms terms = fields.terms(field);
+ IndexedField terms = fields.indexedField(field);
if (terms == null) {
continue;
}
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
FieldInfo fieldInfo = state.fieldInfos.fieldInfo(field);
if (fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0) {
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java b/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java
index e83735947cee..700a93a19998 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java
@@ -59,21 +59,21 @@ public AssertingLeafReader(LeafReader in) {
}
@Override
- public Fields fields() throws IOException {
+ public IndexedFields fields() throws IOException {
return new AssertingFields(super.fields());
}
@Override
- public Fields getTermVectors(int docID) throws IOException {
- Fields fields = super.getTermVectors(docID);
+ public IndexedFields getTermVectors(int docID) throws IOException {
+ IndexedFields fields = super.getTermVectors(docID);
return fields == null ? null : new AssertingFields(fields);
}
/**
- * Wraps a Fields but with additional asserts
+ * Wraps an IndexedFields but with additional asserts
*/
public static class AssertingFields extends FilterFields {
- public AssertingFields(Fields in) {
+ public AssertingFields(IndexedFields in) {
super(in);
}
@@ -85,17 +85,17 @@ public Iterator iterator() {
}
@Override
- public Terms terms(String field) throws IOException {
- Terms terms = super.terms(field);
- return terms == null ? null : new AssertingTerms(terms);
+ public IndexedField indexedField(String field) throws IOException {
+ IndexedField terms = super.indexedField(field);
+ return terms == null ? null : new AssertingField(terms);
}
}
/**
- * Wraps a Terms but with additional asserts
+ * Wraps an IndexedField with additional asserts
*/
- public static class AssertingTerms extends FilterTerms {
- public AssertingTerms(Terms in) {
+ public static class AssertingField extends FilterField {
+ public AssertingField(IndexedField in) {
super(in);
}
@@ -122,15 +122,15 @@ public BytesRef getMax() throws IOException {
}
@Override
- public TermsEnum iterator() throws IOException {
- TermsEnum termsEnum = super.iterator();
+ public TermsEnum getTermsEnum() throws IOException {
+ TermsEnum termsEnum = super.getTermsEnum();
assert termsEnum != null;
return new AssertingTermsEnum(termsEnum);
}
@Override
public String toString() {
- return "AssertingTerms(" + in + ")";
+ return "AssertingField(" + in + ")";
}
}
@@ -149,7 +149,7 @@ public AssertingTermsEnum(TermsEnum in) {
@Override
public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException {
- assertThread("Terms enums", creationThread);
+ assertThread("IndexedField enums", creationThread);
assert state == State.POSITIONED: "docs(...) called on unpositioned TermsEnum";
// reuse if the codec reused
@@ -174,7 +174,7 @@ public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException {
// someone should not call next() after it returns null!!!!
@Override
public BytesRef next() throws IOException {
- assertThread("Terms enums", creationThread);
+ assertThread("IndexedField enums", creationThread);
assert state == State.INITIAL || state == State.POSITIONED: "next() called on unpositioned TermsEnum";
BytesRef result = super.next();
if (result == null) {
@@ -188,28 +188,28 @@ public BytesRef next() throws IOException {
@Override
public long ord() throws IOException {
- assertThread("Terms enums", creationThread);
+ assertThread("IndexedField enums", creationThread);
assert state == State.POSITIONED : "ord() called on unpositioned TermsEnum";
return super.ord();
}
@Override
public int docFreq() throws IOException {
- assertThread("Terms enums", creationThread);
+ assertThread("IndexedField enums", creationThread);
assert state == State.POSITIONED : "docFreq() called on unpositioned TermsEnum";
return super.docFreq();
}
@Override
public long totalTermFreq() throws IOException {
- assertThread("Terms enums", creationThread);
+ assertThread("IndexedField enums", creationThread);
assert state == State.POSITIONED : "totalTermFreq() called on unpositioned TermsEnum";
return super.totalTermFreq();
}
@Override
public BytesRef term() throws IOException {
- assertThread("Terms enums", creationThread);
+ assertThread("IndexedField enums", creationThread);
assert state == State.POSITIONED : "term() called on unpositioned TermsEnum";
BytesRef ret = super.term();
assert ret == null || ret.isValid();
@@ -218,14 +218,14 @@ public BytesRef term() throws IOException {
@Override
public void seekExact(long ord) throws IOException {
- assertThread("Terms enums", creationThread);
+ assertThread("IndexedField enums", creationThread);
super.seekExact(ord);
state = State.POSITIONED;
}
@Override
public SeekStatus seekCeil(BytesRef term) throws IOException {
- assertThread("Terms enums", creationThread);
+ assertThread("IndexedField enums", creationThread);
assert term.isValid();
SeekStatus result = super.seekCeil(term);
if (result == SeekStatus.END) {
@@ -238,7 +238,7 @@ public SeekStatus seekCeil(BytesRef term) throws IOException {
@Override
public boolean seekExact(BytesRef text) throws IOException {
- assertThread("Terms enums", creationThread);
+ assertThread("IndexedField enums", creationThread);
assert text.isValid();
boolean result;
if (delegateOverridesSeekExact) {
@@ -256,14 +256,14 @@ public boolean seekExact(BytesRef text) throws IOException {
@Override
public TermState termState() throws IOException {
- assertThread("Terms enums", creationThread);
+ assertThread("IndexedField enums", creationThread);
assert state == State.POSITIONED : "termState() called on unpositioned TermsEnum";
return in.termState();
}
@Override
public void seekExact(BytesRef term, TermState state) throws IOException {
- assertThread("Terms enums", creationThread);
+ assertThread("IndexedField enums", creationThread);
assert term.isValid();
in.seekExact(term, state);
this.state = State.POSITIONED;
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java
index 1155a7365c3e..1bf34e8c2f79 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java
@@ -160,8 +160,8 @@ public void testPostingsEnumReuse() throws Exception {
Collections.shuffle(postingsTester.allTerms, random());
RandomPostingsTester.FieldAndTerm fieldAndTerm = postingsTester.allTerms.get(0);
- Terms terms = fieldsProducer.terms(fieldAndTerm.field);
- TermsEnum te = terms.iterator();
+ IndexedField terms = fieldsProducer.indexedField(fieldAndTerm.field);
+ TermsEnum te = terms.getTermsEnum();
te.seekExact(fieldAndTerm.term);
checkReuse(te, PostingsEnum.FREQS, PostingsEnum.ALL, false);
@@ -193,13 +193,13 @@ public void testJustEmptyField() throws Exception {
iw.addDocument(doc);
DirectoryReader ir = iw.getReader();
LeafReader ar = getOnlyLeafReader(ir);
- Fields fields = ar.fields();
+ IndexedFields fields = ar.fields();
int fieldCount = fields.size();
// -1 is allowed, if the codec doesn't implement fields.size():
assertTrue(fieldCount == 1 || fieldCount == -1);
- Terms terms = ar.terms("");
+ IndexedField terms = ar.indexedField("");
assertNotNull(terms);
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
assertNotNull(termsEnum.next());
assertEquals(termsEnum.term(), new BytesRef("something"));
assertNull(termsEnum.next());
@@ -218,13 +218,13 @@ public void testEmptyFieldAndEmptyTerm() throws Exception {
iw.addDocument(doc);
DirectoryReader ir = iw.getReader();
LeafReader ar = getOnlyLeafReader(ir);
- Fields fields = ar.fields();
+ IndexedFields fields = ar.fields();
int fieldCount = fields.size();
// -1 is allowed, if the codec doesn't implement fields.size():
assertTrue(fieldCount == 1 || fieldCount == -1);
- Terms terms = ar.terms("");
+ IndexedField terms = ar.indexedField("");
assertNotNull(terms);
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
assertNotNull(termsEnum.next());
assertEquals(termsEnum.term(), new BytesRef(""));
assertNull(termsEnum.next());
@@ -244,7 +244,7 @@ public void testDidntWantFreqsButAskedAnyway() throws Exception {
iw.addDocument(doc);
DirectoryReader ir = iw.getReader();
LeafReader ar = getOnlyLeafReader(ir);
- TermsEnum termsEnum = ar.terms("field").iterator();
+ TermsEnum termsEnum = ar.indexedField("field").getTermsEnum();
assertTrue(termsEnum.seekExact(new BytesRef("value")));
PostingsEnum docsEnum = termsEnum.postings(null, PostingsEnum.NONE);
assertEquals(0, docsEnum.nextDoc());
@@ -267,7 +267,7 @@ public void testAskForPositionsWhenNotThere() throws Exception {
iw.addDocument(doc);
DirectoryReader ir = iw.getReader();
LeafReader ar = getOnlyLeafReader(ir);
- TermsEnum termsEnum = ar.terms("field").iterator();
+ TermsEnum termsEnum = ar.indexedField("field").getTermsEnum();
assertTrue(termsEnum.seekExact(new BytesRef("value")));
PostingsEnum docsEnum = termsEnum.postings(null, PostingsEnum.POSITIONS);
assertEquals(0, docsEnum.nextDoc());
@@ -296,13 +296,13 @@ public void testGhosts() throws Exception {
iw.forceMerge(1);
DirectoryReader ir = iw.getReader();
LeafReader ar = getOnlyLeafReader(ir);
- Fields fields = ar.fields();
+ IndexedFields fields = ar.fields();
// Ghost busting terms dict impls will have
// fields.size() == 0; all others must be == 1:
assertTrue(fields.size() <= 1);
- Terms terms = fields.terms("ghostField");
+ IndexedField terms = fields.indexedField("ghostField");
if (terms != null) {
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
BytesRef term = termsEnum.next();
if (term != null) {
PostingsEnum postingsEnum = termsEnum.postings(null);
@@ -342,7 +342,7 @@ public void testLevel2Ghosts() throws Exception {
// first force merge creates a level 1 ghost field
iw.forceMerge(1);
- // second force merge creates a level 2 ghost field, causing MultiFields to include "suggest_field" in its iteration, yet a null Terms is returned (no documents have
+ // second force merge creates a level 2 ghost field, causing MultiFields to include "suggest_field" in its iteration, yet a null IndexedField is returned (no documents have
// this field anymore)
iw.addDocument(new Document());
iw.forceMerge(1);
@@ -405,7 +405,7 @@ public FieldsConsumer fieldsConsumer(final SegmentWriteState state) throws IOExc
return new FieldsConsumer() {
@Override
- public void write(Fields fields) throws IOException {
+ public void write(IndexedFields fields) throws IOException {
fieldsConsumer.write(fields);
boolean isMerge = state.context.context == IOContext.Context.MERGE;
@@ -424,10 +424,10 @@ public void write(Fields fields) throws IOException {
//System.out.println("write isMerge=" + isMerge + " 2ndPass=" + addOnSecondPass);
// Gather our own stats:
- Terms terms = fields.terms("body");
+ IndexedField terms = fields.indexedField("body");
assert terms != null;
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
PostingsEnum docs = null;
while(termsEnum.next() != null) {
BytesRef term = termsEnum.term();
@@ -557,11 +557,11 @@ public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException
IndexReader r = w.getReader();
w.close();
- Terms terms = MultiFields.getTerms(r, "body");
+ IndexedField terms = MultiFields.getIndexedField(r, "body");
assertEquals(sumDocFreq.get(), terms.getSumDocFreq());
assertEquals(sumTotalTermFreq.get(), terms.getSumTotalTermFreq());
- TermsEnum termsEnum = terms.iterator();
+ TermsEnum termsEnum = terms.getTermsEnum();
long termCount = 0;
boolean supportsOrds = true;
while(termsEnum.next() != null) {
@@ -612,7 +612,7 @@ public void testPostingsEnumDocsOnly() throws Exception {
assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings.nextDoc());
// termsenum reuse (FREQS)
- TermsEnum termsEnum = getOnlyLeafReader(reader).terms("foo").iterator();
+ TermsEnum termsEnum = getOnlyLeafReader(reader).indexedField("foo").getTermsEnum();
termsEnum.seekExact(new BytesRef("bar"));
PostingsEnum postings2 = termsEnum.postings(postings);
assertNotNull(postings2);
@@ -670,7 +670,7 @@ protected TokenStreamComponents createComponents(String fieldName) {
assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings.nextDoc());
// termsenum reuse (FREQS)
- TermsEnum termsEnum = getOnlyLeafReader(reader).terms("foo").iterator();
+ TermsEnum termsEnum = getOnlyLeafReader(reader).indexedField("foo").getTermsEnum();
termsEnum.seekExact(new BytesRef("bar"));
PostingsEnum postings2 = termsEnum.postings(postings);
assertNotNull(postings2);
@@ -748,7 +748,7 @@ protected TokenStreamComponents createComponents(String fieldName) {
assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings.nextDoc());
// termsenum reuse (FREQS)
- TermsEnum termsEnum = getOnlyLeafReader(reader).terms("foo").iterator();
+ TermsEnum termsEnum = getOnlyLeafReader(reader).indexedField("foo").getTermsEnum();
termsEnum.seekExact(new BytesRef("bar"));
PostingsEnum postings2 = termsEnum.postings(postings);
assertNotNull(postings2);
@@ -928,7 +928,7 @@ protected TokenStreamComponents createComponents(String fieldName) {
assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings.nextDoc());
// termsenum reuse (FREQS)
- TermsEnum termsEnum = getOnlyLeafReader(reader).terms("foo").iterator();
+ TermsEnum termsEnum = getOnlyLeafReader(reader).indexedField("foo").getTermsEnum();
termsEnum.seekExact(new BytesRef("bar"));
PostingsEnum postings2 = termsEnum.postings(postings);
assertNotNull(postings2);
@@ -1113,7 +1113,7 @@ public void testPostingsEnumPayloads() throws Exception {
assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings.nextDoc());
// termsenum reuse (FREQS)
- TermsEnum termsEnum = getOnlyLeafReader(reader).terms("foo").iterator();
+ TermsEnum termsEnum = getOnlyLeafReader(reader).indexedField("foo").getTermsEnum();
termsEnum.seekExact(new BytesRef("bar"));
PostingsEnum postings2 = termsEnum.postings(postings);
assertNotNull(postings2);
@@ -1299,7 +1299,7 @@ public void testPostingsEnumAll() throws Exception {
assertEquals(DocIdSetIterator.NO_MORE_DOCS, postings.nextDoc());
// termsenum reuse (FREQS)
- TermsEnum termsEnum = getOnlyLeafReader(reader).terms("foo").iterator();
+ TermsEnum termsEnum = getOnlyLeafReader(reader).indexedField("foo").getTermsEnum();
termsEnum.seekExact(new BytesRef("bar"));
PostingsEnum postings2 = termsEnum.postings(postings);
assertNotNull(postings2);
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java
index 7acee871f59d..46ed8e288090 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java
@@ -393,7 +393,7 @@ public RandomDocument newDocument(int fieldCount, int maxTermCount, Options opti
}
- protected void assertEquals(RandomDocument doc, Fields fields) throws IOException {
+ protected void assertEquals(RandomDocument doc, IndexedFields fields) throws IOException {
// compare field names
assertEquals(doc == null, fields == null);
assertEquals(doc.fieldNames.length, fields.size());
@@ -408,7 +408,7 @@ protected void assertEquals(RandomDocument doc, Fields fields) throws IOExceptio
assertEquals(fields1, fields2);
for (int i = 0; i < doc.fieldNames.length; ++i) {
- assertEquals(doc.tokenStreams[i], doc.fieldTypes[i], fields.terms(doc.fieldNames[i]));
+ assertEquals(doc.tokenStreams[i], doc.fieldTypes[i], fields.indexedField(doc.fieldNames[i]));
}
}
@@ -424,7 +424,7 @@ protected static boolean equals(Object o1, Object o2) {
private final ThreadLocal docsEnum = new ThreadLocal<>();
private final ThreadLocal docsAndPositionsEnum = new ThreadLocal<>();
- protected void assertEquals(RandomTokenStream tk, FieldType ft, Terms terms) throws IOException {
+ protected void assertEquals(RandomTokenStream tk, FieldType ft, IndexedField terms) throws IOException {
assertEquals(1, terms.getDocCount());
final int termCount = new HashSet<>(Arrays.asList(tk.terms)).size();
assertEquals(termCount, terms.size());
@@ -438,7 +438,7 @@ protected void assertEquals(RandomTokenStream tk, FieldType ft, Terms terms) thr
}
final BytesRef[] sortedTerms = uniqueTerms.toArray(new BytesRef[0]);
Arrays.sort(sortedTerms);
- final TermsEnum termsEnum = terms.iterator();
+ final TermsEnum termsEnum = terms.getTermsEnum();
for (int i = 0; i < sortedTerms.length; ++i) {
final BytesRef nextTerm = termsEnum.next();
assertEquals(sortedTerms[i], nextTerm);
@@ -553,14 +553,14 @@ public void testRareVectors() throws IOException {
final int docWithVectorsID = docID(reader, "42");
for (int i = 0; i < 10; ++i) {
final int docID = random().nextInt(numDocs);
- final Fields fields = reader.getTermVectors(docID);
+ final IndexedFields fields = reader.getTermVectors(docID);
if (docID == docWithVectorsID) {
assertEquals(doc, fields);
} else {
assertNull(fields);
}
}
- final Fields fields = reader.getTermVectors(docWithVectorsID);
+ final IndexedFields fields = reader.getTermVectors(docWithVectorsID);
assertEquals(doc, fields);
reader.close();
writer.close();
@@ -757,8 +757,8 @@ protected TokenStreamComponents createComponents(String fieldName) {
iw.addDocument(doc);
DirectoryReader reader = DirectoryReader.open(iw);
- Terms terms = getOnlyLeafReader(reader).getTermVector(0, "foo");
- TermsEnum termsEnum = terms.iterator();
+ IndexedField terms = getOnlyLeafReader(reader).getTermVector(0, "foo");
+ TermsEnum termsEnum = terms.getTermsEnum();
assertNotNull(termsEnum);
assertEquals(new BytesRef("bar"), termsEnum.next());
@@ -838,8 +838,8 @@ protected TokenStreamComponents createComponents(String fieldName) {
iw.addDocument(doc);
DirectoryReader reader = DirectoryReader.open(iw);
- Terms terms = getOnlyLeafReader(reader).getTermVector(0, "foo");
- TermsEnum termsEnum = terms.iterator();
+ IndexedField terms = getOnlyLeafReader(reader).getTermVector(0, "foo");
+ TermsEnum termsEnum = terms.getTermsEnum();
assertNotNull(termsEnum);
assertEquals(new BytesRef("bar"), termsEnum.next());
@@ -1017,8 +1017,8 @@ protected TokenStreamComponents createComponents(String fieldName) {
iw.addDocument(doc);
DirectoryReader reader = DirectoryReader.open(iw);
- Terms terms = getOnlyLeafReader(reader).getTermVector(0, "foo");
- TermsEnum termsEnum = terms.iterator();
+ IndexedField terms = getOnlyLeafReader(reader).getTermVector(0, "foo");
+ TermsEnum termsEnum = terms.getTermsEnum();
assertNotNull(termsEnum);
assertEquals(new BytesRef("bar"), termsEnum.next());
@@ -1203,8 +1203,8 @@ protected TokenStreamComponents createComponents(String fieldName) {
iw.addDocument(doc);
DirectoryReader reader = DirectoryReader.open(iw);
- Terms terms = getOnlyLeafReader(reader).getTermVector(0, "foo");
- TermsEnum termsEnum = terms.iterator();
+ IndexedField terms = getOnlyLeafReader(reader).getTermVector(0, "foo");
+ TermsEnum termsEnum = terms.getTermsEnum();
assertNotNull(termsEnum);
assertEquals(new BytesRef("bar"), termsEnum.next());
@@ -1389,8 +1389,8 @@ public void testPostingsEnumPayloads() throws Exception {
iw.addDocument(doc);
DirectoryReader reader = DirectoryReader.open(iw);
- Terms terms = getOnlyLeafReader(reader).getTermVector(0, "foo");
- TermsEnum termsEnum = terms.iterator();
+ IndexedField terms = getOnlyLeafReader(reader).getTermVector(0, "foo");
+ TermsEnum termsEnum = terms.getTermsEnum();
assertNotNull(termsEnum);
assertEquals(new BytesRef("bar"), termsEnum.next());
@@ -1575,8 +1575,8 @@ public void testPostingsEnumAll() throws Exception {
iw.addDocument(doc);
DirectoryReader reader = DirectoryReader.open(iw);
- Terms terms = getOnlyLeafReader(reader).getTermVector(0, "foo");
- TermsEnum termsEnum = terms.iterator();
+ IndexedField terms = getOnlyLeafReader(reader).getTermVector(0, "foo");
+ TermsEnum termsEnum = terms.getTermsEnum();
assertNotNull(termsEnum);
assertEquals(new BytesRef("bar"), termsEnum.next());
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/FieldFilterLeafReader.java b/lucene/test-framework/src/java/org/apache/lucene/index/FieldFilterLeafReader.java
index a75af543d92a..2412144bb860 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/FieldFilterLeafReader.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/FieldFilterLeafReader.java
@@ -56,8 +56,8 @@ public FieldInfos getFieldInfos() {
}
@Override
- public Fields getTermVectors(int docID) throws IOException {
- Fields f = super.getTermVectors(docID);
+ public IndexedFields getTermVectors(int docID) throws IOException {
+ IndexedFields f = super.getTermVectors(docID);
if (f == null) {
return null;
}
@@ -108,8 +108,8 @@ public Status needsField(FieldInfo fieldInfo) throws IOException {
}
@Override
- public Fields fields() throws IOException {
- final Fields f = super.fields();
+ public IndexedFields fields() throws IOException {
+ final IndexedFields f = super.fields();
return (f == null) ? null : new FieldFilterFields(f);
}
@@ -148,7 +148,7 @@ public String toString() {
private class FieldFilterFields extends FilterFields {
- public FieldFilterFields(Fields in) {
+ public FieldFilterFields(IndexedFields in) {
super(in);
}
@@ -169,8 +169,8 @@ protected boolean predicateFunction(String field) {
}
@Override
- public Terms terms(String field) throws IOException {
- return hasField(field) ? super.terms(field) : null;
+ public IndexedField indexedField(String field) throws IOException {
+ return hasField(field) ? super.indexedField(field) : null;
}
}
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/PerThreadPKLookup.java b/lucene/test-framework/src/java/org/apache/lucene/index/PerThreadPKLookup.java
index c417a8864507..329ede6dcee3 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/PerThreadPKLookup.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/PerThreadPKLookup.java
@@ -59,9 +59,9 @@ public int compare(LeafReaderContext c1, LeafReaderContext c2) {
int numSegs = 0;
boolean hasDeletions = false;
for(int i=0;i> fields;
final FieldInfos fieldInfos;
final IndexOptions maxAllowed;
@@ -434,7 +434,7 @@ public Iterator iterator() {
}
@Override
- public Terms terms(String field) {
+ public IndexedField indexedField(String field) {
SortedMap terms = fields.get(field);
if (terms == null) {
return null;
@@ -449,7 +449,7 @@ public int size() {
}
}
- private static class SeedTerms extends Terms {
+ private static class SeedTerms extends IndexedField {
final SortedMap terms;
final FieldInfo fieldInfo;
final IndexOptions maxAllowed;
@@ -463,7 +463,7 @@ public SeedTerms(SortedMap terms, FieldInfo fieldInfo, Inde
}
@Override
- public TermsEnum iterator() {
+ public TermsEnum getTermsEnum() {
SeedTermsEnum termsEnum = new SeedTermsEnum(terms, maxAllowed, allowPayloads);
termsEnum.reset();
@@ -651,7 +651,7 @@ public FieldsProducer buildIndex(Codec codec, Directory dir, IndexOptions maxAll
segmentInfo, newFieldInfos,
null, new IOContext(new FlushInfo(maxDoc, bytes)));
- Fields seedFields = new SeedFields(fields, newFieldInfos, maxAllowed, allowPayloads);
+ IndexedFields seedFields = new SeedFields(fields, newFieldInfos, maxAllowed, allowPayloads);
FieldsConsumer consumer = codec.postingsFormat().fieldsConsumer(writeState);
boolean success = false;
@@ -978,7 +978,7 @@ private void verifyEnum(Random random,
}
private static class TestThread extends Thread {
- private Fields fieldsSource;
+ private IndexedFields fieldsSource;
private EnumSet