forked from mozilla-metrics/grouperfish
/
NGramEnglishAnalyzer.java
118 lines (103 loc) · 5.16 KB
/
NGramEnglishAnalyzer.java
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
/*
* Copyright 2011 Mozilla Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mozilla.grouperfish.lucene.analysis.en;
import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
import java.util.Set;
import org.apache.hadoop.fs.Path;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.KeywordMarkerFilter;
import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.PorterStemFilter;
import org.apache.lucene.analysis.StopwordAnalyzerBase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.en.EnglishPossessiveFilter;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.util.Version;
import com.mozilla.grouperfish.text.Dictionary;
/**
 * English-language analyzer that emits word n-grams (shingles) in addition to,
 * or instead of, single tokens.
 *
 * <p>Pipeline: {@link StandardTokenizer} → {@link StandardFilter} →
 * {@link EnglishPossessiveFilter} (3.1+) → {@link LowerCaseFilter} →
 * {@code ShingleAllStopFilter} (n-gram generation with stopword awareness) →
 * optional {@link PorterStemFilter} guarded by a stem-exclusion set.
 *
 * <p>Thread-safety follows {@link StopwordAnalyzerBase}: configuration is fixed
 * at construction time.
 */
public class NGramEnglishAnalyzer extends StopwordAnalyzerBase {

    // Terms that must never be stemmed; immutable after construction.
    private final Set<?> stemExclusionSet;
    // Whether to apply Porter stemming after shingle generation.
    private boolean stem = false;
    // Whether single-word tokens are emitted alongside n-grams.
    private boolean outputUnigrams = true;
    // Inclusive bounds on shingle (n-gram) size.
    private int minNGram = ShingleAllStopFilter.DEFAULT_MIN_SHINGLE_SIZE;
    private int maxNGram = ShingleAllStopFilter.DEFAULT_MAX_SHINGLE_SIZE;

    /** Uses the standard English stopword set; no stemming. */
    public NGramEnglishAnalyzer(Version version) {
        this(version, StandardAnalyzer.STOP_WORDS_SET, false);
    }

    /** Uses the standard English stopword set with optional stemming. */
    public NGramEnglishAnalyzer(Version version, boolean stem) {
        this(version, StandardAnalyzer.STOP_WORDS_SET, stem);
    }

    /** Custom stopwords and optional stemming; unigrams are emitted. */
    public NGramEnglishAnalyzer(Version version, Set<?> stopwords, boolean stem) {
        this(version, stopwords, stem, true);
    }

    /** Default shingle sizes and an empty stem-exclusion set. */
    public NGramEnglishAnalyzer(Version version, Set<?> stopwords, boolean stem, boolean outputUnigrams) {
        this(version, stopwords, stem, outputUnigrams, ShingleAllStopFilter.DEFAULT_MIN_SHINGLE_SIZE, ShingleAllStopFilter.DEFAULT_MAX_SHINGLE_SIZE, CharArraySet.EMPTY_SET);
    }

    /** Custom shingle size bounds with an empty stem-exclusion set. */
    public NGramEnglishAnalyzer(Version version, Set<?> stopwords, boolean stem, boolean outputUnigrams, int minNGram, int maxNGram) {
        this(version, stopwords, stem, outputUnigrams, minNGram, maxNGram, CharArraySet.EMPTY_SET);
    }

    /**
     * Fully-parameterized constructor; all other constructors delegate here.
     *
     * @param matchVersion     Lucene compatibility version
     * @param stopwords        stopword set passed to the shingle filter
     * @param stem             apply Porter stemming when {@code true}
     * @param outputUnigrams   also emit single-word tokens when {@code true}
     * @param minNGram         minimum shingle size (inclusive)
     * @param maxNGram         maximum shingle size (inclusive)
     * @param stemExclusionSet terms to protect from stemming; copied defensively
     */
    public NGramEnglishAnalyzer(Version matchVersion, Set<?> stopwords, boolean stem, boolean outputUnigrams, int minNGram, int maxNGram, Set<?> stemExclusionSet) {
        super(matchVersion, stopwords);
        this.stem = stem;
        this.outputUnigrams = outputUnigrams;
        this.minNGram = minNGram;
        this.maxNGram = maxNGram;
        // Defensive copy so callers cannot mutate the exclusion set afterward.
        this.stemExclusionSet = CharArraySet.unmodifiableSet(CharArraySet.copy(matchVersion, stemExclusionSet));
    }

    /**
     * Builds the token stream described in the class Javadoc.
     *
     * @param fieldName ignored; the same pipeline is used for every field
     * @param reader    source of the text to analyze
     */
    @Override
    protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
        final Tokenizer source = new StandardTokenizer(matchVersion, reader);
        TokenStream result = new StandardFilter(matchVersion, source);
        // Possessive stripping ("Bob's" -> "Bob") only exists from Lucene 3.1 on.
        if (matchVersion.onOrAfter(Version.LUCENE_31)) {
            result = new EnglishPossessiveFilter(result);
        }
        result = new LowerCaseFilter(matchVersion, result);
        ShingleAllStopFilter sf = new ShingleAllStopFilter(result, minNGram, maxNGram, stopwords);
        sf.setOutputUnigrams(outputUnigrams);
        if (!outputUnigrams) {
            // Without this, the filter would still emit unigrams for inputs too
            // short to form any shingle.
            sf.setOutputUnigramsIfNoShingles(false);
        }
        result = sf;
        if (stem) {
            if (!stemExclusionSet.isEmpty()) {
                // Marked keywords are skipped by the stemmer below.
                result = new KeywordMarkerFilter(result, stemExclusionSet);
            }
            result = new PorterStemFilter(result);
        }
        return new TokenStreamComponents(source, result);
    }

    /**
     * Ad-hoc smoke test: prints the tokens produced for a sample sentence.
     *
     * @param args optional; {@code args[0]} is the stopword file URI
     *             (falls back to the original developer path when absent)
     * @throws IOException if the stopword file or token stream fails
     */
    public static void main(String[] args) throws IOException {
        // Prefer a command-line path over the hard-coded developer-specific one.
        String stopwordPath = args.length > 0 ? args[0]
                : "file:///Users/xstevens/workspace/akela/stopwords-en.txt";
        Set<String> stopwords = Dictionary.loadDictionary(new Path(stopwordPath));
        NGramEnglishAnalyzer analyzer = new NGramEnglishAnalyzer(Version.LUCENE_31, stopwords, false, true);
        TokenStream stream = analyzer.tokenStream("", new StringReader("When I was growing up this was so much fun."));
        CharTermAttribute termAttr = stream.addAttribute(CharTermAttribute.class);
        try {
            // TokenStream contract: reset() before incrementToken(), then
            // end() after exhaustion; close() releases resources.
            stream.reset();
            while (stream.incrementToken()) {
                if (termAttr.length() > 0) {
                    // incrementToken() repopulates the attribute each call, so
                    // no manual setEmpty() is needed.
                    System.out.println(termAttr.toString());
                }
            }
            stream.end();
        } finally {
            stream.close();
        }
    }
}