#33 fix performance issue
medcl committed Jul 4, 2014
1 parent 54fd970 commit bafb724
Showing 4 changed files with 14 additions and 15 deletions.
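The fix is uniform across all four files: every dictionary lookup and load previously rebuilt a lowercase copy of its input via String.valueOf(...).toLowerCase().toCharArray(), allocating a fresh String and a fresh char[] on each call in the analyzer's hot path. The commit drops those per-call conversions and passes the original char[] through unchanged. A rough, JDK-only sketch of the two call shapes (buffer size and iteration count are made up for illustration, and this is a naive timing loop, not a rigorous benchmark):

// JDK-only illustration of the allocation this commit removes.
public class LowerCasePerCallSketch {
    public static void main(String[] args) {
        char[] buff = new char[4096];        // stand-in for the segment buffer
        java.util.Arrays.fill(buff, '词');
        int sink = 0;                        // keeps the JIT from dropping the loops

        // Old hot path: each lookup materialized a String plus a new char[].
        long t0 = System.nanoTime();
        for (int i = 0; i < 100_000; i++) {
            char[] lowered = String.valueOf(buff).toLowerCase().toCharArray();
            sink += lowered.length;
        }
        long oldNs = System.nanoTime() - t0;

        // New hot path: the buffer is read in place; nothing is allocated.
        t0 = System.nanoTime();
        for (int i = 0; i < 100_000; i++) {
            sink += buff.length;             // e.g. dict.match(buff, begin, len)
        }
        long newNs = System.nanoTime() - t0;

        System.out.printf("old: %d ms, new: %d ms (sink=%d)%n",
                oldNs / 1_000_000, newNs / 1_000_000, sink);
    }
}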
2 changes: 1 addition & 1 deletion pom.xml
@@ -31,7 +31,7 @@
     </parent>
 
     <properties>
-        <elasticsearch.version>1.1.1</elasticsearch.version>
+        <elasticsearch.version>1.0.0</elasticsearch.version>
     </properties>
 
     <repositories>
4 changes: 2 additions & 2 deletions src/main/java/org/wltea/analyzer/core/CJKSegmenter.java
@@ -58,7 +58,7 @@ public void analyze(AnalyzeContext context) {
             //process the queue of pending word segments
             Hit[] tmpArray = this.tmpHits.toArray(new Hit[this.tmpHits.size()]);
             for(Hit hit : tmpArray){
-                hit = Dictionary.getSingleton().matchWithHit(String.valueOf(context.getSegmentBuff()).toLowerCase().toCharArray(), context.getCursor() , hit);
+                hit = Dictionary.getSingleton().matchWithHit(context.getSegmentBuff(), context.getCursor() , hit);
                 if(hit.isMatch()){
                     //output the current word
                     Lexeme newLexeme = new Lexeme(context.getBufferOffset() , hit.getBegin() , context.getCursor() - hit.getBegin() + 1 , Lexeme.TYPE_CNWORD);
@@ -77,7 +77,7 @@ public void analyze(AnalyzeContext context) {
 
             //*********************************
             //then try a single-character match at the current cursor position
-            Hit singleCharHit = Dictionary.getSingleton().matchInMainDict(String.valueOf(context.getSegmentBuff()).toLowerCase().toCharArray(), context.getCursor(), 1);
+            Hit singleCharHit = Dictionary.getSingleton().matchInMainDict(context.getSegmentBuff(), context.getCursor(), 1);
             if(singleCharHit.isMatch()){//the single character is itself a word
                 //output the current word
                 Lexeme newLexeme = new Lexeme(context.getBufferOffset() , context.getCursor() , 1 , Lexeme.TYPE_CNWORD);
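In both call sites the segmenter now hands its shared read buffer to the dictionary directly. A sketch of that incremental-match pattern, built only from the signatures visible in this diff; the helper name matchStep and its wrapper class are hypothetical, and the import paths follow the file layout shown in this commit:

import org.wltea.analyzer.core.AnalyzeContext;
import org.wltea.analyzer.core.Lexeme;
import org.wltea.analyzer.dic.Dictionary;
import org.wltea.analyzer.dic.Hit;

class MatchStepSketch {
    // Extend a previously started prefix match by one character, as the
    // loop above does for each queued Hit.
    Lexeme matchStep(AnalyzeContext context, Hit pendingHit) {
        char[] buff = context.getSegmentBuff();   // shared buffer, read in place
        Hit hit = Dictionary.getSingleton()
                .matchWithHit(buff, context.getCursor(), pendingHit);
        if (hit.isMatch()) {
            // a dictionary word ends at the cursor: emit it as a Lexeme
            return new Lexeme(context.getBufferOffset(), hit.getBegin(),
                    context.getCursor() - hit.getBegin() + 1, Lexeme.TYPE_CNWORD);
        }
        return null;                              // no complete word yet
    }
}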
21 changes: 10 additions & 11 deletions src/main/java/org/wltea/analyzer/dic/Dictionary.java
@@ -119,7 +119,7 @@ public void addWords(Collection<String> words){
         for(String word : words){
             if (word != null) {
                 //bulk-load entries into the main in-memory dictionary
-                singleton._MainDict.fillSegment(word.trim().toLowerCase().toCharArray());
+                singleton._MainDict.fillSegment(word.trim().toCharArray());
             }
         }
     }
@@ -133,7 +133,7 @@ public void disableWords(Collection<String> words){
         for(String word : words){
             if (word != null) {
                 //bulk-disable entries
-                singleton._MainDict.disableSegment(word.trim().toLowerCase().toCharArray());
+                singleton._MainDict.disableSegment(word.trim().toCharArray());
             }
         }
     }
@@ -152,15 +152,15 @@ public Hit matchInMainDict(char[] charArray){
      * @return Hit describing the match result
      */
     public Hit matchInMainDict(char[] charArray , int begin, int length){
-        return singleton._MainDict.match(String.valueOf(charArray).toLowerCase().toCharArray(), begin, length);
+        return singleton._MainDict.match(charArray, begin, length);
     }
 
     /**
      * Search the quantifier dictionary for a match
      * @return Hit describing the match result
      */
     public Hit matchInQuantifierDict(char[] charArray , int begin, int length){
-        return singleton._QuantifierDict.match(String.valueOf(charArray).toLowerCase().toCharArray(), begin, length);
+        return singleton._QuantifierDict.match(charArray, begin, length);
     }
 
 
@@ -179,7 +179,7 @@ public Hit matchWithHit(char[] charArray , int currentIndex , Hit matchedHit){
      * @return boolean
      */
     public boolean isStopWord(char[] charArray , int begin, int length){
-        return singleton._StopWords.match(String.valueOf(charArray).toLowerCase().toCharArray(), begin, length).isMatch();
+        return singleton._StopWords.match(charArray, begin, length).isMatch();
     }

@@ -205,7 +205,7 @@ private void loadMainDict(){
             do {
                 theWord = br.readLine();
                 if (theWord != null && !"".equals(theWord.trim())) {
-                    _MainDict.fillSegment(theWord.trim().toLowerCase().toCharArray());
+                    _MainDict.fillSegment(theWord.trim().toCharArray());
                 }
             } while (theWord != null);

@@ -255,7 +255,7 @@ private void loadExtDict(){
                 theWord = br.readLine();
                 if (theWord != null && !"".equals(theWord.trim())) {
                     //load extension dictionary entries into the main in-memory dictionary
-                    _MainDict.fillSegment(theWord.trim().toLowerCase().toCharArray());
+                    _MainDict.fillSegment(theWord.trim().toCharArray());
                 }
             } while (theWord != null);

@@ -298,7 +298,7 @@ private void loadStopWordDict(){
             do {
                 theWord = br.readLine();
                 if (theWord != null && !"".equals(theWord.trim())) {
-                    _StopWords.fillSegment(theWord.trim().toLowerCase().toCharArray());
+                    _StopWords.fillSegment(theWord.trim().toCharArray());
                 }
             } while (theWord != null);

@@ -342,7 +342,7 @@ private void loadStopWordDict(){
                 theWord = br.readLine();
                 if (theWord != null && !"".equals(theWord.trim())) {
                     //load extension stopword entries into memory
-                    _StopWords.fillSegment(theWord.trim().toLowerCase().toCharArray());
+                    _StopWords.fillSegment(theWord.trim().toCharArray());
                 }
             } while (theWord != null);

@@ -383,7 +383,7 @@ private void loadQuantifierDict(){
             do {
                 theWord = br.readLine();
                 if (theWord != null && !"".equals(theWord.trim())) {
-                    _QuantifierDict.fillSegment(theWord.trim().toLowerCase().toCharArray());
+                    _QuantifierDict.fillSegment(theWord.trim().toCharArray());
                 }
             } while (theWord != null);

@@ -440,7 +440,6 @@ private void loadSurnameDict(){
     }
 
 
-
     private void loadSuffixDict(){
 
         _SuffixDict = new DictSegment((char)0);
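After this change the public lookup methods read the caller's buffer in place, so any case normalization has to happen once, upstream, rather than on every lookup. A minimal usage sketch of the lookup API, using only signatures shown in this diff; the import paths follow the file path in this commit, and dictionary initialization/configuration is assumed to have happened elsewhere:

import org.wltea.analyzer.dic.Dictionary;
import org.wltea.analyzer.dic.Hit;

class DictionaryLookupSketch {
    static boolean isWholeWord(String text) {
        char[] chars = text.toCharArray();
        // the buffer is passed as-is: the dictionary no longer makes a
        // String.valueOf(...).toLowerCase().toCharArray() copy per call
        Hit hit = Dictionary.getSingleton().matchInMainDict(chars, 0, chars.length);
        return hit.isMatch();
    }
}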
2 changes: 1 addition & 1 deletion src/main/java/org/wltea/analyzer/lucene/IKTokenizer.java
@@ -80,7 +80,7 @@ public boolean incrementToken() throws IOException {
             if(nextLexeme != null){
                 //convert the Lexeme into Attributes
                 //set the term text
-                termAtt.append(nextLexeme.getLexemeText().toLowerCase());
+                termAtt.append(nextLexeme.getLexemeText());
                 //set the term length
                 termAtt.setLength(nextLexeme.getLength());
                 //set the term offset
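With the per-token toLowerCase() gone, the tokenizer emits term text exactly as the segmenter produced it. A sketch of consuming the stream via the standard Lucene TokenStream protocol; the IKTokenizer(Reader, boolean useSmart) constructor is the stock IK Analyzer one and is an assumption here, since the plugin's wrapper may take different arguments:

import java.io.StringReader;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.wltea.analyzer.lucene.IKTokenizer;

public class ConsumeTokensSketch {
    public static void main(String[] args) throws Exception {
        // Assumption: stock IK constructor (Reader, useSmart); verify against
        // the plugin's actual IKTokenizer before relying on it.
        IKTokenizer tokenizer = new IKTokenizer(new StringReader("中华人民共和国"), true);
        CharTermAttribute term = tokenizer.addAttribute(CharTermAttribute.class);
        tokenizer.reset();
        while (tokenizer.incrementToken()) {
            // after this commit, the term text is no longer lowercased here
            System.out.println(term.toString());
        }
        tokenizer.end();
        tokenizer.close();
    }
}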
