Skip to content

Commit

Permalink
[analysis-icu] Allow setting unicodeSetFilter (#20814)
Browse files Browse the repository at this point in the history
UnicodeSetFilter was only allowed in the icu_folding token filter.
It seems useful to expose this setting in icu_normalizer token filter
and char filter.
  • Loading branch information
nomoa authored and jpountz committed Jun 16, 2017
1 parent 9ddea53 commit ff9edb6
Show file tree
Hide file tree
Showing 5 changed files with 90 additions and 29 deletions.
8 changes: 8 additions & 0 deletions docs/plugins/analysis-icu.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,10 @@ normalization can be specified with the `name` parameter, which accepts `nfc`,
`nfkc`, and `nfkc_cf` (default). Set the `mode` parameter to `decompose` to
convert `nfc` to `nfd` or `nfkc` to `nfkd` respectively:

Which letters are normalized can be controlled by specifying the
`unicodeSetFilter` parameter, which accepts a
http://icu-project.org/apiref/icu4j/com/ibm/icu/text/UnicodeSet.html[UnicodeSet].

Here are two examples, the default usage and a customised character filter:


Expand Down Expand Up @@ -189,6 +193,10 @@ without any further configuration. The type of normalization can be specified
with the `name` parameter, which accepts `nfc`, `nfkc`, and `nfkc_cf`
(default).

Which letters are normalized can be controlled by specifying the
`unicodeSetFilter` parameter, which accepts a
http://icu-project.org/apiref/icu4j/com/ibm/icu/text/UnicodeSet.html[UnicodeSet].

You should probably prefer the <<analysis-icu-normalization-charfilter,Normalization character filter>>.

Here are two examples, the default usage and a customised token filter:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,9 +19,8 @@

package org.elasticsearch.index.analysis;

import com.ibm.icu.text.FilteredNormalizer2;
import com.ibm.icu.text.Normalizer2;
import com.ibm.icu.text.UnicodeSet;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.icu.ICUFoldingFilter;
import org.elasticsearch.common.settings.Settings;
Expand All @@ -41,31 +40,20 @@
* @author kimchy (shay.banon)
*/
public class IcuFoldingTokenFilterFactory extends AbstractTokenFilterFactory implements MultiTermAwareComponent {
private final String unicodeSetFilter;
/** Store here the same Normalizer used by the lucene ICUFoldingFilter */
private static final Normalizer2 ICU_FOLDING_NORMALIZER = Normalizer2.getInstance(
ICUFoldingFilter.class.getResourceAsStream("utr30.nrm"), "utr30", Normalizer2.Mode.COMPOSE);

private final Normalizer2 normalizer;

/**
 * Builds the factory, resolving the (static) UTR#30 folding normalizer and
 * wrapping it with an optional {@code unicodeSetFilter} from the settings so
 * that folding can be restricted to a subset of characters.
 */
public IcuFoldingTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
    super(indexSettings, name, settings);
    // Delegate unicodeSetFilter handling to the shared helper so the folding,
    // normalizer token filter and normalizer char filter all behave consistently.
    this.normalizer = IcuNormalizerTokenFilterFactory.wrapWithUnicodeSetFilter(ICU_FOLDING_NORMALIZER, settings);
}

@Override
public TokenStream create(TokenStream tokenStream) {
    // ICUFoldingFilter is in fact implemented as an ICUNormalizer2Filter, so
    // applying the (possibly UnicodeSet-filtered) folding normalizer directly
    // is equivalent to the stock ICUFoldingFilter while also honoring the
    // unicodeSetFilter setting resolved in the constructor.
    return new org.apache.lucene.analysis.icu.ICUNormalizer2Filter(tokenStream, normalizer);
}

@Override
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@


import com.ibm.icu.text.Normalizer2;

import org.apache.lucene.analysis.icu.ICUNormalizer2CharFilter;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
Expand All @@ -33,22 +34,22 @@
* Uses the {@link org.apache.lucene.analysis.icu.ICUNormalizer2CharFilter} to normalize characters.
* <p>The <tt>name</tt> can be used to provide the type of normalization to perform.</p>
* <p>The <tt>mode</tt> can be used to provide 'compose' or 'decompose'. Default is compose.</p>
* <p>The <tt>unicodeSetFilter</tt> attribute can be used to provide a UnicodeSet for filtering.</p>
*/
public class IcuNormalizerCharFilterFactory extends AbstractCharFilterFactory implements MultiTermAwareComponent {

private final String name;

private final Normalizer2 normalizer;

/**
 * Builds the factory: resolves the normalization form from the {@code name}
 * setting (default {@code nfkc_cf}), the {@code mode} setting
 * ({@code compose}/{@code decompose}, default compose), and wraps the
 * resulting normalizer with an optional {@code unicodeSetFilter}.
 */
public IcuNormalizerCharFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
    super(indexSettings, name);
    String method = settings.get("name", "nfkc_cf");
    String mode = settings.get("mode");
    // Any value other than the two recognized modes falls back to compose.
    if (!"compose".equals(mode) && !"decompose".equals(mode)) {
        mode = "compose";
    }
    Normalizer2 normalizer = Normalizer2.getInstance(
        null, method, "compose".equals(mode) ? Normalizer2.Mode.COMPOSE : Normalizer2.Mode.DECOMPOSE);
    // Shared helper keeps unicodeSetFilter semantics identical across the
    // char filter, token filter and folding filter factories.
    this.normalizer = IcuNormalizerTokenFilterFactory.wrapWithUnicodeSetFilter(normalizer, settings);
}

@Override
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,10 @@

package org.elasticsearch.index.analysis;

import com.ibm.icu.text.FilteredNormalizer2;
import com.ibm.icu.text.Normalizer2;
import com.ibm.icu.text.UnicodeSet;

import org.apache.lucene.analysis.TokenStream;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
Expand All @@ -28,26 +31,40 @@

/**
* Uses the {@link org.apache.lucene.analysis.icu.ICUNormalizer2Filter} to normalize tokens.
* <p>The <tt>name</tt> can be used to provide the type of normalization to perform.
* <p>The <tt>name</tt> can be used to provide the type of normalization to perform.</p>
* <p>The <tt>unicodeSetFilter</tt> attribute can be used to provide a UnicodeSet for filtering.</p>
*
*
*/
public class IcuNormalizerTokenFilterFactory extends AbstractTokenFilterFactory implements MultiTermAwareComponent {

private final String name;
private final Normalizer2 normalizer;

/**
 * Builds the factory: resolves the normalization form from the {@code name}
 * setting (default {@code nfkc_cf}) in compose mode and wraps it with an
 * optional {@code unicodeSetFilter} from the settings.
 */
public IcuNormalizerTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
    super(indexSettings, name, settings);
    String method = settings.get("name", "nfkc_cf");
    Normalizer2 normalizer = Normalizer2.getInstance(null, method, Normalizer2.Mode.COMPOSE);
    this.normalizer = wrapWithUnicodeSetFilter(normalizer, settings);
}

@Override
public TokenStream create(TokenStream tokenStream) {
    // Use the normalizer resolved once in the constructor (including any
    // unicodeSetFilter wrapping) instead of re-resolving it per token stream.
    return new org.apache.lucene.analysis.icu.ICUNormalizer2Filter(tokenStream, normalizer);
}

/** Returns this factory itself, so the same normalization is applied to multi-term queries. */
@Override
public Object getMultiTermComponent() {
return this;
}

/**
 * Wraps the given normalizer with a {@link FilteredNormalizer2} restricted to
 * the {@code unicodeSetFilter} setting, when that setting is present;
 * otherwise returns the normalizer unchanged.
 */
static Normalizer2 wrapWithUnicodeSetFilter(final Normalizer2 normalizer, Settings settings) {
    final String pattern = settings.get("unicodeSetFilter");
    if (pattern == null) {
        return normalizer;
    }
    final UnicodeSet filter = new UnicodeSet(pattern);
    // Freeze the set: it is shared by every stream this factory creates.
    filter.freeze();
    return new FilteredNormalizer2(normalizer, filter);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -39,3 +39,50 @@
tokenizer: keyword
- length: { tokens: 1 }
- match: { tokens.0.token: foo bar resume }
---
# Verifies that unicodeSetFilter restricts which characters the icu_normalizer
# char filter, icu_normalizer token filter and icu_folding token filter touch:
# excluded characters (ß, B, â respectively) must pass through unchanged.
"Normalization with a UnicodeSet Filter":
    - do:
        indices.create:
            index: test
            body:
                settings:
                    index:
                        analysis:
                            char_filter:
                                charfilter_icu_normalizer:
                                    type: icu_normalizer
                                    unicodeSetFilter: "[^ß]"
                            filter:
                                tokenfilter_icu_normalizer:
                                    type: icu_normalizer
                                    unicodeSetFilter: "[^ßB]"
                                tokenfilter_icu_folding:
                                    type: icu_folding
                                    unicodeSetFilter: "[^â]"
    - do:
        indices.analyze:
            index: test
            body:
                char_filter: ["charfilter_icu_normalizer"]
                tokenizer: keyword
                text: charfilter Föo Bâr Ruß
    - length: { tokens: 1 }
    - match: { tokens.0.token: charfilter föo bâr ruß }
    - do:
        indices.analyze:
            index: test
            body:
                tokenizer: keyword
                filter: ["tokenfilter_icu_normalizer"]
                text: tokenfilter Föo Bâr Ruß
    - length: { tokens: 1 }
    - match: { tokens.0.token: tokenfilter föo Bâr ruß }
    - do:
        indices.analyze:
            index: test
            body:
                tokenizer: keyword
                filter: ["tokenfilter_icu_folding"]
                text: icufolding Föo Bâr Ruß
    - length: { tokens: 1 }
    - match: { tokens.0.token: icufolding foo bâr russ }

0 comments on commit ff9edb6

Please sign in to comment.