-
Notifications
You must be signed in to change notification settings - Fork 326
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Merge pull request #883 from diyclassics/sent-tok-update
Sentence Tokenizer Update
- Loading branch information
Showing
16 changed files
with
634 additions
and
345 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,7 @@ | ||
# Contributors | ||
CLTK Core authors, ordered alphabetically by first name | ||
|
||
## key | ||
* val1 | ||
* val2 | ||
|
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Large diffs are not rendered by default.
Oops, something went wrong.
Empty file.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,10 @@ | ||
""" Params: Greek | ||
""" | ||
|
||
__author__ = ['Patrick J. Burns <patrick@diyclassics.org>'] | ||
__license__ = 'MIT License.' | ||
|
||
from nltk.tokenize.punkt import PunktLanguageVars | ||
|
||
class GreekLanguageVars(PunktLanguageVars):
    """Punkt language variables for Ancient Greek.

    Greek sentences end with a full stop, the Greek question mark
    (';'), or the ano teleia ('·'), so those replace Punkt's default
    sentence-final characters.
    """

    sent_end_chars = list('.;·')
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,50 @@ | ||
""" Code for sentence tokenization: Greek | ||
""" | ||
|
||
__author__ = ['Patrick J. Burns <patrick@diyclassics.org>'] | ||
__license__ = 'MIT License.' | ||
|
||
import os.path | ||
import re | ||
|
||
from cltk.tokenize.sentence import BaseSentenceTokenizer, BaseRegexSentenceTokenizer, BasePunktSentenceTokenizer | ||
from cltk.tokenize.greek.params import GreekLanguageVars | ||
from cltk.utils.file_operations import open_pickle | ||
|
||
from nltk.tokenize.punkt import PunktLanguageVars | ||
|
||
def SentenceTokenizer(tokenizer: str = 'regex'):
    """Factory returning a sentence tokenizer for Ancient Greek.

    :param tokenizer: backend to use: 'regex' (default) or 'punkt'
    :type tokenizer: str
    :return: a configured Greek sentence tokenizer instance
    :raises ValueError: if ``tokenizer`` names an unknown backend
    """
    if tokenizer == 'punkt':
        return GreekPunktSentenceTokenizer()
    if tokenizer == 'regex':
        return GreekRegexSentenceTokenizer()
    # Previously an unrecognized value fell through and silently returned
    # None; fail fast instead so the misconfiguration surfaces immediately.
    raise ValueError("Unknown tokenizer type: {!r}; expected 'punkt' or 'regex'".format(tokenizer))
|
||
|
||
class GreekPunktSentenceTokenizer(BasePunktSentenceTokenizer):
    """PunktSentenceTokenizer trained on Ancient Greek.

    Loads a pickled Punkt model from the ``greek_models_cltk`` corpus and
    tokenizes using the Greek sentence-final punctuation declared in
    ``GreekLanguageVars``.
    """
    models_path = '~/cltk_data/greek/model/greek_models_cltk/tokenizers/sentence'
    missing_models_message = "GreekPunktSentenceTokenizer requires the ```greek_models_cltk``` to be in cltk_data. Please load this corpus."

    def __init__(self: object, language: str = 'greek'):
        """
        :param language: language for sentence tokenization
        :type language: str
        """
        # Pass the Greek language vars through super() so the base class
        # sees them during initialization — mirrors
        # LatinPunktSentenceTokenizer; previously they were assigned only
        # after super().__init__() had already run.
        self.lang_vars = GreekLanguageVars()
        super().__init__(language='greek', lang_vars=self.lang_vars)
        self.models_path = GreekPunktSentenceTokenizer.models_path

        try:
            self.model = open_pickle(os.path.join(os.path.expanduser(self.models_path), 'greek_punkt.pickle'))
        except FileNotFoundError as err:
            # Re-raise with a corpus-install hint, preserving the original
            # exception as the cause for easier debugging.
            raise type(err)(GreekPunktSentenceTokenizer.missing_models_message) from err
|
||
|
||
class GreekRegexSentenceTokenizer(BaseRegexSentenceTokenizer):
    """Regex-based sentence tokenizer for Ancient Greek.

    Delegates all work to the base regex tokenizer, splitting on the
    sentence-final characters declared in ``GreekLanguageVars``.
    """

    def __init__(self: object):
        super().__init__(
            language='greek',
            sent_end_chars=GreekLanguageVars.sent_end_chars,
        )
Empty file.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,23 @@ | ||
""" Params: Latin | ||
""" | ||
|
||
__author__ = ['Patrick J. Burns <patrick@diyclassics.org>'] | ||
__license__ = 'MIT License.' | ||
|
||
from nltk.tokenize.punkt import PunktLanguageVars | ||
|
||
# Roman praenomen abbreviations (lower-cased, trailing periods stripped).
# NOTE(review): the second half largely repeats the first with a few
# additions ('paul', 'sp', 'sta'); the duplicates are harmless because the
# final ABBREVIATIONS container is a set — confirm before deduplicating,
# since callers may rely on the list value as-is.
PRAENOMINA = [
    'a', 'agr', 'ap', 'c', 'cn', 'd', 'f', 'k', 'l', "m'", 'm', 'mam',
    'n', 'oct', 'opet', 'p', 'post', 'pro', 'q', 's', 'ser', 'sert',
    'sex', 'st', 't', 'ti', 'v', 'vol', 'vop',
    'a', 'ap', 'c', 'cn', 'd', 'f', 'k', 'l', 'm', "m'", 'mam', 'n',
    'oct', 'opet', 'p', 'paul', 'post', 'pro', 'q', 'ser', 'sert',
    'sex', 'sp', 'st', 'sta', 't', 'ti', 'v', 'vol', 'vop',
]

# Month-name abbreviations followed by the calendar reference points
# (kalends, nones, ides) and the 'ante diem' formula.
CALENDAR = [
    'ian', 'febr', 'mart', 'apr', 'mai', 'iun',
    'iul', 'aug', 'sept', 'oct', 'nov', 'dec',
    'kal', 'non', 'id', 'a.d',
]

# Miscellaneous common abbreviations (offices, formulae, etc.).
MISC = ['coll', 'cos', 'ord', 'pl.', 's.c', 'suff', 'trib']

# Union of all abbreviation lists, kept as a set for O(1) membership tests.
ABBREVIATIONS = set(PRAENOMINA + CALENDAR + MISC)
|
||
class LatinLanguageVars(PunktLanguageVars):
    """Punkt language variables adjusted for Latin."""
    # Remove the apostrophe from Punkt's non-word character class so tokens
    # containing one (e.g. the elided praenomen "m'" in PRAENOMINA above)
    # are kept intact rather than split at the apostrophe.
    _re_non_word_chars = PunktLanguageVars._re_non_word_chars.replace("'",'')
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,36 @@ | ||
""" Code for sentence tokenization: Latin | ||
""" | ||
|
||
__author__ = ['Patrick J. Burns <patrick@diyclassics.org>'] | ||
__license__ = 'MIT License.' | ||
|
||
import os.path | ||
|
||
from cltk.tokenize.sentence import BaseSentenceTokenizer, BasePunktSentenceTokenizer | ||
from cltk.tokenize.latin.params import LatinLanguageVars | ||
from cltk.utils.file_operations import open_pickle | ||
|
||
def SentenceTokenizer(tokenizer: str = 'punkt'):
    """Factory returning a sentence tokenizer for Latin.

    :param tokenizer: backend to use; only 'punkt' is available for Latin
    :type tokenizer: str
    :return: a LatinPunktSentenceTokenizer instance
    :raises ValueError: if ``tokenizer`` names an unknown backend
    """
    if tokenizer == 'punkt':
        return LatinPunktSentenceTokenizer()
    # Previously any other value silently returned None; fail fast instead
    # so the misconfiguration surfaces at the call site.
    raise ValueError("Unknown tokenizer type: {!r}; expected 'punkt'".format(tokenizer))
|
||
|
||
class LatinPunktSentenceTokenizer(BasePunktSentenceTokenizer):
    """PunktSentenceTokenizer trained on Latin.

    Loads a pickled Punkt model from the ``latin_models_cltk`` corpus and
    tokenizes with Latin-specific language variables (apostrophe treated
    as a word character; see ``LatinLanguageVars``).
    """
    models_path = os.path.expanduser('~/cltk_data/latin/model/latin_models_cltk/tokenizers/sentence')
    # Fixed copy-paste error: the message previously named
    # 'BackoffLatinLemmatizer' instead of this class.
    missing_models_message = "LatinPunktSentenceTokenizer requires the ```latin_models_cltk``` to be in cltk_data. Please load this corpus."

    def __init__(self: object, language: str = 'latin'):
        """
        :param language: language for sentence tokenization
        :type language: str
        """
        self.lang_vars = LatinLanguageVars()
        super().__init__(language='latin', lang_vars=self.lang_vars)
        self.models_path = LatinPunktSentenceTokenizer.models_path

        try:
            self.model = open_pickle(os.path.join(self.models_path, 'latin_punkt.pickle'))
        except FileNotFoundError as err:
            # Re-raise with a corpus-install hint, preserving the original
            # exception as the cause for easier debugging.
            raise type(err)(LatinPunktSentenceTokenizer.missing_models_message) from err
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,30 @@ | ||
""" Tokenization utilities: Latin | ||
""" | ||
|
||
__author__ = ['Patrick J. Burns <patrick@diyclassics.org>'] | ||
__license__ = 'MIT License.' | ||
|
||
import pickle | ||
from typing import List, Dict, Tuple, Set, Any, Generator | ||
|
||
from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktTrainer | ||
from nltk.tokenize.punkt import PunktLanguageVars | ||
|
||
from cltk.corpus.readers import get_corpus_reader | ||
from cltk.tokenize.latin.params import ABBREVIATIONS | ||
|
||
from cltk.tokenize.utils import BaseSentenceTokenizerTrainer | ||
|
||
class LatinSentenceTokenizerTrainer(BaseSentenceTokenizerTrainer):
    """Punkt sentence-tokenizer trainer preconfigured for Latin."""

    def __init__(self: object, strict: bool = False):
        """
        :param strict: when True, the 'strict' punctuation marks
            (semicolon, colon, em dash) are also treated as sentence
            boundaries in addition to '.', '?', and '!'
        :type strict: bool
        """
        self.strict = strict
        self.punctuation = ['.', '?', '!']
        self.strict_punctuation = [';', ':', '—']
        # Latin abbreviation inventory from cltk.tokenize.latin.params.
        self.abbreviations = ABBREVIATIONS

        super().__init__(
            language='latin',
            punctuation=self.punctuation,
            strict=self.strict,
            strict_punctuation=self.strict_punctuation,
            abbreviations=self.abbreviations,
        )
Oops, something went wrong.