diff --git a/CHANGELOG.md b/CHANGELOG.md
index 24324a47d..cb47ddcbc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -21,6 +21,7 @@
 ## [3.3.0](https://github.com/BLKSerene/Wordless/releases/tag/3.3.0) - ??/??/2023
 ### 🎉 New Features
 - Utils: Add PyThaiNLP's perceptron part-of-speech tagger (Blackboard)
+- Utils: Add spaCy's Korean sentence recognizer, word tokenizer, part-of-speech tagger, lemmatizer, and dependency parser
 
 ### ❌ Removals
 - Utils: Remove PyThaiNLP's perceptron part-of-speech tagger (LST20)
diff --git a/doc/doc_eng.md b/doc/doc_eng.md
index 2fcab198c..8c62c409a 100644
--- a/doc/doc_eng.md
+++ b/doc/doc_eng.md
@@ -699,7 +699,7 @@ Italian |✔|✔|✔|✔|✔|✔|✔
 Japanese |✔|✔|✖️|✔|✔|✔|✔
 Kannada |⭕️ |✔|✖️|✖️|✖️|✖️|✖️
 Kazakh |⭕️ |⭕️ |✖️|✖️|✖️|✔|✖️
-Korean |⭕️ |✔|✖️|✔|✖️|✔|✖️
+Korean |✔|✔|✖️|✔|✔|✔|✔
 Kurdish |⭕️ |⭕️ |✖️|✖️|✖️|✔|✖️
 Kyrgyz |⭕️ |✔|✖️|✖️|✖️|✖️|✖️
 Latin |⭕️ |✔|✖️|✖️|✔|✔|✖️
diff --git a/requirements_dev.txt b/requirements_dev.txt
index 2437bb4e6..d49933e1a 100644
--- a/requirements_dev.txt
+++ b/requirements_dev.txt
@@ -62,25 +62,26 @@ tzdata; sys_platform == 'win32' # Required by PyThaiNLP on Windows
 wordcloud
 
 # spaCy models
-spacy_models/ca_core_news_sm-3.5.0-py3-none-any.whl
-spacy_models/da_core_news_sm-3.5.0-py3-none-any.whl
-spacy_models/de_core_news_sm-3.5.0-py3-none-any.whl
-spacy_models/el_core_news_sm-3.5.0-py3-none-any.whl
-spacy_models/en_core_web_sm-3.5.0-py3-none-any.whl
-spacy_models/es_core_news_sm-3.5.0-py3-none-any.whl
-spacy_models/fi_core_news_sm-3.5.0-py3-none-any.whl
-spacy_models/fr_core_news_sm-3.5.0-py3-none-any.whl
-spacy_models/hr_core_news_sm-3.5.0-py3-none-any.whl
-spacy_models/it_core_news_sm-3.5.0-py3-none-any.whl
-spacy_models/ja_core_news_sm-3.5.0-py3-none-any.whl
-spacy_models/lt_core_news_sm-3.5.0-py3-none-any.whl
-spacy_models/mk_core_news_sm-3.5.0-py3-none-any.whl
-spacy_models/nb_core_news_sm-3.5.0-py3-none-any.whl
-spacy_models/nl_core_news_sm-3.5.0-py3-none-any.whl
-spacy_models/pl_core_news_sm-3.5.0-py3-none-any.whl
-spacy_models/pt_core_news_sm-3.5.0-py3-none-any.whl
-spacy_models/ro_core_news_sm-3.5.0-py3-none-any.whl
-spacy_models/ru_core_news_sm-3.5.0-py3-none-any.whl
-spacy_models/sv_core_news_sm-3.5.0-py3-none-any.whl
-spacy_models/uk_core_news_sm-3.5.0-py3-none-any.whl
-spacy_models/zh_core_web_sm-3.5.0-py3-none-any.whl
+spacy_models/ca_core_news_sm-3.6.0-py3-none-any.whl
+spacy_models/da_core_news_sm-3.6.0-py3-none-any.whl
+spacy_models/de_core_news_sm-3.6.0-py3-none-any.whl
+spacy_models/el_core_news_sm-3.6.0-py3-none-any.whl
+spacy_models/en_core_web_sm-3.6.0-py3-none-any.whl
+spacy_models/es_core_news_sm-3.6.0-py3-none-any.whl
+spacy_models/fi_core_news_sm-3.6.0-py3-none-any.whl
+spacy_models/fr_core_news_sm-3.6.0-py3-none-any.whl
+spacy_models/hr_core_news_sm-3.6.0-py3-none-any.whl
+spacy_models/it_core_news_sm-3.6.0-py3-none-any.whl
+spacy_models/ja_core_news_sm-3.6.0-py3-none-any.whl
+spacy_models/ko_core_news_sm-3.6.0-py3-none-any.whl
+spacy_models/lt_core_news_sm-3.6.0-py3-none-any.whl
+spacy_models/mk_core_news_sm-3.6.0-py3-none-any.whl
+spacy_models/nb_core_news_sm-3.6.0-py3-none-any.whl
+spacy_models/nl_core_news_sm-3.6.0-py3-none-any.whl
+spacy_models/pl_core_news_sm-3.6.0-py3-none-any.whl
+spacy_models/pt_core_news_sm-3.6.0-py3-none-any.whl
+spacy_models/ro_core_news_sm-3.6.0-py3-none-any.whl
+spacy_models/ru_core_news_sm-3.6.0-py3-none-any.whl
+spacy_models/sv_core_news_sm-3.6.0-py3-none-any.whl
+spacy_models/uk_core_news_sm-3.6.0-py3-none-any.whl
+spacy_models/zh_core_web_sm-3.6.0-py3-none-any.whl
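All pinned spaCy model wheels move from 3.5.0 to 3.6.0 in lockstep, with ko_core_news_sm joining the list. A spaCy model is only guaranteed compatible with the spaCy minor release it was trained for, so a stale wheel fails at load time rather than install time. Below is a minimal sketch of a check that catches such a mismatch early; the spacy.util helpers are standard spaCy API, but the check itself is illustrative and not part of this patch.

# Sketch: verify every installed spaCy model matches the installed spaCy
# minor version (a 3.6.x model is meant for spaCy 3.6.x). Illustrative
# only; Wordless itself pins matching wheels in requirements_dev.txt.
import spacy

spacy_minor = '.'.join(spacy.__version__.split('.')[:2])  # e.g. '3.6'

for model in spacy.util.get_installed_models():  # e.g. 'ko_core_news_sm'
    model_minor = '.'.join(spacy.util.get_package_version(model).split('.')[:2])
    assert model_minor == spacy_minor, f'{model} {model_minor} does not match spaCy {spacy_minor}'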
diff --git a/tests/wl_test_lang_examples.py b/tests/wl_test_lang_examples.py
index 739bc5b33..9c3ac5b96 100644
--- a/tests/wl_test_lang_examples.py
+++ b/tests/wl_test_lang_examples.py
@@ -304,6 +304,7 @@
 TEXT_ELL = ['Η ελληνική γλώσσα ανήκει στην ινδοευρωπαϊκή οικογένεια[9] και αποτελεί το μοναδικό μέλος του ελληνικού κλάδου, ενώ είναι η επίσημη γλώσσα της Ελλάδας και της Κύπρου.', ' ', 'Ανήκει επίσης στο βαλκανικό γλωσσικό δεσμό.', ' ', 'Στην ελληνική γλώσσα, έχουμε γραπτά κείμενα ήδη από τον 15ο αιώνα π.Χ..', ' ', 'Σαν Παγκόσμια Ημέρα Ελληνικής Γλώσσας, κάθε έτος, έχει καθιερωθεί η 9η Φεβρουαρίου.', ' ', 'Έχει την μακροβιότερη καταγεγραμμένη ιστορία από οποιαδήποτε άλλη ζωντανή ινδοευρωπαϊκή γλώσσα με τουλάχιστον 3.400 χρόνια γραπτής ιστορίας.[10]', ' ', 'Γράφεται με το ελληνικό αλφάβητο το οποίο χρησιμοποιείται για περίπου 2.600 χρόνια.[11][12]', ' ', 'Προηγουμένως η ελληνική γλώσσα γραφόταν με τη Γραμμική Β και το κυπριακό συλλαβάριο.[13]', ' ', 'Το ελληνικό αλφάβητο προέρχεται από το φοινικικό αλφάβητο.', ' ', 'Στο ελληνικό αλφάβητο βασίζεται το λατινικό, το κυριλλικό, το αρμενικό, το κοπτικό, το γοτθικό και πολλά άλλα αλφάβητα.']
 TEXT_ITA = ['È classificato al 23º posto tra le lingue per numero di parlanti nel mondo e, in Italia, è utilizzato da circa 58 milioni di residenti.[2]', ' ', "Nel 2015 l'italiano era la lingua materna del 90,4% dei residenti in Italia,[3] che spesso lo acquisiscono e lo usano insieme alle varianti regionali dell'italiano, alle lingue regionali e ai dialetti.", ' ', "In Italia viene ampiamente usato per tutti i tipi di comunicazione della vita quotidiana ed è largamente prevalente nei mezzi di comunicazione nazionali, nell'amministrazione pubblica dello Stato italiano e nell'editoria."]
 TEXT_JPN = ['日本語(にほんご、にっぽんご[注 2])は、日本国内や、かつての日本領だった国、そして国外移民や移住者を含む日本人同士の間で使用されている言語。', '日本は法令によって公用語を規定していないが、法令その他の公用文は全て日本語で記述され、各種法令[注 3]において日本語を用いることが規定され、学校教育においては「国語」の教科として学習を行う等、事実上、日本国内において唯一の公用語となっている。']
+TEXT_KOR = ['한국어(韓國語)는 대한민국과 조선민주주의인민공화국의 공용어이다.', ' ', '한국말, 조선말, 조선어로도 불린다.']
 TEXT_LIT = ['Lietuvių kalba – iš baltų prokalbės kilusi lietuvių tautos kalba, kuri Lietuvoje yra valstybinė, o Europos Sąjungoje – viena iš oficialiųjų kalbų.', ' ', 'Lietuviškai kalba apie tris milijonus žmonių (dauguma jų gyvena Lietuvoje).', ' ', 'Drauge su latvių, mirusiomis prūsų, jotvingių ir kitomis baltų kalbomis priklauso indoeuropiečių kalbų šeimos baltų kalbų grupei.']
 TEXT_MKD = ['Македонски јазик — јужнословенски јазик, дел од групата на словенски јазици од јазичното семејство на индоевропски јазици.', ' ', 'Македонскиот е службен и национален јазик во Македонија, а воедно е и официјално признат како регионален службен јазик во Горица и Пустец во Албанија каде што живее бројно македонско население, но и во Србија како официјален во општините Јабука и Пландиште, Романија и Косово.', ' ', 'Македонски се зборува и во Австралија, Бугарија, Грција, Канада, САД, Црна Гора, Турција, во некои земји−членки на Европската Унија и останатата македонска дијаспора.', ' ', 'Вкупниот број на македонски говорници е тешко да се утврди поради несоодветни пописи, но бројката се движи од околу 2,5 до 3 милиони луѓе.']
 TEXT_MAL = ['ഇന്ത്യയിൽ കേരള സംസ്ഥാനത്തിലും കേന്ദ്രഭരണപ്രദേശങ്ങളായ ലക്ഷദ്വീപിലും പോണ്ടിച്ചേരിയുടെ ഭാഗമായ മാഹിയിലും തമിഴ്നാട്ടിലെ കന്യാകുമാരി ജില്ലയിലും നീലഗിരി ജില്ലയിലെ ഗൂഡല്ലൂർ താലൂക്കിലും സംസാരിക്കപ്പെടുന്ന ഭാഷയാണ് മലയാളം.', ' ', 'ഇതു ദ്രാവിഡ ഭാഷാ കുടുംബത്തിൽപ്പെടുന്നു.', ' ', 'ഇന്ത്യയിൽ ശ്രേഷ്ഠഭാഷാ പദവി ലഭിക്കുന്ന അഞ്ചാമത്തെ ഭാഷയാണ് മലയാളം[5].', ' ', '2013 മെയ് 23-നു ചേർന്ന കേന്ദ്രമന്ത്രിസഭായോഗമാണ് മലയാളത്തെ ശ്രേഷ്ഠഭാഷയായി അംഗീകരിച്ചത്.', ' ', 'ക്ലാസിക്കൽ ലാംഗ്വേജ് എന്ന പദവിയാണ് ലൽകിയത്.', ' ', 'അതിനു മലയാളത്തിൽ നൽകിയ വിവർത്തനം ആണ് ശ്രേഷ്ഠഭാഷ എന്നത്.', ' ', 'വാസ്തവത്തിൽ ഇത് അത്രശരിയായ വിവർത്തനമോ ശരിയായ പ്രയോഗമോ അല്ല.', ' ', 'ശ്രേഷ്ഠം മോശം എന്ന നിലയിൽ ഭാഷകളെ വിലയിരുത്തുന്നത് ശാസ്ത്രീയമായ കാര്യമല്ല.', ' ', 'ഭാഷകളിൽ ശ്രേഷ്ഠമെന്നും അല്ലാത്തത് എന്നുമുള്ള വിഭജനം ഇല്ല.', ' ', 'ഇന്ത്യൻ ഭരണഘടനയിലെ എട്ടാം ഷെഡ്യൂളിൽ ഉൾപ്പെടുത്തിയിരിക്കുന്ന ഇന്ത്യയിലെ ഇരുപത്തിരണ്ട് ഔദ്യോഗിക ഭാഷകളിൽ ഒന്നാണ് മലയാളം[6].', ' ', 'മലയാള ഭാഷ കൈരളി,മലനാട് ഭാഷ എന്നും അറിയപ്പെടുന്നു.', 'കേരള സംസ്ഥാനത്തിലെ ഭരണഭാഷയും കൂടിയാണ്‌ മലയാളം.', ' ', 'കേരളത്തിനും ലക്ഷദ്വീപിനും പുറമേ തമിഴ്നാട്ടിലെ ചില ഭാഗങ്ങളിലും കന്യാകുമാരി ജില്ല, നീലഗിരി ജില്ല കർണാടകയുടെ ദക്ഷിണ കന്നഡ ജില്ല, കൊടഗ് ഭാഗങ്ങളിലും ഗൾഫ് രാജ്യങ്ങൾ, സിംഗപ്പൂർ, മലേഷ്യ എന്നിവിടങ്ങളിലെ കേരളീയ പൈതൃകമുള്ള അനേകം ജനങ്ങളും മലയാളം ഉപയോഗിച്ചുപോരുന്നു.', 'ദേശീയ ഭാഷയായി ഉൾപ്പെടുത്തിയത് മറ്റ് 21 ഭാഷകളുടേതുപോലെ തനതായ വ്യക്തിത്വം ഉള്ളതിനാലാണ്.', ' ', 'മലയാള ഭാഷയുടെ ഉല്പത്തിയും പ്രാചീനതയും സംബന്ധിച്ച കാര്യങ്ങൾ ഇന്നും അവ്യക്തമാണ്.', ' ', 'പഴയ തമിഴിനും മുൻപത്തെ മൂലദ്രാവിഡമാണ് മലയാളത്തിന്റെ ആദ്യ രൂപം എന്നു കരുതുന്നു.', 'യു.എ.ഇ-യിലെ നാല് ഔദ്യോഗിക ഭാഷകളിൽ ഒന്നു മലയാളമാണ്.[അവലംബം ആവശ്യമാണ്]']
@@ -365,7 +366,7 @@
 SENTENCE_ITA = TEXT_ITA[0]
 SENTENCE_JPN = TEXT_JPN[0]
 SENTENCE_KAN = 'ದ್ರಾವಿಡ ಭಾಷೆಗಳಲ್ಲಿ ಪ್ರಾಮುಖ್ಯವುಳ್ಳ ಭಾಷೆಯೂ ಭಾರತದ ಪುರಾತನವಾದ ಭಾಷೆಗಳಲ್ಲಿ ಒಂದೂ ಆಗಿರುವ ಕನ್ನಡ ಭಾಷೆಯನ್ನು ಅದರ ವಿವಿಧ ರೂಪಗಳಲ್ಲಿ ಸುಮಾರು ೪೫ ದಶಲಕ್ಷ ಜನರು ಆಡು ನುಡಿಯಾಗಿ ಬಳಸುತ್ತಲಿದ್ದಾರೆ.'
-SENTENCE_KOR = '한국어(韓國語)는 대한민국과 조선민주주의인민공화국의 공용어이다.'
+SENTENCE_KOR = TEXT_KOR[0]
 SENTENCE_KIR = 'Кыргыз тили — Кыргыз Республикасынын мамлекеттик тили, түрк тилдеринин курамына, анын ичинде кыргыз-кыпчак же тоо-алтай тобуна кирет.'
 SENTENCE_LAT = 'Lingua Latina,[1] sive sermo Latinus,[2] est lingua Indoeuropaea qua primum Latini universi et Romani antiqui in primis loquebantur quamobrem interdum etiam lingua Latia[3] (in Latio enim sueta) et lingua Romana[4] (nam imperii Romani sermo sollemnis) appellatur.'
 SENTENCE_LAV = 'Latviešu valoda ir dzimtā valoda apmēram 1,7 miljoniem cilvēku, galvenokārt Latvijā, kur tā ir vienīgā valsts valoda.[3]'
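The Korean expectations in the NLP test diffs below are the literal outputs of running ko_core_news_sm over SENTENCE_KOR. They can be reproduced outside the test harness with plain spaCy; this is a minimal sketch using only standard spaCy token attributes, with the model installed as above (exact tags, lemmas, and parses depend on the model version).

# Sketch: reproduce the Korean expectations below directly with spaCy.
# Requires ko_core_news_sm (see requirements_dev.txt / wl_downloader_ci.py).
import spacy

nlp = spacy.load('ko_core_news_sm')
doc = nlp('한국어(韓國語)는 대한민국과 조선민주주의인민공화국의 공용어이다.')

for token in doc:
    # token.tag_   - fine-grained Korean tag, e.g. 'ncn' (test_pos_tagging.py)
    # token.pos_   - universal POS tag, e.g. 'NOUN'
    # token.lemma_ - '+'-joined morpheme lemma (test_lemmatization.py)
    # token.dep_ / token.head - dependency relation (test_dependency_parsing.py)
    print(token.text, token.tag_, token.pos_, token.lemma_, token.dep_, token.head.text)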
diff --git a/tests/wl_tests_nlp/test_dependency_parsing.py b/tests/wl_tests_nlp/test_dependency_parsing.py
index 075cf78af..923a33830 100644
--- a/tests/wl_tests_nlp/test_dependency_parsing.py
+++ b/tests/wl_tests_nlp/test_dependency_parsing.py
@@ -123,6 +123,8 @@ def test_dependency_parse(lang, dependency_parser):
         assert dependencies == [('È', 'classificato', 'aux:pass', 1), ('classificato', 'classificato', 'ROOT', 0), ('al', '23º', 'case', 1), ('23º', 'posto', 'nummod', 1), ('posto', 'classificato', 'obl', -3), ('tra', 'lingue', 'case', 2), ('le', 'lingue', 'det', 1), ('lingue', 'posto', 'nmod', -3), ('per', 'numero', 'case', 1), ('numero', 'lingue', 'nmod', -2), ('di', 'parlanti', 'case', 1), ('parlanti', 'numero', 'nmod', -2), ('nel', 'mondo', 'case', 1), ('mondo', 'parlanti', 'nmod', -2), ('e', 'utilizzato', 'cc', 6), (',', 'utilizzato', 'punct', 5), ('in', 'Italia', 'case', 1), ('Italia', 'utilizzato', 'obl', 3), (',', 'Italia', 'punct', -1), ('è', 'utilizzato', 'aux:pass', 1), ('utilizzato', 'classificato', 'conj', -19), ('da', 'milioni', 'case', 3), ('circa', '58', 'advmod', 1), ('58', 'milioni', 'nummod', 1), ('milioni', 'utilizzato', 'obl', -4), ('di', 'residenti.[2', 'case', 1), ('residenti.[2', 'milioni', 'nmod', -2), (']', 'utilizzato', 'punct', -7)]
     elif lang == 'jpn':
         assert dependencies == [('日本', 'ご', 'compound', 4), ('語', 'ご', 'compound', 3), ('(', 'ご', 'compound', 2), ('にほん', 'ご', 'compound', 1), ('ご', ')', 'nmod', 8), ('、', 'ご', 'punct', -1), ('にっぽん', 'ご', 'compound', 1), ('ご', ')', 'compound', 5), ('[', ')', 'punct', 4), ('注', ')', 'compound', 3), ('2', ')', 'compound', 2), (']', ')', 'punct', 1), (')', '言語', 'nsubj', 35), ('は', ')', 'case', -1), ('、', ')', 'punct', -2), ('日本', '内', 'compound', 2), ('国', '内', 'compound', 1), ('内', '領', 'nmod', 6), ('や', '内', 'case', -1), ('、', '内', 'punct', -2), ('かつて', '領', 'advmod', 3), ('の', 'かつて', 'case', -1), ('日本', '領', 'compound', 1), ('領', '国', 'acl', 3), ('だっ', '領', 'cop', -1), ('た', '領', 'aux', -2), ('国', '使用', 'obl', 16), ('、', '国', 'punct', -1), ('そして', '使用', 'cc', 14), ('国外', '移民', 'compound', 1), ('移民', '者', 'nmod', 3), ('や', '移民', 'case', -1), ('移住', '者', 'compound', 1), ('者', '含む', 'obj', 2), ('を', '者', 'case', -1), ('含む', '同士', 'acl', 3), ('日本', '同士', 'compound', 2), ('人', '同士', 'compound', 1), ('同士', '間', 'nmod', 2), ('の', '同士', 'case', -1), ('間', '使用', 'obl', 2), ('で', '間', 'case', -1), ('使用', '言語', 'acl', 5), ('さ', '使用', 'aux', -1), ('れ', '使用', 'aux', -2), ('て', '使用', 'mark', -3), ('いる', 'て', 'fixed', -1), ('言語', '言語', 'ROOT', 0), ('。', '言語', 'punct', -1)]
+    elif lang == 'kor':
+        assert dependencies == [('한국어', '공용어이다', 'dislocated', 7), ('(', '韓國語', 'punct', 1), ('韓國語', '한국어', 'appos', -2), (')', '韓國語', 'punct', -1), ('는', '한국어', 'case', -4), ('대한민국과', '조선민주주의인민공화국의', 'compound', 1), ('조선민주주의인민공화국의', '공용어이다', 'nsubj', 1), ('공용어이다', '공용어이다', 'ROOT', 0), ('.', '공용어이다', 'punct', -1)]
     elif lang == 'lit':
         assert dependencies == [('Lietuvių', 'kalba', 'nmod', 1), ('kalba', 'kalba', 'ROOT', 0), ('–', 'kalba', 'punct', 7), ('iš', 'prokalbės', 'case', 2), ('baltų', 'iš', 'advmod:emph', -1), ('prokalbės', 'kilusi', 'obl:arg', 1), ('kilusi', 'kalba', 'acl', 3), ('lietuvių', 'tautos', 'nmod', 1), ('tautos', 'kalba', 'nmod', 1), ('kalba', 'kalba', 'ROOT', 0), (',', 'kuri', 'punct', 1), ('kuri', 'kalba', 'conj', -2), ('Lietuvoje', 'kuri', 'advmod:emph', -1), ('yra', 'kuri', 'advmod:emph', -2), ('valstybinė', 'kuri', 'advmod:emph', -3), (',', 'Sąjungoje', 'punct', 3), ('o', 'Sąjungoje', 'cc', 2), ('Europos', 'Sąjungoje', 'nmod', 1), ('Sąjungoje', 'valstybinė', 'conj', -4), ('–', 'viena', 'punct', 1), ('viena', 'kuri', 'appos', -9), ('iš', 'kalbų', 'case', 2), ('oficialiųjų', 'kalbų', 'acl', 1), ('kalbų', 'viena', 'obl:arg', -3), ('.', '.', 'ROOT', 0)]
     elif lang == 'mkd':
diff --git a/tests/wl_tests_nlp/test_lemmatization.py b/tests/wl_tests_nlp/test_lemmatization.py
index b3c7549a9..4f72a562c 100644
--- a/tests/wl_tests_nlp/test_lemmatization.py
+++ b/tests/wl_tests_nlp/test_lemmatization.py
@@ -224,6 +224,8 @@
             assert lemmas == ['日本語', '(', 'にほん', 'ご', '、', 'にっぽん', 'ご', '[', '注', '2', ']', ')', 'は', '、', '日本', '国', '内', 'や', '、', 'かつて', 'の', '日本', '領', 'だ', 'た', '国', '、', 'そして', '国外', '移民', 'や', '移住者', 'を', '含む', '日本人', '同士', 'の', '間', 'で', '使用', 'する', 'れる', 'て', 'いる', '言語', '。']
         else:
             tests_lang_util_skipped = True
+    elif lang == 'kor':
+        assert lemmas == ['한국어', '(', '韓國語', ')', '는', '대한민+국과', '조선민주주의인민공화국+의', '공용어이다', '.']
     elif lang == 'lat':
         assert lemmas == ['lingua', 'Latina,[1', ']', 'sive', 'sermo', 'Latinus,[2', ']', 'sum', 'lingua', 'indoeuropaeus', 'qui', 'primus', 'Latinus', 'universus', 'et', 'Romanus', 'antiquus', 'in', 'primus', 'loquor', 'quamobrem', 'interdum', 'etiam', 'lingua', 'Latia[3', ']', '(', 'in', 'Latium', 'enim', 'suetus', ')', 'et', 'lingua', 'Romana[4', ']', '(', 'nam', 'imperium', 'Romanus', 'sermo', 'sollemne', ')', 'appello', '.']
     elif lang == 'lav':
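One quirk visible in the expectations above: spaCy's Korean lemmatizer emits morpheme-segmented lemmas joined with '+' (e.g. '조선민주주의인민공화국+의', noun plus genitive particle) rather than a single surface form. If plain surface lemmas were wanted downstream, stripping the separator would be trivial; a hypothetical post-processing helper, not something this patch does:

# Sketch (hypothetical): collapse spaCy's '+'-joined Korean morpheme
# lemmas into a single surface-like string.
def collapse_korean_lemma(lemma: str) -> str:
    return lemma.replace('+', '')

assert collapse_korean_lemma('조선민주주의인민공화국+의') == '조선민주주의인민공화국의'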
diff --git a/tests/wl_tests_nlp/test_pos_tagging.py b/tests/wl_tests_nlp/test_pos_tagging.py
index 85aea7cc8..7b2a378ef 100644
--- a/tests/wl_tests_nlp/test_pos_tagging.py
+++ b/tests/wl_tests_nlp/test_pos_tagging.py
@@ -159,8 +159,14 @@ def test_pos_tag(lang, pos_tagger):
         else:
             tests_lang_util_skipped = True
     elif lang == 'kor':
-        assert tokens_tagged == [('한국어', 'NNG'), ('(', 'SSO'), ('韓', 'NNG'), ('國語', 'NNG'), (')', 'SSC'), ('는', 'JX'), ('대한민국', 'NNP'), ('과', 'JC'), ('조선', 'NNP'), ('민주주의', 'NNG'), ('인민공화국', 'NNP'), ('의', 'JKG'), ('공용어', 'NNG'), ('이', 'VCP'), ('다', 'EF'), ('.', 'SF')]
-        assert tokens_tagged_universal == [('한국어', 'NOUN'), ('(', 'PUNCT'), ('韓', 'NOUN'), ('國語', 'NOUN'), (')', 'PUNCT'), ('는', 'ADP'), ('대한민국', 'PROPN'), ('과', 'CONJ'), ('조선', 'PROPN'), ('민주주의', 'NOUN'), ('인민공화국', 'PROPN'), ('의', 'ADP'), ('공용어', 'NOUN'), ('이', 'ADP'), ('다', 'X'), ('.', 'PUNCT')]
+        if pos_tagger == 'python_mecab_ko_mecab':
+            assert tokens_tagged == [('한국어', 'NNG'), ('(', 'SSO'), ('韓', 'NNG'), ('國語', 'NNG'), (')', 'SSC'), ('는', 'JX'), ('대한민국', 'NNP'), ('과', 'JC'), ('조선', 'NNP'), ('민주주의', 'NNG'), ('인민공화국', 'NNP'), ('의', 'JKG'), ('공용어', 'NNG'), ('이', 'VCP'), ('다', 'EF'), ('.', 'SF')]
+            assert tokens_tagged_universal == [('한국어', 'NOUN'), ('(', 'PUNCT'), ('韓', 'NOUN'), ('國語', 'NOUN'), (')', 'PUNCT'), ('는', 'ADP'), ('대한민국', 'PROPN'), ('과', 'CONJ'), ('조선', 'PROPN'), ('민주주의', 'NOUN'), ('인민공화국', 'PROPN'), ('의', 'ADP'), ('공용어', 'NOUN'), ('이', 'ADP'), ('다', 'X'), ('.', 'PUNCT')]
+        elif pos_tagger == 'spacy_kor':
+            assert tokens_tagged == [('한국어', 'ncn'), ('(', 'sl'), ('韓國語', 'nq'), (')', 'sr'), ('는', 'jxt'), ('대한민국과', 'ncn+ncn'), ('조선민주주의인민공화국의', 'nq+jcs'), ('공용어이다', 'ncpa+xsv+ep+ef'), ('.', 'sf')]
+            assert tokens_tagged_universal == [('한국어', 'NOUN'), ('(', 'PUNCT'), ('韓國語', 'PROPN'), (')', 'PUNCT'), ('는', 'ADP'), ('대한민국과', 'NOUN'), ('조선민주주의인민공화국의', 'NOUN'), ('공용어이다', 'VERB'), ('.', 'PUNCT')]
+        else:
+            tests_lang_util_skipped = True
     elif lang == 'lit':
         assert tokens_tagged == [('Lietuvių', 'dkt.vyr.dgs.K.'), ('kalba', 'dkt.mot.vns.Įn.'), ('–', 'skyr.'), ('iš', 'prl.K.'), ('baltų', 'bdv.aukšč.vyr.dgs.K.'), ('prokalbės', 'dkt.mot.vns.K.'), ('kilusi', 'vksm.dlv.veik.būt-k.mot.vns.V.'), ('lietuvių', 'dkt.vyr.dgs.K.'), ('tautos', 'dkt.mot.vns.K.'), ('kalba', 'dkt.mot.vns.Įn.'), (',', 'skyr.'), ('kuri', 'įv.mot.vns.V.'), ('Lietuvoje', 'dkt.tikr.mot.vns.Vt.'), ('yra', 'vksm.asm.tiesiog.es.vns.3.'), ('valstybinė', 'bdv.nelygin.mot.vns.V.'), (',', 'skyr.'), ('o', 'jng.'), ('Europos', 'dkt.tikr.mot.vns.K.'), ('Sąjungoje', 'dkt.mot.vns.Vt.'), ('–', 'skyr.'), ('viena', 'įv.mot.vns.V.'), ('iš', 'prl.K.'), ('oficialiųjų', 'bdv.nelygin.įvardž.vyr.dgs.K.'), ('kalbų', 'dkt.vyr.vns.V.'), ('.', 'skyr.')]
         assert tokens_tagged_universal == [('Lietuvių', 'NOUN'), ('kalba', 'NOUN'), ('–', 'PUNCT'), ('iš', 'ADP'), ('baltų', 'ADJ'), ('prokalbės', 'NOUN'), ('kilusi', 'VERB'), ('lietuvių', 'NOUN'), ('tautos', 'NOUN'), ('kalba', 'NOUN'), (',', 'PUNCT'), ('kuri', 'DET'), ('Lietuvoje', 'PROPN'), ('yra', 'AUX'), ('valstybinė', 'ADJ'), (',', 'PUNCT'), ('o', 'CCONJ'), ('Europos', 'PROPN'), ('Sąjungoje', 'NOUN'), ('–', 'PUNCT'), ('viena', 'PRON'), ('iš', 'ADP'), ('oficialiųjų', 'ADJ'), ('kalbų', 'NOUN'), ('.', 'PUNCT')]
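The sentence tokenization tests below cover two granularities: whole sentences (test_sentence_tokenize) and sentence segments split at clause-level punctuation (test_sentence_seg_tokenize), which is why the Korean expectations list '한국말,' and '조선말,' as separate segments. A simplified stand-in for the segment split, not Wordless's actual implementation:

# Sketch: split a sentence into segments at clause-level punctuation,
# keeping the delimiter, as in the Korean expectations below.
# Simplified stand-in; Wordless's real splitter covers more delimiters.
import re

def to_sentence_segs(sentence):
    return [seg.strip() for seg in re.findall(r'[^,、،]+[,、،]?', sentence)]

print(to_sentence_segs('한국말, 조선말, 조선어로도 불린다.'))
# ['한국말,', '조선말,', '조선어로도 불린다.']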
diff --git a/tests/wl_tests_nlp/test_sentence_tokenization.py b/tests/wl_tests_nlp/test_sentence_tokenization.py
index f8316ec9b..71612a76e 100644
--- a/tests/wl_tests_nlp/test_sentence_tokenization.py
+++ b/tests/wl_tests_nlp/test_sentence_tokenization.py
@@ -129,6 +129,8 @@
             tests_lang_util_skipped = True
     elif lang == 'jpn':
         assert sentences == ['日本語(にほんご、にっぽんご[注 2])は、日本国内や、かつての日本領だった国、そして国外移民や移住者を含む日本人同士の間で使用されている言語。', '日本は法令によって公用語を規定していないが、法令その他の公用文は全て日本語で記述され、各種法令[注 3]において日本語を用いることが規定され、学校教育においては「国語」の教科として学習を行う等、事実上、日本国内において唯一の公用語となっている。']
+    elif lang == 'kor':
+        assert sentences == ['한국어(韓國語)는 대한민국과 조선민주주의인민공화국의 공용어이다.', '한국말, 조선말, 조선어로도 불린다.']
     elif lang == 'lit':
         if sentence_tokenizer == 'spacy_dependency_parser_lit':
             assert sentences == ['Lietuvių kalba', '– iš baltų prokalbės kilusi lietuvių tautos kalba', ', kuri Lietuvoje yra valstybinė, o Europos Sąjungoje – viena iš oficialiųjų kalbų.', 'Lietuviškai kalba apie tris milijonus žmonių (dauguma jų gyvena Lietuvoje).', 'Drauge su latvių, mirusiomis prūsų, jotvingių ir kitomis baltų kalbomis priklauso indoeuropiečių kalbų šeimos baltų kalbų grupei.']
@@ -284,6 +286,8 @@ def test_sentence_seg_tokenize(lang):
         assert sentence_segs == ['È classificato al 23º posto tra le lingue per numero di parlanti nel mondo e,', 'in Italia,', 'è utilizzato da circa 58 milioni di residenti.', "[2] Nel 2015 l'italiano era la lingua materna del 90,", '4% dei residenti in Italia,', "[3] che spesso lo acquisiscono e lo usano insieme alle varianti regionali dell'italiano,", 'alle lingue regionali e ai dialetti.', 'In Italia viene ampiamente usato per tutti i tipi di comunicazione della vita quotidiana ed è largamente prevalente nei mezzi di comunicazione nazionali,', "nell'amministrazione pubblica dello Stato italiano e nell'editoria."]
     elif lang == 'jpn':
         assert sentence_segs == ['日本語(にほんご、', 'にっぽんご[注 2])は、', '日本国内や、', 'かつての日本領だった国、', 'そして国外移民や移住者を含む日本人同士の間で使用されている言語。', '日本は法令によって公用語を規定していないが、', '法令その他の公用文は全て日本語で記述され、', '各種法令[注 3]において日本語を用いることが規定され、', '学校教育においては「国語」の教科として学習を行う等、', '事実上、', '日本国内において唯一の公用語となっている。']
+    elif lang == 'kor':
+        assert sentence_segs == ['한국어(韓國語)는 대한민국과 조선민주주의인민공화국의 공용어이다.', '한국말,', '조선말,', '조선어로도 불린다.']
     elif lang == 'lit':
         assert sentence_segs == ['Lietuvių kalba – iš baltų prokalbės kilusi lietuvių tautos kalba,', 'kuri Lietuvoje yra valstybinė,', 'o Europos Sąjungoje – viena iš oficialiųjų kalbų.', 'Lietuviškai kalba apie tris milijonus žmonių (dauguma jų gyvena Lietuvoje).', 'Drauge su latvių,', 'mirusiomis prūsų,', 'jotvingių ir kitomis baltų kalbomis priklauso indoeuropiečių kalbų šeimos baltų kalbų grupei.']
     elif lang == 'mkd':
diff --git a/tests/wl_tests_nlp/test_word_tokenization.py b/tests/wl_tests_nlp/test_word_tokenization.py
index 374f3552b..828abaac7 100644
--- a/tests/wl_tests_nlp/test_word_tokenization.py
+++ b/tests/wl_tests_nlp/test_word_tokenization.py
@@ -211,7 +211,12 @@
     elif lang == 'kir':
         assert tokens == ['Кыргыз', 'тили', '—', 'Кыргыз', 'Республикасынын', 'мамлекеттик', 'тили', ',', 'түрк', 'тилдеринин', 'курамына', ',', 'анын', 'ичинде', 'кыргыз-кыпчак', 'же', 'тоо-алтай', 'тобуна', 'кирет', '.']
     elif lang == 'kor':
-        assert tokens == ['한국어', '(', '韓', '國語', ')', '는', '대한민국', '과', '조선', '민주주의', '인민공화국', '의', '공용어', '이', '다', '.']
+        if word_tokenizer == 'python_mecab_ko_mecab':
+            assert tokens == ['한국어', '(', '韓', '國語', ')', '는', '대한민국', '과', '조선', '민주주의', '인민공화국', '의', '공용어', '이', '다', '.']
+        elif word_tokenizer == 'spacy_kor':
+            assert tokens == ['한국어', '(', '韓國語', ')', '는', '대한민국과', '조선민주주의인민공화국의', '공용어이다', '.']
+        else:
+            tests_lang_util_skipped = True
     elif lang == 'lat':
         assert tokens == ['Lingua', 'Latina,[1', ']', 'sive', 'sermo', 'Latinus,[2', ']', 'est', 'lingua', 'Indoeuropaea', 'qua', 'primum', 'Latini', 'universi', 'et', 'Romani', 'antiqui', 'in', 'primis', 'loquebantur', 'quamobrem', 'interdum', 'etiam', 'lingua', 'Latia[3', ']', '(', 'in', 'Latio', 'enim', 'sueta', ')', 'et', 'lingua', 'Romana[4', ']', '(', 'nam', 'imperii', 'Romani', 'sermo', 'sollemnis', ')', 'appellatur', '.']
     elif lang == 'lav':
diff --git a/tests/wl_tests_settings/test_settings_global.py b/tests/wl_tests_settings/test_settings_global.py
index a5f364bd1..a51592682 100644
--- a/tests/wl_tests_settings/test_settings_global.py
+++ b/tests/wl_tests_settings/test_settings_global.py
@@ -167,11 +167,11 @@ def check_settings_global(self):
         # Loading languages supported by spaCy
         for lang in pkgutil.iter_modules(spacy.lang.__path__):
             if lang.ispkg:
-                if lang.name not in ['ko', 'sr', 'th', 'vi', 'xx']:
-                    langs_supported_spacy.append(lang.name)
                 # Serbian
-                elif lang.name == 'sr':
+                if lang.name == 'sr':
                     langs_supported_spacy.extend(['sr_cyrl', 'sr_latn'])
+                elif lang.name not in ['th', 'vi', 'xx']:
+                    langs_supported_spacy.append(lang.name)
 
         langs_supported_spacy = add_lang_suffixes(langs_supported_spacy)
 
@@ -183,7 +183,7 @@ def check_settings_global(self):
                 langs_supported_spacy_lemmatizers.append(lang_code)
 
         # Languages without data files
-        langs_supported_spacy_lemmatizers.extend(['fi', 'ja', 'uk'])
+        langs_supported_spacy_lemmatizers.extend(['fi', 'ja', 'ko', 'uk'])
         langs_supported_spacy_lemmatizers = add_lang_suffixes(langs_supported_spacy_lemmatizers)
 
         # Check for missing and extra languages for spaCy's sentence recognizer / sentencizer
diff --git a/utils/wl_downloader_ci.py b/utils/wl_downloader_ci.py
index 54805e006..2f9ea2e7a 100644
--- a/utils/wl_downloader_ci.py
+++ b/utils/wl_downloader_ci.py
@@ -32,6 +32,7 @@
 spacy.cli.download('el_core_news_sm')
 spacy.cli.download('it_core_news_sm')
 spacy.cli.download('ja_core_news_sm')
+spacy.cli.download('ko_core_news_sm')
 spacy.cli.download('lt_core_news_sm')
 spacy.cli.download('mk_core_news_sm')
 spacy.cli.download('nb_core_news_sm')
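The two core-logic hunks below extend Wordless's token alignment to Korean. When a lemmatizer or tagger segments text differently from the word tokenizer (as python-mecab-ko and ko_core_news_sm do above), wl_lemmatize_tokens and wl_pos_tag realign the two sequences; for Chinese, Japanese, Thai, Tibetan, and now Korean they compare accumulated character lengths instead of token counts, since both segmentations of the same string must agree on characters even where they disagree on boundaries. A standalone sketch of that idea, much simplified from the actual loops being patched:

# Sketch: align two segmentations of the same text by accumulated
# character length, the strategy the hunks below extend to 'kor'.
# Simplified; assumes both token lists concatenate to the same string.
def align_by_char_len(tokens_a, tokens_b):
    groups, i, j = [], 0, 0
    while i < len(tokens_a) and j < len(tokens_b):
        group_a, group_b = [tokens_a[i]], [tokens_b[j]]
        i += 1
        j += 1
        # Grow the shorter side until both groups cover the same characters
        while sum(map(len, group_a)) != sum(map(len, group_b)):
            if sum(map(len, group_a)) < sum(map(len, group_b)):
                group_a.append(tokens_a[i])
                i += 1
            else:
                group_b.append(tokens_b[j])
                j += 1
        groups.append((group_a, group_b))
    return groups

# mecab's '대한민국' + '과' aligns with spaCy's single token '대한민국과'
print(align_by_char_len(['대한민국', '과'], ['대한민국과']))
# [(['대한민국', '과'], ['대한민국과'])]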
diff --git a/wordless/wl_nlp/wl_lemmatization.py b/wordless/wl_nlp/wl_lemmatization.py
index a29b8c586..6bda21736 100644
--- a/wordless/wl_nlp/wl_lemmatization.py
+++ b/wordless/wl_nlp/wl_lemmatization.py
@@ -272,7 +272,7 @@ def wl_lemmatize_tokens(main, inputs, lang, lemmatizer, tagged):
 
     # Align tokens
     while i_tokens < len_tokens - 1 or i_lemmas < len_lemmas - 1:
-        if lang in ['zho_cn', 'zho_tw', 'jpn', 'tha', 'bod']:
+        if lang in ['zho_cn', 'zho_tw', 'jpn', 'kor', 'tha', 'bod']:
            len_tokens_temp = sum((len(token) for token in tokens_temp))
            len_lemma_tokens_temp = sum((len(token) for token in lemma_tokens_temp))
         else:
diff --git a/wordless/wl_nlp/wl_nlp_utils.py b/wordless/wl_nlp/wl_nlp_utils.py
index 87f818f9c..c42b41859 100644
--- a/wordless/wl_nlp/wl_nlp_utils.py
+++ b/wordless/wl_nlp/wl_nlp_utils.py
@@ -74,6 +74,7 @@ def to_lang_util_texts(main, util_type, util_codes):
     'ell': 'el_core_news_sm',
     'ita': 'it_core_news_sm',
     'jpn': 'ja_core_news_sm',
+    'kor': 'ko_core_news_sm',
     'lit': 'lt_core_news_sm',
     'mkd': 'mk_core_news_sm',
     'nob': 'nb_core_news_sm',
diff --git a/wordless/wl_nlp/wl_pos_tagging.py b/wordless/wl_nlp/wl_pos_tagging.py
index c4bbe22cb..18b293ec2 100644
--- a/wordless/wl_nlp/wl_pos_tagging.py
+++ b/wordless/wl_nlp/wl_pos_tagging.py
@@ -131,7 +131,7 @@ def wl_pos_tag(main, inputs, lang, pos_tagger = 'default', tagset = 'default'):
 
     # Align tokens
     while i_tokens < len_tokens - 1 or i_tokens_tagged < len_tokens_tagged - 1:
-        if lang in ['zho_cn', 'zho_tw', 'jpn', 'tha', 'bod']:
+        if lang in ['zho_cn', 'zho_tw', 'jpn', 'kor', 'tha', 'bod']:
            len_tokens_temp = sum((len(token) for token in tokens_temp))
            len_tokens_tagged_temp = sum((len(token) for token in tokens_tagged_temp))
         else:
diff --git a/wordless/wl_settings/wl_settings_default.py b/wordless/wl_settings/wl_settings_default.py
index c04fec92f..1ce506ea3 100644
--- a/wordless/wl_settings/wl_settings_default.py
+++ b/wordless/wl_settings/wl_settings_default.py
@@ -1193,6 +1193,7 @@
             'ell': 'spacy_dependency_parser_ell',
             'ita': 'spacy_dependency_parser_ita',
             'jpn': 'spacy_dependency_parser_jpn',
+            'kor': 'spacy_dependency_parser_kor',
             'lit': 'spacy_dependency_parser_lit',
             'mkd': 'spacy_dependency_parser_mkd',
             'mal': 'nltk_punkt_mal',
@@ -1264,7 +1265,7 @@
             'jpn': 'spacy_jpn',
             'kan': 'sacremoses_moses',
             'kir': 'spacy_kir',
-            'kor': 'python_mecab_ko_mecab',
+            'kor': 'spacy_kor',
             'lat': 'spacy_lat',
             'lav': 'sacremoses_moses',
             'lij': 'spacy_lij',
@@ -1395,7 +1396,7 @@
             'ell': 'spacy_ell',
             'ita': 'spacy_ita',
             'jpn': 'spacy_jpn',
-            'kor': 'python_mecab_ko_mecab',
+            'kor': 'spacy_kor',
             'lit': 'spacy_lit',
             'mkd': 'spacy_mkd',
             'nob': 'spacy_nob',
@@ -1510,6 +1511,7 @@
             'gle': 'simplemma_gle',
             'ita': 'spacy_ita',
             'jpn': 'spacy_jpn',
+            'kor': 'spacy_kor',
             'lat': 'simplemma_lat',
             'lav': 'simplemma_lav',
             'lit': 'spacy_lit',
@@ -1653,6 +1655,7 @@
             'ell': 'spacy_ell',
             'ita': 'spacy_ita',
             'jpn': 'spacy_jpn',
+            'kor': 'spacy_kor',
             'lit': 'spacy_lit',
             'mkd': 'spacy_mkd',
             'nob': 'spacy_nob',
diff --git a/wordless/wl_settings/wl_settings_global.py b/wordless/wl_settings/wl_settings_global.py
index 40c3ec8a7..e3be005c1 100644
--- a/wordless/wl_settings/wl_settings_global.py
+++ b/wordless/wl_settings/wl_settings_global.py
@@ -402,6 +402,7 @@
             _tr('init_settings_global', 'spaCy - Greek (Modern) dependency parser'): 'spacy_dependency_parser_ell',
             _tr('init_settings_global', 'spaCy - Italian dependency parser'): 'spacy_dependency_parser_ita',
             _tr('init_settings_global', 'spaCy - Japanese dependency parser'): 'spacy_dependency_parser_jpn',
+            _tr('init_settings_global', 'spaCy - Korean dependency parser'): 'spacy_dependency_parser_kor',
             _tr('init_settings_global', 'spaCy - Lithuanian dependency parser'): 'spacy_dependency_parser_lit',
             _tr('init_settings_global', 'spaCy - Macedonian dependency parser'): 'spacy_dependency_parser_mkd',
             _tr('init_settings_global', 'spaCy - Norwegian Bokmål dependency parser'): 'spacy_dependency_parser_nob',
@@ -412,6 +413,7 @@
             _tr('init_settings_global', 'spaCy - Spanish dependency parser'): 'spacy_dependency_parser_spa',
             _tr('init_settings_global', 'spaCy - Swedish dependency parser'): 'spacy_dependency_parser_swe',
             _tr('init_settings_global', 'spaCy - Ukrainian dependency parser'): 'spacy_dependency_parser_ukr',
+
             _tr('init_settings_global', 'spaCy - Catalan sentence recognizer'): 'spacy_sentence_recognizer_cat',
             _tr('init_settings_global', 'spaCy - Chinese sentence recognizer'): 'spacy_sentence_recognizer_zho',
             _tr('init_settings_global', 'spaCy - Croatian sentence recognizer'): 'spacy_sentence_recognizer_hrv',
@@ -424,6 +426,7 @@
             _tr('init_settings_global', 'spaCy - Greek (Modern) sentence recognizer'): 'spacy_sentence_recognizer_ell',
             _tr('init_settings_global', 'spaCy - Italian sentence recognizer'): 'spacy_sentence_recognizer_ita',
             _tr('init_settings_global', 'spaCy - Japanese sentence recognizer'): 'spacy_sentence_recognizer_jpn',
+            _tr('init_settings_global', 'spaCy - Korean sentence recognizer'): 'spacy_sentence_recognizer_kor',
             _tr('init_settings_global', 'spaCy - Lithuanian sentence recognizer'): 'spacy_sentence_recognizer_lit',
             _tr('init_settings_global', 'spaCy - Macedonian sentence recognizer'): 'spacy_sentence_recognizer_mkd',
             _tr('init_settings_global', 'spaCy - Norwegian Bokmål sentence recognizer'): 'spacy_sentence_recognizer_nob',
@@ -434,6 +437,7 @@
             _tr('init_settings_global', 'spaCy - Spanish sentence recognizer'): 'spacy_sentence_recognizer_spa',
             _tr('init_settings_global', 'spaCy - Swedish sentence recognizer'): 'spacy_sentence_recognizer_swe',
             _tr('init_settings_global', 'spaCy - Ukrainian sentence recognizer'): 'spacy_sentence_recognizer_ukr',
+
             _tr('init_settings_global', 'spaCy - Sentencizer'): 'spacy_sentencizer',
 
             _tr('init_settings_global', 'Underthesea - Vietnamese sentence tokenizer'): 'underthesea_vie'
@@ -493,6 +497,7 @@
             _tr('init_settings_global', 'spaCy - Italian word tokenizer'): 'spacy_ita',
             _tr('init_settings_global', 'spaCy - Japanese word tokenizer'): 'spacy_jpn',
             _tr('init_settings_global', 'spaCy - Kannada word tokenizer'): 'spacy_kan',
+            _tr('init_settings_global', 'spaCy - Korean word tokenizer'): 'spacy_kor',
             _tr('init_settings_global', 'spaCy - Kyrgyz word tokenizer'): 'spacy_kir',
             _tr('init_settings_global', 'spaCy - Latin word tokenizer'): 'spacy_lat',
             _tr('init_settings_global', 'spaCy - Latvian word tokenizer'): 'spacy_lav',
@@ -616,6 +621,7 @@
             _tr('init_settings_global', 'spaCy - Greek (Modern) part-of-speech tagger'): 'spacy_ell',
             _tr('init_settings_global', 'spaCy - Italian part-of-speech tagger'): 'spacy_ita',
             _tr('init_settings_global', 'spaCy - Japanese part-of-speech tagger'): 'spacy_jpn',
+            _tr('init_settings_global', 'spaCy - Korean part-of-speech tagger'): 'spacy_kor',
             _tr('init_settings_global', 'spaCy - Lithuanian part-of-speech tagger'): 'spacy_lit',
             _tr('init_settings_global', 'spaCy - Macedonian part-of-speech tagger'): 'spacy_mkd',
             _tr('init_settings_global', 'spaCy - Norwegian Bokmål part-of-speech tagger'): 'spacy_nob',
@@ -703,6 +709,7 @@
             _tr('init_settings_global', 'spaCy - Irish lemmatizer'): 'spacy_gle',
             _tr('init_settings_global', 'spaCy - Italian lemmatizer'): 'spacy_ita',
             _tr('init_settings_global', 'spaCy - Japanese lemmatizer'): 'spacy_jpn',
+            _tr('init_settings_global', 'spaCy - Korean lemmatizer'): 'spacy_kor',
             _tr('init_settings_global', 'spaCy - Lithuanian lemmatizer'): 'spacy_lit',
             _tr('init_settings_global', 'spaCy - Luxembourgish lemmatizer'): 'spacy_ltz',
             _tr('init_settings_global', 'spaCy - Macedonian lemmatizer'): 'spacy_mkd',
@@ -836,6 +843,7 @@
             _tr('init_settings_global', 'spaCy - Greek (Modern) dependency parser'): 'spacy_ell',
             _tr('init_settings_global', 'spaCy - Italian dependency parser'): 'spacy_ita',
             _tr('init_settings_global', 'spaCy - Japanese dependency parser'): 'spacy_jpn',
+            _tr('init_settings_global', 'spaCy - Korean dependency parser'): 'spacy_kor',
             _tr('init_settings_global', 'spaCy - Lithuanian dependency parser'): 'spacy_lit',
             _tr('init_settings_global', 'spaCy - Macedonian dependency parser'): 'spacy_mkd',
             _tr('init_settings_global', 'spaCy - Norwegian Bokmål dependency parser'): 'spacy_nob',
@@ -952,6 +960,11 @@
                 'spacy_sentence_recognizer_jpn'
             ],
 
+            'kor': [
+                'spacy_dependency_parser_kor',
+                'spacy_sentence_recognizer_kor'
+            ],
+
             'lit': [
                 'spacy_dependency_parser_lit',
                 'spacy_sentence_recognizer_lit'
@@ -1236,7 +1249,11 @@
             ],
 
             'kir': ['spacy_kir'],
-            'kor': ['python_mecab_ko_mecab'],
+
+            'kor': [
+                'python_mecab_ko_mecab',
+                'spacy_kor'
+            ],
 
             'lat': [
                 'nltk_nist', 'nltk_nltk', 'nltk_regex', 'nltk_twitter',
@@ -1547,7 +1564,11 @@
                 'sudachipy_jpn'
             ],
 
-            'kor': ['python_mecab_ko_mecab'],
+            'kor': [
+                'python_mecab_ko_mecab',
+                'spacy_kor'
+            ],
+
             'lit': ['spacy_lit'],
             'mkd': ['spacy_mkd'],
             'nob': ['spacy_nob'],
@@ -1657,7 +1678,6 @@
                 'spacy_deu'
             ],
 
-
             'grc': [
                 'spacy_grc'
             ],
@@ -1696,6 +1716,7 @@
                 'sudachipy_jpn'
             ],
 
+            'kor': ['spacy_kor'],
 
             'lat': ['simplemma_lat'],
             'lav': ['simplemma_lav'],
@@ -2025,6 +2046,7 @@
             'ell': ['spacy_ell'],
             'ita': ['spacy_ita'],
             'jpn': ['spacy_jpn'],
+            'kor': ['spacy_kor'],
             'lit': ['spacy_lit'],
             'mkd': ['spacy_mkd'],
             'nob': ['spacy_nob'],
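Finally, the consistency test in test_settings_global.py (patched above) derives the expected language inventory by enumerating spaCy's per-language subpackages, which is why dropping 'ko' from the exclusion list is enough to bring the new Korean utilities under automatic checking. The same enumeration can be run standalone; this mirrors the test's own code and is illustrative only:

# Sketch: list spaCy's per-language subpackages the same way the patched
# test_settings_global.py does; with spaCy 3.6 this includes 'ko'.
import pkgutil

import spacy.lang

lang_codes = sorted(mod.name for mod in pkgutil.iter_modules(spacy.lang.__path__) if mod.ispkg)
print(lang_codes)  # e.g. [..., 'ja', 'ko', ..., 'xx'] ('xx' is the multi-language stub)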