#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Preprocessing.

This module contains functions for various preprocessing steps provided by
`DARIAH-DE`_.

.. _DARIAH-DE:
    https://de.dariah.eu
    https://github.com/DARIAH-DE
"""

__author__ = "DARIAH-DE"
__authors__ = "Steffen Pielstroem, Philip Duerholt, Sina Bock, Severin Simmler"
__email__ = "pielstroem@biozentrum.uni-wuerzburg.de"

import csv
import glob
import logging
import os
from collections import Counter, defaultdict
from itertools import chain

from lxml import etree
import numpy as np
import pandas as pd
import regex

log = logging.getLogger('preprocessing')
log.addHandler(logging.NullHandler())
logging.basicConfig(level=logging.INFO,
                    format='%(levelname)s %(name)s: %(message)s')

regular_expression = r'\p{Letter}+\p{Punctuation}?\p{Letter}+'
def create_document_list(path, ext='txt'):
    """Creates a list of files with their full path.

    Description:
        Consider you have a collection of text files in one directory and
        wish to read all files at once; use this function to create a list
        of their full paths. Use the list as argument for the functions
        `read_from_txt()`, `read_from_tei()` or `read_from_csv()` to read
        the files.
        You have to specify the file format, which defaults to plain text.

    Args:
        path (str): Path to folder, e.g. '/tmp/corpus'.
        ext (str): File extension, e.g. 'csv'. Defaults to 'txt'.

    Returns:
        List of files with full path.

    Example:
        >>> create_document_list('corpus_txt')[0]
        'corpus_txt/Poe_EurekaAProsePoem.txt'
    """
    log.info("Creating document list from %s files ...", ext.upper())
    pattern = os.path.join(path, '*.' + ext)
    doclist = glob.glob(pattern)
    if not doclist:
        raise FileNotFoundError(
            "The pattern %s does not match any files." % pattern)
    return doclist


def read_from_txt(doclist):
    """Opens TXT files using file paths.

    Description:
        With this function you can read plain text files. Pass a list of
        full paths or one single path as an argument.
        Use the function `create_document_list()` to create a list of your
        text files.

    Args:
        doclist (Union[list[str], str]): List of all documents in the corpus
            or single path to a TXT file.

    Yields:
        Document.

    Todo:
        * Separate metadata (author, header)?

    Example:
        >>> list(read_from_txt('corpus_txt/Doyle_AScandalinBohemia.txt'))[0][:20]
        'A SCANDAL IN BOHEMIA'
    """
    log.info("Accessing TXT documents ...")
    if isinstance(doclist, str):
        doclist = [doclist]
    for file in doclist:
        with open(file, 'r', encoding='utf-8') as f:
            yield f.read()


def read_from_tei(doclist):
    """Opens TEI XML files using file paths.

    Description:
        With this function you can read TEI-encoded XML files. Pass a list
        of full paths or one single path as an argument.
        Use the function `create_document_list()` to create a list of your
        XML files.

    Args:
        doclist (Union[list[str], str]): List of all documents in the corpus
            or single path to a TEI XML file.

    Yields:
        Document.

    Todo:
        * Separate metadata (author, header)?

    Example:
        >>> list(read_from_tei('corpus_tei/Schnitzler_Amerika.xml'))[0][142:159]
        'Arthur Schnitzler'
    """
    log.info("Accessing TEI XML documents ...")
    if not isinstance(doclist, list):
        doclist = [doclist]
    ns = dict(tei='http://www.tei-c.org/ns/1.0')
    for file in doclist:
        tree = etree.parse(file)
        text_el = tree.xpath('//tei:text', namespaces=ns)[0]
        yield "".join(text_el.xpath('.//text()'))


def read_from_csv(doclist, columns=['ParagraphId', 'TokenId', 'Lemma', 'CPOS', 'NamedEntity']):
    """Opens CSV files using file paths.

    Description:
        With this function you can read CSV files generated by
        `DARIAH-DKPro-Wrapper`_, a tool for natural language processing.
        Pass a list of full paths or one single path as an argument. You
        also have the ability to select certain columns.
        Use the function `create_document_list()` to create a list of your
        CSV files.

    .. _DARIAH-DKPro-Wrapper:
        https://github.com/DARIAH-DE/DARIAH-DKPro-Wrapper

    Args:
        doclist (Union[list[str], str]): List of all documents in the corpus
            or single path to a CSV file.
        columns (list[str]): List of CSV column names.
            Defaults to ['ParagraphId', 'TokenId', 'Lemma', 'CPOS', 'NamedEntity'].

    Yields:
        Document as DataFrame.

    Todo:
        * Separate metadata (author, header)?

    Example:
        >>> list(read_from_csv('corpus_csv/Doyle_AScandalinBohemia.txt.csv'))[0][:4] # doctest: +NORMALIZE_WHITESPACE
           ParagraphId  TokenId    Lemma CPOS NamedEntity
        0            0        0        a  ART           _
        1            0        1  scandal   NP           _
        2            0        2       in   PP           _
        3            0        3  bohemia   NP           _
    """
    log.info("Accessing CSV documents ...")
    if isinstance(doclist, str):
        doclist = [doclist]
    for file in doclist:
        df = pd.read_csv(file, sep='\t', quoting=csv.QUOTE_NONE)
        yield df[columns]


def get_labels(doclist):
    """Creates a list of document labels.

    Description:
        Consider you have a list of text files and wish to access their raw
        names without any extension or parts of the path. With this function
        you can save document labels in a list.
        Use the function `create_document_list()` to create a list of your
        text files.

    Args:
        doclist (list[str]): List of file paths.

    Yields:
        Document labels.

    Example:
        >>> list(get_labels(['corpus_txt/author_title.txt']))
        ['author_title']
    """
    log.info("Creating document labels ...")
    for doc in doclist:
        label, _ = os.path.splitext(os.path.basename(doc))
        yield label


def split_paragraphs(doc_txt, sep=regex.compile('\n')):
    """Splits the given document by paragraphs.

    Description:
        With this function you can split a document into paragraphs. You
        also have the ability to select a certain regular expression to
        split the document.
        Use the functions `read_from_txt()`, `read_from_tei()` or
        `read_from_csv()` to read your text files.

    Args:
        doc_txt (str): Document text.
        sep (regex.Regex): Separator indicating a paragraph.

    Returns:
        List of paragraphs.

    Example:
        >>> split_paragraphs("This test contains \\n paragraphs.")
        ['This test contains ', ' paragraphs.']
    """
    if not hasattr(sep, 'match'):
        sep = regex.compile(sep)
    return sep.split(doc_txt)


def segment_fuzzy(document, segment_size=5000, tolerance=0.05):
    """Segments a document, tolerating existing chunks (like paragraphs).

    Description:
        Consider you have a document. You wish to split the document into
        segments of about 1000 tokens, but you prefer to keep paragraphs
        together if this does not increase or decrease the token size by
        more than 5%.

    Args:
        document: The document to process. This is an iterable of chunks,
            each of which is an iterable of tokens.
        segment_size (int): The target length of each segment, in tokens.
        tolerance (Number): How much may the actual segment size differ from
            `segment_size`? If 0 < tolerance < 1, this is interpreted as a
            fraction of `segment_size`, otherwise it is interpreted as an
            absolute number. If tolerance < 0, chunks are never split apart.

    Yields:
        Segments. Each segment is a list of chunks, each chunk is a list of
        tokens.

    Example:
        >>> list(segment_fuzzy([['This', 'test', 'is', 'very', 'clear'],
        ...                     ['and', 'contains', 'chunks']], 2)) # doctest: +NORMALIZE_WHITESPACE
        [[['This', 'test']],
         [['is', 'very']],
         [['clear'], ['and']],
         [['contains', 'chunks']]]
    """
    if 0 < tolerance < 1:
        tolerance = round(segment_size * tolerance)

    current_segment = []
    current_size = 0
    carry = None
    doc_iter = iter(document)
    try:
        while True:
            chunk = list(carry if carry else next(doc_iter))
            carry = None
            current_segment.append(chunk)
            current_size += len(chunk)

            if current_size >= segment_size:
                too_long = current_size - segment_size
                too_short = segment_size - (current_size - len(chunk))

                if tolerance >= 0 and min(too_long, too_short) > tolerance:
                    chunk_part0 = chunk[:-too_long]
                    carry = chunk[-too_long:]
                    current_segment[-1] = chunk_part0
                elif too_long >= too_short:
                    carry = current_segment.pop()

                yield current_segment
                current_segment = []
                current_size = 0
    except StopIteration:
        pass

    # handle leftovers
    if current_segment:
        yield current_segment


def segment(document, segment_size=1000, tolerance=0, chunker=None,
            tokenizer=None, flatten_chunks=False, materialize=False):
    """Segments a document into segments of about `segment_size` tokens, respecting existing chunks.

    Description:
        Consider you have a document. You wish to split the document into
        segments of about 1000 tokens, but you prefer to keep paragraphs
        together if this does not increase or decrease the token size by
        more than 5%.
        This is a convenience wrapper around `segment_fuzzy()`.

    Args:
        document: The document to process.
        segment_size (int): The target size of each segment, in tokens.
        tolerance (Number): see `segment_fuzzy()`.
        chunker (callable): a one-argument function that cuts the document
            into chunks. If this is present, it is called on the given
            document.
        tokenizer (callable): a one-argument function that tokenizes each
            chunk.
        flatten_chunks (bool): if True, undo the effect of the chunker by
            chaining the chunks in each segment, thus each segment consists
            of tokens. This can also be a one-argument function in order to
            customize the un-chunking.
        materialize (bool): if True, materialize the segments to a list
            before returning.

    Example:
        >>> list(segment([['This', 'test', 'is', 'very', 'clear'],
        ...               ['and', 'contains', 'chunks']], 2)) # doctest: +NORMALIZE_WHITESPACE
        [[['This', 'test']],
         [['is', 'very']],
         [['clear'], ['and']],
         [['contains', 'chunks']]]
    """
    if chunker is not None:
        document = chunker(document)
    if tokenizer is not None:
        document = map(tokenizer, document)

    segments = segment_fuzzy(document, segment_size, tolerance)

    if flatten_chunks:
        if not callable(flatten_chunks):
            def flatten_chunks(segment):
                return list(chain.from_iterable(segment))
        segments = map(flatten_chunks, segments)
    if materialize:
        segments = list(segments)

    return segments


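# Usage sketch: combining `split_paragraphs()` and `tokenize()` (defined
# below) via the `chunker` and `tokenizer` arguments. The input sentence is
# a made-up example.
#
#     >>> doc = "One two three.\nFour five six."
#     >>> segment(doc, segment_size=3, chunker=split_paragraphs,
#     ...         tokenizer=tokenize, flatten_chunks=True, materialize=True)
#     [['one', 'two', 'three'], ['four', 'five', 'six']]

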
def tokenize(doc_txt, expression=regular_expression, lower=True, simple=False):
    r"""Tokenizes with Unicode regular expressions.

    Description:
        With this function you can tokenize a document with a regular
        expression. You also have the ability to pass your own regular
        expression. The default expression is
        '\p{Letter}+\p{Punctuation}?\p{Letter}+', which means one or more
        letters, followed by one or no punctuation character, followed by
        one or more letters. So one-letter words won't match.
        In case you want to lowercase all tokens, set the argument `lower`
        to True (as it is by default).
        If you want a very simple and primitive tokenization, set the
        argument `simple` to True.
        Use the functions `read_from_txt()`, `read_from_tei()` or
        `read_from_csv()` to read your text files.

    Args:
        doc_txt (str): Document as string.
        expression (str): Regular expression to find tokens.
        lower (bool): If True, lowercases all tokens. Defaults to True.
        simple (bool): Uses simple regular expression (r'\w+'). Defaults to
            False. If set to True, argument `expression` will be ignored.

    Yields:
        Tokens.

    Example:
        >>> list(tokenize("This is one example text."))
        ['this', 'is', 'one', 'example', 'text']
    """
    if lower:
        doc_txt = doc_txt.lower()
    if simple:
        pattern = regex.compile(r'\w+')
    else:
        pattern = regex.compile(expression)
    doc_txt = regex.sub(r'\.', '', doc_txt)
    # Replace figure dash, en dash, em dash and horizontal bar with spaces:
    doc_txt = regex.sub('[‒–—―]', ' ', doc_txt)
    tokens = pattern.finditer(doc_txt)
    for match in tokens:
        yield match.group()


def filter_pos_tags(doc_csv, pos_tags=['ADJ', 'V', 'NN']):
    """Gets lemmas by selected POS-tags from DARIAH-DKPro-Wrapper output.

    Description:
        With this function you can select certain rows of a CSV file
        generated by `DARIAH-DKPro-Wrapper`_, a tool for natural language
        processing.
        Use the function `read_from_csv()` to read CSV files.

    .. _DARIAH-DKPro-Wrapper:
        https://github.com/DARIAH-DE/DARIAH-DKPro-Wrapper

    Args:
        doc_csv (DataFrame): DataFrame containing DARIAH-DKPro-Wrapper output.
        pos_tags (list[str]): List of DKPro POS-tags that should be selected.
            Defaults to ['ADJ', 'V', 'NN'].

    Yields:
        Lemma.

    Example:
        >>> df = pd.DataFrame({'CPOS': ['CARD', 'ADJ', 'NN', 'NN'],
        ...                    'Lemma': ['one', 'more', 'example', 'text']})
        >>> list(filter_pos_tags(df))[0] # doctest: +NORMALIZE_WHITESPACE
        1    more
        2    example
        3    text
        Name: Lemma, dtype: object
    """
    log.info("Accessing %s ...", pos_tags)
    # Select all rows whose CPOS value is one of the requested POS-tags and
    # yield their lemmas as one Series. (Filtering tag by tag while
    # reassigning `doc_csv` would discard rows for all but the first tag.)
    yield doc_csv.loc[doc_csv['CPOS'].isin(pos_tags), 'Lemma']


def find_stopwords(sparse_bow, id_types, mfw=200):
    """Creates a stopword list.

    Note:
        Use `create_mm()` to create `sparse_bow`.

    Args:
        sparse_bow (DataFrame): Multiindexed DataFrame with term and term
            frequency by document.
        id_types (dict): Dictionary with key = token : value = id pairs.
        mfw (int): Number of most frequent words to be considered.

    Returns:
        List of most frequent words.
    """
    log.info("Finding stopwords ...")
    id2type = {value: key for key, value in id_types.items()}
    sparse_bow_collapsed = sparse_bow.groupby(
        sparse_bow.index.get_level_values('token_id')).sum()
    sparse_bow_stopwords = sparse_bow_collapsed[0].nlargest(mfw)
    stopwords = [id2type[key]
                 for key in sparse_bow_stopwords.index.get_level_values('token_id')]
    log.debug("%s stopwords found.", len(stopwords))
    return stopwords


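# Usage sketch: a tiny hand-built bag-of-words matrix (the token ids and
# vocabulary are hypothetical) to show the expected inputs.
#
#     >>> idx = pd.MultiIndex.from_tuples([(1, 1), (1, 2), (2, 1)],
#     ...                                 names=['doc_id', 'token_id'])
#     >>> bow = pd.DataFrame([5, 1, 2], index=idx)
#     >>> find_stopwords(bow, {'the': 1, 'cat': 2}, mfw=1)
#     ['the']

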
def find_hapax(sparse_bow, id_types):
    """Creates a list of hapax legomena.

    Note:
        Use `create_mm()` to create `sparse_bow`.

    Args:
        sparse_bow (DataFrame): Multiindexed DataFrame with term and term
            frequency by document.
        id_types (dict): Dictionary with key = token : value = id pairs.

    Returns:
        List of hapax legomena.
    """
    log.info("Finding hapax legomena ...")
    id2type = {value: key for key, value in id_types.items()}
    sparse_bow_collapsed = sparse_bow.groupby(
        sparse_bow.index.get_level_values('token_id')).sum()
    sparse_bow_hapax = sparse_bow_collapsed.loc[sparse_bow_collapsed[0] == 1]
    hapax = [id2type[key]
             for key in sparse_bow_hapax.index.get_level_values('token_id')]
    log.debug("%s hapax legomena found.", len(hapax))
    return hapax


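# Usage sketch, reusing the hand-built matrix from above: token id 2
# ('cat') occurs exactly once in the whole corpus.
#
#     >>> find_hapax(bow, {'the': 1, 'cat': 2})
#     ['cat']

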
def remove_features(mm, id_types, features):
    """Removes features.

    Note:
        Use `find_stopwords()` or `find_hapax()` to create `features`.

    Args:
        mm (DataFrame): Multiindexed DataFrame with term and term frequency
            by document.
        id_types (dict): Dictionary with key = token : value = id pairs.
        features (set): Set of features to remove, or an iterable
            convertible to a set.

    Returns:
        Clean corpus.

    ToDo:
        Adapt function to work with mm-corpus format.
    """
    log.info("Removing features ...")
    if not isinstance(features, set):
        try:
            features = set(features)
        except TypeError:
            raise ValueError("features must be a set or convertible to a set")
    stoplist_applied = [word for word in set(id_types.keys())
                        if word in features]
    clean_term_frequency = mm.drop(
        [id_types[word] for word in stoplist_applied], level="token_id")
    total = len(features)
    log.debug("%s features removed.", total)
    return clean_term_frequency


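# Usage sketch, continuing the example above: dropping the stopword list
# returned by `find_stopwords()` leaves only the rows for 'cat'.
#
#     >>> clean = remove_features(bow, {'the': 1, 'cat': 2}, {'the'})
#     >>> clean.index.get_level_values('token_id').tolist()
#     [2]

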
def create_dictionary(list_of_strings):
    """Creates a dictionary of unique strings with identifier.

    Args:
        list_of_strings (list): List of strings.

    Returns:
        Dictionary.

    Example:
        >>> create_dictionary(['example'])
        {'example': 1}
    """
    if all(isinstance(element, list) for element in list_of_strings):
        list_of_strings = {
            string for element in list_of_strings for string in element}
    return {string: id_ for id_, string in enumerate(set(list_of_strings), 1)}


def _create_large_counter(doc_labels, doc_tokens, type_dictionary):
    """Creates a dictionary of dictionaries with token counts.

    Note:
        The main function is `create_mm()`. This creates a dictionary of
        dictionaries. The first level consists of key = document label :
        value = dictionary of counts pairs. The second level consists of
        key = token id : value = count of tokens in document pairs.

    Args:
        doc_labels (list): List of doc labels as string.
        doc_tokens (list): List of tokens as string.
        type_dictionary (dict): Dictionary with key = token : value = id pairs.

    Returns:
        Dictionary of document : counter pairs, with counter being
        token id : count pairs.
    """
    largecounter = defaultdict(dict)
    for doc, tokens in zip(doc_labels, doc_tokens):
        largecounter[doc] = Counter(
            [type_dictionary[token] for token in tokens])
    return largecounter


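# Illustration of the returned structure (label, tokens and ids are
# hypothetical):
#
#     >>> _create_large_counter(['doc_a'], [['the', 'cat', 'the']],
#     ...                       {'the': 1, 'cat': 2})
#     defaultdict(<class 'dict'>, {'doc_a': Counter({1: 2, 2: 1})})

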
def _create_sparse_index(largecounter):
    """Creates a pandas MultiIndex out of tuples.

    Note:
        The main function is `create_mm()`. The MultiIndex represents
        document id to token id relations.

    Args:
        largecounter (dict of counters): Dictionary of document : counter
            pairs, with counter being token id : count pairs.

    Returns:
        Pandas MultiIndex with document id to token id relations.
    """
    tuples = []
    for key in range(1, len(largecounter) + 1):
        if len(largecounter[key]) == 0:
            # keep empty documents in the index with a dummy token id
            tuples.append((key, 0))
        for value in largecounter[key]:
            tuples.append((key, value))
    sparse_index = pd.MultiIndex.from_tuples(
        tuples, names=["doc_id", "token_id"])
    return sparse_index


def create_mm(doc_labels, doc_tokens, type_dictionary, doc_ids):
    """Creates a sparse document-term matrix.

    Note:
        Main function that incorporates `_create_large_counter()` and
        `_create_sparse_index()`. Creates a pandas DataFrame out of a pandas
        MultiIndex with document id - token id - count data. The output has
        one column holding the count for each token in each document.

    Args:
        doc_labels (list): List of doc labels as string.
        doc_tokens (list): List of tokens as string.
        type_dictionary (dict): Dictionary with key = token : value = id pairs.
        doc_ids (dict): Dictionary with key = document label : value = id pairs.

    Returns:
        Multiindexed pandas DataFrame with document id - token id - count data.

    ToDo:
        Test if it's necessary to build sparse_df_filled with int8 zeroes
        instead of int64.
    """
    temp_counter = _create_large_counter(
        doc_labels, doc_tokens, type_dictionary)
    largecounter = {doc_ids[key]: value for key, value in temp_counter.items()}
    sparse_index = _create_sparse_index(largecounter)
    sparse_df_filled = pd.DataFrame(
        np.zeros((len(sparse_index), 1), dtype=int), index=sparse_index)

    index_iterator = sparse_index.groupby(
        sparse_index.get_level_values('doc_id'))
    for doc_id in range(1, len(sparse_index.levels[0]) + 1):
        for token_id in [val[1] for val in index_iterator[doc_id]]:
            # `DataFrame.set_value()` was removed in pandas 1.0; use `.at`.
            sparse_df_filled.at[
                (doc_id, token_id), 0] = int(largecounter[doc_id][token_id])
    return sparse_df_filled


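# Usage sketch, tying the helpers together on a two-document toy corpus
# (labels and tokens are made up):
#
#     >>> labels = ['doc_a', 'doc_b']
#     >>> tokens = [['the', 'cat'], ['the']]
#     >>> types = create_dictionary(tokens)
#     >>> doc_ids = create_dictionary(labels)
#     >>> bow = create_mm(labels, tokens, types, doc_ids)
#     >>> bow.loc[(doc_ids['doc_a'], types['cat']), 0]
#     1

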
def make_doc2bow_list(sparse_bow):
    """Creates doc2bow_list as input for gensim's `model.get_document_topics(doc2bow_list[idx])`.

    Args:
        sparse_bow (DataFrame): Multiindexed DataFrame with document id -
            token id - count data, as returned by `create_mm()`.

    Returns:
        List with one list of (token id, count) tuples per document.
    """
    doc2bow_list = []
    for doc in sparse_bow.index.groupby(sparse_bow.index.get_level_values('doc_id')):
        temp = [(token, count) for token, count in zip(
            sparse_bow.loc[doc].index, sparse_bow.loc[doc][0])]
        doc2bow_list.append(temp)
    return doc2bow_list


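# Usage sketch, continuing the toy corpus above: each document becomes a
# list of (token id, count) tuples, the format gensim expects.
#
#     >>> doc2bow_list = make_doc2bow_list(bow)
#     >>> len(doc2bow_list)
#     2

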
def make_doc_topic_matrix(model, doc2bow_list, doc2id):
    """Creates a document-topic matrix. Use only for testing purposes, not working properly.

    Args:
        model: Gensim LDA model.
        doc2bow_list (list): Output of `make_doc2bow_list()`.
        doc2id (dict): Dictionary with key = id : value = document label pairs.

    Returns:
        DataFrame with one column per document.

    ToDo:
        Make it work.
    """
    df = pd.DataFrame()
    for idx, doc in enumerate(doc2bow_list, 1):
        df[doc2id[idx]] = pd.Series(
            [value[1] for value in model.get_document_topics(doc)])
    return df.fillna(0)


def gensim2dataframe(model):
    """Creates a DataFrame out of a gensim model (topic keys).

    Note:
        If you want to save gensim's topic output, set `log=True` in
        `show_topics()`.

    Args:
        model: Gensim LDA model.

    Returns:
        Pandas DataFrame with topics.

    ToDo:
        Format input for DataFrame.
    """
    num_topics = model.num_topics
    topics_df = pd.DataFrame(index=range(num_topics), columns=range(10))
    topics = model.show_topics(
        num_topics=num_topics, log=False, formatted=False)
    for topic in topics:
        idx = topic[0]
        temp = topic[1]
        topics_df.loc[idx] = temp
    return topics_df


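# Usage sketch (assumes gensim is installed; `doc2bow_list` and `types`
# come from the sketches above):
#
#     >>> from gensim.models import LdaModel
#     >>> id2word = {id_: token for token, id_ in types.items()}
#     >>> lda = LdaModel(corpus=doc2bow_list, id2word=id2word, num_topics=2)
#     >>> gensim2dataframe(lda).shape[0]
#     2

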
def save_bow_mm(sparse_bow, output_path):
    """Saves a bag-of-words model as a Matrix Market file.

    Note:
        Create `sparse_bow` with `create_mm()` or take the output of
        `remove_features()`. `output_path` gives the name of the new local
        file.

    Args:
        sparse_bow (DataFrame): Multiindexed pandas DataFrame with
            document id - token id - count data.
        output_path (str): Path to output file (without extension).

    Returns:
        None. Creates a file with `sparse_bow` in Matrix Market format.
    """
    num_docs = sparse_bow.index.get_level_values("doc_id").max()
    num_types = sparse_bow.index.get_level_values("token_id").max()
    sum_counts = sparse_bow[0].sum()
    header_string = str(num_docs) + " " + str(num_types) + \
        " " + str(sum_counts) + "\n"
    with open('.'.join([output_path, 'mm']), 'w', encoding="utf-8") as f:
        f.write("%%MatrixMarket matrix coordinate real general\n")
        f.write(header_string)
        sparse_bow.to_csv(f, sep=' ', header=None)
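

# Usage sketch, writing the toy corpus from above to disk (the file name is
# hypothetical):
#
#     >>> save_bow_mm(bow, '/tmp/corpus')   # creates /tmp/corpus.mm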