##################################################################################
# DocumentBuilder_ML_.py
##################################################################################
##################################################################################
# Imports
##################################################################################
# General imports
import os
import csv
import json
import glob
import time
import random
import argparse
import pdb
import operator
import itertools
import collections
import pickle as pkl
from pathlib import Path
import numpy as np
import pandas as pd
import xlrd
import wimpy
import warnings
warnings.filterwarnings("ignore")
warnings.simplefilter(action='ignore', category=FutureWarning)
# scikit-learn imports
import sklearn
from sklearn import preprocessing
from sklearn.model_selection import (train_test_split, cross_val_score, KFold,
                                     StratifiedKFold, GridSearchCV, PredefinedSplit)
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, TfidfTransformer
from sklearn.preprocessing import scale, StandardScaler, Normalizer, label_binarize
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.pipeline import Pipeline
from sklearn.utils.class_weight import compute_class_weight, compute_sample_weight
from sklearn.metrics import (precision_recall_curve, accuracy_score, f1_score,
                             precision_score, recall_score, classification_report,
                             roc_curve, auc, roc_auc_score, confusion_matrix,
                             make_scorer)
from sklearn.calibration import CalibratedClassifierCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import AdaBoostClassifier
# visualization
import matplotlib
import matplotlib.colors
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sns
from sklearn.manifold import TSNE
from tensorboardX import SummaryWriter
#import umap
# Text augmentation tools
from googletrans import Translator
# Natural Language Toolkit
import nltk
nltk.download('punkt')
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import RegexpTokenizer, word_tokenize
from nltk import ngrams, pos_tag
stop_words = set(stopwords.words('english'))
# add punctuation marks and report-template boilerplate to the stopword set
stop_words.update([
    ';', ':', ',', '#', '(', ')', '***', "'", "''", '&', '.', '-', '?',
    ' ', '--', '_', '"', '/', '`', '!', '[', ']',
    'report', 'electronically', 'signed', 'out', 'reviewed', 'approved', 'page',
])
# pyTorch essentials
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
# keras essentials
#from keras.preprocessing.sequence import pad_sequences
# transformer essentials
from transformers import BertModel, BertTokenizer, BertConfig, BertForSequenceClassification
# tensorflow modules
#import tensorflow as tf
# output formatting essentials
from tqdm import tqdm, trange
import io
##################################################################################
# Set all the seed values
##################################################################################
# Seed every random number generator in use to make runs reproducible.
seed_val = 42
random.seed(seed_val)
np.random.seed(seed_val)
torch.manual_seed(seed_val)
class DocumentBuilder:
    def __init__(self, batch_size, max_len):
        self.batch_size = batch_size
        self.MAX_LEN = max_len

    def get_data_loaders(self, augment=False):
        def augmentReport(reports, labels):
            print('Augmenting the reports through the backtranslation process...')
            translator = Translator()
            # Oversample the minority class (high-grade) via backtranslation until
            # the training set holds as many high-grade as low-grade reports.
            count = collections.Counter(labels)
            majority_class = count[0]
            minority_class = count[1]
            minority_class_indices = []
            for i, element in enumerate(labels):
                if element == 1:
                    minority_class_indices.append(i)
            for i in range(minority_class, majority_class):
                randomly_chosen_index = random.choice(minority_class_indices)
                report2augment = reports[randomly_chosen_index]
                # translate to German
                german_report = translator.translate(report2augment, dest='de')
                # backtranslate to English
                english_report = translator.translate(german_report.text, dest='en')
                reports.append(english_report.text)
                labels.append(1)
            return reports, labels
        # directory holding the denoised pathology reports
        reportDir = '/reports_denoised/'

        # load the report labels (Gleason scores) from the clinical spreadsheet
        def get_labels():
            workbook = xlrd.open_workbook('/nationwidechildrens.org_clinical_patient_prad_gleason.xlsx', on_demand=True)
            sheet = workbook.sheet_by_name('nationwidechildrens.org_clinica')
            # read the header values into a list
            keys = [sheet.cell(0, col_index).value for col_index in range(sheet.ncols)]
            report2grade = dict()
            for row_index in range(1, sheet.nrows):
                d = {keys[col_index]: sheet.cell(row_index, col_index).value
                     for col_index in range(sheet.ncols)}
                reportName = d['bcr_patient_barcode']
                reportScore = d['gleason_score']
                if reportScore == 6.0 or reportScore == 7.0:
                    report2grade[reportName] = 0  # low-grade
                else:
                    report2grade[reportName] = 1  # high-grade
            return report2grade
        report2grade = get_labels()
        all_reports = []
        all_labels = []
        count = 0
        # iterate over the report directory
        for filename in os.listdir(reportDir):
            count = count + 1
            fileStats = Path(reportDir + filename).stat()
            if fileStats.st_size > 3:  # process only non-empty report files
                with open(reportDir + filename, 'r') as pathReportFile:
                    buildReport = []
                    for line in pathReportFile:
                        if len(line) >= 5:
                            # tokenize, lowercase the text and remove stop words
                            tokenized_words = word_tokenize(line.rstrip().lower())
                            tokens_without_sw = [word for word in tokenized_words if word not in stop_words]
                            buildReport.append(' '.join(tokens_without_sw))
                    buildReport = ' '.join(buildReport)
                    all_reports.append(buildReport)
                    # the first 12 characters of the file name form the patient barcode
                    all_labels.append(report2grade[filename[0:12]])
        # Total reports before augmentation
        print('Number high-grade before augmentation: ', all_labels.count(1))
        print('Number low-grade before augmentation: ', all_labels.count(0))
        # create a dataframe from the report texts and the labels
        df_data = pd.DataFrame(
            {'text': all_reports,
             'category': all_labels
             })
        # split into training and test sets
        X_train_i, X_test, y_train_i, y_test = train_test_split(df_data['text'], df_data['category'], test_size=0.20, shuffle=True, random_state=42)
        if augment:
            # augment the training reports only; the test set stays untouched
            augmentedReports, augmentedLabels = augmentReport(list(X_train_i), list(y_train_i))
            print('Total number of reports loaded: ', len(augmentedReports) + len(X_test))
            print('Total number of reports for the class High-Grade after augmentation: ', list(augmentedLabels).count(1) + list(y_test).count(1))
            print('Total number of reports for the class Low-Grade after augmentation: ', list(augmentedLabels).count(0) + list(y_test).count(0))
            return augmentedReports, X_test, augmentedLabels, y_test
        else:
            return X_train_i, X_test, y_train_i, y_test
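
##################################################################################
# Usage sketch
##################################################################################
# A minimal, hypothetical example of how this builder might be driven; the
# batch size, maximum sequence length and the __main__ guard below are
# illustrative assumptions, not part of the original pipeline, and the report
# directory and spreadsheet paths above must exist for this to run.
if __name__ == '__main__':
    builder = DocumentBuilder(batch_size=32, max_len=512)
    # pass augment=True to balance the classes via backtranslation
    X_train, X_test, y_train, y_test = builder.get_data_loaders(augment=False)
    print('Training reports:', len(X_train), '| Test reports:', len(X_test))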