This repository has been archived by the owner on Oct 24, 2019. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 1
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Add fine tuning
- Loading branch information
Showing
7 changed files
with
182 additions
and
32 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -106,4 +106,5 @@ venv.bak/ | |
# custom | ||
.vscode/ | ||
notebooks/ | ||
*.log | ||
*.log | ||
*.xlsx |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
51 changes: 51 additions & 0 deletions
51
src/experiments/ranlp/logistic_regression/feature_combination.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,51 @@ | ||
# Compare logistic-regression pipelines trained on cumulative "top-k" feature
# combinations, where feature sets are ranked by their individual performance
# (best first).  Results are printed by compare_classifiers.
import warnings

from sklearn.linear_model import LogisticRegression
from src.preprocessing.transformator import get_df
from src.evaluation.compare import compare_classifiers
from src.classifier.sklearn import ranlp_pipelines
from src.data_retrieval.helpers import database


def warn(*args, **kwargs):
    """No-op replacement for warnings.warn — silences all warning output."""
    pass


# Monkey-patch to suppress sklearn convergence/deprecation noise in the logs.
warnings.warn = warn

db = database.MongoDB()
df = get_df(list(db.get_articles()))

# Feature groups ranked by single-feature performance (best first).
# Each group contributes a *_title/*_text column pair, except 'meta_media',
# which is a single column.
_RANKED_FEATURE_GROUPS = [
    ['bg_lsa_title', 'bg_lsa_text'],
    ['en_elmo_title', 'en_elmo_text'],
    ['en_use_title', 'en_use_text'],
    ['en_bert_title', 'en_bert_text'],
    ['bg_bert_title', 'bg_bert_text'],
    ['meta_media'],
    ['bg_xlm_title', 'bg_xlm_text'],
    ['en_nela_title', 'en_nela_text'],
    ['bg_styl_title', 'bg_styl_text'],  # fixed: was 'bg_styl_textx' (typo)
]

# Build ('top_k', [first k groups flattened]) cumulatively instead of the
# hand-written copy-paste lists — this removes the duplication that caused
# the 'bg_styl_textx' typo in the original top_9 entry.
features = []
_cumulative = []
for _rank, _group in enumerate(_RANKED_FEATURE_GROUPS, start=1):
    # '+' makes a fresh list each time so earlier entries are not mutated.
    _cumulative = _cumulative + _group
    features.append((f'top_{_rank}', _cumulative))

# models
models = []
for name, feature_list in features:
    clf = LogisticRegression()
    # Best hyper-parameters found by the separate grid-search script.
    clf_params = {'clf__C': 1.5, 'clf__solver': 'liblinear', 'clf__tol': 0.01}
    model = ranlp_pipelines.make(clf, feature_list, clf_params=clf_params)

    # evaluation
    models.append((name, model))

compare_classifiers(models, df, df['label'], silent=False, plot=False)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,19 @@ | ||
# Plot the score obtained by each cumulative "top-k" feature combination.
import matplotlib.pyplot as plt

# (combination label, score) pairs, in ranking order.
RESULTS = [
    ('top_1', 0.4825940755),
    ('top_2', 0.4860865399),
    ('top_3', 0.498893917),
    ('top_4', 0.3657454759),
    ('top_5', 0.3657454759),
    ('top_6', 0.3808515722),
    ('top_7', 0.3808515722),
    ('top_8', 0.3847330075),
]

# Unzip into parallel label/score sequences.
labels, scores = zip(*RESULTS)

fig, ax = plt.subplots()
ax.set_ylim(0.2, 0.6)
# NOTE(review): 0.3030 is presumably a baseline score to beat — confirm.
ax.axhline(0.3030, ls='--')
plt.plot(labels, scores, '-o')
plt.show()
|
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,65 @@ | ||
# Fine-tune a logistic-regression pipeline per feature column: grid-search the
# best hyper-parameters, generate out-of-fold probability predictions with the
# best estimator, and persist them on each article document in MongoDB under
# 'tuned_predictions'.
import numpy as np
import pandas as pd

from src.data_retrieval.helpers import database
from src.classifier.sklearn import ranlp_pipelines
from src.evaluation.compare import compare_classifiers
from src.preprocessing.transformator import get_df
from sklearn.model_selection import cross_val_predict, GridSearchCV

from sklearn.linear_model import LogisticRegression

db = database.MongoDB()

# Keep the raw article dicts — they are updated in place and saved back below.
articles = list(db.get_articles())
df = get_df(articles)

# Single estimator instance; GridSearchCV clones it internally, so sharing
# one object across iterations is safe.
clf = LogisticRegression()

feature_sets = ['bg_bert', 'bg_xlm', 'bg_styl', 'bg_lsa',
                'en_use', 'en_nela', 'en_bert', 'en_elmo']

# Expand each feature set into its *_title and *_text columns, plus the
# single 'meta_media' column (appended once, outside the loop).
all_feats = []
for feature_set in feature_sets:
    all_feats.append(feature_set + '_title')
    all_feats.append(feature_set + '_text')
all_feats.append('meta_media')


# Trailing comments record the sklearn defaults for each hyper-parameter.
param_grid = {
    'clf__tol': [1e-10, 1e-8, 1e-4, 1e-2, 1e-1],  # 1e-4
    'clf__C': [0.05, 0.15, 0.25, 0.35, 0.50, 0.75, 1, 1.25, 1.5, 2],  # 1,
    'clf__solver': ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga']  # lbfgs
}

print('All features count: ', len(all_feats))
for feature_set in all_feats:
    # Pipeline over a single feature column.
    model = ranlp_pipelines.make(clf, [feature_set])

    # NOTE(review): `iid` was deprecated in sklearn 0.22 and removed in 0.24;
    # this script requires an older sklearn release — confirm pinned version.
    gs = GridSearchCV(model,
                      param_grid=param_grid,
                      scoring='accuracy',
                      cv=5,
                      error_score=-1,  # failed fits score -1 instead of raising
                      verbose=1,
                      n_jobs=-1,
                      iid=False,
                      return_train_score=True)

    gs.fit(df, df['label'])

    # Out-of-fold class probabilities from the best estimator, so every
    # article gets a prediction made by a model that never trained on it.
    pred = cross_val_predict(gs.best_estimator_,
                             df,
                             df['label'],
                             cv=5,
                             method='predict_proba')

    # Attach this feature's probabilities to each article and persist.
    # zip relies on get_df preserving the order of `articles`.
    for article, article_pred in zip(articles, pred):
        if 'tuned_predictions' not in article:
            article['tuned_predictions'] = {}

        article['tuned_predictions'][feature_set] = article_pred.tolist()

        db.save_article(article)

    print(f'Done for {feature_set}')