{
"dataset_reader": {
"class_name": "basic_classification_reader",
"x": "Twit",
"y": "Class",
"data_path": "{DOWNLOADS_PATH}/sentiment_twitter_data"
},
"dataset_iterator": {
"class_name": "basic_classification_iterator",
"seed": 42
},
"chainer": {
"in": [
"x"
],
"in_y": [
"y"
],
"pipe": [
{
"id": "classes_vocab",
"class_name": "simple_vocab",
"fit_on": [
"y"
],
"save_path": "{MODEL_PATH}/classes.dict",
"load_path": "{MODEL_PATH}/classes.dict",
"in": "y",
"out": "y_ids"
},
{
"class_name": "transformers_bert_preprocessor",
"vocab_file": "{BERT_PATH}/vocab.txt",
"do_lower_case": false,
"max_seq_length": 512,
"in": ["x"],
"out": ["tokens", "subword_tokens", "subword_tok_ids", "startofword_markers", "attention_mask"]
},
{
"class_name": "transformers_bert_embedder",
"id": "my_embedder",
"bert_config_path": "{BERT_PATH}/bert_config.json",
"truncate": false,
"load_path": "{BERT_PATH}",
"in": ["subword_tok_ids", "startofword_markers", "attention_mask"],
"out": ["word_emb", "subword_emb", "max_emb", "mean_emb", "pooler_output"]
},
{
"in": "y_ids",
"out": "y_onehot",
"class_name": "one_hotter",
"depth": "#classes_vocab.len",
"single_vector": true
},
{
"in": [
"word_emb"
],
"in_y": [
"y_onehot"
],
"out": [
"y_pred_probas"
],
"main": true,
"class_name": "keras_classification_model",
"save_path": "{MODEL_PATH}/model",
"load_path": "{MODEL_PATH}/model",
"embedding_size": "#my_embedder.dim",
"n_classes": "#classes_vocab.len",
"kernel_sizes_cnn": [
3,
5,
7
],
"filters_cnn": 256,
"optimizer": "Adam",
"learning_rate": 0.01,
"learning_rate_decay": 0.1,
"loss": "binary_crossentropy",
"last_layer_activation": "softmax",
"coef_reg_cnn": 1e-3,
"coef_reg_den": 1e-2,
"dropout_rate": 0.5,
"dense_size": 100,
"model_name": "cnn_model"
},
{
"in": "y_pred_probas",
"out": "y_pred_ids",
"class_name": "proba2labels",
"max_proba": true
},
{
"in": "y_pred_ids",
"out": "y_pred_labels",
"ref": "classes_vocab"
}
],
"out": [
"y_pred_labels"
]
},
"train": {
"epochs": 100,
"batch_size": 64,
"metrics": [
"accuracy",
"f1_macro",
{
"name": "roc_auc",
"inputs": ["y_onehot", "y_pred_probas"]
}
],
"validation_patience": 5,
"val_every_n_epochs": 1,
"log_every_n_epochs": 1,
"show_examples": false,
"evaluation_targets": [
"valid",
"test"
],
"class_name": "nn_trainer"
},
"metadata": {
"variables": {
"ROOT_PATH": "~/.deeppavlov",
"DOWNLOADS_PATH": "{ROOT_PATH}/downloads",
"MODELS_PATH": "{ROOT_PATH}/models",
"MODEL_PATH": "{MODELS_PATH}/classifiers/sentiment_twitter_bert_emb",
"BERT_PATH": "{DOWNLOADS_PATH}/bert_models/rubert_cased_L-12_H-768_A-12_pt"
},
"download": [
{
"url": "http://files.deeppavlov.ai/datasets/sentiment_twitter_data.tar.gz",
"subdir": "{DOWNLOADS_PATH}"
},
{
"url": "http://files.deeppavlov.ai/deeppavlov_data/bert/rubert_cased_L-12_H-768_A-12_pt.tar.gz",
"subdir": "{DOWNLOADS_PATH}/bert_models"
}
]
}
}