{
  "dataset_reader": {
    "class_name": "conll2003_reader",
    "dataset_name": "ontonotes",
    "data_path": "{DOWNLOADS_PATH}/ontonotes"
  },
  "dataset_iterator": {
    "class_name": "data_learning_iterator",
    "seed": 42
  },
  "chainer": {
    "in": ["x"],
    "in_y": ["y"],
    "pipe": [
      {
        "class_name": "transformers_bert_preprocessor",
        "vocab_file": "{BERT_PATH}/vocab.txt",
        "do_lower_case": false,
        "max_seq_length": 512,
        "in": ["x"],
        "out": [
          "x_tokens",
          "subword_tokens",
          "subword_tok_ids",
          "startofword_markers",
          "attention_mask"
        ]
      },
      {
        "class_name": "simple_vocab",
        "id": "tag_vocab",
        "pad_with_zeros": true,
        "save_path": "{MODEL_PATH}/tag.dict",
        "load_path": "{MODEL_PATH}/tag.dict",
        "fit_on": ["y"],
        "in": ["y"],
        "out": ["y_ind"]
      },
      {
        "class_name": "mask",
        "in": ["x_tokens"],
        "out": ["mask"]
      },
      {
        "class_name": "transformers_bert_embedder",
        "id": "embedder",
        "bert_config_path": "{BERT_PATH}/bert_config.json",
        "load_path": "{BERT_PATH}",
        "truncate": false,
        "in": ["subword_tok_ids", "startofword_markers", "attention_mask"],
        "out": ["x_emb", "subword_emb", "max_emb", "mean_emb", "pooler_output"]
      },
      {
        "class_name": "ner",
        "main": true,
        "token_emb_dim": "#embedder.dim",
        "n_tags": "#tag_vocab.len",
        "n_hidden_list": [256, 256, 256],
        "net_type": "rnn",
        "cell_type": "lstm",
        "use_cudnn_rnn": true,
        "two_dense_on_top": true,
        "use_crf": true,
        "use_batch_norm": true,
        "embeddings_dropout": true,
        "top_dropout": true,
        "intra_layer_dropout": false,
        "dropout_keep_prob": 0.7,
        "l2_reg": 0,
        "learning_rate": 3e-3,
        "learning_rate_drop_patience": 3,
        "save_path": "{MODEL_PATH}/model",
        "load_path": "{MODEL_PATH}/model",
        "in": ["x_emb", "mask"],
        "in_y": ["y_ind"],
        "out": ["y_predicted"]
      },
      {
        "ref": "tag_vocab",
        "in": ["y_predicted"],
        "out": ["tags"]
      }
    ],
    "out": ["x_tokens", "tags"]
  },
  "train": {
    "class_name": "nn_trainer",
    "epochs": 100,
    "batch_size": 64,
    "metrics": [
      {"name": "ner_f1", "inputs": ["y", "tags"]},
      {"name": "ner_token_f1", "inputs": ["y", "tags"]}
    ],
    "validation_patience": 7,
    "val_every_n_epochs": 1,
    "log_every_n_batches": -1,
    "tensorboard_log_dir": "{MODEL_PATH}/logs",
    "show_examples": false,
    "evaluation_targets": ["valid", "test"]
  },
  "metadata": {
    "variables": {
      "ROOT_PATH": "~/.deeppavlov",
      "DOWNLOADS_PATH": "{ROOT_PATH}/downloads",
      "MODELS_PATH": "{ROOT_PATH}/models",
      "MODEL_PATH": "{MODELS_PATH}/ner_ontonotes_bert_emb",
      "BERT_PATH": "{DOWNLOADS_PATH}/bert_models/multi_cased_L-12_H-768_A-12_pt"
    },
    "download": [
      {
        "url": "http://files.deeppavlov.ai/deeppavlov_data/bert/multi_cased_L-12_H-768_A-12_pt.tar.gz",
        "subdir": "{DOWNLOADS_PATH}/bert_models"
      }
    ]
  }
}