{
"info": "The config is for training or evaluation of DAM_USE-T on Ubuntu Dialogue Corpus v1 using prepared Word2vec embeddings",
"dataset_reader": {
"class_name": "ubuntu_v1_mt_reader",
"data_path": "{DOWNLOADS_PATH}/ubuntu_v1_data",
"num_context_turns": "{NUM_CONTEXT_TURNS}",
"padding": "pre"
},
"dataset_iterator": {
"class_name": "siamese_iterator",
"shuffle": true,
"seed": 42
},
"chainer": {
"in": ["x"],
"in_y": ["y"],
"pipe": [
{
"class_name": "split_tokenizer",
"id": "tok_1"
},
{
"class_name": "simple_vocab",
"special_tokens": ["", "<UNK>"],
"unk_token": "<UNK>",
"fit_on": ["x"],
"id": "vocab_1",
"save_path": "{MODELS_PATH}/ubuntu_v1_mt_word2vec_dam_transformer/vocabs/int_tok.dict",
"load_path": "{MODELS_PATH}/ubuntu_v1_mt_word2vec_dam_transformer/vocabs/int_tok.dict"
},
{
"id": "word2vec_embedder",
"class_name": "glove",
"dim": 200,
"load_path": "{DOWNLOADS_PATH}/embeddings/v1_ubuntu_word2vec_200_dam_0.779.txt"
},
{
"id": "preproc",
"class_name": "siamese_preprocessor",
"save_path": "{MODELS_PATH}/ubuntu_v1_mt_word2vec_dam_transformer/preproc/tok.dict",
"load_path": "{MODELS_PATH}/ubuntu_v1_mt_word2vec_dam_transformer/preproc/tok.dict",
"num_ranking_samples": 10,
"num_context_turns": "{NUM_CONTEXT_TURNS}",
"max_sequence_length": 50,
"embedding_dim": 200,
"add_raw_text": true,
"fit_on": ["x"],
"in": ["x"],
"out": ["x_proc"],
"tokenizer": {
"ref": "tok_1",
"notes": "use defined tokenizer"
},
"vocab": {
"ref": "vocab_1",
"notes": "use vocab built for tokenized data"
}
},
{
"id": "embeddings",
"class_name": "emb_mat_assembler",
"embedder": "#word2vec_embedder",
"vocab": "#vocab_1"
},
{
"in": ["x_proc"],
"in_y": ["y"],
"out": ["y_predicted"],
"class_name": "dam_nn_use_transformer",
"stack_num": 5,
"is_positional": true,
"num_context_turns": "{NUM_CONTEXT_TURNS}",
"max_sequence_length": "#preproc.max_sequence_length",
"embedding_dim": "#word2vec_embedder.dim",
"emb_matrix": "#embeddings.emb_mat",
"learning_rate": 1e-3,
"batch_size": 100,
"seed": 42,
"decay_steps": 2000,
"save_path": "{MODELS_PATH}/ubuntu_v1_mt_word2vec_dam_transformer/model_dam/model",
"load_path": "{MODELS_PATH}/ubuntu_v1_mt_word2vec_dam_transformer/model_dam/model"
}
],
"out": [
"y_predicted"
]
},
"train": {
"class_name": "nn_trainer",
"epochs": 8,
"batch_size": 100,
"shuffle": true,
"pytest_max_batches": 2,
"train_metrics": [],
"validate_best": true,
"test_best": true,
"metrics": [
"r@1",
"r@2",
"r@5",
"rank_response"
],
"validation_patience": 1,
"val_every_n_epochs": 1,
"log_every_n_batches": 100,
"evaluation_targets": [
"valid",
"test"
],
"tensorboard_log_dir": "{MODELS_PATH}/ubuntu_v1_mt_word2vec_dam_transformer/logs_dam/"
},
"metadata": {
"variables": {
"ROOT_PATH": "~/.deeppavlov",
"DOWNLOADS_PATH": "{ROOT_PATH}/downloads",
"MODELS_PATH": "{ROOT_PATH}/models",
"NUM_CONTEXT_TURNS": 10
},
"requirements": [
"{DEEPPAVLOV_PATH}/requirements/tf.txt",
"{DEEPPAVLOV_PATH}/requirements/tf-hub.txt",
"{DEEPPAVLOV_PATH}/requirements/gensim.txt"
],
"download": [
{
"url": "http://files.deeppavlov.ai/deeppavlov_data/ubuntu_v1_mt_word2vec_dam_transformer.tar.gz",
"subdir": "{MODELS_PATH}"
},
{
"url": "http://files.deeppavlov.ai/datasets/ubuntu_v1_data.tar.gz",
"subdir": "{DOWNLOADS_PATH}/ubuntu_v1_data"
},
{
"url": "http://files.deeppavlov.ai/embeddings/v1_ubuntu_word2vec_200_dam_0.779.txt.tar.gz",
"subdir": "{DOWNLOADS_PATH}/embeddings"
}
]
}
}