-
Notifications
You must be signed in to change notification settings - Fork 25
/
languagemodel.go
193 lines (159 loc) · 5.26 KB
/
languagemodel.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
// Copyright 2022 The NLP Odyssey Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bert
import (
"context"
"fmt"
"path"
"path/filepath"
"sort"
"strings"
"github.com/nlpodyssey/cybertron/pkg/models/bert"
"github.com/nlpodyssey/cybertron/pkg/tasks/languagemodeling"
"github.com/nlpodyssey/cybertron/pkg/tokenizers"
"github.com/nlpodyssey/cybertron/pkg/tokenizers/wordpiecetokenizer"
"github.com/nlpodyssey/cybertron/pkg/vocabulary"
"github.com/nlpodyssey/spago/mat"
"github.com/nlpodyssey/spago/nn"
)
// defaultTopK is the number of candidate words returned per masked token
// when the caller does not specify Parameters.K.
const defaultTopK = 10
// LanguageModel is a masked language model.
type LanguageModel struct {
	// Model is the BERT model used to predict the masked tokens.
	Model *bert.ModelForMaskedLM
	// vocab maps between token IDs and word strings (loaded from vocab.txt).
	vocab *vocabulary.Vocabulary
	// Tokenizer is the wordpiece tokenizer used to tokenize the input text.
	Tokenizer *wordpiecetokenizer.WordPieceTokenizer
	// doLowerCase is a flag indicating if the model should lowercase the input before tokenization.
	doLowerCase bool
}
// LoadMaskedLanguageModel returns a LanguageModel loading the model, the
// embeddings and the tokenizer from a directory.
//
// modelPath must contain "vocab.txt", "tokenizer_config.json" and
// "spago_model.bin". Errors are wrapped with the failing step.
func LoadMaskedLanguageModel(modelPath string) (*LanguageModel, error) {
	vocab, err := vocabulary.NewFromFile(filepath.Join(modelPath, "vocab.txt"))
	if err != nil {
		return nil, fmt.Errorf("failed to load vocabulary for masked language model: %w", err)
	}
	tokenizer := wordpiecetokenizer.New(vocab)
	// NOTE(review): path.Join is kept here for consistency with the rest of
	// this loader, but filepath.Join (used above) would be the portable
	// choice for OS file paths — confirm before unifying.
	tokenizerConfig, err := bert.ConfigFromFile[bert.TokenizerConfig](path.Join(modelPath, "tokenizer_config.json"))
	if err != nil {
		return nil, fmt.Errorf("failed to load tokenizer config for masked language model: %w", err)
	}
	m, err := nn.LoadFromFile[*bert.ModelForMaskedLM](path.Join(modelPath, "spago_model.bin"))
	if err != nil {
		return nil, fmt.Errorf("failed to load bert model: %w", err)
	}
	return &LanguageModel{
		Model:       m,
		vocab:       vocab,
		Tokenizer:   tokenizer,
		doLowerCase: tokenizerConfig.DoLowerCase,
	}, nil
}
// Predict returns, for every token position of the input (including the
// padding tokens added around it), the top-K most probable vocabulary words
// and their probabilities, sorted by start offset.
func (m *LanguageModel) Predict(_ context.Context, text string, parameters languagemodeling.Parameters) (languagemodeling.Response, error) {
	if parameters.K == 0 {
		parameters.K = defaultTopK
	}
	padded := pad(m.tokenize(text))
	maxLen := m.Model.Bert.Config.MaxPositionEmbeddings
	if seqLen := len(padded); seqLen > maxLen {
		return languagemodeling.Response{}, fmt.Errorf("%w: %d > %d", languagemodeling.ErrInputSequenceTooLong, seqLen, maxLen)
	}
	logitsPerToken := m.Model.Predict(tokenizers.GetStrings(padded))
	tokens := make([]languagemodeling.Token, 0, len(logitsPerToken))
	for pos, logits := range logitsPerToken {
		probs := logits.Value().(mat.Matrix).Softmax()
		topK := selectTopK(probs, parameters.K)
		words := make([]string, 0, len(topK))
		scores := make([]float64, 0, len(topK))
		for _, candidate := range topK {
			term, found := m.vocab.Term(candidate.Index)
			if !found {
				// An unknown index indicates a misalignment with the vocabulary.
				term = wordpiecetokenizer.DefaultUnknownToken
			}
			words = append(words, term)
			scores = append(scores, candidate.Score)
		}
		offsets := padded[pos].Offsets
		tokens = append(tokens, languagemodeling.Token{
			Start:  offsets.Start,
			End:    offsets.End,
			Words:  words,
			Scores: scores,
		})
	}
	// Restore input order by character offset.
	sort.SliceStable(tokens, func(a, b int) bool {
		return tokens[a].Start < tokens[b].Start
	})
	return languagemodeling.Response{Tokens: tokens}, nil
}
// tokenize splits text into wordpiece tokens (without padding tokens),
// lowercasing it first when the tokenizer configuration requires it.
func (m *LanguageModel) tokenize(text string) []tokenizers.StringOffsetsPair {
	input := text
	if m.doLowerCase {
		input = strings.ToLower(input)
	}
	return m.Tokenizer.Tokenize(input)
}
// pad surrounds the token sequence with the class token at the front and the
// sequence separator at the end, as BERT expects.
func pad(tokens []tokenizers.StringOffsetsPair) []tokenizers.StringOffsetsPair {
	cls := tokenizers.StringOffsetsPair{String: wordpiecetokenizer.DefaultClassToken}
	sep := tokenizers.StringOffsetsPair{String: wordpiecetokenizer.DefaultSequenceSeparator}
	return append(prepend(tokens, cls), sep)
}
// prepend returns a new slice containing y followed by all elements of x.
func prepend(x []tokenizers.StringOffsetsPair, y tokenizers.StringOffsetsPair) []tokenizers.StringOffsetsPair {
	out := make([]tokenizers.StringOffsetsPair, 0, len(x)+1)
	out = append(out, y)
	return append(out, x...)
}
// IndexScorePair couples a vocabulary index with its probability score,
// as produced by selectTopK.
type IndexScorePair struct {
	Index int
	Score float64
}
// selectTopK returns the resultSize highest-scoring entries of scores as
// (index, score) pairs, sorted by descending score.
//
// For resultSize == 1 it short-circuits to a single ArgMax lookup. Otherwise
// it keeps a running set of the current top resultSize entries, tracking the
// minimum of that set so each new score needs only one comparison; on ties
// (score <= minScore) the earlier entry is kept. Runs in O(n*k) for n scores
// and k = resultSize, which beats a full sort for the small k used here.
func selectTopK(scores mat.Matrix, resultSize int) []*IndexScorePair {
	if resultSize == 1 {
		argmax := scores.ArgMax()
		return []*IndexScorePair{
			{
				Index: argmax,
				Score: scores.ScalarAt(argmax).F64(),
			},
		}
	}
	// Pre-allocate all pairs in one backing array to avoid k separate
	// heap allocations; result holds pointers into it.
	arena := make([]IndexScorePair, resultSize)
	result := make([]*IndexScorePair, 0, resultSize)
	var minScore float64
	minIndex := -1 // -1 marks "no minimum recorded yet"
	for i, score := range scores.Data().F64() {
		if len(result) < resultSize {
			// Fill phase: accept unconditionally, tracking the minimum.
			if minIndex == -1 || score < minScore {
				minScore = score
				minIndex = len(result)
			}
			st := &arena[0]
			arena = arena[1:]
			st.Index = i
			st.Score = score
			result = append(result, st)
			continue
		}
		if score <= minScore {
			continue
		}
		// Replace the scored token with minimum score with the new one
		st := result[minIndex]
		st.Index = i
		st.Score = score
		// Find the new minimum
		minScore = result[0].Score
		minIndex = 0
		for j, v := range result {
			if v.Score < minScore {
				minScore = v.Score
				minIndex = j
			}
		}
	}
	// Order the surviving top-k entries by descending score.
	sort.SliceStable(result, func(i, j int) bool {
		return result[i].Score > result[j].Score
	})
	return result
}