embeddings.py
from typing import List
import numpy as np
import torch
import torch.nn.functional as F
from transformers import BertModel, BertTokenizer


def mean_pool(
    last_hidden_states: torch.Tensor, attention_mask: torch.Tensor
) -> torch.Tensor:
    """Average token embeddings over the sequence dimension, ignoring padded positions."""
    last_hidden = last_hidden_states.masked_fill(~attention_mask[..., None].bool(), 0.0)
    return last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None]


def embed(encoder_model, inputs):
    """Run the encoder, mean-pool the token states, and L2-normalize the result."""
    model_output = encoder_model(**inputs)
    embeddings = mean_pool(model_output.last_hidden_state, inputs["attention_mask"])
    return F.normalize(embeddings, p=2, dim=1)


def e5_model_embeddings(
    model: BertModel,
    tokenizer: BertTokenizer,
    samples: List[str],
    max_seq_length: int = 512,
    batch_size: int = 32,
    device: str = "cpu",
) -> np.ndarray:
    """Embed a list of passages with an E5 encoder, processing them in batches.

    E5 models expect a "passage: " prefix on documents (and "query: " on queries),
    so the prefix is prepended to every sample before tokenization.
    """
    passage_prefix = "passage: "
    augmented_samples = [f"{passage_prefix}{sample}" for sample in samples]
    embeddings = []
    for start in range(0, len(augmented_samples), batch_size):
        batch = augmented_samples[start : start + batch_size]
        # Tokenize with padding/truncation and move the tensors to the target device.
        encoded_samples = tokenizer(
            batch,
            padding=True,
            truncation=True,
            max_length=max_seq_length,
            return_tensors="pt",
        ).to(device)
        with torch.no_grad():
            batch_embeddings = embed(model, encoded_samples)
        embeddings.append(batch_embeddings.cpu().numpy())
    return np.vstack(embeddings)
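

if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): assumes the
    # "intfloat/e5-base-v2" checkpoint; any E5-style BERT encoder that follows
    # the same "passage: " convention should work the same way.
    model_name = "intfloat/e5-base-v2"
    tokenizer = BertTokenizer.from_pretrained(model_name)
    model = BertModel.from_pretrained(model_name).eval()

    passages = [
        "The Eiffel Tower is located in Paris.",
        "Mount Everest is the highest mountain above sea level.",
    ]
    vectors = e5_model_embeddings(model, tokenizer, passages, batch_size=2)
    # Embeddings are L2-normalized, so the dot product equals cosine similarity.
    print(vectors.shape)        # (2, hidden_size), e.g. (2, 768) for a base-sized encoder
    print(vectors @ vectors.T)  # pairwise cosine similarities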