"""
A quantized model executes some or all of the operations with integers rather than floating point values. This allows for a more compact models and the use of high performance vectorized operations on many hardware platforms.
As a result, you get about 40% smaller and faster models. The speed-up depends on your CPU and how PyTorch was build and can be anywhere between 10% speed-up and 300% speed-up.
Note: Quantized models are only available for CPUs. Use a GPU, if available, for optimal performance.
For more details:
https://pytorch.org/docs/stable/quantization.html
"""
import csv
import gzip
import logging
import os
import time
import torch
from torch.nn import Embedding, Linear
from torch.quantization import quantize_dynamic
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
    format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
    util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Limit torch to 4 threads
torch.set_num_threads(4)
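
# Not part of the original example: the quantized kernels used at runtime depend on how
# PyTorch was built, which is one reason the speed-up varies between machines. The active
# backend can be inspected via torch.backends.quantized.engine (typically "fbgemm" on x86
# and "qnnpack" on ARM).
logging.info(f"Quantized backend engine: {torch.backends.quantized.engine}")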
model_name = "all-distilroberta-v1"

# Load a pretrained sentence transformer model. This will download the model if it is not cached yet.
# Alternatively, you can also pass a filepath to SentenceTransformer()
model = SentenceTransformer(model_name, device="cpu")

# Apply dynamic quantization to all Linear and Embedding layers of the model
q_model = quantize_dynamic(model, {Linear, Embedding})
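
# Rough size comparison (a sketch added for illustration, not part of the original example):
# serialize both state dicts to an in-memory buffer and compare byte counts. This lets you
# verify the "roughly 40% smaller" claim from the module docstring on your own machine.
import io


def state_dict_size_mb(m):
    buffer = io.BytesIO()
    torch.save(m.state_dict(), buffer)
    return buffer.getbuffer().nbytes / 1e6


logging.info(f"Unquantized model size: {state_dict_size_mb(model):.1f} MB")
logging.info(f"Quantized model size: {state_dict_size_mb(q_model):.1f} MB")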
# Read the STS benchmark dataset; the test split is used for the accuracy evaluation below
logging.info("Read STSbenchmark dataset")
test_samples = []
sentences = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
    reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
    for row in reader:
        score = float(row["score"]) / 5.0  # Normalize score to range 0 ... 1
        inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
        sentences.append(row["sentence1"])
        sentences.append(row["sentence2"])
        if row["split"] == "test":
            test_samples.append(inp_example)
sentences = sentences[0:10000]  # Limit to the first 10k sentences for the speed benchmark
logging.info("Evaluating speed of unquantized model")
start_time = time.time()
emb = model.encode(sentences, show_progress_bar=True)
diff_normal = time.time() - start_time
logging.info(f"Done after {diff_normal:.2f} sec. {len(sentences) / diff_normal:.2f} sentences / sec")
logging.info("Evaluating speed of quantized model")
start_time = time.time()
emb = q_model.encode(sentences, show_progress_bar=True)
diff_quantized = time.time() - start_time
logging.info(f"Done after {diff_quantized:.2f} sec. {len(sentences) / diff_quantized:.2f} sentences / sec")
logging.info(f"Speed-up: {diff_normal / diff_quantized:.2f}")
######### Accuracy evaluation on the STS benchmark test set #########
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
logging.info("Evaluate regular model")
model.evaluate(evaluator)
print("\n\n")
logging.info("Evaluate quantized model")
q_model.evaluate(evaluator)