test: add gpu test for set embedding (#164)
bwanglzu committed Oct 23, 2021
1 parent 43480cc commit 0d8e0b5
Showing 5 changed files with 85 additions and 7 deletions.
5 changes: 3 additions & 2 deletions finetuner/embedding.py
@@ -14,7 +14,7 @@ def set_embeddings(
     :param docs: the Documents to be embedded
     :param embed_model: the embedding model written in Keras/Pytorch/Paddle
-    :param device: the computational device for `embed_model`, can be `cpu`, `cuda`, etc.
+    :param device: the computational device for `embed_model`, can be either `cpu` or `cuda`.
     """
     fm = get_framework(embed_model)
@@ -47,8 +47,9 @@ def _set_embeddings_torch(
     import torch

     tensor = torch.tensor(docs.blobs, device=device)
+    embed_model = embed_model.to(device)
     with torch.inference_mode():
-        embeddings = embed_model(tensor).cpu().numpy()
+        embeddings = embed_model(tensor).cpu().detach().numpy()

     docs.embeddings = embeddings

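For context on the `_set_embeddings_torch` change above: the model is now moved to the target device before inference, and the output is detached before the NumPy conversion. Below is a minimal standalone sketch of that pattern in plain PyTorch; the toy array and model are made up for illustration and are not part of this repository.

```python
import numpy as np
import torch
import torch.nn as nn

# Hypothetical stand-ins for docs.blobs and the embedding model.
blobs = np.random.rand(8, 28 * 28).astype('float32')
embed_model = nn.Sequential(nn.Linear(28 * 28, 32))

device = 'cuda' if torch.cuda.is_available() else 'cpu'

tensor = torch.tensor(blobs, device=device)   # input tensor on the target device
embed_model = embed_model.to(device)          # model must live on the same device
with torch.inference_mode():                  # no autograd graph is built here
    embeddings = embed_model(tensor).cpu().detach().numpy()

print(embeddings.shape)  # (8, 32)
```

Under `inference_mode` the forward pass builds no autograd graph, so the extra `.detach()` is mainly a safeguard before converting to NumPy.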
2 changes: 1 addition & 1 deletion tests/unit/test_embedding.py
@@ -37,7 +37,7 @@


 @pytest.mark.parametrize('framework', ['keras', 'pytorch', 'paddle'])
-def test_embedding_docs(framework, tmpdir):
+def test_set_embeddings(framework, tmpdir):
     # works for DA
     embed_model = embed_models[framework]()
     docs = DocumentArray(generate_fashion_match(num_total=100))
28 changes: 26 additions & 2 deletions tests/unit/tuner/keras/test_gpu.py
@@ -1,7 +1,10 @@
 import pytest
 import tensorflow as tf
-from jina import DocumentArray
+from jina import DocumentArray, DocumentArrayMemmap

 from finetuner.tuner.keras import KerasTuner
+from finetuner.embedding import set_embeddings
+from finetuner.toydata import generate_fashion_match

 all_test_losses = [
     'CosineSiameseLoss',
@@ -22,7 +25,7 @@ def tf_gpu_config():

 @pytest.mark.gpu
 @pytest.mark.parametrize('loss', all_test_losses)
-def test_gpu_keras(generate_random_triplets, loss, caplog):
+def test_gpu_keras(generate_random_triplets, loss):
     data = generate_random_triplets(4, 4)
     embed_model = tf.keras.models.Sequential()
     embed_model.add(tf.keras.layers.InputLayer(input_shape=(4,)))
@@ -31,3 +34,24 @@ def test_gpu_keras(generate_random_triplets, loss, caplog):
     tuner = KerasTuner(embed_model, loss)

     tuner.fit(data, data, epochs=2, batch_size=4, device='cuda')
+
+
+@pytest.mark.gpu
+def test_set_embeddings_gpu(tmpdir):
+    # works for DA
+    embed_model = tf.keras.Sequential(
+        [
+            tf.keras.layers.Flatten(input_shape=(28, 28)),
+            tf.keras.layers.Dense(128, activation='relu'),
+            tf.keras.layers.Dense(32),
+        ]
+    )
+    docs = DocumentArray(generate_fashion_match(num_total=100))
+    set_embeddings(docs, embed_model, 'cuda')
+    assert docs.embeddings.shape == (100, 32)
+
+    # works for DAM
+    dam = DocumentArrayMemmap(tmpdir)
+    dam.extend(generate_fashion_match(num_total=42))
+    set_embeddings(dam, embed_model, 'cuda')
+    assert dam.embeddings.shape == (42, 32)
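This test (and the analogous paddle and torch tests below) is guarded by `@pytest.mark.gpu`, so it is intended to run only on machines with a GPU. Assuming the `gpu` marker is registered in the project's pytest configuration, such tests can be selected with `pytest -m gpu` or excluded with `pytest -m "not gpu"` on CPU-only machines.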
28 changes: 27 additions & 1 deletion tests/unit/tuner/paddle/test_gpu.py
@@ -1,6 +1,9 @@
 import pytest
 import paddle.nn as nn
-from jina import DocumentArray
+from jina import DocumentArray, DocumentArrayMemmap

+from finetuner.embedding import set_embeddings
+from finetuner.toydata import generate_fashion_match
 from finetuner.tuner.paddle import PaddleTuner

 all_test_losses = [
@@ -27,3 +30,26 @@ def test_gpu_paddle(generate_random_triplets, loss):

     for param in tuner.embed_model.parameters():
         assert str(param.place) == 'CUDAPlace(0)'
+
+
+@pytest.mark.gpu
+def test_set_embeddings_gpu(tmpdir):
+    # works for DA
+    embed_model = nn.Sequential(
+        nn.Flatten(),
+        nn.Linear(
+            in_features=28 * 28,
+            out_features=128,
+        ),
+        nn.ReLU(),
+        nn.Linear(in_features=128, out_features=32),
+    )
+    docs = DocumentArray(generate_fashion_match(num_total=100))
+    set_embeddings(docs, embed_model, 'cuda')
+    assert docs.embeddings.shape == (100, 32)
+
+    # works for DAM
+    dam = DocumentArrayMemmap(tmpdir)
+    dam.extend(generate_fashion_match(num_total=42))
+    set_embeddings(dam, embed_model, 'cuda')
+    assert dam.embeddings.shape == (42, 32)
29 changes: 28 additions & 1 deletion tests/unit/tuner/torch/test_gpu.py
@@ -1,6 +1,10 @@
 import pytest
 import torch
-from jina import DocumentArray
+import torch.nn as nn
+from jina import DocumentArray, DocumentArrayMemmap

+from finetuner.embedding import set_embeddings
+from finetuner.toydata import generate_fashion_match
 from finetuner.tuner.pytorch import PytorchTuner

+
@@ -30,3 +34,26 @@ def test_gpu_pytorch(generate_random_triplets, loss):

     # Test the model was moved (by checking one of its parameters)
     assert next(embed_model.parameters()).device.type == 'cuda'
+
+
+@pytest.mark.gpu
+def test_set_embeddings_gpu(tmpdir):
+    # works for DA
+    embed_model = nn.Sequential(
+        nn.Flatten(),
+        nn.Linear(
+            in_features=28 * 28,
+            out_features=128,
+        ),
+        nn.ReLU(),
+        nn.Linear(in_features=128, out_features=32),
+    )
+    docs = DocumentArray(generate_fashion_match(num_total=100))
+    set_embeddings(docs, embed_model, 'cuda')
+    assert docs.embeddings.shape == (100, 32)
+
+    # works for DAM
+    dam = DocumentArrayMemmap(tmpdir)
+    dam.extend(generate_fashion_match(num_total=42))
+    set_embeddings(dam, embed_model, 'cuda')
+    assert dam.embeddings.shape == (42, 32)
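All of these GPU tests assume a CUDA device is visible. As a hedged aside (not something this diff adds), a common guard is a module-level skip when no GPU is available:

```python
import pytest
import torch

# Hypothetical module-level guard: skip every test in the file if CUDA is absent.
pytestmark = pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires a CUDA-capable GPU'
)
```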
