Add recall_score function in Ivy with Test (#27986)
Co-authored-by: ivy-branch <ivy.branch@lets-unify.ai>
muzakkirhussain011 and ivy-branch committed Feb 25, 2024
1 parent 050be25 commit 76bef3e
Showing 2 changed files with 99 additions and 1 deletion.
33 changes: 33 additions & 0 deletions ivy/functional/frontends/sklearn/metrics/_classification.py
@@ -17,3 +17,36 @@ def accuracy_score(y_true, y_pred, *, normalize=True, sample_weight=None):
        ret = ret / y_true.shape[0]
        ret = ret.astype("float64")
    return ret


@to_ivy_arrays_and_back
def recall_score(y_true, y_pred, *, sample_weight=None):
    # Ensure that y_true and y_pred have the same shape
    if y_true.shape != y_pred.shape:
        raise IvyValueError("y_true and y_pred must have the same shape")

    # Check if sample_weight is provided and normalize it
    if sample_weight is not None:
        sample_weight = ivy.array(sample_weight)
        if sample_weight.shape[0] != y_true.shape[0]:
            raise IvyValueError(
                "sample_weight must have the same length as y_true and y_pred"
            )
        sample_weight = sample_weight / ivy.sum(sample_weight)
    else:
        sample_weight = ivy.ones_like(y_true)

    # Calculate true positives and actual positives
    true_positives = ivy.logical_and(ivy.equal(y_true, 1), ivy.equal(y_pred, 1)).astype(
        "int64"
    )
    actual_positives = ivy.equal(y_true, 1).astype("int64")

    # Apply sample weights
    weighted_true_positives = ivy.multiply(true_positives, sample_weight)
    weighted_actual_positives = ivy.multiply(actual_positives, sample_weight)

    # Compute recall
    ret = ivy.sum(weighted_true_positives) / ivy.sum(weighted_actual_positives)
    ret = ret.astype("float64")
    return ret
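
For orientation, a minimal usage sketch of the new frontend function (illustrative only, not part of the diff; it assumes recall_score is re-exported from ivy.functional.frontends.sklearn.metrics the same way accuracy_score is, and that a backend has been set):

import ivy
from ivy.functional.frontends.sklearn.metrics import recall_score  # assumed re-export

ivy.set_backend("numpy")

y_true = ivy.array([1, 0, 1, 1, 0])
y_pred = ivy.array([1, 0, 0, 1, 0])

# Two of the three actual positives are predicted positive -> recall = 2 / 3
print(recall_score(y_true, y_pred))  # ~0.6667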
@@ -1,5 +1,5 @@
from hypothesis import strategies as st

import torch
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
import numpy as np
@@ -43,3 +43,68 @@ def test_sklearn_accuracy_score(
        normalize=normalize,
        sample_weight=None,
    )


@handle_frontend_test(
    fn_tree="sklearn.metrics.recall_score",
    arrays_and_dtypes=helpers.dtype_and_values(
        available_dtypes=helpers.get_dtypes("integer"),
        num_arrays=2,
        min_value=0,
        max_value=1,  # Recall score is for binary classification
        shared_dtype=True,
        shape=(helpers.ints(min_value=2, max_value=5)),
    ),
    sample_weight=st.lists(
        st.floats(min_value=0.1, max_value=1), min_size=2, max_size=5
    ),
)
def test_sklearn_recall_score(
    arrays_and_dtypes,
    on_device,
    fn_tree,
    frontend,
    test_flags,
    backend_fw,
    sample_weight,
):
    dtypes, values = arrays_and_dtypes
    # Ensure the values are binary by rounding and converting to int
    for i in range(2):
        values[i] = np.round(values[i]).astype(int)

    # Adjust sample_weight to have the correct length
    sample_weight = np.array(sample_weight).astype(float)
    if len(sample_weight) != len(values[0]):
        # If sample_weight is shorter, extend it with ones
        sample_weight = np.pad(
            sample_weight,
            (0, max(0, len(values[0]) - len(sample_weight))),
            "constant",
            constant_values=1.0,
        )
        # If sample_weight is longer, truncate it
        sample_weight = sample_weight[: len(values[0])]

    # Detach tensors if they require grad before converting to NumPy arrays
    if backend_fw == "torch":
        values = [
            (
                value.detach().numpy()
                if isinstance(value, torch.Tensor) and value.requires_grad
                else value
            )
            for value in values
        ]

    helpers.test_frontend_function(
        input_dtypes=dtypes,
        backend_to_test=backend_fw,
        test_flags=test_flags,
        fn_tree=fn_tree,
        frontend=frontend,
        on_device=on_device,
        y_true=values[0],
        y_pred=values[1],
        sample_weight=sample_weight,
    )
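
As a sanity check on the formula the test exercises, the weighted recall above reduces to weighted true positives over weighted actual positives, which agrees with scikit-learn's own recall_score; a small NumPy sketch for reference only (not part of the test suite):

import numpy as np
from sklearn.metrics import recall_score as sk_recall_score

y_true = np.array([1, 0, 1, 1, 0])
y_pred = np.array([1, 0, 0, 1, 0])
w = np.array([0.5, 1.0, 1.0, 0.5, 1.0])

# Weighted true positives / weighted actual positives
tp = np.sum(((y_true == 1) & (y_pred == 1)) * w)  # 1.0
ap = np.sum((y_true == 1) * w)                    # 2.0
print(tp / ap)                                    # 0.5
print(sk_recall_score(y_true, y_pred, sample_weight=w))  # 0.5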
