2 changes: 1 addition & 1 deletion rekcurd/grpc
8 changes: 6 additions & 2 deletions rekcurd/rekcurd_dashboard_servicer.py
@@ -188,12 +188,14 @@ def EvaluateModel(self,
raise Exception(f'Error: Invalid evaluation result file path specified -> {result_path}')

result, details = self.app.evaluate(self.app.get_eval_path(data_path))
label_ios = [self.get_io_by_type(l) for l in result.label]
metrics = rekcurd_pb2.EvaluationMetrics(num=result.num,
accuracy=result.accuracy,
precision=result.precision,
recall=result.recall,
fvalue=result.fvalue,
option=result.option)
option=result.option,
label=label_ios)

eval_result_path = self.app.get_eval_path(result_path)
Path(eval_result_path).parent.mkdir(parents=True, exist_ok=True)
@@ -244,12 +246,14 @@ def EvaluationResult(self,
result_details = pickle.load(f)
with open(eval_result_path + self.EVALUATE_RESULT, 'rb') as f:
result = pickle.load(f)
label_ios = [self.get_io_by_type(l) for l in result.label]
metrics = rekcurd_pb2.EvaluationMetrics(num=result.num,
accuracy=result.accuracy,
precision=result.precision,
recall=result.recall,
fvalue=result.fvalue,
option=result.option)
option=result.option,
label=label_ios)

detail_chunks = []
detail_chunk = []
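Both hunks above convert each evaluation label to an IO message via get_io_by_type and attach the list to the new label field of EvaluationMetrics. As a client-side sketch, the labels can be read back the way the updated tests below do; extract_labels is a hypothetical helper, and the str.val layout is assumed from those test assertions:

    # Hypothetical helper (not part of rekcurd): collect string labels from the
    # new metrics.label field. Assumes each IO message wraps a string label as
    # str.val, mirroring the assertions added in test_dashboard_servicer.py.
    def extract_labels(metrics) -> list:
        return [io.str.val[0] for io in metrics.label]

    # Usage sketch: labels = extract_labels(response.metrics)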
27 changes: 10 additions & 17 deletions rekcurd/utils/__init__.py
@@ -70,23 +70,16 @@ def __init__(self, label: PredictLabel, score: PredictScore, option: dict = None


class EvaluateResult:
def __init__(self, num: int = None, accuracy: float = None,
precision: List[float] = None, recall: List[float] = None,
fvalue: List[float] = None, option: Dict[str, float] = {}):
if num is None:
self.num = 0
self.accuracy = 0.0
self.precision = [0.0]
self.recall = [0.0]
self.fvalue = [0.0]
self.option = {}
else:
self.num = num
self.accuracy = accuracy
self.precision = precision
self.recall = recall
self.fvalue = fvalue
self.option = option
def __init__(self, num: int, accuracy: float, precision: List[float],
recall: List[float], fvalue: List[float], label: List[PredictLabel],
option: Dict[str, float] = {}):
Member
BTW, I think the option type is just dict, not Dict[str, float]...?

Member Author (@yuki-mt, Feb 8, 2019)
@keigohtr
The gRPC spec is map<string, float> here because I expected option to be used only as additional metrics (e.g. accuracy in a specific condition), so it needs to be Dict[str, float].

(It will be OK if we change the gRPC spec.)

Member
It's OK at the moment.

Could you make an issue for this?
Additional metrics must be a general field for ML evaluation; the current spec is specific to our use case, so we need to fix it.

Member Author
I did it! #35

self.num = num
self.accuracy = accuracy
self.precision = precision
self.recall = recall
self.fvalue = fvalue
self.label = label
self.option = option


class EvaluateResultDetail(NamedTuple):
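Following up on the review thread above: option stays Dict[str, float] so it can carry extra scalar metrics next to the fixed fields, and label is now a required constructor argument. A minimal, illustrative construction of the reworked EvaluateResult; the import path is assumed from the file layout shown here, and all values plus the extra metric name are made-up examples:

    from rekcurd.utils import EvaluateResult

    # Illustrative values only: a two-class evaluation over 100 samples.
    result = EvaluateResult(
        num=100,
        accuracy=0.82,
        precision=[0.80, 0.84],
        recall=[0.78, 0.85],
        fvalue=[0.79, 0.84],
        label=['negative', 'positive'],            # new required argument in this PR
        option={'accuracy_in_condition_x': 0.75},  # extra metric, hence Dict[str, float]
    )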
7 changes: 4 additions & 3 deletions test/test_dashboard_servicer.py
@@ -1,5 +1,4 @@
import unittest
import sys
import time
from functools import wraps
from unittest.mock import patch, Mock, mock_open
@@ -13,7 +12,7 @@


target_service = rekcurd_pb2.DESCRIPTOR.services_by_name['RekcurdDashboard']
eval_result = EvaluateResult(1, 0.8, [0.7], [0.6], [0.5], {'dummy': 0.4})
eval_result = EvaluateResult(1, 0.8, [0.7], [0.6], [0.5], ['label1'], {'dummy': 0.4})
eval_result_details = [EvaluateResultDetail(PredictResult('pre_label', 0.9), False)]
eval_detail = EvaluateDetail('input', 'label', eval_result_details[0])

@@ -147,7 +146,7 @@ def test_InvalidEvaluationData(self):
self.assertEqual(response.status, 0)

@patch_predictor()
def test_EvalauteModel(self):
def test_EvaluateModel(self):
request = rekcurd_pb2.EvaluateModelRequest(data_path='my_path', result_path='my_path')
rpc = self._real_time_server.invoke_stream_unary(
target_service.methods_by_name['EvaluateModel'], (), None)
@@ -163,6 +162,7 @@ def test_EvalauteModel(self):
self.assertEqual([round(r, 3) for r in response.metrics.recall], eval_result.recall)
self.assertEqual([round(f, 3) for f in response.metrics.fvalue], eval_result.fvalue)
self.assertEqual(round(response.metrics.option['dummy'], 3), eval_result.option['dummy'])
self.assertEqual([l.str.val[0] for l in response.metrics.label], eval_result.label)

@patch_predictor()
def test_InvalidEvalauteModel(self):
@@ -203,6 +203,7 @@ def test_EvalautionResult(self):
self.assertEqual([round(r, 3) for r in response.metrics.recall], eval_result.recall)
self.assertEqual([round(f, 3) for f in response.metrics.fvalue], eval_result.fvalue)
self.assertEqual(round(response.metrics.option['dummy'], 3), eval_result.option['dummy'])
self.assertEqual([l.str.val[0] for l in response.metrics.label], eval_result.label)

self.assertEqual(len(response.detail), 1)
detail = response.detail[0]