New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
handle floating point values more accurately #277
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,5 +1,3 @@ | ||
import numpy as np | ||
|
||
from m2cgen import ast | ||
from m2cgen.assemblers import utils | ||
from m2cgen.assemblers.base import ModelAssembler | ||
|
@@ -49,11 +47,5 @@ def _assemble_leaf(self, node_id): | |
|
||
def _assemble_cond(self, node_id): | ||
feature_idx = self._tree.feature[node_id] | ||
threshold = self._tree.threshold[node_id] | ||
|
||
# sklearn's trees internally work with float32 numbers, so in order | ||
# to have consistent results across all supported languages, we convert | ||
# all thresholds into float32. | ||
threshold_num_val = ast.NumVal(threshold, dtype=np.float32) | ||
|
||
threshold_num_val = ast.NumVal(self._tree.threshold[node_id]) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Refer to #190 (review). Now threshold matches original type in scikit-learn ( |
||
return utils.lte(ast.FeatureRef(feature_idx), threshold_num_val) |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,5 +1,7 @@ | ||
import re | ||
|
||
import numpy as np | ||
|
||
from collections import namedtuple | ||
from functools import lru_cache | ||
from math import ceil, log | ||
|
@@ -22,3 +24,7 @@ def _get_handler_name(expr_tpe): | |
|
||
def _normalize_expr_name(name): | ||
return re.sub("(?!^)([A-Z]+)", r"_\1", name).lower() | ||
|
||
|
||
def format_float(value): | ||
return np.format_float_positional(value, unique=True, trim="0") | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Maybe |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -568,6 +568,8 @@ def test_e2e(estimator, executor_cls, model_trainer, | |
with executor.prepare_then_cleanup(): | ||
for idx in idxs_to_test: | ||
y_pred_executed = executor.predict(X_test[idx]) | ||
y_pred_executed = np.array( | ||
y_pred_executed, dtype=y_pred_true.dtype, copy=False) | ||
Comment on lines
+571
to
+572
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Quite often different packages not only cast input values in |
||
print(f"expected={y_pred_true[idx]}, actual={y_pred_executed}") | ||
res = np.isclose(y_pred_true[idx], y_pred_executed, atol=ATOL) | ||
assert res if isinstance(res, bool) else res.all() |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -13,11 +13,13 @@ | |
from lightning.impl.base import BaseClassifier as LightBaseClassifier | ||
from sklearn import datasets | ||
from sklearn.base import BaseEstimator, RegressorMixin, clone | ||
from sklearn.ensemble._forest import ForestClassifier | ||
from sklearn.ensemble._forest import ForestClassifier, BaseForest | ||
from sklearn.utils import shuffle | ||
from sklearn.linear_model._base import LinearClassifierMixin | ||
from sklearn.tree import DecisionTreeClassifier | ||
from sklearn.tree._classes import BaseDecisionTree | ||
from sklearn.svm import SVC, NuSVC | ||
from sklearn.svm._base import BaseLibSVM | ||
from xgboost import XGBClassifier | ||
|
||
from m2cgen import ast | ||
|
@@ -125,15 +127,22 @@ def __call__(self, estimator): | |
if isinstance(estimator, (LinearClassifierMixin, SVC, NuSVC, | ||
LightBaseClassifier)): | ||
y_pred = estimator.decision_function(self.X_test) | ||
elif isinstance(estimator, DecisionTreeClassifier): | ||
y_pred = estimator.predict_proba(self.X_test.astype(np.float32)) | ||
elif isinstance( | ||
estimator, | ||
(ForestClassifier, XGBClassifier, LGBMClassifier)): | ||
(ForestClassifier, DecisionTreeClassifier, | ||
XGBClassifier, LGBMClassifier)): | ||
y_pred = estimator.predict_proba(self.X_test) | ||
else: | ||
y_pred = estimator.predict(self.X_test) | ||
|
||
# Some models force input data to be particular type | ||
# during prediction phase in their native Python libraries. | ||
# For correct comparison of testing results we mimic the same behavior | ||
if isinstance(estimator, (BaseDecisionTree, BaseForest)): | ||
self.X_test = self.X_test.astype(np.float32, copy=False) | ||
Comment on lines
+141
to
+142
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
|
||
elif isinstance(estimator, BaseLibSVM): | ||
self.X_test = self.X_test.astype(np.float64, copy=False) | ||
Comment on lines
+143
to
+144
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. |
||
|
||
return self.X_test, y_pred, fitted_estimator | ||
|
||
|
||
|
@@ -238,9 +247,9 @@ def predict_from_commandline(exec_args): | |
items = stdout.decode("utf-8").strip().split(" ") | ||
|
||
if len(items) == 1: | ||
return float(items[0]) | ||
return np.float64(items[0]) | ||
else: | ||
return [float(i) for i in items] | ||
return [np.float64(i) for i in items] | ||
Comment on lines
+250
to
+252
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Let's use numpy types across all codebase for the consistency. |
||
|
||
|
||
def cartesian_e2e_params(executors_with_marks, models_with_trainers_with_marks, | ||
|
@@ -284,4 +293,4 @@ def inner(*args, **kwarg): | |
|
||
|
||
def _is_float(value): | ||
return isinstance(value, (float, np.float16, np.float32, np.float64)) | ||
return isinstance(value, (float, np.floating)) |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
https://github.com/dmlc/xgboost/blob/1d22a9be1cdeb53dfa9322c92541bc50e82f3c43/src/tree/tree_model.cc#L316
https://github.com/dmlc/xgboost/blob/1d22a9be1cdeb53dfa9322c92541bc50e82f3c43/include/xgboost/tree_model.h#L152-L155
https://github.com/dmlc/xgboost/blob/1d22a9be1cdeb53dfa9322c92541bc50e82f3c43/include/xgboost/base.h#L110-L111
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Interestingly, `weight` and `bias` are also `float` internally:
https://github.com/dmlc/xgboost/blob/1d22a9be1cdeb53dfa9322c92541bc50e82f3c43/src/gbm/gblinear_model.h#L81-L82
https://github.com/dmlc/xgboost/blob/1d22a9be1cdeb53dfa9322c92541bc50e82f3c43/src/gbm/gblinear_model.h#L90-L91
But on the Python side they are loaded into a `double` numpy array:
https://github.com/dmlc/xgboost/blob/12110c900eff0aaa06045ecf717e6c5a36a164d5/python-package/xgboost/sklearn.py#L717-L718
https://github.com/dmlc/xgboost/blob/12110c900eff0aaa06045ecf717e6c5a36a164d5/python-package/xgboost/sklearn.py#L748
https://github.com/dmlc/xgboost/blob/12110c900eff0aaa06045ecf717e6c5a36a164d5/python-package/xgboost/sklearn.py#L748