From 65f8dbfa9311fa07da402a95c400f43a9b0fd02c Mon Sep 17 00:00:00 2001
From: Nikita Titov
Date: Mon, 6 Jul 2020 19:18:43 +0300
Subject: [PATCH] Support not only task-default objective functions in
 LightGBM (#221)

---
 m2cgen/assemblers/boosting.py              |  98 +++++++++-
 m2cgen/interpreters/haskell/log1p.hs       |  45 ++---
 m2cgen/interpreters/powershell/log1p.ps1   |   2 +-
 m2cgen/interpreters/visual_basic/log1p.bas |  32 ++--
 tests/assemblers/test_lightgbm.py          | 200 +++++++++++++++++++++
 tests/e2e/test_e2e.py                      |  35 ++++
 tests/interpreters/test_haskell.py         |  45 ++---
 tests/interpreters/test_powershell.py      |   2 +-
 tests/interpreters/test_visual_basic.py    |  32 ++--
 9 files changed, 405 insertions(+), 86 deletions(-)
diff --git a/m2cgen/assemblers/boosting.py b/m2cgen/assemblers/boosting.py
index 41cffccd..d9a4f549 100644
--- a/m2cgen/assemblers/boosting.py
+++ b/m2cgen/assemblers/boosting.py
@@ -13,7 +13,7 @@ class BaseBoostingAssembler(ModelAssembler):
     classifier_names = {}
     multiclass_params_seq_len = 1
 
-    def __init__(self, model, estimator_params, base_score=0):
+    def __init__(self, model, estimator_params, base_score=0.0):
         super().__init__(model)
         self._all_estimator_params = estimator_params
         self._base_score = base_score
@@ -36,11 +36,12 @@ def assemble(self):
             return self._assemble_multi_class_output(
                 self._all_estimator_params)
         else:
-            return self._assemble_single_output(
+            result_ast = self._assemble_single_output(
                 self._all_estimator_params, base_score=self._base_score)
+            return self._single_convert_output(result_ast)
 
     def _assemble_single_output(self, estimator_params,
-                                base_score=0, split_idx=0):
+                                base_score=0.0, split_idx=0):
         estimators_ast = self._assemble_estimators(estimator_params, split_idx)
 
         tmp_ast = utils.apply_op_to_expressions(
@@ -71,21 +72,21 @@ def _assemble_multi_class_output(self, estimator_params):
             for i, e in enumerate(splits)
         ]
 
-        proba_exprs = fallback_expressions.softmax(exprs)
+        proba_exprs = self._multi_class_convert_output(exprs)
         return ast.VectorVal(proba_exprs)
 
     def _assemble_bin_class_output(self, estimator_params):
         # Base score is calculated based on
         # https://github.com/dmlc/xgboost/blob/8de7f1928e4815843fbf8773a5ac7ecbc37b2e15/src/objective/regression_loss.h#L91
         # return -logf(1.0f / base_score - 1.0f);
-        base_score = 0
-        if self._base_score != 0:
+        base_score = 0.0
+        if self._base_score != 0.0:
             base_score = -math.log(1.0 / self._base_score - 1.0)
 
         expr = self._assemble_single_output(
             estimator_params, base_score=base_score)
 
-        proba_expr = fallback_expressions.sigmoid(expr, to_reuse=True)
+        proba_expr = self._bin_class_convert_output(expr)
 
         return ast.VectorVal([
             ast.BinNumExpr(ast.NumVal(1), proba_expr, ast.BinNumOpType.SUB),
@@ -95,13 +96,22 @@ def _final_transform(self, ast_to_transform):
         return ast_to_transform
 
+    def _multi_class_convert_output(self, exprs):
+        return fallback_expressions.softmax(exprs)
+
+    def _bin_class_convert_output(self, expr, to_reuse=True):
+        return fallback_expressions.sigmoid(expr, to_reuse=to_reuse)
+
+    def _single_convert_output(self, expr):
+        return expr
+
     def _assemble_estimators(self, estimator_params, split_idx):
         raise NotImplementedError
 
 
 class BaseTreeBoostingAssembler(BaseBoostingAssembler):
 
-    def __init__(self, model, trees, base_score=0, tree_limit=None):
+    def __init__(self, model, trees, base_score=0.0, tree_limit=None):
         super().__init__(model, trees, base_score=base_score)
         assert tree_limit is None or tree_limit > 0, "Unexpected tree limit"
         self._tree_limit = tree_limit
@@ -212,6 +222,9 @@ def __init__(self, model):
 
         self.n_iter = len(trees) // model_dump["num_tree_per_iteration"]
         self.average_output = model_dump.get("average_output", False)
+        self.objective_config_parts = model_dump.get(
+            "objective", "custom").split(" ")
+        self.objective_name = self.objective_config_parts[0]
 
         super().__init__(model, trees)
 
@@ -225,6 +238,75 @@ def _final_transform(self, ast_to_transform):
         else:
             return super()._final_transform(ast_to_transform)
 
+    def _multi_class_convert_output(self, exprs):
+        supported_objectives = {
+            "multiclass": super()._multi_class_convert_output,
+            "multiclassova": self._multi_class_sigmoid_transform,
+            "custom": super()._single_convert_output,
+        }
+        if self.objective_name not in supported_objectives:
+            raise ValueError(
+                f"Unsupported objective function '{self.objective_name}'")
+        return supported_objectives[self.objective_name](exprs)
+
+    def _multi_class_sigmoid_transform(self, exprs):
+        return [self._bin_class_sigmoid_transform(expr, to_reuse=False)
+                for expr in exprs]
+
+    def _bin_class_convert_output(self, expr, to_reuse=True):
+        supported_objectives = {
+            "binary": self._bin_class_sigmoid_transform,
+            "custom": super()._single_convert_output,
+        }
+        if self.objective_name not in supported_objectives:
+            raise ValueError(
+                f"Unsupported objective function '{self.objective_name}'")
+        return supported_objectives[self.objective_name](expr)
+
+    def _bin_class_sigmoid_transform(self, expr, to_reuse=True):
+        coef = 1.0
+        for config_part in self.objective_config_parts:
+            config_entry = config_part.split(":")
+            if config_entry[0] == "sigmoid":
+                coef = np.float64(config_entry[1])
+                break
+        return super()._bin_class_convert_output(
+            utils.mul(ast.NumVal(coef), expr) if coef != 1.0 else expr,
+            to_reuse=to_reuse)
+
+    def _single_convert_output(self, expr):
+        supported_objectives = {
+            "cross_entropy": fallback_expressions.sigmoid,
+            "cross_entropy_lambda": self._log1p_exp_transform,
+            "regression": self._maybe_sqr_transform,
+            "regression_l1": self._maybe_sqr_transform,
+            "huber": super()._single_convert_output,
+            "fair": self._maybe_sqr_transform,
+            "poisson": self._exp_transform,
+            "quantile": self._maybe_sqr_transform,
+            "mape": self._maybe_sqr_transform,
+            "gamma": self._exp_transform,
+            "tweedie": self._exp_transform,
+            "custom": super()._single_convert_output,
+        }
+        if self.objective_name not in supported_objectives:
+            raise ValueError(
+                f"Unsupported objective function '{self.objective_name}'")
+        return supported_objectives[self.objective_name](expr)
+
+    def _log1p_exp_transform(self, expr):
+        return ast.Log1pExpr(ast.ExpExpr(expr))
+
+    def _maybe_sqr_transform(self, expr):
+        if "sqrt" in self.objective_config_parts:
+            expr = ast.IdExpr(expr, to_reuse=True)
+            return utils.mul(ast.AbsExpr(expr), expr)
+        else:
+            return expr
+
+    def _exp_transform(self, expr):
+        return ast.ExpExpr(expr)
+
     def _assemble_tree(self, tree):
         if "leaf_value" in tree:
             return ast.NumVal(tree["leaf_value"])
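The new `*_convert_output` hooks attach LightGBM's inverse-link function to the raw
ensemble score. As a plain-NumPy reference for the math these hooks encode into AST
nodes (the function names below are illustrative, not part of m2cgen's API):

import numpy as np

def sigmoid(raw, coef=1.0):
    # "binary sigmoid:<coef>" in the model dump -> 1 / (1 + exp(-coef * raw))
    return 1.0 / (1.0 + np.exp(-coef * raw))

def log1p_exp(raw):
    # "cross_entropy_lambda" (alias xentlambda) -> log(1 + exp(raw))
    return np.log1p(np.exp(raw))

def exp_transform(raw):
    # "poisson", "gamma", "tweedie" -> exp(raw)
    return np.exp(raw)

def maybe_sqr(raw):
    # "sqrt" present in the objective config -> sign-preserving square,
    # abs(raw) * raw, matching _maybe_sqr_transform above
    return np.abs(raw) * raw

def logit(p):
    # the base-score inversion used in _assemble_bin_class_output:
    # -log(1/p - 1) is the logit, so sigmoid(logit(p)) == p
    return -np.log(1.0 / p - 1.0)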
diff --git a/m2cgen/interpreters/haskell/log1p.hs b/m2cgen/interpreters/haskell/log1p.hs
index 511fd03a..fa7b1873 100644
--- a/m2cgen/interpreters/haskell/log1p.hs
+++ b/m2cgen/interpreters/haskell/log1p.hs
@@ -12,28 +12,29 @@ log1p x
     m_epsilon = encodeFloat (signif + 1) expo - 1.0
         where (signif, expo) = decodeFloat (1.0::Double)
     x' = abs x
-    coeffs = [0.10378693562743769800686267719098e+1,
-              -0.13364301504908918098766041553133e+0,
-              0.19408249135520563357926199374750e-1,
-              -0.30107551127535777690376537776592e-2,
-              0.48694614797154850090456366509137e-3,
-              -0.81054881893175356066809943008622e-4,
-              0.13778847799559524782938251496059e-4,
-              -0.23802210894358970251369992914935e-5,
-              0.41640416213865183476391859901989e-6,
-              -0.73595828378075994984266837031998e-7,
-              0.13117611876241674949152294345011e-7,
-              -0.23546709317742425136696092330175e-8,
-              0.42522773276034997775638052962567e-9,
-              -0.77190894134840796826108107493300e-10,
-              0.14075746481359069909215356472191e-10,
-              -0.25769072058024680627537078627584e-11,
-              0.47342406666294421849154395005938e-12,
-              -0.87249012674742641745301263292675e-13,
-              0.16124614902740551465739833119115e-13,
-              -0.29875652015665773006710792416815e-14,
-              0.55480701209082887983041321697279e-15,
-              -0.10324619158271569595141333961932e-15]
+    coeffs = [
+        0.10378693562743769800686267719098e+1,
+        -0.13364301504908918098766041553133e+0,
+        0.19408249135520563357926199374750e-1,
+        -0.30107551127535777690376537776592e-2,
+        0.48694614797154850090456366509137e-3,
+        -0.81054881893175356066809943008622e-4,
+        0.13778847799559524782938251496059e-4,
+        -0.23802210894358970251369992914935e-5,
+        0.41640416213865183476391859901989e-6,
+        -0.73595828378075994984266837031998e-7,
+        0.13117611876241674949152294345011e-7,
+        -0.23546709317742425136696092330175e-8,
+        0.42522773276034997775638052962567e-9,
+        -0.77190894134840796826108107493300e-10,
+        0.14075746481359069909215356472191e-10,
+        -0.25769072058024680627537078627584e-11,
+        0.47342406666294421849154395005938e-12,
+        -0.87249012674742641745301263292675e-13,
+        0.16124614902740551465739833119115e-13,
+        -0.29875652015665773006710792416815e-14,
+        0.55480701209082887983041321697279e-15,
+        -0.10324619158271569595141333961932e-15]
     chebyshevBroucke i = fini . foldr step [0, 0, 0]
         where step k [b0, b1, _] = [(k + i * 2 * b0 - b1), b0, b1]
               fini [b0, _, b2] = (b0 - b2) * 0.5
diff --git a/m2cgen/interpreters/powershell/log1p.ps1 b/m2cgen/interpreters/powershell/log1p.ps1
index d4e61e36..e74d0393 100644
--- a/m2cgen/interpreters/powershell/log1p.ps1
+++ b/m2cgen/interpreters/powershell/log1p.ps1
@@ -4,7 +4,7 @@ function Log1p([double] $x) {
     if ($x -lt -1.0) { return [double]::NaN }
    [double] $xAbs = [math]::Abs($x)
     if ($xAbs -lt 0.5 * [double]::Epsilon) { return $x }
-    if ((($x -gt 0.0) -and ($x -lt 1e-8))
+    if ((($x -gt 0.0) -and ($x -lt 1e-8)) `
         -or (($x -gt -1e-9) -and ($x -lt 0.0))) {
         return $x * (1.0 - $x * 0.5)
     }
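Every target language ships the same log1p routine. For reference, a Python
transcription of the ported algorithm; this is a sketch, assuming the `fini` step of
chebyshevBroucke is the standard Clenshaw (b0 - b2) / 2 finisher and using machine
epsilon where the Haskell port computes m_epsilon:

import math
import sys

# The 22 Chebyshev series coefficients from the ports above.
_COEFFS = [
    0.10378693562743769800686267719098e+1, -0.13364301504908918098766041553133e+0,
    0.19408249135520563357926199374750e-1, -0.30107551127535777690376537776592e-2,
    0.48694614797154850090456366509137e-3, -0.81054881893175356066809943008622e-4,
    0.13778847799559524782938251496059e-4, -0.23802210894358970251369992914935e-5,
    0.41640416213865183476391859901989e-6, -0.73595828378075994984266837031998e-7,
    0.13117611876241674949152294345011e-7, -0.23546709317742425136696092330175e-8,
    0.42522773276034997775638052962567e-9, -0.77190894134840796826108107493300e-10,
    0.14075746481359069909215356472191e-10, -0.25769072058024680627537078627584e-11,
    0.47342406666294421849154395005938e-12, -0.87249012674742641745301263292675e-13,
    0.16124614902740551465739833119115e-13, -0.29875652015665773006710792416815e-14,
    0.55480701209082887983041321697279e-15, -0.10324619158271569595141333961932e-15]

def _chebyshev_broucke(t, coeffs):
    # Clenshaw recurrence, mirroring the `step` fold above; the (b0 - b2) / 2
    # finisher implements the usual c0/2 convention of the series.
    b0 = b1 = b2 = 0.0
    for c in reversed(coeffs):
        b0, b1, b2 = c + 2.0 * t * b0 - b1, b0, b1
    return (b0 - b2) * 0.5

def log1p(x):
    if x < -1.0:
        return float("nan")
    x_abs = abs(x)
    if x_abs < 0.5 * sys.float_info.epsilon:
        return x
    if (0.0 < x < 1e-8) or (-1e-9 < x < 0.0):
        # two-term Taylor expansion for tiny x
        return x * (1.0 - x * 0.5)
    if x_abs < 0.375:
        # Chebyshev series on the central range, as in the generated code
        return x * (1.0 - x * _chebyshev_broucke(x / 0.375, _COEFFS))
    return math.log(1.0 + x)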
diff --git a/m2cgen/interpreters/visual_basic/log1p.bas b/m2cgen/interpreters/visual_basic/log1p.bas
index a079c148..bef993cd 100644
--- a/m2cgen/interpreters/visual_basic/log1p.bas
+++ b/m2cgen/interpreters/visual_basic/log1p.bas
@@ -43,27 +43,27 @@ Function Log1p(ByVal x As Double) As Double
     End If
     If xAbs < 0.375 Then
         Dim coeffs(22) As Double
-        coeffs(0) = 0.10378693562743769800686267719098e+1 
-        coeffs(1) = -0.13364301504908918098766041553133e+0 
-        coeffs(2) = 0.19408249135520563357926199374750e-1 
-        coeffs(3) = -0.30107551127535777690376537776592e-2 
-        coeffs(4) = 0.48694614797154850090456366509137e-3 
-        coeffs(5) = -0.81054881893175356066809943008622e-4 
-        coeffs(6) = 0.13778847799559524782938251496059e-4 
-        coeffs(7) = -0.23802210894358970251369992914935e-5 
-        coeffs(8) = 0.41640416213865183476391859901989e-6 
-        coeffs(9) = -0.73595828378075994984266837031998e-7 
-        coeffs(10) = 0.13117611876241674949152294345011e-7 
+        coeffs(0) = 0.10378693562743769800686267719098e+1
+        coeffs(1) = -0.13364301504908918098766041553133e+0
+        coeffs(2) = 0.19408249135520563357926199374750e-1
+        coeffs(3) = -0.30107551127535777690376537776592e-2
+        coeffs(4) = 0.48694614797154850090456366509137e-3
+        coeffs(5) = -0.81054881893175356066809943008622e-4
+        coeffs(6) = 0.13778847799559524782938251496059e-4
+        coeffs(7) = -0.23802210894358970251369992914935e-5
+        coeffs(8) = 0.41640416213865183476391859901989e-6
+        coeffs(9) = -0.73595828378075994984266837031998e-7
+        coeffs(10) = 0.13117611876241674949152294345011e-7
         coeffs(11) = -0.23546709317742425136696092330175e-8
-        coeffs(12) = 0.42522773276034997775638052962567e-9 
+        coeffs(12) = 0.42522773276034997775638052962567e-9
         coeffs(13) = -0.77190894134840796826108107493300e-10
-        coeffs(14) = 0.14075746481359069909215356472191e-10 
+        coeffs(14) = 0.14075746481359069909215356472191e-10
         coeffs(15) = -0.25769072058024680627537078627584e-11
-        coeffs(16) = 0.47342406666294421849154395005938e-12 
+        coeffs(16) = 0.47342406666294421849154395005938e-12
         coeffs(17) = -0.87249012674742641745301263292675e-13
-        coeffs(18) = 0.16124614902740551465739833119115e-13 
+        coeffs(18) = 0.16124614902740551465739833119115e-13
         coeffs(19) = -0.29875652015665773006710792416815e-14
-        coeffs(20) = 0.55480701209082887983041321697279e-15 
+        coeffs(20) = 0.55480701209082887983041321697279e-15
         coeffs(21) = -0.10324619158271569595141333961932e-15
         Log1p = x * (1.0 - x * ChebyshevBroucke(x / 0.375, coeffs))
         Exit Function
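The assembler tests that follow pin down the exact AST each objective yields. The
reg_sqrt case deserves a note: with reg_sqrt the model is fit to sqrt-scale targets,
so the raw score must be squared with its sign kept, which the expected expression in
test_maybe_sqr_output_transform writes as abs(r) * r. In plain Python (an illustrative
helper, not m2cgen API):

def undo_reg_sqrt(raw_score):
    # sign-preserving square: abs(r) * r == sign(r) * r**2
    return abs(raw_score) * raw_score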
diff --git a/tests/assemblers/test_lightgbm.py b/tests/assemblers/test_lightgbm.py
index 137622ef..3b6ad20d 100644
--- a/tests/assemblers/test_lightgbm.py
+++ b/tests/assemblers/test_lightgbm.py
@@ -131,3 +131,203 @@ def test_regression_random_forest():
         ast.BinNumOpType.MUL)
 
     assert utils.cmp_exprs(actual, expected)
+
+
+def test_simple_sigmoid_output_transform():
+    estimator = lightgbm.LGBMRegressor(n_estimators=2, random_state=1,
+                                       max_depth=1, objective="cross_entropy")
+    utils.get_bounded_regression_model_trainer()(estimator)
+
+    assembler = assemblers.LightGBMModelAssembler(estimator)
+    actual = assembler.assemble()
+
+    expected = ast.BinNumExpr(
+        ast.NumVal(1),
+        ast.BinNumExpr(
+            ast.NumVal(1),
+            ast.ExpExpr(
+                ast.BinNumExpr(
+                    ast.NumVal(0),
+                    ast.BinNumExpr(
+                        ast.IfExpr(
+                            ast.CompExpr(
+                                ast.FeatureRef(12),
+                                ast.NumVal(19.23),
+                                ast.CompOpType.GT),
+                            ast.NumVal(4.0026305187),
+                            ast.NumVal(4.0880438137)),
+                        ast.IfExpr(
+                            ast.CompExpr(
+                                ast.FeatureRef(12),
+                                ast.NumVal(14.895),
+                                ast.CompOpType.GT),
+                            ast.NumVal(-0.0412703078),
+                            ast.NumVal(0.0208393767)),
+                        ast.BinNumOpType.ADD),
+                    ast.BinNumOpType.SUB)),
+            ast.BinNumOpType.ADD),
+        ast.BinNumOpType.DIV)
+
+    assert utils.cmp_exprs(actual, expected)
+
+
+def test_log1p_exp_output_transform():
+    estimator = lightgbm.LGBMRegressor(n_estimators=2, random_state=1,
+                                       max_depth=1,
+                                       objective="cross_entropy_lambda")
+    utils.get_bounded_regression_model_trainer()(estimator)
+
+    assembler = assemblers.LightGBMModelAssembler(estimator)
+    actual = assembler.assemble()
+
+    expected = ast.Log1pExpr(
+        ast.ExpExpr(
+            ast.BinNumExpr(
+                ast.IfExpr(
+                    ast.CompExpr(
+                        ast.FeatureRef(12),
+                        ast.NumVal(19.23),
+                        ast.CompOpType.GT),
+                    ast.NumVal(0.6623502468),
+                    ast.NumVal(0.6683497987)),
+                ast.IfExpr(
+                    ast.CompExpr(
+                        ast.FeatureRef(12),
+                        ast.NumVal(15.145),
+                        ast.CompOpType.GT),
+                    ast.NumVal(0.1405181490),
+                    ast.NumVal(0.1453602134)),
+                ast.BinNumOpType.ADD)))
+
+    assert utils.cmp_exprs(actual, expected)
+
+
+def test_maybe_sqr_output_transform():
+    estimator = lightgbm.LGBMRegressor(n_estimators=2, random_state=1,
+                                       max_depth=1, reg_sqrt=True,
+                                       objective="regression_l1")
+    utils.get_regression_model_trainer()(estimator)
+
+    assembler = assemblers.LightGBMModelAssembler(estimator)
+    actual = assembler.assemble()
+
+    raw_output = ast.IdExpr(
+        ast.BinNumExpr(
+            ast.IfExpr(
+                ast.CompExpr(
+                    ast.FeatureRef(12),
+                    ast.NumVal(9.905),
+                    ast.CompOpType.GT),
+                ast.NumVal(4.5658116817),
+                ast.NumVal(4.6620790482)),
+            ast.IfExpr(
+                ast.CompExpr(
+                    ast.FeatureRef(12),
+                    ast.NumVal(9.77),
+                    ast.CompOpType.GT),
+                ast.NumVal(-0.0340889740),
+                ast.NumVal(0.0543687153)),
+            ast.BinNumOpType.ADD),
+        to_reuse=True)
+
+    expected = ast.BinNumExpr(
+        ast.AbsExpr(raw_output),
+        raw_output,
+        ast.BinNumOpType.MUL)
+
+    assert utils.cmp_exprs(actual, expected)
+
+
+def test_exp_output_transform():
+    estimator = lightgbm.LGBMRegressor(n_estimators=2, random_state=1,
+                                       max_depth=1, objective="poisson")
+    utils.get_regression_model_trainer()(estimator)
+
+    assembler = assemblers.LightGBMModelAssembler(estimator)
+    actual = assembler.assemble()
+
+    expected = ast.ExpExpr(
+        ast.BinNumExpr(
+            ast.IfExpr(
+                ast.CompExpr(
+                    ast.FeatureRef(5),
+                    ast.NumVal(6.918),
+                    ast.CompOpType.GT),
+                ast.NumVal(3.1480683932),
+                ast.NumVal(3.1101554907)),
+            ast.IfExpr(
+                ast.CompExpr(
+                    ast.FeatureRef(12),
+                    ast.NumVal(9.63),
+                    ast.CompOpType.GT),
+                ast.NumVal(-0.0111969636),
+                ast.NumVal(0.0160298303)),
+            ast.BinNumOpType.ADD))
+
+    assert utils.cmp_exprs(actual, expected)
+
+
+def test_bin_class_sigmoid_output_transform():
+    estimator = lightgbm.LGBMClassifier(n_estimators=1, random_state=1,
+                                        max_depth=1, sigmoid=0.5)
+    utils.get_binary_classification_model_trainer()(estimator)
+
+    assembler = assemblers.LightGBMModelAssembler(estimator)
+    actual = assembler.assemble()
+
+    sigmoid = ast.BinNumExpr(
+        ast.NumVal(1),
+        ast.BinNumExpr(
+            ast.NumVal(1),
+            ast.ExpExpr(
+                ast.BinNumExpr(
+                    ast.NumVal(0),
+                    ast.BinNumExpr(
+                        ast.NumVal(0.5),
+                        ast.IfExpr(
+                            ast.CompExpr(
+                                ast.FeatureRef(23),
+                                ast.NumVal(868.2),
+                                ast.CompOpType.GT),
+                            ast.NumVal(0.5197386243),
+                            ast.NumVal(1.2474356828)),
+                        ast.BinNumOpType.MUL),
+                    ast.BinNumOpType.SUB)),
+            ast.BinNumOpType.ADD),
+        ast.BinNumOpType.DIV,
+        to_reuse=True)
+
+    expected = ast.VectorVal([
+        ast.BinNumExpr(ast.NumVal(1), sigmoid, ast.BinNumOpType.SUB),
+        sigmoid])
+
+    assert utils.cmp_exprs(actual, expected)
+
+
+def test_multi_class_sigmoid_output_transform():
+    estimator = lightgbm.LGBMClassifier(n_estimators=1, random_state=1,
+                                        max_depth=1, sigmoid=0.5,
+                                        objective="ovr")
+    estimator.fit(np.array([[1], [2], [3]]), np.array([1, 2, 3]))
+
+    assembler = assemblers.LightGBMModelAssembler(estimator)
+    actual = assembler.assemble()
+
+    sigmoid = ast.BinNumExpr(
+        ast.NumVal(1),
+        ast.BinNumExpr(
+            ast.NumVal(1),
+            ast.ExpExpr(
+                ast.BinNumExpr(
+                    ast.NumVal(0),
+                    ast.BinNumExpr(
+                        ast.NumVal(0.5),
+                        ast.NumVal(-1.3862943611),
+                        ast.BinNumOpType.MUL),
+                    ast.BinNumOpType.SUB)),
+            ast.BinNumOpType.ADD),
+        ast.BinNumOpType.DIV)
+
+    expected = ast.VectorVal([sigmoid] * 3)
+
+    assert utils.cmp_exprs(actual, expected)
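A quick plain-Python check of the sigmoid arithmetic the tests above expect: in
test_multi_class_sigmoid_output_transform, each of the three classes shares the raw
score -1.3862943611 with sigmoid=0.5, so every per-class probability is exactly 1/3:

import math

# 1 / (1 + exp(0 - 0.5 * -1.3862943611)) = 1 / (1 + exp(0.6931...)) = 1 / 3
p = 1.0 / (1.0 + math.exp(0.0 - 0.5 * -1.3862943611))
assert abs(p - 1.0 / 3.0) < 1e-9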
diff --git a/tests/e2e/test_e2e.py b/tests/e2e/test_e2e.py
index 1ed8571c..d559c13a 100644
--- a/tests/e2e/test_e2e.py
+++ b/tests/e2e/test_e2e.py
@@ -184,6 +184,41 @@ def regression_bounded(model, test_fraction=0.02):
     classification_binary_random(
         lightgbm.LGBMClassifier(**LIGHTGBM_PARAMS_LARGE)),
 
+    # LightGBM (Different Objectives)
+    regression(lightgbm.LGBMRegressor(
+        **LIGHTGBM_PARAMS, objective="mse", reg_sqrt=True)),
+    regression(lightgbm.LGBMRegressor(
+        **LIGHTGBM_PARAMS, objective="mae")),
+    regression(lightgbm.LGBMRegressor(
+        **LIGHTGBM_PARAMS, objective="huber", alpha=0.5)),
+    regression(lightgbm.LGBMRegressor(
+        **LIGHTGBM_PARAMS, objective="fair", fair_c=0.5)),
+    regression(lightgbm.LGBMRegressor(
+        **LIGHTGBM_PARAMS, objective="poisson",
+        poisson_max_delta_step=0.2)),
+    regression(lightgbm.LGBMRegressor(
+        **LIGHTGBM_PARAMS, objective="quantile", alpha=0.2)),
+    regression(lightgbm.LGBMRegressor(
+        **LIGHTGBM_PARAMS, objective="mape")),
+    regression(lightgbm.LGBMRegressor(
+        **LIGHTGBM_PARAMS, objective="gamma")),
+    regression(lightgbm.LGBMRegressor(
+        **LIGHTGBM_PARAMS, objective="tweedie",
+        tweedie_variance_power=1.7)),
+    regression(lightgbm.LGBMRegressor(
+        **LIGHTGBM_PARAMS,
+        objective=lambda _, __: (
+            np.ones(len(utils.get_regression_model_trainer().y_train)),
+            np.ones(len(utils.get_regression_model_trainer().y_train))))),
+    regression_bounded(lightgbm.LGBMRegressor(
+        **LIGHTGBM_PARAMS, objective="xentropy")),
+    regression_bounded(lightgbm.LGBMRegressor(
+        **LIGHTGBM_PARAMS, objective="xentlambda")),
+    classification(lightgbm.LGBMClassifier(
+        **LIGHTGBM_PARAMS, objective="ovr", sigmoid=0.5)),
+    classification_binary(lightgbm.LGBMClassifier(
+        **LIGHTGBM_PARAMS, sigmoid=1.5)),
+
     # XGBoost
     regression(xgboost.XGBRegressor(**XGBOOST_PARAMS)),
     classification(xgboost.XGBClassifier(**XGBOOST_PARAMS)),
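One of the regression entries above passes a callable objective. LightGBM's
scikit-learn API accepts any (y_true, y_pred) -> (grad, hess) callable; the model dump
then reports the objective as "custom", which the assembler maps to the identity
transform. A standalone sketch equivalent in shape to that lambda (illustrative, not
part of the test suite):

import numpy as np

def unit_grad_hess(y_true, y_pred):
    # Gradient and hessian of an (uninteresting) loss: all ones. Any callable
    # with this signature makes the dumped objective read "custom", so m2cgen
    # emits the untransformed raw score.
    n = len(y_true)
    return np.ones(n), np.ones(n)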
diff --git a/tests/interpreters/test_haskell.py b/tests/interpreters/test_haskell.py
index 5b41e666..0e14dcb3 100644
--- a/tests/interpreters/test_haskell.py
+++ b/tests/interpreters/test_haskell.py
@@ -318,28 +318,29 @@ def test_log1p_expr():
     m_epsilon = encodeFloat (signif + 1) expo - 1.0
         where (signif, expo) = decodeFloat (1.0::Double)
     x' = abs x
-    coeffs = [0.10378693562743769800686267719098e+1,
-              -0.13364301504908918098766041553133e+0,
-              0.19408249135520563357926199374750e-1,
-              -0.30107551127535777690376537776592e-2,
-              0.48694614797154850090456366509137e-3,
-              -0.81054881893175356066809943008622e-4,
-              0.13778847799559524782938251496059e-4,
-              -0.23802210894358970251369992914935e-5,
-              0.41640416213865183476391859901989e-6,
-              -0.73595828378075994984266837031998e-7,
-              0.13117611876241674949152294345011e-7,
-              -0.23546709317742425136696092330175e-8,
-              0.42522773276034997775638052962567e-9,
-              -0.77190894134840796826108107493300e-10,
-              0.14075746481359069909215356472191e-10,
-              -0.25769072058024680627537078627584e-11,
-              0.47342406666294421849154395005938e-12,
-              -0.87249012674742641745301263292675e-13,
-              0.16124614902740551465739833119115e-13,
-              -0.29875652015665773006710792416815e-14,
-              0.55480701209082887983041321697279e-15,
-              -0.10324619158271569595141333961932e-15]
+    coeffs = [
+        0.10378693562743769800686267719098e+1,
+        -0.13364301504908918098766041553133e+0,
+        0.19408249135520563357926199374750e-1,
+        -0.30107551127535777690376537776592e-2,
+        0.48694614797154850090456366509137e-3,
+        -0.81054881893175356066809943008622e-4,
+        0.13778847799559524782938251496059e-4,
+        -0.23802210894358970251369992914935e-5,
+        0.41640416213865183476391859901989e-6,
+        -0.73595828378075994984266837031998e-7,
+        0.13117611876241674949152294345011e-7,
+        -0.23546709317742425136696092330175e-8,
+        0.42522773276034997775638052962567e-9,
+        -0.77190894134840796826108107493300e-10,
+        0.14075746481359069909215356472191e-10,
+        -0.25769072058024680627537078627584e-11,
+        0.47342406666294421849154395005938e-12,
+        -0.87249012674742641745301263292675e-13,
+        0.16124614902740551465739833119115e-13,
+        -0.29875652015665773006710792416815e-14,
+        0.55480701209082887983041321697279e-15,
+        -0.10324619158271569595141333961932e-15]
     chebyshevBroucke i = fini . foldr step [0, 0, 0]
         where step k [b0, b1, _] = [(k + i * 2 * b0 - b1), b0, b1]
               fini [b0, _, b2] = (b0 - b2) * 0.5
diff --git a/tests/interpreters/test_powershell.py b/tests/interpreters/test_powershell.py
index 7d4b7dbd..5df065c5 100644
--- a/tests/interpreters/test_powershell.py
+++ b/tests/interpreters/test_powershell.py
@@ -319,7 +319,7 @@ def test_log1p_expr():
     if ($x -lt -1.0) { return [double]::NaN }
     [double] $xAbs = [math]::Abs($x)
     if ($xAbs -lt 0.5 * [double]::Epsilon) { return $x }
-    if ((($x -gt 0.0) -and ($x -lt 1e-8))
+    if ((($x -gt 0.0) -and ($x -lt 1e-8)) `
         -or (($x -gt -1e-9) -and ($x -lt 0.0))) {
         return $x * (1.0 - $x * 0.5)
     }
diff --git a/tests/interpreters/test_visual_basic.py b/tests/interpreters/test_visual_basic.py
index c277a536..1db50d6f 100644
--- a/tests/interpreters/test_visual_basic.py
+++ b/tests/interpreters/test_visual_basic.py
@@ -475,27 +475,27 @@ def test_log1p_expr():
     End If
     If xAbs < 0.375 Then
         Dim coeffs(22) As Double
-        coeffs(0) = 0.10378693562743769800686267719098e+1 
-        coeffs(1) = -0.13364301504908918098766041553133e+0 
-        coeffs(2) = 0.19408249135520563357926199374750e-1 
-        coeffs(3) = -0.30107551127535777690376537776592e-2 
-        coeffs(4) = 0.48694614797154850090456366509137e-3 
-        coeffs(5) = -0.81054881893175356066809943008622e-4 
-        coeffs(6) = 0.13778847799559524782938251496059e-4 
-        coeffs(7) = -0.23802210894358970251369992914935e-5 
-        coeffs(8) = 0.41640416213865183476391859901989e-6 
-        coeffs(9) = -0.73595828378075994984266837031998e-7 
-        coeffs(10) = 0.13117611876241674949152294345011e-7 
+        coeffs(0) = 0.10378693562743769800686267719098e+1
+        coeffs(1) = -0.13364301504908918098766041553133e+0
+        coeffs(2) = 0.19408249135520563357926199374750e-1
+        coeffs(3) = -0.30107551127535777690376537776592e-2
+        coeffs(4) = 0.48694614797154850090456366509137e-3
+        coeffs(5) = -0.81054881893175356066809943008622e-4
+        coeffs(6) = 0.13778847799559524782938251496059e-4
+        coeffs(7) = -0.23802210894358970251369992914935e-5
+        coeffs(8) = 0.41640416213865183476391859901989e-6
+        coeffs(9) = -0.73595828378075994984266837031998e-7
+        coeffs(10) = 0.13117611876241674949152294345011e-7
         coeffs(11) = -0.23546709317742425136696092330175e-8
-        coeffs(12) = 0.42522773276034997775638052962567e-9 
+        coeffs(12) = 0.42522773276034997775638052962567e-9
         coeffs(13) = -0.77190894134840796826108107493300e-10
-        coeffs(14) = 0.14075746481359069909215356472191e-10 
+        coeffs(14) = 0.14075746481359069909215356472191e-10
         coeffs(15) = -0.25769072058024680627537078627584e-11
-        coeffs(16) = 0.47342406666294421849154395005938e-12 
+        coeffs(16) = 0.47342406666294421849154395005938e-12
         coeffs(17) = -0.87249012674742641745301263292675e-13
-        coeffs(18) = 0.16124614902740551465739833119115e-13 
+        coeffs(18) = 0.16124614902740551465739833119115e-13
         coeffs(19) = -0.29875652015665773006710792416815e-14
-        coeffs(20) = 0.55480701209082887983041321697279e-15 
+        coeffs(20) = 0.55480701209082887983041321697279e-15
         coeffs(21) = -0.10324619158271569595141333961932e-15
         Log1p = x * (1.0 - x * ChebyshevBroucke(x / 0.375, coeffs))
         Exit Function
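Independent of the generated code, the transform table can be sanity-checked directly
against LightGBM, since predict() applies the inverse link that predict(raw_score=True)
omits. A sketch of such a check (illustrative only, not part of the test suite):

import lightgbm
import numpy as np

rng = np.random.RandomState(1)
X, y = rng.rand(100, 3), rng.rand(100)  # nonnegative targets, valid for poisson

# For a transformed objective, predict() should equal the inverse-link
# function applied to the raw margin, e.g. exp() for "poisson".
model = lightgbm.LGBMRegressor(
    n_estimators=2, max_depth=1, objective="poisson").fit(X, y)
raw = model.predict(X, raw_score=True)
assert np.allclose(np.exp(raw), model.predict(X))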