From 15c6b37a21f8e0e80b26891d5c9d670527c9de1b Mon Sep 17 00:00:00 2001
From: alvinthai
Date: Fri, 17 Nov 2017 14:35:48 -0800
Subject: [PATCH] renamed FRI_MAP and FRD_MAP to FRI_MAD and FRD_MAD

---
 auto_ml/predictor.py | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/auto_ml/predictor.py b/auto_ml/predictor.py
index eb87a17..cf045ac 100644
--- a/auto_ml/predictor.py
+++ b/auto_ml/predictor.py
@@ -941,7 +941,7 @@ def create_feature_responses(self, model, X_transformed, y, top_features=None):
             col_result['FRI_abs'] = np.mean(absolute_prediction_deltas)

             median_prediction = np.median(absolute_prediction_deltas)
-            col_result['FRI_MAP'] = median_prediction
+            col_result['FRI_MAD'] = median_prediction

             X[:, col_idx] -= 2 * col_delta

@@ -960,7 +960,7 @@ def create_feature_responses(self, model, X_transformed, y, top_features=None):
             col_result['FRD_abs'] = np.mean(absolute_prediction_deltas)

             median_prediction = np.median(absolute_prediction_deltas)
-            col_result['FRD_MAP'] = median_prediction
+            col_result['FRD_MAD'] = median_prediction

             # Put the column back to it's original state
             X[:, col_idx] += col_delta
@@ -1003,7 +1003,7 @@ def print_results(self, model_name, model, X, y):
             feature_responses = feature_responses.reset_index(drop=True)
             feature_responses = feature_responses.head(n=100)
             feature_responses = feature_responses.sort_values(by='FR_Incrementing_abs', ascending=True)
-            feature_responses = feature_responses[['Feature Name', 'Delta', 'FR_Decrementing', 'FR_Incrementing', 'FRD_MAP', 'FRI_MAP']]
+            feature_responses = feature_responses[['Feature Name', 'Delta', 'FR_Decrementing', 'FR_Incrementing', 'FRD_MAD', 'FRI_MAD']]
             print('Here are our feature responses for the trained model')
             print(tabulate(feature_responses, headers='keys', floatfmt='.4f', tablefmt='psql'))

@@ -1455,7 +1455,7 @@ def _join_and_print_analytics_results(self, df_feature_responses, df_features, s

         # Sort by coefficients or feature importances
         df_results = df_results.sort_values(by=sort_field, ascending=False)
-        df_results = df_results[['Feature Name', sort_field, 'Delta', 'FR_Decrementing', 'FR_Incrementing', 'FRD_abs', 'FRI_abs', 'FRD_MAP', 'FRI_MAP']]
+        df_results = df_results[['Feature Name', sort_field, 'Delta', 'FR_Decrementing', 'FR_Incrementing', 'FRD_abs', 'FRI_abs', 'FRD_MAD', 'FRI_MAD']]
         df_results = df_results.reset_index(drop=True)
         df_results = df_results.head(n=100)
         df_results = df_results.sort_values(by=sort_field, ascending=True)
@@ -1509,7 +1509,7 @@ def _print_ml_analytics_results_random_forest(self, trained_model_for_analytics)
         except AttributeError as e:
             try:
                 # There was a version of LightGBM that had this misnamed to miss the "s" at the end
-                trained_feature_importances = final_model_obj.model.feature_importance_
+                trained_feature_importances = final_model_obj.model.feature_importance_
             except AttributeError as e:
                 # There is a version of XGBoost does not have feature_importance_
                 imp_vals = final_model_obj.model.get_booster().get_fscore()
@@ -1920,5 +1920,3 @@ def _train_ensemble(self, X_train, y_train):

         # ensembler will be added to pipeline later back inside main train section
         self.trained_final_model = ensembler
-
-