From 1c389583ab1a80cffda8321e1a69d42154169bfb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Rafa=C5=82=20Jankowski?=
Date: Tue, 6 Jul 2021 20:53:16 +0200
Subject: [PATCH] Exception for providing Run instead of Experiment (#127)

---
 neptunecontrib/monitoring/exceptions.py | 37 +++++++++++++++++++++++++
 neptunecontrib/monitoring/fairness.py   |  3 +-
 neptunecontrib/monitoring/fastai.py     |  5 ++++
 neptunecontrib/monitoring/keras.py      |  4 +++
 neptunecontrib/monitoring/kerastuner.py |  4 +++
 neptunecontrib/monitoring/lightgbm.py   |  3 ++
 neptunecontrib/monitoring/metrics.py    | 28 ++++++++++++++++++-
 neptunecontrib/monitoring/optuna.py     |  5 ++++
 neptunecontrib/monitoring/sklearn.py    |  3 ++
 neptunecontrib/monitoring/skopt.py      | 13 ++++++++-
 neptunecontrib/monitoring/utils.py      |  6 ++++
 neptunecontrib/monitoring/xgboost.py    |  4 +++
 12 files changed, 112 insertions(+), 3 deletions(-)
 create mode 100644 neptunecontrib/monitoring/exceptions.py

diff --git a/neptunecontrib/monitoring/exceptions.py b/neptunecontrib/monitoring/exceptions.py
new file mode 100644
index 0000000..e8d7c5d
--- /dev/null
+++ b/neptunecontrib/monitoring/exceptions.py
@@ -0,0 +1,37 @@
+#
+# Copyright (c) 2021, Neptune Labs Sp. z o.o.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from neptune.exceptions import NeptuneException, STYLES
+
+
+class NeptuneLegacyIncompatibilityException(NeptuneException):
+    def __init__(self):
+        message = """
+{h1}
+----NeptuneLegacyIncompatibilityException----------------------------------------
+{end}
+It seems you are passing a Run object to a legacy integration which expects an Experiment object.
+
+What can I do?
+    - Update your code to use the updated integration:
+      https://docs.neptune.ai/integrations-and-supported-tools/intro
+    - If you prefer to use the legacy integrations, you can find examples of how to use them here:
+      https://docs-legacy.neptune.ai/integrations/index.html
+
+{correct}Need help?{end}-> https://docs.neptune.ai/getting-started/getting-help
+"""
+        inputs = dict(list({}.items()) + list(STYLES.items()))
+        super().__init__(message.format(**inputs))
diff --git a/neptunecontrib/monitoring/fairness.py b/neptunecontrib/monitoring/fairness.py
index 6c30be5..7afc1f7 100644
--- a/neptunecontrib/monitoring/fairness.py
+++ b/neptunecontrib/monitoring/fairness.py
@@ -21,7 +21,7 @@
 import matplotlib.pyplot as plt
 import seaborn as sns
 
-from neptunecontrib.monitoring.utils import send_figure
+from neptunecontrib.monitoring.utils import send_figure, expect_not_a_run
 
 
 def log_fairness_classification_metrics(y_true, y_pred_class, y_pred_score, sensitive_attributes,
@@ -78,6 +78,7 @@ def log_fairness_classification_metrics(y_true, y_pred_class, y_pred_score, sens
     """
     _exp = experiment if experiment else neptune
+    expect_not_a_run(_exp)
 
     bias_info = {'favorable_label': favorable_label,
                  'unfavorable_label': unfavorable_label,
diff --git a/neptunecontrib/monitoring/fastai.py b/neptunecontrib/monitoring/fastai.py
index 4e3514c..913914d 100644
--- a/neptunecontrib/monitoring/fastai.py
+++ b/neptunecontrib/monitoring/fastai.py
@@ -18,6 +18,8 @@
 
 import neptune
 
+from neptunecontrib.monitoring.utils import expect_not_a_run
+
 if sys.version_info[0] == 3 and sys.version_info[1] >= 6:
     from fastai.basic_train import LearnerCallback
 else:
@@ -79,6 +81,9 @@ class NeptuneMonitor(LearnerCallback):
     def __init__(self, learn=None, experiment=None, prefix=''):
         self._exp = experiment if experiment else neptune
         self._prefix = prefix
+
+        expect_not_a_run(self._exp)
+
         if learn is not None:
             super().__init__(learn)
diff --git a/neptunecontrib/monitoring/keras.py b/neptunecontrib/monitoring/keras.py
index d463dc9..b166f79 100644
--- a/neptunecontrib/monitoring/keras.py
+++ b/neptunecontrib/monitoring/keras.py
@@ -34,6 +34,8 @@
 pip install tensorflow"""
         raise ModuleNotFoundError(msg)  # pylint:disable=undefined-variable
 
+from neptunecontrib.monitoring.utils import expect_not_a_run
+
 
 class NeptuneMonitor(Callback):
     """Logs Keras metrics to Neptune.
@@ -90,6 +92,8 @@ def __init__(self, experiment=None, prefix=''):
         self._exp = experiment if experiment else neptune
         self._prefix = prefix
 
+        expect_not_a_run(self._exp)
+
     def _log_metrics(self, logs, trigger):
         if not logs:
             return
diff --git a/neptunecontrib/monitoring/kerastuner.py b/neptunecontrib/monitoring/kerastuner.py
index 6244c25..153c52b 100644
--- a/neptunecontrib/monitoring/kerastuner.py
+++ b/neptunecontrib/monitoring/kerastuner.py
@@ -17,6 +17,8 @@
 
 from kerastuner.engine.logger import Logger
 
+from neptunecontrib.monitoring.utils import expect_not_a_run
+
 
 class NeptuneLogger(Logger):
     """Logs hyperparameter optimization process to Neptune.
@@ -58,6 +60,8 @@ class NeptuneLogger(Logger):
 
     def __init__(self, experiment=None):
         self.exp = experiment if experiment else neptune
 
+        expect_not_a_run(self.exp)
+
     def report_trial_state(self, trial_id, trial_state):
         """Gives the logger information about trial status."""
diff --git a/neptunecontrib/monitoring/lightgbm.py b/neptunecontrib/monitoring/lightgbm.py
index a023e20..b8a3d71 100644
--- a/neptunecontrib/monitoring/lightgbm.py
+++ b/neptunecontrib/monitoring/lightgbm.py
@@ -16,6 +16,8 @@
 
 import neptune
 
+from neptunecontrib.monitoring.utils import expect_not_a_run
+
 
 def neptune_monitor(experiment=None, prefix=''):
     """Logs lightGBM learning curves to Neptune.
@@ -78,6 +80,7 @@ def neptune_monitor(experiment=None, prefix=''):
     """
     _exp = experiment if experiment else neptune
+    expect_not_a_run(_exp)
 
     def callback(env):
         for name, loss_name, loss_value, _ in env.evaluation_result_list:
diff --git a/neptunecontrib/monitoring/metrics.py b/neptunecontrib/monitoring/metrics.py
index b7c96a6..dbf0dca 100644
--- a/neptunecontrib/monitoring/metrics.py
+++ b/neptunecontrib/monitoring/metrics.py
@@ -15,7 +15,7 @@
 #
 import matplotlib.pyplot as plt
 import neptune
-from neptunecontrib.monitoring.utils import send_figure
+from neptunecontrib.monitoring.utils import send_figure, expect_not_a_run
 import numpy as np
 import pandas as pd
 import scikitplot.metrics as plt_metrics
@@ -78,6 +78,8 @@ def log_binary_classification_metrics(y_true, y_pred, threshold=0.5, experiment=
 
     _exp = experiment if experiment else neptune
 
+    expect_not_a_run(_exp)
+
     log_confusion_matrix(y_true, y_pred[:, 1] > threshold, experiment=_exp, prefix=prefix)
     log_classification_report(y_true, y_pred[:, 1] > threshold, experiment=_exp, prefix=prefix)
     log_class_metrics(y_true, y_pred[:, 1] > threshold, experiment=_exp, prefix=prefix)
@@ -134,6 +136,8 @@ def log_confusion_matrix(y_true, y_pred_class, experiment=None, channel_name='me
 
     _exp = experiment if experiment else neptune
 
+    expect_not_a_run(_exp)
+
     fig, ax = plt.subplots()
     _plot_confusion_matrix(y_true, y_pred_class, ax=ax)
     send_figure(fig, channel_name=prefix + channel_name, experiment=_exp)
@@ -182,6 +186,8 @@ def log_classification_report(y_true, y_pred_class, experiment=None, channel_nam
 
     _exp = experiment if experiment else neptune
 
+    expect_not_a_run(_exp)
+
     fig = _plot_classification_report(y_true, y_pred_class)
     send_figure(fig, channel_name=prefix + channel_name, experiment=_exp)
     plt.close()
@@ -232,6 +238,8 @@ def log_class_metrics(y_true, y_pred_class, experiment=None, prefix=''):
 
     _exp = experiment if experiment else neptune
 
+    expect_not_a_run(_exp)
+
     scores = _class_metrics(y_true, y_pred_class)
     for metric_name, score in scores.items():
         _exp.log_metric(prefix + metric_name, score)
@@ -283,6 +291,8 @@ def log_class_metrics_by_threshold(y_true, y_pred_pos, experiment=None, channel_
 
     _exp = experiment if experiment else neptune
 
+    expect_not_a_run(_exp)
+
     figs = _plot_class_metrics_by_threshold(y_true, y_pred_pos)
 
     for fig in figs:
@@ -332,6 +342,8 @@ def log_roc_auc(y_true, y_pred, experiment=None, channel_name='metric_charts', p
 
     _exp = experiment if experiment else neptune
 
+    expect_not_a_run(_exp)
+
     roc_auc = sk_metrics.roc_auc_score(y_true, y_pred[:, 1])
     _exp.log_metric(prefix + 'roc_auc', roc_auc)
 
@@ -383,6 +395,8 @@ def log_precision_recall_auc(y_true, y_pred, experiment=None, channel_name='metr
 
     _exp = experiment if experiment else neptune
 
+    expect_not_a_run(_exp)
+
     avg_precision = sk_metrics.average_precision_score(y_true, y_pred[:, 1])
     _exp.log_metric(prefix + 'avg_precision', avg_precision)
@@ -433,6 +447,8 @@ def log_brier_loss(y_true, y_pred_pos, experiment=None, prefix=''):
 
     _exp = experiment if experiment else neptune
 
+    expect_not_a_run(_exp)
+
     brier = sk_metrics.brier_score_loss(y_true, y_pred_pos)
     _exp.log_metric(prefix + 'brier_loss', brier)
 
@@ -478,6 +494,8 @@ def log_log_loss(y_true, y_pred, experiment=None, prefix=''):
 
     _exp = experiment if experiment else neptune
 
+    expect_not_a_run(_exp)
+
     log_loss = sk_metrics.log_loss(y_true, y_pred)
     _exp.log_metric(prefix + 'log_loss', log_loss)
 
@@ -528,6 +546,8 @@ def log_ks_statistic(y_true, y_pred, experiment=None, channel_name='metric_chart
 
     _exp = experiment if experiment else neptune
 
+    expect_not_a_run(_exp)
+
     res = binary_ks_curve(y_true, y_pred[:, 1])
     ks_stat = res[3]
     _exp.log_metric(prefix + 'ks_statistic', ks_stat)
@@ -580,6 +600,8 @@ def log_cumulative_gain(y_true, y_pred, experiment=None, channel_name='metric_ch
 
     _exp = experiment if experiment else neptune
 
+    expect_not_a_run(_exp)
+
     fig, ax = plt.subplots()
     plt_metrics.plot_cumulative_gain(y_true, y_pred, ax=ax)
     send_figure(fig, channel_name=prefix + channel_name, experiment=_exp)
@@ -628,6 +650,8 @@ def log_lift_curve(y_true, y_pred, experiment=None, channel_name='metric_charts'
 
     _exp = experiment if experiment else neptune
 
+    expect_not_a_run(_exp)
+
     fig, ax = plt.subplots()
     plt_metrics.plot_lift_curve(y_true, y_pred, ax=ax)
     send_figure(fig, channel_name=prefix + channel_name, experiment=_exp)
@@ -670,6 +694,8 @@ def log_prediction_distribution(y_true, y_pred_pos, experiment=None, channel_nam
 
     _exp = experiment if experiment else neptune
 
+    expect_not_a_run(_exp)
+
     fig, ax = plt.subplots()
     _plot_prediction_distribution(y_true, y_pred_pos, ax=ax)
     send_figure(fig, channel_name=prefix + channel_name, experiment=_exp)
diff --git a/neptunecontrib/monitoring/optuna.py b/neptunecontrib/monitoring/optuna.py
index f4a3f2d..fe82f4d 100644
--- a/neptunecontrib/monitoring/optuna.py
+++ b/neptunecontrib/monitoring/optuna.py
@@ -18,6 +18,7 @@
 import neptune
 
 from neptunecontrib.api import log_chart, pickle_and_log_artifact
+from neptunecontrib.monitoring.utils import expect_not_a_run
 
 
 class NeptuneCallback:
@@ -77,6 +78,8 @@ def __init__(self, experiment=None,
         self.exp = experiment if experiment else neptune
         self.log_study = log_study
 
+        expect_not_a_run(self.exp)
+
         if log_charts:
             message = """log_charts argument is deprecated and will be removed in future releases.
@@ -170,6 +173,8 @@ def log_study_info(study, experiment=None,
 
     _exp = experiment if experiment else neptune
 
+    expect_not_a_run(_exp)
+
     _exp.log_metric('best_score', study.best_value)
     _exp.set_property('best_parameters', study.best_params)
diff --git a/neptunecontrib/monitoring/sklearn.py b/neptunecontrib/monitoring/sklearn.py
index c1e4ca0..6f8c956 100644
--- a/neptunecontrib/monitoring/sklearn.py
+++ b/neptunecontrib/monitoring/sklearn.py
@@ -30,6 +30,7 @@
 
 from neptunecontrib.api.table import log_csv
 from neptunecontrib.api.utils import log_pickle
+from neptunecontrib.monitoring.utils import expect_not_a_run
 
 
 def log_regressor_summary(regressor, X_train, X_test, y_train, y_test,
@@ -1228,6 +1229,8 @@ def log_silhouette_chart(model, X, experiment=None, **kwargs):
 
 def _validate_experiment(experiment):
     if experiment is not None:
+        expect_not_a_run(experiment)
+
         if not isinstance(experiment, neptune.experiments.Experiment):
             raise ValueError('Passed experiment is not Neptune experiment. Create one by using "create_experiment()"')
     else:
diff --git a/neptunecontrib/monitoring/skopt.py b/neptunecontrib/monitoring/skopt.py
index b270e03..5343c13 100644
--- a/neptunecontrib/monitoring/skopt.py
+++ b/neptunecontrib/monitoring/skopt.py
@@ -21,7 +21,7 @@
 import skopt.plots as sk_plots
 from skopt.utils import dump
 
-from neptunecontrib.monitoring.utils import axes2fig
+from neptunecontrib.monitoring.utils import axes2fig, expect_not_a_run
 
 
 class NeptuneCallback:
@@ -55,6 +55,9 @@ class NeptuneCallback:
     def __init__(self, experiment=None, log_checkpoint=True):
         self._exp = experiment if experiment else neptune
+
+        expect_not_a_run(self._exp)
+
         self.log_checkpoint = log_checkpoint
         self._iteration = 0
 
@@ -114,6 +117,8 @@ def log_results(results, experiment=None, log_plots=True, log_pickle=True):
     """
     _exp = experiment if experiment else neptune
 
+    expect_not_a_run(_exp)
+
     _log_best_score(results, _exp)
     _log_best_parameters(results, _exp)
 
@@ -135,6 +140,7 @@ def NeptuneMonitor(*args, **kwargs):
 
 
 def _log_best_parameters(results, experiment):
+    expect_not_a_run(experiment)
     named_params = ([(dimension.name, param) for dimension, param in zip(results.space, results.x)])
     experiment.set_property('best_parameters', str(named_params))
 
@@ -144,18 +150,21 @@ def _log_best_score(results, experiment):
 
 
 def _log_plot_convergence(results, experiment, name='diagnostics'):
+    expect_not_a_run(experiment)
     fig, ax = plt.subplots()
     sk_plots.plot_convergence(results, ax=ax)
     experiment.log_image(name, fig)
 
 
 def _log_plot_regret(results, experiment, name='diagnostics'):
+    expect_not_a_run(experiment)
     fig, ax = plt.subplots()
     sk_plots.plot_regret(results, ax=ax)
     experiment.log_image(name, fig)
 
 
 def _log_plot_evaluations(results, experiment, name='diagnostics'):
+    expect_not_a_run(experiment)
     fig = plt.figure(figsize=(16, 12))
     fig = axes2fig(sk_plots.plot_evaluations(results, bins=10), fig=fig)
     experiment.log_image(name, fig)
@@ -163,6 +172,7 @@ def _log_plot_evaluations(results, experiment, name='diagnostics'):
 
 def _log_plot_objective(results, experiment, name='diagnostics'):
     try:
+        expect_not_a_run(experiment)
         fig = plt.figure(figsize=(16, 12))
         fig = axes2fig(sk_plots.plot_objective(results), fig=fig)
         experiment.log_image(name, fig)
@@ -171,6 +181,7 @@ def _log_plot_objective(results, experiment, name='diagnostics'):
 
 
 def _log_results_object(results, experiment=None):
+    expect_not_a_run(experiment)
     experiment.log_artifact(_export_results_object(results), 'results.pkl')
diff --git a/neptunecontrib/monitoring/utils.py b/neptunecontrib/monitoring/utils.py
index dd3ae1d..1d3e407 100644
--- a/neptunecontrib/monitoring/utils.py
+++ b/neptunecontrib/monitoring/utils.py
@@ -21,6 +21,7 @@
 import neptune
 
 from neptunecontrib.api import pickle_and_log_artifact
+from neptunecontrib.monitoring.exceptions import NeptuneLegacyIncompatibilityException
 
 
 def axes2fig(axes, fig=None):
@@ -83,3 +84,8 @@ def pickle_and_send_artifact(obj, filename, experiment=None):
         warnings.warn(message)
 
     pickle_and_log_artifact(obj, filename, experiment)
+
+
+def expect_not_a_run(experiment):
+    if type(experiment).__name__ == 'Run':
+        raise NeptuneLegacyIncompatibilityException()
diff --git a/neptunecontrib/monitoring/xgboost.py b/neptunecontrib/monitoring/xgboost.py
index b5f5fe1..16134ed 100644
--- a/neptunecontrib/monitoring/xgboost.py
+++ b/neptunecontrib/monitoring/xgboost.py
@@ -19,6 +19,8 @@
 import neptune
 import xgboost as xgb
 
+from neptunecontrib.monitoring.utils import expect_not_a_run
+
 
 def neptune_callback(log_model=True,
                      log_importance=True,
@@ -160,6 +162,8 @@ def neptune_callback(log_model=True,
     except neptune.exceptions.NeptuneNoExperimentContextException:
         raise neptune.exceptions.NeptuneNoExperimentContextException()
 
+    expect_not_a_run(_exp)
+
     assert isinstance(log_model, bool),\
        'log_model must be bool, got {} instead. Check log_model parameter.'.format(type(log_model))
    assert isinstance(log_importance, bool),\
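
What the change means for users, as a minimal sketch (not part of the commit; it
assumes neptune-client >= 0.9 with the new neptune.new API, valid credentials,
and a placeholder project name). Before this patch, handing a new-style Run to a
legacy integration failed later with an opaque AttributeError; now every patched
entry point fails fast at construction:

import neptune.new

from neptunecontrib.monitoring.keras import NeptuneMonitor
from neptunecontrib.monitoring.exceptions import NeptuneLegacyIncompatibilityException

# Run comes from the new API and is not a legacy Experiment.
run = neptune.new.init(project='my_workspace/my_project')

try:
    # The patched __init__ calls expect_not_a_run() before doing anything else.
    NeptuneMonitor(experiment=run)
except NeptuneLegacyIncompatibilityException as e:
    print(e)  # the message above, pointing at the updated integrations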
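
Worth noting the design of the guard itself: expect_not_a_run() matches on the
class name, type(experiment).__name__ == 'Run', rather than using isinstance, so
neptune-contrib can detect the mistake without importing or depending on the
new-API package. The check can therefore be exercised with a hypothetical
stand-in class, since only the name matters:

from neptunecontrib.monitoring.exceptions import NeptuneLegacyIncompatibilityException
from neptunecontrib.monitoring.utils import expect_not_a_run


class Run:
    """Hypothetical stand-in for neptune.new's Run; only the class name is checked."""


expect_not_a_run(object())   # any non-Run object passes silently

try:
    expect_not_a_run(Run())  # name matches 'Run' -> legacy incompatibility
except NeptuneLegacyIncompatibilityException:
    print('Run rejected, as expected')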