Update NeptuneLogger for neptune 1.0 (#934)
Co-authored-by: Sabine <sabine.nyholm@neptune.ai>
kshitij12345 and normandy7 committed Mar 17, 2023
1 parent 8c7a814 commit e500dd4
Showing 3 changed files with 64 additions and 36 deletions.
2 changes: 1 addition & 1 deletion requirements-dev.txt
@@ -5,7 +5,7 @@ future>=0.17.1
 gpytorch>=1.5
 jupyter
 matplotlib>=2.0.2
-neptune-client>=0.14.3
+neptune
 numpydoc
 openpyxl
 pandas
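The dev dependency moves from the legacy `neptune-client` package to the renamed `neptune` package (1.0+). As a minimal sketch of what the rename means for user code, assuming a neptune>=1.0 install, the import paths flatten from `neptune.new` to the top-level package, the same change the docstring update below makes:

# Legacy layout (neptune-client < 1.0): the current API lived under `neptune.new`
#     import neptune.new as neptune
#     from neptune.new.types import File
# Renamed package (neptune >= 1.0): the same API sits at the top level
import neptune
from neptune.types import File

# Anonymous token and shared project, as used in the docstring example below
run = neptune.init_run(
    api_token=neptune.ANONYMOUS_API_TOKEN,
    project='shared/skorch-integration',
)
run.stop()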
59 changes: 37 additions & 22 deletions skorch/callbacks/logging.py
@@ -81,19 +81,18 @@ class NeptuneLogger(Callback):
     $ python -m pip install psutil
 
     You can view example experiment logs here:
-    https://app.neptune.ai/shared/skorch-integration/e/SKOR-23/
+    https://app.neptune.ai/o/common/org/skorch-integration/e/SKOR-32/all
 
     Examples
     --------
     $ # Install Neptune
-    $ python -m pip install neptune-client
+    $ python -m pip install neptune
 
     >>> # Create a Neptune run
-    >>> import neptune.new as neptune
-    >>> from neptune.new.types import File
-    ...
-    ... # This example uses the API token for anonymous users.
-    ... # For your own projects, use the token associated with your neptune.ai account.
+    >>> import neptune
+    >>> from neptune.types import File
+    >>> # This example uses the API token for anonymous users.
+    >>> # For your own projects, use the token associated with your neptune.ai account.
     >>> run = neptune.init_run(
     ...     api_token=neptune.ANONYMOUS_API_TOKEN,
     ...     project='shared/skorch-integration',
@@ -113,32 +112,30 @@ class NeptuneLogger(Callback):
     >>> net.fit(X, y)
 
     >>> # Save the checkpoints to Neptune
-    >>> neptune_logger.run["checkpoints].upload_files("./checkpoints")
+    >>> neptune_logger.run["checkpoints"].upload_files("./checkpoints")
 
     >>> # Log additional metrics after training has finished
     >>> from sklearn.metrics import roc_auc_score
-    ... y_proba = net.predict_proba(X)
-    ... auc = roc_auc_score(y, y_proba[:, 1])
-    ...
-    ... neptune_logger.run["roc_auc_score"].log(auc)
+    >>> y_proba = net.predict_proba(X)
+    >>> auc = roc_auc_score(y, y_proba[:, 1])
+    >>> neptune_logger.run["roc_auc_score"].log(auc)
 
     >>> # Log charts, such as an ROC curve
     >>> from sklearn.metrics import RocCurveDisplay
-    ...
     >>> roc_plot = RocCurveDisplay.from_estimator(net, X, y)
     >>> neptune_logger.run["roc_curve"].upload(File.as_html(roc_plot.figure_))
 
     >>> # Log the net object after training
-    ... net.save_params(f_params='basic_model.pkl')
-    ... neptune_logger.run["basic_model"].upload(File('basic_model.pkl'))
+    >>> net.save_params(f_params='basic_model.pkl')
+    >>> neptune_logger.run["basic_model"].upload(File('basic_model.pkl'))
 
     >>> # Close the run
-    ... neptune_logger.run.stop()
+    >>> neptune_logger.run.stop()
 
     Parameters
     ----------
-    run : neptune.new.Run
-      Instantiated ``Run`` class.
+    run : neptune.Run or neptune.handler.Handler
+      Instantiated ``Run`` or ``Handler`` class.
 
     log_on_batch_end : bool (default=False)
       Whether to log loss and other metrics on batch level.
@@ -231,18 +228,36 @@ def on_epoch_end(self, net, **kwargs):

     def on_train_end(self, net, **kwargs):
         try:
-            self._metric_logger['train/epoch/event_lr'].log(net.history[:, 'event_lr'])
+            self._metric_logger['train/epoch/event_lr'].append(net.history[:, 'event_lr'])
         except KeyError:
             pass
         if self.close_after_train:
-            self.run.stop()
+            try:  # >1.0 package structure
+                from neptune.handler import Handler
+            except ImportError:  # <1.0 package structure
+                from neptune.new.handler import Handler
+
+            # Neptune integrations now accept passing a Handler object
+            # to an integration.
+            # Ref: https://docs.neptune.ai/api/field_types/#handler
+            # Example of getting a handler from a `Run` object:
+            #     handler = run["foo"]
+            #     handler['bar'] = 1  # logs to `foo/bar`
+            # NOTE: a Handler provides most of the functionality of `Run`
+            # for logging; however, it doesn't implement a few methods like
+            # `stop`, `wait`, etc.
+            root_obj = self.run
+            if isinstance(self.run, Handler):
+                root_obj = self.run.get_root_object()
+
+            root_obj.stop()
 
     def _log_metric(self, name, logs, batch):
         kind, _, key = name.partition('_')
 
         if not key:
             key = 'epoch_duration' if kind == 'dur' else kind
-            self._metric_logger[key].log(logs[name])
+            self._metric_logger[key].append(logs[name])
         else:
             if kind == 'valid':
                 kind = 'validation'
@@ -253,7 +268,7 @@ def _log_metric(self, name, logs, batch):
                 granularity = 'epoch'
 
             # for example: train / epoch / loss
-            self._metric_logger[kind][granularity][key].log(logs[name])
+            self._metric_logger[kind][granularity][key].append(logs[name])
 
     @staticmethod
     def _model_summary_file(model):
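Taken together, the callback changes swap the deprecated `.log()` series calls for the neptune 1.0 `.append()` API and let `on_train_end` accept either a `Run` or a namespace `Handler`, resolving the root run via `get_root_object()` before stopping it. A minimal usage sketch of the updated callback follows; `ClassifierModule`, `X`, and `y` are placeholders, the first positional argument and `close_after_train` match what the diff above shows, and the remaining constructor defaults are assumed:

import neptune
from skorch import NeuralNetClassifier
from skorch.callbacks import NeptuneLogger

# Anonymous token and shared project, as in the docstring example above
run = neptune.init_run(
    api_token=neptune.ANONYMOUS_API_TOKEN,
    project='shared/skorch-integration',
)

net = NeuralNetClassifier(
    ClassifierModule,      # placeholder: any torch.nn.Module-based classifier
    callbacks=[NeptuneLogger(run, close_after_train=False)],
    max_epochs=5,
)
net.fit(X, y)              # placeholder training data

# Metrics land under 'training/...', e.g. 'training/train/epoch/loss'.
# With close_after_train=False the run stays open for extra logging,
# so it has to be stopped explicitly.
run.stop()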
39 changes: 26 additions & 13 deletions skorch/tests/callbacks/test_logging.py
@@ -43,12 +43,7 @@ def neptune_logger_cls(self):

     @pytest.fixture
     def neptune_run_object(self):
-        try:
-            # neptune-client=0.9.0+ package structure
-            import neptune.new as neptune
-        except ImportError:
-            # neptune-client>=1.0.0 package structure
-            import neptune
+        import neptune
 
         run = neptune.init_run(
             project="tests/dry-run",
@@ -149,6 +144,29 @@ def test_fit_with_real_experiment(
         # Checkpoint callback was not used
         assert not neptune_run_object.exists('training/model/checkpoint')
 
+    def test_fit_with_handler(
+            self,
+            net_cls,
+            classifier_module,
+            data,
+            neptune_logger_cls,
+            neptune_run_object,
+    ):
+        net = net_cls(
+            classifier_module,
+            callbacks=[neptune_logger_cls(neptune_run_object['my_namespace'])],
+            max_epochs=5,
+        )
+        net.fit(*data)
+
+        assert neptune_run_object.exists('my_namespace/training/epoch_duration')
+        assert neptune_run_object.exists('my_namespace/training/train/epoch/loss')
+        assert neptune_run_object.exists('my_namespace/training/validation/epoch/loss')
+        assert neptune_run_object.exists('my_namespace/training/validation/epoch/acc')
+
+        # Checkpoint callback was not used
+        assert not neptune_run_object.exists('my_namespace/training/model/checkpoint')
+
     def test_log_on_batch_level_on(
             self,
             net_cls,
@@ -168,7 +186,7 @@ def test_log_on_batch_level_on(

         # (5 epochs x (40/4 batches x 2 batch metrics + 2 epoch metrics) = 110 calls) + base metrics
         assert mock_experiment.__getitem__.call_count == 110 + self.NUM_BASE_METRICS
-        mock_experiment['training']['train']['batch']['batch_size'].log.assert_any_call(4)
+        mock_experiment['training']['train']['batch']['batch_size'].append.assert_any_call(4)
 
     def test_log_on_batch_level_off(
             self,
@@ -206,12 +224,7 @@ def test_fit_with_real_experiment_saving_checkpoints(
             neptune_logger_cls,
             neptune_run_object,
     ):
-        try:
-            # neptune-client=0.9.0+ package structure
-            from neptune.new.attributes.file_set import FileSet
-        except ImportError:
-            # neptune-client>=1.0.0 package structure
-            from neptune.attributes.file_set import FileSet
+        from neptune.attributes.file_set import FileSet
         from skorch.callbacks import Checkpoint
 
         with tempfile.TemporaryDirectory() as directory:
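The new `test_fit_with_handler` exercises the `Handler` path: passing `run['my_namespace']` instead of the run itself prefixes every field the callback creates with `my_namespace/`. A short sketch of that pattern, assuming the fixture's project name (its remaining `init_run` arguments are elided in the diff above):

import neptune
from skorch.callbacks import NeptuneLogger

# Same project as the test fixture; other init_run arguments are elided above
run = neptune.init_run(project="tests/dry-run")

# run["my_namespace"] returns a neptune Handler; everything the callback logs
# is then scoped under it, e.g. 'my_namespace/training/train/epoch/loss'.
logger = NeptuneLogger(run["my_namespace"])

# A Handler has no stop()/wait(), which is why on_train_end checks
# isinstance(self.run, Handler) and calls get_root_object() before stopping.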
