Skip to content

Commit

Permalink
BugFix of "AttributeError: 'ProgbarLogger' object has no attribute 't…
Browse files Browse the repository at this point in the history
…arget'" (or 'log_values') error (keras-team#12898) (keras-team#12893) (keras-team#8944)

This changeset fixes two missing-attribute bugs:
* AttributeError: 'ProgbarLogger' object has no attribute 'target' keras-team#12898
  Abstract reproduction scenario is provided in ticket
* AttributeError: 'ProgbarLogger' object has no attribute 'log_values' keras-team#3657
* AttributeError: 'ProgbarLogger' object has no attribute 'log_values' keras-team#8944 (dup)

Related changes:
* Cases of regression are covered by tests.
* Some potential bugs with same nature are prevented and covered by manifestation checks.
* a run with an empty data array (but having a valid shape) is now handled properly
  and yields related warnings at the callback and training-routine level without execution failure

Note:
Changes that affect `ProgbarLogger` should be aware of following things:
* proper target initialisation requires two attributes, `params` and `use_steps`, to be defined
* `use_steps` is a guaranteed attribute that is set in the constructor (but it could be altered after object instantiation); that is currently a safe condition
* class `params` attribute could be altered between initialisation and training start. And current logic is made to be aware of this
* we don't have `params` initialisation in constructor, this attribute will be assigned on call of `set_params` of base class somewhere on caller level (no strict guarantees :( )
* the `seen` attribute works in tandem with `target` during `log_values` initialisation, and the two should be initialised under the same condition; currently that is not true
* the `if self.seen < self.target` condition is checked regardless of the verbose mode value, so both attributes should be initialised unconditionally
* `if self.seen < self.target` checks whether the training iteration has not yet finished, but in the degenerate zero-length case it is never reached and `log_values` stays uninitialised. I don't see any explicit logic preventing its use on exit from a 0-length training cycle, so this is potentially a bug of some kind that is only prevented at the caller-logic level
* `progbar` attribute initialisation is definitely related to output verbosity (log values accumulation are not) and should be left under verbosity condition
  • Loading branch information
mrjj committed May 31, 2019
1 parent aa3cc72 commit 657c590
Show file tree
Hide file tree
Showing 4 changed files with 143 additions and 8 deletions.
21 changes: 15 additions & 6 deletions keras/callbacks.py
Original file line number Diff line number Diff line change
Expand Up @@ -549,6 +549,12 @@ class ProgbarLogger(Callback):
# Raises
ValueError: In case of invalid `count_mode`.
"""
verbose = None
epochs = None
log_values = None
target = 0
seen = 0
progbar = None

def __init__(self, count_mode='samples',
stateful_metrics=None):
Expand All @@ -567,19 +573,22 @@ def __init__(self, count_mode='samples',
def on_train_begin(self, logs=None):
self.verbose = self.params['verbose']
self.epochs = self.params['epochs']
self.log_values = []
if self.target is 0:
warnings.warn('%s count is 0 and this run will do noop' %
('steps' if self.use_steps else 'samples'))

def on_epoch_begin(self, epoch, logs=None):
if self.use_steps:
self.target = self.params['steps']
else:
self.target = self.params['samples']
self.seen = 0
if self.verbose:
print('Epoch %d/%d' % (epoch + 1, self.epochs))
if self.use_steps:
target = self.params['steps']
else:
target = self.params['samples']
self.target = target
self.progbar = Progbar(target=self.target,
verbose=self.verbose,
stateful_metrics=self.stateful_metrics)
self.seen = 0

def on_batch_begin(self, batch, logs=None):
if self.seen < self.target:
Expand Down
4 changes: 4 additions & 0 deletions keras/engine/training_arrays.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@

import numpy as np
from scipy.sparse import issparse
import warnings

from .training_utils import batch_shuffle
from .training_utils import check_num_samples
Expand Down Expand Up @@ -183,6 +184,9 @@ def fit_loop(model, fit_function, fit_inputs,
np.random.shuffle(index_array)

batches = make_batches(num_train_samples, batch_size)
if len(batches) == 0:
warnings.warn('batches count is 0 and this run will do noop')
batch_index = 0
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
try:
Expand Down
5 changes: 3 additions & 2 deletions keras/utils/generic_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -388,10 +388,11 @@ def update(self, current, values=None):
sys.stdout.write('\n')

if self.target is not None:
numdigits = int(np.floor(np.log10(self.target))) + 1
numdigits = int(np.floor(np.log10(self.target or 1))) + 1
barstr = '%%%dd/%d [' % (numdigits, self.target)
bar = barstr % current
prog = float(current) / self.target
# if target is 0 we consider progress auto-completed (1.0)
prog = (float(current) / self.target) if self.target else 1.0
prog_width = int(self.width * prog)
if prog_width > 0:
bar += ('=' * (prog_width - 1))
Expand Down
121 changes: 121 additions & 0 deletions tests/keras/test_callbacks.py
Original file line number Diff line number Diff line change
Expand Up @@ -493,6 +493,127 @@ def test_ModelCheckpoint(tmpdir):
assert not tmpdir.listdir()


@pytest.fixture
def model_and_data():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = get_data_callbacks()
model = Sequential()
model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy']
)
return dict(
model=model,
x_test=x_test,
y_test=np_utils.to_categorical(y_test),
x_train=x_train,
y_train=np_utils.to_categorical(y_train),
)


class TestProgbarLogger(object):
"""
Test ProgbarLogger execution and known regressions
"""

def test_verbose_run(self, model_and_data):
"""
Run with verbose=1 mode
"""
model_and_data['model'].fit(
model_and_data['x_train'],
model_and_data['y_train'],
batch_size=batch_size,
validation_data=(model_and_data['x_test'], model_and_data['y_test']),
callbacks=[callbacks.ProgbarLogger(count_mode='samples')],
epochs=13,
verbose=1)

def test_silent_run(self, model_and_data):
"""
Run with verbose=0 mode to test regression with fingerprint:
`AttributeError: 'ProgbarLogger' object has no attribute 'target'` #12898
"""
model_and_data['model'].fit(
model_and_data['x_train'],
model_and_data['y_train'],
batch_size=batch_size,
validation_data=(model_and_data['x_test'], model_and_data['y_test'],),
callbacks=[callbacks.ProgbarLogger(count_mode='samples')],
epochs=13,
verbose=0)

def test_zero_cycle_verbose_run(self, model_and_data):
"""
Run with 0-length training cycle to ensure that finalization callbacks
are not missing attributes which might be lazy-initialized
only after first batch is executed
with verbose output enabled
"""
model_and_data['model'].fit(
model_and_data['x_train'],
model_and_data['y_train'],
batch_size=batch_size,
validation_data=(model_and_data['x_test'], model_and_data['y_test'],),
callbacks=[callbacks.ProgbarLogger(count_mode='samples')],
epochs=0,
verbose=1)

def test_zero_cycle_silent_run(self, model_and_data):
"""
Run with 0-length training cycle to ensure that finalization callbacks
are not missing attributes which might be lazy-initialized
only after first batch is executed
with suppressed verbose output
"""
model_and_data['model'].fit(
model_and_data['x_train'],
model_and_data['y_train'],
batch_size=batch_size,
validation_data=(model_and_data['x_test'], model_and_data['y_test'],),
callbacks=[callbacks.ProgbarLogger(count_mode='samples')],
epochs=0,
verbose=0)

def test_empty_data_verbose_run(self, model_and_data):
"""
Check for regression of:
* "AttributeError: 'ProgbarLogger' object has no attribute 'log_values'"
#3657
* "AttributeError: 'ProgbarLogger' object has no attribute 'log_values'"
#8944 (dup)
with verbose output enabled
"""
model_and_data['model'].fit(
np.ndarray(shape=(0, 2,)),
np.ndarray(shape=(0, 2,)),
batch_size=batch_size,
validation_data=(np.ndarray(shape=(0, 2)), np.ndarray(shape=(0, 2)),),
callbacks=[callbacks.ProgbarLogger(count_mode='samples')],
epochs=13,
verbose=1)

def test_empty_data_silent_run(self, model_and_data):
"""
This test is checking for possible init flaws in silent mode
with empty data similar to #3657
"""
model_and_data['model'].fit(
np.ndarray(shape=(0, 2,)),
np.ndarray(shape=(0, 2,)),
batch_size=batch_size,
validation_data=(
np.ndarray(shape=(0, 2)),
np.ndarray(shape=(0, 2)),
),
callbacks=[callbacks.ProgbarLogger(count_mode='samples')],
epochs=13,
verbose=0)


def test_EarlyStopping():
np.random.seed(1337)
(X_train, y_train), (X_test, y_test) = get_data_callbacks()
Expand Down

0 comments on commit 657c590

Please sign in to comment.