This repository has been archived by the owner on Nov 3, 2022. It is now read-only.

Commit

Removed some exceptions from the pytest.ini (#368)
gabrieldemarmiesse committed Dec 26, 2018
1 parent 99a6a22 commit 6175bc7
Showing 3 changed files with 13 additions and 13 deletions.
examples/cifar10_resnet.py: 10 changes (6 additions, 4 deletions)
@@ -19,7 +19,8 @@
 
 
 weights_file = 'ResNet18v2-CIFAR-10.h5'
-lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1), cooldown=0, patience=5, min_lr=0.5e-6)
+lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1), cooldown=0,
+patience=5, min_lr=0.5e-6)
 early_stopper = EarlyStopping(min_delta=0.001, patience=10)
 csv_logger = CSVLogger('ResNet18v2-CIFAR-10.csv')
 model_checkpoint = ModelCheckpoint(weights_file, monitor='val_acc', save_best_only=True,
@@ -75,21 +76,22 @@
 samplewise_std_normalization=False, # divide each input by its std
 zca_whitening=False, # apply ZCA whitening
 rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)
-width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
-height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
+width_shift_range=0.1, # randomly shift images horizontally
+height_shift_range=0.1, # randomly shift images vertically
 horizontal_flip=True, # randomly flip images
 vertical_flip=False) # randomly flip images
 
 # Compute quantities required for featurewise normalization
 # (std, mean, and principal components if ZCA whitening is applied).
 datagen.fit(X_train)
 
+callbacks = [lr_reducer, early_stopper, csv_logger, model_checkpoint]
 # Fit the model on the batches generated by datagen.flow().
 model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),
 steps_per_epoch=X_train.shape[0] // batch_size,
 validation_data=(X_test, Y_test),
 epochs=nb_epoch, verbose=2,
-callbacks=[lr_reducer, early_stopper, csv_logger, model_checkpoint])
+callbacks=callbacks)
 
 scores = model.evaluate(X_test, Y_test, batch_size=batch_size)
 print('Test loss : ', scores[0])
examples/cifar10_wide_resnet.py: 12 changes (7 additions, 5 deletions)
@@ -46,12 +46,14 @@
 model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
 print('Finished compiling')
 
-model.fit_generator(generator.flow(trainX, trainY, batch_size=batch_size), steps_per_epoch=len(trainX) // batch_size,
+model_checkpoint = callbacks.ModelCheckpoint('WRN-28-8 Weights.h5',
+monitor='val_acc',
+save_best_only=True,
+save_weights_only=True)
+model.fit_generator(generator.flow(trainX, trainY, batch_size=batch_size),
+steps_per_epoch=len(trainX) // batch_size,
 epochs=epochs,
-callbacks=[
-callbacks.ModelCheckpoint('WRN-28-8 Weights.h5', monitor='val_acc', save_best_only=True,
-save_weights_only=True)],
+callbacks=[model_checkpoint],
 validation_data=(testX, testY))
 
 scores = model.evaluate(testX, testY, batch_size)
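Both example diffs apply the same refactor: give each callback a name, collect the callbacks in a list, and hand that list to the training call instead of building it inline. Together with the rewrapped call arguments, this keeps the touched lines under the 79-character limit, which is what allows the E501 exceptions for these examples to be dropped from pytest.ini below. A minimal, self-contained sketch of the resulting pattern (illustrative only, not part of the commit; it reuses the names and files from the ResNet example above):

import numpy as np
from keras.callbacks import (CSVLogger, EarlyStopping, ModelCheckpoint,
                             ReduceLROnPlateau)

weights_file = 'ResNet18v2-CIFAR-10.h5'
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1), cooldown=0,
                               patience=5, min_lr=0.5e-6)
early_stopper = EarlyStopping(min_delta=0.001, patience=10)
csv_logger = CSVLogger('ResNet18v2-CIFAR-10.csv')
model_checkpoint = ModelCheckpoint(weights_file, monitor='val_acc',
                                   save_best_only=True)

# Collect the callbacks once; the fit call then only needs the list.
callbacks = [lr_reducer, early_stopper, csv_logger, model_checkpoint]

# model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),
#                     steps_per_epoch=X_train.shape[0] // batch_size,
#                     validation_data=(X_test, Y_test),
#                     epochs=nb_epoch, verbose=2,
#                     callbacks=callbacks)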
pytest.ini: 4 changes (0 additions, 4 deletions)
@@ -10,16 +10,12 @@ addopts=-v
 norecursedirs= build
 
 # PEP-8 The following are ignored:
 # E501 line too long (82 > 79 characters)
 # E402 module level import not at top of file - temporary measure to continue adding ros python packaged in sys.path
 # E731 do not assign a lambda expression, use a def
 
 pep8ignore=* E402 \
 * E731 \
 * W503
-examples/cifar10_densenet.py E501 \
-examples/cifar10_resnet.py E501 \
-examples/cifar10_wide_resnet.py E501 \
-examples/improved_wgan.py E501 \
 keras_contrib/applications/densenet.py E501 \
 keras_contrib/applications/nasnet.py E501 \
