Skip to content

Commit

Permalink
About to make a few modifications to DRY up the code
Browse files Browse the repository at this point in the history
  • Loading branch information
kastnerkyle committed Jan 28, 2014
1 parent 9192428 commit 35abf19
Show file tree
Hide file tree
Showing 4 changed files with 8 additions and 10 deletions.
2 changes: 1 addition & 1 deletion convertscript.sh
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
#!/bin/bash
#Make sure the convert program is installed sudo apt-get install imagemagick
for i in `ls *.jpg`; do convert $i -resize 512x512\! "${i%.jpg}.png"; done
for i in `ls *.jpg`; do convert $i -resize 221x221\! "${i%.jpg}.png"; done
File renamed without changes.
10 changes: 3 additions & 7 deletions kaggle_train.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,14 +50,14 @@ def to_one_hot(l):

output = mlp.HingeLoss(n_classes=2,
layer_name='y',
irange=.0001)
irange=.001)

layers = [l1, l2, l3, l4, output]

mdl = mlp.MLP(layers,
input_space=in_space)

lr = .001
lr = .01
epochs = 100
trainer = sgd.SGD(learning_rate=lr,
batch_size=128,
Expand All @@ -82,12 +82,8 @@ def to_one_hot(l):

decay = sgd.LinearDecayOverEpoch(start=1,
saturate=100,
decay_factor=.05 * lr)
decay_factor=lr*.05)

win = window_flip.WindowAndFlipC01B(pad_randomized=8,
window_shape=(32, 32),
randomize=[trn],
center=[tst])
experiment = Train(dataset=trn,
model=mdl,
algorithm=trainer,
Expand Down
6 changes: 4 additions & 2 deletions kaggle_train_full.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,13 +53,15 @@ def to_one_hot(l):
mdl = mlp.MLP(layers,
input_space=in_space)

lr = .01
epochs = 100
trainer = sgd.SGD(learning_rate=.01,
batch_size=128,
learning_rule=learning_rule.Momentum(.5),
# Remember, default dropout is .5
cost=Dropout(input_include_probs={'l1': .8},
input_scales={'l1': 1.}),
termination_criterion=EpochCounter(100),
termination_criterion=EpochCounter(epochs),
monitoring_dataset={'train': full})

watcher = best_params.MonitorBasedSaveBest(
Expand All @@ -72,7 +74,7 @@ def to_one_hot(l):

decay = sgd.LinearDecayOverEpoch(start=1,
saturate=250,
decay_factor=.0005)
decay_factor=lr*.05)
experiment = Train(dataset=full,
model=mdl,
algorithm=trainer,
Expand Down

0 comments on commit 35abf19

Please sign in to comment.