Merge pull request #46 from IBM/develop
Release 0.3
tkornuta-ibm committed Nov 13, 2018
2 parents b4f2442 + 2a92103 commit 2be45de
Showing 166 changed files with 3,869 additions and 2,109 deletions.
23 changes: 23 additions & 0 deletions .github/PULL_REQUEST_TEMPLATE/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,23 @@
## Pull Request template
Please go through these steps before you submit a PR.

1. Make sure that your PR is not a duplicate.
2. If not, then make sure that:

2.1. You have made your changes in a separate branch. Branches should have descriptive names that start with either the `fix/` or the `feature/` prefix. Good examples are `fix/signin-issue` and `feature/new-model`.

2.2. You have descriptive commit messages with short titles (first line).

2.3. You have only one commit (if not, squash them into a single commit; see the sketch after this template).

3. **After** these steps, you're ready to open a pull request.

3.1. Give a descriptive title to your PR.

3.2. Provide a description of your changes.

3.3. Put `closes #XXXX` in your comment to auto-close the issue that your PR fixes (if there is one).

Important: Please review the [CONTRIBUTING.md](../CONTRIBUTING.md) file for detailed contributing guidelines.

*Please remove this template before submitting.*
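For contributors unfamiliar with step 2.3, a minimal sketch of the branch-and-squash workflow (assuming the work branches off `master`; the branch name is illustrative):

```bash
# 2.1: do the work on a descriptively named branch.
git checkout -b fix/signin-issue
# ...edit files, commit as often as you like...

# 2.3: squash everything into a single commit. In the interactive
# editor, keep the first commit as "pick" and mark the rest "squash".
git rebase -i master
```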
12 changes: 7 additions & 5 deletions .gitignore
@@ -7,19 +7,21 @@
 !Readme.md
 !readthedocs.yml
 !setup.py
+!doc_build.sh
 !__init__.py
 !/configs/**
 !/docs/**
+!/miprometheus/**
+!.github/**

 # You can be specific with these rules
 __pycache__*
 *.swp
 *.vector_cache
 !.gitignore

-# not sure if those are needed
-problems/.DS_Store
-problems/image_text_to_class/.DS_Store
-problems/image_text_to_class/CLEVR_v1.0
-CLEVR_v1.0/
+# Ignore every DS_Store
+**/.DS_Store

+# Ignore build directory
+/build/**
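A quick way to sanity-check the new rules (a sketch run from the repository root; the paths are hypothetical examples):

```bash
# Prints the .gitignore rule that matches each path:
git check-ignore -v some/dir/.DS_Store build/lib/module.py
# expected matches: "**/.DS_Store" and "/build/**"
```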
4 changes: 2 additions & 2 deletions README.md
@@ -51,15 +51,15 @@ The dependencies of MI-prometheus are:

* pytorch (v. 0.4.0)
* numpy
* torchvision (v. 0.2.0)
* torchvision
* torchtext
* tensorboardx
* matplotlib
* psutil (enables grid-* to spawn child processes on MacOS and Ubuntu)
* PyYAML
* tqdm
* nltk
* h5py
* six
* pyqt5 (v. 5.10.1)


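For reference, a plausible way to install these dependencies from PyPI (note the differing package names: `pytorch` installs as `torch`, `tensorboardx` as `tensorboardX`; the repository's `setup.py` may pin versions differently):

```bash
pip install torch==0.4.0 torchvision torchtext tensorboardX matplotlib \
    psutil PyYAML tqdm nltk h5py six PyQt5==5.10.1
```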
15 changes: 0 additions & 15 deletions configs/example_trainer_gpu.yaml

This file was deleted.

4 changes: 4 additions & 0 deletions configs/maes_baselines/default_training.yaml
@@ -5,6 +5,10 @@ training:
         initial_max_sequence_length: 5
         # must_finish: false

+    # Sampler.
+    sampler:
+        name: RandomSampler
+
     # Optimizer parameters:
     optimizer:
         # Exact name of the pytorch optimizer function
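The new `sampler` sections name PyTorch sampler classes. A minimal sketch of how such a section could map onto `torch.utils.data` (the `build_sampler` helper is hypothetical, the framework's actual factory may differ, and the half-open reading of `indices` is inferred from the configs below):

```python
from torch.utils.data import RandomSampler, SubsetRandomSampler

def build_sampler(sampler_config, dataset):
    """Hypothetical mapping of a `sampler:` config section onto PyTorch."""
    name = sampler_config["name"]
    if name == "RandomSampler":
        # Shuffles over the entire dataset.
        return RandomSampler(dataset)
    if name == "SubsetRandomSampler":
        # `indices: [lo, hi]` read as the index range [lo, hi).
        lo, hi = sampler_config["indices"]
        return SubsetRandomSampler(list(range(lo, hi)))
    raise ValueError("Unknown sampler: {}".format(name))
```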
19 changes: 11 additions & 8 deletions configs/vision/alexnet_cifar10.yaml
@@ -5,10 +5,12 @@ training:
     problem:
         name: &name CIFAR10
         batch_size: &b 64
-        index: [0, 40000]
         use_train_data: True
-        padding: &p [0,0,0,0] # ex: (x1, x2, x3, x4) pad last dim by (x1, x2) and 2nd to last by (x3, x4)
-        up_scaling: &scale True # if up_scale true the image is resized to 224 x 224
+        resize: [224, 224]
+    # Use sampler that operates on a subset.
+    sampler:
+        name: SubsetRandomSampler
+        indices: [0, 45000]
     # optimizer parameters:
     optimizer:
         name: Adam
@@ -22,19 +24,20 @@ validation:
     problem:
         name: *name
         batch_size: *b
-        index: [40000, 49999]
         use_train_data: True  # True because we are splitting the training set into validation and training
-        padding: *p
-        up_scaling: *scale
+        resize: [224, 224]
+    # Use sampler that operates on a subset.
+    sampler:
+        name: SubsetRandomSampler
+        indices: [45000, 50000]

 # Problem parameters:
 testing:
     problem:
         name: *name
         batch_size: *b
         use_train_data: False
-        padding: *p
-        up_scaling: *scale
+        resize: [224, 224]

 # Model parameters:
 model:
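The two `SubsetRandomSampler` sections above implement a train/validation split of the 50,000-image CIFAR-10 training set. A rough plain-PyTorch equivalent (the data path and transform are assumptions; the framework's own CIFAR10 problem class handles this internally):

```python
import os
from torch.utils.data import DataLoader, SubsetRandomSampler
from torchvision import datasets, transforms

# Both loaders read the CIFAR-10 *training* set (use_train_data: True),
# resized to 224x224 for AlexNet, but sample disjoint index ranges.
transform = transforms.Compose([transforms.Resize((224, 224)),
                                transforms.ToTensor()])
train_set = datasets.CIFAR10(os.path.expanduser('~/data/cifar10'),
                             train=True, download=True, transform=transform)
train_loader = DataLoader(train_set, batch_size=64,
                          sampler=SubsetRandomSampler(range(0, 45000)))
valid_loader = DataLoader(train_set, batch_size=64,
                          sampler=SubsetRandomSampler(range(45000, 50000)))
```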
17 changes: 8 additions & 9 deletions configs/vision/alexnet_mnist.yaml
@@ -5,17 +5,19 @@ training:
     problem:
         name: &name MNIST
         batch_size: &b 64
-        index: [0, 54999]
         use_train_data: True
-        padding: &p [0,0,0,0] # ex: (x1, x2, x3, x4) pad last dim by (x1, x2) and 2nd to last by (x3, x4)
-        up_scaling: &scale True # if up_scale true, the image is resized to 224 x 224
+        resize: [224, 224]
+    # Use sampler that operates on a subset.
+    sampler:
+        name: SubsetRandomSampler
+        indices: [0, 55000]
     # optimizer parameters:
     optimizer:
         name: Adam
         lr: 0.01
     # settings parameters
     terminal_conditions:
-        loss_stop: 1.0e-5
+        loss_stop: 1.0e-2
         episode_limit: 50000
         epochs_limit: 10

@@ -24,19 +26,16 @@ validation:
     problem:
         name: *name
         batch_size: *b
-        index: [54999, 59999]
         use_train_data: True  # True because we are splitting the training set into validation and training
-        padding: *p
-        up_scaling: *scale
+        resize: [224, 224]

 # Problem parameters:
 testing:
     problem:
         name: *name
         batch_size: *b
         use_train_data: False
-        padding: *p
-        up_scaling: *scale
+        resize: [224, 224]

 # Model parameters:
 model:
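Throughout these configs, YAML anchors (`&`) and aliases (`*`) keep the training, validation, and testing sections in sync. A self-contained illustration of how any standard loader resolves them:

```python
import yaml

snippet = """
training:
    problem:
        name: &name MNIST
        batch_size: &b 64
validation:
    problem:
        name: *name
        batch_size: *b
"""
config = yaml.safe_load(snippet)
# The aliases resolve to the anchored values:
assert config["validation"]["problem"] == {"name": "MNIST", "batch_size": 64}
```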
43 changes: 43 additions & 0 deletions configs/vision/grid_trainer_mnist.yaml
@@ -0,0 +1,43 @@
grid_tasks:
    -
        default_configs: configs/vision/lenet5_mnist.yaml
    -
        default_configs: configs/vision/simplecnn_mnist.yaml

# Set exactly the same experiment conditions for the 2 tasks.
grid_overwrite:
    training:
        problem:
            batch_size: &b 1000
        sampler:
            name: SubsetRandomSampler
            indices: [0, 55000]
        # Set the same optimizer parameters.
        optimizer:
            name: Adam
            lr: 0.01
        # Set the same terminal conditions.
        terminal_conditions:
            loss_stop: 4.0e-2
            episode_limit: 10000
            epoch_limit: 10

    # Problem parameters:
    validation:
        problem:
            batch_size: *b
        sampler:
            name: SubsetRandomSampler
            indices: [55000, 60000]

    testing:
        problem:
            batch_size: *b

grid_settings:
    # Set the number of repetitions of each experiment.
    experiment_repetitions: 5
    # Set the number of concurrently running experiments.
    max_concurrent_runs: 4
    # Set the trainer.
    trainer: mip-online-trainer
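The `grid_overwrite` section suggests a recursive merge: shared settings are overlaid on each task's default config. A sketch of such merge semantics (the actual grid-trainer logic in MI-Prometheus may differ):

```python
def deep_update(base, overrides):
    """Recursively overlay `overrides` onto `base`, in place."""
    for key, value in overrides.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            deep_update(base[key], value)
        else:
            base[key] = value
    return base

# e.g. deep_update(lenet5_config, grid_overwrite) forces batch_size to 1000
# in both tasks while leaving keys the overwrite does not mention untouched.
```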
48 changes: 48 additions & 0 deletions configs/vision/lenet5_mnist.yaml
@@ -0,0 +1,48 @@
# Training parameters:
training:
    problem:
        name: &name MNIST
        batch_size: &b 64
        use_train_data: True
        data_folder: &folder '~/data/mnist'
        resize: [32, 32]
    # Use sampler that operates on a subset.
    sampler:
        name: SubsetRandomSampler
        indices: [0, 55000]
    # optimizer parameters:
    optimizer:
        name: Adam
        lr: 0.01
    # settings parameters
    terminal_conditions:
        loss_stop: 1.0e-2
        episode_limit: 10000
        epoch_limit: 10

# Validation parameters:
validation:
    #partial_validation_interval: 100
    problem:
        name: *name
        batch_size: *b
        use_train_data: True  # True because we are splitting the training set into validation and training
        data_folder: *folder
        resize: [32, 32]
    # Use sampler that operates on a subset.
    sampler:
        name: SubsetRandomSampler
        indices: [55000, 60000]

# Testing parameters:
testing:
    problem:
        name: *name
        batch_size: *b
        use_train_data: False
        data_folder: *folder
        resize: [32, 32]

# Model parameters:
model:
    name: LeNet5
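A plausible reading of the `problem` section above in plain torchvision terms: MNIST loaded from `data_folder`, the training split selected by `use_train_data: True`, and each 28x28 digit resized to the 32x32 input that the classic LeNet-5 architecture expects (the framework's own MNIST problem class may implement this differently):

```python
import os
from torchvision import datasets, transforms

transform = transforms.Compose([transforms.Resize((32, 32)),
                                transforms.ToTensor()])
mnist_train = datasets.MNIST(root=os.path.expanduser('~/data/mnist'),
                             train=True, download=True, transform=transform)
```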
16 changes: 8 additions & 8 deletions configs/vision/simplecnn_cifar10.yaml
@@ -5,10 +5,11 @@ training:
     problem:
         name: &name CIFAR10
         batch_size: &b 64
-        index: [0, 40000]
         use_train_data: True
-        padding: &p [0,0,0,0] # ex: (x1, x2, x3, x4) pad last dim by (x1, x2) and 2nd to last by (x3, x4)
-        up_scaling: &scale False # if up_scale true the image is resized to 224 x 224
+    # Use sampler that operates on a subset.
+    sampler:
+        name: SubsetRandomSampler
+        indices: [0, 45000]
     # optimizer parameters:
     optimizer:
         name: Adam
@@ -22,19 +23,18 @@ validation:
     problem:
         name: *name
         batch_size: *b
-        index: [40000, 49999]
         use_train_data: True  # True because we are splitting the training set into validation and training
-        padding: *p
-        up_scaling: *scale
+    # Use sampler that operates on a subset.
+    sampler:
+        name: SubsetRandomSampler
+        indices: [45000, 50000]

 # Problem parameters:
 testing:
     problem:
         name: *name
         batch_size: *b
         use_train_data: False
-        padding: *p
-        up_scaling: *scale

 # Model parameters:
 model:
32 changes: 21 additions & 11 deletions configs/vision/simplecnn_mnist.yaml
@@ -5,38 +5,48 @@ training:
     problem:
         name: &name MNIST
         batch_size: &b 64
-        index: [0, 54999]
+        data_folder: &folder '~/data/mnist'
         use_train_data: True
-        padding: &p [0,0,0,0] # ex: (x1, x2, x3, x4) pad last dim by (x1, x2) and 2nd to last by (x3, x4)
-        up_scaling: &scale False # if up_scale true, the image is resized to 224 x 224
+        resize: [32, 32]
+    sampler:
+        name: SubsetRandomSampler
+        indices: [0, 55000]
+        #indices: ~/data/mnist/split_a.txt
     # optimizer parameters:
     optimizer:
         name: Adam
         lr: 0.01
     # settings parameters
     terminal_conditions:
-        loss_stop: 1.0e-5
-        episode_limit: 50000
-        epochs_limit: 10
+        loss_stop: 1.0e-2
+        episode_limit: 1000
+        epoch_limit: 1

 # Problem parameters:
 validation:
     problem:
         name: *name
         batch_size: *b
-        index: [54999, 59999]
+        data_folder: *folder
         use_train_data: True  # True because we are splitting the training set into validation and training
-        padding: *p
-        up_scaling: *scale
+        resize: [32, 32]
+    sampler:
+        name: SubsetRandomSampler
+        indices: [55000, 60000]
+        #indices: ~/data/mnist/split_b.txt
+    #dataloader:
+    #    drop_last: True

 # Problem parameters:
 testing:
+    #seed_numpy: 4354
+    #seed_torch: 2452
     problem:
         name: *name
         batch_size: *b
+        data_folder: *folder
         use_train_data: False
-        padding: *p
-        up_scaling: *scale
+        resize: [32, 32]


 # Model parameters:
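The commented-out `dataloader: drop_last` option above presumably maps to the flag of the same name on PyTorch's `DataLoader`, which discards the final incomplete batch. A self-contained sketch of that behaviour:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

data = TensorDataset(torch.arange(130).float())
loader = DataLoader(data, batch_size=64, drop_last=True)
assert len(loader) == 2  # the trailing 2-sample batch is dropped
```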
14 changes: 14 additions & 0 deletions doc_build.sh
@@ -0,0 +1,14 @@
#!/usr/bin/env bash

cd docs
rm -rf build

# create html pages
sphinx-build -b html source build
make html

# open web browser(s) to master table of content
if which firefox
then
firefox build/index.html
fi