Merge branch 'develop'
changed code structure for the submission with two mandatory parameters
Ali committed Sep 25, 2020
2 parents 4a7d8d0 + a90ce86 commit 91bcc73
Showing 16 changed files with 173 additions and 176 deletions.
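The headline change is the new calling convention: `GetPipeLine` now takes a single configs dict instead of keyword arguments, with `use_algorithm` and `path` as the two mandatory fields. A minimal sketch of the new interface — this assumes the remaining settings have usable defaults, as the commit message's "two mandatory parameters" suggests, and the path is a placeholder:

```python
from instantdl import GetPipeLine

# Minimal configs: only the two mandatory fields are set; everything else
# (batchsize, augmentation, loss function, ...) is assumed to fall back to
# library defaults -- not verified against the full source.
configs = {
    "use_algorithm": "Classification",       # mandatory field
    "path": "docs/examples/Classification",  # mandatory field
}

pipeline = GetPipeLine(configs)
pipeline.run()
```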
57 changes: 29 additions & 28 deletions docs/examples/Classification/README.MD
@@ -44,34 +44,35 @@ After the data is provided in the desired shape, you can simply use the code with this small snippet:
```python
from instantdl import GetPipeLine

pipeline = GetPipeLine( use_algorithm = "Classification",
path= "docs/examples/Classification",
pretrained_weights= "docs/examples/data/Classification/logs/pretrained_weights_Classification.hdf5",
batchsize= 2,
iterations_over_dataset= 0,
data_gen_args= {
"save_augmented_images": False,
"resample_images": False,
"std_normalization": False,
"feature_scaling": False,
"horizontal_flip": False,
"vertical_flip": False,
"poission_noise": 1,
"rotation_range": 20,
"zoom_range": False,
"contrast_range": 1,
"brightness_range": 1,
"gamma_shift": 0,
"threshold_background_image": False,
"threshold_background_groundtruth": False,
"binarize_mask": False
},
loss_function= "binary_crossentropy",
num_classes= 2,
image_size= None,
calculate_uncertainty= True,
evaluation= True
)
configs = { "use_algorithm" : "Classification", # mandatory field
"path": "docs/examples/Classification", # mandatory field
"pretrained_weights": "docs/examples/data/Classification/logs/pretrained_weights_Classification.hdf5",
"batchsize": 2,
"iterations_over_dataset": 0,
"data_gen_args": {
"save_augmented_images": False,
"resample_images": False,
"std_normalization": False,
"feature_scaling": False,
"horizontal_flip": False,
"vertical_flip": False,
"poission_noise": 1,
"rotation_range": 20,
"zoom_range": False,
"contrast_range": 1,
"brightness_range": 1,
"gamma_shift": 0,
"threshold_background_image": False,
"threshold_background_groundtruth": False,
"binarize_mask": False
},
"loss_function": "binary_crossentropy",
"num_classes": 2,
"image_size": None,
"calculate_uncertainty": True,
"evaluation": True}

pipeline = GetPipeLine(configs)

pipeline.run()
```
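Since `GetPipeLine` now takes a plain dict, the same configuration can also live in a JSON file, as `instantdl/config.json` does elsewhere in this commit. A hedged sketch — `my_config.json` is a hypothetical file mirroring the dict above, and `json.load` converts JSON's `false`/`null` into Python's `False`/`None` automatically:

```python
import json

from instantdl import GetPipeLine

# Hypothetical config file mirroring the configs dict above.
with open("my_config.json") as f:
    configs = json.load(f)

GetPipeLine(configs).run()
```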
60 changes: 30 additions & 30 deletions docs/examples/InstanceSegmentation/README.MD
@@ -58,34 +58,36 @@ After the data is provided in the desired shape, you can simply use the code with this small snippet:
```python
from instantdl import GetPipeLine

pipeline = GetPipeLine( use_algorithm = "InstanceSegmentation",
path= "docs/examples/InstanceSegmentation",
pretrained_weights= None,
batchsize= 1,
iterations_over_dataset= 10,
data_gen_args= {
"save_augmented_images": False,
"resample_images": False,
"std_normalization": False,
"feature_scaling": False,
"horizontal_flip": True,
"vertical_flip": True,
"poission_noise": False,
"rotation_range": 20,
"zoom_range": 2,
"contrast_range": False,
"brightness_range": False,
"gamma_shift": False,
"threshold_background_image": False,
"threshold_background_groundtruth": False,
"binarize_mask": False
},
loss_function= "mse",
num_classes= 1,
image_size= None,
calculate_uncertainty= True,
evaluation= True
)
configs = {
"use_algorithm" : "InstanceSegmentation", # mandatory field
"path" : "docs/examples/InstanceSegmentation", # mandatory field
"pretrained_weights" : None,
"batchsize" : 1,
"iterations_over_dataset": 10,
"data_gen_args": {
"save_augmented_images": False,
"resample_images": False,
"std_normalization": False,
"feature_scaling": False,
"horizontal_flip": True,
"vertical_flip": True,
"poission_noise": False,
"rotation_range": 20,
"zoom_range": 2,
"contrast_range": False,
"brightness_range": False,
"gamma_shift": False,
"threshold_background_image": False,
"threshold_background_groundtruth": False,
"binarize_mask": False
},
"loss_function": "mse",
"num_classes": 1,
"image_size": None,
"calculate_uncertainty": True,
"evaluation": True}

pipeline = GetPipeLine(configs)

pipeline.run()
```
@@ -94,7 +96,5 @@ For instance segmentation, uncertainty estimation is not supported.
Once InstantDL has finished, it will write .png files visualizing the results and .npy files containing the predicted masks to the results folder.
If evaluation is set to True, InstantDL will save metrics such as the mean squared error to the insights folder as a .txt file and write visualizations of the segmentation to the evaluation folder.



As you can see, using the pipeline is straightforward and requires no further programming.
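For readers who want to post-process the output, a minimal sketch for inspecting the saved predictions — this assumes the .npy masks land in a `results` folder under the project path as described above; the exact file layout may differ:

```python
import glob

import numpy as np

# Load each predicted mask and report its shape and dtype.
for mask_file in sorted(glob.glob("docs/examples/InstanceSegmentation/results/*.npy")):
    mask = np.load(mask_file)
    print(mask_file, mask.shape, mask.dtype)
```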

58 changes: 31 additions & 27 deletions docs/examples/Regression/README.MD
@@ -41,33 +41,37 @@ After the data is provided in the desired shape, you can simply use the code with this small snippet:
```python
from instantdl import GetPipeLine

pipeline = GetPipeLine( use_algorithm = "Regression",
path= "docs/examples/Regression",
pretrained_weights= None,
batchsize= 1,
iterations_over_dataset= 10,
data_gen_args= {
"save_augmented_images": False,
"resample_images": False,
"std_normalization": False,
"feature_scaling": False,
"horizontal_flip": True,
"vertical_flip": True,
"poission_noise": False,
"rotation_range": 20,
"zoom_range": 2,
"contrast_range": False,
"brightness_range": False,
"gamma_shift": False,
"threshold_background_image": False,
"threshold_background_groundtruth": False,
"binarize_mask": False
},
loss_function= "mse",
num_classes= 1,
image_size= None,
calculate_uncertainty= True,
evaluation= True
configs = {
"use_algorithm" : "Regression", # mandatory field
"path": "docs/examples/Regression", # mandatory field
"pretrained_weights": None,
"batchsize": 1,
"iterations_over_dataset": 10,
"data_gen_args": {
"save_augmented_images": False,
"resample_images": False,
"std_normalization": False,
"feature_scaling": False,
"horizontal_flip": True,
"vertical_flip": True,
"poission_noise": False,
"rotation_range": 20,
"zoom_range": 2,
"contrast_range": False,
"brightness_range": False,
"gamma_shift": False,
"threshold_background_image": False,
"threshold_background_groundtruth": False,
"binarize_mask": False
},
"loss_function": "mse",
"num_classes": 1,
"image_size": None,
"calculate_uncertainty": True,
"evaluation": True
}

pipeline = GetPipeLine(configs)

pipeline.run()
```
62 changes: 33 additions & 29 deletions docs/examples/SemanticSegmentation/README.MD
@@ -38,35 +38,39 @@ path
│ ├── .
```
After the data is provided in the desired shape, you can simply use the code with this small snippet:
```
pipeline = GetPipeLine( "use_algorithm": "SemanticSegmentation",
"path": "docs/examples/SemanticSegmentation/",
"pretrained_weights": "docs/examples/SemanticSegmentation/logs/pretrained_weights_Lung_SemanticSegmentation.hdf5",
"batchsize": 1,
"iterations_over_dataset": 0,
"data_gen_args": {
"save_augmented_images": false,
"resample_images": false,
"std_normalization": false,
"feature_scaling": false,
"horizontal_flip": false,
"vertical_flip": true,
"poission_noise": false,
"rotation_range": false,
"zoom_range": false,
"contrast_range": false,
"brightness_range": false,
"gamma_shift": false,
"threshold_background_image": false,
"threshold_background_groundtruth": false,
"binarize_mask": false
},
"loss_function": "binary_crossentropy",
"num_classes": 1,
"image_size": null,
"calculate_uncertainty": false,
"evaluation": true
)

```python
from instantdl import GetPipeLine

configs = { "use_algorithm": "SemanticSegmentation", # mandatory field
"path": "docs/examples/SemanticSegmentation/", # mandatory field
"pretrained_weights": "docs/examples/SemanticSegmentation/logs/pretrained_weights_Lung_SemanticSegmentation.hdf5",
"batchsize": 1,
"iterations_over_dataset": 0,
"data_gen_args": {
"save_augmented_images": False,
"resample_images": False,
"std_normalization": False,
"feature_scaling": False,
"horizontal_flip": False,
"vertical_flip": True,
"poission_noise": False,
"rotation_range": False,
"zoom_range": False,
"contrast_range": False,
"brightness_range": False,
"gamma_shift": False,
"threshold_background_image": False,
"threshold_background_groundtruth": False,
"binarize_mask": False
},
"loss_function": "binary_crossentropy",
"num_classes": 1,
"image_size": None,
"calculate_uncertainty": False,
"evaluation": True}

pipeline = GetPipeLine(configs)

pipeline.run()
```
2 changes: 1 addition & 1 deletion instantdl/README.md
@@ -137,7 +137,7 @@ Please don't expect to achieve competitive results on these datasets, as they are
- [x] add docker for gpu and cuda toolkit
- [x] add objects instead of the main functions
- [x] add google colab
- [ ] add tests
- [x] add tests
- [ ] add thresholding for semantic segmentation
- [ ] add versions
- [ ] create automatically train and test set
2 changes: 1 addition & 1 deletion instantdl/__init__.py
@@ -4,7 +4,7 @@
from instantdl.segmentation.InstanceSegmentation import InstanceSegmentation
from instantdl.segmentation.SemanticSegmentation import SemanticSegmentation

def GetPipeLine(**configs):
def GetPipeLine(configs):
if configs["use_algorithm"] == "Classification":
pipeline = Classification(**configs)
return pipeline
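For context, the visible `Classification` branch suggests the factory simply dispatches on the mandatory `use_algorithm` field and unpacks the dict into the chosen pipeline class. A hedged sketch of an equivalent table-based dispatch — not the actual file, which is truncated above, and assuming all four classes accept the same keyword arguments and are imported as shown at the top of `instantdl/__init__.py`:

```python
def GetPipeLine(configs):
    # Map the mandatory "use_algorithm" field to a pipeline class, then
    # unpack the configs as keyword arguments, mirroring the branch above.
    pipelines = {
        "Classification": Classification,
        "Regression": Regression,
        "InstanceSegmentation": InstanceSegmentation,
        "SemanticSegmentation": SemanticSegmentation,
    }
    return pipelines[configs["use_algorithm"]](**configs)
```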
2 changes: 1 addition & 1 deletion instantdl/classification/classification.py
@@ -124,7 +124,7 @@ def train_model(self, model,TrainingDataGenerator,ValidationDataGenerator , step
- Checkpoints: Save model after each epoch if the validation loss has improved
- Tensorboard: Monitor training live with tensorboard. Start tensorboard in terminal with: tensorboard --logdir=/path_to/logs
'''
early_stopping = EarlyStopping(monitor='val_loss', patience=25, mode='auto', verbose=0)
early_stopping = EarlyStopping(monitor='val_loss', patience=5, mode='auto', verbose=0)
datasetname = self.path.rsplit("/",1)[1]
checkpoint_filepath = (self.path + "/logs" + "/pretrained_weights" + datasetname + ".hdf5") #.{epoch:02d}.hdf5")
os.makedirs(os.getcwd() + (self.path + "/logs"), exist_ok=True)
3 changes: 1 addition & 2 deletions instantdl/config.json
@@ -1,5 +1,4 @@
{
"use_algorithm": "SemanticSegmentation",
"path": "/home/dominik/Documents/InstantDL/Vessel/",
"loss_function": "mse"
"path": "/Users/dominik.waibel/Desktop/InstantDL/Multi-organ_Semantic_Segmentation-2/"
}
2 changes: 2 additions & 0 deletions instantdl/evaluation/Utils_data_evaluation.py
@@ -120,6 +120,8 @@ def prepare_data_for_evaluation(root_dir, max_images):
predictions.append(resize(prediction, (256,256)))
logging.info("pred %s" % np.shape(predictions))
logging.info("gt %s" % np.shape(groundtruth))
predictions = np.array(predictions)
groundtruth = np.array(groundtruth)
abs_errormap_norm, rel_errormap_norm = calcerrormap(predictions, groundtruth)

else:
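Why the added `np.array(...)` conversions matter: `predictions` and `groundtruth` are built up as Python lists of 2D arrays, and lists do not support the elementwise arithmetic an error map needs. A small self-contained illustration, with shapes chosen to match the `resize(..., (256, 256))` above:

```python
import numpy as np

predictions = [np.zeros((256, 256)), np.ones((256, 256))]  # list of 2D arrays
groundtruth = [np.ones((256, 256)), np.ones((256, 256))]

# predictions - groundtruth  # TypeError: lists don't support elementwise subtraction
error = np.array(predictions) - np.array(groundtruth)  # stacks to shape (2, 256, 256)
print(error.shape)  # (2, 256, 256)
```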
6 changes: 3 additions & 3 deletions instantdl/main.py
@@ -12,12 +12,12 @@
import logging
from keras import backend as K

def start_learning( **configs):
def start_learning(configs):

logging.info("Start learning")
logging.info(configs["use_algorithm"])

pipeline = GetPipeLine(**configs)
pipeline = GetPipeLine(configs)

pipeline.run()
K.clear_session()
@@ -70,4 +70,4 @@ def start_learning( **configs):
else:
configs["pretrained_weights"] = None

start_learning( **configs)
start_learning(configs)
5 changes: 2 additions & 3 deletions instantdl/segmentation/InstanceSegmentation.py
@@ -41,7 +41,7 @@ def __init__( self,

def run(self):
'''
Initialize a model for instance segmentation
Initialize a model for instance segmentation
'''
UseResnet = 50

@@ -113,7 +113,7 @@ def run(self):
else:
model.load_weights(weights_path, by_name=True)
tensorboard = TensorBoard(log_dir="logs/" + self.path + "/" + format(time.time())) # , update_freq='batch')
custom_callbacks = [EarlyStopping(monitor='val_loss', min_delta=0, patience=20, verbose=0, mode='auto'), tensorboard]
custom_callbacks = [EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0, mode='auto'), tensorboard]
if self.iterations_over_dataset > 0:
print("Start train")
train(model, dataset, trainsubset, VAL_IMAGE_IDS, self.iterations_over_dataset, custom_callbacks)
@@ -131,4 +131,3 @@ def run(self):
segmentation_regression_evaluation(self.path)

model = None

2 changes: 1 addition & 1 deletion instantdl/segmentation/Regression.py
@@ -150,7 +150,7 @@ def train_model(self, model,TrainingDataGenerator,ValidationDataGenerator , step
- Checkpoints: Save model after each epoch if the validation loss has improved
- Tensorboard: Monitor training live with tensorboard. Start tensorboard in terminal with: tensorboard --logdir=/path_to/logs
'''
early_stopping = EarlyStopping(monitor='val_loss', patience=25, mode='auto', verbose=0)
early_stopping = EarlyStopping(monitor='val_loss', patience=5, mode='auto', verbose=0)
datasetname = self.path.rsplit("/",1)[1]
checkpoint_filepath = (self.path + "/logs" + "/pretrained_weights" + datasetname + ".hdf5") #.{epoch:02d}.hdf5")
os.makedirs(os.getcwd()+ (self.path + "/logs"), exist_ok=True)
2 changes: 1 addition & 1 deletion instantdl/segmentation/SemanticSegmentation.py
@@ -148,7 +148,7 @@ def train_model(self, model,TrainingDataGenerator,ValidationDataGenerator , step
- Checkpoints: Save model after each epoch if the validation loss has improved
- Tensorboard: Monitor training live with tensorboard. Start tensorboard in terminal with: tensorboard --logdir=/path_to/logs
'''
early_stopping = EarlyStopping(monitor='val_loss', patience=25, mode='auto', verbose=0)
early_stopping = EarlyStopping(monitor='val_loss', patience=5, mode='auto', verbose=0)
datasetname = self.path.rsplit("/",1)[1]
checkpoint_filepath = (self.path + "/logs" + "/pretrained_weights" + datasetname + ".hdf5") #.{epoch:02d}.hdf5")
os.makedirs(os.getcwd() + (self.path + "/logs"), exist_ok=True)
2 changes: 1 addition & 1 deletion tests/segmentation/InstanceSegmentation_test.py
@@ -10,7 +10,7 @@
from instantdl.utils import *
def test_InstanceSegmentation():
X_true = np.ones((64, 64, 3))
X_true[10:20, 20:30] = 255
Y_true = np.zeros((64, 64))
Y_true[10:20,20:30] = 1
