From fd30ae0bd783e668d2ca13a8809cfdee6b89029d Mon Sep 17 00:00:00 2001 From: Ayla Khan Date: Tue, 11 Apr 2017 16:10:12 -0600 Subject: [PATCH 01/10] Add MHA file type to exporters. --- src/Interface/Application/LayerIOFunctions.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Interface/Application/LayerIOFunctions.cc b/src/Interface/Application/LayerIOFunctions.cc index d474b12a2..378dbbb02 100644 --- a/src/Interface/Application/LayerIOFunctions.cc +++ b/src/Interface/Application/LayerIOFunctions.cc @@ -390,6 +390,7 @@ void LayerIOFunctions::ExportLayer( QMainWindow* main_window ) filters["TIFF files"] = ".tiff"; filters["PNG files"] = ".png"; filters["MRC files"] = ".mrc"; + filters["MHA files"] = ".mha"; filters["Matlab files"] = ".mat"; size_t counter = 1; From 1ed9282bb9eb28fc34fee4279b7ee329c690639d Mon Sep 17 00:00:00 2001 From: Ayla Khan Date: Tue, 11 Apr 2017 16:11:03 -0600 Subject: [PATCH 02/10] Cleanup CMake variables. --- Superbuild/ITKExternal.cmake | 9 --------- 1 file changed, 9 deletions(-) diff --git a/Superbuild/ITKExternal.cmake b/Superbuild/ITKExternal.cmake index da15afbb8..0e589c7d0 100644 --- a/Superbuild/ITKExternal.cmake +++ b/Superbuild/ITKExternal.cmake @@ -49,15 +49,6 @@ SET(itk_CACHE_ARGS "-DCMAKE_INSTALL_PREFIX:PATH=" "-DITK_BUILD_DEFAULT_MODULES:BOOL=OFF" "-DITKV3_COMPATIBILITY:BOOL=ON" -# "-DITK_INSTALL_NO_LIBRARIES:BOOL=ON" -# "-DITK_INSTALL_NO_DEVELOPMENT:BOOL=ON" -# "-DITK_INSTALL_NO_RUNTIME:BOOL=ON" -# "-DITK_INSTALL_NO_DOCUMENTATION:BOOL=ON" -# "-DGDCM_INSTALL_NO_DEVELOPMENT:BOOL=ON" -# "-DGDCM_INSTALL_NO_RUNTIME:BOOL=ON" -# "-DGDCM_INSTALL_NO_DOCUMENTATION:BOOL=ON" -# "-DHDF5_INSTALL_NO_DEVELOPMENT:BOOL=ON" -# "-DModule_ITKReview:BOOL=ON" "-DModule_ITKRegistrationCommon:BOOL=ON" "-DModule_ITKSmoothing:BOOL=ON" "-DModule_ITKAnisotropicSmoothing:BOOL=ON" From fac1b6741d83b7e3372c53442c14372346dfd224 Mon Sep 17 00:00:00 2001 From: Ayla Khan Date: Fri, 14 Apr 2017 15:14:56 -0600 Subject: [PATCH 03/10] Save batch script. 
--- scripts/seg3d_filters.py | 67 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 scripts/seg3d_filters.py diff --git a/scripts/seg3d_filters.py b/scripts/seg3d_filters.py new file mode 100644 index 000000000..f7b52fe99 --- /dev/null +++ b/scripts/seg3d_filters.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python +import os, sys, types, threading + +# how to call from Seg3D scripts directory: +#exec(open('/Users/ayla/devel/Seg3D_DHS/bin/Seg3D/test_script.py').read()) + +import seg3d2 + +class MyThread(threading.Thread): + def __init__(self, layerID, timeout=2.0): + self.layerID = layerID + self.TIMEOUT = timeout + self.condition = threading.Condition() + threading.Thread.__init__(self) + def run(self): + stateIDData = self.layerID + "::data" + layerStatus = get(stateid=stateIDData) + #print(self.layerID, " ", layerStatus) + with self.condition: + self.condition.wait_for(lambda: "available" == get(stateid=stateIDData), timeout=self.TIMEOUT) + #print("Layer {} done processing".format(self.layerID)) + +def update_filepath(filename, strings): + newfilename = filename + for key, value in strings.items(): + newfilename = newfilename.replace(key, value) + return newfilename + +def wait_on_layer(layer): + thread = MyThread(layer, 1.0) + thread.start() + thread.join() + +truth_region_files = [os.path.join(truth_region, f) for f in os.listdir(truth_region) if os.path.isfile(os.path.join(truth_region, f)) and f.lower().endswith('.png')] +im_gray_files = [os.path.join(im_gray, f) for f in os.listdir(im_gray) if os.path.isfile(os.path.join(im_gray, f)) and f.lower().endswith('.mha')] + +# ground truth segmentations: +# import -> crop -> export +for f in truth_region_files: + layers = importlayer(filename="{}".format(f),importer='[ITK Importer]',mode='mask') + print(layers[0]) + wait_on_layer(layers[0]) + layers = crop(layerids="{}".format(layers[0]),origin='[-0.0735295,-0.0735295,-0.5]',size='[150.515,129.927,1]',replace='false') + print(layers[0]) + wait_on_layer(layers[0]) + export_file = update_filepath(f, { truth_region: truth_cropped_region }) + print(export_file) + retval = exportlayer(layer="{}".format(layers[0]),file_path='{}'.format(export_file),extension='.mha',exporter='[ITK Mask Exporter]'); + +# input data images: +# import -> crop -> gradient anisotropic diffusion -> gradient magnitude (boundaries) -> export +for f in im_gray_files: + layers = importlayer(filename="{}".format(f),importer='[ITK Importer]',mode='data') + print(layers[0]) + wait_on_layer(layers[0]) + layers = crop(layerids="{}".format(layers[0]),origin='[-0.0735295,-0.0735295,-0.5]',size='[150.515,129.927,1]',replace='false') + print(layers[0]) + wait_on_layer(layers[0]) + layers = gradientanisotropicdiffusionfilter(layerid=="{}".format(layers[0])preserve_data_format='true',replace='false',iterations='20',sensitivity='0.25'); + print(layers[0]) + wait_on_layer(layers[0]) + layers = gradientmagnitudefilter(layerid="{}".format(layers[0]),replace='false',preserve_data_format='true'); + print(layers[0]) + wait_on_layer(layers[0]) + export_file = update_filepath(f, { im_gray: im_cropped_gray }) + print(export_file) + retval = exportlayer(layer="{}".format(layers[0]),file_path='{}'.format(export_file),extension='.mha',exporter='[ITK Data Exporter]'); From b36dcb6e3c18d36306cf7f7ad0d5b6a0e9a948f5 Mon Sep 17 00:00:00 2001 From: Ayla Khan Date: Fri, 14 Apr 2017 15:20:09 -0600 Subject: [PATCH 04/10] Add null or empty string check. 
--- scripts/seg3d_filters.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/scripts/seg3d_filters.py b/scripts/seg3d_filters.py index f7b52fe99..e90e7d787 100644 --- a/scripts/seg3d_filters.py +++ b/scripts/seg3d_filters.py @@ -31,6 +31,14 @@ def wait_on_layer(layer): thread.start() thread.join() +# since this is a batch script, truth_region and im_gray need to be set beforehand +# i.e. set as command via socket +if not truth_region: + raise ValueError + +if not im_gray: + raise ValueError + truth_region_files = [os.path.join(truth_region, f) for f in os.listdir(truth_region) if os.path.isfile(os.path.join(truth_region, f)) and f.lower().endswith('.png')] im_gray_files = [os.path.join(im_gray, f) for f in os.listdir(im_gray) if os.path.isfile(os.path.join(im_gray, f)) and f.lower().endswith('.mha')] From 73f86b8bdf38d74b538d1fba1b605f668c5bbde7 Mon Sep 17 00:00:00 2001 From: Ayla Khan Date: Fri, 14 Apr 2017 16:23:00 -0600 Subject: [PATCH 05/10] Allow a single PNG image to be imported as a volume with Z depth=1. --- src/Application/LayerIO/ITKLayerImporter.cc | 35 +++++++++++++------ src/Application/LayerIO/ITKLayerImporter.h | 2 +- .../LayerIO/ITKSeriesLayerImporter.cc | 30 ++++++++-------- .../LayerIO/ITKSeriesLayerImporter.h | 2 +- 4 files changed, 43 insertions(+), 26 deletions(-) diff --git a/src/Application/LayerIO/ITKLayerImporter.cc b/src/Application/LayerIO/ITKLayerImporter.cc index e4fc34678..4ead9ae00 100644 --- a/src/Application/LayerIO/ITKLayerImporter.cc +++ b/src/Application/LayerIO/ITKLayerImporter.cc @@ -31,6 +31,7 @@ // ITK includes #include +#include #include #include #include @@ -127,6 +128,11 @@ class ITKLayerImporterPrivate public: // file type detection helpers + bool detect_png( const std::string& extension ) + { + return extension == ".png"; + } + bool detect_tiff( const std::string& extension ) { return ( extension == ".tif" || extension == ".tiff" || extension == ".stk" ); @@ -161,35 +167,35 @@ class ITKLayerImporterPrivate Core::DataType ITKLayerImporterPrivate::convert_data_type( std::string& type ) { // Convert ITK types into our enum - if( type == "unsigned_char" ) + if ( type == "unsigned_char" ) { return Core::DataType::UCHAR_E; } - else if( type == "char" ) + else if ( type == "char" ) { return Core::DataType::CHAR_E; } - else if( type == "unsigned_short" ) + else if ( type == "unsigned_short" ) { return Core::DataType::USHORT_E; } - else if( type == "short" ) + else if ( type == "short" ) { return Core::DataType::SHORT_E; } - else if( type == "unsigned_int" ) + else if ( type == "unsigned_int" ) { return Core::DataType::UINT_E; } - else if( type == "int" ) + else if ( type == "int" ) { return Core::DataType::INT_E; } - else if( type == "float" ) + else if ( type == "float" ) { return Core::DataType::FLOAT_E; } - else if( type == "double" ) + else if ( type == "double" ) { return Core::DataType::DOUBLE_E; } @@ -269,7 +275,12 @@ bool ITKLayerImporterPrivate::read_header() std::tie( extension, base ) = Core::GetFullExtension( full_filename ); // Set file type and scan the file for data type and transform - if ( detect_tiff(extension) ) + if ( detect_png(extension) ) + { + this->file_type_ = "png"; + return this->scan_simple_volume< itk::PNGImageIO >(); + } + else if ( detect_tiff(extension) ) { this->file_type_ = "tiff"; return this->scan_simple_volume< itk::TIFFImageIO >(); @@ -411,7 +422,11 @@ bool ITKLayerImporterPrivate::read_data() std::string extension, base; std::tie( extension, base ) = Core::GetFullExtension( full_filename ); - if ( 
detect_tiff(extension) ) + if ( detect_png(extension) ) + { + return this->import_simple_volume(); + } + else if ( detect_tiff(extension) ) { return this->import_simple_volume(); } diff --git a/src/Application/LayerIO/ITKLayerImporter.h b/src/Application/LayerIO/ITKLayerImporter.h index c31d92c70..aa7012c9e 100644 --- a/src/Application/LayerIO/ITKLayerImporter.h +++ b/src/Application/LayerIO/ITKLayerImporter.h @@ -58,7 +58,7 @@ typedef boost::shared_ptr ITKLayerImporterPrivateHandle class ITKLayerImporter : public LayerSingleFileImporter { SEG3D_IMPORTER_TYPE( "ITK Importer",".lsm;.LSM;" - ".tiff;.tif;.TIFF;.TIF;.stk;.STK;" + ".png;.tiff;.tif;.TIFF;.TIF;.stk;.STK;" ".nii;.nii.gz;.img;.hdr;" ".vtk;.VTK;" ".mha;.mhd", 5 ) diff --git a/src/Application/LayerIO/ITKSeriesLayerImporter.cc b/src/Application/LayerIO/ITKSeriesLayerImporter.cc index 5d61958b3..876956b55 100644 --- a/src/Application/LayerIO/ITKSeriesLayerImporter.cc +++ b/src/Application/LayerIO/ITKSeriesLayerImporter.cc @@ -45,6 +45,7 @@ #include #include #include +#include // Application includes #include @@ -120,35 +121,35 @@ class ITKSeriesLayerImporterPrivate Core::DataType ITKSeriesLayerImporterPrivate::convert_data_type( std::string& type ) { // Convert ITK types into our enum - if( type == "unsigned_char" ) + if ( type == "unsigned_char" ) { return Core::DataType::UCHAR_E; } - else if( type == "char" ) + else if ( type == "char" ) { return Core::DataType::CHAR_E; } - else if( type == "unsigned_short" ) + else if ( type == "unsigned_short" ) { return Core::DataType::USHORT_E; } - else if( type == "short" ) + else if ( type == "short" ) { return Core::DataType::SHORT_E; } - else if( type == "unsigned_int" ) + else if ( type == "unsigned_int" ) { return Core::DataType::UINT_E; } - else if( type == "int" ) + else if ( type == "int" ) { return Core::DataType::INT_E; } - else if( type == "float" ) + else if ( type == "float" ) { return Core::DataType::FLOAT_E; } - else if( type == "double" ) + else if ( type == "double" ) { return Core::DataType::DOUBLE_E; } @@ -167,29 +168,30 @@ bool ITKSeriesLayerImporterPrivate::read_header() // Extract the extension from the file name and use this to define // which importer to use. 
boost::filesystem::path full_filename( this->importer_->get_filename() ); - std::string extension = boost::to_lower_copy( boost::filesystem::extension( full_filename ) ); + std::string extension, base; + std::tie( extension, base ) = Core::GetFullExtension( full_filename ); - if( extension == ".png" ) + if ( extension == ".png" ) { this->file_type_ = "png"; return this->scan_simple_series< itk::PNGImageIO >(); } - else if( extension == ".tif" || extension == ".tiff" ) + else if ( extension == ".tif" || extension == ".tiff" ) { this->file_type_ = "tiff"; return this->scan_simple_series< itk::TIFFImageIO >(); } - else if( extension == ".jpg" || extension == ".jpeg" ) + else if ( extension == ".jpg" || extension == ".jpeg" ) { this->file_type_ = "jpeg"; return this->scan_simple_series< itk::JPEGImageIO >(); } - else if( extension == ".bmp" ) + else if ( extension == ".bmp" ) { this->file_type_ = "bitmap"; return this->scan_simple_series< itk::BMPImageIO >(); } - else if( extension == ".vtk" ) + else if ( extension == ".vtk" ) { this->file_type_ = "VTK"; return this->scan_simple_series< itk::VTKImageIO >(); diff --git a/src/Application/LayerIO/ITKSeriesLayerImporter.h b/src/Application/LayerIO/ITKSeriesLayerImporter.h index 5d13c51cf..311f72980 100644 --- a/src/Application/LayerIO/ITKSeriesLayerImporter.h +++ b/src/Application/LayerIO/ITKSeriesLayerImporter.h @@ -57,7 +57,7 @@ class ITKSeriesLayerImporter : public LayerFileSeriesImporter /// NOTE: Since this one accepts any type, the upper case version are ignored, as they /// do not fit on the line SEG3D_IMPORTER_TYPE( "ITK FileSeries Importer","*;" - ".dcm;.dicom;.ima;.tiff;.tif;.png;.jpeg;.jpg;.bmp;.vtk", 5 ) + ".dcm;.dicom;.ima;.tiff;.tif;.TIFF;.TIF;.png;.jpeg;.jpg;.bmp;.vtk", 5 ) // -- Constructor/Destructor -- public: From ecd9ce4fc1a994c0a6354d5147f711d428a968d2 Mon Sep 17 00:00:00 2001 From: Ayla Khan Date: Mon, 17 Apr 2017 13:08:21 -0600 Subject: [PATCH 06/10] Save batch processing scripts. 
--- scripts/batch_processing.py | 151 ++++++++++++++++++++++++++++++++++++ scripts/seg3d_filters.py | 88 +++++++++++++-------- 2 files changed, 206 insertions(+), 33 deletions(-) create mode 100644 scripts/batch_processing.py diff --git a/scripts/batch_processing.py b/scripts/batch_processing.py new file mode 100644 index 000000000..331fa5f7d --- /dev/null +++ b/scripts/batch_processing.py @@ -0,0 +1,151 @@ +#!/usr/bin/env python +import os, sys, shutil, socket, getopt, subprocess + +paths = { + 'data': '/Users/ayla/devel/dhs_batch/U308', + 'tools': '/Users/ayla/devel/Seg3D_DHS/tools', + 'hmtscript': '/Users/ayla/devel/Seg3D_DHS' +} + +def seg3d_connect(port, size): + clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + clientsocket.connect(("localhost", port)) + data = clientsocket.recv(size) + print "Received: [{}]".format(data) + return clientsocket + +def seg3d_disconnect(clientsocket, size): + clientsocket.send("exit\r\n") + data = clientsocket.recv(size) + clientsocket.close(); + print "Received: [{}]".format(data) + + +def seg3d_command(clientsocket, size, command): + clientsocket.send(command) + data = clientsocket.recv(size) + #print "Received: [{}]".format(data) + return data + +def ensure_directories(directories): + for d in directories: + if not os.path.exists(d): + os.makedirs(d) + +def update_filepath(filename, strings): + newfilename = filename + for key, value in strings.items(): + newfilename = newfilename.replace(key, value) + #print(newfilename) + return newfilename + +def main(argv): + size = 1024 + port = 9000 + + try: + opts, args = getopt.getopt(argv, "p:", ["port="]) + + except getopt.GetoptError as e: + print 'Exception: {0}.'.format(e.msg) + print 'batch_processing.py [-p,--port=]' + sys.exit(2) + + for opt, arg in opts: + if opt in ("-s", "--port"): + port = int(arg) + + im = os.path.join(paths['data'], 'im') + im_raw = os.path.join(im, 'raw') + im_gray = os.path.join(im, 'gray') + # expecting TIFF files + im_raw_files = [os.path.join(im_raw, f) for f in os.listdir(im_raw) if os.path.isfile(os.path.join(im_raw, f)) and f.lower().endswith('.tif')] + + # test + im_raw_test = os.path.join(im, 'raw_test') + im_gray_test = os.path.join(im, 'gray_test') + # expecting TIFF files + im_raw_test_files = [os.path.join(im_raw_test, f) for f in os.listdir(im_raw_test) if os.path.isfile(os.path.join(im_raw_test, f)) and f.lower().endswith('.tif')] + + # setup directories + im_cropped = os.path.join(paths['data'], 'im_cropped') + im_cropped_gray = os.path.join(im_cropped, 'gray') + im_cropped_chm = os.path.join(im_cropped, 'chm') + im_cropped_blur = os.path.join(im_cropped, 'chm-blur') + # test + im_cropped_gray_test = os.path.join(im_cropped, 'gray_test') + im_cropped_chm_test = os.path.join(im_cropped, 'chm_test') + im_cropped_blur_test = os.path.join(im_cropped, 'chm-blur_test') + + truth = os.path.join(paths['data'], 'truth') + truth_raw = os.path.join(truth, 'raw') + truth_region = os.path.join(truth, 'region') + truth_raw_files = [os.path.join(truth_raw, f) for f in os.listdir(truth_raw) if os.path.isfile(os.path.join(truth_raw, f)) and f.lower().endswith('.png')] + + # setup directories + truth_cropped = os.path.join(paths['data'], 'truth_cropped') + truth_cropped_region = os.path.join(truth_cropped, 'region') + # TODO: not sure if needed + #truth_cropped_gray = os.path.join(truth_cropped, 'gray') + + ensure_directories( (im_gray, im_cropped, im_cropped_gray, im_cropped_chm, im_cropped_blur, im_gray_test, im_cropped_gray_test, im_cropped_chm_test, 
im_cropped_blur_test, truth_region, truth_cropped, truth_cropped_region) ) + + rgbToGray2D_tool = os.path.join(paths['tools'], 'RGBToGray2D') + padImage_tool = os.path.join(paths['tools'], 'PadImage') + blur_image_tool = os.path.join(paths['tools'], 'blur_image') + + # clean up filenames + for f in truth_raw_files: + # shorten filename, remove spaces, change directory + newfile = update_filepath(f, { 'U3O8_': '', 'Particle ': 'p', truth_raw: truth_region }) + os.rename(f, newfile) + + raw_files = im_raw_files + im_raw_test_files + + # Raw data RGB files to grayscale (ITK tool) - RGBToGray2D + for f in raw_files: + # shorten filename, remove spaces, change directory + newfile = update_filepath(f, { 'U3O8_': '', 'Particle ': 'p', im_raw_test: im_gray_test, im_raw: im_gray, '.tif': '.mha' }) + subprocess.check_call([rgbToGray2D_tool, f, newfile, 'float']) + + clientsocket = seg3d_connect(port, size) + # add some variables to Seg3D's interpreter + retval = seg3d_command(clientsocket, size, "im_gray='{}'\r\n".format(im_gray)) + retval = seg3d_command(clientsocket, size, "im_cropped_gray='{}'\r\n".format(im_cropped_gray)) + retval = seg3d_command(clientsocket, size, "im_cropped_chm='{}'\r\n".format(im_cropped_chm)) + retval = seg3d_command(clientsocket, size, "im_gray_test='{}'\r\n".format(im_gray_test)) + retval = seg3d_command(clientsocket, size, "im_cropped_gray_test='{}'\r\n".format(im_cropped_gray_test)) + retval = seg3d_command(clientsocket, size, "im_cropped_chm_test='{}'\r\n".format(im_cropped_chm_test)) + retval = seg3d_command(clientsocket, size, "truth_region='{}'\r\n".format(truth_region)) + retval = seg3d_command(clientsocket, size, "truth_cropped_region='{}'\r\n".format(truth_cropped_region)) + #retval = seg3d_command(clientsocket, size, "exec(open('/Users/ayla/devel/Seg3D_DHS/scripts/seg3d_filters.py').read())\r\n") + seg3d_disconnect(clientsocket, size) + + # cleanup + truth_cropped_files = [os.path.join(truth_cropped_region, f) for f in os.listdir(truth_cropped_region) if os.path.isfile(os.path.join(truth_cropped_region, f)) and f.lower().endswith('.png')] + for f in truth_cropped_files: + newfile = update_filepath(f, { 'Crop_': '', '_label-00': '' }) + os.rename(f, newfile) + + boundary_files = [os.path.join(im_cropped_chm, f) for f in os.listdir(im_cropped_chm) if os.path.isfile(os.path.join(im_cropped_chm, f)) and f.lower().endswith('.mha')] + boundary_files_test = [os.path.join(im_cropped_chm_test, f) for f in os.listdir(im_cropped_chm_test) if os.path.isfile(os.path.join(im_cropped_chm_test, f)) and f.lower().endswith('.mha')] + boundary_files_all = boundary_files + boundary_files_test + + # Boundary files to blurred boundary files + for f in boundary_files_all: + # shorten filename, remove spaces, change directory + newfile = update_filepath(f, { im_cropped_chm: im_cropped_blur }) + subprocess.check_call([blur_image_tool, "--inputImage={}".format(f), "--outputImage={}".format(newfile), "--sigma=1", "--kernelWidth=3"]) + + # Call GLIA script + subprocess.check_call(['python', '/Users/ayla/devel/Seg3D_DHS/hmt_test_batch.py', im_cropped_chm, im_cropped_chm_test, im_cropped_blur, im_cropped_blur_test, truth_cropped_region ]) + + # Pad to original dimensions + # ./PadImage gm_rep1_A_004.png gm_rep1_A_004_padded.png 0 0 0 59 0 + #for f in raw_files: + # # shorten filename, remove spaces, change directory + # newfile = + # subprocess.check_call([padImage_tool f, newfile, '0', '0', '0', '59', '0']) + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git 
a/scripts/seg3d_filters.py b/scripts/seg3d_filters.py index e90e7d787..4ade76f55 100644 --- a/scripts/seg3d_filters.py +++ b/scripts/seg3d_filters.py @@ -6,19 +6,39 @@ import seg3d2 +# since this is a batch script, truth_region, etc. need to be set beforehand +# i.e. set as command via socket +if not truth_region: + raise ValueError +if not truth_cropped_region: + raise ValueError +if not im_gray: + raise ValueError +if not im_cropped_gray: + raise ValueError +if not im_cropped_chm: + raise ValueError +if not im_gray_test: + raise ValueError +if not im_cropped_gray_test: + raise ValueError +if not im_cropped_chm_test: + raise ValueError + class MyThread(threading.Thread): - def __init__(self, layerID, timeout=2.0): + def __init__(self, layerID, timeout=5.0): self.layerID = layerID self.TIMEOUT = timeout self.condition = threading.Condition() threading.Thread.__init__(self) def run(self): + availableState = "available" stateIDData = self.layerID + "::data" layerStatus = get(stateid=stateIDData) #print(self.layerID, " ", layerStatus) with self.condition: - self.condition.wait_for(lambda: "available" == get(stateid=stateIDData), timeout=self.TIMEOUT) - #print("Layer {} done processing".format(self.layerID)) + while get(stateid=stateIDData) != availableState: + self.condition.wait_for(lambda: get(stateid=stateIDData) == availableState, timeout=self.TIMEOUT) def update_filepath(filename, strings): newfilename = filename @@ -26,50 +46,52 @@ def update_filepath(filename, strings): newfilename = newfilename.replace(key, value) return newfilename -def wait_on_layer(layer): - thread = MyThread(layer, 1.0) +def wait_on_layer(layer, timeout=2.0): + thread = MyThread(layer, timeout) thread.start() thread.join() -# since this is a batch script, truth_region and im_gray need to be set beforehand -# i.e. 
set as command via socket -if not truth_region: - raise ValueError - -if not im_gray: - raise ValueError - truth_region_files = [os.path.join(truth_region, f) for f in os.listdir(truth_region) if os.path.isfile(os.path.join(truth_region, f)) and f.lower().endswith('.png')] im_gray_files = [os.path.join(im_gray, f) for f in os.listdir(im_gray) if os.path.isfile(os.path.join(im_gray, f)) and f.lower().endswith('.mha')] +im_gray_files_test = [os.path.join(im_gray_test, f) for f in os.listdir(im_gray_test) if os.path.isfile(os.path.join(im_gray_test, f)) and f.lower().endswith('.mha')] + +gray_files=im_gray_files+im_gray_files_test # ground truth segmentations: # import -> crop -> export for f in truth_region_files: - layers = importlayer(filename="{}".format(f),importer='[ITK Importer]',mode='mask') - print(layers[0]) + layers = importlayer(filename="{}".format(f),importer='[ITK Importer]',mode='single_mask') wait_on_layer(layers[0]) - layers = crop(layerids="{}".format(layers[0]),origin='[-0.0735295,-0.0735295,-0.5]',size='[150.515,129.927,1]',replace='false') - print(layers[0]) + layers = crop(layerids="{}".format(layers[0]),origin='[-0.5,-0.5,-0.5]',size='[1024,883.5,1]',replace='true') wait_on_layer(layers[0]) - export_file = update_filepath(f, { truth_region: truth_cropped_region }) - print(export_file) - retval = exportlayer(layer="{}".format(layers[0]),file_path='{}'.format(export_file),extension='.mha',exporter='[ITK Mask Exporter]'); + retval = exportsegmentation(layers="{}".format(layers[0]),file_path='{}'.format(truth_cropped_region),mode='single_mask',extension='.png') + if not retval: + print("exportsegmentation failed") # input data images: # import -> crop -> gradient anisotropic diffusion -> gradient magnitude (boundaries) -> export -for f in im_gray_files: +for f in gray_files: + # crop and export grayscale raw input images layers = importlayer(filename="{}".format(f),importer='[ITK Importer]',mode='data') - print(layers[0]) - wait_on_layer(layers[0]) - layers = crop(layerids="{}".format(layers[0]),origin='[-0.0735295,-0.0735295,-0.5]',size='[150.515,129.927,1]',replace='false') - print(layers[0]) wait_on_layer(layers[0]) - layers = gradientanisotropicdiffusionfilter(layerid=="{}".format(layers[0])preserve_data_format='true',replace='false',iterations='20',sensitivity='0.25'); - print(layers[0]) + layers = transform(layerids="{}".format(layers[0]),origin='[0,0,0]',spacing='[1,1,1]',replace='true') wait_on_layer(layers[0]) - layers = gradientmagnitudefilter(layerid="{}".format(layers[0]),replace='false',preserve_data_format='true'); - print(layers[0]) - wait_on_layer(layers[0]) - export_file = update_filepath(f, { im_gray: im_cropped_gray }) - print(export_file) - retval = exportlayer(layer="{}".format(layers[0]),file_path='{}'.format(export_file),extension='.mha',exporter='[ITK Data Exporter]'); + layers = crop(layerids="{}".format(layers[0]),origin='[-0.5,-0.5,-0.5]',size='[1024,883.5,1]',replace='true') + layer = layers[0] + wait_on_layer(layer) + export_file = update_filepath(f, { im_gray_test: im_cropped_gray_test, im_gray: im_cropped_gray }) + retval = exportlayer(layer="{}".format(layer),file_path='{}'.format(export_file),extension='.mha',exporter='[ITK Data Exporter]') + if not retval: + print("exportlayer failed") + + # get boundaries + #layer = gradientanisotropicdiffusionfilter(layerid="{}".format(layer),preserve_data_format='true',replace='true',iterations='20',sensitivity='0.25') + #wait_on_layer(layer, 5.0) + layer = 
gradientanisotropicdiffusionfilter(layerid="{}".format(layer),preserve_data_format='true',replace='true',iterations='2',sensitivity='0.25') + wait_on_layer(layer) + layer = gradientmagnitudefilter(layerid="{}".format(layer),replace='true',preserve_data_format='true') + wait_on_layer(layer) + export_file = update_filepath(f, { im_gray_test: im_cropped_chm_test, im_gray: im_cropped_chm }) + retval = exportlayer(layer="{}".format(layer),file_path='{}'.format(export_file),extension='.mha') + if not retval: + print("exportlayer failed") From c6ce124dab5e99263f2a8de96d4866f76fea7019 Mon Sep 17 00:00:00 2001 From: Ayla Khan Date: Mon, 17 Apr 2017 15:34:09 -0600 Subject: [PATCH 07/10] Save changes to batch processing scripts. --- scripts/batch_processing.py | 45 +++---- scripts/hmt_test_batch.py | 262 ++++++++++++++++++++++++++++++++++++ scripts/seg3d_filters.py | 17 +-- 3 files changed, 286 insertions(+), 38 deletions(-) create mode 100644 scripts/hmt_test_batch.py diff --git a/scripts/batch_processing.py b/scripts/batch_processing.py index 331fa5f7d..38224a8d2 100644 --- a/scripts/batch_processing.py +++ b/scripts/batch_processing.py @@ -57,7 +57,8 @@ def main(argv): im = os.path.join(paths['data'], 'im') im_raw = os.path.join(im, 'raw') - im_gray = os.path.join(im, 'gray') + im_gray_training = os.path.join(im, 'gray') + im_gray_all = os.path.join(im, 'gray_all') # expecting TIFF files im_raw_files = [os.path.join(im_raw, f) for f in os.listdir(im_raw) if os.path.isfile(os.path.join(im_raw, f)) and f.lower().endswith('.tif')] @@ -72,10 +73,6 @@ def main(argv): im_cropped_gray = os.path.join(im_cropped, 'gray') im_cropped_chm = os.path.join(im_cropped, 'chm') im_cropped_blur = os.path.join(im_cropped, 'chm-blur') - # test - im_cropped_gray_test = os.path.join(im_cropped, 'gray_test') - im_cropped_chm_test = os.path.join(im_cropped, 'chm_test') - im_cropped_blur_test = os.path.join(im_cropped, 'chm-blur_test') truth = os.path.join(paths['data'], 'truth') truth_raw = os.path.join(truth, 'raw') @@ -88,7 +85,7 @@ def main(argv): # TODO: not sure if needed #truth_cropped_gray = os.path.join(truth_cropped, 'gray') - ensure_directories( (im_gray, im_cropped, im_cropped_gray, im_cropped_chm, im_cropped_blur, im_gray_test, im_cropped_gray_test, im_cropped_chm_test, im_cropped_blur_test, truth_region, truth_cropped, truth_cropped_region) ) + ensure_directories( (im_gray_training, im_gray_all, im_cropped, im_cropped_gray, im_cropped_chm, im_cropped_blur, im_gray_test, truth_region, truth_cropped, truth_cropped_region) ) rgbToGray2D_tool = os.path.join(paths['tools'], 'RGBToGray2D') padImage_tool = os.path.join(paths['tools'], 'PadImage') @@ -103,23 +100,22 @@ def main(argv): raw_files = im_raw_files + im_raw_test_files # Raw data RGB files to grayscale (ITK tool) - RGBToGray2D + # consolidate all data files for processing for f in raw_files: # shorten filename, remove spaces, change directory - newfile = update_filepath(f, { 'U3O8_': '', 'Particle ': 'p', im_raw_test: im_gray_test, im_raw: im_gray, '.tif': '.mha' }) + newfile = update_filepath(f, { 'U3O8_': '', 'Particle ': 'p', im_raw_test: im_gray_test, im_raw: im_gray_training, '.tif': '.mha' }) subprocess.check_call([rgbToGray2D_tool, f, newfile, 'float']) - - clientsocket = seg3d_connect(port, size) - # add some variables to Seg3D's interpreter - retval = seg3d_command(clientsocket, size, "im_gray='{}'\r\n".format(im_gray)) - retval = seg3d_command(clientsocket, size, "im_cropped_gray='{}'\r\n".format(im_cropped_gray)) - retval = 
seg3d_command(clientsocket, size, "im_cropped_chm='{}'\r\n".format(im_cropped_chm)) - retval = seg3d_command(clientsocket, size, "im_gray_test='{}'\r\n".format(im_gray_test)) - retval = seg3d_command(clientsocket, size, "im_cropped_gray_test='{}'\r\n".format(im_cropped_gray_test)) - retval = seg3d_command(clientsocket, size, "im_cropped_chm_test='{}'\r\n".format(im_cropped_chm_test)) - retval = seg3d_command(clientsocket, size, "truth_region='{}'\r\n".format(truth_region)) - retval = seg3d_command(clientsocket, size, "truth_cropped_region='{}'\r\n".format(truth_cropped_region)) - #retval = seg3d_command(clientsocket, size, "exec(open('/Users/ayla/devel/Seg3D_DHS/scripts/seg3d_filters.py').read())\r\n") - seg3d_disconnect(clientsocket, size) + shutil.copy(newfile, im_gray_all) + + # clientsocket = seg3d_connect(port, size) + # # add some variables to Seg3D's interpreter + # retval = seg3d_command(clientsocket, size, "im_gray_all='{}'\r\n".format(im_gray_all)) + # retval = seg3d_command(clientsocket, size, "im_cropped_gray='{}'\r\n".format(im_cropped_gray)) + # retval = seg3d_command(clientsocket, size, "im_cropped_chm='{}'\r\n".format(im_cropped_chm)) + # retval = seg3d_command(clientsocket, size, "truth_region='{}'\r\n".format(truth_region)) + # retval = seg3d_command(clientsocket, size, "truth_cropped_region='{}'\r\n".format(truth_cropped_region)) + # retval = seg3d_command(clientsocket, size, "exec(open('/Users/ayla/devel/Seg3D_DHS/scripts/seg3d_filters.py').read())\r\n") + # seg3d_disconnect(clientsocket, size) # cleanup truth_cropped_files = [os.path.join(truth_cropped_region, f) for f in os.listdir(truth_cropped_region) if os.path.isfile(os.path.join(truth_cropped_region, f)) and f.lower().endswith('.png')] @@ -128,24 +124,23 @@ def main(argv): os.rename(f, newfile) boundary_files = [os.path.join(im_cropped_chm, f) for f in os.listdir(im_cropped_chm) if os.path.isfile(os.path.join(im_cropped_chm, f)) and f.lower().endswith('.mha')] - boundary_files_test = [os.path.join(im_cropped_chm_test, f) for f in os.listdir(im_cropped_chm_test) if os.path.isfile(os.path.join(im_cropped_chm_test, f)) and f.lower().endswith('.mha')] - boundary_files_all = boundary_files + boundary_files_test # Boundary files to blurred boundary files - for f in boundary_files_all: + for f in boundary_files: # shorten filename, remove spaces, change directory newfile = update_filepath(f, { im_cropped_chm: im_cropped_blur }) subprocess.check_call([blur_image_tool, "--inputImage={}".format(f), "--outputImage={}".format(newfile), "--sigma=1", "--kernelWidth=3"]) # Call GLIA script - subprocess.check_call(['python', '/Users/ayla/devel/Seg3D_DHS/hmt_test_batch.py', im_cropped_chm, im_cropped_chm_test, im_cropped_blur, im_cropped_blur_test, truth_cropped_region ]) + subprocess.check_call(['python', '/Users/ayla/devel/Seg3D_DHS/hmt_test_batch.py', im_gray_training, im_gray_test, im_gray_all, im_cropped_chm, im_cropped_blur, truth_cropped_region ]) # Pad to original dimensions # ./PadImage gm_rep1_A_004.png gm_rep1_A_004_padded.png 0 0 0 59 0 #for f in raw_files: # # shorten filename, remove spaces, change directory # newfile = - # subprocess.check_call([padImage_tool f, newfile, '0', '0', '0', '59', '0']) + # subprocess.check_call([padImage_tool f, newfile, '0', '0', '0', '59', '0', 'ushort']) + # subprocess.check_call([padImage_tool f, newfile, '0', '0', '0', '59', '0', 'uchar']) if __name__ == "__main__": main(sys.argv[1:]) diff --git a/scripts/hmt_test_batch.py b/scripts/hmt_test_batch.py new file mode 100644 index 
000000000..428268373 --- /dev/null +++ b/scripts/hmt_test_batch.py @@ -0,0 +1,262 @@ +#!/usr/bin/env python +#import numpy as np +#import sys, shutil, socket, argparse +import sys, shutil, socket, os +#sys.path.append('/path/to/glia/code/gadget/python') +sys.path.append('/Users/ayla/devel/glia/code/gadget/python') +from script_util import * + +#dim = '2d' + +t_resume = False +#t_resume = (len(sys.argv) > 1 and sys.argv[1] == '-r') + +# paths +p_d = { + 'bin': '/Users/ayla/devel/glia/code/build', + 'res': '/Users/ayla/devel/dhs_batch/U308/output' +} +p_dbin = p_d['bin'] + '/{b}' + +def main(argv): + #print(argv) + if len(argv) < 7: + raise StandardError('{} requires at least 7 arguments'.format(argv[0])) + + im_gray_training = argv[1] + im_gray_test = argv[2] + im_gray_all = argv[3] + im_cropped_chm = argv[4] + im_cropped_blur = argv[5] + truth_cropped_region = argv[6] + + gray_training_files = [os.path.join(im_gray_training, f) for f in os.listdir(im_gray_training) if os.path.isfile(os.path.join(im_gray_training, f)) and f.lower().endswith('.mha')] + if len(gray_training_files) < 1: + raise StandardError("Missing grayscale data files") + + gray_test_files = [os.path.join(im_gray_test, f) for f in os.listdir(im_gray_test) if os.path.isfile(os.path.join(im_gray_test, f)) and f.lower().endswith('.mha')] + if len(gray_test_files) < 1: + raise StandardError("Missing grayscale test data files") + + boundary_files = [os.path.join(im_cropped_chm, f) for f in os.listdir(im_cropped_chm) if os.path.isfile(os.path.join(im_cropped_chm, f)) and f.lower().endswith('.mha')] + if len(boundary_files) < 1: + raise StandardError("Missing boundary files") + + blur_boundary_files = [os.path.join(im_cropped_blur, f) for f in os.listdir(im_cropped_blur) if os.path.isfile(os.path.join(im_cropped_blur, f)) and f.lower().endswith('.mha')] + if len(blur_boundary_files) < 1: + raise StandardError("Missing boundary files") + + if len(boundary_files) != len(blur_boundary_files): + raise StandardError("Require same number of boundary and blurred boundary files") + + truth_files = [os.path.join(truth_cropped_region, f) for f in os.listdir(truth_cropped_region) if os.path.isfile(os.path.join(truth_cropped_region, f)) and f.lower().endswith('.png')] + if len(truth_files) < 1: + raise StandardError("Missing ground truth files") + + file_names = [] + for f in gray_training_files: + basename = os.path.basename(f) + name, ext = os.path.splitext(basename) + file_names.append(name) + + test_names = [] + for f in gray_test_files: + basename = os.path.basename(f) + name, ext = os.path.splitext(basename) + test_names.append(name) + + print(file_names) + print(test_names) + + p_fdat = { + # ground truth segmentations + 'truth': truth_cropped_region + '/{i}.png', + # all raw images + 'gray_all': im_gray_all + '/{i}.mha', + # training raw images + 'gray_training': im_gray_training + '/{i}.mha', + # boundary probability maps + 'pb': im_cropped_chm + '/{i}.mha', + # blurred boundary probability maps + 'pbb': im_cropped_blur + '/{i}.mha' + } + + p_dres = { + 'segii': p_d['res'] + '/segii', # watershed superpixels + 'segiit': p_d['res'] + '/segiitest', # watershed superpixels + 'segi': p_d['res'] + '/segi', # pre-merged superpixels + 'segit': p_d['res'] + '/segitest', # pre-merged superpixels + 'order': p_d['res'] + '/order', # merge trees + 'sal': p_d['res'] + '/sal', # merge saliencies + 'bcf': p_d['res'] + '/bcf', # boundary features + 'bcl': p_d['res'] + '/bcl', # boundary labels + 'bcm': p_d['res'] + '/bcm', # boundary classifier 
model + 'bcp': (p_d['res'] + '/bcp'), # boundary predictions + 'seg': (p_d['res'] + '/seg'), # final segmentations + 'segt': (p_d['res'] + '/segtest'), # final segmentations + } + + p_fres = {} + for x in ['segiit', 'segit', 'segt', 'segii', 'segi', 'seg']: p_fres[x] = p_dres[x] + '/{i}.png' + for x in ['order', 'sal', 'bcf', 'bcl', 'bcp']: + p_fres[x] = p_dres[x] + '/{i}.ssv' + p_fres['bcm'] = p_dres['bcm'] + '/bcm.bin' + + # parameters + t_nproc = 70 # number of max parallel processes + t_nthrds= 1 # number of max parallel threads + t_wt = '0.008' # initial water level + t_th = '0.001' # watershed threshold + t_st = ['100', '500', '3.0'] # pre-merging params + + t_iids = { + 'tr' : file_names, 'te': test_names + } + + t_iids['all'] = t_iids['tr'] + t_iids['te'] + print(t_iids) + + # initial superpixels + _jobs = list() + make_dir(p_dres['segii'].format(s='')) + make_dir(p_dres['segiit'].format(s='')) + for i in t_iids['all']: + _f = p_fres['segii'].format(s='', i=i) + _g = p_fres['segiit'].format(s='', i=i) + if not (t_resume and is_file_valid(_f)): + _job = [ + p_dbin.format(b='watershed'), + '-i', p_fdat['pbb'].format(s='', i=i), + '-l', t_wt, '-t', t_th, '-u', 'true', '-o', _f, '--toi', _g] + _jobs.append(_job) + execute(_jobs, nproc=t_nproc, nt=t_nthrds, name='segii') + + # pre-merging + _jobs = list() + make_dir(p_dres['segi'].format(s='')) + make_dir(p_dres['segit'].format(s='')) + for i in t_iids['all']: + _f = p_fres['segi'].format(s='', i=i) + _g = p_fres['segit'].format(s='', i=i) + if not (t_resume and is_file_valid(_f)): + _job = [ + p_dbin.format(b='pre_merge'), + '-s', p_fres['segii'].format(s='', i=i), + '-p', p_fdat['pb'].format(s='', i=i), + '-t', t_st[0], t_st[1], '-b', t_st[2], + '-r', 'true', '-u', 'true', '-o', _f, '--toi', _g] + _jobs.append(_job) + execute(_jobs, nproc=t_nproc, nt=t_nthrds, name='segi') +# execute(_jobs, nproc=1, nt=1, name='segi') + + # trees and merging saliencies + _jobs = list() + make_dir(p_dres['order'].format(s='')) + make_dir(p_dres['sal'].format(s='')) + for i in t_iids['all']: + _f_o = p_fres['order'].format(s='', i=i) + _f_s = p_fres['sal'].format(s='', i=i) + if not (t_resume and is_file_valid(_f_o) and is_file_valid(_f_s)): + _job = [ + p_dbin.format(b='merge_order_pb'), + '-s', p_fres['segi'].format(s='', i=i), + '-p', p_fdat['pb'].format(s='', i=i), + '-t', '1', '-o', _f_o, '-y', _f_s] + _jobs.append(_job) + execute(_jobs, nproc=t_nproc, nt=t_nthrds, name='order/sal') + + # boundary features + _jobs = list() + make_dir(p_dres['bcf'].format(s='')) + for i in t_iids['all']: + _f = p_fres['bcf'].format(s='', i=i) + if not (t_resume and is_file_valid(_f)): + _job = [ + p_dbin.format(b='bc_feat'), + '-s', p_fres['segi'].format(s='', i=i), + '-o', p_fres['order'].format(s='', i=i), + '-y', p_fres['sal'].format(s='', i=i), + # since we're going over all data, use all raw grayscale files + '--rbi', p_fdat['gray_all'].format(s='', i=i), + # 16 or 32 appears to work best here + '--rbb', '16', '--rbl', '120.0', '--rbu', '255.0', + '--rbi', p_fdat['pb'].format(s='', i=i), + '--rbb', '16', '--rbl', '120.0', '--rbu', '255.0', + '--pb', p_fdat['pb'].format(s='', i=i), + '--s0', '1.0', '--sb', '1.0', '--bt', '70', '190', '255', + '-n', 'false', '-l', 'false', '--simpf', 'false', '-b', _f] + _jobs.append(_job) + execute(_jobs, nproc=t_nproc, nt=t_nthrds, name='bcf') +# execute(_jobs, nproc=1, nt=1, name='bcf') + + # boundary labels + _jobs = list() + make_dir(p_dres['bcl'].format(s='')) + #for i in t_iids['all']: + # no segmentations available for 
test data + for i in t_iids['tr']: + _f = p_fres['bcl'].format(s='', i=i) + if not (t_resume and is_file_valid(_f)): + _job = [ + p_dbin.format(b='bc_label_ri'), + '-s', p_fres['segi'].format(s='', i=i), + '-o', p_fres['order'].format(s='', i=i), + '-t', p_fdat['truth'].format(s='', i=i), + '--f1', 'true', '-d', '0.5', '-g', '0', '-p', 'false', '-w', 'false', '-l', _f] + _jobs.append(_job) + execute(_jobs, nproc=t_nproc, nt=t_nthrds, name='bcl') + + # boundary classifier training + _jobs = list() + make_dir(p_dres['bcm']).format(s='') + _f = p_fres['bcm'].format(s='') + if not (t_resume and is_file_valid(_f)): + _job = [ + p_dbin.format(b='train_rf'), + '--nt', '255', '--mt', '0', '--sr', '0.1', '--ns', '10', '--bal', 'true', + '--m', _f] + for i in t_iids['tr']: + _job.extend([ + '--f', p_fres['bcf'].format(s='', i=i), + '--l', p_fres['bcl'].format(s='', i=i)]) + print(_job) + _jobs.append(_job) + execute(_jobs, nproc=1, nt=1, name='bcm') + + # boundary predictions + _jobs = list() + make_dir(p_dres['bcp']).format(s='') + for i in t_iids['all']: + _f = p_fres['bcp'].format(s='', i=i) + if not (t_resume and is_file_valid(_f)): + _job = [ + p_dbin.format(b='pred_rf'), + '--m', p_fres['bcm'].format(s=''), '--l', '-1', + '--f', p_fres['bcf'].format(s='', i=i), + '--p', p_fres['bcp'].format(s='', i=i)] + print(_job) + _jobs.append(_job) + execute(_jobs, nproc=t_nproc, nt=1, name='bcp') + + # final segmentation + _jobs = list() + make_dir(p_dres['seg'].format(s='')) + make_dir(p_dres['segt'].format(s='')) + for i in t_iids['all']: + _f = p_fres['seg'].format(s='', i=i) + _g = p_fres['segt'].format(s='', i=i) + if not (t_resume and is_file_valid(_f)): + _job = [ + p_dbin.format(b='segment_greedy'), + '-s', p_fres['segi'].format(s='', i=i), + '-o', p_fres['order'].format(s='', i=i), + '-p', p_fres['bcp'].format(s='', i=i), + '-t', '0.5', + '-r', 'true', '-u', 'true', '-f', _f, '--toi', _g] + _jobs.append(_job) + execute(_jobs, nproc=t_nproc, nt=1, name='seg') +# execute(_jobs, nproc=1, nt=1, name='seg') + + +if __name__ == "__main__": + main(sys.argv) diff --git a/scripts/seg3d_filters.py b/scripts/seg3d_filters.py index 4ade76f55..086073108 100644 --- a/scripts/seg3d_filters.py +++ b/scripts/seg3d_filters.py @@ -12,18 +12,12 @@ raise ValueError if not truth_cropped_region: raise ValueError -if not im_gray: +if not im_gray_all: raise ValueError if not im_cropped_gray: raise ValueError if not im_cropped_chm: raise ValueError -if not im_gray_test: - raise ValueError -if not im_cropped_gray_test: - raise ValueError -if not im_cropped_chm_test: - raise ValueError class MyThread(threading.Thread): def __init__(self, layerID, timeout=5.0): @@ -52,10 +46,7 @@ def wait_on_layer(layer, timeout=2.0): thread.join() truth_region_files = [os.path.join(truth_region, f) for f in os.listdir(truth_region) if os.path.isfile(os.path.join(truth_region, f)) and f.lower().endswith('.png')] -im_gray_files = [os.path.join(im_gray, f) for f in os.listdir(im_gray) if os.path.isfile(os.path.join(im_gray, f)) and f.lower().endswith('.mha')] -im_gray_files_test = [os.path.join(im_gray_test, f) for f in os.listdir(im_gray_test) if os.path.isfile(os.path.join(im_gray_test, f)) and f.lower().endswith('.mha')] - -gray_files=im_gray_files+im_gray_files_test +gray_files = [os.path.join(im_gray_all, f) for f in os.listdir(im_gray_all) if os.path.isfile(os.path.join(im_gray_all, f)) and f.lower().endswith('.mha')] # ground truth segmentations: # import -> crop -> export @@ -79,7 +70,7 @@ def wait_on_layer(layer, timeout=2.0): layers 
= crop(layerids="{}".format(layers[0]),origin='[-0.5,-0.5,-0.5]',size='[1024,883.5,1]',replace='true') layer = layers[0] wait_on_layer(layer) - export_file = update_filepath(f, { im_gray_test: im_cropped_gray_test, im_gray: im_cropped_gray }) + export_file = update_filepath(f, { im_gray_all: im_cropped_gray }) retval = exportlayer(layer="{}".format(layer),file_path='{}'.format(export_file),extension='.mha',exporter='[ITK Data Exporter]') if not retval: print("exportlayer failed") @@ -91,7 +82,7 @@ def wait_on_layer(layer, timeout=2.0): wait_on_layer(layer) layer = gradientmagnitudefilter(layerid="{}".format(layer),replace='true',preserve_data_format='true') wait_on_layer(layer) - export_file = update_filepath(f, { im_gray_test: im_cropped_chm_test, im_gray: im_cropped_chm }) + export_file = update_filepath(f, { im_gray_all: im_cropped_chm }) retval = exportlayer(layer="{}".format(layer),file_path='{}'.format(export_file),extension='.mha') if not retval: print("exportlayer failed") From 61e71f7a960077b5ad3c43a44dff1e49b09ec35e Mon Sep 17 00:00:00 2001 From: Ayla Khan Date: Mon, 17 Apr 2017 15:59:35 -0600 Subject: [PATCH 08/10] Should copy file. --- scripts/batch_processing.py | 47 +++++++++++++++++++++++-------------- scripts/hmt_test_batch.py | 20 +++++++++------- 2 files changed, 40 insertions(+), 27 deletions(-) diff --git a/scripts/batch_processing.py b/scripts/batch_processing.py index 38224a8d2..f5c634c0f 100644 --- a/scripts/batch_processing.py +++ b/scripts/batch_processing.py @@ -4,7 +4,9 @@ paths = { 'data': '/Users/ayla/devel/dhs_batch/U308', 'tools': '/Users/ayla/devel/Seg3D_DHS/tools', - 'hmtscript': '/Users/ayla/devel/Seg3D_DHS' + 'glia_bin': '/Users/ayla/devel/glia/code/build', + 'hmtscript': '/Users/ayla/devel/Seg3D_DHS/hmt_test_batch.py', + 'glia_results': '/Users/ayla/devel/dhs_batch/U308/output' } def seg3d_connect(port, size): @@ -85,6 +87,9 @@ def main(argv): # TODO: not sure if needed #truth_cropped_gray = os.path.join(truth_cropped, 'gray') + seg_ushort = os.path.join(paths['glia_results'], 'seg') + seg_uchar = os.path.join(paths['glia_results'], 'segtest') + ensure_directories( (im_gray_training, im_gray_all, im_cropped, im_cropped_gray, im_cropped_chm, im_cropped_blur, im_gray_test, truth_region, truth_cropped, truth_cropped_region) ) rgbToGray2D_tool = os.path.join(paths['tools'], 'RGBToGray2D') @@ -95,7 +100,7 @@ def main(argv): for f in truth_raw_files: # shorten filename, remove spaces, change directory newfile = update_filepath(f, { 'U3O8_': '', 'Particle ': 'p', truth_raw: truth_region }) - os.rename(f, newfile) + shutil.copyfile(f, newfile) raw_files = im_raw_files + im_raw_test_files @@ -107,15 +112,15 @@ def main(argv): subprocess.check_call([rgbToGray2D_tool, f, newfile, 'float']) shutil.copy(newfile, im_gray_all) - # clientsocket = seg3d_connect(port, size) - # # add some variables to Seg3D's interpreter - # retval = seg3d_command(clientsocket, size, "im_gray_all='{}'\r\n".format(im_gray_all)) - # retval = seg3d_command(clientsocket, size, "im_cropped_gray='{}'\r\n".format(im_cropped_gray)) - # retval = seg3d_command(clientsocket, size, "im_cropped_chm='{}'\r\n".format(im_cropped_chm)) - # retval = seg3d_command(clientsocket, size, "truth_region='{}'\r\n".format(truth_region)) - # retval = seg3d_command(clientsocket, size, "truth_cropped_region='{}'\r\n".format(truth_cropped_region)) - # retval = seg3d_command(clientsocket, size, "exec(open('/Users/ayla/devel/Seg3D_DHS/scripts/seg3d_filters.py').read())\r\n") - # seg3d_disconnect(clientsocket, size) + 
clientsocket = seg3d_connect(port, size) + # add some variables to Seg3D's interpreter + retval = seg3d_command(clientsocket, size, "im_gray_all='{}'\r\n".format(im_gray_all)) + retval = seg3d_command(clientsocket, size, "im_cropped_gray='{}'\r\n".format(im_cropped_gray)) + retval = seg3d_command(clientsocket, size, "im_cropped_chm='{}'\r\n".format(im_cropped_chm)) + retval = seg3d_command(clientsocket, size, "truth_region='{}'\r\n".format(truth_region)) + retval = seg3d_command(clientsocket, size, "truth_cropped_region='{}'\r\n".format(truth_cropped_region)) + retval = seg3d_command(clientsocket, size, "exec(open('/Users/ayla/devel/Seg3D_DHS/scripts/seg3d_filters.py').read())\r\n") + seg3d_disconnect(clientsocket, size) # cleanup truth_cropped_files = [os.path.join(truth_cropped_region, f) for f in os.listdir(truth_cropped_region) if os.path.isfile(os.path.join(truth_cropped_region, f)) and f.lower().endswith('.png')] @@ -132,15 +137,21 @@ def main(argv): subprocess.check_call([blur_image_tool, "--inputImage={}".format(f), "--outputImage={}".format(newfile), "--sigma=1", "--kernelWidth=3"]) # Call GLIA script - subprocess.check_call(['python', '/Users/ayla/devel/Seg3D_DHS/hmt_test_batch.py', im_gray_training, im_gray_test, im_gray_all, im_cropped_chm, im_cropped_blur, truth_cropped_region ]) + subprocess.check_call(['python', paths['hmtscript'], im_gray_training, im_gray_test, im_gray_all, im_cropped_chm, im_cropped_blur, truth_cropped_region, paths['glia_bin'], paths['glia_results'] ]) + + seg_ushort_files = [os.path.join(seg_ushort, f) for f in os.listdir(seg_ushort) if os.path.isfile(os.path.join(seg_ushort, f)) and f.lower().endswith('.png')] + seg_uchar_files = [os.path.join(seg_uchar, f) for f in os.listdir(seg_uchar) if os.path.isfile(os.path.join(seg_uchar, f)) and f.lower().endswith('.png')] # Pad to original dimensions - # ./PadImage gm_rep1_A_004.png gm_rep1_A_004_padded.png 0 0 0 59 0 - #for f in raw_files: - # # shorten filename, remove spaces, change directory - # newfile = - # subprocess.check_call([padImage_tool f, newfile, '0', '0', '0', '59', '0', 'ushort']) - # subprocess.check_call([padImage_tool f, newfile, '0', '0', '0', '59', '0', 'uchar']) + for f in seg_ushort_files: + # shorten filename, remove spaces, change directory + newfile = update_filepath(f, { '.png': '_pad.png' }) + subprocess.check_call([padImage_tool, f, newfile, '0', '0', '0', '59', '0', 'ushort']) + + for f in seg_uchar_files: + # shorten filename, remove spaces, change directory + newfile = update_filepath(f, { '.png': '_pad.png' }) + subprocess.check_call([padImage_tool, f, newfile, '0', '0', '0', '59', '0', 'uchar']) if __name__ == "__main__": main(sys.argv[1:]) diff --git a/scripts/hmt_test_batch.py b/scripts/hmt_test_batch.py index 428268373..cfa2e0edf 100644 --- a/scripts/hmt_test_batch.py +++ b/scripts/hmt_test_batch.py @@ -11,17 +11,10 @@ t_resume = False #t_resume = (len(sys.argv) > 1 and sys.argv[1] == '-r') -# paths -p_d = { - 'bin': '/Users/ayla/devel/glia/code/build', - 'res': '/Users/ayla/devel/dhs_batch/U308/output' -} -p_dbin = p_d['bin'] + '/{b}' - def main(argv): #print(argv) - if len(argv) < 7: - raise StandardError('{} requires at least 7 arguments'.format(argv[0])) + if len(argv) < 9: + raise StandardError('{} requires at least 9 arguments'.format(argv[0])) im_gray_training = argv[1] im_gray_test = argv[2] @@ -29,6 +22,15 @@ def main(argv): im_cropped_chm = argv[4] im_cropped_blur = argv[5] truth_cropped_region = argv[6] + glia_bin = argv[7] + glia_results = argv[8] + + # paths + 
p_d = { + 'bin': glia_bin, + 'res': glia_results + } + p_dbin = p_d['bin'] + '/{b}' gray_training_files = [os.path.join(im_gray_training, f) for f in os.listdir(im_gray_training) if os.path.isfile(os.path.join(im_gray_training, f)) and f.lower().endswith('.mha')] if len(gray_training_files) < 1: From f03638d92cc255e0a13f344b9eca736bd9f80b55 Mon Sep 17 00:00:00 2001 From: Ayla Khan Date: Mon, 17 Apr 2017 16:33:25 -0600 Subject: [PATCH 09/10] Disable prints. --- scripts/hmt_test_batch.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/scripts/hmt_test_batch.py b/scripts/hmt_test_batch.py index cfa2e0edf..5b2a0d9e6 100644 --- a/scripts/hmt_test_batch.py +++ b/scripts/hmt_test_batch.py @@ -67,9 +67,6 @@ def main(argv): name, ext = os.path.splitext(basename) test_names.append(name) - print(file_names) - print(test_names) - p_fdat = { # ground truth segmentations 'truth': truth_cropped_region + '/{i}.png', @@ -221,7 +218,7 @@ def main(argv): _job.extend([ '--f', p_fres['bcf'].format(s='', i=i), '--l', p_fres['bcl'].format(s='', i=i)]) - print(_job) + #print(_job) _jobs.append(_job) execute(_jobs, nproc=1, nt=1, name='bcm') @@ -236,7 +233,7 @@ def main(argv): '--m', p_fres['bcm'].format(s=''), '--l', '-1', '--f', p_fres['bcf'].format(s='', i=i), '--p', p_fres['bcp'].format(s='', i=i)] - print(_job) + #print(_job) _jobs.append(_job) execute(_jobs, nproc=t_nproc, nt=1, name='bcp') From 339fa4b37c457b78f397254d24f2c75aa5ab0729 Mon Sep 17 00:00:00 2001 From: Ayla Khan Date: Tue, 18 Apr 2017 12:07:30 -0600 Subject: [PATCH 10/10] Fix generated python command. --- src/Core/Action/Action.cc | 2 +- src/Core/Action/Tests/ActionTests.cc | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/Core/Action/Action.cc b/src/Core/Action/Action.cc index b7e8fc7ce..f2490350c 100644 --- a/src/Core/Action/Action.cc +++ b/src/Core/Action/Action.cc @@ -170,7 +170,7 @@ std::string Action::export_to_python_string() const } } - command << ");"; + command << ")"; // Return the command return command.str(); diff --git a/src/Core/Action/Tests/ActionTests.cc b/src/Core/Action/Tests/ActionTests.cc index 0906ea93f..24bee3d92 100644 --- a/src/Core/Action/Tests/ActionTests.cc +++ b/src/Core/Action/Tests/ActionTests.cc @@ -143,7 +143,7 @@ TEST_F(ActionTests, EmptyAction) ASSERT_EQ( actionExportString, actionString ); std::string pythonActionExportString = action.export_to_python_string(); - ASSERT_EQ( pythonActionExportString, "dummyaction();"); + ASSERT_EQ( pythonActionExportString, "dummyaction()"); ASSERT_TRUE( action.translate( context ) ); ASSERT_TRUE( action.post_create( context ) ); @@ -162,7 +162,7 @@ TEST_F(ActionTests, ActionImportFromString) ASSERT_EQ( actionExportString, actionString ); std::string pythonActionExportString = action.export_to_python_string(); - ASSERT_EQ( pythonActionExportString, "dummyaction();"); + ASSERT_EQ( pythonActionExportString, "dummyaction()"); ASSERT_TRUE( action.translate( context ) ); ASSERT_TRUE( action.post_create( context ) ); @@ -182,7 +182,7 @@ TEST_F(ActionTests, ActionImportFromStringWithError) ASSERT_EQ( actionExportString, actionString ); std::string pythonActionExportString = action.export_to_python_string(); - ASSERT_EQ( pythonActionExportString, "dummyaction();"); + ASSERT_EQ( pythonActionExportString, "dummyaction()"); ASSERT_TRUE( error.empty() ); @@ -207,7 +207,7 @@ TEST_F(ActionTests, ActionImportFromStringWithParam) ASSERT_EQ( actionExportString, "DummyAction param_required=\'param\' " ); std::string pythonActionExportString 
= action.export_to_python_string(); - ASSERT_EQ( pythonActionExportString, "dummyaction(param_required='param');"); + ASSERT_EQ( pythonActionExportString, "dummyaction(param_required='param')"); ASSERT_TRUE( action.translate( context ) ); ASSERT_TRUE( action.post_create( context ) );
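
Appendix (not part of the patches): a minimal sketch of the socket handshake that scripts/batch_processing.py (patches 06-08) uses to drive a running Seg3D instance: connect, read the greeting, send CRLF-terminated Python statements into Seg3D's interpreter, then send "exit". The port and buffer size mirror the defaults in batch_processing.py; the running-Seg3D assumption and the example path assigned to truth_region are illustrative only, and this sketch is written for Python 3 rather than the Python 2 style used in the scripts above.

#!/usr/bin/env python3
# Minimal sketch; assumes Seg3D is running with its Python action socket on
# localhost:9000, matching the defaults in scripts/batch_processing.py.
import socket

PORT = 9000
BUFSIZE = 1024

with socket.create_connection(("localhost", PORT)) as s:
    banner = s.recv(BUFSIZE)  # Seg3D sends a greeting when a client connects
    print("Received: [{}]".format(banner.decode(errors="replace")))

    # Each command is a plain Python statement terminated with CRLF, e.g. setting a
    # variable that seg3d_filters.py expects to exist (the path here is hypothetical).
    s.sendall(b"truth_region='/path/to/truth/region'\r\n")
    reply = s.recv(BUFSIZE)
    print("Received: [{}]".format(reply.decode(errors="replace")))

    s.sendall(b"exit\r\n")  # end the interpreter session, as seg3d_disconnect() does
    s.recv(BUFSIZE)

The CRLF terminator matters because the Seg3D side reads line-delimited commands; this mirrors seg3d_command() in batch_processing.py, which appends "\r\n" to every statement it sends.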