diff --git a/scripts/sct_extract_metric.py b/scripts/sct_extract_metric.py
index b79ed0eb01..2375aa74e8 100755
--- a/scripts/sct_extract_metric.py
+++ b/scripts/sct_extract_metric.py
@@ -131,7 +131,7 @@ def get_parser():
     parser.add_option(name='-perlevel',
                       type_value='int',
                       description='Set to 1 to output one metric per vertebral level instead of a single '
-                                  'output metric.',
+                                  'output metric. This flag needs to be used with flag -vert.',
                       mandatory=False,
                       default_value=0)
     parser.add_option(name="-v",
diff --git a/scripts/sct_process_segmentation.py b/scripts/sct_process_segmentation.py
index 6605b544f9..d1e76f9da1 100755
--- a/scripts/sct_process_segmentation.py
+++ b/scripts/sct_process_segmentation.py
@@ -106,7 +106,7 @@ def get_parser():
     parser.add_option(name='-perlevel',
                       type_value='int',
                       description='Set to 1 to output one metric per vertebral level instead of a single '
-                                  'output metric.',
+                                  'output metric. This flag needs to be used with flag -vert.',
                       mandatory=False,
                       default_value=Param().perlevel)
     parser.add_option(name='-r',
diff --git a/spinalcordtoolbox/aggregate_slicewise.py b/spinalcordtoolbox/aggregate_slicewise.py
index 1efaffd94d..33f126401a 100644
--- a/spinalcordtoolbox/aggregate_slicewise.py
+++ b/spinalcordtoolbox/aggregate_slicewise.py
@@ -107,7 +107,9 @@ def aggregate_per_slice_or_level(metric, mask=None, slices=[], levels=[], persli
     # loop across slice group
     for slicegroup in slicegroups:
         # add level info
-        if levels:
+        if vertgroups is None:
+            agg_metric[slicegroup]['VertLevel'] = None
+        else:
             agg_metric[slicegroup]['VertLevel'] = vertgroups[slicegroups.index(slicegroup)]
         # Loop across functions (e.g.: MEAN, STD)
         for (name, func) in group_funcs:
@@ -402,7 +404,7 @@ def save_as_csv(agg_metric, fname_out, fname_in=None, append=False):
     # list_item = ['VertLevel', 'Label', 'MEAN', 'WA', 'BIN', 'ML', 'MAP', 'STD', 'MAX']
     # TODO: The thing below is ugly and needs to be fixed, but this is the only solution I found to order the columns
     # without refactoring the code with OrderedDict.
-    list_item = ['VertLevel', 'Label', 'Size [vox]', 'MEAN(area)', 'STD(area)', 'MEAN(AP_diameter)', 'STD(AP_diameter)',
+    list_item = ['Label', 'Size [vox]', 'MEAN(area)', 'STD(area)', 'MEAN(AP_diameter)', 'STD(AP_diameter)',
                  'MEAN(RL_diameter)', 'STD(RL_diameter)', 'MEAN(ratio_minor_major)', 'STD(ratio_minor_major)',
                  'MEAN(eccentricity)', 'STD(eccentricity)', 'MEAN(orientation)', 'STD(orientation)',
                  'MEAN(equivalent_diameter)', 'STD(equivalent_diameter)', 'MEAN(solidity)', 'STD(solidity)',
@@ -412,7 +414,7 @@ def save_as_csv(agg_metric, fname_out, fname_in=None, append=False):
     if not append or not os.path.isfile(fname_out):
         with open(fname_out, 'w') as csvfile:
             # spamwriter = csv.writer(csvfile, delimiter=',')
-            header = ['Timestamp', 'SCT Version', 'Filename', 'Slice (I->S)']
+            header = ['Timestamp', 'SCT Version', 'Filename', 'Slice (I->S)', 'VertLevel']
             agg_metric_key = agg_metric[agg_metric.keys()[0]].keys()
             for item in list_item:
                 for key in agg_metric_key:
@@ -431,15 +433,11 @@ def save_as_csv(agg_metric, fname_out, fname_in=None, append=False):
             line.append(sct.__version__)  # SCT Version
             line.append(fname_in)  # file name associated with the results
             line.append(parse_num_list_inv(slicegroup))  # list all slices in slicegroup
+            line.append(parse_num_list_inv(agg_metric[slicegroup]['VertLevel']))  # list vertebral levels
             agg_metric_key = agg_metric[agg_metric.keys()[0]].keys()
             for item in list_item:
                 for key in agg_metric_key:
                     if item in key:
-                        # Special case for VertLevel
-                        if key == 'VertLevel':
-                            line.append(
-                                parse_num_list_inv(agg_metric[slicegroup]['VertLevel']))  # list vertebral levels
-                        else:
-                            line.append(str(agg_metric[slicegroup][key]))
+                        line.append(str(agg_metric[slicegroup][key]))
                         break
             spamwriter.writerow(line)
diff --git a/unit_testing/test_aggregate_slicewise.py b/unit_testing/test_aggregate_slicewise.py
index 1b7f0c687d..a25acda00e 100644
--- a/unit_testing/test_aggregate_slicewise.py
+++ b/unit_testing/test_aggregate_slicewise.py
@@ -199,15 +199,15 @@ def test_save_as_csv(dummy_metrics):
     with open('tmp_file_out.csv', 'r') as csvfile:
         spamreader = csv.reader(csvfile, delimiter=',')
         spamreader.next()  # skip header
-        assert spamreader.next()[1:] == [sct.__version__, 'FakeFile.txt', '3:4', '45.5', '4.5']
+        assert spamreader.next()[1:] == [sct.__version__, 'FakeFile.txt', '3:4', '', '45.5', '4.5']
     # with appending
     aggregate_slicewise.save_as_csv(agg_metric, 'tmp_file_out.csv')
     aggregate_slicewise.save_as_csv(agg_metric, 'tmp_file_out.csv', append=True)
     with open('tmp_file_out.csv', 'r') as csvfile:
         spamreader = csv.reader(csvfile, delimiter=',')
         spamreader.next()  # skip header
-        assert spamreader.next()[1:] == [sct.__version__, '', '3:4', '45.5', '4.5']
-        assert spamreader.next()[1:] == [sct.__version__, '', '3:4', '45.5', '4.5']
+        assert spamreader.next()[1:] == [sct.__version__, '', '3:4', '', '45.5', '4.5']
+        assert spamreader.next()[1:] == [sct.__version__, '', '3:4', '', '45.5', '4.5']
 
 
 # noinspection 801,PyShadowingNames
@@ -225,6 +225,43 @@ def test_save_as_csv_slices(dummy_metrics, dummy_vert_level):
         assert row['VertLevel'] == '3:4'
 
 
+# noinspection 801,PyShadowingNames
+def test_save_as_csv_per_level(dummy_metrics, dummy_vert_level):
+    """Make sure one row per vertebral level is output"""
+    agg_metric = aggregate_slicewise.aggregate_per_slice_or_level(dummy_metrics['with float'], levels=[3, 4],
+                                                                  perlevel=True,
+                                                                  vert_level=dummy_vert_level,
+                                                                  group_funcs=(('WA', aggregate_slicewise.func_wa),))
+    aggregate_slicewise.save_as_csv(agg_metric, 'tmp_file_out.csv')
+    with open('tmp_file_out.csv', 'r') as csvfile:
+        reader = csv.DictReader(csvfile, delimiter=',')
+        row = reader.next()
+        assert row['Slice (I->S)'] == '2:3'
+        assert row['VertLevel'] == '3'
+
+
+# noinspection 801,PyShadowingNames
+def test_save_as_csv_per_slice_then_per_level(dummy_metrics, dummy_vert_level):
+    """Test with and without specifying perlevel. See: https://github.com/neuropoly/spinalcordtoolbox/issues/2141"""
+    agg_metric = aggregate_slicewise.aggregate_per_slice_or_level(dummy_metrics['with float'], levels=[3, 4],
+                                                                  perlevel=True,
+                                                                  vert_level=dummy_vert_level,
+                                                                  group_funcs=(('WA', aggregate_slicewise.func_wa),))
+    aggregate_slicewise.save_as_csv(agg_metric, 'tmp_file_out.csv')
+    agg_metric = aggregate_slicewise.aggregate_per_slice_or_level(dummy_metrics['with float'], slices=[0],
+                                                                  group_funcs=(('WA', aggregate_slicewise.func_wa),),)
+    aggregate_slicewise.save_as_csv(agg_metric, 'tmp_file_out.csv', append=True)
+    with open('tmp_file_out.csv', 'r') as csvfile:
+        reader = csv.DictReader(csvfile, delimiter=',')
+        row = reader.next()
+        assert row['Slice (I->S)'] == '2:3'
+        assert row['VertLevel'] == '3'
+        reader.next()
+        row = reader.next()
+        assert row['Slice (I->S)'] == '0'
+        assert row['VertLevel'] == ''
+
+
 # noinspection 801,PyShadowingNames
 def test_save_as_csv_sorting(dummy_metrics):
     """Make sure slices are sorted in output csv file"""
@@ -248,4 +285,4 @@ def test_save_as_csv_extract_metric(dummy_data_and_labels):
     with open('tmp_file_out.csv', 'r') as csvfile:
         spamreader = csv.reader(csvfile, delimiter=',')
         spamreader.next()  # skip header
-        assert spamreader.next()[1:-1] == [sct.__version__, '', '0:4', 'label_0', '2.5', '38.0']
+        assert spamreader.next()[1:-1] == [sct.__version__, '', '0:4', '', 'label_0', '2.5', '38.0']