Skip to content

Commit

Permalink
Minor fix in usage and csv output (#2199)
Browse files Browse the repository at this point in the history
* test_aggregate_slicewise: Added unit test perlevel

* sct_extract_metric,sct_process_segmentation: Clarified usage

Explicitly state that the -perlevel flag requires the -vert flag

* aggregate_slicewise: Now always display VertLevel column

Fixes #2141

* test_aggregate_slicewise: Updated to account for VertLevel column
  • Loading branch information
jcohenadad committed Mar 20, 2019
1 parent e5e4517 commit 894de89
Show file tree
Hide file tree
Showing 4 changed files with 50 additions and 15 deletions.
2 changes: 1 addition & 1 deletion scripts/sct_extract_metric.py
Expand Up @@ -131,7 +131,7 @@ def get_parser():
parser.add_option(name='-perlevel',
type_value='int',
description='Set to 1 to output one metric per vertebral level instead of a single '
'output metric.',
'output metric. This flag needs to be used with flag -vert.',
mandatory=False,
default_value=0)
parser.add_option(name="-v",
Expand Down
2 changes: 1 addition & 1 deletion scripts/sct_process_segmentation.py
Expand Up @@ -106,7 +106,7 @@ def get_parser():
parser.add_option(name='-perlevel',
type_value='int',
description='Set to 1 to output one metric per vertebral level instead of a single '
'output metric.',
'output metric. This flag needs to be used with flag -vert.',
mandatory=False,
default_value=Param().perlevel)
parser.add_option(name='-r',
Expand Down
16 changes: 7 additions & 9 deletions spinalcordtoolbox/aggregate_slicewise.py
Expand Up @@ -107,7 +107,9 @@ def aggregate_per_slice_or_level(metric, mask=None, slices=[], levels=[], persli
# loop across slice group
for slicegroup in slicegroups:
# add level info
if levels:
if vertgroups is None:
agg_metric[slicegroup]['VertLevel'] = None
else:
agg_metric[slicegroup]['VertLevel'] = vertgroups[slicegroups.index(slicegroup)]
# Loop across functions (e.g.: MEAN, STD)
for (name, func) in group_funcs:
Expand Down Expand Up @@ -402,7 +404,7 @@ def save_as_csv(agg_metric, fname_out, fname_in=None, append=False):
# list_item = ['VertLevel', 'Label', 'MEAN', 'WA', 'BIN', 'ML', 'MAP', 'STD', 'MAX']
# TODO: The thing below is ugly and needs to be fixed, but this is the only solution I found to order the columns
# without refactoring the code with OrderedDict.
list_item = ['VertLevel', 'Label', 'Size [vox]', 'MEAN(area)', 'STD(area)', 'MEAN(AP_diameter)', 'STD(AP_diameter)',
list_item = ['Label', 'Size [vox]', 'MEAN(area)', 'STD(area)', 'MEAN(AP_diameter)', 'STD(AP_diameter)',
'MEAN(RL_diameter)', 'STD(RL_diameter)', 'MEAN(ratio_minor_major)', 'STD(ratio_minor_major)',
'MEAN(eccentricity)', 'STD(eccentricity)', 'MEAN(orientation)', 'STD(orientation)',
'MEAN(equivalent_diameter)', 'STD(equivalent_diameter)', 'MEAN(solidity)', 'STD(solidity)',
Expand All @@ -412,7 +414,7 @@ def save_as_csv(agg_metric, fname_out, fname_in=None, append=False):
if not append or not os.path.isfile(fname_out):
with open(fname_out, 'w') as csvfile:
# spamwriter = csv.writer(csvfile, delimiter=',')
header = ['Timestamp', 'SCT Version', 'Filename', 'Slice (I->S)']
header = ['Timestamp', 'SCT Version', 'Filename', 'Slice (I->S)', 'VertLevel']
agg_metric_key = agg_metric[agg_metric.keys()[0]].keys()
for item in list_item:
for key in agg_metric_key:
Expand All @@ -431,15 +433,11 @@ def save_as_csv(agg_metric, fname_out, fname_in=None, append=False):
line.append(sct.__version__) # SCT Version
line.append(fname_in) # file name associated with the results
line.append(parse_num_list_inv(slicegroup)) # list all slices in slicegroup
line.append(parse_num_list_inv(agg_metric[slicegroup]['VertLevel'])) # list vertebral levels
agg_metric_key = agg_metric[agg_metric.keys()[0]].keys()
for item in list_item:
for key in agg_metric_key:
if item in key:
# Special case for VertLevel
if key == 'VertLevel':
line.append(
parse_num_list_inv(agg_metric[slicegroup]['VertLevel'])) # list vertebral levels
else:
line.append(str(agg_metric[slicegroup][key]))
line.append(str(agg_metric[slicegroup][key]))
break
spamwriter.writerow(line)
45 changes: 41 additions & 4 deletions unit_testing/test_aggregate_slicewise.py
Expand Up @@ -199,15 +199,15 @@ def test_save_as_csv(dummy_metrics):
with open('tmp_file_out.csv', 'r') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',')
spamreader.next() # skip header
assert spamreader.next()[1:] == [sct.__version__, 'FakeFile.txt', '3:4', '45.5', '4.5']
assert spamreader.next()[1:] == [sct.__version__, 'FakeFile.txt', '3:4', '', '45.5', '4.5']
# with appending
aggregate_slicewise.save_as_csv(agg_metric, 'tmp_file_out.csv')
aggregate_slicewise.save_as_csv(agg_metric, 'tmp_file_out.csv', append=True)
with open('tmp_file_out.csv', 'r') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',')
spamreader.next() # skip header
assert spamreader.next()[1:] == [sct.__version__, '', '3:4', '45.5', '4.5']
assert spamreader.next()[1:] == [sct.__version__, '', '3:4', '45.5', '4.5']
assert spamreader.next()[1:] == [sct.__version__, '', '3:4', '', '45.5', '4.5']
assert spamreader.next()[1:] == [sct.__version__, '', '3:4', '', '45.5', '4.5']


# noinspection 801,PyShadowingNames
Expand All @@ -225,6 +225,43 @@ def test_save_as_csv_slices(dummy_metrics, dummy_vert_level):
assert row['VertLevel'] == '3:4'


# noinspection 801,PyShadowingNames
def test_save_as_csv_per_level(dummy_metrics, dummy_vert_level):
    """Check that per-level aggregation writes one row per vertebral level,
    with slices in reduced form and the VertLevel column filled in."""
    agg_metric = aggregate_slicewise.aggregate_per_slice_or_level(dummy_metrics['with float'], levels=[3, 4],
                                                                  perlevel=True,
                                                                  vert_level=dummy_vert_level,
                                                                  group_funcs=(('WA', aggregate_slicewise.func_wa),))
    aggregate_slicewise.save_as_csv(agg_metric, 'tmp_file_out.csv')
    with open('tmp_file_out.csv', 'r') as csvfile:
        reader = csv.DictReader(csvfile, delimiter=',')
        # next() builtin is equivalent to Py2's reader.next() and also works on Py3
        row = next(reader)
        assert row['Slice (I->S)'] == '2:3'
        assert row['VertLevel'] == '3'


# noinspection 801,PyShadowingNames
def test_save_as_csv_per_slice_then_per_level(dummy_metrics, dummy_vert_level):
    """Test with and without specifying perlevel: the appended (non-perlevel) rows
    must still carry the VertLevel column, left empty.
    See: https://github.com/neuropoly/spinalcordtoolbox/issues/2141"""
    # First pass: per-level aggregation (VertLevel populated)
    agg_metric = aggregate_slicewise.aggregate_per_slice_or_level(dummy_metrics['with float'], levels=[3, 4],
                                                                  perlevel=True,
                                                                  vert_level=dummy_vert_level,
                                                                  group_funcs=(('WA', aggregate_slicewise.func_wa),))
    aggregate_slicewise.save_as_csv(agg_metric, 'tmp_file_out.csv')
    # Second pass: per-slice aggregation appended to the same file (VertLevel empty)
    agg_metric = aggregate_slicewise.aggregate_per_slice_or_level(dummy_metrics['with float'], slices=[0],
                                                                  group_funcs=(('WA', aggregate_slicewise.func_wa),),)
    aggregate_slicewise.save_as_csv(agg_metric, 'tmp_file_out.csv', append=True)
    with open('tmp_file_out.csv', 'r') as csvfile:
        reader = csv.DictReader(csvfile, delimiter=',')
        # next() builtin is equivalent to Py2's reader.next() and also works on Py3
        row = next(reader)
        assert row['Slice (I->S)'] == '2:3'
        assert row['VertLevel'] == '3'
        next(reader)  # skip second per-level row
        row = next(reader)
        assert row['Slice (I->S)'] == '0'
        assert row['VertLevel'] == ''


# noinspection 801,PyShadowingNames
def test_save_as_csv_sorting(dummy_metrics):
"""Make sure slices are sorted in output csv file"""
Expand All @@ -248,4 +285,4 @@ def test_save_as_csv_extract_metric(dummy_data_and_labels):
with open('tmp_file_out.csv', 'r') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',')
spamreader.next() # skip header
assert spamreader.next()[1:-1] == [sct.__version__, '', '0:4', 'label_0', '2.5', '38.0']
assert spamreader.next()[1:-1] == [sct.__version__, '', '0:4', '', 'label_0', '2.5', '38.0']

0 comments on commit 894de89

Please sign in to comment.