Skip to content

Commit

Permalink
Merge pull request #682 from neuroscout/split-report
Browse files Browse the repository at this point in the history
Enhance report plots
  • Loading branch information
adelavega committed Oct 23, 2019
2 parents 0e15656 + 94f5c98 commit d9bce50
Show file tree
Hide file tree
Showing 6 changed files with 81 additions and 73 deletions.
17 changes: 9 additions & 8 deletions neuroscout/tasks/report.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,13 +3,14 @@

from ..models import Analysis, Report

from .compile import build_analysis, PathBuilder, impute_confounds
from .viz import plot_design_matrix, plot_corr_matrix, sort_dm
from .utils import update_record, write_jsons, write_tarball, dump_analysis

from .utils.build import build_analysis, impute_confounds
from .utils.viz import plot_design_matrix, plot_corr_matrix, sort_dm
from .utils.io import (
dump_analysis, update_record, PathBuilder, write_jsons, write_tarball)

MIN_CLI_VERSION = '0.3'


def compile(flask_app, hash_id, run_ids=None, build=False):
""" Compile analysis_id. Validate analysis using pybids and
writes out the analysis bundle
Expand All @@ -27,7 +28,7 @@ def compile(flask_app, hash_id, run_ids=None, build=False):
'traceback': f'Error loading {hash_id} from db /n {str(e)}'
}
try:
a_id, analysis, resources, predictor_events, bids_dir = dump_analysis(
a_id, analysis, resources, pes, bids_dir = dump_analysis(
hash_id)
except Exception as e:
update_record(
Expand All @@ -38,7 +39,7 @@ def compile(flask_app, hash_id, run_ids=None, build=False):
raise
try:
tmp_dir, bundle_paths, _ = build_analysis(
analysis, predictor_events, bids_dir, run_ids, build=build)
analysis, pes, bids_dir, run_ids, build=build)
except Exception as e:
update_record(
analysis_object,
Expand Down Expand Up @@ -102,7 +103,7 @@ def generate_report(flask_app, hash_id, report_id,
}

try:
a_id, analysis, resources, predictor_events, bids_dir = dump_analysis(
a_id, analysis, resources, pes, bids_dir = dump_analysis(
hash_id)
except Exception as e:
update_record(
Expand All @@ -114,7 +115,7 @@ def generate_report(flask_app, hash_id, report_id,

try:
_, _, bids_analysis = build_analysis(
analysis, predictor_events, bids_dir, run_ids)
analysis, pes, bids_dir, run_ids)
except Exception as e:
# Todo: In future, could add more messages here
update_record(
Expand Down
2 changes: 1 addition & 1 deletion neuroscout/tasks/upload.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
Predictor, PredictorCollection, PredictorEvent, PredictorRun,
NeurovaultFileUpload)

from .utils import update_record
from .utils.io import update_record


def upload_collection(flask_app, filenames, runs, dataset_id, collection_id,
Expand Down
Empty file.
81 changes: 30 additions & 51 deletions neuroscout/tasks/compile.py → neuroscout/tasks/utils/build.py
Original file line number Diff line number Diff line change
@@ -1,35 +1,17 @@
import json
import numpy as np
import pandas as pd
from collections import defaultdict
from pathlib import Path
from tempfile import mkdtemp
from bids.analysis import Analysis
from bids.analysis import Analysis as BIDSAnalysis
from bids.layout import BIDSLayout
from grabbit.extensions.writable import build_path
from copy import deepcopy
import pandas as pd
from collections import defaultdict
from celery.utils.log import get_task_logger
from grabbit.extensions.writable import build_path

logger = get_task_logger(__name__)

PATHS = ['sub-{subject}_[ses-{session}_]task-{task}_[acq-{acquisition}_]'
'[run-{run}_]events.tsv']
REPORT_PATHS = ['sub-{subject}_[ses-{session}_]task-{task}_'
'[acq-{acquisition}_][run-{run}_]{type}.{extension}']


def get_entities(run):
    """Extract the BIDS entities of a run from a run mapping.

    Only the recognized keys ('number', 'session', 'subject',
    'acquisition') are kept, and keys whose value is None are dropped.
    The 'number' key is renamed to 'run' to match BIDS naming.
    """
    recognized = ('number', 'session', 'subject', 'acquisition')
    entities = {}
    for key, value in run.items():
        if key in recognized and value is not None:
            entities[key] = value

    # BIDS calls the run index 'run', the run object calls it 'number'.
    if 'number' in entities:
        entities['run'] = entities.pop('number')
    return entities


def writeout_events(analysis, pes, outdir):
Expand Down Expand Up @@ -85,17 +67,6 @@ def writeout_events(analysis, pes, outdir):
return paths


def merge_dictionaries(*dicts):
    """Merge mappings, collecting the distinct values seen under each key.

    A key that appears with a single distinct value maps to that value;
    a key seen with several distinct values maps to a list of them
    (order follows set iteration and is not guaranteed).
    """
    collected = {}
    for mapping in dicts:
        for key, value in mapping.items():
            collected.setdefault(key, set()).add(value)

    merged = {}
    for key, values in collected.items():
        as_list = list(values)
        merged[key] = as_list if len(as_list) > 1 else as_list[0]
    return merged


def build_analysis(analysis, predictor_events, bids_dir, run_id=None,
build=True):
tmp_dir = Path(mkdtemp())
Expand All @@ -122,13 +93,38 @@ def build_analysis(analysis, predictor_events, bids_dir, run_id=None,
# Load events and try applying transformations
bids_layout = BIDSLayout(bids_dir, derivatives=str(tmp_dir),
validate=False)
bids_analysis = Analysis(
bids_analysis = BIDSAnalysis(
bids_layout, deepcopy(analysis.get('model')))
bids_analysis.setup(**entities)

return tmp_dir, paths, bids_analysis


def get_entities(run):
    """Extract the BIDS entities of a run from a run mapping.

    Only the recognized keys ('number', 'session', 'subject',
    'acquisition') are kept, and keys whose value is None are dropped.
    The 'number' key is renamed to 'run' to match BIDS naming.
    """
    recognized = ('number', 'session', 'subject', 'acquisition')
    entities = {}
    for key, value in run.items():
        if key in recognized and value is not None:
            entities[key] = value

    # BIDS calls the run index 'run', the run object calls it 'number'.
    if 'number' in entities:
        entities['run'] = entities.pop('number')
    return entities


def merge_dictionaries(*dicts):
    """Merge mappings, collecting the distinct values seen under each key.

    A key that appears with a single distinct value maps to that value;
    a key seen with several distinct values maps to a list of them
    (order follows set iteration and is not guaranteed).
    """
    collected = {}
    for mapping in dicts:
        for key, value in mapping.items():
            collected.setdefault(key, set()).add(value)

    merged = {}
    for key, values in collected.items():
        as_list = list(values)
        merged[key] = as_list if len(as_list) > 1 else as_list[0]
    return merged


def impute_confounds(dense):
""" Impute first TR for confounds that may have n/as """
for imputable in ('framewise_displacement', 'std_dvars', 'dvars'):
Expand All @@ -140,20 +136,3 @@ def impute_confounds(dense):
# Impute the mean non-zero, non-NaN value
dense[imputable][0] = np.nanmean(vals[vals != 0])
return dense


class PathBuilder():
    """Build local output paths and public report URLs for report files."""

    def __init__(self, outdir, domain, hash, entities):
        # Production host gets https; anything else (e.g. local dev) http.
        if "neuroscout.org" in domain:
            scheme = "https://"
        else:
            scheme = "http://"
        self.outdir = outdir
        self.domain = scheme + domain
        self.hash = hash
        self.entities = entities

    def build(self, type, extension):
        """Return (local filesystem path, public URL) for one report file."""
        path_entities = dict(self.entities)
        path_entities['type'] = type
        path_entities['extension'] = extension
        file = build_path(path_entities, path_patterns=REPORT_PATHS)
        outfile = str(self.outdir / file)
        url = '{}/reports/{}/{}'.format(self.domain, self.hash, file)
        return outfile, url
27 changes: 24 additions & 3 deletions neuroscout/tasks/utils.py → neuroscout/tasks/utils/io.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,13 @@
import json
import tarfile
from pathlib import Path
from ..utils.db import put_record, dump_pe
from ..models import Analysis, PredictorEvent, Predictor, RunStimulus
from ..schemas.analysis import AnalysisFullSchema, AnalysisResourcesSchema
from grabbit.extensions.writable import build_path
from ...utils.db import put_record, dump_pe
from ...models import Analysis, PredictorEvent, Predictor, RunStimulus
from ...schemas.analysis import AnalysisFullSchema, AnalysisResourcesSchema

REPORT_PATHS = ['sub-{subject}_[ses-{session}_]task-{task}_'
'[acq-{acquisition}_][run-{run}_]{type}.{extension}']


def update_record(model, exception=None, **fields):
Expand Down Expand Up @@ -45,6 +49,23 @@ def write_tarball(paths, filename):
tar.add(path, arcname=arcname)


class PathBuilder():
    """Build local output paths and public report URLs for report files."""

    def __init__(self, outdir, domain, hash, entities):
        # Production host gets https; anything else (e.g. local dev) http.
        if "neuroscout.org" in domain:
            scheme = "https://"
        else:
            scheme = "http://"
        self.outdir = outdir
        self.domain = scheme + domain
        self.hash = hash
        self.entities = entities

    def build(self, type, extension):
        """Return (local filesystem path, public URL) for one report file."""
        path_entities = dict(self.entities)
        path_entities['type'] = type
        path_entities['extension'] = extension
        file = build_path(path_entities, path_patterns=REPORT_PATHS)
        outfile = str(self.outdir / file)
        url = '{}/reports/{}/{}'.format(self.domain, self.hash, file)
        return outfile, url


def create_pes(predictors, run_ids):
""" Create PredictorEvents from EFs """
all_pes = []
Expand Down
27 changes: 17 additions & 10 deletions neuroscout/tasks/viz.py → neuroscout/tasks/utils/viz.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,6 @@ def plot_design_matrix(dm_wide, scale=True):
dm_wide = (dm_wide - dm_wide.mean()) / (dm_wide.max() - dm_wide.min())
dm = melt_dm(dm_wide)

pts = alt.selection_multi(encodings=['x'])
time_labels = list(range(0, dm.scan_number.max(), 50))

base_color = alt.Color(
Expand All @@ -36,16 +35,18 @@ def plot_design_matrix(dm_wide, scale=True):
alt.X('regressor:N', sort=None, axis=alt.Axis(
labelAngle=-45, title=None, ticks=False)),
fill=base_color,
stroke=base_color,
opacity=alt.condition(pts, alt.value(1), alt.value(0.7))
stroke=base_color
).properties(
width=750,
height=450,
selection=pts
).interactive()
)

selection = alt.selection_multi(fields=['regressor'])
color = alt.Color('regressor:N', legend=None)

line = alt.Chart(
dm,
title='Timecourse (shift-click columns to select)').mark_line(
title='Timecourse (shift-click legend to select)').mark_line(
clip=True).encode(
alt.X('scan_number',
axis=alt.Axis(
Expand All @@ -54,18 +55,24 @@ def plot_design_matrix(dm_wide, scale=True):
),
y=alt.Y('value:Q',
axis=alt.Axis(title='Amplitude (scaled for visualization)')),
color=alt.Color(
'regressor', sort=None, legend=alt.Legend(orient='right'))
color=color
).transform_filter(
pts
selection
).properties(
width=650,
height=225,
)

legend = alt.Chart(dm).mark_square(size=200).encode(
y=alt.Y('regressor:N', axis=alt.Axis(orient='right')),
color=color
).add_selection(
selection
)

plt = alt.vconcat(
heat,
line,
line | legend,
resolve=alt.Resolve(
scale=alt.LegendResolveMap(color=alt.ResolveMode('independent')))
).configure_scale(bandPaddingInner=0.0)
Expand Down

0 comments on commit d9bce50

Please sign in to comment.