Skip to content

Commit

Permalink
Examples (#221)
Browse files Browse the repository at this point in the history
* Examples: Added examples for mrc_meguk notts and cambridge sites.
* Update CamCAN example scripts.
* Added recommended setting example scripts for Oxford.
* Updated Wakeman-Henson example scripts.

---------

Co-authored-by: Rukuang Huang <rukuang.huang@jesus.ox.ac.uk>
  • Loading branch information
cgohil8 and RukuangHuang committed Sep 4, 2023
1 parent 168bdae commit fbaedbd
Show file tree
Hide file tree
Showing 41 changed files with 1,444 additions and 877 deletions.
20 changes: 13 additions & 7 deletions examples/camcan/preprocess.py → examples/camcan/1_preprocess.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,27 +2,33 @@
"""

# Authors: Chetan Gohil <chetan.gohil@psych.ox.ac.uk>

import pathlib
from glob import glob
from dask.distributed import Client

from osl import preprocessing, utils

# Directories
BASE_DIR = "/well/woolrich/projects/camcan"
RAW_DIR = BASE_DIR + "/cc700/meg/pipeline/release005/BIDSsep/derivatives_rest/aa/AA_movecomp/aamod_meg_maxfilt_00002"
PREPROC_DIR = BASE_DIR + "/winter23/preproc"
PREPROC_DIR = BASE_DIR + "/summer23/preproc"

# Settings
config = """
preproc:
- crop: {tmin: 30}
- filter: {l_freq: 0.5, h_freq: 125, method: iir, iir_params: {order: 5, ftype: butter}}
- notch_filter: {freqs: 50 100}
- resample: {sfreq: 250}
- filter: {l_freq: 0.5, h_freq: 200, method: iir, iir_params: {order: 5, ftype: butter}}
- notch_filter: {freqs: 50 88 100 118 150 156 185, notch_widths: 2}
- resample: {sfreq: 400}
- bad_segments: {segment_len: 500, picks: mag, significance_level: 0.1}
- bad_segments: {segment_len: 500, picks: grad, significance_level: 0.1}
- bad_segments: {segment_len: 500, picks: mag, mode: diff, significance_level: 0.1}
- bad_segments: {segment_len: 500, picks: grad, mode: diff, significance_level: 0.1}
- bad_channels: {picks: meg, significance_level: 0.1}
- ica_raw: {picks: meg, n_components: 64}
- bad_channels: {picks: mag, significance_level: 0.1}
- bad_channels: {picks: grad, significance_level: 0.1}
- ica_raw: {picks: meg, n_components: 0.99}
- ica_autoreject: {picks: meg, ecgmethod: correlation, eogthreshold: auto}
- interpolate_bads: {}
"""
Expand All @@ -32,7 +38,7 @@

# Get input files
inputs = []
for subject in glob(RAW_DIR + "/sub-*"):
for subject in sorted(glob(RAW_DIR + "/sub-*")):
subject = pathlib.Path(subject).stem
inputs.append(RAW_DIR + f"/{subject}/mf2pt2_{subject}_ses-rest_task-rest_meg.fif")

Expand Down
108 changes: 108 additions & 0 deletions examples/camcan/2_coregister.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,108 @@
"""Coregisteration.
The scripts was first run for all subjects (with n_init=1). Then for subjects
whose coregistration looked a bit off we re-run this script just for that
particular subject with a higher n_init.
"""

# Authors: Chetan Gohil <chetan.gohil@psych.ox.ac.uk>

import numpy as np
import pathlib
from glob import glob
from dask.distributed import Client

from osl import source_recon, utils

# Directories
BASE_DIR = "/well/woolrich/projects/camcan"
PREPROC_DIR = BASE_DIR + "/summer23/preproc"
COREG_DIR = BASE_DIR + "/summer23/coreg"
ANAT_DIR = BASE_DIR + "/cc700/mri/pipeline/release004/BIDS_20190411/anat"
FSL_DIR = "/well/woolrich/projects/software/fsl"

# Files
PREPROC_FILE = (
PREPROC_DIR
+ "/mf2pt2_{0}_ses-rest_task-rest_meg"
+ "/mf2pt2_{0}_ses-rest_task-rest_meg_preproc_raw.fif"
)
SMRI_FILE = ANAT_DIR + "/{0}/anat/{0}_T1w.nii"

# Settings
config = """
source_recon:
- extract_fiducials_from_fif: {}
- fix_headshape_points: {}
- compute_surfaces:
include_nose: False
- coregister:
use_nose: False
use_headshape: True
#n_init: 50
"""


def fix_headshape_points(src_dir, subject, preproc_file, smri_file, epoch_file):
    """Clean up Polhemus headshape points before coregistration.

    Loads the headshape/fiducial points saved by extract_fiducials_from_fif,
    removes headshape points that tend to confuse RHINO's surface fit
    (points on the nose/lower face, points lateral to LPA/RPA, and points
    on the neck), then overwrites the headshape file in place.

    Parameters
    ----------
    src_dir : str
        Source reconstruction directory.
    subject : str
        Subject ID.
    preproc_file : str
        Preprocessed fif file (unused here; required by the batch API).
    smri_file : str
        Structural MRI file (unused here; required by the batch API).
    epoch_file : str
        Epoched fif file (unused here; required by the batch API).
    """
    filenames = source_recon.rhino.get_coreg_filenames(src_dir, subject)

    # Load saved headshape and fiducial points.
    # Assumes hs is a (3, N) array of x/y/z rows and coordinates are in
    # mm — TODO confirm against rhino's polhemus file format.
    hs = np.loadtxt(filenames["polhemus_headshape_file"])
    nas = np.loadtxt(filenames["polhemus_nasion_file"])
    lpa = np.loadtxt(filenames["polhemus_lpa_file"])
    rpa = np.loadtxt(filenames["polhemus_rpa_file"])

    # Lower the nasion by 4 cm, then discard headshape points within 7 cm
    # of it, i.e. points on the nose/lower face.
    # NOTE(review): an earlier comment said "remove points more than 7 cm
    # away", but the mask keeps distances > 70, which removes the *near*
    # points — the code (stripping nose points) matches the docstring's
    # intent, not that old comment.
    nas[2] -= 40
    distances = np.sqrt(
        (nas[0] - hs[0]) ** 2 + (nas[1] - hs[1]) ** 2 + (nas[2] - hs[2]) ** 2
    )
    keep = distances > 70
    hs = hs[:, keep]

    # Remove anything to the right of (outside) RPA
    keep = hs[0] < rpa[0]
    hs = hs[:, keep]

    # Remove anything to the left of (outside) LPA
    keep = hs[0] > lpa[0]
    hs = hs[:, keep]

    # Remove headshape points on the neck: anything more than 4 units
    # (presumably mm — TODO confirm) below the lower of LPA/RPA
    remove = hs[2] < min(lpa[2], rpa[2]) - 4
    hs = hs[:, ~remove]

    # Overwrite headshape file so RHINO's coregistration uses the
    # cleaned point cloud
    utils.logger.log_or_print(f"overwritting {filenames['polhemus_headshape_file']}")
    np.savetxt(filenames["polhemus_headshape_file"], hs)


if __name__ == "__main__":
    # Coregistration driver: discover subjects from the preprocessed
    # data on disk, then run the RHINO coregistration batch.
    utils.logger.set_up(level="INFO")
    source_recon.setup_fsl(FSL_DIR)

    # Infer subject IDs from the preprocessed fif files found on disk
    preproc_paths = sorted(glob(PREPROC_FILE.replace("{0}", "*")))
    subjects = [pathlib.Path(path).stem.split("_")[1] for path in preproc_paths]

    # Build the per-subject structural MRI and preprocessed file lists
    smri_files = [SMRI_FILE.format(subject) for subject in subjects]
    preproc_files = [PREPROC_FILE.format(subject) for subject in subjects]

    # Setup parallel processing (enable together with dask_client below)
    # client = Client(n_workers=8, threads_per_worker=1)

    # Run coregistration for all subjects
    source_recon.run_src_batch(
        config,
        src_dir=COREG_DIR,
        subjects=subjects,
        preproc_files=preproc_files,
        smri_files=smri_files,
        extra_funcs=[fix_headshape_points],
        # dask_client=True,
    )
Original file line number Diff line number Diff line change
Expand Up @@ -7,27 +7,35 @@
import pathlib
from glob import glob
from dask.distributed import Client

from osl import source_recon, utils

# Directories
BASE_DIR = "/well/woolrich/projects/camcan"
PREPROC_DIR = BASE_DIR + "/winter23/preproc"
SRC_DIR = BASE_DIR + "/winter23/src"
ANAT_DIR = BASE_DIR + "/cc700/mri/pipeline/release004/BIDS_20190411/anat"
PREPROC_FILE = PREPROC_DIR + "/mf2pt2_{0}_ses-rest_task-rest_meg/mf2pt2_{0}_ses-rest_task-rest_meg_preproc_raw.fif"
SMRI_FILE = ANAT_DIR + "/{0}/anat/{0}_T1w.nii"
PREPROC_DIR = BASE_DIR + "/summer23/preproc"
SRC_DIR = BASE_DIR + "/summer23/src"
FSL_DIR = "/well/woolrich/projects/software/fsl"

# Files
PREPROC_FILE = (
PREPROC_DIR
+ "/mf2pt2_{0}_ses-rest_task-rest_meg"
+ "/mf2pt2_{0}_ses-rest_task-rest_meg_preproc_raw.fif"
)

# Settings
config = """
source_recon:
- forward_model:
model: Single Layer
- beamform_and_parcellate:
freq_range: [1, 45]
freq_range: [1, 80]
chantypes: [mag, grad]
rank: {meg: 60}
parcellation_file: fmri_d100_parcellation_with_PCC_reduced_2mm_ss5mm_ds8mm.nii.gz
parcellation_file: Glasser52_binary_space-MNI152NLin6_res-8x8x8.nii.gz
method: spatial_basis
orthogonalisation: symmetric
extra_chans: [eog, ecg]
"""

if __name__ == "__main__":
Expand All @@ -36,20 +44,12 @@

# Get subjects
subjects = []
for subject in sorted(
glob(
PREPROC_DIR
+ "/mf2pt2_*_ses-rest_task-rest_meg"
+ "/mf2pt2_sub-*_ses-rest_task-rest_meg_preproc_raw.fif"
)
):
for subject in sorted(glob(PREPROC_FILE.replace("{0}", "*"))):
subjects.append(pathlib.Path(subject).stem.split("_")[1])

# Setup files
smri_files = []
preproc_files = []
for subject in subjects:
smri_files.append(SMRI_FILE.format(subject))
preproc_files.append(PREPROC_FILE.format(subject))

# Setup parallel processing
Expand All @@ -61,6 +61,5 @@
src_dir=SRC_DIR,
subjects=subjects,
preproc_files=preproc_files,
smri_files=smri_files,
dask_client=True,
)
10 changes: 7 additions & 3 deletions examples/camcan/sign_flip.py → examples/camcan/4_sign_flip.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,12 +2,16 @@
"""

# Authors: Chetan Gohil <chetan.gohil@psych.ox.ac.uk>

from glob import glob
from dask.distributed import Client

from osl import utils
from osl.source_recon import find_template_subject, run_src_batch, setup_fsl

SRC_DIR = "/well/woolrich/projects/camcan/winter23/src"
# Directories
SRC_DIR = "/well/woolrich/projects/camcan/summer23/src"
FSL_DIR = "/well/woolrich/projects/software/fsl"

if __name__ == "__main__":
Expand All @@ -30,8 +34,8 @@
template: {template}
n_embeddings: 15
standardize: True
n_init: 3
n_iter: 3000
n_init: 5
n_iter: 5000
max_flips: 20
"""

Expand Down
19 changes: 19 additions & 0 deletions examples/camcan/5_prepare.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
"""Data preparation: time-delay embedding and principal component analysis.
"""

# Authors: Chetan Gohil <chetan.gohil@psych.ox.ac.uk>

from glob import glob

from osl_dynamics.data import Data

files = sorted(glob("/well/woolrich/projects/camcan/summer23/src/*/sflip_parc-raw.fif"))
data = Data(files, picks="misc", reject_by_annotation="omit", n_jobs=16)
methods = {
"tde_pca": {"n_embeddings": 15, "n_pca_components": 100},
"standardize": {},
}
data.prepare(methods)
data.save("/well/woolrich/projects/camcan/summer23/prepared")
data.delete_dir()
Loading

0 comments on commit fbaedbd

Please sign in to comment.